Dataset columns (per-example statistics as reported by the dataset viewer):

| column | type | value range |
|---|---|---|
| file_name | large_string | lengths 4 to 69 |
| prefix | large_string | lengths 0 to 26.7k |
| suffix | large_string | lengths 0 to 24.8k |
| middle | large_string | lengths 0 to 2.12k |
| fim_type | large_string | 4 classes |
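The four text columns describe a fill-in-the-middle (FIM) split of each source file: concatenating `prefix`, `middle`, and `suffix` reproduces the original file, and `fim_type` records how the hole was chosen (the classes appearing below include `random_line_split`, `identifier_name`, `identifier_body`, and `conditional_block`). A minimal sketch of that reconstruction, assuming a hypothetical `FimRecord` struct whose fields simply mirror the column names above:

```rust
/// Hypothetical in-memory form of one dataset row; the field names mirror
/// the dataset columns, but this struct is not part of the dataset itself.
struct FimRecord {
    file_name: String,
    prefix: String,
    middle: String,
    suffix: String,
    fim_type: String,
}

impl FimRecord {
    /// Re-assemble the original file contents: prefix + middle + suffix.
    fn reconstruct(&self) -> String {
        let mut out = String::with_capacity(
            self.prefix.len() + self.middle.len() + self.suffix.len(),
        );
        out.push_str(&self.prefix);
        out.push_str(&self.middle);
        out.push_str(&self.suffix);
        out
    }
}
```

The example rows below follow this column order, with `|` separating the columns inside each row; because the text columns span many lines, each row is shown flattened rather than as a one-line table row.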
source_contributions.rs | extern crate clap;
extern crate csv;
extern crate reqwest;
extern crate serde;
extern crate failure;
#[macro_use]
extern crate failure_derive;
use clap::{App, Arg};
use csv::StringRecord;
use reqwest::{Client, Url};
use serde::de::DeserializeOwned;
use serde::Deserialize;
use std::{thread, time};
use std::collections::HashSet;
use std::env;
use std::fs::{File, OpenOptions};
use std::io::Write;
enum HttpMethod {
Get,
}
#[derive(Debug, Fail)]
enum AppError {
// Returned when we couldn't extract an owner and a repo from the repository URL.
#[fail(display = "Couldn't extract project metadata for {}", repo_url)]
MetadataExtractionFailed { repo_url: String },
// Returned in case of generic I/O error.
#[fail(display = "i/o error when reading/writing on the CSV file {}", _0)]
IOError(std::io::Error),
// Returned when the OSRANK_GITHUB_TOKEN is not present as an env var.
#[fail(display = "Couldn't find OSRANK_GITHUB_TOKEN in your env vars: {}", _0)]
GithubTokenNotFound(std::env::VarError),
// Returned when we failed to issue the HTTP request.
#[fail(display = "Request to Github failed: {}", _0)]
GithubAPIRequestFailed(reqwest::Error),
// Returned when the Github API returned a non-2xx status code.
#[fail(display = "Github returned non-200 {} with body {}", _0, _1)]
GithubAPINotOK(reqwest::StatusCode, String),
// Returned when the parsing of the http URL to query Github failed.
#[fail(display = "Github URL failed parsing into a valid HTTP URL: {}", _0)]
GithubUrlParsingFailed(reqwest::UrlError),
// Returned when the JSON returned by the Github API couldn't be deserialised.
#[fail(display = "Couldn't deserialise the JSON returned by Github: {}", _0)]
DeserialisationFailure(reqwest::Error),
// Returned when we have run out of retries.
#[fail(display = "No more retries.")]
NoRetriesLeft,
}
impl From<std::io::Error> for AppError {
fn from(err: std::io::Error) -> AppError {
AppError::IOError(err)
}
}
impl From<std::env::VarError> for AppError {
fn from(err: std::env::VarError) -> AppError {
AppError::GithubTokenNotFound(err)
}
}
impl From<reqwest::Error> for AppError {
fn from(err: reqwest::Error) -> AppError {
AppError::GithubAPIRequestFailed(err)
}
}
impl From<reqwest::UrlError> for AppError {
fn from(err: reqwest::UrlError) -> AppError {
AppError::GithubUrlParsingFailed(err)
}
}
// The order of the fields must match the order of the columns in the input file.
#[derive(Debug)]
struct Project<'a> {
id: u32,
platform: &'a str,
project_name: &'a str,
repository_url: &'a str,
repository_fork: bool,
repository_display_name: &'a str,
}
struct Retries {
retries_num: u8,
}
impl Retries {
fn new(retries_num: u8) -> Self {
Retries { retries_num }
}
}
#[derive(Debug, Deserialize)]
struct GithubContribution {
total: u64,
author: GithubUser,
weeks: Vec<GithubWeek>,
}
#[derive(Debug, Deserialize)]
struct GithubWeek {
// Unix timestamp of the beginning of this week.
w: u64,
}
#[derive(Debug, Deserialize)]
struct GithubUser {
login: String,
id: u64,
}
type UniqueProjects = HashSet<String>;
/// Calls the Github API using the given HttpMethod and url_path. Because some
/// endpoints (e.g. the statistics one) serve cached information and may return
/// a 202 with an empty JSON body while the stats are still being computed, we
/// wait a little and retry, up to a certain number of times.
fn call_github<T>(
http_client: &Client,
http_method: HttpMethod,
token: &str,
url_path: &str,
retries: Retries,
) -> Result<T, AppError>
where
T: DeserializeOwned,
{
let retries_left = retries.retries_num;
if retries_left == 0 {
Err(AppError::NoRetriesLeft)
} else {
let bearer = format!("Bearer {}", token);
match http_method {
HttpMethod::Get => {
let url: Url = format!("{}{}", GITHUB_BASE_URL, url_path)
.as_str()
.parse()?;
let mut res = http_client
.get(url)
.header(reqwest::header::AUTHORIZATION, bearer.as_str())
.send()?;
match res.status() {
reqwest::StatusCode::OK => res
.json()
.or_else(|e| Err(AppError::DeserialisationFailure(e))),
// Github needs a bit more time to compute the stats.
// We retry.
reqwest::StatusCode::ACCEPTED => {
println!("Retrying, only {} retries left...", retries_left);
thread::sleep(time::Duration::from_secs(1));
call_github(
http_client,
http_method,
token,
url_path,
Retries::new(retries_left - 1),
)
}
err => {
let body = res.text()?;
Err(AppError::GithubAPINotOK(err, body))
}
}
}
}
}
}
fn deserialise_project(sr: &StringRecord) -> Option<Project> {
if let Some(Ok(pid)) = sr.get(0).map(|s: &str| s.parse::<u32>()) {
let platform = sr.get(1);
let project_name = sr.get(2);
let repository_url = sr.get(9);
let repository_fork = sr.get(24).and_then(|s: &str| match s {
"0" => Some(false),
"1" => Some(true),
"t" => Some(true),
"f" => Some(false),
"true" => Some(true),
"false" => Some(false),
_ => None,
});
let repository_display_name = sr.get(54);
match (
platform,
project_name,
repository_url,
repository_fork,
repository_display_name,
) {
(Some(pl), Some(pn), Some(ru), Some(rf), Some(dn)) => Some(Project {
id: pid,
platform: pl,
project_name: pn,
repository_url: ru,
repository_fork: rf,
repository_display_name: dn,
}),
_ => None,
}
} else {
None
}
}
fn source_contributors(
github_token: &str,
path: &str,
platform: &str,
resume_from: Option<&str>,
) -> Result<(), AppError> {
let projects_file = File::open(path)?;
// Build the CSV reader and iterate over each record.
let mut rdr = csv::ReaderBuilder::new()
.flexible(true)
.from_reader(projects_file);
let mut contributions = OpenOptions::new()
.append(resume_from.is_some())
.write(resume_from.is_none())
.create_new(resume_from.is_none()) // Allow re-opening if we need to resume.
.open(format!("data/{}_contributions.csv", platform.to_lowercase()).as_str())?;
let mut unique_projects = HashSet::new();
let http_client = reqwest::Client::new();
let mut skip_resumed_record = resume_from.is_some();
// Write the header (if we are not resuming).
if resume_from.is_none() {
contributions.write_all(b"ID,MAINTAINER,REPO,CONTRIBUTIONS,NAME\n")?;
}
for result in rdr
.records()
.filter_map(|e| e.ok())
.filter(by_platform(platform))
.skip_while(resumes(resume_from))
{
// Since we resume from the last (known) record rather than the next one to
// process, we need to skip that record so we don't write a duplicate.
if skip_resumed_record {
skip_resumed_record = false;
continue;
}
if let Some(project) = deserialise_project(&result) {
extract_contribution(
&http_client,
&mut contributions,
&mut unique_projects,
project,
github_token,
)?;
}
}
Ok(())
}
| match repo_url.split('/').collect::<Vec<&str>>().as_slice() {
[_, "", "github.com", owner, repo] => Ok((owner, repo)),
_ => Err(AppError::MetadataExtractionFailed {
repo_url: repo_url.to_string(),
}),
}
}
// Extract the contributions for this project. For now only GitHub is
// supported.
fn extract_contribution(
http_client: &Client,
contributions: &mut File,
unique_projects: &mut UniqueProjects,
project: Project,
auth_token: &str,
) -> Result<(), AppError> {
// If this is an authentic project and not a fork, proceed.
if !project.repository_fork && unique_projects.get(project.project_name) == None {
match extract_github_owner_and_repo(project.repository_url) {
Err(err) => {
println!("Skipping {} due to {:#?}", &project.repository_url, err);
Ok(())
}
Ok((owner, name)) => {
unique_projects.insert(String::from(project.project_name));
println!("Processing {} ({}/{})", project.project_name, owner, name);
let res: Result<Vec<GithubContribution>, AppError> = call_github(
&http_client,
HttpMethod::Get,
auth_token,
format!("/repos/{}/{}/stats/contributors", owner, name).as_str(),
Retries::new(5),
);
match res {
Err(err) => {
println!("Skipping {} due to {:#?}", &project.repository_url, err);
}
Ok(stats) => {
let stats_len = stats.len();
for contribution in stats {
if is_maintainer(&owner, &contribution, stats_len) {
contributions.write_all(
format!(
"{},github@{},{},{},{}\n",
project.id,
contribution.author.login,
project.repository_url,
contribution.total,
project.project_name
)
.as_bytes(),
)?;
}
}
}
}
// Wait 800 ms so we don't overload Github or hit the quota limit:
// GH allows us 5000 requests per hour, i.e. one request roughly every
// 720 ms, so pausing 800 ms keeps us just under the theoretical limit
// while preserving a certain slack.
let delay = time::Duration::from_millis(800);
thread::sleep(delay);
Ok(())
}
}
} else {
Ok(())
}
}
// FIXME(adn) Totally arbitrary choice: consider as a maintainer of a project
// any user that has been contributing for more than 6 months. Furthermore,
// they need to have a somewhat steady contribution history.
fn is_maintainer(owner: &str, stat: &GithubContribution, stats_len: usize) -> bool {
// A user is considered a maintainer if any of the following holds:
// 1. the owner of the repo is equal to their username;
// 2. they have more than 50 contributions;
// 3. they are the only contributor to the repo.
stat.author.login == owner || stat.total > 50 || stats_len == 1
}
fn by_platform<'a>(platform: &'a str) -> Box<dyn FnMut(&StringRecord) -> bool + 'a> {
Box::new(move |e| e[1] == *platform)
}
// Returns a predicate for `skip_while`: it always returns false if the user
// didn't ask to resume from a particular project URL; otherwise it keeps
// skipping StringRecord entries until the record matching the input URL is reached.
fn resumes<'a>(resume_from: Option<&'a str>) -> Box<dyn FnMut(&StringRecord) -> bool + 'a> {
Box::new(move |e| match resume_from {
None => false,
Some(repo_url) => Some(repo_url)!= e.get(9),
})
}
const GITHUB_BASE_URL: &str = "https://api.github.com";
fn main() -> Result<(), AppError> {
let github_token = env::var("OSRANK_GITHUB_TOKEN")?;
let input_help = r###"Where to read the data from.
Example: ~/Downloads/libraries-1.4.0-2018-12-22/projects_with_repository_fields-1.4.0-2018-12-22.csv"###;
let matches = App::new("Source contributions from Github")
.arg(
Arg::with_name("input")
.short("i")
.long("input")
.help(input_help)
.index(1)
.required(true),
)
.arg(
Arg::with_name("platform")
.short("p")
.long("platform")
.help("Example: Rust,NPM,Rubygems,..")
.index(2)
.required(true),
)
.arg(
Arg::with_name("resume-from")
.long("resume-from")
.help("which repository URL to resume from.")
.takes_value(true)
.required(false),
)
.get_matches();
source_contributors(
&github_token,
matches
.value_of("input")
.expect("input parameter wasn't given."),
matches
.value_of("platform")
.expect("platform parameter wasn't given."),
matches.value_of("resume-from"),
)
}
#[test]
fn test_rncryptor_deserialise() {
let input:String = String::from(r###"
2084361,Cargo,rncryptor,2016-12-23 09:57:46 UTC,2018-01-03 08:59:05 UTC,Rust implementation of the RNCryptor AES file format,"",http://rncryptor.github.io/,MIT,https://github.com/RNCryptor/rncryptor-rs,1,0,2016-12-23 09:57:29 UTC,0.1.0,,0,Rust,,2018-01-03 08:59:02 UTC,0,17362897,GitHub,RNCryptor/rncryptor-rs,Pure Rust implementation of the RNCryptor cryptographic format by Rob Napier,false,2016-12-18 17:37:39 UTC,2016-12-30 02:04:24 UTC,2016-12-26 17:33:32 UTC,,58,4,Rust,true,true,false,0,,1,master,0,76797122,,MIT,0,"","","","","","","",,2016-12-18 17:38:00 UTC,2,GitHub,,git,,,""
"###);
let mut rdr = csv::ReaderBuilder::new()
.flexible(true)
.from_reader(input.as_bytes());
for result in rdr.records() {
let r = result.expect("impossible");
assert_eq!(deserialise_project(&r).is_some(), true)
}
}
#[test]
fn skip_while_ok() {
let a = [1, -1i32, 0, 1];
let mut iter = a.into_iter().skip_while(|x| x.is_negative());
assert_eq!(iter.next(), Some(&1));
} | fn extract_github_owner_and_repo(repo_url: &str) -> Result<(&str, &str), AppError> { | random_line_split |
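The `middle` of the row above is the signature of `extract_github_owner_and_repo`, whose body (shown in the suffix) matches on `repo_url.split('/')`. That slice pattern works because an `https://github.com/{owner}/{repo}` URL splits on `'/'` into exactly five pieces, with an empty segment between the two slashes of the scheme. A small standalone sketch of that behaviour (not part of the original file; the sample URL is taken from the test record above):

```rust
fn main() {
    let repo_url = "https://github.com/RNCryptor/rncryptor-rs";
    let parts: Vec<&str> = repo_url.split('/').collect();
    // ["https:", "", "github.com", "RNCryptor", "rncryptor-rs"]
    assert_eq!(
        parts,
        vec!["https:", "", "github.com", "RNCryptor", "rncryptor-rs"]
    );

    // The same slice pattern used by extract_github_owner_and_repo:
    if let [_, "", "github.com", owner, repo] = parts.as_slice() {
        println!("owner = {}, repo = {}", owner, repo);
    }
}
```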
lib.rs | /*!
This crate implements various macros detailed in [The Little Book of Rust Macros](https://danielkeep.github.io/tlborm/).
If you use selective macro importing, you should make sure to *always* use the `tlborm_util` macro, as most macros in this crate depend on it being present.
*/
/**
Forces the parser to interpret this macro's argument as an expression, even in the presence of `tt` substitutions.
See [TLBoRM: AST Coercion](https://danielkeep.github.io/tlborm/book/blk-ast-coercion.html).
## Examples
```rust
# #[macro_use] extern crate tlborm;
# fn main() {
assert_eq!(as_expr!(42), 42);
macro_rules! conceal_as_tts {
// The `tt` substitution will break regular parsing.
(passthru, $($tts:tt)*) => {$($tts)*};
($callback:ident, $($tts:tt)*) => {$callback!($($tts)*)};
}
assert_eq!(conceal_as_tts!(as_expr, 2 * (3 + 4)), 14);
# }
```
The following will *not* compile:
<!-- NO-FAILING-TESTS -->
```ignore
# #[macro_use(as_expr, tlborm_util)] extern crate tlborm;
# fn main() {
# macro_rules! conceal_as_tts {
# (passthru, $($tts:tt)*) => {$($tts)*};
# ($callback:ident, $($tts:tt)*) => {$callback!($($tts)*)};
# }
assert_eq!(conceal_as_tts!(passthru, 2 * (3 + 4)), 14);
# }
```
*/
#[macro_export]
macro_rules! as_expr { ($e:expr) => {$e} }
/**
Forces the parser to interpret this macro's argument as an item, even in the presence of `tt` substitutions.
See [TLBoRM: AST Coercion](https://danielkeep.github.io/tlborm/book/blk-ast-coercion.html).
## Examples
```rust
# #[macro_use(as_item, tlborm_util)] extern crate tlborm;
macro_rules! enoom {
($name:ident { $($body:tt)* }) => {
as_item! {
// The `tt` substitution breaks regular parsing.
enum $name { $($body)* }
}
}
}
enoom! {
Dash { Solid, Dash, Dot }
}
# fn main() {}
```
*/
#[macro_export]
macro_rules! as_item { ($i:item) => {$i} }
/**
Forces the parser to interpret this macro's argument as a pattern, even in the presence of `tt` substitutions.
See [TLBoRM: AST Coercion](https://danielkeep.github.io/tlborm/book/blk-ast-coercion.html).
## Examples
```rust
# #[macro_use(as_pat, tlborm_util)] extern crate tlborm;
# fn main() {
macro_rules! tuple_pat {
($($names:tt)*) => {
// The `tt` substitution breaks regular parsing.
as_pat!( ( $($names,)* ) )
}
}
match (1, 2, 3) {
tuple_pat!(a b c) => assert_eq!((a, b, c), (1, 2, 3))
}
# }
```
*/
#[macro_export]
macro_rules! as_pat { ($p:pat) => {$p} }
/**
Forces the parser to interpret this macro's argument as a statement, even in the presence of `tt` substitutions.
See [TLBoRM: AST Coercion](https://danielkeep.github.io/tlborm/book/blk-ast-coercion.html).
## Examples
```rust
# #[macro_use(as_stmt, tlborm_util)] extern crate tlborm;
# fn main() {
macro_rules! let_stmt {
($name:tt = $($init:tt)*) => {
// The `tt` substitution breaks regular parsing.
as_stmt!(let $name = $($init)*);
}
}
let_stmt!(x = 42);
assert_eq!(x, 42);
# }
```
*/
#[macro_export]
macro_rules! as_stmt { ($s:stmt) => {$s} }
/**
Expands to the number of identifiers provided. The expansion is suitable for use in a constant expression, and is of type `u32`.
The identifiers provided **must** be mutually unique; *i.e.* there cannot be any repeated identifiers. In addition, the identifier `__CountIdentsLast` **must not** be used in the invocation. This macro should be usable for even very large numbers of identifiers.
See [TLBoRM: Counting (Enum counting)](https://danielkeep.github.io/tlborm/book/blk-counting.html#enum-counting).
## Examples
```rust
# #[macro_use(count_idents_enum, tlborm_util)] extern crate tlborm;
# fn main() {
const NUM: u32 = count_idents_enum!(Silly swingers get your feeling under spell);
assert_eq!(NUM, 7);
# }
```
*/
#[macro_export]
macro_rules! count_idents_enum {
($($idents:ident)*) => {tlborm_util!(@count_idents_enum $($idents)*)};
}
/**
Expands to the number of token trees provided. The expansion is suitable for use in a constant expression, and is of type `usize`.
This macro is limited to input of approximately 500 tokens, but efficiently expands in a single pass. This makes it useful in recursion-limited contexts, or when you want fast expansion of small inputs.
See [TLBoRM: Counting (Repetition with replacement)](https://danielkeep.github.io/tlborm/book/blk-counting.html#repetition-with-replacement).
## Examples
```rust
# #[macro_use(count_tts_flat, tlborm_util)] extern crate tlborm;
# fn main() {
const NUM: usize = count_tts_flat!(Everybody's rhythm mad (and I love that rhythm too!));
assert_eq!(NUM, 5);
# }
```
*/
#[macro_export]
macro_rules! count_tts_flat {
($($tts:tt)*) => {tlborm_util!(@count_tts_flat $($tts)*)};
}
/**
Expands to the number of token trees provided. The expansion is suitable for use in a constant expression, and is of type `usize`.
This macro is limited to input of approximately 1,200 tokens, but requires multiple recursive expansion passes. This macro is useful when you need to count a large number of things *and* you need the result to be a compile-time constant.
See [TLBoRM: Counting (Recursion)](https://danielkeep.github.io/tlborm/book/blk-counting.html#recursion).
## Examples
```rust
# #[macro_use(count_tts_recur, tlborm_util)] extern crate tlborm;
# fn main() {
const NUM: usize = count_tts_recur!(De l'enfer au paradis!);
assert_eq!(NUM, 6);
# }
```
*/
#[macro_export]
macro_rules! count_tts_recur {
($($tts:tt)*) => {tlborm_util!(@count_tts_recur $($tts)*)};
}
/**
Expands to the number of token trees provided. The expansion is **not** suitable for use in a constant expression, though it should be optimised to a simple integer constant in release builds.
This macro has no practical limit (and has been tested to over 10,000 tokens).
See [TLBoRM: Counting (Slice length)](https://danielkeep.github.io/tlborm/book/blk-counting.html#slice-length).
## Examples
```rust
# #[macro_use(count_tts_slice, tlborm_util)] extern crate tlborm;
# fn main() {
let num = count_tts_slice!(You have no idea how tedious this is! #examplesrhard);
assert_eq!(num, 11);
# }
```
*/
#[macro_export]
macro_rules! count_tts_slice {
($($tts:tt)*) => {tlborm_util!(@count_tts_slice $($tts)*)};
}
/**
Expands to an invocation of the `$callback` macro, with a list of the unitary variant names of the provided enum separated by commas. The invocation's argument will be prefixed by the contents of `$arg`.
If `$arg` is of the form `{…}`, then the expansion will be parsed as one or more items. If it is of the form `(…)`, the expansion will be parsed as an expression.
See [TLBoRM: Enum Parsing](https://danielkeep.github.io/tlborm/book/blk-enum-parsing.html).
## Examples
```rust
# #[macro_use(parse_unitary_variants, tlborm_util)] extern crate tlborm;
# fn main() {
macro_rules! variant_list {
(sep: $sep:tt, ($($var:ident),*)) => {
concat!($(stringify!($var), $sep,)*)
}
}
const LIST: &'static str = parse_unitary_variants!(
enum Currency { Trenni, Phiring, Ryut, FakeMarinne, Faram, SoManyCoins }
=> variant_list(sep: ", ", )
);
assert_eq!(LIST, "Trenni, Phiring, Ryut, FakeMarinne, Faram, SoManyCoins, ");
# }
```
*/
#[macro_export]
macro_rules! parse_unitary_variants {
(
enum $name:ident {$($body:tt)*} => $callback:ident $arg:tt
) => {
tlborm_util! {
@parse_unitary_variants
enum $name {$($body)*} => $callback $arg
}
};
}
|
This is typically used to replace elements of an arbitrary token sequence with some fixed expression.
See [TLBoRM: Repetition replacement](https://danielkeep.github.io/tlborm/book/pat-repetition-replacement.html).
## Examples
```rust
# #[macro_use(replace_expr, tlborm_util)] extern crate tlborm;
# fn main() {
macro_rules! tts_to_zeroes {
($($tts:tt)*) => {
[$(replace_expr!($tts 0)),*]
}
}
assert_eq!(tts_to_zeroes!(pub const unsafe impl), [0, 0, 0, 0]);
# }
```
*/
#[macro_export]
macro_rules! replace_expr {
($_t:tt $sub:expr) => {tlborm_util!(@replace_expr $_t $sub)};
}
#[doc(hidden)]
#[macro_export]
macro_rules! tlborm_util {
(@as_expr $e:expr) => {$e};
(@as_item $($i:item)+) => {$($i)+};
(@as_pat $p:pat) => {$p};
(@as_stmt $s:stmt) => {$s};
(@count_idents_enum $($idents:ident)*) => {
{
#[allow(dead_code, non_camel_case_types)]
enum Idents { $($idents,)* __CountIdentsLast }
const COUNT: u32 = Idents::__CountIdentsLast as u32;
COUNT
}
};
(@count_tts_flat $($tts:tt)*) => {0usize $(+ tlborm_util!(@replace_expr $tts 1usize))*};
(@count_tts_recur
$_a:tt $_b:tt $_c:tt $_d:tt $_e:tt
$_f:tt $_g:tt $_h:tt $_i:tt $_j:tt
$_k:tt $_l:tt $_m:tt $_n:tt $_o:tt
$_p:tt $_q:tt $_r:tt $_s:tt $_t:tt
$($tail:tt)*)
=> {20usize + tlborm_util!(@count_tts_recur $($tail)*)};
(@count_tts_recur
$_a:tt $_b:tt $_c:tt $_d:tt $_e:tt
$_f:tt $_g:tt $_h:tt $_i:tt $_j:tt
$($tail:tt)*)
=> {10usize + tlborm_util!(@count_tts_recur $($tail)*)};
(@count_tts_recur
$_a:tt $_b:tt $_c:tt $_d:tt $_e:tt
$($tail:tt)*)
=> {5usize + tlborm_util!(@count_tts_recur $($tail)*)};
(@count_tts_recur
$_a:tt
$($tail:tt)*)
=> {1usize + tlborm_util!(@count_tts_recur $($tail)*)};
(@count_tts_recur) => {0usize};
(@count_tts_slice $($tts:tt)*)
=> {<[()]>::len(&[$(tlborm_util!(@replace_expr $tts ())),*])};
(@replace_expr $_t:tt $sub:expr) => {$sub};
// ========================================================================
// @parse_unitary_variants
(
@parse_unitary_variants
enum $name:ident {$($body:tt)*} => $callback:ident $arg:tt
) => {
tlborm_util! {
@collect_unitary_variants
($callback $arg), ($($body)*,) -> ()
}
};
// ========================================================================
// @collect_unitary_variants
// Exit rules.
(
@collect_unitary_variants ($callback:ident ( $($args:tt)* )),
($(,)*) -> ($($var_names:ident,)*)
) => {
tlborm_util! {
@as_expr
$callback!{ $($args)* ($($var_names),*) }
}
};
(
@collect_unitary_variants ($callback:ident { $($args:tt)* }),
($(,)*) -> ($($var_names:ident,)*)
) => {
tlborm_util! {
@as_item
$callback!{ $($args)* ($($var_names),*) }
}
};
// Consume an attribute.
(
@collect_unitary_variants $fixed:tt,
(#[$_attr:meta] $($tail:tt)*) -> ($($var_names:tt)*)
) => {
tlborm_util! {
@collect_unitary_variants $fixed,
($($tail)*) -> ($($var_names)*)
}
};
// Handle a variant, optionally with an with initialiser.
(
@collect_unitary_variants $fixed:tt,
($var:ident $(= $_val:expr)*, $($tail:tt)*) -> ($($var_names:tt)*)
) => {
tlborm_util! {
@collect_unitary_variants $fixed,
($($tail)*) -> ($($var_names)* $var,)
}
};
// Abort on variant with a payload.
(
@collect_unitary_variants $fixed:tt,
($var:ident $_struct:tt, $($tail:tt)*) -> ($($var_names:tt)*)
) => {
const _error: () = "cannot parse unitary variants from enum with non-unitary variants";
};
} | /**
Utility macro that takes a token tree and an expression, expanding to the expression. | random_line_split |
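The three `count_tts_*` macros documented in the row above trade off maximum input size against how they expand (single pass, recursion, or slice length). A small hedged comparison, assuming the crate's macros behave as documented there:

```rust
#[macro_use]
extern crate tlborm;

fn main() {
    // Five token trees in each case: `a`, `b`, `c`, `(d e)`, `f`.
    // _flat expands in one pass (~500 tokens max), _recur recurses
    // (~1,200 tokens max), _slice has no practical limit but is not
    // usable in a constant expression.
    const FLAT: usize = count_tts_flat!(a b c (d e) f);
    const RECUR: usize = count_tts_recur!(a b c (d e) f);
    let slice = count_tts_slice!(a b c (d e) f);
    assert_eq!(FLAT, 5);
    assert_eq!(RECUR, 5);
    assert_eq!(slice, 5);
}
```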
mod.rs | use game::PieceType::BLACK;
use game::PieceType::WHITE;
use game::players::ai::IdiotAi;
use self::board::Board;
use self::coord::CoordinationFlat;
use self::players::LocalHumanPlayer;
use self::players::Player;
use std::char;
use std::fmt;
mod board;
mod players;
mod coord {
use std::fmt;
/// Define coordination type
type Coordination = usize;
// 2D Coordination
#[derive(Copy, Clone)]
pub struct CoordinationFlat {
pub x: Coordination,
pub y: Coordination
}
impl CoordinationFlat {
pub fn new(x: Coordination, y: Coordination) -> CoordinationFlat {
CoordinationFlat { x, y }
}
}
impl fmt::Display for CoordinationFlat {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "({}, {})", self.x, self.y)
}
}
}
/// Define array index type
pub type ArrayIndex = usize;
/// The Piece type includes black and white
#[derive(Copy, Clone, Eq, PartialEq)]
pub enum PieceType {
WHITE, BLACK
}
impl PieceType {
pub fn get_name(&self) -> &str {
match self {
PieceType::WHITE => "White",
PieceType::BLACK => "Black"
}
}
pub fn | (&self) -> board::BoardPieceType {
match self {
PieceType::BLACK => board::BoardPieceType::BLACK,
PieceType::WHITE => board::BoardPieceType::WHITE,
}
}
}
impl fmt::Display for PieceType {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.get_name())
}
}
#[derive(Copy, Clone, Eq, PartialEq)]
pub enum GameBuilderPlayerType {
Human,
IdiotAi,
}
/// Game builder
pub struct GameBuilder {
first_player: GameBuilderPlayerType,
second_player: GameBuilderPlayerType
}
impl GameBuilder {
/// Create a game builder object
pub fn new() -> GameBuilder {
GameBuilder {
first_player: GameBuilderPlayerType::Human,
second_player: GameBuilderPlayerType::Human
}
}
/// Set the first player (Uses black piece)
pub fn set_first_player(&mut self, player_type: GameBuilderPlayerType) -> &mut Self {
self.first_player = player_type;
self
}
/// Set the second player (Uses white piece)
pub fn set_second_player(&mut self, player_type: GameBuilderPlayerType) -> &mut Self {
self.second_player = player_type;
self
}
pub fn build(&self) -> Game {
Game::new(
GameBuilder::create_player(self.first_player, BLACK),
GameBuilder::create_player(self.second_player, WHITE),
)
}
fn create_player(player_type: GameBuilderPlayerType, piece: PieceType) -> Box<Player> {
match player_type {
GameBuilderPlayerType::Human => Box::new(LocalHumanPlayer::new(piece)),
GameBuilderPlayerType::IdiotAi => Box::new(IdiotAi::new(piece))
}
}
}
///
/// Game context passed to players; essentially a snapshot of the Game struct
///
pub(in game) struct GameContext {
/// A board 2D array copy
board: Board,
/// None if it is the first move of the game
last_point: Option<CoordinationFlat>,
/// Total pieces in the game
total_pieces: usize
}
impl GameContext {
pub fn new(board: Board, last_point: Option<CoordinationFlat>, total_pieces: usize)
-> Self {
GameContext {
board,
last_point,
total_pieces
}
}
}
///
/// A Gomoku game instance.
///
pub struct Game {
board: Board,
players: [Box<Player>; 2],
current_player: usize,
// TODO Can history put reference of player into?
history: Vec<(PieceType, CoordinationFlat)>,
started: bool,
ended: bool,
}
impl Game {
/// Create a new game with black first
fn new(first_player: Box<Player>, second_player: Box<Player>) -> Game {
Game {
board: Board::new(),
current_player: 0,
players: [first_player, second_player],
history: vec![],
started: false,
ended: false,
}
}
/// Create a game builder object; equivalent to GameBuilder::new()
pub fn game_builder() -> GameBuilder {
GameBuilder::new()
}
/// Start the game!
///
/// This function will initialize the game,
/// and start main game loop.
pub fn start(&mut self) {
self.init();
self.started = true;
self.main_loop();
}
/// Initialize the game.
///
/// This function would (re)initialize the game board, but the game instance
/// is currently not reusable, so that is not needed.
///
/// Currently for the console version Gomoku game,
/// this method prints the game board to console.
fn init(&mut self) {
self.draw();
}
/// Draw game graphic
fn draw(&self) {
println!();
self.board.draw_console();
if !self.ended {
self.print_player();
}
}
/// Print whose turn it is to place a piece
fn print_player(&self) {
let p = self.get_current_player();
print!("{} ({}) turn to point: ", p.name(), p.piece_type().get_name());
}
/// Print the coordinate that was just played
fn print_point(&self, coord: CoordinationFlat) {
let x = coord.x;
let y = coord.y;
let char_x = char::from_digit((x + 9) as u32, 36).unwrap();
print!("{}{}", char_x, y);
}
/// Run the main game loop, alternating between the two players until the game ends.
///
/// Each time a player places a piece, the game updates and redraws its board,
/// then invokes the blocking `Player::point()` so the other player can move.
fn main_loop(&mut self) {
let mut fail_count = 0;
loop {
// Initialize the game context every lap
// TODO Is there a better way to references the board?
let context = GameContext::new(self.board.clone(),
self.history.last().map(|z| { z.1 }),
self.history.len());
// Read input from player
let coord = self.get_current_player_mut().point(&context);
// Try point the coordinate
let optional_winner = match self.point(coord) {
Ok(v) => v,
Err(e) => {
fail_count += 1;
println!("Failed point to ({}, {}), {}", coord.x, coord.y, e);
// Panic if too many invalid point
if fail_count >= 6 {
panic!("Fail to point 6 times, may due to invalid AI implementation, panic")
}
continue;
}
};
// Print
self.print_point(coord);
self.draw();
// See if there is a winner.
match optional_winner {
Some(_) => {
// The current player cannot move because the other player has already won.
let winner = self.get_another_player();
println!("Winner is {} ({}).", winner.name(), winner.piece_type());
break;
},
None => { }
};
fail_count = 0;
}
}
// TODO Can I return a reference to the winning player?
/// Place a piece in the game
///
/// Returns the winner if the game has ended.
fn point(&mut self, coord: CoordinationFlat) -> Result<Option<PieceType>, String> {
if !self.started {
return Err(String::from("The game has not started yet"))
}
if self.ended {
return Err(String::from("The game is over"))
}
// Place the piece on the board and check whether the game has ended.
let current_piece = self.get_current_player().piece_type();
let place = self.board.place(coord, current_piece.to_board_piece_type());
if place.is_err() {
return Err(place.err().unwrap())
}
self.history.push((current_piece, coord));
let winner = if self.check_game_end() {
self.ended = true;
Some(current_piece)
} else {
None
};
self.change_to_another_player();
Ok(winner)
}
// Switch to the other player and return the new current player.
fn change_to_another_player(&mut self) -> &Box<Player> {
if self.current_player == 0 {
self.current_player = 1
} else {
self.current_player = 0
}
self.get_current_player()
}
/// Get another player, don't change the current player state
fn get_another_player(&self) -> &Box<Player> {
if self.current_player == 0 {
&self.players[1]
} else {
&self.players[0]
}
}
/// Get another player mutable reference, don't change the current player state
fn get_another_player_mut(&mut self) -> &mut Box<Player> {
if self.current_player == 0 {
&mut self.players[1]
} else {
&mut self.players[0]
}
}
/// Get the current player
fn get_current_player(&self) -> &Box<Player> {
&self.players[self.current_player]
}
/// Get the current player mutable reference
fn get_current_player_mut(&mut self) -> &mut Box<Player> {
&mut self.players[self.current_player]
}
/// Check whether the game has ended; returns true if it has, false otherwise.
///
/// The winner is therefore the player at the top of the history stack.
fn check_game_end(&self) -> bool {
let last_point = match self.history.last() {
Some(a) => a,
None => return false
};
// Current position information
let last_player_color: board::BoardPieceType = last_point.0.to_board_piece_type();
let last_coordination = last_point.1;
// Define 4 non-parallel directions
const MOVE_DIRECTION: [(isize, isize); 4] = [
(0, 1),
(1, 1),
(1, 0),
(1, -1)
];
fn move_dir(coord: &CoordinationFlat, dir: &(isize, isize)) -> Result<CoordinationFlat, &'static str> {
let new_x = (coord.x as isize) + dir.0;
let new_y = (coord.y as isize) + dir.1;
if new_x < 0 {
return Err("x is out of bound");
} else if new_y < 0 {
return Err("y is out of bound");
}
Ok(CoordinationFlat::new(new_x as usize, new_y as usize))
}
fn move_dir_reverse(coord: &CoordinationFlat, dir: &(isize, isize)) -> Result<CoordinationFlat, &'static str> {
let new_x = (coord.x as isize) - dir.0;
let new_y = (coord.y as isize) - dir.1;
if new_x < 0 {
return Err("x is out of bound")
} else if new_y < 0 {
return Err("y is out of bound")
}
Ok(CoordinationFlat::new(new_x as usize, new_y as usize))
}
// Check both the negative and positive direction along each of the 4 axes from the played position
for dir in MOVE_DIRECTION.iter() {
let mut score = 1;
{
let mut next_coord = move_dir(&last_coordination, dir);
if next_coord.is_ok() {
let mut a = self.board.get(next_coord.unwrap());
while next_coord.is_ok() && a.is_ok() && a.unwrap() == last_player_color {
score += 1;
next_coord = move_dir(&next_coord.unwrap(), dir);
a = self.board.get(next_coord.unwrap());
}
}
}
{
let mut next_coord = move_dir_reverse(&last_coordination, dir);
if next_coord.is_ok() {
let mut a = self.board.get(next_coord.unwrap());
while next_coord.is_ok() && a.is_ok() && a.unwrap() == last_player_color {
score += 1;
next_coord = move_dir_reverse(&next_coord.unwrap(), dir);
a = self.board.get(next_coord.unwrap());
}
}
}
if score >= 5 {
return true;
}
}
false
}
}
| to_board_piece_type | identifier_name |
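The `check_game_end` routine in the record above walks four direction vectors out from the last move, counting consecutive pieces of the same colour in both the positive and negative sense; five or more in a row means the game is over. A compact, hedged sketch of the same counting idea over a plain 2D grid (simplified types, independent of the record's `Board` implementation):

```rust
/// Returns true if the piece at (x, y) is part of a five-in-a-row line.
/// This is a simplified illustration; the real code works on the Board type.
fn five_in_a_row(grid: &[Vec<Option<u8>>], x: isize, y: isize, piece: u8) -> bool {
    // The same four non-parallel directions used by check_game_end.
    let dirs: [(isize, isize); 4] = [(0, 1), (1, 1), (1, 0), (1, -1)];
    let get = |cx: isize, cy: isize| -> Option<u8> {
        if cx < 0 || cy < 0 {
            return None;
        }
        grid.get(cx as usize)
            .and_then(|row| row.get(cy as usize))
            .copied()
            .flatten()
    };
    for (dx, dy) in dirs {
        let mut count = 1; // the piece at (x, y) itself
        for sign in [1isize, -1] {
            let (mut cx, mut cy) = (x + sign * dx, y + sign * dy);
            while get(cx, cy) == Some(piece) {
                count += 1;
                cx += sign * dx;
                cy += sign * dy;
            }
        }
        if count >= 5 {
            return true;
        }
    }
    false
}
```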
mod.rs | use game::PieceType::BLACK;
use game::PieceType::WHITE;
use game::players::ai::IdiotAi;
use self::board::Board;
use self::coord::CoordinationFlat;
use self::players::LocalHumanPlayer;
use self::players::Player;
use std::char;
use std::fmt;
mod board;
mod players;
mod coord {
use std::fmt;
/// Define coordination type
type Coordination = usize;
// 2D Coordination
#[derive(Copy, Clone)]
pub struct CoordinationFlat {
pub x: Coordination,
pub y: Coordination
}
impl CoordinationFlat {
pub fn new(x: Coordination, y: Coordination) -> CoordinationFlat {
CoordinationFlat { x, y }
}
}
impl fmt::Display for CoordinationFlat {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "({}, {})", self.x, self.y)
}
}
}
/// Define array index type
pub type ArrayIndex = usize;
/// The Piece type includes black and white
#[derive(Copy, Clone, Eq, PartialEq)]
pub enum PieceType {
WHITE, BLACK
}
impl PieceType {
pub fn get_name(&self) -> &str {
match self {
PieceType::WHITE => "White",
PieceType::BLACK => "Black"
}
}
pub fn to_board_piece_type(&self) -> board::BoardPieceType {
match self {
PieceType::BLACK => board::BoardPieceType::BLACK,
PieceType::WHITE => board::BoardPieceType::WHITE,
}
}
}
impl fmt::Display for PieceType {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.get_name())
}
}
#[derive(Copy, Clone, Eq, PartialEq)]
pub enum GameBuilderPlayerType {
Human,
IdiotAi,
}
/// Game builder
pub struct GameBuilder {
first_player: GameBuilderPlayerType,
second_player: GameBuilderPlayerType
}
impl GameBuilder {
/// Create a game builder object
pub fn new() -> GameBuilder {
GameBuilder {
first_player: GameBuilderPlayerType::Human,
second_player: GameBuilderPlayerType::Human
}
}
/// Set the first player (Uses black piece) | self
}
/// Set the second player (Uses white piece)
pub fn set_second_player(&mut self, player_type: GameBuilderPlayerType) -> &mut Self {
self.second_player = player_type;
self
}
pub fn build(&self) -> Game {
Game::new(
GameBuilder::create_player(self.first_player, BLACK),
GameBuilder::create_player(self.second_player, WHITE),
)
}
fn create_player(player_type: GameBuilderPlayerType, piece: PieceType) -> Box<Player> {
match player_type {
GameBuilderPlayerType::Human => Box::new(LocalHumanPlayer::new(piece)),
GameBuilderPlayerType::IdiotAi => Box::new(IdiotAi::new(piece))
}
}
}
///
/// Game context in game, typically is same as Game struct
///
pub(in game) struct GameContext {
/// A board 2D array copy
board: Board,
/// None if it's first player point
last_point: Option<CoordinationFlat>,
/// Total pieces in the game
total_pieces: usize
}
impl GameContext {
pub fn new(board: Board, last_point: Option<CoordinationFlat>, total_pieces: usize)
-> Self {
GameContext {
board,
last_point,
total_pieces
}
}
}
///
/// A Gomoku game instance.
///
pub struct Game {
board: Board,
players: [Box<Player>; 2],
current_player: usize,
// TODO Can history put reference of player into?
history: Vec<(PieceType, CoordinationFlat)>,
started: bool,
ended: bool,
}
impl Game {
/// Create a new game with black first
fn new(first_player: Box<Player>, second_player: Box<Player>) -> Game {
Game {
board: Board::new(),
current_player: 0,
players: [first_player, second_player],
history: vec![],
started: false,
ended: false,
}
}
/// Create a game builder object; equivalent to GameBuilder::new()
pub fn game_builder() -> GameBuilder {
GameBuilder::new()
}
/// Start the game!
///
/// This function will initialize the game,
/// and start main game loop.
pub fn start(&mut self) {
self.init();
self.started = true;
self.main_loop();
}
/// Initialize the game.
///
/// This function will initialize the game board,
/// but currently is unreusable, so that is not needed.
///
/// Currently for the console version Gomoku game,
/// this method prints the game board to console.
fn init(&mut self) {
self.draw();
}
/// Draw game graphic
fn draw(&self) {
println!();
self.board.draw_console();
if !self.ended {
self.print_player();
}
}
/// Print who will point this time
fn print_player(&self) {
let p = self.get_current_player();
print!("{} ({}) turn to point: ", p.name(), p.piece_type().get_name());
}
/// Print where is pointed
fn print_point(&self, coord: CoordinationFlat) {
let x = coord.x;
let y = coord.y;
let char_x = char::from_digit((x + 9) as u32, 36).unwrap();
print!("{}{}", char_x, y);
}
/// Start the game main loop, loop the two player to point, until the game is end.
///
/// In the loop, when every player placed a piece, the game updates it's board and print,
/// then invoke the blocking function `Player::point()`, let another place piece.
fn main_loop(&mut self) {
let mut fail_count = 0;
loop {
// Initialize the game context every lap
// TODO Is there a better way to references the board?
let context = GameContext::new(self.board.clone(),
self.history.last().map(|z| { z.1 }),
self.history.len());
// Read input from player
let coord = self.get_current_player_mut().point(&context);
// Try point the coordinate
let optional_winner = match self.point(coord) {
Ok(v) => v,
Err(e) => {
fail_count += 1;
println!("Failed point to ({}, {}), {}", coord.x, coord.y, e);
// Panic if too many invalid point
if fail_count >= 6 {
panic!("Fail to point 6 times, may due to invalid AI implementation, panic")
}
continue;
}
};
// Print
self.print_point(coord);
self.draw();
// See if there is a winner.
match optional_winner {
Some(_) => {
// The current player cannot move because the other player has already won.
let winner = self.get_another_player();
println!("Winner is {} ({}).", winner.name(), winner.piece_type());
break;
},
None => { }
};
fail_count = 0;
}
}
// TODO Can I returns the reference of winner player?
/// Place a piece in the game
///
/// Returns the winner if the game is end.
fn point(&mut self, coord: CoordinationFlat) -> Result<Option<PieceType>, String> {
if !self.started {
return Err(String::from("The game has not started yet"))
}
if self.ended {
return Err(String::from("The game is over"))
}
// place the piece to board, and check the game is end
let current_piece = self.get_current_player().piece_type();
let place = self.board.place(coord, current_piece.to_board_piece_type());
if place.is_err() {
return Err(place.err().unwrap())
}
self.history.push((current_piece, coord));
let winner = if self.check_game_end() {
self.ended = true;
Some(current_piece)
} else {
None
};
self.change_to_another_player();
Ok(winner)
}
// Change current player to another player, and returns new current player.
fn change_to_another_player(&mut self) -> &Box<Player> {
if self.current_player == 0 {
self.current_player = 1
} else {
self.current_player = 0
}
self.get_current_player()
}
/// Get another player, don't change the current player state
fn get_another_player(&self) -> &Box<Player> {
if self.current_player == 0 {
&self.players[1]
} else {
&self.players[0]
}
}
/// Get another player mutable reference, don't change the current player state
fn get_another_player_mut(&mut self) -> &mut Box<Player> {
if self.current_player == 0 {
&mut self.players[1]
} else {
&mut self.players[0]
}
}
/// Get the current player
fn get_current_player(&self) -> &Box<Player> {
&self.players[self.current_player]
}
/// Get the current player mutable reference
fn get_current_player_mut(&mut self) -> &mut Box<Player> {
&mut self.players[self.current_player]
}
/// Check the game is end, if end, returns true; not end the return false.
///
/// So the winner is the top of history stack
fn check_game_end(&self) -> bool {
let last_point = match self.history.last() {
Some(a) => a,
None => return false
};
// Current position information
let last_player_color: board::BoardPieceType = last_point.0.to_board_piece_type();
let last_coordination = last_point.1;
// Define 4 non-parallel directions
const MOVE_DIRECTION: [(isize, isize); 4] = [
(0, 1),
(1, 1),
(1, 0),
(1, -1)
];
fn move_dir(coord: &CoordinationFlat, dir: &(isize, isize)) -> Result<CoordinationFlat, &'static str> {
let new_x = (coord.x as isize) + dir.0;
let new_y = (coord.y as isize) + dir.1;
if new_x < 0 {
return Err("x is out of bound");
} else if new_y < 0 {
return Err("y is out of bound");
}
Ok(CoordinationFlat::new(new_x as usize, new_y as usize))
}
fn move_dir_reverse(coord: &CoordinationFlat, dir: &(isize, isize)) -> Result<CoordinationFlat, &'static str> {
let new_x = (coord.x as isize) - dir.0;
let new_y = (coord.y as isize) - dir.1;
if new_x < 0 {
return Err("x is out of bound")
} else if new_y < 0 {
return Err("y is out of bound")
}
Ok(CoordinationFlat::new(new_x as usize, new_y as usize))
}
// Check 4 directions negative and positive directions from point position
for dir in MOVE_DIRECTION.iter() {
let mut score = 1;
{
let mut next_coord = move_dir(&last_coordination, dir);
if next_coord.is_ok() {
let mut a = self.board.get(next_coord.unwrap());
while next_coord.is_ok() && a.is_ok() && a.unwrap() == last_player_color {
score += 1;
next_coord = move_dir(&next_coord.unwrap(), dir);
a = self.board.get(next_coord.unwrap());
}
}
}
{
let mut next_coord = move_dir_reverse(&last_coordination, dir);
if next_coord.is_ok() {
let mut a = self.board.get(next_coord.unwrap());
while next_coord.is_ok() && a.is_ok() && a.unwrap() == last_player_color {
score += 1;
next_coord = move_dir_reverse(&next_coord.unwrap(), dir);
a = self.board.get(next_coord.unwrap());
}
}
}
if score >= 5 {
return true;
}
}
false
}
} | pub fn set_first_player(&mut self, player_type: GameBuilderPlayerType) -> &mut Self {
self.first_player = player_type; | random_line_split |
mod.rs | use game::PieceType::BLACK;
use game::PieceType::WHITE;
use game::players::ai::IdiotAi;
use self::board::Board;
use self::coord::CoordinationFlat;
use self::players::LocalHumanPlayer;
use self::players::Player;
use std::char;
use std::fmt;
mod board;
mod players;
mod coord {
use std::fmt;
/// Define coordination type
type Coordination = usize;
// 2D Coordination
#[derive(Copy, Clone)]
pub struct CoordinationFlat {
pub x: Coordination,
pub y: Coordination
}
impl CoordinationFlat {
pub fn new(x: Coordination, y: Coordination) -> CoordinationFlat {
CoordinationFlat { x, y }
}
}
impl fmt::Display for CoordinationFlat {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "({}, {})", self.x, self.y)
}
}
}
/// Define array index type
pub type ArrayIndex = usize;
/// The Piece type includes black and white
#[derive(Copy, Clone, Eq, PartialEq)]
pub enum PieceType {
WHITE, BLACK
}
impl PieceType {
pub fn get_name(&self) -> &str {
match self {
PieceType::WHITE => "White",
PieceType::BLACK => "Black"
}
}
pub fn to_board_piece_type(&self) -> board::BoardPieceType {
match self {
PieceType::BLACK => board::BoardPieceType::BLACK,
PieceType::WHITE => board::BoardPieceType::WHITE,
}
}
}
impl fmt::Display for PieceType {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.get_name())
}
}
#[derive(Copy, Clone, Eq, PartialEq)]
pub enum GameBuilderPlayerType {
Human,
IdiotAi,
}
/// Game builder
pub struct GameBuilder {
first_player: GameBuilderPlayerType,
second_player: GameBuilderPlayerType
}
impl GameBuilder {
/// Create a game builder object
pub fn new() -> GameBuilder {
GameBuilder {
first_player: GameBuilderPlayerType::Human,
second_player: GameBuilderPlayerType::Human
}
}
/// Set the first player (Uses black piece)
pub fn set_first_player(&mut self, player_type: GameBuilderPlayerType) -> &mut Self {
self.first_player = player_type;
self
}
/// Set the second player (Uses white piece)
pub fn set_second_player(&mut self, player_type: GameBuilderPlayerType) -> &mut Self {
self.second_player = player_type;
self
}
pub fn build(&self) -> Game {
Game::new(
GameBuilder::create_player(self.first_player, BLACK),
GameBuilder::create_player(self.second_player, WHITE),
)
}
fn create_player(player_type: GameBuilderPlayerType, piece: PieceType) -> Box<Player> {
match player_type {
GameBuilderPlayerType::Human => Box::new(LocalHumanPlayer::new(piece)),
GameBuilderPlayerType::IdiotAi => Box::new(IdiotAi::new(piece))
}
}
}
///
/// Game context in game, typically is same as Game struct
///
pub(in game) struct GameContext {
/// A board 2D array copy
board: Board,
/// None if it's first player point
last_point: Option<CoordinationFlat>,
/// Total pieces in the game
total_pieces: usize
}
impl GameContext {
pub fn new(board: Board, last_point: Option<CoordinationFlat>, total_pieces: usize)
-> Self {
GameContext {
board,
last_point,
total_pieces
}
}
}
///
/// A Gomoku game instance.
///
pub struct Game {
board: Board,
players: [Box<Player>; 2],
current_player: usize,
// TODO Can history put reference of player into?
history: Vec<(PieceType, CoordinationFlat)>,
started: bool,
ended: bool,
}
impl Game {
/// Create a new game with black first
fn new(first_player: Box<Player>, second_player: Box<Player>) -> Game {
Game {
board: Board::new(),
current_player: 0,
players: [first_player, second_player],
history: vec![],
started: false,
ended: false,
}
}
/// Create a game builder object; equivalent to GameBuilder::new()
pub fn game_builder() -> GameBuilder {
GameBuilder::new()
}
/// Start the game!
///
/// This function will initialize the game,
/// and start main game loop.
pub fn start(&mut self) {
self.init();
self.started = true;
self.main_loop();
}
/// Initialize the game.
///
/// This function will initialize the game board,
/// but currently is unreusable, so that is not needed.
///
/// Currently for the console version Gomoku game,
/// this method prints the game board to console.
fn init(&mut self) {
self.draw();
}
/// Draw game graphic
fn draw(&self) {
println!();
self.board.draw_console();
if !self.ended {
self.print_player();
}
}
/// Print who will point this time
fn print_player(&self) {
let p = self.get_current_player();
print!("{} ({}) turn to point: ", p.name(), p.piece_type().get_name());
}
/// Print where is pointed
fn print_point(&self, coord: CoordinationFlat) {
let x = coord.x;
let y = coord.y;
let char_x = char::from_digit((x + 9) as u32, 36).unwrap();
print!("{}{}", char_x, y);
}
/// Start the game main loop, loop the two player to point, until the game is end.
///
/// In the loop, when every player placed a piece, the game updates it's board and print,
/// then invoke the blocking function `Player::point()`, let another place piece.
fn main_loop(&mut self) {
let mut fail_count = 0;
loop {
// Initialize the game context every lap
// TODO Is there a better way to references the board?
let context = GameContext::new(self.board.clone(),
self.history.last().map(|z| { z.1 }),
self.history.len());
// Read input from player
let coord = self.get_current_player_mut().point(&context);
// Try point the coordinate
let optional_winner = match self.point(coord) {
Ok(v) => v,
Err(e) => {
fail_count += 1;
println!("Failed point to ({}, {}), {}", coord.x, coord.y, e);
// Panic if too many invalid point
if fail_count >= 6 {
panic!("Fail to point 6 times, may due to invalid AI implementation, panic")
}
continue;
}
};
// Print
self.print_point(coord);
self.draw();
// See if there is a winner.
match optional_winner {
Some(_) => {
// The current player cannot move because the other player has already won.
let winner = self.get_another_player();
println!("Winner is {} ({}).", winner.name(), winner.piece_type());
break;
},
None => { }
};
fail_count = 0;
}
}
// TODO Can I returns the reference of winner player?
/// Place a piece in the game
///
/// Returns the winner if the game is end.
fn point(&mut self, coord: CoordinationFlat) -> Result<Option<PieceType>, String> {
if !self.started {
return Err(String::from("The game has not started yet"))
}
if self.ended {
return Err(String::from("The game is over"))
}
// place the piece to board, and check the game is end
let current_piece = self.get_current_player().piece_type();
let place = self.board.place(coord, current_piece.to_board_piece_type());
if place.is_err() {
return Err(place.err().unwrap())
}
self.history.push((current_piece, coord));
let winner = if self.check_game_end() {
self.ended = true;
Some(current_piece)
} else {
None
};
self.change_to_another_player();
Ok(winner)
}
// Change current player to another player, and returns new current player.
fn change_to_another_player(&mut self) -> &Box<Player> {
if self.current_player == 0 {
self.current_player = 1
} else {
self.current_player = 0
}
self.get_current_player()
}
/// Get another player, don't change the current player state
fn get_another_player(&self) -> &Box<Player> {
if self.current_player == 0 {
&self.players[1]
} else {
&self.players[0]
}
}
/// Get another player mutable reference, don't change the current player state
fn get_another_player_mut(&mut self) -> &mut Box<Player> {
if self.current_player == 0 {
&mut self.players[1]
} else {
&mut self.players[0]
}
}
/// Get the current player
fn get_current_player(&self) -> &Box<Player> {
&self.players[self.current_player]
}
/// Get the current player mutable reference
fn get_current_player_mut(&mut self) -> &mut Box<Player> {
&mut self.players[self.current_player]
}
/// Check the game is end, if end, returns true; not end the return false.
///
/// So the winner is the top of history stack
fn check_game_end(&self) -> bool {
let last_point = match self.history.last() {
Some(a) => a,
None => return false
};
// Current position information
let last_player_color: board::BoardPieceType = last_point.0.to_board_piece_type();
let last_coordination = last_point.1;
// Define 4 non-parallel directions
const MOVE_DIRECTION: [(isize, isize); 4] = [
(0, 1),
(1, 1),
(1, 0),
(1, -1)
];
fn move_dir(coord: &CoordinationFlat, dir: &(isize, isize)) -> Result<CoordinationFlat, &'static str> {
let new_x = (coord.x as isize) + dir.0;
let new_y = (coord.y as isize) + dir.1;
if new_x < 0 {
return Err("x is out of bound");
} else if new_y < 0 {
return Err("y is out of bound");
}
Ok(CoordinationFlat::new(new_x as usize, new_y as usize))
}
fn move_dir_reverse(coord: &CoordinationFlat, dir: &(isize, isize)) -> Result<CoordinationFlat, &'static str> {
let new_x = (coord.x as isize) - dir.0;
let new_y = (coord.y as isize) - dir.1;
if new_x < 0 {
return Err("x is out of bound")
} else if new_y < 0 {
return Err("y is out of bound")
}
Ok(CoordinationFlat::new(new_x as usize, new_y as usize))
}
// Check 4 directions negative and positive directions from point position
for dir in MOVE_DIRECTION.iter() {
let mut score = 1;
{
let mut next_coord = move_dir(&last_coordination, dir);
if next_coord.is_ok() {
let mut a = self.board.get(next_coord.unwrap());
while next_coord.is_ok() && a.is_ok() && a.unwrap() == last_player_color {
score += 1;
next_coord = move_dir(&next_coord.unwrap(), dir);
a = self.board.get(next_coord.unwrap());
}
}
}
{
let mut next_coord = move_dir_reverse(&last_coordination, dir);
if next_coord.is_ok() |
}
if score >= 5 {
return true;
}
}
false
}
}
| {
let mut a = self.board.get(next_coord.unwrap());
while next_coord.is_ok() && a.is_ok() && a.unwrap() == last_player_color {
score += 1;
next_coord = move_dir_reverse(&next_coord.unwrap(), dir);
a = self.board.get(next_coord.unwrap());
}
} | conditional_block |
mod.rs | use game::PieceType::BLACK;
use game::PieceType::WHITE;
use game::players::ai::IdiotAi;
use self::board::Board;
use self::coord::CoordinationFlat;
use self::players::LocalHumanPlayer;
use self::players::Player;
use std::char;
use std::fmt;
mod board;
mod players;
mod coord {
use std::fmt;
/// Define coordination type
type Coordination = usize;
// 2D Coordination
#[derive(Copy, Clone)]
pub struct CoordinationFlat {
pub x: Coordination,
pub y: Coordination
}
impl CoordinationFlat {
pub fn new(x: Coordination, y: Coordination) -> CoordinationFlat {
CoordinationFlat { x, y }
}
}
impl fmt::Display for CoordinationFlat {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "({}, {})", self.x, self.y)
}
}
}
/// Define array index type
pub type ArrayIndex = usize;
/// The Piece type includes black and white
#[derive(Copy, Clone, Eq, PartialEq)]
pub enum PieceType {
WHITE, BLACK
}
impl PieceType {
pub fn get_name(&self) -> &str {
match self {
PieceType::WHITE => "White",
PieceType::BLACK => "Black"
}
}
pub fn to_board_piece_type(&self) -> board::BoardPieceType {
match self {
PieceType::BLACK => board::BoardPieceType::BLACK,
PieceType::WHITE => board::BoardPieceType::WHITE,
}
}
}
impl fmt::Display for PieceType {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.get_name())
}
}
#[derive(Copy, Clone, Eq, PartialEq)]
pub enum GameBuilderPlayerType {
Human,
IdiotAi,
}
/// Game builder
pub struct GameBuilder {
first_player: GameBuilderPlayerType,
second_player: GameBuilderPlayerType
}
impl GameBuilder {
/// Create a game builder object
pub fn new() -> GameBuilder {
GameBuilder {
first_player: GameBuilderPlayerType::Human,
second_player: GameBuilderPlayerType::Human
}
}
/// Set the first player (Uses black piece)
pub fn set_first_player(&mut self, player_type: GameBuilderPlayerType) -> &mut Self {
self.first_player = player_type;
self
}
/// Set the second player (Uses white piece)
pub fn set_second_player(&mut self, player_type: GameBuilderPlayerType) -> &mut Self {
self.second_player = player_type;
self
}
pub fn build(&self) -> Game {
Game::new(
GameBuilder::create_player(self.first_player, BLACK),
GameBuilder::create_player(self.second_player, WHITE),
)
}
fn create_player(player_type: GameBuilderPlayerType, piece: PieceType) -> Box<Player> {
match player_type {
GameBuilderPlayerType::Human => Box::new(LocalHumanPlayer::new(piece)),
GameBuilderPlayerType::IdiotAi => Box::new(IdiotAi::new(piece))
}
}
}
///
/// Game context in game, typically is same as Game struct
///
pub(in game) struct GameContext {
/// A board 2D array copy
board: Board,
/// None if it's first player point
last_point: Option<CoordinationFlat>,
/// Total pieces in the game
total_pieces: usize
}
impl GameContext {
pub fn new(board: Board, last_point: Option<CoordinationFlat>, total_pieces: usize)
-> Self {
GameContext {
board,
last_point,
total_pieces
}
}
}
///
/// A Gomoku game instance.
///
pub struct Game {
board: Board,
players: [Box<Player>; 2],
current_player: usize,
// TODO Can history put reference of player into?
history: Vec<(PieceType, CoordinationFlat)>,
started: bool,
ended: bool,
}
impl Game {
/// Create a new game with black first
fn new(first_player: Box<Player>, second_player: Box<Player>) -> Game {
Game {
board: Board::new(),
current_player: 0,
players: [first_player, second_player],
history: vec![],
started: false,
ended: false,
}
}
/// Create a game builder object; equivalent to GameBuilder::new()
pub fn game_builder() -> GameBuilder {
GameBuilder::new()
}
/// Start the game!
///
/// This function will initialize the game,
/// and start main game loop.
pub fn start(&mut self) {
self.init();
self.started = true;
self.main_loop();
}
/// Initialize the game.
///
/// This function will initialize the game board,
/// but currently is unreusable, so that is not needed.
///
/// Currently for the console version Gomoku game,
/// this method prints the game board to console.
fn init(&mut self) {
self.draw();
}
/// Draw game graphic
fn draw(&self) {
println!();
self.board.draw_console();
if !self.ended {
self.print_player();
}
}
/// Print who will point this time
fn print_player(&self) {
let p = self.get_current_player();
print!("{} ({}) turn to point: ", p.name(), p.piece_type().get_name());
}
/// Print where is pointed
fn print_point(&self, coord: CoordinationFlat) {
let x = coord.x;
let y = coord.y;
let char_x = char::from_digit((x + 9) as u32, 36).unwrap();
print!("{}{}", char_x, y);
}
/// Start the game main loop, loop the two player to point, until the game is end.
///
/// In the loop, when every player placed a piece, the game updates it's board and print,
/// then invoke the blocking function `Player::point()`, let another place piece.
fn main_loop(&mut self) {
let mut fail_count = 0;
loop {
// Initialize the game context every lap
// TODO Is there a better way to references the board?
let context = GameContext::new(self.board.clone(),
self.history.last().map(|z| { z.1 }),
self.history.len());
// Read input from player
let coord = self.get_current_player_mut().point(&context);
// Try point the coordinate
let optional_winner = match self.point(coord) {
Ok(v) => v,
Err(e) => {
fail_count += 1;
println!("Failed point to ({}, {}), {}", coord.x, coord.y, e);
// Panic if too many invalid point
if fail_count >= 6 {
panic!("Fail to point 6 times, may due to invalid AI implementation, panic")
}
continue;
}
};
// Print
self.print_point(coord);
self.draw();
// See if there is a winner.
match optional_winner {
Some(_) => {
// The current player cannot move because the other player has already won.
let winner = self.get_another_player();
println!("Winner is {} ({}).", winner.name(), winner.piece_type());
break;
},
None => { }
};
fail_count = 0;
}
}
// TODO Can I returns the reference of winner player?
/// Place a piece in the game
///
/// Returns the winner if the game is end.
fn point(&mut self, coord: CoordinationFlat) -> Result<Option<PieceType>, String> {
if !self.started {
return Err(String::from("The game has not started yet"))
}
if self.ended {
return Err(String::from("The game is over"))
}
        // Place the piece on the board, then check whether the game has ended
let current_piece = self.get_current_player().piece_type();
let place = self.board.place(coord, current_piece.to_board_piece_type());
if place.is_err() {
return Err(place.err().unwrap())
}
self.history.push((current_piece, coord));
let winner = if self.check_game_end() {
self.ended = true;
Some(current_piece)
} else {
None
};
self.change_to_another_player();
Ok(winner)
}
    // Switch the current player to the other player and return the new current player.
fn change_to_another_player(&mut self) -> &Box<Player> {
if self.current_player == 0 {
self.current_player = 1
} else {
self.current_player = 0
}
self.get_current_player()
}
    /// Get the other player without changing the current-player state
fn get_another_player(&self) -> &Box<Player> {
if self.current_player == 0 {
&self.players[1]
} else {
&self.players[0]
}
}
    /// Get a mutable reference to the other player without changing the current-player state
fn get_another_player_mut(&mut self) -> &mut Box<Player> |
/// Get the current player
fn get_current_player(&self) -> &Box<Player> {
&self.players[self.current_player]
}
/// Get the current player mutable reference
fn get_current_player_mut(&mut self) -> &mut Box<Player> {
&mut self.players[self.current_player]
}
    /// Check whether the game has ended; returns true if it has, false otherwise.
    ///
    /// The check is based on the last move, so the winner is the player on top of the history stack
fn check_game_end(&self) -> bool {
let last_point = match self.history.last() {
Some(a) => a,
None => return false
};
// Current position information
let last_player_color: board::BoardPieceType = last_point.0.to_board_piece_type();
let last_coordination = last_point.1;
// Define 4 non-parallel directions
const MOVE_DIRECTION: [(isize, isize); 4] = [
(0, 1),
(1, 1),
(1, 0),
(1, -1)
];
fn move_dir(coord: &CoordinationFlat, dir: &(isize, isize)) -> Result<CoordinationFlat, &'static str> {
let new_x = (coord.x as isize) + dir.0;
let new_y = (coord.y as isize) + dir.1;
if new_x < 0 {
return Err("x is out of bound");
} else if new_y < 0 {
return Err("y is out of bound");
}
Ok(CoordinationFlat::new(new_x as usize, new_y as usize))
}
fn move_dir_reverse(coord: &CoordinationFlat, dir: &(isize, isize)) -> Result<CoordinationFlat, &'static str> {
let new_x = (coord.x as isize) - dir.0;
let new_y = (coord.y as isize) - dir.1;
if new_x < 0 {
return Err("x is out of bound")
} else if new_y < 0 {
return Err("y is out of bound")
}
Ok(CoordinationFlat::new(new_x as usize, new_y as usize))
}
        // For each of the 4 directions, scan both the positive and the negative way from the last point
for dir in MOVE_DIRECTION.iter() {
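            // `score` starts at 1 because the piece that was just placed counts toward the line of five.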
let mut score = 1;
{
let mut next_coord = move_dir(&last_coordination, dir);
if next_coord.is_ok() {
let mut a = self.board.get(next_coord.unwrap());
                    while next_coord.is_ok() && a.is_ok() && a.unwrap() == last_player_color {
                        score += 1;
                        next_coord = move_dir(&next_coord.unwrap(), dir);
                        // Only query the board while the coordinate is still on it;
                        // unwrapping an Err here would panic once the scan leaves the board.
                        if let Ok(c) = next_coord {
                            a = self.board.get(c);
                        } else {
                            break;
                        }
                    }
}
}
{
let mut next_coord = move_dir_reverse(&last_coordination, dir);
if next_coord.is_ok() {
let mut a = self.board.get(next_coord.unwrap());
                    while next_coord.is_ok() && a.is_ok() && a.unwrap() == last_player_color {
                        score += 1;
                        next_coord = move_dir_reverse(&next_coord.unwrap(), dir);
                        // Only query the board while the coordinate is still on it;
                        // unwrapping an Err here would panic once the scan leaves the board.
                        if let Ok(c) = next_coord {
                            a = self.board.get(c);
                        } else {
                            break;
                        }
                    }
}
}
if score >= 5 {
return true;
}
}
false
}
}
| {
if self.current_player == 0 {
&mut self.players[1]
} else {
&mut self.players[0]
}
} | identifier_body |
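// A minimal, self-contained sketch of the five-in-a-row scan performed by `check_game_end`
// above. It assumes a plain `Vec<Vec<Option<u8>>>` board and 0-based coordinates instead of
// the game's `Board`/`CoordinationFlat` types, so it illustrates the idea rather than the
// project's real implementation.
fn five_in_a_row(board: &Vec<Vec<Option<u8>>>, x: usize, y: usize, color: u8) -> bool {
    // The same 4 non-parallel directions; each is scanned both ways.
    const DIRS: [(isize, isize); 4] = [(0, 1), (1, 1), (1, 0), (1, -1)];
    let get = |cx: isize, cy: isize| -> Option<u8> {
        if cx < 0 || cy < 0 {
            return None;
        }
        board
            .get(cx as usize)
            .and_then(|row| row.get(cy as usize))
            .copied()
            .flatten()
    };
    for (dx, dy) in DIRS.iter() {
        // The freshly placed piece itself counts as 1.
        let mut score = 1;
        for sign in [1isize, -1].iter() {
            let (mut cx, mut cy) = (x as isize + dx * sign, y as isize + dy * sign);
            while get(cx, cy) == Some(color) {
                score += 1;
                cx += dx * sign;
                cy += dy * sign;
            }
        }
        if score >= 5 {
            return true;
        }
    }
    false
}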
lib.rs | use chrono::{DateTime, NaiveDateTime, Utc};
use gexiv2_sys;
use gpx::read;
use gpx::TrackSegment;
use gpx::{Gpx, Track};
use log::*;
use regex::Regex;
use reqwest::Url;
use serde_derive::{Deserialize, Serialize};
use std::collections::HashMap;
use std::error;
use std::fs;
use std::fs::File;
use std::io;
use std::io::prelude::*;
use std::io::BufReader;
use std::io::{Error, ErrorKind};
use std::path::{Path, PathBuf};
use tera::{compile_templates, Context, Tera};
#[derive(Serialize, Deserialize)]
pub struct Coordinate {
lon: f64,
lat: f64,
}
pub struct Photo {
path: PathBuf,
datetime: NaiveDateTime,
}
#[derive(Serialize, Deserialize)]
pub struct Config {
pub site: Site,
pub data: Data,
}
#[derive(Serialize, Deserialize)]
pub struct Site {
pub base_uri: String,
pub name: String,
pub proto: String,
pub description: String,
}
#[derive(Serialize, Deserialize)]
pub struct Data {
pub gpx_input: String,
pub img_input: String,
pub site_output: String,
}
#[derive(Serialize, Deserialize)]
pub struct TrackArticle {
pub title: String,
pub underscored_title: String,
pub photos_number: usize,
pub country: String,
pub start_time: DateTime<Utc>,
pub end_time: DateTime<Utc>,
pub coordinate_avg: Coordinate,
}
#[derive(Serialize, Deserialize)]
pub struct ReverseGeocoding {
pub address: HashMap<String, String>,
}
pub fn read_config(file: &Path) -> Result<Config, io::Error> {
let mut config_file = File::open(file)?;
let mut config_str = String::new();
config_file.read_to_string(&mut config_str)?;
// Not sure about that, maybe I should use a Box<Error>?
match toml::from_str(&config_str) {
Ok(config) => Ok(config),
Err(error) => Err(Error::new(ErrorKind::Interrupted, error)),
}
}
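// A hypothetical `config.toml` that `read_config` would accept, inferred from the
// `Config`/`Site`/`Data` structs above (field names come from the structs; the values are
// made-up placeholders, not taken from the original project):
//
// [site]
// proto = "https"
// base_uri = "tracks.example.org"
// name = "My GPX diary"
// description = "Hiking and cycling tracks with photos"
//
// [data]
// gpx_input = "input/gpx"
// img_input = "input/photos"
// site_output = "site"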
pub fn process_gpx_dir(config: &Config) -> Vec<TrackArticle> {
let gpx_dir = Path::new(&config.data.gpx_input);
let target_dir = Path::new(&config.data.site_output);
let mut articles: Vec<TrackArticle> = Vec::new();
let tera = compile_templates!("site/templates/*");
let img_input_dir = Path::new(&config.data.img_input);
let photo_all = parse_photos(img_input_dir);
for entry in fs::read_dir(gpx_dir).unwrap() {
let gpx_path = entry.unwrap().path();
if gpx_path.extension().unwrap() == "gpx" {
info!("Processing {}", gpx_path.display());
match generate_article(&gpx_path, target_dir, &tera, &config, &photo_all) {
Some(article) => articles.push(article),
None => continue,
}
}
}
articles.sort_by(|a, b| a.start_time.cmp(&b.start_time));
articles
}
pub fn article_gpx_info(gpx_file: &Path) -> (TrackArticle, Vec<Coordinate>) {
let file = File::open(&gpx_file).unwrap();
let reader = BufReader::new(file);
let gpx: Gpx = read(reader).unwrap();
let track: &Track = &gpx.tracks[0];
let segment: &TrackSegment = &track.segments[0];
let mut track_coordinates: Vec<Coordinate> = Vec::new();
for s in segment.points.iter() {
track_coordinates.push(Coordinate {
lon: s.point().x(),
lat: s.point().y(),
});
}
    // Type annotations are required here: a "cannot resolve `_: std::iter::Sum<f64>`" error
    // is generated if the average is computed in a single expression; I don't know how to fix
    // that yet, so the sum and the division are kept as separate steps for now
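    // One way to keep it in a single expression is a turbofish on `sum` (untested suggestion,
    // not part of the original code):
    // let lon_avg = track_coordinates.iter().map(|x| x.lon).sum::<f64>()
    //     / track_coordinates.len() as f64;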
let mut lon_avg: f64 = track_coordinates.iter().map(|x| x.lon).sum();
lon_avg = lon_avg / track_coordinates.len() as f64;
let mut lat_avg: f64 = track_coordinates.iter().map(|x| x.lat).sum();
lat_avg = lat_avg / track_coordinates.len() as f64;
let coordinate_avg: Coordinate = Coordinate {
lon: lon_avg,
lat: lat_avg,
};
let start_time = segment.points.first().unwrap().time.unwrap();
let end_time = segment.points.last().unwrap().time.unwrap();
let article_title = match gpx.metadata.unwrap().name {
Some(name) => name,
None => gpx_file.file_stem().unwrap().to_str().unwrap().to_string(),
};
let special_chars_re = Regex::new(r"( |/|\|<|>)").unwrap();
let article_underscored_title = special_chars_re
.replace_all(&article_title, "_")
.to_string();
(
TrackArticle {
title: article_title,
underscored_title: article_underscored_title,
photos_number: 0,
country: String::new(),
start_time: start_time,
end_time: end_time,
coordinate_avg: coordinate_avg,
},
track_coordinates,
)
}
pub fn generate_article(
gpx_file: &Path,
target_dir: &Path,
tera: &Tera,
config: &Config,
photo_list: &Vec<Photo>,
) -> Option<TrackArticle> {
let (article_info, track_coordinates) = article_gpx_info(gpx_file);
let photo_article = find_photos(photo_list, article_info.start_time, article_info.end_time);
let mut copied_photos: Vec<String> = Vec::new();
let photo_target_dir = target_dir
.join("static/photos")
.join(article_info.underscored_title.to_string());
let photo_target_dir_relative =
Path::new("static/photos").join(article_info.underscored_title.to_string());
match photo_article {
Some(photo_article) => {
let photos = photo_article;
fs::create_dir_all(&photo_target_dir).unwrap();
fs::create_dir_all(&photo_target_dir.join("thumbnails")).unwrap();
for (i, p) in photos.iter().enumerate() {
let extension = p.path.extension().unwrap().to_str().unwrap();
let photo_target_file = photo_target_dir.join(format!("{}.{}", i + 1, extension));
match fs::copy(Path::new(&p.path), &photo_target_file) {
Ok(file) => file,
Err(error) => {
error!("unable to copy {}: {}", &p.path.display(), error);
continue;
}
};
let img = image::open(&Path::new(&photo_target_file))
.ok()
.expect("Opening image failed");
let thumbnail = img.thumbnail(300, 300);
thumbnail
.save(&photo_target_dir.join("thumbnails").join(format!(
"{}.{}",
i + 1,
extension
)))
.unwrap();
copied_photos.push(format!("{}.{}", i + 1, extension));
remove_exif(&photo_target_file);
}
}
None => {
info!("No photos found for {}, skipping", gpx_file.display());
return None;
}
};
let mut context = Context::new();
context.add("track_coordinates", &track_coordinates);
context.add("article_title", &article_info.title);
context.add("lon_avg", &article_info.coordinate_avg.lon);
context.add("lat_avg", &article_info.coordinate_avg.lat);
context.add("start_time", &article_info.start_time.to_string());
context.add("end_time", &article_info.end_time.to_string());
context.add("static_dir", "../static");
context.add("config", config);
context.add("copied_photos", &copied_photos);
context.add("photo_target_dir_relative", &photo_target_dir_relative);
render_html(
tera,
context,
&target_dir.join("tracks"),
&article_info.underscored_title,
"track_article.html",
)
.unwrap();
let track_country = match reverse_geocoding(&article_info.coordinate_avg) {
Ok(geocoding) => geocoding.address["country"].clone(),
Err(error) => {
error!("error while reverse geocoding : {}", error);
String::new()
}
};
Some(TrackArticle {
title: article_info.title,
underscored_title: article_info.underscored_title,
photos_number: copied_photos.len(),
country: track_country.to_string(),
start_time: article_info.start_time,
end_time: article_info.end_time,
coordinate_avg: article_info.coordinate_avg,
})
}
pub fn render_html(
tera: &Tera,
context: Context,
dir: &Path,
file: &str,
template: &str,
) -> Result<(), io::Error> {
let res = tera.render(template, &context).unwrap();
let mut generated_file = File::create(format!("{}/{}.html", dir.to_str().unwrap(), file))?;
generated_file.write(res.as_bytes())?;
Ok(())
}
fn find_photos(
photos: &Vec<Photo>,
start_time: DateTime<Utc>,
end_time: DateTime<Utc>,
) -> Option<Vec<&Photo>> {
let mut res: Vec<&Photo> = Vec::new();
for p in photos {
if start_time.timestamp() <= p.datetime.timestamp()
&& end_time.timestamp() >= p.datetime.timestamp()
{
res.push(p);
}
}
if res.len() > 0 {
res.sort_unstable_by_key(|r| r.datetime.timestamp());
return Some(res);
}
None
}
pub fn | (dir: &Path) -> Vec<Photo> {
let mut photos: Vec<Photo> = Vec::new();
unsafe {
gexiv2_sys::gexiv2_log_set_level(gexiv2_sys::GExiv2LogLevel::MUTE);
}
for entry in fs::read_dir(dir).unwrap() {
let img_path = entry.unwrap().path();
let file_metadata = rexiv2::Metadata::new_from_path(&img_path.to_str().unwrap()).unwrap();
        if !file_metadata.has_exif() {
warn!(
"skipping {}: {}",
img_path.display(),
"File doesn't contains Exif metadata"
);
continue;
}
let datetime_string = file_metadata.get_tag_string("Exif.Image.DateTime").unwrap();
let datetime_parse =
match NaiveDateTime::parse_from_str(&datetime_string, "%Y:%m:%d %H:%M:%S") {
Ok(parse_date) => parse_date,
Err(error) => {
warn!("skipping {}: {}", img_path.display(), error);
continue;
}
};
photos.push(Photo {
path: img_path,
datetime: datetime_parse,
});
}
photos
}
pub fn generate_index(config: &Config, articles: Vec<TrackArticle>) {
let target_dir = Path::new(&config.data.site_output);
let tera = compile_templates!("site/templates/*");
let mut index_context = Context::new();
index_context.add("config", &config);
index_context.add("static_dir", "static");
index_context.add("articles", &articles);
render_html(&tera, index_context, &target_dir, "index", "index.html").unwrap();
}
fn remove_exif(img_path: &Path) {
let file_metadata = rexiv2::Metadata::new_from_path(&img_path.to_str().unwrap()).unwrap();
    if !file_metadata.has_exif() {
info!(
"skipping {}: {}",
img_path.display(),
"File doesn't contains Exif metadata"
);
} else {
file_metadata.clear();
file_metadata.save_to_file(&img_path).unwrap();
}
}
// Get only the country information (zoom=0), in French for now.
// Still needs proper error handling.
fn reverse_geocoding(coordinate: &Coordinate) -> Result<ReverseGeocoding, Box<error::Error>> {
let uri = Url::parse_with_params(
"https://nominatim.openstreetmap.org/reverse.php",
&[
("format", "json"),
("lat", &coordinate.lat.to_string()),
("lon", &coordinate.lon.to_string()),
("accept-language", "fr"),
("zoom", "0"),
],
)?;
let resp: ReverseGeocoding = reqwest::get(uri)?.json().unwrap();
Ok(resp)
}
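// For reference, a Nominatim reverse-geocoding response at country level looks roughly like
// the following (shape only, values invented), which is why `ReverseGeocoding` keeps just an
// `address` map and the caller reads `address["country"]`:
//
// {
//   "licence": "...",
//   "address": {
//     "country": "France",
//     "country_code": "fr"
//   }
// }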
| parse_photos | identifier_name |
lib.rs | use chrono::{DateTime, NaiveDateTime, Utc};
use gexiv2_sys;
use gpx::read;
use gpx::TrackSegment;
use gpx::{Gpx, Track};
use log::*;
use regex::Regex;
use reqwest::Url;
use serde_derive::{Deserialize, Serialize};
use std::collections::HashMap;
use std::error;
use std::fs;
use std::fs::File;
use std::io;
use std::io::prelude::*;
use std::io::BufReader;
use std::io::{Error, ErrorKind};
use std::path::{Path, PathBuf};
use tera::{compile_templates, Context, Tera};
#[derive(Serialize, Deserialize)]
pub struct Coordinate {
lon: f64,
lat: f64,
}
pub struct Photo {
path: PathBuf,
datetime: NaiveDateTime,
}
#[derive(Serialize, Deserialize)]
pub struct Config {
pub site: Site,
pub data: Data,
}
#[derive(Serialize, Deserialize)]
pub struct Site {
pub base_uri: String,
pub name: String,
pub proto: String,
pub description: String,
}
#[derive(Serialize, Deserialize)]
pub struct Data {
pub gpx_input: String,
pub img_input: String,
pub site_output: String,
}
#[derive(Serialize, Deserialize)]
pub struct TrackArticle {
pub title: String,
pub underscored_title: String,
pub photos_number: usize,
pub country: String,
pub start_time: DateTime<Utc>,
pub end_time: DateTime<Utc>,
pub coordinate_avg: Coordinate,
}
#[derive(Serialize, Deserialize)]
pub struct ReverseGeocoding {
pub address: HashMap<String, String>,
}
pub fn read_config(file: &Path) -> Result<Config, io::Error> {
let mut config_file = File::open(file)?;
let mut config_str = String::new();
config_file.read_to_string(&mut config_str)?;
// Not sure about that, maybe I should use a Box<Error>?
match toml::from_str(&config_str) {
Ok(config) => Ok(config),
Err(error) => Err(Error::new(ErrorKind::Interrupted, error)),
}
}
pub fn process_gpx_dir(config: &Config) -> Vec<TrackArticle> {
let gpx_dir = Path::new(&config.data.gpx_input);
let target_dir = Path::new(&config.data.site_output);
let mut articles: Vec<TrackArticle> = Vec::new();
let tera = compile_templates!("site/templates/*");
let img_input_dir = Path::new(&config.data.img_input);
let photo_all = parse_photos(img_input_dir);
for entry in fs::read_dir(gpx_dir).unwrap() {
let gpx_path = entry.unwrap().path();
if gpx_path.extension().unwrap() == "gpx" {
info!("Processing {}", gpx_path.display());
match generate_article(&gpx_path, target_dir, &tera, &config, &photo_all) {
Some(article) => articles.push(article),
None => continue,
}
}
}
articles.sort_by(|a, b| a.start_time.cmp(&b.start_time));
articles
}
pub fn article_gpx_info(gpx_file: &Path) -> (TrackArticle, Vec<Coordinate>) {
let file = File::open(&gpx_file).unwrap();
let reader = BufReader::new(file);
let gpx: Gpx = read(reader).unwrap();
let track: &Track = &gpx.tracks[0];
let segment: &TrackSegment = &track.segments[0];
let mut track_coordinates: Vec<Coordinate> = Vec::new();
for s in segment.points.iter() {
track_coordinates.push(Coordinate {
lon: s.point().x(),
lat: s.point().y(),
});
}
    // Type annotations are required here: a "cannot resolve `_: std::iter::Sum<f64>`" error
    // is generated if the average is computed in a single expression; I don't know how to fix
    // that yet, so the sum and the division are kept as separate steps for now
let mut lon_avg: f64 = track_coordinates.iter().map(|x| x.lon).sum();
lon_avg = lon_avg / track_coordinates.len() as f64;
let mut lat_avg: f64 = track_coordinates.iter().map(|x| x.lat).sum();
lat_avg = lat_avg / track_coordinates.len() as f64;
let coordinate_avg: Coordinate = Coordinate {
lon: lon_avg,
lat: lat_avg,
};
let start_time = segment.points.first().unwrap().time.unwrap();
let end_time = segment.points.last().unwrap().time.unwrap();
let article_title = match gpx.metadata.unwrap().name {
Some(name) => name,
None => gpx_file.file_stem().unwrap().to_str().unwrap().to_string(),
};
let special_chars_re = Regex::new(r"( |/|\|<|>)").unwrap();
let article_underscored_title = special_chars_re
.replace_all(&article_title, "_")
.to_string();
(
TrackArticle {
title: article_title,
underscored_title: article_underscored_title,
photos_number: 0,
country: String::new(),
start_time: start_time,
end_time: end_time,
coordinate_avg: coordinate_avg,
},
track_coordinates,
)
}
pub fn generate_article(
gpx_file: &Path,
target_dir: &Path,
tera: &Tera,
config: &Config,
photo_list: &Vec<Photo>,
) -> Option<TrackArticle> {
let (article_info, track_coordinates) = article_gpx_info(gpx_file);
let photo_article = find_photos(photo_list, article_info.start_time, article_info.end_time);
let mut copied_photos: Vec<String> = Vec::new();
let photo_target_dir = target_dir
.join("static/photos")
.join(article_info.underscored_title.to_string());
let photo_target_dir_relative =
Path::new("static/photos").join(article_info.underscored_title.to_string());
match photo_article {
Some(photo_article) => {
let photos = photo_article;
fs::create_dir_all(&photo_target_dir).unwrap();
fs::create_dir_all(&photo_target_dir.join("thumbnails")).unwrap();
for (i, p) in photos.iter().enumerate() {
let extension = p.path.extension().unwrap().to_str().unwrap();
let photo_target_file = photo_target_dir.join(format!("{}.{}", i + 1, extension));
match fs::copy(Path::new(&p.path), &photo_target_file) {
Ok(file) => file,
Err(error) => {
error!("unable to copy {}: {}", &p.path.display(), error);
continue;
}
};
let img = image::open(&Path::new(&photo_target_file))
.ok()
.expect("Opening image failed");
let thumbnail = img.thumbnail(300, 300);
thumbnail
.save(&photo_target_dir.join("thumbnails").join(format!(
"{}.{}",
i + 1,
extension
)))
.unwrap();
copied_photos.push(format!("{}.{}", i + 1, extension));
remove_exif(&photo_target_file);
}
}
None => {
info!("No photos found for {}, skipping", gpx_file.display());
return None;
}
};
let mut context = Context::new();
context.add("track_coordinates", &track_coordinates);
context.add("article_title", &article_info.title);
context.add("lon_avg", &article_info.coordinate_avg.lon);
context.add("lat_avg", &article_info.coordinate_avg.lat);
context.add("start_time", &article_info.start_time.to_string());
context.add("end_time", &article_info.end_time.to_string());
context.add("static_dir", "../static");
context.add("config", config);
context.add("copied_photos", &copied_photos);
context.add("photo_target_dir_relative", &photo_target_dir_relative);
render_html(
tera,
context,
&target_dir.join("tracks"),
&article_info.underscored_title,
"track_article.html",
)
.unwrap();
let track_country = match reverse_geocoding(&article_info.coordinate_avg) {
Ok(geocoding) => geocoding.address["country"].clone(),
Err(error) => {
error!("error while reverse geocoding : {}", error);
String::new()
}
};
Some(TrackArticle {
title: article_info.title,
underscored_title: article_info.underscored_title,
photos_number: copied_photos.len(),
country: track_country.to_string(),
start_time: article_info.start_time,
end_time: article_info.end_time,
coordinate_avg: article_info.coordinate_avg,
})
}
pub fn render_html(
tera: &Tera,
context: Context,
dir: &Path,
file: &str,
template: &str,
) -> Result<(), io::Error> {
let res = tera.render(template, &context).unwrap();
let mut generated_file = File::create(format!("{}/{}.html", dir.to_str().unwrap(), file))?;
generated_file.write(res.as_bytes())?;
Ok(())
}
fn find_photos(
photos: &Vec<Photo>,
start_time: DateTime<Utc>,
end_time: DateTime<Utc>,
) -> Option<Vec<&Photo>> {
let mut res: Vec<&Photo> = Vec::new();
for p in photos {
if start_time.timestamp() <= p.datetime.timestamp()
&& end_time.timestamp() >= p.datetime.timestamp()
{
res.push(p);
}
}
if res.len() > 0 |
None
}
pub fn parse_photos(dir: &Path) -> Vec<Photo> {
let mut photos: Vec<Photo> = Vec::new();
unsafe {
gexiv2_sys::gexiv2_log_set_level(gexiv2_sys::GExiv2LogLevel::MUTE);
}
for entry in fs::read_dir(dir).unwrap() {
let img_path = entry.unwrap().path();
let file_metadata = rexiv2::Metadata::new_from_path(&img_path.to_str().unwrap()).unwrap();
        if !file_metadata.has_exif() {
warn!(
"skipping {}: {}",
img_path.display(),
"File doesn't contains Exif metadata"
);
continue;
}
let datetime_string = file_metadata.get_tag_string("Exif.Image.DateTime").unwrap();
let datetime_parse =
match NaiveDateTime::parse_from_str(&datetime_string, "%Y:%m:%d %H:%M:%S") {
Ok(parse_date) => parse_date,
Err(error) => {
warn!("skipping {}: {}", img_path.display(), error);
continue;
}
};
photos.push(Photo {
path: img_path,
datetime: datetime_parse,
});
}
photos
}
pub fn generate_index(config: &Config, articles: Vec<TrackArticle>) {
let target_dir = Path::new(&config.data.site_output);
let tera = compile_templates!("site/templates/*");
let mut index_context = Context::new();
index_context.add("config", &config);
index_context.add("static_dir", "static");
index_context.add("articles", &articles);
render_html(&tera, index_context, &target_dir, "index", "index.html").unwrap();
}
fn remove_exif(img_path: &Path) {
let file_metadata = rexiv2::Metadata::new_from_path(&img_path.to_str().unwrap()).unwrap();
    if !file_metadata.has_exif() {
info!(
"skipping {}: {}",
img_path.display(),
"File doesn't contains Exif metadata"
);
} else {
file_metadata.clear();
file_metadata.save_to_file(&img_path).unwrap();
}
}
// Get only the country information (zoom=0), in French for now.
// Still needs proper error handling.
fn reverse_geocoding(coordinate: &Coordinate) -> Result<ReverseGeocoding, Box<error::Error>> {
let uri = Url::parse_with_params(
"https://nominatim.openstreetmap.org/reverse.php",
&[
("format", "json"),
("lat", &coordinate.lat.to_string()),
("lon", &coordinate.lon.to_string()),
("accept-language", "fr"),
("zoom", "0"),
],
)?;
let resp: ReverseGeocoding = reqwest::get(uri)?.json().unwrap();
Ok(resp)
}
| {
res.sort_unstable_by_key(|r| r.datetime.timestamp());
return Some(res);
} | conditional_block |
lib.rs | use chrono::{DateTime, NaiveDateTime, Utc};
use gexiv2_sys;
use gpx::read;
use gpx::TrackSegment;
use gpx::{Gpx, Track};
use log::*;
use regex::Regex;
use reqwest::Url; | use std::io;
use std::io::prelude::*;
use std::io::BufReader;
use std::io::{Error, ErrorKind};
use std::path::{Path, PathBuf};
use tera::{compile_templates, Context, Tera};
#[derive(Serialize, Deserialize)]
pub struct Coordinate {
lon: f64,
lat: f64,
}
pub struct Photo {
path: PathBuf,
datetime: NaiveDateTime,
}
#[derive(Serialize, Deserialize)]
pub struct Config {
pub site: Site,
pub data: Data,
}
#[derive(Serialize, Deserialize)]
pub struct Site {
pub base_uri: String,
pub name: String,
pub proto: String,
pub description: String,
}
#[derive(Serialize, Deserialize)]
pub struct Data {
pub gpx_input: String,
pub img_input: String,
pub site_output: String,
}
#[derive(Serialize, Deserialize)]
pub struct TrackArticle {
pub title: String,
pub underscored_title: String,
pub photos_number: usize,
pub country: String,
pub start_time: DateTime<Utc>,
pub end_time: DateTime<Utc>,
pub coordinate_avg: Coordinate,
}
#[derive(Serialize, Deserialize)]
pub struct ReverseGeocoding {
pub address: HashMap<String, String>,
}
pub fn read_config(file: &Path) -> Result<Config, io::Error> {
let mut config_file = File::open(file)?;
let mut config_str = String::new();
config_file.read_to_string(&mut config_str)?;
// Not sure about that, maybe I should use a Box<Error>?
match toml::from_str(&config_str) {
Ok(config) => Ok(config),
Err(error) => Err(Error::new(ErrorKind::Interrupted, error)),
}
}
pub fn process_gpx_dir(config: &Config) -> Vec<TrackArticle> {
let gpx_dir = Path::new(&config.data.gpx_input);
let target_dir = Path::new(&config.data.site_output);
let mut articles: Vec<TrackArticle> = Vec::new();
let tera = compile_templates!("site/templates/*");
let img_input_dir = Path::new(&config.data.img_input);
let photo_all = parse_photos(img_input_dir);
for entry in fs::read_dir(gpx_dir).unwrap() {
let gpx_path = entry.unwrap().path();
if gpx_path.extension().unwrap() == "gpx" {
info!("Processing {}", gpx_path.display());
match generate_article(&gpx_path, target_dir, &tera, &config, &photo_all) {
Some(article) => articles.push(article),
None => continue,
}
}
}
articles.sort_by(|a, b| a.start_time.cmp(&b.start_time));
articles
}
pub fn article_gpx_info(gpx_file: &Path) -> (TrackArticle, Vec<Coordinate>) {
let file = File::open(&gpx_file).unwrap();
let reader = BufReader::new(file);
let gpx: Gpx = read(reader).unwrap();
let track: &Track = &gpx.tracks[0];
let segment: &TrackSegment = &track.segments[0];
let mut track_coordinates: Vec<Coordinate> = Vec::new();
for s in segment.points.iter() {
track_coordinates.push(Coordinate {
lon: s.point().x(),
lat: s.point().y(),
});
}
    // Type annotations are required here: a "cannot resolve `_: std::iter::Sum<f64>`" error
    // is generated if the average is computed in a single expression; I don't know how to fix
    // that yet, so the sum and the division are kept as separate steps for now
let mut lon_avg: f64 = track_coordinates.iter().map(|x| x.lon).sum();
lon_avg = lon_avg / track_coordinates.len() as f64;
let mut lat_avg: f64 = track_coordinates.iter().map(|x| x.lat).sum();
lat_avg = lat_avg / track_coordinates.len() as f64;
let coordinate_avg: Coordinate = Coordinate {
lon: lon_avg,
lat: lat_avg,
};
let start_time = segment.points.first().unwrap().time.unwrap();
let end_time = segment.points.last().unwrap().time.unwrap();
let article_title = match gpx.metadata.unwrap().name {
Some(name) => name,
None => gpx_file.file_stem().unwrap().to_str().unwrap().to_string(),
};
let special_chars_re = Regex::new(r"( |/|\|<|>)").unwrap();
let article_underscored_title = special_chars_re
.replace_all(&article_title, "_")
.to_string();
(
TrackArticle {
title: article_title,
underscored_title: article_underscored_title,
photos_number: 0,
country: String::new(),
start_time: start_time,
end_time: end_time,
coordinate_avg: coordinate_avg,
},
track_coordinates,
)
}
pub fn generate_article(
gpx_file: &Path,
target_dir: &Path,
tera: &Tera,
config: &Config,
photo_list: &Vec<Photo>,
) -> Option<TrackArticle> {
let (article_info, track_coordinates) = article_gpx_info(gpx_file);
let photo_article = find_photos(photo_list, article_info.start_time, article_info.end_time);
let mut copied_photos: Vec<String> = Vec::new();
let photo_target_dir = target_dir
.join("static/photos")
.join(article_info.underscored_title.to_string());
let photo_target_dir_relative =
Path::new("static/photos").join(article_info.underscored_title.to_string());
match photo_article {
Some(photo_article) => {
let photos = photo_article;
fs::create_dir_all(&photo_target_dir).unwrap();
fs::create_dir_all(&photo_target_dir.join("thumbnails")).unwrap();
for (i, p) in photos.iter().enumerate() {
let extension = p.path.extension().unwrap().to_str().unwrap();
let photo_target_file = photo_target_dir.join(format!("{}.{}", i + 1, extension));
match fs::copy(Path::new(&p.path), &photo_target_file) {
Ok(file) => file,
Err(error) => {
error!("unable to copy {}: {}", &p.path.display(), error);
continue;
}
};
let img = image::open(&Path::new(&photo_target_file))
.ok()
.expect("Opening image failed");
let thumbnail = img.thumbnail(300, 300);
thumbnail
.save(&photo_target_dir.join("thumbnails").join(format!(
"{}.{}",
i + 1,
extension
)))
.unwrap();
copied_photos.push(format!("{}.{}", i + 1, extension));
remove_exif(&photo_target_file);
}
}
None => {
info!("No photos found for {}, skipping", gpx_file.display());
return None;
}
};
let mut context = Context::new();
context.add("track_coordinates", &track_coordinates);
context.add("article_title", &article_info.title);
context.add("lon_avg", &article_info.coordinate_avg.lon);
context.add("lat_avg", &article_info.coordinate_avg.lat);
context.add("start_time", &article_info.start_time.to_string());
context.add("end_time", &article_info.end_time.to_string());
context.add("static_dir", "../static");
context.add("config", config);
context.add("copied_photos", &copied_photos);
context.add("photo_target_dir_relative", &photo_target_dir_relative);
render_html(
tera,
context,
&target_dir.join("tracks"),
&article_info.underscored_title,
"track_article.html",
)
.unwrap();
let track_country = match reverse_geocoding(&article_info.coordinate_avg) {
Ok(geocoding) => geocoding.address["country"].clone(),
Err(error) => {
error!("error while reverse geocoding : {}", error);
String::new()
}
};
Some(TrackArticle {
title: article_info.title,
underscored_title: article_info.underscored_title,
photos_number: copied_photos.len(),
country: track_country.to_string(),
start_time: article_info.start_time,
end_time: article_info.end_time,
coordinate_avg: article_info.coordinate_avg,
})
}
pub fn render_html(
tera: &Tera,
context: Context,
dir: &Path,
file: &str,
template: &str,
) -> Result<(), io::Error> {
let res = tera.render(template, &context).unwrap();
let mut generated_file = File::create(format!("{}/{}.html", dir.to_str().unwrap(), file))?;
generated_file.write(res.as_bytes())?;
Ok(())
}
fn find_photos(
photos: &Vec<Photo>,
start_time: DateTime<Utc>,
end_time: DateTime<Utc>,
) -> Option<Vec<&Photo>> {
let mut res: Vec<&Photo> = Vec::new();
for p in photos {
if start_time.timestamp() <= p.datetime.timestamp()
&& end_time.timestamp() >= p.datetime.timestamp()
{
res.push(p);
}
}
if res.len() > 0 {
res.sort_unstable_by_key(|r| r.datetime.timestamp());
return Some(res);
}
None
}
pub fn parse_photos(dir: &Path) -> Vec<Photo> {
let mut photos: Vec<Photo> = Vec::new();
unsafe {
gexiv2_sys::gexiv2_log_set_level(gexiv2_sys::GExiv2LogLevel::MUTE);
}
for entry in fs::read_dir(dir).unwrap() {
let img_path = entry.unwrap().path();
let file_metadata = rexiv2::Metadata::new_from_path(&img_path.to_str().unwrap()).unwrap();
        if !file_metadata.has_exif() {
warn!(
"skipping {}: {}",
img_path.display(),
"File doesn't contains Exif metadata"
);
continue;
}
let datetime_string = file_metadata.get_tag_string("Exif.Image.DateTime").unwrap();
let datetime_parse =
match NaiveDateTime::parse_from_str(&datetime_string, "%Y:%m:%d %H:%M:%S") {
Ok(parse_date) => parse_date,
Err(error) => {
warn!("skipping {}: {}", img_path.display(), error);
continue;
}
};
photos.push(Photo {
path: img_path,
datetime: datetime_parse,
});
}
photos
}
pub fn generate_index(config: &Config, articles: Vec<TrackArticle>) {
let target_dir = Path::new(&config.data.site_output);
let tera = compile_templates!("site/templates/*");
let mut index_context = Context::new();
index_context.add("config", &config);
index_context.add("static_dir", "static");
index_context.add("articles", &articles);
render_html(&tera, index_context, &target_dir, "index", "index.html").unwrap();
}
fn remove_exif(img_path: &Path) {
let file_metadata = rexiv2::Metadata::new_from_path(&img_path.to_str().unwrap()).unwrap();
    if !file_metadata.has_exif() {
info!(
"skipping {}: {}",
img_path.display(),
"File doesn't contains Exif metadata"
);
} else {
file_metadata.clear();
file_metadata.save_to_file(&img_path).unwrap();
}
}
// Get only the country information (zoom=0), in French for now.
// Still needs proper error handling.
fn reverse_geocoding(coordinate: &Coordinate) -> Result<ReverseGeocoding, Box<error::Error>> {
let uri = Url::parse_with_params(
"https://nominatim.openstreetmap.org/reverse.php",
&[
("format", "json"),
("lat", &coordinate.lat.to_string()),
("lon", &coordinate.lon.to_string()),
("accept-language", "fr"),
("zoom", "0"),
],
)?;
let resp: ReverseGeocoding = reqwest::get(uri)?.json().unwrap();
Ok(resp)
} | use serde_derive::{Deserialize, Serialize};
use std::collections::HashMap;
use std::error;
use std::fs;
use std::fs::File; | random_line_split |
stateless.rs | /// PROJECT: Stateless Blockchain Experiment.
///
/// DESCRIPTION: This repository implements a UTXO-based stateless blockchain on Substrate using an
/// RSA accumulator. In this scheme, validators only need to track a single accumulator value and
/// users only need to store their own UTXOs and membership proofs. Unless a data service provider is
/// used, users must constantly watch for updates to the accumulator in order to update their proofs.
/// This particular implementation includes batching and aggregation techniques from this paper:
/// https://eprint.iacr.org/2018/1188.pdf.
///
/// NOTE: This repository is experimental and is not meant to be used in production. The design choices
/// made in this runtime are impractical from both a security and usability standpoint. Additionally,
/// the following code has not been checked for correctness nor has been optimized for efficiency.
use support::{decl_module, decl_storage, decl_event, ensure, StorageValue, dispatch::Result, traits::Get};
use system::ensure_signed;
use primitive_types::H256;
use rstd::prelude::Vec;
use rstd::vec;
use codec::{Encode, Decode};
use accumulator::*;
/// At the moment, this particular struct resembles more closely an NFT.
#[cfg_attr(feature = "std", derive(Debug))]
#[derive(Default, Clone, Encode, Decode, PartialEq, Eq, Copy)]
pub struct UTXO {
pub_key: H256,
id: u64,
}
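// How a UTXO becomes an accumulator element elsewhere in this module: it is SCALE-encoded
// and hashed to a prime (see `addTransaction` and the tests below). For example:
//
// let utxo = UTXO { pub_key: H256::from_low_u64_be(1), id: 0 };
// let elem = subroutines::hash_to_prime(&utxo.encode());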
/// Primitive transaction model with one input and one output.
#[cfg_attr(feature = "std", derive(Debug))]
#[derive(Default, Clone, Encode, Decode, PartialEq, Eq)]
pub struct | {
input: UTXO,
output: UTXO,
witness: Vec<u8>,
// Would in practice include a signature here.
}
pub trait Trait: system::Trait {
type Event: From<Event> + Into<<Self as system::Trait>::Event>;
}
decl_storage! {
trait Store for Module<T: Trait> as Stateless {
State get(get_state): U2048 = U2048::from(2); // Use 2 as an arbitrary generator with "unknown" order.
SpentCoins get(get_spent_coins): Vec<(U2048, U2048)>;
NewCoins get(get_new_coins): Vec<U2048>
}
}
decl_event!(
pub enum Event {
Deletion(U2048, U2048, U2048),
Addition(U2048, U2048, U2048),
}
);
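// Each event carries (new_state, aggregated_elements, proof). Users holding UTXOs are expected
// to watch these events and fold the batched deletions/additions into their own membership
// witnesses; the witness-update procedure itself lives in the `accumulator` crate and is not
// shown in this runtime module.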
decl_module! {
/// The module declaration.
pub struct Module<T: Trait> for enum Call where origin: T::Origin {
// Initialize generic event
fn deposit_event() = default;
/// Receive request to execute a transaction.
/// Verify the contents of a transaction and temporarily add it to a queue of verified transactions.
/// NOTE: Only works if one transaction per user per block is submitted.
pub fn addTransaction(origin, transaction: Transaction) -> Result {
ensure_signed(origin)?;
// Arbitrarily cap the number of pending transactions to 100
ensure!(SpentCoins::get().len() < 100, "Transaction queue full. Please try again next block.");
// Also verify that the user is not spending to themselves
            ensure!(transaction.input.pub_key != transaction.output.pub_key, "Cannot send coin to yourself.");
// Verify witness
let spent_elem = subroutines::hash_to_prime(&transaction.input.encode());
let witness = U2048::from_little_endian(&transaction.witness);
ensure!(witnesses::verify_mem_wit(State::get(), witness, spent_elem), "Witness is invalid");
let new_elem = subroutines::hash_to_prime(&transaction.output.encode());
            // Update storage items: queue the spent coin for deletion and the new coin for addition.
            SpentCoins::append(&vec![(spent_elem, witness)]);
            NewCoins::append(&vec![new_elem]);
Ok(())
}
/// Arbitrary replacement for Proof-of-Work to create new coins.
pub fn mint(origin, elem: u64) -> Result {
ensure_signed(origin)?;
let state = subroutines::mod_exp(Self::get_state(), U2048::from(elem), U2048::from_dec_str(MODULUS).unwrap());
State::put(state);
Ok(())
}
/// Batch delete spent coins and add new coins on block finalization
fn on_finalize() {
// Clause here to protect against empty blocks
if Self::get_spent_coins().len() > 0 {
// Delete spent coins from aggregator and distribute proof
let (state, agg, proof) = accumulator::batch_delete(State::get(), &Self::get_spent_coins());
Self::deposit_event(Event::Deletion(state, agg, proof));
// Add new coins to aggregator and distribute proof
let (state, agg, proof) = accumulator::batch_add(state, &Self::get_new_coins());
Self::deposit_event(Event::Addition(state, agg, proof));
// Update state
State::put(state);
}
// Clear storage
SpentCoins::kill();
NewCoins::kill();
}
}
}
/// tests for this module
#[cfg(test)]
mod tests {
use super::*;
use runtime_io::with_externalities;
use primitives::{H256, Blake2Hasher};
use support::{impl_outer_origin, parameter_types};
use sr_primitives::{traits::{BlakeTwo256, IdentityLookup, OnFinalize}, testing::Header};
use sr_primitives::weights::Weight;
use sr_primitives::Perbill;
impl_outer_origin! {
pub enum Origin for Test {}
}
// For testing the module, we construct most of a mock runtime. This means
// first constructing a configuration type (`Test`) which `impl`s each of the
// configuration traits of modules we want to use.
#[derive(Clone, Eq, PartialEq)]
pub struct Test;
parameter_types! {
pub const BlockHashCount: u64 = 250;
pub const MaximumBlockWeight: Weight = 1024;
pub const MaximumBlockLength: u32 = 2 * 1024;
pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75);
}
impl system::Trait for Test {
type Origin = Origin;
type Call = ();
type Index = u64;
type BlockNumber = u64;
type Hash = H256;
type Hashing = BlakeTwo256;
type AccountId = u64;
type Lookup = IdentityLookup<Self::AccountId>;
type Header = Header;
type WeightMultiplierUpdate = ();
type Event = ();
type BlockHashCount = BlockHashCount;
type MaximumBlockWeight = MaximumBlockWeight;
type MaximumBlockLength = MaximumBlockLength;
type AvailableBlockRatio = AvailableBlockRatio;
type Version = ();
}
impl Trait for Test {
type Event = ();
}
type Stateless = Module<Test>;
type System = system::Module<Test>;
// This function basically just builds a genesis storage key/value store according to
// our desired mockup.
fn new_test_ext() -> runtime_io::TestExternalities<Blake2Hasher> {
system::GenesisConfig::default().build_storage::<Test>().unwrap().into()
}
#[test]
fn test_add() {
with_externalities(&mut new_test_ext(), || {
let elems = vec![U2048::from(3), U2048::from(5), U2048::from(7)];
let (state, _, _) = accumulator::batch_add(Stateless::get_state(), &elems);
assert_eq!(state, U2048::from(5));
});
}
#[test]
fn test_del() {
with_externalities(&mut new_test_ext(), || {
let elems = vec![U2048::from(3), U2048::from(5), U2048::from(7)];
// Collect witnesses for the added elements
let witnesses = witnesses::create_all_mem_wit(Stateless::get_state(), &elems);
// Add elements
let (state, _, _) = accumulator::batch_add(Stateless::get_state(), &elems);
assert_eq!(state, U2048::from(5));
// Delete elements
let deletions = vec![(elems[0], witnesses[0]), (elems[1], witnesses[1]), (elems[2], witnesses[2])];
let (state, _, _) = accumulator::batch_delete(Stateless::get_state(), &deletions);
assert_eq!(state, U2048::from(2));
});
}
#[test]
fn test_block() {
with_externalities(&mut new_test_ext(), || {
// 1. Construct UTXOs.
let utxo_0 = UTXO {
pub_key: H256::from_low_u64_be(0),
id: 0,
};
let utxo_1 = UTXO {
pub_key: H256::from_low_u64_be(1),
id: 1,
};
let utxo_2 = UTXO {
pub_key: H256::from_low_u64_be(2),
id: 2,
};
// 2. Hash each UTXO to a prime.
let elem_0 = subroutines::hash_to_prime(&utxo_0.encode());
let elem_1 = subroutines::hash_to_prime(&utxo_1.encode());
let elem_2 = subroutines::hash_to_prime(&utxo_2.encode());
let elems = vec![elem_0, elem_1, elem_2];
// 3. Produce witnesses for the added elements.
let witnesses = witnesses::create_all_mem_wit(Stateless::get_state(), &elems);
// 4. Add elements to the accumulator.
let (state, _, _) = accumulator::batch_add(Stateless::get_state(), &elems);
State::put(state);
// 5. Construct new UTXOs and derive integer representations.
let utxo_3 = UTXO {
pub_key: H256::from_low_u64_be(1),
id: 0,
};
let utxo_4 = UTXO {
pub_key: H256::from_low_u64_be(2),
id: 1,
};
let utxo_5 = UTXO {
pub_key: H256::from_low_u64_be(0),
id: 2,
};
let elem_3 = subroutines::hash_to_prime(&utxo_3.encode());
let elem_4 = subroutines::hash_to_prime(&utxo_4.encode());
let elem_5 = subroutines::hash_to_prime(&utxo_5.encode());
// 6. Construct transactions.
let mut witness_0: [u8; 256] = [0; 256];
witnesses[0].to_little_endian(&mut witness_0);
let tx_0 = Transaction {
input: utxo_0,
output: utxo_3,
witness: witness_0.to_vec(),
};
let mut witness_1: [u8; 256] = [0; 256];
witnesses[1].to_little_endian(&mut witness_1);
let tx_1 = Transaction {
input: utxo_1,
output: utxo_4,
witness: witness_1.to_vec(),
};
let mut witness_2: [u8; 256] = [0; 256];
witnesses[2].to_little_endian(&mut witness_2);
let tx_2 = Transaction {
input: utxo_2,
output: utxo_5,
witness: witness_2.to_vec(),
};
            // 7. Verify transactions. Note that this logic will eventually be executed
            // automatically by the block builder API.
Stateless::addTransaction(Origin::signed(1), tx_0);
Stateless::addTransaction(Origin::signed(1), tx_1);
Stateless::addTransaction(Origin::signed(1), tx_2);
// 8. Finalize the block.
Stateless::on_finalize(System::block_number());
assert_eq!(Stateless::get_state(),
subroutines::mod_exp(U2048::from(2), elem_3 * elem_4 * elem_5, U2048::from_dec_str(MODULUS).unwrap()));
});
}
#[test]
fn test_mint() {
with_externalities(&mut new_test_ext(), || {
Stateless::mint(Origin::signed(1), 3);
assert_eq!(Stateless::get_state(), U2048::from(8));
});
}
} | Transaction | identifier_name |
stateless.rs | /// PROJECT: Stateless Blockchain Experiment.
///
/// DESCRIPTION: This repository implements a UTXO-based stateless blockchain on Substrate using an
/// RSA accumulator. In this scheme, validators only need to track a single accumulator value and
/// users only need to store their own UTXOs and membership proofs. Unless a data service provider is
/// used, users must constantly watch for updates to the accumulator in order to update their proofs.
/// This particular implementation includes batching and aggregation techniques from this paper:
/// https://eprint.iacr.org/2018/1188.pdf.
///
/// NOTE: This repository is experimental and is not meant to be used in production. The design choices
/// made in this runtime are impractical from both a security and usability standpoint. Additionally,
/// the following code has not been checked for correctness nor has been optimized for efficiency.
use support::{decl_module, decl_storage, decl_event, ensure, StorageValue, dispatch::Result, traits::Get};
use system::ensure_signed;
use primitive_types::H256;
use rstd::prelude::Vec;
use rstd::vec;
use codec::{Encode, Decode};
use accumulator::*;
/// At the moment, this particular struct resembles more closely an NFT.
#[cfg_attr(feature = "std", derive(Debug))]
#[derive(Default, Clone, Encode, Decode, PartialEq, Eq, Copy)]
pub struct UTXO {
pub_key: H256,
id: u64,
}
/// Primitive transaction model with one input and one output.
#[cfg_attr(feature = "std", derive(Debug))]
#[derive(Default, Clone, Encode, Decode, PartialEq, Eq)]
pub struct Transaction {
input: UTXO,
output: UTXO,
witness: Vec<u8>,
// Would in practice include a signature here.
}
pub trait Trait: system::Trait {
type Event: From<Event> + Into<<Self as system::Trait>::Event>;
}
decl_storage! {
trait Store for Module<T: Trait> as Stateless {
State get(get_state): U2048 = U2048::from(2); // Use 2 as an arbitrary generator with "unknown" order.
SpentCoins get(get_spent_coins): Vec<(U2048, U2048)>;
NewCoins get(get_new_coins): Vec<U2048>
}
}
decl_event!(
pub enum Event {
Deletion(U2048, U2048, U2048),
Addition(U2048, U2048, U2048),
}
);
| /// The module declaration.
pub struct Module<T: Trait> for enum Call where origin: T::Origin {
// Initialize generic event
fn deposit_event() = default;
/// Receive request to execute a transaction.
/// Verify the contents of a transaction and temporarily add it to a queue of verified transactions.
/// NOTE: Only works if one transaction per user per block is submitted.
pub fn addTransaction(origin, transaction: Transaction) -> Result {
ensure_signed(origin)?;
// Arbitrarily cap the number of pending transactions to 100
ensure!(SpentCoins::get().len() < 100, "Transaction queue full. Please try again next block.");
// Also verify that the user is not spending to themselves
            ensure!(transaction.input.pub_key != transaction.output.pub_key, "Cannot send coin to yourself.");
// Verify witness
let spent_elem = subroutines::hash_to_prime(&transaction.input.encode());
let witness = U2048::from_little_endian(&transaction.witness);
ensure!(witnesses::verify_mem_wit(State::get(), witness, spent_elem), "Witness is invalid");
let new_elem = subroutines::hash_to_prime(&transaction.output.encode());
            // Update storage items: queue the spent coin for deletion and the new coin for addition.
            SpentCoins::append(&vec![(spent_elem, witness)]);
            NewCoins::append(&vec![new_elem]);
Ok(())
}
/// Arbitrary replacement for Proof-of-Work to create new coins.
pub fn mint(origin, elem: u64) -> Result {
ensure_signed(origin)?;
let state = subroutines::mod_exp(Self::get_state(), U2048::from(elem), U2048::from_dec_str(MODULUS).unwrap());
State::put(state);
Ok(())
}
/// Batch delete spent coins and add new coins on block finalization
fn on_finalize() {
// Clause here to protect against empty blocks
if Self::get_spent_coins().len() > 0 {
// Delete spent coins from aggregator and distribute proof
let (state, agg, proof) = accumulator::batch_delete(State::get(), &Self::get_spent_coins());
Self::deposit_event(Event::Deletion(state, agg, proof));
// Add new coins to aggregator and distribute proof
let (state, agg, proof) = accumulator::batch_add(state, &Self::get_new_coins());
Self::deposit_event(Event::Addition(state, agg, proof));
// Update state
State::put(state);
}
// Clear storage
SpentCoins::kill();
NewCoins::kill();
}
}
}
/// tests for this module
#[cfg(test)]
mod tests {
use super::*;
use runtime_io::with_externalities;
use primitives::{H256, Blake2Hasher};
use support::{impl_outer_origin, parameter_types};
use sr_primitives::{traits::{BlakeTwo256, IdentityLookup, OnFinalize}, testing::Header};
use sr_primitives::weights::Weight;
use sr_primitives::Perbill;
impl_outer_origin! {
pub enum Origin for Test {}
}
// For testing the module, we construct most of a mock runtime. This means
// first constructing a configuration type (`Test`) which `impl`s each of the
// configuration traits of modules we want to use.
#[derive(Clone, Eq, PartialEq)]
pub struct Test;
parameter_types! {
pub const BlockHashCount: u64 = 250;
pub const MaximumBlockWeight: Weight = 1024;
pub const MaximumBlockLength: u32 = 2 * 1024;
pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75);
}
impl system::Trait for Test {
type Origin = Origin;
type Call = ();
type Index = u64;
type BlockNumber = u64;
type Hash = H256;
type Hashing = BlakeTwo256;
type AccountId = u64;
type Lookup = IdentityLookup<Self::AccountId>;
type Header = Header;
type WeightMultiplierUpdate = ();
type Event = ();
type BlockHashCount = BlockHashCount;
type MaximumBlockWeight = MaximumBlockWeight;
type MaximumBlockLength = MaximumBlockLength;
type AvailableBlockRatio = AvailableBlockRatio;
type Version = ();
}
impl Trait for Test {
type Event = ();
}
type Stateless = Module<Test>;
type System = system::Module<Test>;
// This function basically just builds a genesis storage key/value store according to
// our desired mockup.
fn new_test_ext() -> runtime_io::TestExternalities<Blake2Hasher> {
system::GenesisConfig::default().build_storage::<Test>().unwrap().into()
}
#[test]
fn test_add() {
with_externalities(&mut new_test_ext(), || {
let elems = vec![U2048::from(3), U2048::from(5), U2048::from(7)];
let (state, _, _) = accumulator::batch_add(Stateless::get_state(), &elems);
assert_eq!(state, U2048::from(5));
});
}
#[test]
fn test_del() {
with_externalities(&mut new_test_ext(), || {
let elems = vec![U2048::from(3), U2048::from(5), U2048::from(7)];
// Collect witnesses for the added elements
let witnesses = witnesses::create_all_mem_wit(Stateless::get_state(), &elems);
// Add elements
let (state, _, _) = accumulator::batch_add(Stateless::get_state(), &elems);
assert_eq!(state, U2048::from(5));
// Delete elements
let deletions = vec![(elems[0], witnesses[0]), (elems[1], witnesses[1]), (elems[2], witnesses[2])];
let (state, _, _) = accumulator::batch_delete(Stateless::get_state(), &deletions);
assert_eq!(state, U2048::from(2));
});
}
#[test]
fn test_block() {
with_externalities(&mut new_test_ext(), || {
// 1. Construct UTXOs.
let utxo_0 = UTXO {
pub_key: H256::from_low_u64_be(0),
id: 0,
};
let utxo_1 = UTXO {
pub_key: H256::from_low_u64_be(1),
id: 1,
};
let utxo_2 = UTXO {
pub_key: H256::from_low_u64_be(2),
id: 2,
};
// 2. Hash each UTXO to a prime.
let elem_0 = subroutines::hash_to_prime(&utxo_0.encode());
let elem_1 = subroutines::hash_to_prime(&utxo_1.encode());
let elem_2 = subroutines::hash_to_prime(&utxo_2.encode());
let elems = vec![elem_0, elem_1, elem_2];
// 3. Produce witnesses for the added elements.
let witnesses = witnesses::create_all_mem_wit(Stateless::get_state(), &elems);
// 4. Add elements to the accumulator.
let (state, _, _) = accumulator::batch_add(Stateless::get_state(), &elems);
State::put(state);
// 5. Construct new UTXOs and derive integer representations.
let utxo_3 = UTXO {
pub_key: H256::from_low_u64_be(1),
id: 0,
};
let utxo_4 = UTXO {
pub_key: H256::from_low_u64_be(2),
id: 1,
};
let utxo_5 = UTXO {
pub_key: H256::from_low_u64_be(0),
id: 2,
};
let elem_3 = subroutines::hash_to_prime(&utxo_3.encode());
let elem_4 = subroutines::hash_to_prime(&utxo_4.encode());
let elem_5 = subroutines::hash_to_prime(&utxo_5.encode());
// 6. Construct transactions.
let mut witness_0: [u8; 256] = [0; 256];
witnesses[0].to_little_endian(&mut witness_0);
let tx_0 = Transaction {
input: utxo_0,
output: utxo_3,
witness: witness_0.to_vec(),
};
let mut witness_1: [u8; 256] = [0; 256];
witnesses[1].to_little_endian(&mut witness_1);
let tx_1 = Transaction {
input: utxo_1,
output: utxo_4,
witness: witness_1.to_vec(),
};
let mut witness_2: [u8; 256] = [0; 256];
witnesses[2].to_little_endian(&mut witness_2);
let tx_2 = Transaction {
input: utxo_2,
output: utxo_5,
witness: witness_2.to_vec(),
};
            // 7. Verify transactions. Note that this logic will eventually be executed
            // automatically by the block builder API.
Stateless::addTransaction(Origin::signed(1), tx_0);
Stateless::addTransaction(Origin::signed(1), tx_1);
Stateless::addTransaction(Origin::signed(1), tx_2);
// 8. Finalize the block.
Stateless::on_finalize(System::block_number());
assert_eq!(Stateless::get_state(),
subroutines::mod_exp(U2048::from(2), elem_3 * elem_4 * elem_5, U2048::from_dec_str(MODULUS).unwrap()));
});
}
#[test]
fn test_mint() {
with_externalities(&mut new_test_ext(), || {
Stateless::mint(Origin::signed(1), 3);
assert_eq!(Stateless::get_state(), U2048::from(8));
});
}
} | decl_module! { | random_line_split |
stateless.rs | /// PROJECT: Stateless Blockchain Experiment.
///
/// DESCRIPTION: This repository implements a UTXO-based stateless blockchain on Substrate using an
/// RSA accumulator. In this scheme, validators only need to track a single accumulator value and
/// users only need to store their own UTXOs and membership proofs. Unless a data service provider is
/// used, users must constantly watch for updates to the accumulator in order to update their proofs.
/// This particular implementation includes batching and aggregation techniques from this paper:
/// https://eprint.iacr.org/2018/1188.pdf.
///
/// NOTE: This repository is experimental and is not meant to be used in production. The design choices
/// made in this runtime are impractical from both a security and usability standpoint. Additionally,
/// the following code has not been checked for correctness nor has been optimized for efficiency.
use support::{decl_module, decl_storage, decl_event, ensure, StorageValue, dispatch::Result, traits::Get};
use system::ensure_signed;
use primitive_types::H256;
use rstd::prelude::Vec;
use rstd::vec;
use codec::{Encode, Decode};
use accumulator::*;
/// At the moment, this particular struct resembles more closely an NFT.
#[cfg_attr(feature = "std", derive(Debug))]
#[derive(Default, Clone, Encode, Decode, PartialEq, Eq, Copy)]
pub struct UTXO {
pub_key: H256,
id: u64,
}
/// Primitive transaction model with one input and one output.
#[cfg_attr(feature = "std", derive(Debug))]
#[derive(Default, Clone, Encode, Decode, PartialEq, Eq)]
pub struct Transaction {
input: UTXO,
output: UTXO,
witness: Vec<u8>,
// Would in practice include a signature here.
}
pub trait Trait: system::Trait {
type Event: From<Event> + Into<<Self as system::Trait>::Event>;
}
decl_storage! {
trait Store for Module<T: Trait> as Stateless {
State get(get_state): U2048 = U2048::from(2); // Use 2 as an arbitrary generator with "unknown" order.
SpentCoins get(get_spent_coins): Vec<(U2048, U2048)>;
NewCoins get(get_new_coins): Vec<U2048>
}
}
decl_event!(
pub enum Event {
Deletion(U2048, U2048, U2048),
Addition(U2048, U2048, U2048),
}
);
decl_module! {
/// The module declaration.
pub struct Module<T: Trait> for enum Call where origin: T::Origin {
// Initialize generic event
fn deposit_event() = default;
/// Receive request to execute a transaction.
/// Verify the contents of a transaction and temporarily add it to a queue of verified transactions.
/// NOTE: Only works if one transaction per user per block is submitted.
pub fn addTransaction(origin, transaction: Transaction) -> Result {
ensure_signed(origin)?;
// Arbitrarily cap the number of pending transactions to 100
ensure!(SpentCoins::get().len() < 100, "Transaction queue full. Please try again next block.");
// Also verify that the user is not spending to themselves
            ensure!(transaction.input.pub_key != transaction.output.pub_key, "Cannot send coin to yourself.");
// Verify witness
let spent_elem = subroutines::hash_to_prime(&transaction.input.encode());
let witness = U2048::from_little_endian(&transaction.witness);
ensure!(witnesses::verify_mem_wit(State::get(), witness, spent_elem), "Witness is invalid");
let new_elem = subroutines::hash_to_prime(&transaction.output.encode());
            // Update storage items: queue the spent coin for deletion and the new coin for addition.
            SpentCoins::append(&vec![(spent_elem, witness)]);
            NewCoins::append(&vec![new_elem]);
Ok(())
}
/// Arbitrary replacement for Proof-of-Work to create new coins.
pub fn mint(origin, elem: u64) -> Result {
ensure_signed(origin)?;
let state = subroutines::mod_exp(Self::get_state(), U2048::from(elem), U2048::from_dec_str(MODULUS).unwrap());
State::put(state);
Ok(())
}
/// Batch delete spent coins and add new coins on block finalization
fn on_finalize() {
// Clause here to protect against empty blocks
if Self::get_spent_coins().len() > 0 {
// Delete spent coins from aggregator and distribute proof
let (state, agg, proof) = accumulator::batch_delete(State::get(), &Self::get_spent_coins());
Self::deposit_event(Event::Deletion(state, agg, proof));
// Add new coins to aggregator and distribute proof
let (state, agg, proof) = accumulator::batch_add(state, &Self::get_new_coins());
Self::deposit_event(Event::Addition(state, agg, proof));
// Update state
State::put(state);
}
// Clear storage
SpentCoins::kill();
NewCoins::kill();
}
}
}
/// tests for this module
#[cfg(test)]
mod tests {
use super::*;
use runtime_io::with_externalities;
use primitives::{H256, Blake2Hasher};
use support::{impl_outer_origin, parameter_types};
use sr_primitives::{traits::{BlakeTwo256, IdentityLookup, OnFinalize}, testing::Header};
use sr_primitives::weights::Weight;
use sr_primitives::Perbill;
impl_outer_origin! {
pub enum Origin for Test {}
}
// For testing the module, we construct most of a mock runtime. This means
// first constructing a configuration type (`Test`) which `impl`s each of the
// configuration traits of modules we want to use.
#[derive(Clone, Eq, PartialEq)]
pub struct Test;
parameter_types! {
pub const BlockHashCount: u64 = 250;
pub const MaximumBlockWeight: Weight = 1024;
pub const MaximumBlockLength: u32 = 2 * 1024;
pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75);
}
impl system::Trait for Test {
type Origin = Origin;
type Call = ();
type Index = u64;
type BlockNumber = u64;
type Hash = H256;
type Hashing = BlakeTwo256;
type AccountId = u64;
type Lookup = IdentityLookup<Self::AccountId>;
type Header = Header;
type WeightMultiplierUpdate = ();
type Event = ();
type BlockHashCount = BlockHashCount;
type MaximumBlockWeight = MaximumBlockWeight;
type MaximumBlockLength = MaximumBlockLength;
type AvailableBlockRatio = AvailableBlockRatio;
type Version = ();
}
impl Trait for Test {
type Event = ();
}
type Stateless = Module<Test>;
type System = system::Module<Test>;
// This function basically just builds a genesis storage key/value store according to
// our desired mockup.
fn new_test_ext() -> runtime_io::TestExternalities<Blake2Hasher> {
system::GenesisConfig::default().build_storage::<Test>().unwrap().into()
}
#[test]
fn test_add() |
#[test]
fn test_del() {
with_externalities(&mut new_test_ext(), || {
let elems = vec![U2048::from(3), U2048::from(5), U2048::from(7)];
// Collect witnesses for the added elements
let witnesses = witnesses::create_all_mem_wit(Stateless::get_state(), &elems);
// Add elements
let (state, _, _) = accumulator::batch_add(Stateless::get_state(), &elems);
assert_eq!(state, U2048::from(5));
// Delete elements
let deletions = vec![(elems[0], witnesses[0]), (elems[1], witnesses[1]), (elems[2], witnesses[2])];
let (state, _, _) = accumulator::batch_delete(Stateless::get_state(), &deletions);
assert_eq!(state, U2048::from(2));
});
}
#[test]
fn test_block() {
with_externalities(&mut new_test_ext(), || {
// 1. Construct UTXOs.
let utxo_0 = UTXO {
pub_key: H256::from_low_u64_be(0),
id: 0,
};
let utxo_1 = UTXO {
pub_key: H256::from_low_u64_be(1),
id: 1,
};
let utxo_2 = UTXO {
pub_key: H256::from_low_u64_be(2),
id: 2,
};
// 2. Hash each UTXO to a prime.
let elem_0 = subroutines::hash_to_prime(&utxo_0.encode());
let elem_1 = subroutines::hash_to_prime(&utxo_1.encode());
let elem_2 = subroutines::hash_to_prime(&utxo_2.encode());
let elems = vec![elem_0, elem_1, elem_2];
// 3. Produce witnesses for the added elements.
let witnesses = witnesses::create_all_mem_wit(Stateless::get_state(), &elems);
// 4. Add elements to the accumulator.
let (state, _, _) = accumulator::batch_add(Stateless::get_state(), &elems);
State::put(state);
// 5. Construct new UTXOs and derive integer representations.
let utxo_3 = UTXO {
pub_key: H256::from_low_u64_be(1),
id: 0,
};
let utxo_4 = UTXO {
pub_key: H256::from_low_u64_be(2),
id: 1,
};
let utxo_5 = UTXO {
pub_key: H256::from_low_u64_be(0),
id: 2,
};
let elem_3 = subroutines::hash_to_prime(&utxo_3.encode());
let elem_4 = subroutines::hash_to_prime(&utxo_4.encode());
let elem_5 = subroutines::hash_to_prime(&utxo_5.encode());
// 6. Construct transactions.
let mut witness_0: [u8; 256] = [0; 256];
witnesses[0].to_little_endian(&mut witness_0);
let tx_0 = Transaction {
input: utxo_0,
output: utxo_3,
witness: witness_0.to_vec(),
};
let mut witness_1: [u8; 256] = [0; 256];
witnesses[1].to_little_endian(&mut witness_1);
let tx_1 = Transaction {
input: utxo_1,
output: utxo_4,
witness: witness_1.to_vec(),
};
let mut witness_2: [u8; 256] = [0; 256];
witnesses[2].to_little_endian(&mut witness_2);
let tx_2 = Transaction {
input: utxo_2,
output: utxo_5,
witness: witness_2.to_vec(),
};
// 7. Verify transactions. Note that this logic will eventually be executed
// automatically by the block builder API.
Stateless::addTransaction(Origin::signed(1), tx_0);
Stateless::addTransaction(Origin::signed(1), tx_1);
Stateless::addTransaction(Origin::signed(1), tx_2);
// 8. Finalize the block.
Stateless::on_finalize(System::block_number());
assert_eq!(Stateless::get_state(),
subroutines::mod_exp(U2048::from(2), elem_3 * elem_4 * elem_5, U2048::from_dec_str(MODULUS).unwrap()));
});
}
#[test]
fn test_mint() {
with_externalities(&mut new_test_ext(), || {
Stateless::mint(Origin::signed(1), 3);
assert_eq!(Stateless::get_state(), U2048::from(8));
});
}
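// Additional sketch (assumes the genesis accumulator state is U2048::from(2), as the
// other tests imply): repeated mints compose multiplicatively in the exponent,
// i.e. 2^3 followed by ^2 yields 2^6 = 64.
#[test]
fn test_mint_twice() {
    with_externalities(&mut new_test_ext(), || {
        Stateless::mint(Origin::signed(1), 3);
        Stateless::mint(Origin::signed(1), 2);
        assert_eq!(Stateless::get_state(), U2048::from(64));
    });
}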
} | {
with_externalities(&mut new_test_ext(), || {
let elems = vec![U2048::from(3), U2048::from(5), U2048::from(7)];
let (state, _, _) = accumulator::batch_add(Stateless::get_state(), &elems);
assert_eq!(state, U2048::from(5));
});
} | identifier_body |
mod.rs | mod base;
mod breakpoints;
mod desc;
mod monitor;
mod resume;
mod thread;
mod traits;
mod utils;
use super::arch::RuntimeArch;
use crate::{BreakpointCause, CoreStatus, Error, HaltReason, Session};
use gdbstub::stub::state_machine::GdbStubStateMachine;
use std::net::{SocketAddr, TcpListener, TcpStream};
use std::num::NonZeroUsize;
use std::sync::Mutex;
use std::time::Duration;
use gdbstub::common::Signal;
use gdbstub::conn::ConnectionExt;
use gdbstub::stub::{GdbStub, MultiThreadStopReason};
use gdbstub::target::ext::base::BaseOps;
use gdbstub::target::ext::breakpoints::BreakpointsOps;
use gdbstub::target::ext::memory_map::MemoryMapOps;
use gdbstub::target::ext::monitor_cmd::MonitorCmdOps;
use gdbstub::target::ext::target_description_xml_override::TargetDescriptionXmlOverrideOps;
use gdbstub::target::Target;
pub(crate) use traits::{GdbErrorExt, ProbeRsErrorExt};
use desc::TargetDescription;
/// Actions for resuming a core
#[derive(Debug, Copy, Clone)]
pub(crate) enum ResumeAction {
/// Don't change the state
Unchanged,
/// Resume core
Resume,
/// Single step core
Step,
}
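// Sketch (assumption: the actual mapping lives in the `resume` module, which is not
// shown here): a continue/step request roughly turns a `ResumeAction` into probe-rs
// core calls along these lines.
//
//     match action {
//         ResumeAction::Unchanged => {}
//         ResumeAction::Resume => core.run()?,
//         ResumeAction::Step => { core.step()?; }
//     }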
/// The top level gdbstub target for a probe-rs debug session
pub(crate) struct RuntimeTarget<'a> {
/// The probe-rs session object
session: &'a Mutex<Session>,
/// A list of core IDs for this stub
cores: Vec<usize>,
/// TCP listener accepting incoming connections
listener: TcpListener,
/// The current GDB stub state machine
gdb: Option<GdbStubStateMachine<'a, RuntimeTarget<'a>, TcpStream>>,
/// Resume action to be used upon a continue request
resume_action: (usize, ResumeAction),
/// Description of target's architecture and registers
target_desc: TargetDescription,
}
impl<'a> RuntimeTarget<'a> {
/// Create a new RuntimeTarget and get ready to start processing GDB input
pub fn new(
session: &'a Mutex<Session>,
cores: Vec<usize>,
addrs: &[SocketAddr],
) -> Result<Self, Error> {
let listener = TcpListener::bind(addrs).into_error()?;
listener.set_nonblocking(true).into_error()?;
Ok(Self {
session,
cores,
listener,
gdb: None,
resume_action: (0, ResumeAction::Unchanged),
target_desc: TargetDescription::default(),
})
}
/// Process any pending work for this target
///
/// Returns: Duration to wait before processing this target again
pub fn process(&mut self) -> Result<Duration, Error> {
// Stage 1 - unconnected
if self.gdb.is_none() {
// See if we have a connection
match self.listener.accept() {
Ok((s, addr)) => {
tracing::info!("New connection from {:#?}", addr);
for i in 0..self.cores.len() {
let core_id = self.cores[i];
// When we first attach to the core, GDB expects us to halt the core, so we do this here when a new client connects.
// If the core is already halted, issuing another halt command is a no-op, so we do this regardless of the core's current state.
self.session
.lock()
.unwrap()
.core(core_id)?
.halt(Duration::from_millis(100))?;
self.load_target_desc()?;
}
// Start the GDB Stub state machine
let stub = GdbStub::<RuntimeTarget, _>::new(s);
match stub.run_state_machine(self) {
Ok(gdbstub) => {
self.gdb = Some(gdbstub);
}
Err(e) => {
// Any errors at this state are either IO errors or fatal config errors
return Err(anyhow::Error::from(e).into());
}
};
}
Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {
// No connection yet
return Ok(Duration::from_millis(10));
}
Err(e) => {
// Fatal error
return Err(anyhow::Error::from(e).into());
}
};
}
// Stage 2 - connected
if self.gdb.is_some() {
let mut wait_time = Duration::ZERO;
let gdb = self.gdb.take().unwrap();
self.gdb = match gdb {
GdbStubStateMachine::Idle(mut state) => {
// Read data if available
let next_byte = {
let conn = state.borrow_conn();
read_if_available(conn)?
};
if let Some(b) = next_byte {
Some(state.incoming_data(self, b).into_error()?)
} else {
wait_time = Duration::from_millis(10);
Some(state.into())
}
}
GdbStubStateMachine::Running(mut state) => {
// Read data if available
let next_byte = {
let conn = state.borrow_conn();
read_if_available(conn)?
};
if let Some(b) = next_byte {
Some(state.incoming_data(self, b).into_error()?)
} else {
// Check for break
let mut stop_reason: Option<MultiThreadStopReason<u64>> = None;
{
let mut session = self.session.lock().unwrap();
for i in &self.cores {
let mut core = session.core(*i)?;
let status = core.status()?;
if let CoreStatus::Halted(reason) = status {
let tid = NonZeroUsize::new(i + 1).unwrap();
stop_reason = Some(match reason {
HaltReason::Breakpoint(BreakpointCause::Hardware)
| HaltReason::Breakpoint(BreakpointCause::Unknown) => {
// Some architectures do not allow us to distinguish between hardware and software breakpoints, so we just treat `Unknown` as hardware breakpoints.
MultiThreadStopReason::HwBreak(tid)
}
HaltReason::Step => MultiThreadStopReason::DoneStep,
_ => MultiThreadStopReason::SignalWithThread {
tid,
signal: Signal::SIGINT,
},
});
break;
}
}
// halt all remaining cores that are still running
// GDB expects all or nothing stops
if stop_reason.is_some() {
for i in &self.cores {
let mut core = session.core(*i)?;
if !core.core_halted()? {
core.halt(Duration::from_millis(100))?;
}
}
}
}
if let Some(reason) = stop_reason {
Some(state.report_stop(self, reason).into_error()?)
} else {
wait_time = Duration::from_millis(10);
Some(state.into())
}
}
}
GdbStubStateMachine::CtrlCInterrupt(state) => {
// Break core, handle interrupt
{
let mut session = self.session.lock().unwrap();
for i in &self.cores {
let mut core = session.core(*i)?;
core.halt(Duration::from_millis(100))?;
}
}
Some(
state
.interrupt_handled(
self,
Some(MultiThreadStopReason::Signal(Signal::SIGINT)),
)
.into_error()?,
)
}
GdbStubStateMachine::Disconnected(state) => {
tracing::info!("GDB client disconnected: {:?}", state.get_reason());
None
}
};
return Ok(wait_time);
}
Ok(Duration::ZERO)
}
}
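/// Sketch of a polling driver for [`RuntimeTarget::process`] (assumption: the real
/// server loop lives elsewhere in this crate; this only illustrates the contract that
/// `process` returns how long the caller should wait before polling again).
#[allow(dead_code)]
fn run_targets(targets: &mut [RuntimeTarget<'_>]) -> Result<(), Error> {
    loop {
        // Poll every target once and keep the shortest requested back-off.
        let mut wait = Duration::from_millis(10);
        for target in targets.iter_mut() {
            wait = wait.min(target.process()?);
        }
        std::thread::sleep(wait);
    }
}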
impl Target for RuntimeTarget<'_> { | }
fn support_target_description_xml_override(
&mut self,
) -> Option<TargetDescriptionXmlOverrideOps<'_, Self>> {
Some(self)
}
fn support_breakpoints(&mut self) -> Option<BreakpointsOps<'_, Self>> {
Some(self)
}
fn support_memory_map(&mut self) -> Option<MemoryMapOps<'_, Self>> {
Some(self)
}
fn support_monitor_cmd(&mut self) -> Option<MonitorCmdOps<'_, Self>> {
Some(self)
}
fn guard_rail_implicit_sw_breakpoints(&self) -> bool {
true
}
}
/// Read a byte from a stream if available, otherwise return None
fn read_if_available(conn: &mut TcpStream) -> Result<Option<u8>, Error> {
match conn.peek() {
Ok(p) => {
// Unwrap is safe because peek already showed
// there's data in the buffer
match p {
Some(_) => conn.read().map(Some).into_error(),
None => Ok(None),
}
}
Err(e) => Err(anyhow::Error::from(e).into()),
}
} | type Arch = RuntimeArch;
type Error = Error;
fn base_ops(&mut self) -> BaseOps<'_, Self::Arch, Self::Error> {
BaseOps::MultiThread(self) | random_line_split |
mod.rs | mod base;
mod breakpoints;
mod desc;
mod monitor;
mod resume;
mod thread;
mod traits;
mod utils;
use super::arch::RuntimeArch;
use crate::{BreakpointCause, CoreStatus, Error, HaltReason, Session};
use gdbstub::stub::state_machine::GdbStubStateMachine;
use std::net::{SocketAddr, TcpListener, TcpStream};
use std::num::NonZeroUsize;
use std::sync::Mutex;
use std::time::Duration;
use gdbstub::common::Signal;
use gdbstub::conn::ConnectionExt;
use gdbstub::stub::{GdbStub, MultiThreadStopReason};
use gdbstub::target::ext::base::BaseOps;
use gdbstub::target::ext::breakpoints::BreakpointsOps;
use gdbstub::target::ext::memory_map::MemoryMapOps;
use gdbstub::target::ext::monitor_cmd::MonitorCmdOps;
use gdbstub::target::ext::target_description_xml_override::TargetDescriptionXmlOverrideOps;
use gdbstub::target::Target;
pub(crate) use traits::{GdbErrorExt, ProbeRsErrorExt};
use desc::TargetDescription;
/// Actions for resuming a core
#[derive(Debug, Copy, Clone)]
pub(crate) enum ResumeAction {
/// Don't change the state
Unchanged,
/// Resume core
Resume,
/// Single step core
Step,
}
/// The top level gdbstub target for a probe-rs debug session
pub(crate) struct | <'a> {
/// The probe-rs session object
session: &'a Mutex<Session>,
/// A list of core IDs for this stub
cores: Vec<usize>,
/// TCP listener accepting incoming connections
listener: TcpListener,
/// The current GDB stub state machine
gdb: Option<GdbStubStateMachine<'a, RuntimeTarget<'a>, TcpStream>>,
/// Resume action to be used upon a continue request
resume_action: (usize, ResumeAction),
/// Description of target's architecture and registers
target_desc: TargetDescription,
}
impl<'a> RuntimeTarget<'a> {
/// Create a new RuntimeTarget and get ready to start processing GDB input
pub fn new(
session: &'a Mutex<Session>,
cores: Vec<usize>,
addrs: &[SocketAddr],
) -> Result<Self, Error> {
let listener = TcpListener::bind(addrs).into_error()?;
listener.set_nonblocking(true).into_error()?;
Ok(Self {
session,
cores,
listener,
gdb: None,
resume_action: (0, ResumeAction::Unchanged),
target_desc: TargetDescription::default(),
})
}
/// Process any pending work for this target
///
/// Returns: Duration to wait before processing this target again
pub fn process(&mut self) -> Result<Duration, Error> {
// Stage 1 - unconnected
if self.gdb.is_none() {
// See if we have a connection
match self.listener.accept() {
Ok((s, addr)) => {
tracing::info!("New connection from {:#?}", addr);
for i in 0..self.cores.len() {
let core_id = self.cores[i];
// When we first attach to the core, GDB expects us to halt the core, so we do this here when a new client connects.
// If the core is already halted, issuing another halt command is a no-op, so we do this regardless of the core's current state.
self.session
.lock()
.unwrap()
.core(core_id)?
.halt(Duration::from_millis(100))?;
self.load_target_desc()?;
}
// Start the GDB Stub state machine
let stub = GdbStub::<RuntimeTarget, _>::new(s);
match stub.run_state_machine(self) {
Ok(gdbstub) => {
self.gdb = Some(gdbstub);
}
Err(e) => {
// Any errors at this state are either IO errors or fatal config errors
return Err(anyhow::Error::from(e).into());
}
};
}
Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {
// No connection yet
return Ok(Duration::from_millis(10));
}
Err(e) => {
// Fatal error
return Err(anyhow::Error::from(e).into());
}
};
}
// Stage 2 - connected
if self.gdb.is_some() {
let mut wait_time = Duration::ZERO;
let gdb = self.gdb.take().unwrap();
self.gdb = match gdb {
GdbStubStateMachine::Idle(mut state) => {
// Read data if available
let next_byte = {
let conn = state.borrow_conn();
read_if_available(conn)?
};
if let Some(b) = next_byte {
Some(state.incoming_data(self, b).into_error()?)
} else {
wait_time = Duration::from_millis(10);
Some(state.into())
}
}
GdbStubStateMachine::Running(mut state) => {
// Read data if available
let next_byte = {
let conn = state.borrow_conn();
read_if_available(conn)?
};
if let Some(b) = next_byte {
Some(state.incoming_data(self, b).into_error()?)
} else {
// Check for break
let mut stop_reason: Option<MultiThreadStopReason<u64>> = None;
{
let mut session = self.session.lock().unwrap();
for i in &self.cores {
let mut core = session.core(*i)?;
let status = core.status()?;
if let CoreStatus::Halted(reason) = status {
let tid = NonZeroUsize::new(i + 1).unwrap();
stop_reason = Some(match reason {
HaltReason::Breakpoint(BreakpointCause::Hardware)
| HaltReason::Breakpoint(BreakpointCause::Unknown) => {
// Some architectures do not allow us to distinguish between hardware and software breakpoints, so we just treat `Unknown` as hardware breakpoints.
MultiThreadStopReason::HwBreak(tid)
}
HaltReason::Step => MultiThreadStopReason::DoneStep,
_ => MultiThreadStopReason::SignalWithThread {
tid,
signal: Signal::SIGINT,
},
});
break;
}
}
// halt all remaining cores that are still running
// GDB expects all or nothing stops
if stop_reason.is_some() {
for i in &self.cores {
let mut core = session.core(*i)?;
if !core.core_halted()? {
core.halt(Duration::from_millis(100))?;
}
}
}
}
if let Some(reason) = stop_reason {
Some(state.report_stop(self, reason).into_error()?)
} else {
wait_time = Duration::from_millis(10);
Some(state.into())
}
}
}
GdbStubStateMachine::CtrlCInterrupt(state) => {
// Break core, handle interrupt
{
let mut session = self.session.lock().unwrap();
for i in &self.cores {
let mut core = session.core(*i)?;
core.halt(Duration::from_millis(100))?;
}
}
Some(
state
.interrupt_handled(
self,
Some(MultiThreadStopReason::Signal(Signal::SIGINT)),
)
.into_error()?,
)
}
GdbStubStateMachine::Disconnected(state) => {
tracing::info!("GDB client disconnected: {:?}", state.get_reason());
None
}
};
return Ok(wait_time);
}
Ok(Duration::ZERO)
}
}
impl Target for RuntimeTarget<'_> {
type Arch = RuntimeArch;
type Error = Error;
fn base_ops(&mut self) -> BaseOps<'_, Self::Arch, Self::Error> {
BaseOps::MultiThread(self)
}
fn support_target_description_xml_override(
&mut self,
) -> Option<TargetDescriptionXmlOverrideOps<'_, Self>> {
Some(self)
}
fn support_breakpoints(&mut self) -> Option<BreakpointsOps<'_, Self>> {
Some(self)
}
fn support_memory_map(&mut self) -> Option<MemoryMapOps<'_, Self>> {
Some(self)
}
fn support_monitor_cmd(&mut self) -> Option<MonitorCmdOps<'_, Self>> {
Some(self)
}
fn guard_rail_implicit_sw_breakpoints(&self) -> bool {
true
}
}
/// Read a byte from a stream if available, otherwise return None
fn read_if_available(conn: &mut TcpStream) -> Result<Option<u8>, Error> {
match conn.peek() {
Ok(p) => {
// Unwrap is safe because peek already showed
// there's data in the buffer
match p {
Some(_) => conn.read().map(Some).into_error(),
None => Ok(None),
}
}
Err(e) => Err(anyhow::Error::from(e).into()),
}
}
| RuntimeTarget | identifier_name |
mod.rs | mod base;
mod breakpoints;
mod desc;
mod monitor;
mod resume;
mod thread;
mod traits;
mod utils;
use super::arch::RuntimeArch;
use crate::{BreakpointCause, CoreStatus, Error, HaltReason, Session};
use gdbstub::stub::state_machine::GdbStubStateMachine;
use std::net::{SocketAddr, TcpListener, TcpStream};
use std::num::NonZeroUsize;
use std::sync::Mutex;
use std::time::Duration;
use gdbstub::common::Signal;
use gdbstub::conn::ConnectionExt;
use gdbstub::stub::{GdbStub, MultiThreadStopReason};
use gdbstub::target::ext::base::BaseOps;
use gdbstub::target::ext::breakpoints::BreakpointsOps;
use gdbstub::target::ext::memory_map::MemoryMapOps;
use gdbstub::target::ext::monitor_cmd::MonitorCmdOps;
use gdbstub::target::ext::target_description_xml_override::TargetDescriptionXmlOverrideOps;
use gdbstub::target::Target;
pub(crate) use traits::{GdbErrorExt, ProbeRsErrorExt};
use desc::TargetDescription;
/// Actions for resuming a core
#[derive(Debug, Copy, Clone)]
pub(crate) enum ResumeAction {
/// Don't change the state
Unchanged,
/// Resume core
Resume,
/// Single step core
Step,
}
/// The top level gdbstub target for a probe-rs debug session
pub(crate) struct RuntimeTarget<'a> {
/// The probe-rs session object
session: &'a Mutex<Session>,
/// A list of core IDs for this stub
cores: Vec<usize>,
/// TCP listener accepting incoming connections
listener: TcpListener,
/// The current GDB stub state machine
gdb: Option<GdbStubStateMachine<'a, RuntimeTarget<'a>, TcpStream>>,
/// Resume action to be used upon a continue request
resume_action: (usize, ResumeAction),
/// Description of target's architecture and registers
target_desc: TargetDescription,
}
impl<'a> RuntimeTarget<'a> {
/// Create a new RuntimeTarget and get ready to start processing GDB input
pub fn new(
session: &'a Mutex<Session>,
cores: Vec<usize>,
addrs: &[SocketAddr],
) -> Result<Self, Error> {
let listener = TcpListener::bind(addrs).into_error()?;
listener.set_nonblocking(true).into_error()?;
Ok(Self {
session,
cores,
listener,
gdb: None,
resume_action: (0, ResumeAction::Unchanged),
target_desc: TargetDescription::default(),
})
}
/// Process any pending work for this target
///
/// Returns: Duration to wait before processing this target again
pub fn process(&mut self) -> Result<Duration, Error> {
// Stage 1 - unconnected
if self.gdb.is_none() {
// See if we have a connection
match self.listener.accept() {
Ok((s, addr)) => {
tracing::info!("New connection from {:#?}", addr);
for i in 0..self.cores.len() {
let core_id = self.cores[i];
// When we first attach to the core, GDB expects us to halt the core, so we do this here when a new client connects.
// If the core is already halted, issuing another halt command is a no-op, so we do this regardless of the core's current state.
self.session
.lock()
.unwrap()
.core(core_id)?
.halt(Duration::from_millis(100))?;
self.load_target_desc()?;
}
// Start the GDB Stub state machine
let stub = GdbStub::<RuntimeTarget, _>::new(s);
match stub.run_state_machine(self) {
Ok(gdbstub) => {
self.gdb = Some(gdbstub);
}
Err(e) => {
// Any errors at this state are either IO errors or fatal config errors
return Err(anyhow::Error::from(e).into());
}
};
}
Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {
// No connection yet
return Ok(Duration::from_millis(10));
}
Err(e) => {
// Fatal error
return Err(anyhow::Error::from(e).into());
}
};
}
// Stage 2 - connected
if self.gdb.is_some() {
let mut wait_time = Duration::ZERO;
let gdb = self.gdb.take().unwrap();
self.gdb = match gdb {
GdbStubStateMachine::Idle(mut state) => {
// Read data if available
let next_byte = {
let conn = state.borrow_conn();
read_if_available(conn)?
};
if let Some(b) = next_byte {
Some(state.incoming_data(self, b).into_error()?)
} else {
wait_time = Duration::from_millis(10);
Some(state.into())
}
}
GdbStubStateMachine::Running(mut state) => {
// Read data if available
let next_byte = {
let conn = state.borrow_conn();
read_if_available(conn)?
};
if let Some(b) = next_byte {
Some(state.incoming_data(self, b).into_error()?)
} else {
// Check for break
let mut stop_reason: Option<MultiThreadStopReason<u64>> = None;
{
let mut session = self.session.lock().unwrap();
for i in &self.cores {
let mut core = session.core(*i)?;
let status = core.status()?;
if let CoreStatus::Halted(reason) = status {
let tid = NonZeroUsize::new(i + 1).unwrap();
stop_reason = Some(match reason {
HaltReason::Breakpoint(BreakpointCause::Hardware)
| HaltReason::Breakpoint(BreakpointCause::Unknown) => |
HaltReason::Step => MultiThreadStopReason::DoneStep,
_ => MultiThreadStopReason::SignalWithThread {
tid,
signal: Signal::SIGINT,
},
});
break;
}
}
// halt all remaining cores that are still running
// GDB expects all or nothing stops
if stop_reason.is_some() {
for i in &self.cores {
let mut core = session.core(*i)?;
if !core.core_halted()? {
core.halt(Duration::from_millis(100))?;
}
}
}
}
if let Some(reason) = stop_reason {
Some(state.report_stop(self, reason).into_error()?)
} else {
wait_time = Duration::from_millis(10);
Some(state.into())
}
}
}
GdbStubStateMachine::CtrlCInterrupt(state) => {
// Break core, handle interrupt
{
let mut session = self.session.lock().unwrap();
for i in &self.cores {
let mut core = session.core(*i)?;
core.halt(Duration::from_millis(100))?;
}
}
Some(
state
.interrupt_handled(
self,
Some(MultiThreadStopReason::Signal(Signal::SIGINT)),
)
.into_error()?,
)
}
GdbStubStateMachine::Disconnected(state) => {
tracing::info!("GDB client disconnected: {:?}", state.get_reason());
None
}
};
return Ok(wait_time);
}
Ok(Duration::ZERO)
}
}
impl Target for RuntimeTarget<'_> {
type Arch = RuntimeArch;
type Error = Error;
fn base_ops(&mut self) -> BaseOps<'_, Self::Arch, Self::Error> {
BaseOps::MultiThread(self)
}
fn support_target_description_xml_override(
&mut self,
) -> Option<TargetDescriptionXmlOverrideOps<'_, Self>> {
Some(self)
}
fn support_breakpoints(&mut self) -> Option<BreakpointsOps<'_, Self>> {
Some(self)
}
fn support_memory_map(&mut self) -> Option<MemoryMapOps<'_, Self>> {
Some(self)
}
fn support_monitor_cmd(&mut self) -> Option<MonitorCmdOps<'_, Self>> {
Some(self)
}
fn guard_rail_implicit_sw_breakpoints(&self) -> bool {
true
}
}
/// Read a byte from a stream if available, otherwise return None
fn read_if_available(conn: &mut TcpStream) -> Result<Option<u8>, Error> {
match conn.peek() {
Ok(p) => {
// Unwrap is safe because peek already showed
// there's data in the buffer
match p {
Some(_) => conn.read().map(Some).into_error(),
None => Ok(None),
}
}
Err(e) => Err(anyhow::Error::from(e).into()),
}
}
| {
// Some architectures do not allow us to distinguish between hardware and software breakpoints, so we just treat `Unknown` as hardware breakpoints.
MultiThreadStopReason::HwBreak(tid)
} | conditional_block |
packet_codec.rs | use std::io::Cursor;
use std::net::SocketAddr;
use std::u16;
use byteorder::{NetworkEndian, WriteBytesExt};
use bytes::Bytes;
use futures::sync::mpsc;
use futures::{future, Future, IntoFuture, Sink};
use num_traits::ToPrimitive;
use slog::{error, o, warn, Logger};
use tokio;
use crate::algorithms as algs;
use crate::connection::Connection;
use crate::connectionmanager::{ConnectionManager, Resender};
use crate::handler_data::{
ConnectionValue, Data, InCommandObserver, InPacketObserver,
};
use crate::packets::*;
use crate::{
Error, LockedHashMap, Result, MAX_FRAGMENTS_LENGTH, MAX_QUEUE_LEN,
};
/// Decodes incoming udp packets.
///
/// This part does the defragmentation, decryption and decompression.
pub struct PacketCodecReceiver<CM: ConnectionManager +'static> {
connections: LockedHashMap<CM::Key, ConnectionValue<CM::AssociatedData>>,
is_client: bool,
logger: Logger,
in_packet_observer:
LockedHashMap<String, Box<InPacketObserver<CM::AssociatedData>>>,
in_command_observer:
LockedHashMap<String, Box<InCommandObserver<CM::AssociatedData>>>,
/// The sink for `UdpPacket`s with no known connection.
///
/// This can stay `None` so all packets without connection will be dropped.
unknown_udp_packet_sink: Option<mpsc::Sender<(SocketAddr, InPacket)>>,
}
impl<CM: ConnectionManager +'static> PacketCodecReceiver<CM> {
pub fn new(
data: &Data<CM>,
unknown_udp_packet_sink: Option<mpsc::Sender<(SocketAddr, InPacket)>>,
) -> Self
{
Self {
connections: data.connections.clone(),
is_client: data.is_client,
logger: data.logger.clone(),
in_packet_observer: data.in_packet_observer.clone(),
in_command_observer: data.in_command_observer.clone(),
unknown_udp_packet_sink,
}
}
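/// Sketch of a convenience constructor (not part of the original API): create a
/// receiver together with a bounded channel that collects packets arriving from
/// addresses we have no known connection for yet.
pub fn with_unknown_packet_channel(
    data: &Data<CM>,
    capacity: usize,
) -> (Self, mpsc::Receiver<(SocketAddr, InPacket)>) {
    let (tx, rx) = mpsc::channel(capacity);
    (Self::new(data, Some(tx)), rx)
}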
pub fn handle_udp_packet(
&mut self,
(addr, packet): (SocketAddr, InPacket),
) -> impl Future<Item = (), Error = Error>
{
// Find the right connection
let cons = self.connections.read();
if let Some(con) =
cons.get(&CM::get_connection_key(addr, &packet)).cloned()
{
// If we are a client and have only a single connection, we will do the
// work inside this future and not spawn a new one.
let logger = self.logger.new(o!("addr" => addr));
let in_packet_observer = self.in_packet_observer.clone();
let in_command_observer = self.in_command_observer.clone();
if self.is_client && cons.len() == 1 {
drop(cons);
Self::connection_handle_udp_packet(
&logger,
in_packet_observer,
in_command_observer,
self.is_client,
&con,
addr,
packet,
)
.into_future()
} else {
drop(cons);
let is_client = self.is_client;
tokio::spawn(future::lazy(move || {
if let Err(e) = Self::connection_handle_udp_packet(
&logger,
in_packet_observer,
in_command_observer,
is_client,
&con,
addr,
packet,
) {
error!(logger, "Error handling udp packet"; "error" =>?e);
}
Ok(())
}));
future::ok(())
}
} else {
drop(cons);
// Unknown connection
if let Some(sink) = &mut self.unknown_udp_packet_sink {
// Don't block if the queue is full
if sink.try_send((addr, packet)).is_err() {
warn!(self.logger, "Unknown connection handler overloaded \
– dropping udp packet");
}
} else {
warn!(
self.logger,
"Dropped packet without connection because no unknown \
packet handler is set"
);
}
future::ok(())
}
}
/// Handle a packet for a specific connection.
///
/// This part does the defragmentation, decryption and decompression.
fn connection_handle_udp_packet(
logger: &Logger,
in_packet_observer: LockedHashMap<
String,
Box<InPacketObserver<CM::AssociatedData>>,
>,
in_command_observer: LockedHashMap<
String,
Box<InCommandObserver<CM::AssociatedData>>,
>,
is_client: bool,
connection: &ConnectionValue<CM::AssociatedData>,
_: SocketAddr,
mut packet: InPacket,
) -> Result<()>
{
let con2 = connection.downgrade();
let packet_sink = con2.as_packet_sink();
let mut con = connection.mutex.lock();
let con = &mut *con;
let packet_res;
let mut ack = false;
let p_type = packet.header().packet_type();
let dir = packet.direction();
let type_i = p_type.to_usize().unwrap();
let id = packet.header().packet_id();
let (in_recv_win, gen_id, cur_next, limit) =
con.1.in_receive_window(p_type, id);
if con.1.params.is_some() && p_type == PacketType::Init {
return Err(Error::UnexpectedInitPacket);
}
// Ignore range for acks
if p_type == PacketType::Ack
|| p_type == PacketType::AckLow
|| in_recv_win
{
if !packet.header().flags().contains(Flags::UNENCRYPTED) {
// If it is the first ack packet of a client, try to fake
// decrypt it.
let decrypted = if (p_type == PacketType::Ack
&& id <= 1 && is_client)
|| con.1.params.is_none()
{
if let Ok(dec) = algs::decrypt_fake(&packet) {
packet.set_content(dec);
true
} else {
false
}
} else {
false
};
if !decrypted {
if let Some(params) = &mut con.1.params {
// Decrypt the packet
let dec_res = algs::decrypt(
&packet,
gen_id,
¶ms.shared_iv,
&mut params.key_cache,
);
if dec_res.is_err()
&& p_type == PacketType::Ack
&& id == 1 && is_client
{
// Ignore error, this is the ack packet for the
// clientinit, we take the initserver as ack anyway.
return Ok(());
}
packet.set_content(dec_res?);
} else {
// Failed to fake decrypt the packet
return Err(Error::WrongMac);
}
}
} else if algs::must_encrypt(p_type) {
// Check if it is ok for the packet to be unencrypted
return Err(Error::UnallowedUnencryptedPacket);
}
match p_type {
PacketType::Command | PacketType::CommandLow => {
ack = true;
for o in in_packet_observer.read().values() {
o.observe(con, &packet);
}
let in_ids = &mut con.1.incoming_p_ids;
let r_queue = &mut con.1.receive_queue;
let frag_queue = &mut con.1.fragmented_queue;
let commands = Self::handle_command_packet(
logger, r_queue, frag_queue, in_ids, packet,
)?;
// Be careful with command packets, they are
// guaranteed to be in the right order now, because
// we hold a lock on the connection.
let observer = in_command_observer.read();
for c in commands {
for o in observer.values() {
o.observe(con, &c);
}
// Send to packet handler
if let Err(e) = con.1.command_sink.unbounded_send(c) {
error!(logger, "Failed to send command packet to \
handler"; "error" =>?e);
}
}
// Dummy value
packet_res = Ok(None);
}
_ => {
if p_type == PacketType::Ping {
ack = true;
}
// Update packet ids
let in_ids = &mut con.1.incoming_p_ids;
let (id, next_gen) = id.overflowing_add(1);
if p_type != PacketType::Init {
in_ids[type_i] =
(if next_gen { gen_id + 1 } else { gen_id }, id);
}
if let Some(ack_id) = packet.ack_packet() {
// Remove the command packet from the send queue once the matching ack is received.
let p_type = if p_type == PacketType::Ack {
PacketType::Command
} else {
| con.1.resender.ack_packet(p_type, ack_id);
} else if p_type.is_voice() {
// Seems to work better without assembling the first 3 voice packets
// Use handle_voice_packet to assemble fragmented voice packets
/*let mut res = Self::handle_voice_packet(&logger, params, &header, p_data);
let res = res.drain(..).map(|p|
(con_key.clone(), p)).collect();
Ok(res)*/
}
// Call observer after handling acks
for o in in_packet_observer.read().values() {
o.observe(con, &packet);
}
packet_res = Ok(Some(packet));
}
}
} else {
// Send an ack for the case when it was lost
if p_type == PacketType::Command || p_type == PacketType::CommandLow
{
ack = true;
}
packet_res = Err(Error::NotInReceiveWindow {
id,
next: cur_next,
limit,
p_type,
});
};
// Send ack
if ack {
tokio::spawn(
packet_sink
.send(OutAck::new(dir.reverse(), p_type, id))
.map(|_| ())
// Ignore errors, this can happen if the connection is
// already gone because we are disconnected.
.map_err(|_| ()),
);
}
if let Some(packet) = packet_res? {
if p_type.is_voice() {
if let Err(e) =
con.1.audio_sink.unbounded_send(packet.into_audio()?)
{
error!(logger, "Failed to send packet to handler"; "error" =>?e);
}
} else if p_type == PacketType::Init {
if is_client {
if let Err(e) = con
.1
.s2c_init_sink
.unbounded_send(packet.into_s2cinit()?)
{
error!(logger, "Failed to send packet to handler"; "error" =>?e);
}
} else if let Err(e) = con
.1
.c2s_init_sink
.unbounded_send(packet.into_c2sinit().map_err(|(_, e)| e)?)
{
error!(logger, "Failed to send packet to handler"; "error" =>?e);
}
}
}
Ok(())
}
/// Handle `Command` and `CommandLow` packets.
///
/// They have to be handled in the right order.
fn handle_command_packet(
logger: &Logger,
r_queue: &mut [Vec<InPacket>; 2],
frag_queue: &mut [Option<(InPacket, Vec<u8>)>; 2],
in_ids: &mut [(u32, u16); 8],
mut packet: InPacket,
) -> Result<Vec<InCommand>>
{
let header = packet.header();
let p_type = header.packet_type();
let mut id = header.packet_id();
let type_i = p_type.to_usize().unwrap();
let cmd_i = if p_type == PacketType::Command { 0 } else { 1 };
let r_queue = &mut r_queue[cmd_i];
let frag_queue = &mut frag_queue[cmd_i];
let in_ids = &mut in_ids[type_i];
let cur_next = in_ids.1;
if cur_next == id {
// In order
let mut packets = Vec::new();
loop {
// Update next packet id
let (next_id, next_gen) = id.overflowing_add(1);
if next_gen {
// Next packet generation
in_ids.0 = in_ids.0.wrapping_add(1);
}
in_ids.1 = next_id;
let flags = packet.header().flags();
let res_packet = if flags.contains(Flags::FRAGMENTED) {
if let Some((header, mut frag_queue)) = frag_queue.take() {
// Last fragmented packet
frag_queue.extend_from_slice(packet.content());
// Decompress
let decompressed = if header
.header()
.flags()
.contains(Flags::COMPRESSED)
{
//debug!(logger, "Compressed"; "data" =>?::utils::HexSlice(&frag_queue));
::quicklz::decompress(
&mut Cursor::new(frag_queue),
crate::MAX_DECOMPRESSED_SIZE,
)?
} else {
frag_queue
};
/*if header.get_compressed() {
debug!(logger, "Decompressed";
"data" =>?::HexSlice(&decompressed),
"string" => %String::from_utf8_lossy(&decompressed),
);
}*/
Some(
InCommand::with_content(&header, decompressed)
.map_err(|(_, e)| e)?,
)
} else {
// Enqueue
let content = packet.take_content();
*frag_queue = Some((packet, content));
None
}
} else if let Some((_, ref mut frag_queue)) = *frag_queue {
// The packet is fragmented
if frag_queue.len() < MAX_FRAGMENTS_LENGTH {
frag_queue.extend_from_slice(packet.content());
None
} else {
return Err(Error::MaxLengthExceeded(String::from(
"fragment queue",
)));
}
} else {
// Decompress
let decompressed = if flags.contains(Flags::COMPRESSED) {
//debug!(logger, "Compressed"; "data" =>?::utils::HexSlice(packet.content()));
::quicklz::decompress(
&mut Cursor::new(packet.content()),
crate::MAX_DECOMPRESSED_SIZE,
)?
} else {
packet.take_content()
};
/*if header.get_compressed() {
debug!(logger, "Decompressed"; "data" =>?::HexSlice(&decompressed));
}*/
Some(
InCommand::with_content(&packet, decompressed)
.map_err(|(_, e)| e)?,
)
};
if let Some(p) = res_packet {
packets.push(p);
}
// Check if there are following packets in the receive queue.
id = id.wrapping_add(1);
if let Some(pos) =
r_queue.iter().position(|p| p.header().packet_id() == id)
{
packet = r_queue.remove(pos);
} else {
break;
}
}
// The first packets should be returned first
packets.reverse();
Ok(packets)
} else {
// Out of order
warn!(logger, "Out of order command packet"; "got" => id,
"expected" => cur_next);
let limit = ((u32::from(cur_next) + MAX_QUEUE_LEN as u32)
% u32::from(u16::MAX)) as u16;
if (cur_next < limit && id >= cur_next && id < limit)
|| (cur_next > limit && (id >= cur_next || id < limit))
{
r_queue.push(packet);
Ok(vec![])
} else {
Err(Error::MaxLengthExceeded(String::from("command queue")))
}
}
}
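/// Sketch of the wrap-around window check used by the out-of-order branch above,
/// pulled out as a standalone helper purely for illustration (it is not called by
/// the original code).
#[allow(dead_code)]
fn in_reorder_window(cur_next: u16, id: u16) -> bool {
    let limit = ((u32::from(cur_next) + MAX_QUEUE_LEN as u32)
        % u32::from(u16::MAX)) as u16;
    (cur_next < limit && id >= cur_next && id < limit)
        || (cur_next > limit && (id >= cur_next || id < limit))
}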
/*/// Handle `Voice` and `VoiceLow` packets.
///
/// The first 3 packets for each audio transmission have the compressed flag
/// set, which means they are fragmented and should be concatenated.
fn handle_voice_packet(
logger: &slog::Logger,
params: &mut ConnectedParams,
header: &Header,
packet: PData,
) -> Vec<Packet> {
let cmd_i = if header.get_type() == PacketType::Voice {
0
} else {
1
};
let frag_queue = &mut params.voice_fragmented_queue[cmd_i];
let (id, from_id, codec_type, voice_data) = match packet {
PData::VoiceS2C { id, from_id, codec_type, voice_data } => (id, from_id, codec_type, voice_data),
PData::VoiceWhisperS2C { id, from_id, codec_type, voice_data } => (id, from_id, codec_type, voice_data),
_ => unreachable!("handle_voice_packet did get an unknown voice packet"),
};
if header.get_compressed() {
let queue = frag_queue.entry(from_id).or_insert_with(Vec::new);
// Append to fragments
if queue.len() < MAX_FRAGMENTS_LENGTH {
queue.extend_from_slice(&voice_data);
return Vec::new();
}
warn!(logger, "Length of voice fragment queue exceeded"; "len" => queue.len());
}
let mut res = Vec::new();
if let Some(frags) = frag_queue.remove(&from_id) {
// We got two packets
let packet_data = if header.get_type() == PacketType::Voice {
PData::VoiceS2C { id, from_id, codec_type, voice_data: frags }
} else {
PData::VoiceWhisperS2C { id, from_id, codec_type, voice_data: frags }
};
res.push(Packet::new(header.clone(), packet_data));
}
let packet_data = if header.get_type() == PacketType::Voice {
PData::VoiceS2C { id, from_id, codec_type, voice_data }
} else {
PData::VoiceWhisperS2C { id, from_id, codec_type, voice_data }
};
res.push(Packet::new(header.clone(), packet_data));
res
}*/
}
/// Encodes outgoing packets.
///
/// This part does the compression, encryption and fragmentation.
pub struct PacketCodecSender {
is_client: bool,
}
impl PacketCodecSender {
pub fn new(is_client: bool) -> Self { Self { is_client } }
pub fn encode_packet(
&self,
con: &mut Connection,
mut packet: OutPacket,
) -> Result<Vec<(u16, Bytes)>>
{
let p_type = packet.header().packet_type();
let type_i = p_type.to_usize().unwrap();
// TODO: Is this needed here? Commands should probably set their own flag.
if (p_type == PacketType::Command || p_type == PacketType::CommandLow)
&& self.is_client
{
// Set newprotocol flag
packet.flags(packet.header().flags() | Flags::NEWPROTOCOL);
}
let (gen, p_id) = if p_type == PacketType::Init {
(0, 0)
} else {
con.outgoing_p_ids[type_i]
};
// We fake encrypt the first command packet of the
// server (id 0) and the first command packet of the
// client (id 1) if the client uses the new protocol
// (the packet is a clientek).
let mut fake_encrypt = p_type == PacketType::Command
&& gen == 0
&& ((!self.is_client && p_id == 0)
|| (self.is_client && p_id == 1 && {
// Test if it is a clientek packet
let s = b"clientek";
packet.content().len() >= s.len()
&& packet.content()[..s.len()] == s[..]
}));
// Also fake encrypt the first ack of the client, which is the response
// for the initivexpand2 packet.
fake_encrypt |= self.is_client && p_type == PacketType::Ack && gen == 0
&& p_id == 0;
// Get values from parameters
let should_encrypt;
let c_id;
if let Some(params) = con.params.as_mut() {
should_encrypt =
algs::should_encrypt(p_type, params.voice_encryption);
c_id = params.c_id;
} else {
should_encrypt = algs::should_encrypt(p_type, false);
if should_encrypt {
fake_encrypt = true;
}
c_id = 0;
}
// Client id for clients
if self.is_client {
packet.client_id(c_id);
}
if !should_encrypt && !fake_encrypt {
packet.flags(packet.header().flags() | Flags::UNENCRYPTED);
if let Some(params) = con.params.as_mut() {
packet.mac().copy_from_slice(¶ms.shared_mac);
}
}
// Compress and split packet
let packet_id;
let packets = if p_type == PacketType::Command
|| p_type == PacketType::CommandLow
{
packet_id = None;
algs::compress_and_split(self.is_client, packet)
} else {
// Set the inner packet id for voice packets
if p_type == PacketType::Voice || p_type == PacketType::VoiceWhisper
{
(&mut packet.content_mut()[..2])
.write_u16::<NetworkEndian>(con.outgoing_p_ids[type_i].1)
.unwrap();
}
// Identify init packets by their number
if p_type == PacketType::Init {
if packet.direction() == Direction::S2C {
packet_id = Some(u16::from(packet.content()[0]));
} else {
packet_id = Some(u16::from(packet.content()[4]));
}
} else {
packet_id = None;
}
vec![packet]
};
let packets = packets
.into_iter()
.map(|mut packet| -> Result<_> {
// Get packet id
let (mut gen, mut p_id) = if p_type == PacketType::Init {
(0, 0)
} else {
con.outgoing_p_ids[type_i]
};
if p_type != PacketType::Init {
packet.packet_id(p_id);
}
// Identify init packets by their number
let packet_id = packet_id.unwrap_or(p_id);
// Encrypt if necessary
if fake_encrypt {
algs::encrypt_fake(&mut packet)?;
} else if should_encrypt {
// The params are set
let params = con.params.as_mut().unwrap();
algs::encrypt(
&mut packet,
gen,
¶ms.shared_iv,
&mut params.key_cache,
)?;
}
// Increment outgoing_p_ids
p_id = p_id.wrapping_add(1);
if p_id == 0 {
gen = gen.wrapping_add(1);
}
if p_type != PacketType::Init {
con.outgoing_p_ids[type_i] = (gen, p_id);
}
Ok((packet_id, packet.into_vec().into()))
})
.collect::<Result<Vec<_>>>()?;
Ok(packets)
}
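/// Sketch of the (generation, id) bookkeeping performed at the end of
/// `encode_packet` for non-init packets, extracted here only to illustrate the
/// wrap-around behaviour (not part of the original API).
#[allow(dead_code)]
fn next_packet_id((gen, id): (u32, u16)) -> (u32, u16) {
    let id = id.wrapping_add(1);
    if id == 0 {
        (gen.wrapping_add(1), id)
    } else {
        (gen, id)
    }
}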
}
| PacketType::CommandLow
};
| conditional_block |
packet_codec.rs | use std::io::Cursor;
use std::net::SocketAddr;
use std::u16;
use byteorder::{NetworkEndian, WriteBytesExt};
use bytes::Bytes;
use futures::sync::mpsc;
use futures::{future, Future, IntoFuture, Sink};
use num_traits::ToPrimitive;
use slog::{error, o, warn, Logger};
use tokio;
use crate::algorithms as algs;
use crate::connection::Connection;
use crate::connectionmanager::{ConnectionManager, Resender};
use crate::handler_data::{
ConnectionValue, Data, InCommandObserver, InPacketObserver,
};
use crate::packets::*;
use crate::{
Error, LockedHashMap, Result, MAX_FRAGMENTS_LENGTH, MAX_QUEUE_LEN,
};
/// Decodes incoming udp packets.
///
/// This part does the defragmentation, decryption and decompression.
pub struct PacketCodecReceiver<CM: ConnectionManager +'static> {
connections: LockedHashMap<CM::Key, ConnectionValue<CM::AssociatedData>>,
is_client: bool,
logger: Logger,
in_packet_observer:
LockedHashMap<String, Box<InPacketObserver<CM::AssociatedData>>>,
in_command_observer:
LockedHashMap<String, Box<InCommandObserver<CM::AssociatedData>>>,
/// The sink for `UdpPacket`s with no known connection.
///
/// This can stay `None` so all packets without connection will be dropped.
unknown_udp_packet_sink: Option<mpsc::Sender<(SocketAddr, InPacket)>>,
}
impl<CM: ConnectionManager +'static> PacketCodecReceiver<CM> {
pub fn new(
data: &Data<CM>,
unknown_udp_packet_sink: Option<mpsc::Sender<(SocketAddr, InPacket)>>,
) -> Self
{
Self {
connections: data.connections.clone(),
is_client: data.is_client,
logger: data.logger.clone(),
in_packet_observer: data.in_packet_observer.clone(),
in_command_observer: data.in_command_observer.clone(),
unknown_udp_packet_sink,
}
}
pub fn | (
&mut self,
(addr, packet): (SocketAddr, InPacket),
) -> impl Future<Item = (), Error = Error>
{
// Find the right connection
let cons = self.connections.read();
if let Some(con) =
cons.get(&CM::get_connection_key(addr, &packet)).cloned()
{
// If we are a client and have only a single connection, we will do the
// work inside this future and not spawn a new one.
let logger = self.logger.new(o!("addr" => addr));
let in_packet_observer = self.in_packet_observer.clone();
let in_command_observer = self.in_command_observer.clone();
if self.is_client && cons.len() == 1 {
drop(cons);
Self::connection_handle_udp_packet(
&logger,
in_packet_observer,
in_command_observer,
self.is_client,
&con,
addr,
packet,
)
.into_future()
} else {
drop(cons);
let is_client = self.is_client;
tokio::spawn(future::lazy(move || {
if let Err(e) = Self::connection_handle_udp_packet(
&logger,
in_packet_observer,
in_command_observer,
is_client,
&con,
addr,
packet,
) {
error!(logger, "Error handling udp packet"; "error" =>?e);
}
Ok(())
}));
future::ok(())
}
} else {
drop(cons);
// Unknown connection
if let Some(sink) = &mut self.unknown_udp_packet_sink {
// Don't block if the queue is full
if sink.try_send((addr, packet)).is_err() {
warn!(self.logger, "Unknown connection handler overloaded \
– dropping udp packet");
}
} else {
warn!(
self.logger,
"Dropped packet without connection because no unknown \
packet handler is set"
);
}
future::ok(())
}
}
/// Handle a packet for a specific connection.
///
/// This part does the defragmentation, decryption and decompression.
fn connection_handle_udp_packet(
logger: &Logger,
in_packet_observer: LockedHashMap<
String,
Box<InPacketObserver<CM::AssociatedData>>,
>,
in_command_observer: LockedHashMap<
String,
Box<InCommandObserver<CM::AssociatedData>>,
>,
is_client: bool,
connection: &ConnectionValue<CM::AssociatedData>,
_: SocketAddr,
mut packet: InPacket,
) -> Result<()>
{
let con2 = connection.downgrade();
let packet_sink = con2.as_packet_sink();
let mut con = connection.mutex.lock();
let con = &mut *con;
let packet_res;
let mut ack = false;
let p_type = packet.header().packet_type();
let dir = packet.direction();
let type_i = p_type.to_usize().unwrap();
let id = packet.header().packet_id();
let (in_recv_win, gen_id, cur_next, limit) =
con.1.in_receive_window(p_type, id);
if con.1.params.is_some() && p_type == PacketType::Init {
return Err(Error::UnexpectedInitPacket);
}
// Ignore range for acks
if p_type == PacketType::Ack
|| p_type == PacketType::AckLow
|| in_recv_win
{
if !packet.header().flags().contains(Flags::UNENCRYPTED) {
// If it is the first ack packet of a client, try to fake
// decrypt it.
let decrypted = if (p_type == PacketType::Ack
&& id <= 1 && is_client)
|| con.1.params.is_none()
{
if let Ok(dec) = algs::decrypt_fake(&packet) {
packet.set_content(dec);
true
} else {
false
}
} else {
false
};
if !decrypted {
if let Some(params) = &mut con.1.params {
// Decrypt the packet
let dec_res = algs::decrypt(
&packet,
gen_id,
¶ms.shared_iv,
&mut params.key_cache,
);
if dec_res.is_err()
&& p_type == PacketType::Ack
&& id == 1 && is_client
{
// Ignore error, this is the ack packet for the
// clientinit, we take the initserver as ack anyway.
return Ok(());
}
packet.set_content(dec_res?);
} else {
// Failed to fake decrypt the packet
return Err(Error::WrongMac);
}
}
} else if algs::must_encrypt(p_type) {
// Check if it is ok for the packet to be unencrypted
return Err(Error::UnallowedUnencryptedPacket);
}
match p_type {
PacketType::Command | PacketType::CommandLow => {
ack = true;
for o in in_packet_observer.read().values() {
o.observe(con, &packet);
}
let in_ids = &mut con.1.incoming_p_ids;
let r_queue = &mut con.1.receive_queue;
let frag_queue = &mut con.1.fragmented_queue;
let commands = Self::handle_command_packet(
logger, r_queue, frag_queue, in_ids, packet,
)?;
// Be careful with command packets, they are
// guaranteed to be in the right order now, because
// we hold a lock on the connection.
let observer = in_command_observer.read();
for c in commands {
for o in observer.values() {
o.observe(con, &c);
}
// Send to packet handler
if let Err(e) = con.1.command_sink.unbounded_send(c) {
error!(logger, "Failed to send command packet to \
handler"; "error" =>?e);
}
}
// Dummy value
packet_res = Ok(None);
}
_ => {
if p_type == PacketType::Ping {
ack = true;
}
// Update packet ids
let in_ids = &mut con.1.incoming_p_ids;
let (id, next_gen) = id.overflowing_add(1);
if p_type != PacketType::Init {
in_ids[type_i] =
(if next_gen { gen_id + 1 } else { gen_id }, id);
}
if let Some(ack_id) = packet.ack_packet() {
// Remove the command packet from the send queue once the matching ack is received.
let p_type = if p_type == PacketType::Ack {
PacketType::Command
} else {
PacketType::CommandLow
};
con.1.resender.ack_packet(p_type, ack_id);
} else if p_type.is_voice() {
// Seems to work better without assembling the first 3 voice packets
// Use handle_voice_packet to assemble fragmented voice packets
/*let mut res = Self::handle_voice_packet(&logger, params, &header, p_data);
let res = res.drain(..).map(|p|
(con_key.clone(), p)).collect();
Ok(res)*/
}
// Call observer after handling acks
for o in in_packet_observer.read().values() {
o.observe(con, &packet);
}
packet_res = Ok(Some(packet));
}
}
} else {
// Send an ack for the case when it was lost
if p_type == PacketType::Command || p_type == PacketType::CommandLow
{
ack = true;
}
packet_res = Err(Error::NotInReceiveWindow {
id,
next: cur_next,
limit,
p_type,
});
};
// Send ack
if ack {
tokio::spawn(
packet_sink
.send(OutAck::new(dir.reverse(), p_type, id))
.map(|_| ())
// Ignore errors, this can happen if the connection is
// already gone because we are disconnected.
.map_err(|_| ()),
);
}
if let Some(packet) = packet_res? {
if p_type.is_voice() {
if let Err(e) =
con.1.audio_sink.unbounded_send(packet.into_audio()?)
{
error!(logger, "Failed to send packet to handler"; "error" =>?e);
}
} else if p_type == PacketType::Init {
if is_client {
if let Err(e) = con
.1
.s2c_init_sink
.unbounded_send(packet.into_s2cinit()?)
{
error!(logger, "Failed to send packet to handler"; "error" =>?e);
}
} else if let Err(e) = con
.1
.c2s_init_sink
.unbounded_send(packet.into_c2sinit().map_err(|(_, e)| e)?)
{
error!(logger, "Failed to send packet to handler"; "error" =>?e);
}
}
}
Ok(())
}
/// Handle `Command` and `CommandLow` packets.
///
/// They have to be handled in the right order.
fn handle_command_packet(
logger: &Logger,
r_queue: &mut [Vec<InPacket>; 2],
frag_queue: &mut [Option<(InPacket, Vec<u8>)>; 2],
in_ids: &mut [(u32, u16); 8],
mut packet: InPacket,
) -> Result<Vec<InCommand>>
{
let header = packet.header();
let p_type = header.packet_type();
let mut id = header.packet_id();
let type_i = p_type.to_usize().unwrap();
let cmd_i = if p_type == PacketType::Command { 0 } else { 1 };
let r_queue = &mut r_queue[cmd_i];
let frag_queue = &mut frag_queue[cmd_i];
let in_ids = &mut in_ids[type_i];
let cur_next = in_ids.1;
if cur_next == id {
// In order
let mut packets = Vec::new();
loop {
// Update next packet id
let (next_id, next_gen) = id.overflowing_add(1);
if next_gen {
// Next packet generation
in_ids.0 = in_ids.0.wrapping_add(1);
}
in_ids.1 = next_id;
let flags = packet.header().flags();
let res_packet = if flags.contains(Flags::FRAGMENTED) {
if let Some((header, mut frag_queue)) = frag_queue.take() {
// Last fragmented packet
frag_queue.extend_from_slice(packet.content());
// Decompress
let decompressed = if header
.header()
.flags()
.contains(Flags::COMPRESSED)
{
//debug!(logger, "Compressed"; "data" =>?::utils::HexSlice(&frag_queue));
::quicklz::decompress(
&mut Cursor::new(frag_queue),
crate::MAX_DECOMPRESSED_SIZE,
)?
} else {
frag_queue
};
/*if header.get_compressed() {
debug!(logger, "Decompressed";
"data" =>?::HexSlice(&decompressed),
"string" => %String::from_utf8_lossy(&decompressed),
);
}*/
Some(
InCommand::with_content(&header, decompressed)
.map_err(|(_, e)| e)?,
)
} else {
// Enqueue
let content = packet.take_content();
*frag_queue = Some((packet, content));
None
}
} else if let Some((_, ref mut frag_queue)) = *frag_queue {
// The packet is fragmented
if frag_queue.len() < MAX_FRAGMENTS_LENGTH {
frag_queue.extend_from_slice(packet.content());
None
} else {
return Err(Error::MaxLengthExceeded(String::from(
"fragment queue",
)));
}
} else {
// Decompress
let decompressed = if flags.contains(Flags::COMPRESSED) {
//debug!(logger, "Compressed"; "data" =>?::utils::HexSlice(packet.content()));
::quicklz::decompress(
&mut Cursor::new(packet.content()),
crate::MAX_DECOMPRESSED_SIZE,
)?
} else {
packet.take_content()
};
/*if header.get_compressed() {
debug!(logger, "Decompressed"; "data" =>?::HexSlice(&decompressed));
}*/
Some(
InCommand::with_content(&packet, decompressed)
.map_err(|(_, e)| e)?,
)
};
if let Some(p) = res_packet {
packets.push(p);
}
// Check if there are following packets in the receive queue.
id = id.wrapping_add(1);
if let Some(pos) =
r_queue.iter().position(|p| p.header().packet_id() == id)
{
packet = r_queue.remove(pos);
} else {
break;
}
}
// The first packets should be returned first
packets.reverse();
Ok(packets)
} else {
// Out of order
warn!(logger, "Out of order command packet"; "got" => id,
"expected" => cur_next);
let limit = ((u32::from(cur_next) + MAX_QUEUE_LEN as u32)
% u32::from(u16::MAX)) as u16;
if (cur_next < limit && id >= cur_next && id < limit)
|| (cur_next > limit && (id >= cur_next || id < limit))
{
r_queue.push(packet);
Ok(vec![])
} else {
Err(Error::MaxLengthExceeded(String::from("command queue")))
}
}
}
/*/// Handle `Voice` and `VoiceLow` packets.
///
/// The first 3 packets for each audio transmission have the compressed flag
/// set, which means they are fragmented and should be concatenated.
fn handle_voice_packet(
logger: &slog::Logger,
params: &mut ConnectedParams,
header: &Header,
packet: PData,
) -> Vec<Packet> {
let cmd_i = if header.get_type() == PacketType::Voice {
0
} else {
1
};
let frag_queue = &mut params.voice_fragmented_queue[cmd_i];
let (id, from_id, codec_type, voice_data) = match packet {
PData::VoiceS2C { id, from_id, codec_type, voice_data } => (id, from_id, codec_type, voice_data),
PData::VoiceWhisperS2C { id, from_id, codec_type, voice_data } => (id, from_id, codec_type, voice_data),
_ => unreachable!("handle_voice_packet did get an unknown voice packet"),
};
if header.get_compressed() {
let queue = frag_queue.entry(from_id).or_insert_with(Vec::new);
// Append to fragments
if queue.len() < MAX_FRAGMENTS_LENGTH {
queue.extend_from_slice(&voice_data);
return Vec::new();
}
warn!(logger, "Length of voice fragment queue exceeded"; "len" => queue.len());
}
let mut res = Vec::new();
if let Some(frags) = frag_queue.remove(&from_id) {
// We got two packets
let packet_data = if header.get_type() == PacketType::Voice {
PData::VoiceS2C { id, from_id, codec_type, voice_data: frags }
} else {
PData::VoiceWhisperS2C { id, from_id, codec_type, voice_data: frags }
};
res.push(Packet::new(header.clone(), packet_data));
}
let packet_data = if header.get_type() == PacketType::Voice {
PData::VoiceS2C { id, from_id, codec_type, voice_data }
} else {
PData::VoiceWhisperS2C { id, from_id, codec_type, voice_data }
};
res.push(Packet::new(header.clone(), packet_data));
res
}*/
}
/// Encodes outgoing packets.
///
/// This part does the compression, encryption and fragmentation.
pub struct PacketCodecSender {
is_client: bool,
}
impl PacketCodecSender {
pub fn new(is_client: bool) -> Self { Self { is_client } }
pub fn encode_packet(
&self,
con: &mut Connection,
mut packet: OutPacket,
) -> Result<Vec<(u16, Bytes)>>
{
let p_type = packet.header().packet_type();
let type_i = p_type.to_usize().unwrap();
// TODO: Is this needed here? Commands should probably set their own flag.
if (p_type == PacketType::Command || p_type == PacketType::CommandLow)
&& self.is_client
{
// Set newprotocol flag
packet.flags(packet.header().flags() | Flags::NEWPROTOCOL);
}
let (gen, p_id) = if p_type == PacketType::Init {
(0, 0)
} else {
con.outgoing_p_ids[type_i]
};
// We fake encrypt the first command packet of the
// server (id 0) and the first command packet of the
// client (id 1) if the client uses the new protocol
// (the packet is a clientek).
let mut fake_encrypt = p_type == PacketType::Command
&& gen == 0
&& ((!self.is_client && p_id == 0)
|| (self.is_client && p_id == 1 && {
// Test if it is a clientek packet
let s = b"clientek";
packet.content().len() >= s.len()
&& packet.content()[..s.len()] == s[..]
}));
// Also fake encrypt the first ack of the client, which is the response
// for the initivexpand2 packet.
fake_encrypt |= self.is_client && p_type == PacketType::Ack && gen == 0
&& p_id == 0;
// Get values from parameters
let should_encrypt;
let c_id;
if let Some(params) = con.params.as_mut() {
should_encrypt =
algs::should_encrypt(p_type, params.voice_encryption);
c_id = params.c_id;
} else {
should_encrypt = algs::should_encrypt(p_type, false);
if should_encrypt {
fake_encrypt = true;
}
c_id = 0;
}
// Client id for clients
if self.is_client {
packet.client_id(c_id);
}
if !should_encrypt && !fake_encrypt {
packet.flags(packet.header().flags() | Flags::UNENCRYPTED);
if let Some(params) = con.params.as_mut() {
packet.mac().copy_from_slice(¶ms.shared_mac);
}
}
// Compress and split packet
let packet_id;
let packets = if p_type == PacketType::Command
|| p_type == PacketType::CommandLow
{
packet_id = None;
algs::compress_and_split(self.is_client, packet)
} else {
// Set the inner packet id for voice packets
if p_type == PacketType::Voice || p_type == PacketType::VoiceWhisper
{
(&mut packet.content_mut()[..2])
.write_u16::<NetworkEndian>(con.outgoing_p_ids[type_i].1)
.unwrap();
}
// Identify init packets by their number
if p_type == PacketType::Init {
if packet.direction() == Direction::S2C {
packet_id = Some(u16::from(packet.content()[0]));
} else {
packet_id = Some(u16::from(packet.content()[4]));
}
} else {
packet_id = None;
}
vec![packet]
};
let packets = packets
.into_iter()
.map(|mut packet| -> Result<_> {
// Get packet id
let (mut gen, mut p_id) = if p_type == PacketType::Init {
(0, 0)
} else {
con.outgoing_p_ids[type_i]
};
if p_type != PacketType::Init {
packet.packet_id(p_id);
}
// Identify init packets by their number
let packet_id = packet_id.unwrap_or(p_id);
// Encrypt if necessary
if fake_encrypt {
algs::encrypt_fake(&mut packet)?;
} else if should_encrypt {
// The params are set
let params = con.params.as_mut().unwrap();
algs::encrypt(
&mut packet,
gen,
¶ms.shared_iv,
&mut params.key_cache,
)?;
}
// Increment outgoing_p_ids
p_id = p_id.wrapping_add(1);
if p_id == 0 {
gen = gen.wrapping_add(1);
}
if p_type != PacketType::Init {
con.outgoing_p_ids[type_i] = (gen, p_id);
}
Ok((packet_id, packet.into_vec().into()))
})
.collect::<Result<Vec<_>>>()?;
Ok(packets)
}
}
| handle_udp_packet | identifier_name |
packet_codec.rs | use std::io::Cursor;
use std::net::SocketAddr;
use std::u16;
use byteorder::{NetworkEndian, WriteBytesExt};
use bytes::Bytes;
use futures::sync::mpsc;
use futures::{future, Future, IntoFuture, Sink};
use num_traits::ToPrimitive;
use slog::{error, o, warn, Logger};
use tokio;
use crate::algorithms as algs;
use crate::connection::Connection;
use crate::connectionmanager::{ConnectionManager, Resender};
use crate::handler_data::{
ConnectionValue, Data, InCommandObserver, InPacketObserver,
};
use crate::packets::*;
use crate::{
Error, LockedHashMap, Result, MAX_FRAGMENTS_LENGTH, MAX_QUEUE_LEN,
};
/// Decodes incoming udp packets.
///
/// This part does the defragmentation, decryption and decompression.
pub struct PacketCodecReceiver<CM: ConnectionManager + 'static> {
connections: LockedHashMap<CM::Key, ConnectionValue<CM::AssociatedData>>,
is_client: bool,
logger: Logger,
in_packet_observer:
LockedHashMap<String, Box<InPacketObserver<CM::AssociatedData>>>,
in_command_observer:
LockedHashMap<String, Box<InCommandObserver<CM::AssociatedData>>>,
/// The sink for `UdpPacket`s with no known connection.
///
/// This can stay `None` so all packets without connection will be dropped.
unknown_udp_packet_sink: Option<mpsc::Sender<(SocketAddr, InPacket)>>,
}
impl<CM: ConnectionManager + 'static> PacketCodecReceiver<CM> {
pub fn new(
data: &Data<CM>,
unknown_udp_packet_sink: Option<mpsc::Sender<(SocketAddr, InPacket)>>,
) -> Self
{
Self {
connections: data.connections.clone(),
is_client: data.is_client,
logger: data.logger.clone(),
in_packet_observer: data.in_packet_observer.clone(),
in_command_observer: data.in_command_observer.clone(),
unknown_udp_packet_sink,
}
}
pub fn handle_udp_packet(
&mut self,
(addr, packet): (SocketAddr, InPacket),
) -> impl Future<Item = (), Error = Error>
| packet,
)
.into_future()
} else {
drop(cons);
let is_client = self.is_client;
tokio::spawn(future::lazy(move || {
if let Err(e) = Self::connection_handle_udp_packet(
&logger,
in_packet_observer,
in_command_observer,
is_client,
&con,
addr,
packet,
) {
error!(logger, "Error handling udp packet"; "error" =>?e);
}
Ok(())
}));
future::ok(())
}
} else {
drop(cons);
// Unknown connection
if let Some(sink) = &mut self.unknown_udp_packet_sink {
// Don't block if the queue is full
if sink.try_send((addr, packet)).is_err() {
warn!(self.logger, "Unknown connection handler overloaded \
– dropping udp packet");
}
} else {
warn!(
self.logger,
"Dropped packet without connection because no unknown \
packet handler is set"
);
}
future::ok(())
}
}
/// Handle a packet for a specific connection.
///
/// This part does the defragmentation, decryption and decompression.
fn connection_handle_udp_packet(
logger: &Logger,
in_packet_observer: LockedHashMap<
String,
Box<InPacketObserver<CM::AssociatedData>>,
>,
in_command_observer: LockedHashMap<
String,
Box<InCommandObserver<CM::AssociatedData>>,
>,
is_client: bool,
connection: &ConnectionValue<CM::AssociatedData>,
_: SocketAddr,
mut packet: InPacket,
) -> Result<()>
{
let con2 = connection.downgrade();
let packet_sink = con2.as_packet_sink();
let mut con = connection.mutex.lock();
let con = &mut *con;
let packet_res;
let mut ack = false;
let p_type = packet.header().packet_type();
let dir = packet.direction();
let type_i = p_type.to_usize().unwrap();
let id = packet.header().packet_id();
let (in_recv_win, gen_id, cur_next, limit) =
con.1.in_receive_window(p_type, id);
if con.1.params.is_some() && p_type == PacketType::Init {
return Err(Error::UnexpectedInitPacket);
}
// Ignore range for acks
if p_type == PacketType::Ack
|| p_type == PacketType::AckLow
|| in_recv_win
{
if !packet.header().flags().contains(Flags::UNENCRYPTED) {
// If it is the first ack packet of a client, try to fake
// decrypt it.
let decrypted = if (p_type == PacketType::Ack
&& id <= 1 && is_client)
|| con.1.params.is_none()
{
if let Ok(dec) = algs::decrypt_fake(&packet) {
packet.set_content(dec);
true
} else {
false
}
} else {
false
};
if !decrypted {
if let Some(params) = &mut con.1.params {
// Decrypt the packet
let dec_res = algs::decrypt(
&packet,
gen_id,
&params.shared_iv,
&mut params.key_cache,
);
if dec_res.is_err()
&& p_type == PacketType::Ack
&& id == 1 && is_client
{
// Ignore error, this is the ack packet for the
// clientinit, we take the initserver as ack anyway.
return Ok(());
}
packet.set_content(dec_res?);
} else {
// Failed to fake decrypt the packet
return Err(Error::WrongMac);
}
}
} else if algs::must_encrypt(p_type) {
// Check if it is ok for the packet to be unencrypted
return Err(Error::UnallowedUnencryptedPacket);
}
match p_type {
PacketType::Command | PacketType::CommandLow => {
ack = true;
for o in in_packet_observer.read().values() {
o.observe(con, &packet);
}
let in_ids = &mut con.1.incoming_p_ids;
let r_queue = &mut con.1.receive_queue;
let frag_queue = &mut con.1.fragmented_queue;
let commands = Self::handle_command_packet(
logger, r_queue, frag_queue, in_ids, packet,
)?;
// Be careful with command packets, they are
// guaranteed to be in the right order now, because
// we hold a lock on the connection.
let observer = in_command_observer.read();
for c in commands {
for o in observer.values() {
o.observe(con, &c);
}
// Send to packet handler
if let Err(e) = con.1.command_sink.unbounded_send(c) {
error!(logger, "Failed to send command packet to \
handler"; "error" =>?e);
}
}
// Dummy value
packet_res = Ok(None);
}
_ => {
if p_type == PacketType::Ping {
ack = true;
}
// Update packet ids
let in_ids = &mut con.1.incoming_p_ids;
let (id, next_gen) = id.overflowing_add(1);
if p_type != PacketType::Init {
in_ids[type_i] =
(if next_gen { gen_id + 1 } else { gen_id }, id);
}
if let Some(ack_id) = packet.ack_packet() {
// Remove command packet from send queue if the fitting ack is received.
let p_type = if p_type == PacketType::Ack {
PacketType::Command
} else {
PacketType::CommandLow
};
con.1.resender.ack_packet(p_type, ack_id);
} else if p_type.is_voice() {
// Seems to work better without assembling the first 3 voice packets
// Use handle_voice_packet to assemble fragmented voice packets
/*let mut res = Self::handle_voice_packet(&logger, params, &header, p_data);
let res = res.drain(..).map(|p|
(con_key.clone(), p)).collect();
Ok(res)*/
}
// Call observer after handling acks
for o in in_packet_observer.read().values() {
o.observe(con, &packet);
}
packet_res = Ok(Some(packet));
}
}
} else {
// Send an ack for the case when it was lost
if p_type == PacketType::Command || p_type == PacketType::CommandLow
{
ack = true;
}
packet_res = Err(Error::NotInReceiveWindow {
id,
next: cur_next,
limit,
p_type,
});
};
// Send ack
if ack {
tokio::spawn(
packet_sink
.send(OutAck::new(dir.reverse(), p_type, id))
.map(|_| ())
// Ignore errors, this can happen if the connection is
// already gone because we are disconnected.
.map_err(|_| ()),
);
}
if let Some(packet) = packet_res? {
if p_type.is_voice() {
if let Err(e) =
con.1.audio_sink.unbounded_send(packet.into_audio()?)
{
error!(logger, "Failed to send packet to handler"; "error" =>?e);
}
} else if p_type == PacketType::Init {
if is_client {
if let Err(e) = con
.1
.s2c_init_sink
.unbounded_send(packet.into_s2cinit()?)
{
error!(logger, "Failed to send packet to handler"; "error" =>?e);
}
} else if let Err(e) = con
.1
.c2s_init_sink
.unbounded_send(packet.into_c2sinit().map_err(|(_, e)| e)?)
{
error!(logger, "Failed to send packet to handler"; "error" =>?e);
}
}
}
Ok(())
}
/// Handle `Command` and `CommandLow` packets.
///
/// They have to be handled in the right order.
fn handle_command_packet(
logger: &Logger,
r_queue: &mut [Vec<InPacket>; 2],
frag_queue: &mut [Option<(InPacket, Vec<u8>)>; 2],
in_ids: &mut [(u32, u16); 8],
mut packet: InPacket,
) -> Result<Vec<InCommand>>
{
let header = packet.header();
let p_type = header.packet_type();
let mut id = header.packet_id();
let type_i = p_type.to_usize().unwrap();
let cmd_i = if p_type == PacketType::Command { 0 } else { 1 };
let r_queue = &mut r_queue[cmd_i];
let frag_queue = &mut frag_queue[cmd_i];
let in_ids = &mut in_ids[type_i];
let cur_next = in_ids.1;
if cur_next == id {
// In order
let mut packets = Vec::new();
loop {
// Update next packet id
let (next_id, next_gen) = id.overflowing_add(1);
if next_gen {
// Next packet generation
in_ids.0 = in_ids.0.wrapping_add(1);
}
in_ids.1 = next_id;
let flags = packet.header().flags();
let res_packet = if flags.contains(Flags::FRAGMENTED) {
if let Some((header, mut frag_queue)) = frag_queue.take() {
// Last fragmented packet
frag_queue.extend_from_slice(packet.content());
// Decompress
let decompressed = if header
.header()
.flags()
.contains(Flags::COMPRESSED)
{
//debug!(logger, "Compressed"; "data" =>?::utils::HexSlice(&frag_queue));
::quicklz::decompress(
&mut Cursor::new(frag_queue),
crate::MAX_DECOMPRESSED_SIZE,
)?
} else {
frag_queue
};
/*if header.get_compressed() {
debug!(logger, "Decompressed";
"data" =>?::HexSlice(&decompressed),
"string" => %String::from_utf8_lossy(&decompressed),
);
}*/
Some(
InCommand::with_content(&header, decompressed)
.map_err(|(_, e)| e)?,
)
} else {
// Enqueue
let content = packet.take_content();
*frag_queue = Some((packet, content));
None
}
} else if let Some((_, ref mut frag_queue)) = *frag_queue {
// The packet is fragmented
if frag_queue.len() < MAX_FRAGMENTS_LENGTH {
frag_queue.extend_from_slice(packet.content());
None
} else {
return Err(Error::MaxLengthExceeded(String::from(
"fragment queue",
)));
}
} else {
// Decompress
let decompressed = if flags.contains(Flags::COMPRESSED) {
//debug!(logger, "Compressed"; "data" =>?::utils::HexSlice(packet.content()));
::quicklz::decompress(
&mut Cursor::new(packet.content()),
crate::MAX_DECOMPRESSED_SIZE,
)?
} else {
packet.take_content()
};
/*if header.get_compressed() {
debug!(logger, "Decompressed"; "data" =>?::HexSlice(&decompressed));
}*/
Some(
InCommand::with_content(&packet, decompressed)
.map_err(|(_, e)| e)?,
)
};
if let Some(p) = res_packet {
packets.push(p);
}
// Check if there are following packets in the receive queue.
id = id.wrapping_add(1);
if let Some(pos) =
r_queue.iter().position(|p| p.header().packet_id() == id)
{
packet = r_queue.remove(pos);
} else {
break;
}
}
// The first packets should be returned first
packets.reverse();
Ok(packets)
} else {
// Out of order
warn!(logger, "Out of order command packet"; "got" => id,
"expected" => cur_next);
let limit = ((u32::from(cur_next) + MAX_QUEUE_LEN as u32)
% u32::from(u16::MAX)) as u16;
if (cur_next < limit && id >= cur_next && id < limit)
|| (cur_next > limit && (id >= cur_next || id < limit))
{
r_queue.push(packet);
Ok(vec![])
} else {
Err(Error::MaxLengthExceeded(String::from("command queue")))
}
}
}
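// --- Illustrative sketch, not part of the original file ---
// The out-of-order branch above only queues a packet whose id lies in a window
// of MAX_QUEUE_LEN ids after the expected id, wrapping around at u16::MAX.
// The same check, reduced to plain integers:
#[allow(dead_code)]
fn in_command_queue_window_example(expected: u16, id: u16, queue_len: u16) -> bool {
    // End of the (exclusive) window, computed exactly like the code above.
    let limit = ((u32::from(expected) + u32::from(queue_len))
        % u32::from(u16::MAX)) as u16;
    if expected < limit {
        // Window does not wrap: [expected, limit)
        id >= expected && id < limit
    } else {
        // Window wraps past u16::MAX: [expected, u16::MAX] or [0, limit)
        id >= expected || id < limit
    }
}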
/*/// Handle `Voice` and `VoiceLow` packets.
///
/// The first 3 packets for each audio transmission have the compressed flag
/// set, which means they are fragmented and should be concatenated.
fn handle_voice_packet(
logger: &slog::Logger,
params: &mut ConnectedParams,
header: &Header,
packet: PData,
) -> Vec<Packet> {
let cmd_i = if header.get_type() == PacketType::Voice {
0
} else {
1
};
let frag_queue = &mut params.voice_fragmented_queue[cmd_i];
let (id, from_id, codec_type, voice_data) = match packet {
PData::VoiceS2C { id, from_id, codec_type, voice_data } => (id, from_id, codec_type, voice_data),
PData::VoiceWhisperS2C { id, from_id, codec_type, voice_data } => (id, from_id, codec_type, voice_data),
_ => unreachable!("handle_voice_packet did get an unknown voice packet"),
};
if header.get_compressed() {
let queue = frag_queue.entry(from_id).or_insert_with(Vec::new);
// Append to fragments
if queue.len() < MAX_FRAGMENTS_LENGTH {
queue.extend_from_slice(&voice_data);
return Vec::new();
}
warn!(logger, "Length of voice fragment queue exceeded"; "len" => queue.len());
}
let mut res = Vec::new();
if let Some(frags) = frag_queue.remove(&from_id) {
// We got two packets
let packet_data = if header.get_type() == PacketType::Voice {
PData::VoiceS2C { id, from_id, codec_type, voice_data: frags }
} else {
PData::VoiceWhisperS2C { id, from_id, codec_type, voice_data: frags }
};
res.push(Packet::new(header.clone(), packet_data));
}
let packet_data = if header.get_type() == PacketType::Voice {
PData::VoiceS2C { id, from_id, codec_type, voice_data }
} else {
PData::VoiceWhisperS2C { id, from_id, codec_type, voice_data }
};
res.push(Packet::new(header.clone(), packet_data));
res
}*/
}
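// --- Hypothetical usage sketch, not part of the original file ---
// A receive loop hands every incoming datagram to the receiver; `data` and
// `udp_packet_stream` are placeholder names, not real items of this crate.
//
// let mut receiver = PacketCodecReceiver::new(&data, None);
// tokio::spawn(
//     udp_packet_stream
//         .for_each(move |(addr, packet)| receiver.handle_udp_packet((addr, packet)))
//         .map_err(|_| ()),
// );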
/// Encodes outgoing packets.
///
/// This part does the compression, encryption and fragmentation.
pub struct PacketCodecSender {
is_client: bool,
}
impl PacketCodecSender {
pub fn new(is_client: bool) -> Self { Self { is_client } }
pub fn encode_packet(
&self,
con: &mut Connection,
mut packet: OutPacket,
) -> Result<Vec<(u16, Bytes)>>
{
let p_type = packet.header().packet_type();
let type_i = p_type.to_usize().unwrap();
// TODO Needed, commands should set their own flag?
if (p_type == PacketType::Command || p_type == PacketType::CommandLow)
&& self.is_client
{
// Set newprotocol flag
packet.flags(packet.header().flags() | Flags::NEWPROTOCOL);
}
let (gen, p_id) = if p_type == PacketType::Init {
(0, 0)
} else {
con.outgoing_p_ids[type_i]
};
// We fake encrypt the first command packet of the
// server (id 0) and the first command packet of the
// client (id 1) if the client uses the new protocol
// (the packet is a clientek).
let mut fake_encrypt = p_type == PacketType::Command
&& gen == 0
&& ((!self.is_client && p_id == 0)
|| (self.is_client && p_id == 1 && {
// Test if it is a clientek packet
let s = b"clientek";
packet.content().len() >= s.len()
&& packet.content()[..s.len()] == s[..]
}));
// Also fake encrypt the first ack of the client, which is the response
// for the initivexpand2 packet.
fake_encrypt |= self.is_client && p_type == PacketType::Ack && gen == 0
&& p_id == 0;
// Get values from parameters
let should_encrypt;
let c_id;
if let Some(params) = con.params.as_mut() {
should_encrypt =
algs::should_encrypt(p_type, params.voice_encryption);
c_id = params.c_id;
} else {
should_encrypt = algs::should_encrypt(p_type, false);
if should_encrypt {
fake_encrypt = true;
}
c_id = 0;
}
// Client id for clients
if self.is_client {
packet.client_id(c_id);
}
if !should_encrypt && !fake_encrypt {
packet.flags(packet.header().flags() | Flags::UNENCRYPTED);
if let Some(params) = con.params.as_mut() {
packet.mac().copy_from_slice(&params.shared_mac);
}
}
// Compress and split packet
let packet_id;
let packets = if p_type == PacketType::Command
|| p_type == PacketType::CommandLow
{
packet_id = None;
algs::compress_and_split(self.is_client, packet)
} else {
// Set the inner packet id for voice packets
if p_type == PacketType::Voice || p_type == PacketType::VoiceWhisper
{
(&mut packet.content_mut()[..2])
.write_u16::<NetworkEndian>(con.outgoing_p_ids[type_i].1)
.unwrap();
}
// Identify init packets by their number
if p_type == PacketType::Init {
if packet.direction() == Direction::S2C {
packet_id = Some(u16::from(packet.content()[0]));
} else {
packet_id = Some(u16::from(packet.content()[4]));
}
} else {
packet_id = None;
}
vec![packet]
};
let packets = packets
.into_iter()
.map(|mut packet| -> Result<_> {
// Get packet id
let (mut gen, mut p_id) = if p_type == PacketType::Init {
(0, 0)
} else {
con.outgoing_p_ids[type_i]
};
if p_type != PacketType::Init {
packet.packet_id(p_id);
}
// Identify init packets by their number
let packet_id = packet_id.unwrap_or(p_id);
// Encrypt if necessary
if fake_encrypt {
algs::encrypt_fake(&mut packet)?;
} else if should_encrypt {
// The params are set
let params = con.params.as_mut().unwrap();
algs::encrypt(
&mut packet,
gen,
&params.shared_iv,
&mut params.key_cache,
)?;
}
// Increment outgoing_p_ids
p_id = p_id.wrapping_add(1);
if p_id == 0 {
gen = gen.wrapping_add(1);
}
if p_type != PacketType::Init {
con.outgoing_p_ids[type_i] = (gen, p_id);
}
Ok((packet_id, packet.into_vec().into()))
})
.collect::<Result<Vec<_>>>()?;
Ok(packets)
}
}
| {
// Find the right connection
let cons = self.connections.read();
if let Some(con) =
cons.get(&CM::get_connection_key(addr, &packet)).cloned()
{
// If we are a client and have only a single connection, we will do the
// work inside this future and not spawn a new one.
let logger = self.logger.new(o!("addr" => addr));
let in_packet_observer = self.in_packet_observer.clone();
let in_command_observer = self.in_command_observer.clone();
if self.is_client && cons.len() == 1 {
drop(cons);
Self::connection_handle_udp_packet(
&logger,
in_packet_observer,
in_command_observer,
self.is_client,
&con,
addr, | identifier_body |
packet_codec.rs | use std::io::Cursor;
use std::net::SocketAddr;
use std::u16;
use byteorder::{NetworkEndian, WriteBytesExt};
use bytes::Bytes;
use futures::sync::mpsc;
use futures::{future, Future, IntoFuture, Sink};
use num_traits::ToPrimitive;
use slog::{error, o, warn, Logger};
use tokio;
use crate::algorithms as algs;
use crate::connection::Connection;
use crate::connectionmanager::{ConnectionManager, Resender};
use crate::handler_data::{
ConnectionValue, Data, InCommandObserver, InPacketObserver,
};
use crate::packets::*;
use crate::{
Error, LockedHashMap, Result, MAX_FRAGMENTS_LENGTH, MAX_QUEUE_LEN,
};
/// Decodes incoming udp packets.
///
/// This part does the defragmentation, decryption and decompression.
pub struct PacketCodecReceiver<CM: ConnectionManager + 'static> {
connections: LockedHashMap<CM::Key, ConnectionValue<CM::AssociatedData>>,
is_client: bool,
logger: Logger,
in_packet_observer:
LockedHashMap<String, Box<InPacketObserver<CM::AssociatedData>>>,
in_command_observer:
LockedHashMap<String, Box<InCommandObserver<CM::AssociatedData>>>,
/// The sink for `UdpPacket`s with no known connection.
///
/// This can stay `None` so all packets without connection will be dropped.
unknown_udp_packet_sink: Option<mpsc::Sender<(SocketAddr, InPacket)>>,
}
impl<CM: ConnectionManager + 'static> PacketCodecReceiver<CM> {
pub fn new(
data: &Data<CM>,
unknown_udp_packet_sink: Option<mpsc::Sender<(SocketAddr, InPacket)>>,
) -> Self
{
Self {
connections: data.connections.clone(),
is_client: data.is_client,
logger: data.logger.clone(),
in_packet_observer: data.in_packet_observer.clone(),
in_command_observer: data.in_command_observer.clone(),
unknown_udp_packet_sink,
}
}
pub fn handle_udp_packet(
&mut self,
(addr, packet): (SocketAddr, InPacket),
) -> impl Future<Item = (), Error = Error>
{
// Find the right connection
let cons = self.connections.read();
if let Some(con) =
cons.get(&CM::get_connection_key(addr, &packet)).cloned()
{
// If we are a client and have only a single connection, we will do the
// work inside this future and not spawn a new one.
let logger = self.logger.new(o!("addr" => addr));
let in_packet_observer = self.in_packet_observer.clone();
let in_command_observer = self.in_command_observer.clone();
if self.is_client && cons.len() == 1 {
drop(cons);
Self::connection_handle_udp_packet(
&logger,
in_packet_observer,
in_command_observer,
self.is_client,
&con,
addr,
packet,
)
.into_future()
} else {
drop(cons);
let is_client = self.is_client;
tokio::spawn(future::lazy(move || {
if let Err(e) = Self::connection_handle_udp_packet(
&logger,
in_packet_observer,
in_command_observer,
is_client,
&con,
addr,
packet,
) {
error!(logger, "Error handling udp packet"; "error" =>?e);
}
Ok(())
}));
future::ok(())
}
} else {
drop(cons);
// Unknown connection
if let Some(sink) = &mut self.unknown_udp_packet_sink {
// Don't block if the queue is full
if sink.try_send((addr, packet)).is_err() {
warn!(self.logger, "Unknown connection handler overloaded \
– dropping udp packet");
}
} else {
warn!(
self.logger,
"Dropped packet without connection because no unknown \
packet handler is set"
);
}
future::ok(())
}
}
/// Handle a packet for a specific connection.
///
/// This part does the defragmentation, decryption and decompression.
fn connection_handle_udp_packet( | logger: &Logger,
in_packet_observer: LockedHashMap<
String,
Box<InPacketObserver<CM::AssociatedData>>,
>,
in_command_observer: LockedHashMap<
String,
Box<InCommandObserver<CM::AssociatedData>>,
>,
is_client: bool,
connection: &ConnectionValue<CM::AssociatedData>,
_: SocketAddr,
mut packet: InPacket,
) -> Result<()>
{
let con2 = connection.downgrade();
let packet_sink = con2.as_packet_sink();
let mut con = connection.mutex.lock();
let con = &mut *con;
let packet_res;
let mut ack = false;
let p_type = packet.header().packet_type();
let dir = packet.direction();
let type_i = p_type.to_usize().unwrap();
let id = packet.header().packet_id();
let (in_recv_win, gen_id, cur_next, limit) =
con.1.in_receive_window(p_type, id);
if con.1.params.is_some() && p_type == PacketType::Init {
return Err(Error::UnexpectedInitPacket);
}
// Ignore range for acks
if p_type == PacketType::Ack
|| p_type == PacketType::AckLow
|| in_recv_win
{
if !packet.header().flags().contains(Flags::UNENCRYPTED) {
// If it is the first ack packet of a client, try to fake
// decrypt it.
let decrypted = if (p_type == PacketType::Ack
&& id <= 1 && is_client)
|| con.1.params.is_none()
{
if let Ok(dec) = algs::decrypt_fake(&packet) {
packet.set_content(dec);
true
} else {
false
}
} else {
false
};
if !decrypted {
if let Some(params) = &mut con.1.params {
// Decrypt the packet
let dec_res = algs::decrypt(
&packet,
gen_id,
&params.shared_iv,
&mut params.key_cache,
);
if dec_res.is_err()
&& p_type == PacketType::Ack
&& id == 1 && is_client
{
// Ignore error, this is the ack packet for the
// clientinit, we take the initserver as ack anyway.
return Ok(());
}
packet.set_content(dec_res?);
} else {
// Failed to fake decrypt the packet
return Err(Error::WrongMac);
}
}
} else if algs::must_encrypt(p_type) {
// Check if it is ok for the packet to be unencrypted
return Err(Error::UnallowedUnencryptedPacket);
}
match p_type {
PacketType::Command | PacketType::CommandLow => {
ack = true;
for o in in_packet_observer.read().values() {
o.observe(con, &packet);
}
let in_ids = &mut con.1.incoming_p_ids;
let r_queue = &mut con.1.receive_queue;
let frag_queue = &mut con.1.fragmented_queue;
let commands = Self::handle_command_packet(
logger, r_queue, frag_queue, in_ids, packet,
)?;
// Be careful with command packets, they are
// guaranteed to be in the right order now, because
// we hold a lock on the connection.
let observer = in_command_observer.read();
for c in commands {
for o in observer.values() {
o.observe(con, &c);
}
// Send to packet handler
if let Err(e) = con.1.command_sink.unbounded_send(c) {
error!(logger, "Failed to send command packet to \
handler"; "error" =>?e);
}
}
// Dummy value
packet_res = Ok(None);
}
_ => {
if p_type == PacketType::Ping {
ack = true;
}
// Update packet ids
let in_ids = &mut con.1.incoming_p_ids;
let (id, next_gen) = id.overflowing_add(1);
if p_type != PacketType::Init {
in_ids[type_i] =
(if next_gen { gen_id + 1 } else { gen_id }, id);
}
if let Some(ack_id) = packet.ack_packet() {
// Remove command packet from send queue if the fitting ack is received.
let p_type = if p_type == PacketType::Ack {
PacketType::Command
} else {
PacketType::CommandLow
};
con.1.resender.ack_packet(p_type, ack_id);
} else if p_type.is_voice() {
// Seems to work better without assembling the first 3 voice packets
// Use handle_voice_packet to assemble fragmented voice packets
/*let mut res = Self::handle_voice_packet(&logger, params, &header, p_data);
let res = res.drain(..).map(|p|
(con_key.clone(), p)).collect();
Ok(res)*/
}
// Call observer after handling acks
for o in in_packet_observer.read().values() {
o.observe(con, &packet);
}
packet_res = Ok(Some(packet));
}
}
} else {
// Send an ack for the case when it was lost
if p_type == PacketType::Command || p_type == PacketType::CommandLow
{
ack = true;
}
packet_res = Err(Error::NotInReceiveWindow {
id,
next: cur_next,
limit,
p_type,
});
};
// Send ack
if ack {
tokio::spawn(
packet_sink
.send(OutAck::new(dir.reverse(), p_type, id))
.map(|_| ())
// Ignore errors, this can happen if the connection is
// already gone because we are disconnected.
.map_err(|_| ()),
);
}
if let Some(packet) = packet_res? {
if p_type.is_voice() {
if let Err(e) =
con.1.audio_sink.unbounded_send(packet.into_audio()?)
{
error!(logger, "Failed to send packet to handler"; "error" =>?e);
}
} else if p_type == PacketType::Init {
if is_client {
if let Err(e) = con
.1
.s2c_init_sink
.unbounded_send(packet.into_s2cinit()?)
{
error!(logger, "Failed to send packet to handler"; "error" =>?e);
}
} else if let Err(e) = con
.1
.c2s_init_sink
.unbounded_send(packet.into_c2sinit().map_err(|(_, e)| e)?)
{
error!(logger, "Failed to send packet to handler"; "error" =>?e);
}
}
}
Ok(())
}
/// Handle `Command` and `CommandLow` packets.
///
/// They have to be handled in the right order.
fn handle_command_packet(
logger: &Logger,
r_queue: &mut [Vec<InPacket>; 2],
frag_queue: &mut [Option<(InPacket, Vec<u8>)>; 2],
in_ids: &mut [(u32, u16); 8],
mut packet: InPacket,
) -> Result<Vec<InCommand>>
{
let header = packet.header();
let p_type = header.packet_type();
let mut id = header.packet_id();
let type_i = p_type.to_usize().unwrap();
let cmd_i = if p_type == PacketType::Command { 0 } else { 1 };
let r_queue = &mut r_queue[cmd_i];
let frag_queue = &mut frag_queue[cmd_i];
let in_ids = &mut in_ids[type_i];
let cur_next = in_ids.1;
if cur_next == id {
// In order
let mut packets = Vec::new();
loop {
// Update next packet id
let (next_id, next_gen) = id.overflowing_add(1);
if next_gen {
// Next packet generation
in_ids.0 = in_ids.0.wrapping_add(1);
}
in_ids.1 = next_id;
let flags = packet.header().flags();
let res_packet = if flags.contains(Flags::FRAGMENTED) {
if let Some((header, mut frag_queue)) = frag_queue.take() {
// Last fragmented packet
frag_queue.extend_from_slice(packet.content());
// Decompress
let decompressed = if header
.header()
.flags()
.contains(Flags::COMPRESSED)
{
//debug!(logger, "Compressed"; "data" =>?::utils::HexSlice(&frag_queue));
::quicklz::decompress(
&mut Cursor::new(frag_queue),
crate::MAX_DECOMPRESSED_SIZE,
)?
} else {
frag_queue
};
/*if header.get_compressed() {
debug!(logger, "Decompressed";
"data" =>?::HexSlice(&decompressed),
"string" => %String::from_utf8_lossy(&decompressed),
);
}*/
Some(
InCommand::with_content(&header, decompressed)
.map_err(|(_, e)| e)?,
)
} else {
// Enqueue
let content = packet.take_content();
*frag_queue = Some((packet, content));
None
}
} else if let Some((_, ref mut frag_queue)) = *frag_queue {
// The packet is fragmented
if frag_queue.len() < MAX_FRAGMENTS_LENGTH {
frag_queue.extend_from_slice(packet.content());
None
} else {
return Err(Error::MaxLengthExceeded(String::from(
"fragment queue",
)));
}
} else {
// Decompress
let decompressed = if flags.contains(Flags::COMPRESSED) {
//debug!(logger, "Compressed"; "data" =>?::utils::HexSlice(packet.content()));
::quicklz::decompress(
&mut Cursor::new(packet.content()),
crate::MAX_DECOMPRESSED_SIZE,
)?
} else {
packet.take_content()
};
/*if header.get_compressed() {
debug!(logger, "Decompressed"; "data" =>?::HexSlice(&decompressed));
}*/
Some(
InCommand::with_content(&packet, decompressed)
.map_err(|(_, e)| e)?,
)
};
if let Some(p) = res_packet {
packets.push(p);
}
// Check if there are following packets in the receive queue.
id = id.wrapping_add(1);
if let Some(pos) =
r_queue.iter().position(|p| p.header().packet_id() == id)
{
packet = r_queue.remove(pos);
} else {
break;
}
}
// The first packets should be returned first
packets.reverse();
Ok(packets)
} else {
// Out of order
warn!(logger, "Out of order command packet"; "got" => id,
"expected" => cur_next);
let limit = ((u32::from(cur_next) + MAX_QUEUE_LEN as u32)
% u32::from(u16::MAX)) as u16;
if (cur_next < limit && id >= cur_next && id < limit)
|| (cur_next > limit && (id >= cur_next || id < limit))
{
r_queue.push(packet);
Ok(vec![])
} else {
Err(Error::MaxLengthExceeded(String::from("command queue")))
}
}
}
/*/// Handle `Voice` and `VoiceLow` packets.
///
/// The first 3 packets for each audio transmission have the compressed flag
/// set, which means they are fragmented and should be concatenated.
fn handle_voice_packet(
logger: &slog::Logger,
params: &mut ConnectedParams,
header: &Header,
packet: PData,
) -> Vec<Packet> {
let cmd_i = if header.get_type() == PacketType::Voice {
0
} else {
1
};
let frag_queue = &mut params.voice_fragmented_queue[cmd_i];
let (id, from_id, codec_type, voice_data) = match packet {
PData::VoiceS2C { id, from_id, codec_type, voice_data } => (id, from_id, codec_type, voice_data),
PData::VoiceWhisperS2C { id, from_id, codec_type, voice_data } => (id, from_id, codec_type, voice_data),
_ => unreachable!("handle_voice_packet did get an unknown voice packet"),
};
if header.get_compressed() {
let queue = frag_queue.entry(from_id).or_insert_with(Vec::new);
// Append to fragments
if queue.len() < MAX_FRAGMENTS_LENGTH {
queue.extend_from_slice(&voice_data);
return Vec::new();
}
warn!(logger, "Length of voice fragment queue exceeded"; "len" => queue.len());
}
let mut res = Vec::new();
if let Some(frags) = frag_queue.remove(&from_id) {
// We got two packets
let packet_data = if header.get_type() == PacketType::Voice {
PData::VoiceS2C { id, from_id, codec_type, voice_data: frags }
} else {
PData::VoiceWhisperS2C { id, from_id, codec_type, voice_data: frags }
};
res.push(Packet::new(header.clone(), packet_data));
}
let packet_data = if header.get_type() == PacketType::Voice {
PData::VoiceS2C { id, from_id, codec_type, voice_data }
} else {
PData::VoiceWhisperS2C { id, from_id, codec_type, voice_data }
};
res.push(Packet::new(header.clone(), packet_data));
res
}*/
}
/// Encodes outgoing packets.
///
/// This part does the compression, encryption and fragmentation.
pub struct PacketCodecSender {
is_client: bool,
}
impl PacketCodecSender {
pub fn new(is_client: bool) -> Self { Self { is_client } }
pub fn encode_packet(
&self,
con: &mut Connection,
mut packet: OutPacket,
) -> Result<Vec<(u16, Bytes)>>
{
let p_type = packet.header().packet_type();
let type_i = p_type.to_usize().unwrap();
// TODO Needed, commands should set their own flag?
if (p_type == PacketType::Command || p_type == PacketType::CommandLow)
&& self.is_client
{
// Set newprotocol flag
packet.flags(packet.header().flags() | Flags::NEWPROTOCOL);
}
let (gen, p_id) = if p_type == PacketType::Init {
(0, 0)
} else {
con.outgoing_p_ids[type_i]
};
// We fake encrypt the first command packet of the
// server (id 0) and the first command packet of the
// client (id 1) if the client uses the new protocol
// (the packet is a clientek).
let mut fake_encrypt = p_type == PacketType::Command
&& gen == 0
&& ((!self.is_client && p_id == 0)
|| (self.is_client && p_id == 1 && {
// Test if it is a clientek packet
let s = b"clientek";
packet.content().len() >= s.len()
&& packet.content()[..s.len()] == s[..]
}));
// Also fake encrypt the first ack of the client, which is the response
// for the initivexpand2 packet.
fake_encrypt |= self.is_client && p_type == PacketType::Ack && gen == 0
&& p_id == 0;
// Get values from parameters
let should_encrypt;
let c_id;
if let Some(params) = con.params.as_mut() {
should_encrypt =
algs::should_encrypt(p_type, params.voice_encryption);
c_id = params.c_id;
} else {
should_encrypt = algs::should_encrypt(p_type, false);
if should_encrypt {
fake_encrypt = true;
}
c_id = 0;
}
// Client id for clients
if self.is_client {
packet.client_id(c_id);
}
if !should_encrypt && !fake_encrypt {
packet.flags(packet.header().flags() | Flags::UNENCRYPTED);
if let Some(params) = con.params.as_mut() {
packet.mac().copy_from_slice(&params.shared_mac);
}
}
// Compress and split packet
let packet_id;
let packets = if p_type == PacketType::Command
|| p_type == PacketType::CommandLow
{
packet_id = None;
algs::compress_and_split(self.is_client, packet)
} else {
// Set the inner packet id for voice packets
if p_type == PacketType::Voice || p_type == PacketType::VoiceWhisper
{
(&mut packet.content_mut()[..2])
.write_u16::<NetworkEndian>(con.outgoing_p_ids[type_i].1)
.unwrap();
}
// Identify init packets by their number
if p_type == PacketType::Init {
if packet.direction() == Direction::S2C {
packet_id = Some(u16::from(packet.content()[0]));
} else {
packet_id = Some(u16::from(packet.content()[4]));
}
} else {
packet_id = None;
}
vec![packet]
};
let packets = packets
.into_iter()
.map(|mut packet| -> Result<_> {
// Get packet id
let (mut gen, mut p_id) = if p_type == PacketType::Init {
(0, 0)
} else {
con.outgoing_p_ids[type_i]
};
if p_type != PacketType::Init {
packet.packet_id(p_id);
}
// Identify init packets by their number
let packet_id = packet_id.unwrap_or(p_id);
// Encrypt if necessary
if fake_encrypt {
algs::encrypt_fake(&mut packet)?;
} else if should_encrypt {
// The params are set
let params = con.params.as_mut().unwrap();
algs::encrypt(
&mut packet,
gen,
&params.shared_iv,
&mut params.key_cache,
)?;
}
// Increment outgoing_p_ids
p_id = p_id.wrapping_add(1);
if p_id == 0 {
gen = gen.wrapping_add(1);
}
if p_type != PacketType::Init {
con.outgoing_p_ids[type_i] = (gen, p_id);
}
Ok((packet_id, packet.into_vec().into()))
})
.collect::<Result<Vec<_>>>()?;
Ok(packets)
}
} | random_line_split |
|
shell.rs | ]
}
fn eval(&self, cwd: &mut PathBuf) {
match self.path() {
"echo" => {
for arg in &self.args[1..] {
kprint!("{} ", arg);
}
kprintln!("");
},
"panic" => panic!("ARE YOU THE BRAIN SPECIALIST?"),
"lsatag" => {
for tag in Atags::get() {
kprintln!("{:#?}", tag)
}
},
"memorymap" => {
match crate::allocator::memory_map() {
Some((start, end)) =>
kprintln!("Memory available: [{}..{}]", start, end),
None => kprintln!("Couldn't load memory map")
}
},
"testalloc" => {
let mut v = Vec::new();
for i in 0..50 {
v.push(i);
kprintln!("{:?}", v);
}
},
"sleep" => {
use core::time::Duration;
if self.args.len() > 1 {
let span = match self.args[1].parse() {
Ok(span) => span,
Err(_) => {
kprintln!("Couldn't parse time");
return
}
};
let slept = kernel_api::syscall::sleep(Duration::from_millis(span));
match slept {
Ok(time) => kprintln!("Slept {:?}", time),
Err(e) => kprintln!("Couldn't sleep: {:?}", e),
}
} else {
kprintln!("Must pass in # of millis to sleep");
}
},
"pwd" => pwd(cwd),
"cd" => {
if self.args.len() > 1 {
cd(cwd, self.args[1]);
}
},
"ls" => ls(cwd, &self.args[1..]),
"cat" => cat(cwd, &self.args[1..]),
"mkdir" => mkdir(cwd, &self.args[1..]),
"write_file_test" => write_file_test(cwd),
"touch" => touch(cwd, &self.args[1..]),
"rm" => rm(cwd, &self.args[1..]),
"append" => append(cwd, &self.args[1..]),
"lsblk" => FILESYSTEM.lsblk(),
"mount" => mount(cwd, &self.args[1..]),
"umount" => umount(cwd, &self.args[1]),
"mkcrypt" => encrypt_part(&self.args[1..]),
path => kprintln!("unknown command: {}", path)
}
}
}
fn pwd(cwd: &mut PathBuf) {
let path = cwd.as_path();
let path_str = path.to_str().expect("Failed to get working directory");
kprintln!("{}", path_str);
}
fn cd(cwd: &mut PathBuf, path: &str) -> bool {
if path.len() == 0 { return true }
if &path[0..1] == "/" {
// cwd.clear() not implemented in shim :(
while cwd.pop() {}
}
for part in path.split('/') {
// Remove any / that makes its way in
let part = part.replace("/", "");
if part == "." {
continue
} else if part == ".." {
cwd.pop();
} else {
cwd.push(&part);
match FILESYSTEM.open(cwd.as_path()) {
Ok(entry) => {
if entry.is_file() {
kprintln!("{}: Not a directory", part);
cwd.pop();
return false
}
}
Err(_) => {
kprintln!("{}: No such file or directory", part);
cwd.pop();
return false
} | }
fn ls(cwd: &PathBuf, args: &[&str]) {
let mut rel_dir = cwd.clone();
let mut changed_dir = false;
let mut show_hidden = false;
for arg in args {
if *arg == "-a" {
show_hidden = true;
continue
}
if changed_dir {
continue
}
if !cd(&mut rel_dir, arg) {
return
} else {
changed_dir = true // only run cd once
}
}
// no need to cd. if they didn't change dir
let entry = FILESYSTEM.open(rel_dir.as_path()).expect("Couldn't open dir");
let dir = entry.as_dir().expect("Expected directory, found file");
for item in dir.entries().expect("Couldn't get a dir iterator") {
if show_hidden || !item.metadata().hidden() {
kprintln!("{}", item.metadata())
}
}
}
fn cat(cwd: &PathBuf, args: &[&str]) {
fn cat_one(cwd: &PathBuf, path: &str) {
use core::str;
use io::Read;
use alloc::slice::SliceConcatExt;
let mut rel_dir = cwd.clone();
let parts = path.split('/').collect::<Vec<&str>>();
let dir = parts[0..parts.len()-1].join("/");
if !cd(&mut rel_dir, &dir) {
return
}
rel_dir.push(parts[parts.len()-1]);
let entry = FILESYSTEM.open(rel_dir.as_path()).expect("Couldn't open file");
if !entry.is_file() {
kprintln!("Can't cat a directory {}!", path);
return
}
let mut file = entry.into_file().expect("Expected file, found directory");
loop {
let mut buffer = [0u8; 256];
match file.read(&mut buffer) {
Ok(0) => break,
Ok(n) => {
let string = str::from_utf8(&buffer[0..n]);
match string {
Ok(string) => kprint!("{}", string),
Err(_) => {
kprintln!("Couldn't parse {} as UTF-8", path);
return
},
}
},
Err(e) => {
kprintln!("Error when reading file {}: {:?}", path, e);
return
}
}
}
}
for arg in args {
cat_one(cwd, arg)
}
}
fn canonicalize(path: PathBuf) -> Result<PathBuf, ()> {
let mut new_path = PathBuf::new();
for comp in path.components() {
match comp {
Component::ParentDir => {
let res = new_path.pop();
if !res {
return Err(());
}
},
Component::Normal(n) => new_path = new_path.join(n),
Component::RootDir => new_path = ["/"].iter().collect(),
_ => ()
};
}
Ok(new_path)
}
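// --- Illustrative examples, not part of the original file ---
// `canonicalize` folds `.` and `..` purely textually and never touches the
// filesystem; popping past the root is an error:
//   canonicalize(PathBuf::from("/a/b/../c/./d")) == Ok(PathBuf::from("/a/c/d"))
//   canonicalize(PathBuf::from("/.."))           == Err(())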
fn get_abs_path(cwd: &PathBuf, dir_arg: &str) -> Option<PathBuf> {
let mut raw_path: PathBuf = PathBuf::from(dir_arg);
if !raw_path.is_absolute() {
raw_path = cwd.clone().join(raw_path);
}
let abs_path = match canonicalize(raw_path) {
Ok(p) => p,
Err(_) => {
kprintln!("\ninvalid arg: {}", dir_arg);
return None;
}
};
Some(abs_path)
}
fn mkdir(cwd: &PathBuf, args: &[&str]) {
let abs_path = match get_abs_path(cwd, args[0]) {
Some(p) => p,
None => return
};
let dir_metadata = fat32::vfat::Metadata {
name: String::from(abs_path.file_name().unwrap().to_str().unwrap()),
created: fat32::vfat::Timestamp::default(),
accessed: fat32::vfat::Timestamp::default(),
modified: fat32::vfat::Timestamp::default(),
attributes: fat32::vfat::Attributes::default_dir(), // directory
size: 0
};
let path_clone = abs_path.clone();
FILESYSTEM.create_dir(abs_path.parent().unwrap(), dir_metadata).expect("Failed to create dir");
FILESYSTEM.flush_fs(path_clone);
}
fn write_file_test(cwd: &PathBuf) {
use shim::io::Write;
let mut dir = FILESYSTEM.open_dir(cwd.as_path()).expect("Couldn't get $CWD as dir");
dir.create(fat32::vfat::Metadata {
name: String::from("test_write.txt"),
created: fat32::vfat::Timestamp::default(),
accessed: fat32::vfat::Timestamp::default(),
modified: fat32::vfat::Timestamp::default(),
attributes: fat32::vfat::Attributes::default(),
size: 0,
}).expect("Couldn't create test_write.txt");
let mut path = cwd.clone();
path.push("test_write.txt");
let test_file_entry = FILESYSTEM.open(path.as_path()).expect("couldn't open /test_write.txt");
assert!(test_file_entry.is_file());
let mut test_file = test_file_entry.into_file().expect("couldn't open /test_write.txt as file");
let test_buf = "hello world!!\n".as_bytes();
assert_eq!(test_file.write(test_buf).unwrap(), test_buf.len());
assert_eq!(test_file.write(test_buf).unwrap(), test_buf.len());
FILESYSTEM.flush_fs(cwd);
}
fn touch(cwd: &PathBuf, args: &[&str]) {
for arg in args {
let arg_path = PathBuf::from(arg);
let raw_path = if !arg_path.is_absolute() {
cwd.join(arg_path)
} else { arg_path };
let path = canonicalize(raw_path).expect("Could not canonicalize path");
let base = path.parent();
let mut base_dir = match base {
None => FILESYSTEM.open_dir("/").expect("Could not get / as dir"),
Some(base) => FILESYSTEM.open_dir(base).expect("Could not get target as dir"),
};
let file = path.file_name().expect("Must specify a file to create")
.to_str().expect("Couldn't get filename as string");
base_dir.create(fat32::vfat::Metadata {
name: String::from(file),
..Default::default()
}).expect("Couldn't create file");
match base {
Some(base) => FILESYSTEM.flush_fs(base),
None => FILESYSTEM.flush_fs("/")
}
}
}
fn append(cwd: &PathBuf, args: &[&str]) {
use shim::io::{Write, Seek, SeekFrom};
if args.len() < 2 {
kprintln!("USAGE: append [filename] [contents]");
return;
}
let arg_path = PathBuf::from(args[0]);
let raw_path = if !arg_path.is_absolute() {
cwd.join(arg_path)
} else { arg_path };
let path = canonicalize(raw_path).expect("Could not canonicalize path");
let mut fd = FILESYSTEM.open_file(path.as_path()).expect("Couldn't open file for writing");
for i in 1..args.len() {
fd.seek(SeekFrom::End(0)).expect("Failed to seek to end of file");
fd.write(&args[i].bytes().collect::<alloc::vec::Vec<u8>>()).expect("Failed to append to file");
if i < args.len() - 1 {
fd.write(&[' ' as u8]).expect("Failed to append space to file");
}
}
fd.write(&['\n' as u8]).expect("Failed to append newline to file");
FILESYSTEM.flush_fs(path);
}
fn rm(cwd: &PathBuf, args: &[&str]) {
use fat32::traits::File;
if args.len() < 1 {
kprintln!("USAGE: rm [filename]+");
return;
}
for i in 0..args.len() {
let arg_path = PathBuf::from(args[i]);
let raw_path = if !arg_path.is_absolute() {
cwd.join(arg_path)
} else { arg_path };
let path = canonicalize(raw_path).expect("Could not canonicalize path");
let fd = FILESYSTEM.open(path.as_path()).expect("Couldn't open file for writing");
if fd.is_dir() {
match fd.into_dir().expect("Couldn't get dir as dir").delete() {
Ok(_) => (),
Err(e) => kprintln!("Could not delete directory: {:?}", e),
}
} else {
fd.into_file().expect("Couldn't get file as file").delete().expect("Could not delete file");
}
FILESYSTEM.flush_fs(path);
}
}
fn mount(cwd: &PathBuf, args: &[&str]) {
if args.len() < 2 {
kprintln!("not enough arguments!\nusage: mount <part> <path> -p <pw>");
return;
}
let part_num: usize = match args[0].parse() {
Ok(num) => num,
Err(_) => {
kprintln!("invalid partition number");
return;
}
};
let abs_path = match get_abs_path(cwd, args[1]) {
Some(p) => p,
None => return
};
let mut mount_opts = MountOptions::Normal;
if args.len() > 2 && args.len() != 4 {
kprintln!("incorrect arguments!\nusage: mount <part> <path> -p <pw>");
return;
} else if args.len() > 2 {
if args[2].eq_ignore_ascii_case("-p") {
mount_opts = MountOptions::Encrypted(Some(String::from(args[3])));
} else {
kprintln!("unknown flag: {}", args[2]);
return;
}
}
FILESYSTEM.mount(part_num, abs_path, mount_opts);
}
fn umount(cwd: &PathBuf, mount_point: &str) {
let abs_path = match get_abs_path(cwd, mount_point) {
Some(p) => p,
None => return
};
if abs_path.to_str().unwrap().eq_ignore_ascii_case("/") {
kprintln!("don't unmount root!!!");
} else {
match FILESYSTEM.unmount(PathBuf::from(abs_path.to_str().unwrap())) {
Ok(_) => { kprintln!("unmounted {}", abs_path.to_str().unwrap()); },
Err(_) => ()
}
}
}
// backs the mkcrypt command
// encrypts a device sector by sector
// usage: mkcrypt {header|full} num password
// header = don't encrypt data area (saves time if we don't care about the existing data on the partition)
// header mode will also zero the root cluster so that it's valid
// full = encrypt every sector of the partition (will be slow for large disks)
// if there's a power disruption or anything like that during the execution of this command you'll have
// big problems (we can fix that by creating a backup though)
fn encrypt_part(args: &[&str]) {
if args.len() < 3 {
kprintln!("not enough arguments!\nusage: mkcrypt {{header|full}} num password");
return;
}
// check usage
let mode = args[0]; // args[0] = mode (header|full)
match mode {
"header"|"full" => (),
_ => {
kprintln!("invalid mode: {}!\nusage: mkcrypt {{header|full}} num password", mode);
return;
}
}
let part_num: usize = match args[1].parse() {
Ok(num) => num,
Err(_) => {
kprintln!("invalid partition number: {}!\nusage: mkcrypt {{header|full}} num password", args[1]);
return;
}
};
if args[2].as_bytes().len() > 16 {
kprintln!("password can be at most 16 bytes");
return;
}
// we can change this later to be device agnostic
let mbr = match MasterBootRecord::from(Sd {}) {
Ok(mbr) => mbr,
Err(e) => {
kprintln!("error parsing MBR: {:?}", e);
return;
}
};
let start_sector = match mbr.get_partition_start(part_num) {
Some(s) => s as u64,
None => {
kprintln!("unable to get start sector for partition #{}", part_num);
return;
}
};
let ebpb = match BiosParameterBlock::from(Sd {}, start_sector) {
Ok(ebpb) => ebpb,
Err(e) => {
kprintln!("unable to parse EBPB: {:?}", e);
return;
}
};
let end_sector: u64;
match mode {
"header" => {
end_sector = start_sector
+ ebpb.num_reserved_sectors as u64
+ (ebpb.num_fats as u64 * ebpb.sectors_per_fat as u64)
- 1;
kprintln!("Encrypting root cluster...");
// encrypt root dir as well
let sector_offset = ((ebpb.root_cluster_number - 2) as u64) * (ebpb.sectors_per_cluster as u64);
let root_start_sector = end_sector + 1 + sector_offset as u64;
let root_cluster_end = root_start_sector + ebpb.sectors_per_cluster as u64 - 1;
match encrypt_sectors(root_start_sector, root_cluster_end, args[2]) {
Ok(_) => (),
Err(_) => return
}
kprintln!("Encrypting filesystem header...");
},
"full" => {
end_sector = start_sector
+ ebpb.num_logical_sectors_ext as u64
- 1;
kprintln!("Encrypting entire partition...");
}
_ => {
kprintln!("invalid mode: {}!\nusage: mkcrypt {{header|full}} num password", mode);
return;
}
}
match encrypt_sectors(start_sector, end_sector, args[2]) {
Ok(_) => (),
Err(_) => return
}
}
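// --- Illustrative sketch, not part of the original file ---
// "header" mode stops after the reserved region and the FAT copies; the values
// below are made up and only show the same end-sector arithmetic as above.
#[allow(dead_code)]
fn header_mode_end_sector_example() -> u64 {
    let start_sector: u64 = 2048; // hypothetical partition start (LBA)
    let num_reserved_sectors: u64 = 32; // hypothetical EBPB fields
    let num_fats: u64 = 2;
    let sectors_per_fat: u64 = 1009;
    // The range is inclusive of its last sector, hence the trailing `- 1`.
    start_sector + num_reserved_sectors + num_fats * sectors_per_fat - 1
}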
// encrypts sectors [first, last] on Sd {} using key
fn encrypt_sectors(first: u64, last: u64, key: &str) -> Result<(), ()> {
kprintln!("about to encrypt | }
}
}
return true | random_line_split |
shell.rs |
}
fn eval(&self, cwd: &mut PathBuf) {
match self.path() {
"echo" => {
for arg in &self.args[1..] {
kprint!("{} ", arg);
}
kprintln!("");
},
"panic" => panic!("ARE YOU THE BRAIN SPECIALIST?"),
"lsatag" => {
for tag in Atags::get() {
kprintln!("{:#?}", tag)
}
},
"memorymap" => {
match crate::allocator::memory_map() {
Some((start, end)) =>
kprintln!("Memory available: [{}..{}]", start, end),
None => kprintln!("Couldn't load memory map")
}
},
"testalloc" => {
let mut v = Vec::new();
for i in 0..50 {
v.push(i);
kprintln!("{:?}", v);
}
},
"sleep" => {
use core::time::Duration;
if self.args.len() > 1 {
let span = match self.args[1].parse() {
Ok(span) => span,
Err(_) => {
kprintln!("Couldn't parse time");
return
}
};
let slept = kernel_api::syscall::sleep(Duration::from_millis(span));
match slept {
Ok(time) => kprintln!("Slept {:?}", time),
Err(e) => kprintln!("Couldn't sleep: {:?}", e),
}
} else {
kprintln!("Must pass in # of millis to sleep");
}
},
"pwd" => pwd(cwd),
"cd" => {
if self.args.len() > 1 {
cd(cwd, self.args[1]);
}
},
"ls" => ls(cwd, &self.args[1..]),
"cat" => cat(cwd, &self.args[1..]),
"mkdir" => mkdir(cwd, &self.args[1..]),
"write_file_test" => write_file_test(cwd),
"touch" => touch(cwd, &self.args[1..]),
"rm" => rm(cwd, &self.args[1..]),
"append" => append(cwd, &self.args[1..]),
"lsblk" => FILESYSTEM.lsblk(),
"mount" => mount(cwd, &self.args[1..]),
"umount" => umount(cwd, &self.args[1]),
"mkcrypt" => encrypt_part(&self.args[1..]),
path => kprintln!("unknown command: {}", path)
}
}
}
fn pwd(cwd: &mut PathBuf) {
let path = cwd.as_path();
let path_str = path.to_str().expect("Failed to get working directory");
kprintln!("{}", path_str);
}
fn cd(cwd: &mut PathBuf, path: &str) -> bool {
if path.len() == 0 { return true }
if &path[0..1] == "/" {
// cwd.clear() not implemented in shim :(
while cwd.pop() {}
}
for part in path.split('/') {
// Remove any / that makes its way in
let part = part.replace("/", "");
if part == "." {
continue
} else if part == ".." {
cwd.pop();
} else {
cwd.push(&part);
match FILESYSTEM.open(cwd.as_path()) {
Ok(entry) => {
if entry.is_file() {
kprintln!("{}: Not a directory", part);
cwd.pop();
return false
}
}
Err(_) => {
kprintln!("{}: No such file or directory", part);
cwd.pop();
return false
}
}
}
}
return true
}
fn ls(cwd: &PathBuf, args: &[&str]) {
let mut rel_dir = cwd.clone();
let mut changed_dir = false;
let mut show_hidden = false;
for arg in args {
if *arg == "-a" {
show_hidden = true;
continue
}
if changed_dir {
continue
}
if !cd(&mut rel_dir, arg) {
return
} else {
changed_dir = true // only run cd once
}
}
// no need to cd. if they didn't change dir
let entry = FILESYSTEM.open(rel_dir.as_path()).expect("Couldn't open dir");
let dir = entry.as_dir().expect("Expected directory, found file");
for item in dir.entries().expect("Couldn't get a dir iterator") {
if show_hidden || !item.metadata().hidden() {
kprintln!("{}", item.metadata())
}
}
}
fn cat(cwd: &PathBuf, args: &[&str]) {
fn cat_one(cwd: &PathBuf, path: &str) {
use core::str;
use io::Read;
use alloc::slice::SliceConcatExt;
let mut rel_dir = cwd.clone();
let parts = path.split('/').collect::<Vec<&str>>();
let dir = parts[0..parts.len()-1].join("/");
if !cd(&mut rel_dir, &dir) {
return
}
rel_dir.push(parts[parts.len()-1]);
let entry = FILESYSTEM.open(rel_dir.as_path()).expect("Couldn't open file");
if !entry.is_file() {
kprintln!("Can't cat a directory {}!", path);
return
}
let mut file = entry.into_file().expect("Expected file, found directory");
loop {
let mut buffer = [0u8; 256];
match file.read(&mut buffer) {
Ok(0) => break,
Ok(n) => {
let string = str::from_utf8(&buffer[0..n]);
match string {
Ok(string) => kprint!("{}", string),
Err(_) => {
kprintln!("Couldn't parse {} as UTF-8", path);
return
},
}
},
Err(e) => {
kprintln!("Error when reading file {}: {:?}", path, e);
return
}
}
}
}
for arg in args {
cat_one(cwd, arg)
}
}
fn canonicalize(path: PathBuf) -> Result<PathBuf, ()> {
let mut new_path = PathBuf::new();
for comp in path.components() {
match comp {
Component::ParentDir => {
let res = new_path.pop();
if !res {
return Err(());
}
},
Component::Normal(n) => new_path = new_path.join(n),
Component::RootDir => new_path = ["/"].iter().collect(),
_ => ()
};
}
Ok(new_path)
}
fn get_abs_path(cwd: &PathBuf, dir_arg: &str) -> Option<PathBuf> |
fn mkdir(cwd: &PathBuf, args: &[&str]) {
let abs_path = match get_abs_path(cwd, args[0]) {
Some(p) => p,
None => return
};
let dir_metadata = fat32::vfat::Metadata {
name: String::from(abs_path.file_name().unwrap().to_str().unwrap()),
created: fat32::vfat::Timestamp::default(),
accessed: fat32::vfat::Timestamp::default(),
modified: fat32::vfat::Timestamp::default(),
attributes: fat32::vfat::Attributes::default_dir(), // directory
size: 0
};
let path_clone = abs_path.clone();
FILESYSTEM.create_dir(abs_path.parent().unwrap(), dir_metadata).expect("Failed to create dir");
FILESYSTEM.flush_fs(path_clone);
}
fn write_file_test(cwd: &PathBuf) {
use shim::io::Write;
let mut dir = FILESYSTEM.open_dir(cwd.as_path()).expect("Couldn't get $CWD as dir");
dir.create(fat32::vfat::Metadata {
name: String::from("test_write.txt"),
created: fat32::vfat::Timestamp::default(),
accessed: fat32::vfat::Timestamp::default(),
modified: fat32::vfat::Timestamp::default(),
attributes: fat32::vfat::Attributes::default(),
size: 0,
}).expect("Couldn't create test_write.txt");
let mut path = cwd.clone();
path.push("test_write.txt");
let test_file_entry = FILESYSTEM.open(path.as_path()).expect("couldn't open /test_write.txt");
assert!(test_file_entry.is_file());
let mut test_file = test_file_entry.into_file().expect("couldn't open /test_write.txt as file");
let test_buf = "hello world!!\n".as_bytes();
assert_eq!(test_file.write(test_buf).unwrap(), test_buf.len());
assert_eq!(test_file.write(test_buf).unwrap(), test_buf.len());
FILESYSTEM.flush_fs(cwd);
}
fn touch(cwd: &PathBuf, args: &[&str]) {
for arg in args {
let arg_path = PathBuf::from(arg);
let raw_path = if !arg_path.is_absolute() {
cwd.join(arg_path)
} else { arg_path };
let path = canonicalize(raw_path).expect("Could not canonicalize path");
let base = path.parent();
let mut base_dir = match base {
None => FILESYSTEM.open_dir("/").expect("Could not get / as dir"),
Some(base) => FILESYSTEM.open_dir(base).expect("Could not get target as dir"),
};
let file = path.file_name().expect("Must specify a file to create")
.to_str().expect("Couldn't get filename as string");
base_dir.create(fat32::vfat::Metadata {
name: String::from(file),
..Default::default()
}).expect("Couldn't create file");
match base {
Some(base) => FILESYSTEM.flush_fs(base),
None => FILESYSTEM.flush_fs("/")
}
}
}
fn append(cwd: &PathBuf, args: &[&str]) {
use shim::io::{Write, Seek, SeekFrom};
if args.len() < 2 {
kprintln!("USAGE: append [filename] [contents]");
return;
}
let arg_path = PathBuf::from(args[0]);
let raw_path = if !arg_path.is_absolute() {
cwd.join(arg_path)
} else { arg_path };
let path = canonicalize(raw_path).expect("Could not canonicalize path");
let mut fd = FILESYSTEM.open_file(path.as_path()).expect("Couldn't open file for writing");
for i in 1..args.len() {
fd.seek(SeekFrom::End(0)).expect("Failed to seek to end of file");
fd.write(&args[i].bytes().collect::<alloc::vec::Vec<u8>>()).expect("Failed to append to file");
if i < args.len() - 1 {
fd.write(&[' ' as u8]).expect("Failed to append space to file");
}
}
fd.write(&['\n' as u8]).expect("Failed to append newline to file");
FILESYSTEM.flush_fs(path);
}
fn rm(cwd: &PathBuf, args: &[&str]) {
use fat32::traits::File;
if args.len() < 1 {
kprintln!("USAGE: rm [filename]+");
return;
}
for i in 0..args.len() {
let arg_path = PathBuf::from(args[i]);
let raw_path = if !arg_path.is_absolute() {
cwd.join(arg_path)
} else { arg_path };
let path = canonicalize(raw_path).expect("Could not canonicalize path");
let fd = FILESYSTEM.open(path.as_path()).expect("Couldn't open file for writing");
if fd.is_dir() {
match fd.into_dir().expect("Couldn't get dir as dir").delete() {
Ok(_) => (),
Err(e) => kprintln!("Could not delete directory: {:?}", e),
}
} else {
fd.into_file().expect("Couldn't get file as file").delete().expect("Could not delete file");
}
FILESYSTEM.flush_fs(path);
}
}
fn mount(cwd: &PathBuf, args: &[&str]) {
if args.len() < 2 {
kprintln!("not enough arguments!\nusage: mount <part> <path> -p <pw>");
return;
}
let part_num: usize = match args[0].parse() {
Ok(num) => num,
Err(_) => {
kprintln!("invalid partition number");
return;
}
};
let abs_path = match get_abs_path(cwd, args[1]) {
Some(p) => p,
None => return
};
let mut mount_opts = MountOptions::Normal;
if args.len() > 2 && args.len() != 4 {
kprintln!("incorrect arguments!\nusage: mount <part> <path> -p <pw>");
return;
} else if args.len() > 2 {
if args[2].eq_ignore_ascii_case("-p") {
mount_opts = MountOptions::Encrypted(Some(String::from(args[3])));
} else {
kprintln!("unknown flag: {}", args[2]);
return;
}
}
FILESYSTEM.mount(part_num, abs_path, mount_opts);
}
fn umount(cwd: &PathBuf, mount_point: &str) {
let abs_path = match get_abs_path(cwd, mount_point) {
Some(p) => p,
None => return
};
if abs_path.to_str().unwrap().eq_ignore_ascii_case("/") {
kprintln!("don't unmount root!!!");
} else {
match FILESYSTEM.unmount(PathBuf::from(abs_path.to_str().unwrap())) {
Ok(_) => { kprintln!("unmounted {}", abs_path.to_str().unwrap()); },
Err(_) => ()
}
}
}
// backs the mkcrypt command
// encrypts a device sector by sector
// usage: mkcrypt {header|full} num password
// header = don't encrypt data area (saves time if we don't care about the existing data on the partition)
// header mode will also zero the root cluster so that it's valid
// full = encrypt every sector of the partition (will be slow for large disks)
// if there's a power disruption or anything like that during the execution of this command you'll have
// big problems (we can fix that by creating a backup though)
fn encrypt_part(args: &[&str]) {
if args.len() < 3 {
kprintln!("not enough arguments!\nusage: mkcrypt {{header|full}} num password");
return;
}
// check usage
let mode = args[0]; // args[0] = mode (header|full)
match mode {
"header"|"full" => (),
_ => {
kprintln!("invalid mode: {}!\nusage: mkcrypt {{header|full}} num password", mode);
return;
}
}
let part_num: usize = match args[1].parse() {
Ok(num) => num,
Err(_) => {
kprintln!("invalid partition number: {}!\nusage: mkcrypt {{header|full}} num password", args[1]);
return;
}
};
if args[2].as_bytes().len() > 16 {
kprintln!("password can be at most 16 bytes");
return;
}
// we can change this later to be device agnostic
let mbr = match MasterBootRecord::from(Sd {}) {
Ok(mbr) => mbr,
Err(e) => {
kprintln!("error parsing MBR: {:?}", e);
return;
}
};
let start_sector = match mbr.get_partition_start(part_num) {
Some(s) => s as u64,
None => {
kprintln!("unable to get start sector for partition #{}", part_num);
return;
}
};
let ebpb = match BiosParameterBlock::from(Sd {}, start_sector) {
Ok(ebpb) => ebpb,
Err(e) => {
kprintln!("unable to parse EBPB: {:?}", e);
return;
}
};
let end_sector: u64;
match mode {
"header" => {
end_sector = start_sector
+ ebpb.num_reserved_sectors as u64
+ (ebpb.num_fats as u64 * ebpb.sectors_per_fat as u64)
- 1;
kprintln!("Encrypting root cluster...");
// encrypt root dir as well
let sector_offset = ((ebpb.root_cluster_number - 2) as u64) * (ebpb.sectors_per_cluster as u64);
let root_start_sector = end_sector + 1 + sector_offset as u64;
let root_cluster_end = root_start_sector + ebpb.sectors_per_cluster as u64 - 1;
match encrypt_sectors(root_start_sector, root_cluster_end, args[2]) {
Ok(_) => (),
Err(_) => return
}
kprintln!("Encrypting filesystem header...");
},
"full" => {
end_sector = start_sector
+ ebpb.num_logical_sectors_ext as u64
- 1;
kprintln!("Encrypting entire partition...");
}
_ => {
kprintln!("invalid mode: {}!\nusage: mkcrypt {{header|full}} num password", mode);
return;
}
}
match encrypt_sectors(start_sector, end_sector, args[2]) {
Ok(_) => (),
Err(_) => return
}
}
// encrypts sectors [first, last] on Sd {} using key
fn encrypt_sectors(first: u64, last: u64, key: &str) -> Result<(), ()> {
kprintln!("about | {
let mut raw_path: PathBuf = PathBuf::from(dir_arg);
if !raw_path.is_absolute() {
raw_path = cwd.clone().join(raw_path);
}
let abs_path = match canonicalize(raw_path) {
Ok(p) => p,
Err(_) => {
kprintln!("\ninvalid arg: {}", dir_arg);
return None;
}
};
Some(abs_path)
} | identifier_body |
shell.rs |
}
fn eval(&self, cwd: &mut PathBuf) {
match self.path() {
"echo" => {
for arg in &self.args[1..] {
kprint!("{} ", arg);
}
kprintln!("");
},
"panic" => panic!("ARE YOU THE BRAIN SPECIALIST?"),
"lsatag" => {
for tag in Atags::get() {
kprintln!("{:#?}", tag)
}
},
"memorymap" => {
match crate::allocator::memory_map() {
Some((start, end)) =>
kprintln!("Memory available: [{}..{}]", start, end),
None => kprintln!("Couldn't load memory map")
}
},
"testalloc" => {
let mut v = Vec::new();
for i in 0..50 {
v.push(i);
kprintln!("{:?}", v);
}
},
"sleep" => {
use core::time::Duration;
if self.args.len() > 1 {
let span = match self.args[1].parse() {
Ok(span) => span,
Err(_) => {
kprintln!("Couldn't parse time");
return
}
};
let slept = kernel_api::syscall::sleep(Duration::from_millis(span));
match slept {
Ok(time) => kprintln!("Slept {:?}", time),
Err(e) => kprintln!("Couldn't sleep: {:?}", e),
}
} else {
kprintln!("Must pass in # of millis to sleep");
}
},
"pwd" => pwd(cwd),
"cd" => {
if self.args.len() > 1 {
cd(cwd, self.args[1]);
}
},
"ls" => ls(cwd, &self.args[1..]),
"cat" => cat(cwd, &self.args[1..]),
"mkdir" => mkdir(cwd, &self.args[1..]),
"write_file_test" => write_file_test(cwd),
"touch" => touch(cwd, &self.args[1..]),
"rm" => rm(cwd, &self.args[1..]),
"append" => append(cwd, &self.args[1..]),
"lsblk" => FILESYSTEM.lsblk(),
"mount" => mount(cwd, &self.args[1..]),
"umount" => umount(cwd, &self.args[1]),
"mkcrypt" => encrypt_part(&self.args[1..]),
path => kprintln!("unknown command: {}", path)
}
}
}
fn pwd(cwd: &mut PathBuf) {
let path = cwd.as_path();
let path_str = path.to_str().expect("Failed to get working directory");
kprintln!("{}", path_str);
}
fn | (cwd: &mut PathBuf, path: &str) -> bool {
if path.len() == 0 { return true }
if &path[0..1] == "/" {
// cwd.clear() not implemented in shim :(
while cwd.pop() {}
}
for part in path.split('/') {
// Remove any / that makes its way in
let part = part.replace("/", "");
if part == "." {
continue
} else if part == ".." {
cwd.pop();
} else {
cwd.push(&part);
match FILESYSTEM.open(cwd.as_path()) {
Ok(entry) => {
if entry.is_file() {
kprintln!("{}: Not a directory", part);
cwd.pop();
return false
}
}
Err(_) => {
kprintln!("{}: No such file or directory", part);
cwd.pop();
return false
}
}
}
}
return true
}
fn ls(cwd: &PathBuf, args: &[&str]) {
let mut rel_dir = cwd.clone();
let mut changed_dir = false;
let mut show_hidden = false;
for arg in args {
if *arg == "-a" {
show_hidden = true;
continue
}
if changed_dir {
continue
}
if !cd(&mut rel_dir, arg) {
return
} else {
changed_dir = true // only run cd once
}
}
// no need to cd. if they didn't change dir
let entry = FILESYSTEM.open(rel_dir.as_path()).expect("Couldn't open dir");
let dir = entry.as_dir().expect("Expected directory, found file");
for item in dir.entries().expect("Couldn't get a dir iterator") {
if show_hidden || !item.metadata().hidden() {
kprintln!("{}", item.metadata())
}
}
}
fn cat(cwd: &PathBuf, args: &[&str]) {
fn cat_one(cwd: &PathBuf, path: &str) {
use core::str;
use io::Read;
use alloc::slice::SliceConcatExt;
let mut rel_dir = cwd.clone();
let parts = path.split('/').collect::<Vec<&str>>();
let dir = parts[0..parts.len()-1].join("/");
if !cd(&mut rel_dir, &dir) {
return
}
rel_dir.push(parts[parts.len()-1]);
let entry = FILESYSTEM.open(rel_dir.as_path()).expect("Couldn't open file");
if !entry.is_file() {
kprintln!("Can't cat a directory {}!", path);
return
}
let mut file = entry.into_file().expect("Expected file, found directory");
loop {
let mut buffer = [0u8; 256];
match file.read(&mut buffer) {
Ok(0) => break,
Ok(n) => {
let string = str::from_utf8(&buffer[0..n]);
match string {
Ok(string) => kprint!("{}", string),
Err(_) => {
kprintln!("Couldn't parse {} as UTF-8", path);
return
},
}
},
Err(e) => {
kprintln!("Error when reading file {}: {:?}", path, e);
return
}
}
}
}
for arg in args {
cat_one(cwd, arg)
}
}
fn canonicalize(path: PathBuf) -> Result<PathBuf, ()> {
let mut new_path = PathBuf::new();
for comp in path.components() {
match comp {
Component::ParentDir => {
let res = new_path.pop();
if !res {
return Err(());
}
},
Component::Normal(n) => new_path = new_path.join(n),
Component::RootDir => new_path = ["/"].iter().collect(),
_ => ()
};
}
Ok(new_path)
}
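// Illustrative behaviour of `canonicalize` (a sketch; the paths are made up
// and this assumes the shim's `PathBuf` pops components like std's does):
//   canonicalize(PathBuf::from("/a/b/../c")) == Ok(PathBuf::from("/a/c"))
//   canonicalize(PathBuf::from("/.."))       == Err(())  // cannot pop past the root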
fn get_abs_path(cwd: &PathBuf, dir_arg: &str) -> Option<PathBuf> {
let mut raw_path: PathBuf = PathBuf::from(dir_arg);
if !raw_path.is_absolute() {
raw_path = cwd.clone().join(raw_path);
}
let abs_path = match canonicalize(raw_path) {
Ok(p) => p,
Err(_) => {
kprintln!("\ninvalid arg: {}", dir_arg);
return None;
}
};
Some(abs_path)
}
fn mkdir(cwd: &PathBuf, args: &[&str]) {
let abs_path = match get_abs_path(cwd, args[0]) {
Some(p) => p,
None => return
};
let dir_metadata = fat32::vfat::Metadata {
name: String::from(abs_path.file_name().unwrap().to_str().unwrap()),
created: fat32::vfat::Timestamp::default(),
accessed: fat32::vfat::Timestamp::default(),
modified: fat32::vfat::Timestamp::default(),
attributes: fat32::vfat::Attributes::default_dir(), // directory
size: 0
};
let path_clone = abs_path.clone();
FILESYSTEM.create_dir(abs_path.parent().unwrap(), dir_metadata).expect("Failed to create dir");
FILESYSTEM.flush_fs(path_clone);
}
fn write_file_test(cwd: &PathBuf) {
use shim::io::Write;
let mut dir = FILESYSTEM.open_dir(cwd.as_path()).expect("Couldn't get $CWD as dir");
dir.create(fat32::vfat::Metadata {
name: String::from("test_write.txt"),
created: fat32::vfat::Timestamp::default(),
accessed: fat32::vfat::Timestamp::default(),
modified: fat32::vfat::Timestamp::default(),
attributes: fat32::vfat::Attributes::default(),
size: 0,
}).expect("Couldn't create test_write.txt");
let mut path = cwd.clone();
path.push("test_write.txt");
let test_file_entry = FILESYSTEM.open(path.as_path()).expect("couldn't open /test_write.txt");
assert!(test_file_entry.is_file());
let mut test_file = test_file_entry.into_file().expect("couldn't open /test_write.txt as file");
let test_buf = "hello world!!\n".as_bytes();
assert_eq!(test_file.write(test_buf).unwrap(), test_buf.len());
assert_eq!(test_file.write(test_buf).unwrap(), test_buf.len());
FILESYSTEM.flush_fs(cwd);
}
fn touch(cwd: &PathBuf, args: &[&str]) {
for arg in args {
let arg_path = PathBuf::from(arg);
let raw_path = if !arg_path.is_absolute() {
cwd.join(arg_path)
} else { arg_path };
let path = canonicalize(raw_path).expect("Could not canonicalize path");
let base = path.parent();
let mut base_dir = match base {
None => FILESYSTEM.open_dir("/").expect("Could not get / as dir"),
Some(base) => FILESYSTEM.open_dir(base).expect("Could not get target as dir"),
};
let file = path.file_name().expect("Must specify a file to create")
.to_str().expect("Couldn't get filename as string");
base_dir.create(fat32::vfat::Metadata {
name: String::from(file),
..Default::default()
}).expect("Couldn't create file");
match base {
Some(base) => FILESYSTEM.flush_fs(base),
None => FILESYSTEM.flush_fs("/")
}
}
}
fn append(cwd: &PathBuf, args: &[&str]) {
use shim::io::{Write, Seek, SeekFrom};
if args.len() < 2 {
kprintln!("USAGE: append [filename] [contents]");
return;
}
let arg_path = PathBuf::from(args[0]);
let raw_path = if !arg_path.is_absolute() {
cwd.join(arg_path)
} else { arg_path };
let path = canonicalize(raw_path).expect("Could not canonicalize path");
let mut fd = FILESYSTEM.open_file(path.as_path()).expect("Couldn't open file for writing");
for i in 1..args.len() {
fd.seek(SeekFrom::End(0)).expect("Failed to seek to end of file");
fd.write(&args[i].bytes().collect::<alloc::vec::Vec<u8>>()).expect("Failed to append to file");
if i < args.len() - 1 {
fd.write(&[' ' as u8]).expect("Failed to append space to file");
}
}
fd.write(&['\n' as u8]).expect("Failed to append newline to file");
FILESYSTEM.flush_fs(path);
}
fn rm(cwd: &PathBuf, args: &[&str]) {
use fat32::traits::File;
if args.len() < 1 {
kprintln!("USAGE: rm [filename]+");
return;
}
for i in 0..args.len() {
let arg_path = PathBuf::from(args[i]);
let raw_path = if !arg_path.is_absolute() {
cwd.join(arg_path)
} else { arg_path };
let path = canonicalize(raw_path).expect("Could not canonicalize path");
let fd = FILESYSTEM.open(path.as_path()).expect("Couldn't open file for writing");
if fd.is_dir() {
match fd.into_dir().expect("Couldn't get dir as dir").delete() {
Ok(_) => (),
Err(e) => kprintln!("Could not delete directory: {:?}", e),
}
} else {
fd.into_file().expect("Couldn't get file as file").delete().expect("Could not delete file");
}
FILESYSTEM.flush_fs(path);
}
}
fn mount(cwd: &PathBuf, args: &[&str]) {
if args.len() < 2 {
kprintln!("not enough arguments!\nusage: mount <part> <path> -p <pw>");
return;
}
let part_num: usize = match args[0].parse() {
Ok(num) => num,
Err(_) => {
kprintln!("invalid partition number");
return;
}
};
let abs_path = match get_abs_path(cwd, args[1]) {
Some(p) => p,
None => return
};
let mut mount_opts = MountOptions::Normal;
if args.len() > 2 && args.len() != 4 {
kprintln!("incorrect arguments!\nusage: mount <part> <path> -p <pw>");
return;
} else if args.len() > 2 {
if args[2].eq_ignore_ascii_case("-p") {
mount_opts = MountOptions::Encrypted(Some(String::from(args[3])));
} else {
kprintln!("unknown flag: {}", args[2]);
return;
}
}
FILESYSTEM.mount(part_num, abs_path, mount_opts);
}
fn umount(cwd: &PathBuf, mount_point: &str) {
let abs_path = match get_abs_path(cwd, mount_point) {
Some(p) => p,
None => return
};
if abs_path.to_str().unwrap().eq_ignore_ascii_case("/") {
kprintln!("don't unmount root!!!");
} else {
match FILESYSTEM.unmount(PathBuf::from(abs_path.to_str().unwrap())) {
Ok(_) => { kprintln!("unmounted {}", abs_path.to_str().unwrap()); },
Err(_) => ()
}
}
}
// backs the mkcrypt command
// encrypts a device sector by sector
// usage: mkcrypt {header|full} num password
// header = don't encrypt data area (saves time if we don't care about the existing data on the partition)
// header mode will also zero the root cluster so that it's valid
// full = encrypt every sector of the partition (will be slow for large disks)
// if there's a power disruption or anything like that during the execution of this command you'll have
// big problems (we can fix that by creating a backup though)
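// Hypothetical invocations from the shell (partition number and password are
// made up for illustration only):
//   mkcrypt header 2 hunter2  -> encrypts reserved sectors + FATs + root cluster of partition 2
//   mkcrypt full 2 hunter2    -> encrypts every sector of partition 2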
fn encrypt_part(args: &[&str]) {
if args.len() < 3 {
kprintln!("not enough arguments!\nusage: mkcrypt {{header|full}} num password");
return;
}
// check usage
let mode = args[0]; // args[0] = mode (header|full)
match mode {
"header"|"full" => (),
_ => {
kprintln!("invalid mode: {}!\nusage: mkcrypt {{header|full}} num password", mode);
return;
}
}
let part_num: usize = match args[1].parse() {
Ok(num) => num,
Err(_) => {
kprintln!("invalid partition number: {}!\nusage: mkcrypt {{header|full}} num password", args[1]);
return;
}
};
if args[2].as_bytes().len() > 16 {
kprintln!("password can be at most 16 bytes");
return;
}
// we can change this later to be device agnostic
let mbr = match MasterBootRecord::from(Sd {}) {
Ok(mbr) => mbr,
Err(e) => {
kprintln!("error parsing MBR: {:?}", e);
return;
}
};
let start_sector = match mbr.get_partition_start(part_num) {
Some(s) => s as u64,
None => {
kprintln!("unable to get start sector for partition #{}", part_num);
return;
}
};
let ebpb = match BiosParameterBlock::from(Sd {}, start_sector) {
Ok(ebpb) => ebpb,
Err(e) => {
kprintln!("unable to parse EBPB: {:?}", e);
return;
}
};
let end_sector: u64;
match mode {
"header" => {
end_sector = start_sector
+ ebpb.num_reserved_sectors as u64
+ (ebpb.num_fats as u64 * ebpb.sectors_per_fat as u64)
- 1;
kprintln!("Encrypting root cluster...");
// encrypt root dir as well
let sector_offset = ((ebpb.root_cluster_number - 2) as u64) * (ebpb.sectors_per_cluster as u64);
let root_start_sector = end_sector + 1 + sector_offset as u64;
let root_cluster_end = root_start_sector + ebpb.sectors_per_cluster as u64 - 1;
match encrypt_sectors(root_start_sector, root_cluster_end, args[2]) {
Ok(_) => (),
Err(_) => return
}
kprintln!("Encrypting filesystem header...");
},
"full" => {
end_sector = start_sector
+ ebpb.num_logical_sectors_ext as u64
- 1;
kprintln!("Encrypting entire partition...");
}
_ => {
kprintln!("invalid mode: {}!\nusage: mkcrypt {{header|full}} num password", mode);
return;
}
}
match encrypt_sectors(start_sector, end_sector, args[2]) {
Ok(_) => (),
Err(_) => return
}
}
// encrypts sectors [first, last] on Sd {} using key
fn encrypt_sectors(first: u64, last: u64, key: &str) -> Result<(), ()> {
kprintln!("about | cd | identifier_name |
client_conn.rs | //! Single client connection
use std::io;
use std::result::Result as std_Result;
use std::sync::Arc;
use error;
use error::Error;
use result;
use exec::CpuPoolOption;
use solicit::end_stream::EndStream;
use solicit::frame::settings::*;
use solicit::header::*;
use solicit::StreamId;
use solicit::DEFAULT_SETTINGS;
use service::Service;
use futures::future::Future;
use futures::stream::Stream;
use futures::sync::mpsc::unbounded;
use futures::sync::mpsc::UnboundedSender;
use futures::sync::oneshot;
use tls_api::TlsConnector;
use tokio_core::reactor;
use tokio_io::AsyncRead;
use tokio_io::AsyncWrite;
use tokio_timer::Timer;
use tokio_tls_api;
use solicit_async::*;
use common::*;
use data_or_trailers::*;
use socket::*;
use client_died_error_holder::ClientDiedErrorHolder;
use common::client_or_server::ClientOrServer;
use data_or_headers::DataOrHeaders;
use data_or_headers_with_flag::DataOrHeadersWithFlag;
use headers_place::HeadersPlace;
use req_resp::RequestOrResponse;
use result_or_eof::ResultOrEof;
use std::marker;
use ClientConf;
use ClientTlsOption;
use ErrorCode;
struct ClientTypes<I>(marker::PhantomData<I>);
impl<I> Types for ClientTypes<I>
where
I: AsyncWrite + AsyncRead + Send +'static,
{
type Io = I;
type HttpStreamData = ClientStream<I>;
type HttpStreamSpecific = ClientStreamData;
type ConnSpecific = ClientConnData;
type ToWriteMessage = ClientToWriteMessage;
const OUT_REQUEST_OR_RESPONSE: RequestOrResponse = RequestOrResponse::Request;
const CLIENT_OR_SERVER: ClientOrServer = ClientOrServer::Client;
}
pub struct ClientStreamData {}
impl HttpStreamDataSpecific for ClientStreamData {}
type ClientStream<I> = HttpStreamCommon<ClientTypes<I>>;
impl<I> HttpStreamData for ClientStream<I>
where
I: AsyncWrite + AsyncRead + Send +'static,
{
type Types = ClientTypes<I>;
}
pub struct ClientConnData {
_callbacks: Box<ClientConnCallbacks>,
}
impl ConnSpecific for ClientConnData {}
pub struct | {
write_tx: UnboundedSender<ClientToWriteMessage>,
}
unsafe impl Sync for ClientConn {}
pub struct StartRequestMessage {
pub headers: Headers,
pub body: HttpStreamAfterHeaders,
pub resp_tx: oneshot::Sender<Response>,
}
enum ClientToWriteMessage {
Start(StartRequestMessage),
WaitForHandshake(oneshot::Sender<result::Result<()>>),
Common(CommonToWriteMessage),
}
impl From<CommonToWriteMessage> for ClientToWriteMessage {
fn from(m: CommonToWriteMessage) -> Self {
ClientToWriteMessage::Common(m)
}
}
impl<I> ConnWriteSideCustom for Conn<ClientTypes<I>>
where
I: AsyncWrite + AsyncRead + Send +'static,
{
type Types = ClientTypes<I>;
fn process_message(&mut self, message: ClientToWriteMessage) -> result::Result<()> {
match message {
ClientToWriteMessage::Start(start) => self.process_start(start),
ClientToWriteMessage::Common(common) => self.process_common_message(common),
ClientToWriteMessage::WaitForHandshake(tx) => {
// ignore error
drop(tx.send(Ok(())));
Ok(())
}
}
}
}
impl<I> Conn<ClientTypes<I>>
where
I: AsyncWrite + AsyncRead + Send +'static,
{
fn process_start(&mut self, start: StartRequestMessage) -> result::Result<()> {
let StartRequestMessage {
headers,
body,
resp_tx,
} = start;
let stream_id = self.next_local_stream_id();
let out_window = {
let (mut http_stream, resp_stream, out_window) = self.new_stream_data(
stream_id,
None,
InMessageStage::Initial,
ClientStreamData {},
);
if let Err(_) = resp_tx.send(Response::from_stream(resp_stream)) {
warn!("caller died");
}
http_stream.push_back(DataOrHeaders::Headers(headers));
out_window
};
self.pump_stream_to_write_loop(stream_id, body.into_part_stream(), out_window);
// Also opens latch if necessary
self.buffer_outg_conn()?;
Ok(())
}
}
pub trait ClientConnCallbacks:'static {
// called at most once
fn goaway(&self, stream_id: StreamId, raw_error_code: u32);
}
impl ClientConn {
fn spawn_connected<I, C>(
lh: reactor::Handle,
connect: HttpFutureSend<I>,
conf: ClientConf,
callbacks: C,
) -> Self
where
I: AsyncWrite + AsyncRead + Send +'static,
C: ClientConnCallbacks,
{
let (to_write_tx, to_write_rx) = unbounded();
let to_write_rx = Box::new(
to_write_rx
.map_err(|()| Error::IoError(io::Error::new(io::ErrorKind::Other, "to_write"))),
);
let c = ClientConn {
write_tx: to_write_tx.clone(),
};
let settings_frame = SettingsFrame::from_settings(vec![HttpSetting::EnablePush(false)]);
let mut settings = DEFAULT_SETTINGS;
settings.apply_from_frame(&settings_frame);
let handshake = connect.and_then(|conn| client_handshake(conn, settings_frame));
let conn_died_error_holder = ClientDiedErrorHolder::new();
let conn_died_error_holder_copy = conn_died_error_holder.clone();
let lh_copy = lh.clone();
let future = handshake.and_then(move |conn| {
debug!("handshake done");
let (read, write) = conn.split();
let conn_data = Conn::<ClientTypes<_>>::new(
lh_copy,
CpuPoolOption::SingleThread,
ClientConnData {
_callbacks: Box::new(callbacks),
},
conf.common,
settings,
to_write_tx.clone(),
to_write_rx,
read,
write,
conn_died_error_holder,
);
conn_data.run()
});
let future = conn_died_error_holder_copy.wrap_future(future);
lh.spawn(future);
c
}
pub fn spawn<H, C>(
lh: reactor::Handle,
addr: Box<ToClientStream>,
tls: ClientTlsOption<C>,
conf: ClientConf,
callbacks: H,
) -> Self
where
H: ClientConnCallbacks,
C: TlsConnector + Sync,
{
match tls {
ClientTlsOption::Plain => ClientConn::spawn_plain(lh.clone(), addr, conf, callbacks),
ClientTlsOption::Tls(domain, connector) => {
ClientConn::spawn_tls(lh.clone(), &domain, connector, addr, conf, callbacks)
}
}
}
pub fn spawn_plain<C>(
lh: reactor::Handle,
addr: Box<ToClientStream>,
conf: ClientConf,
callbacks: C,
) -> Self
where
C: ClientConnCallbacks,
{
let no_delay = conf.no_delay.unwrap_or(true);
let connect = addr.connect(&lh).map_err(Into::into);
let map_callback = move |socket: Box<StreamItem>| {
info!("connected to {}", addr);
if socket.is_tcp() {
socket
.set_nodelay(no_delay)
.expect("failed to set TCP_NODELAY");
}
socket
};
let connect: Box<Future<Item = _, Error = _> + Send> =
if let Some(timeout) = conf.connection_timeout {
let timer = Timer::default();
Box::new(timer.timeout(connect, timeout).map(map_callback))
} else {
Box::new(connect.map(map_callback))
};
ClientConn::spawn_connected(lh, connect, conf, callbacks)
}
pub fn spawn_tls<H, C>(
lh: reactor::Handle,
domain: &str,
connector: Arc<C>,
addr: Box<ToClientStream>,
conf: ClientConf,
callbacks: H,
) -> Self
where
H: ClientConnCallbacks,
C: TlsConnector + Sync,
{
let domain = domain.to_owned();
let connect = addr
.connect(&lh)
.map(move |c| {
info!("connected to {}", addr);
c
}).map_err(|e| e.into());
let tls_conn = connect.and_then(move |conn| {
tokio_tls_api::connect_async(&*connector, &domain, conn)
.map_err(|e| Error::IoError(io::Error::new(io::ErrorKind::Other, e)))
});
let tls_conn = tls_conn.map_err(Error::from);
ClientConn::spawn_connected(lh, Box::new(tls_conn), conf, callbacks)
}
pub fn start_request_with_resp_sender(
&self,
start: StartRequestMessage,
) -> Result<(), StartRequestMessage> {
self.write_tx
.unbounded_send(ClientToWriteMessage::Start(start))
.map_err(|send_error| match send_error.into_inner() {
ClientToWriteMessage::Start(start) => start,
_ => unreachable!(),
})
}
pub fn dump_state_with_resp_sender(&self, tx: oneshot::Sender<ConnStateSnapshot>) {
let message = ClientToWriteMessage::Common(CommonToWriteMessage::DumpState(tx));
// ignore error
drop(self.write_tx.unbounded_send(message));
}
/// For tests
#[doc(hidden)]
pub fn _dump_state(&self) -> HttpFutureSend<ConnStateSnapshot> {
let (tx, rx) = oneshot::channel();
self.dump_state_with_resp_sender(tx);
let rx =
rx.map_err(|_| Error::from(io::Error::new(io::ErrorKind::Other, "oneshot canceled")));
Box::new(rx)
}
pub fn wait_for_connect_with_resp_sender(
&self,
tx: oneshot::Sender<result::Result<()>>,
) -> std_Result<(), oneshot::Sender<result::Result<()>>> {
self.write_tx
.unbounded_send(ClientToWriteMessage::WaitForHandshake(tx))
.map_err(|send_error| match send_error.into_inner() {
ClientToWriteMessage::WaitForHandshake(tx) => tx,
_ => unreachable!(),
})
}
}
impl Service for ClientConn {
// TODO: copy-paste with Client::start_request
fn start_request(&self, headers: Headers, body: HttpStreamAfterHeaders) -> Response {
let (resp_tx, resp_rx) = oneshot::channel();
let start = StartRequestMessage {
headers: headers,
body: body,
resp_tx: resp_tx,
};
if let Err(_) = self.start_request_with_resp_sender(start) {
return Response::err(error::Error::Other("client died"));
}
let resp_rx =
resp_rx.map_err(|oneshot::Canceled| error::Error::Other("client likely died"));
let resp_rx = resp_rx.map(|r| r.into_stream_flag());
let resp_rx = resp_rx.flatten_stream();
Response::from_stream(resp_rx)
}
}
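// Minimal usage sketch for the `Service` impl above; how `headers` and `body`
// are constructed is an assumption for illustration and not part of this module:
//
// let response = client_conn.start_request(headers, body);
// // `response` resolves once the server answers (or the client connection dies).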
impl<I> ConnReadSideCustom for Conn<ClientTypes<I>>
where
I: AsyncWrite + AsyncRead + Send +'static,
{
type Types = ClientTypes<I>;
fn process_headers(
&mut self,
stream_id: StreamId,
end_stream: EndStream,
headers: Headers,
) -> result::Result<Option<HttpStreamRef<ClientTypes<I>>>> {
let existing_stream = self
.get_stream_for_headers_maybe_send_error(stream_id)?
.is_some();
if !existing_stream {
return Ok(None);
}
let in_message_stage = self
.streams
.get_mut(stream_id)
.unwrap()
.stream()
.in_message_stage;
let headers_place = match in_message_stage {
InMessageStage::Initial => HeadersPlace::Initial,
InMessageStage::AfterInitialHeaders => HeadersPlace::Trailing,
InMessageStage::AfterTrailingHeaders => {
return Err(error::Error::InternalError(format!(
"closed stream must be handled before"
)));
}
};
if let Err(e) = headers.validate(RequestOrResponse::Response, headers_place) {
warn!("invalid headers: {:?}: {:?}", e, headers);
self.send_rst_stream(stream_id, ErrorCode::ProtocolError)?;
return Ok(None);
}
let status_1xx = match headers_place {
HeadersPlace::Initial => {
let status = headers.status();
let status_1xx = status >= 100 && status <= 199;
if status_1xx && end_stream == EndStream::Yes {
warn!("1xx headers and end stream: {}", stream_id);
self.send_rst_stream(stream_id, ErrorCode::ProtocolError)?;
return Ok(None);
}
status_1xx
}
HeadersPlace::Trailing => {
if end_stream == EndStream::No {
warn!("headers without end stream after data: {}", stream_id);
self.send_rst_stream(stream_id, ErrorCode::ProtocolError)?;
return Ok(None);
}
false
}
};
let mut stream = self.streams.get_mut(stream_id).unwrap();
if let Some(in_rem_content_length) = headers.content_length() {
stream.stream().in_rem_content_length = Some(in_rem_content_length);
}
stream.stream().in_message_stage = match (headers_place, status_1xx) {
(HeadersPlace::Initial, false) => InMessageStage::AfterInitialHeaders,
(HeadersPlace::Initial, true) => InMessageStage::Initial,
(HeadersPlace::Trailing, _) => InMessageStage::AfterTrailingHeaders,
};
// Ignore 1xx headers
if !status_1xx {
if let Some(ref mut response_handler) = stream.stream().peer_tx {
// TODO: reset stream on error
drop(
response_handler.send(ResultOrEof::Item(DataOrHeadersWithFlag {
content: DataOrHeaders::Headers(headers),
last: end_stream == EndStream::Yes,
})),
);
} else {
// TODO: reset stream
}
}
Ok(Some(stream))
}
}
| ClientConn | identifier_name |
client_conn.rs | //! Single client connection
use std::io;
use std::result::Result as std_Result;
use std::sync::Arc;
use error;
use error::Error;
use result;
use exec::CpuPoolOption;
use solicit::end_stream::EndStream;
use solicit::frame::settings::*; |
use futures::future::Future;
use futures::stream::Stream;
use futures::sync::mpsc::unbounded;
use futures::sync::mpsc::UnboundedSender;
use futures::sync::oneshot;
use tls_api::TlsConnector;
use tokio_core::reactor;
use tokio_io::AsyncRead;
use tokio_io::AsyncWrite;
use tokio_timer::Timer;
use tokio_tls_api;
use solicit_async::*;
use common::*;
use data_or_trailers::*;
use socket::*;
use client_died_error_holder::ClientDiedErrorHolder;
use common::client_or_server::ClientOrServer;
use data_or_headers::DataOrHeaders;
use data_or_headers_with_flag::DataOrHeadersWithFlag;
use headers_place::HeadersPlace;
use req_resp::RequestOrResponse;
use result_or_eof::ResultOrEof;
use std::marker;
use ClientConf;
use ClientTlsOption;
use ErrorCode;
struct ClientTypes<I>(marker::PhantomData<I>);
impl<I> Types for ClientTypes<I>
where
I: AsyncWrite + AsyncRead + Send +'static,
{
type Io = I;
type HttpStreamData = ClientStream<I>;
type HttpStreamSpecific = ClientStreamData;
type ConnSpecific = ClientConnData;
type ToWriteMessage = ClientToWriteMessage;
const OUT_REQUEST_OR_RESPONSE: RequestOrResponse = RequestOrResponse::Request;
const CLIENT_OR_SERVER: ClientOrServer = ClientOrServer::Client;
}
pub struct ClientStreamData {}
impl HttpStreamDataSpecific for ClientStreamData {}
type ClientStream<I> = HttpStreamCommon<ClientTypes<I>>;
impl<I> HttpStreamData for ClientStream<I>
where
I: AsyncWrite + AsyncRead + Send +'static,
{
type Types = ClientTypes<I>;
}
pub struct ClientConnData {
_callbacks: Box<ClientConnCallbacks>,
}
impl ConnSpecific for ClientConnData {}
pub struct ClientConn {
write_tx: UnboundedSender<ClientToWriteMessage>,
}
unsafe impl Sync for ClientConn {}
pub struct StartRequestMessage {
pub headers: Headers,
pub body: HttpStreamAfterHeaders,
pub resp_tx: oneshot::Sender<Response>,
}
enum ClientToWriteMessage {
Start(StartRequestMessage),
WaitForHandshake(oneshot::Sender<result::Result<()>>),
Common(CommonToWriteMessage),
}
impl From<CommonToWriteMessage> for ClientToWriteMessage {
fn from(m: CommonToWriteMessage) -> Self {
ClientToWriteMessage::Common(m)
}
}
impl<I> ConnWriteSideCustom for Conn<ClientTypes<I>>
where
I: AsyncWrite + AsyncRead + Send +'static,
{
type Types = ClientTypes<I>;
fn process_message(&mut self, message: ClientToWriteMessage) -> result::Result<()> {
match message {
ClientToWriteMessage::Start(start) => self.process_start(start),
ClientToWriteMessage::Common(common) => self.process_common_message(common),
ClientToWriteMessage::WaitForHandshake(tx) => {
// ignore error
drop(tx.send(Ok(())));
Ok(())
}
}
}
}
impl<I> Conn<ClientTypes<I>>
where
I: AsyncWrite + AsyncRead + Send +'static,
{
fn process_start(&mut self, start: StartRequestMessage) -> result::Result<()> {
let StartRequestMessage {
headers,
body,
resp_tx,
} = start;
let stream_id = self.next_local_stream_id();
let out_window = {
let (mut http_stream, resp_stream, out_window) = self.new_stream_data(
stream_id,
None,
InMessageStage::Initial,
ClientStreamData {},
);
if let Err(_) = resp_tx.send(Response::from_stream(resp_stream)) {
warn!("caller died");
}
http_stream.push_back(DataOrHeaders::Headers(headers));
out_window
};
self.pump_stream_to_write_loop(stream_id, body.into_part_stream(), out_window);
// Also opens latch if necessary
self.buffer_outg_conn()?;
Ok(())
}
}
pub trait ClientConnCallbacks:'static {
// called at most once
fn goaway(&self, stream_id: StreamId, raw_error_code: u32);
}
impl ClientConn {
fn spawn_connected<I, C>(
lh: reactor::Handle,
connect: HttpFutureSend<I>,
conf: ClientConf,
callbacks: C,
) -> Self
where
I: AsyncWrite + AsyncRead + Send +'static,
C: ClientConnCallbacks,
{
let (to_write_tx, to_write_rx) = unbounded();
let to_write_rx = Box::new(
to_write_rx
.map_err(|()| Error::IoError(io::Error::new(io::ErrorKind::Other, "to_write"))),
);
let c = ClientConn {
write_tx: to_write_tx.clone(),
};
let settings_frame = SettingsFrame::from_settings(vec![HttpSetting::EnablePush(false)]);
let mut settings = DEFAULT_SETTINGS;
settings.apply_from_frame(&settings_frame);
let handshake = connect.and_then(|conn| client_handshake(conn, settings_frame));
let conn_died_error_holder = ClientDiedErrorHolder::new();
let conn_died_error_holder_copy = conn_died_error_holder.clone();
let lh_copy = lh.clone();
let future = handshake.and_then(move |conn| {
debug!("handshake done");
let (read, write) = conn.split();
let conn_data = Conn::<ClientTypes<_>>::new(
lh_copy,
CpuPoolOption::SingleThread,
ClientConnData {
_callbacks: Box::new(callbacks),
},
conf.common,
settings,
to_write_tx.clone(),
to_write_rx,
read,
write,
conn_died_error_holder,
);
conn_data.run()
});
let future = conn_died_error_holder_copy.wrap_future(future);
lh.spawn(future);
c
}
pub fn spawn<H, C>(
lh: reactor::Handle,
addr: Box<ToClientStream>,
tls: ClientTlsOption<C>,
conf: ClientConf,
callbacks: H,
) -> Self
where
H: ClientConnCallbacks,
C: TlsConnector + Sync,
{
match tls {
ClientTlsOption::Plain => ClientConn::spawn_plain(lh.clone(), addr, conf, callbacks),
ClientTlsOption::Tls(domain, connector) => {
ClientConn::spawn_tls(lh.clone(), &domain, connector, addr, conf, callbacks)
}
}
}
pub fn spawn_plain<C>(
lh: reactor::Handle,
addr: Box<ToClientStream>,
conf: ClientConf,
callbacks: C,
) -> Self
where
C: ClientConnCallbacks,
{
let no_delay = conf.no_delay.unwrap_or(true);
let connect = addr.connect(&lh).map_err(Into::into);
let map_callback = move |socket: Box<StreamItem>| {
info!("connected to {}", addr);
if socket.is_tcp() {
socket
.set_nodelay(no_delay)
.expect("failed to set TCP_NODELAY");
}
socket
};
let connect: Box<Future<Item = _, Error = _> + Send> =
if let Some(timeout) = conf.connection_timeout {
let timer = Timer::default();
Box::new(timer.timeout(connect, timeout).map(map_callback))
} else {
Box::new(connect.map(map_callback))
};
ClientConn::spawn_connected(lh, connect, conf, callbacks)
}
pub fn spawn_tls<H, C>(
lh: reactor::Handle,
domain: &str,
connector: Arc<C>,
addr: Box<ToClientStream>,
conf: ClientConf,
callbacks: H,
) -> Self
where
H: ClientConnCallbacks,
C: TlsConnector + Sync,
{
let domain = domain.to_owned();
let connect = addr
.connect(&lh)
.map(move |c| {
info!("connected to {}", addr);
c
}).map_err(|e| e.into());
let tls_conn = connect.and_then(move |conn| {
tokio_tls_api::connect_async(&*connector, &domain, conn)
.map_err(|e| Error::IoError(io::Error::new(io::ErrorKind::Other, e)))
});
let tls_conn = tls_conn.map_err(Error::from);
ClientConn::spawn_connected(lh, Box::new(tls_conn), conf, callbacks)
}
pub fn start_request_with_resp_sender(
&self,
start: StartRequestMessage,
) -> Result<(), StartRequestMessage> {
self.write_tx
.unbounded_send(ClientToWriteMessage::Start(start))
.map_err(|send_error| match send_error.into_inner() {
ClientToWriteMessage::Start(start) => start,
_ => unreachable!(),
})
}
pub fn dump_state_with_resp_sender(&self, tx: oneshot::Sender<ConnStateSnapshot>) {
let message = ClientToWriteMessage::Common(CommonToWriteMessage::DumpState(tx));
// ignore error
drop(self.write_tx.unbounded_send(message));
}
/// For tests
#[doc(hidden)]
pub fn _dump_state(&self) -> HttpFutureSend<ConnStateSnapshot> {
let (tx, rx) = oneshot::channel();
self.dump_state_with_resp_sender(tx);
let rx =
rx.map_err(|_| Error::from(io::Error::new(io::ErrorKind::Other, "oneshot canceled")));
Box::new(rx)
}
pub fn wait_for_connect_with_resp_sender(
&self,
tx: oneshot::Sender<result::Result<()>>,
) -> std_Result<(), oneshot::Sender<result::Result<()>>> {
self.write_tx
.unbounded_send(ClientToWriteMessage::WaitForHandshake(tx))
.map_err(|send_error| match send_error.into_inner() {
ClientToWriteMessage::WaitForHandshake(tx) => tx,
_ => unreachable!(),
})
}
}
impl Service for ClientConn {
// TODO: copy-paste with Client::start_request
fn start_request(&self, headers: Headers, body: HttpStreamAfterHeaders) -> Response {
let (resp_tx, resp_rx) = oneshot::channel();
let start = StartRequestMessage {
headers: headers,
body: body,
resp_tx: resp_tx,
};
if let Err(_) = self.start_request_with_resp_sender(start) {
return Response::err(error::Error::Other("client died"));
}
let resp_rx =
resp_rx.map_err(|oneshot::Canceled| error::Error::Other("client likely died"));
let resp_rx = resp_rx.map(|r| r.into_stream_flag());
let resp_rx = resp_rx.flatten_stream();
Response::from_stream(resp_rx)
}
}
impl<I> ConnReadSideCustom for Conn<ClientTypes<I>>
where
I: AsyncWrite + AsyncRead + Send +'static,
{
type Types = ClientTypes<I>;
fn process_headers(
&mut self,
stream_id: StreamId,
end_stream: EndStream,
headers: Headers,
) -> result::Result<Option<HttpStreamRef<ClientTypes<I>>>> {
let existing_stream = self
.get_stream_for_headers_maybe_send_error(stream_id)?
.is_some();
if !existing_stream {
return Ok(None);
}
let in_message_stage = self
.streams
.get_mut(stream_id)
.unwrap()
.stream()
.in_message_stage;
let headers_place = match in_message_stage {
InMessageStage::Initial => HeadersPlace::Initial,
InMessageStage::AfterInitialHeaders => HeadersPlace::Trailing,
InMessageStage::AfterTrailingHeaders => {
return Err(error::Error::InternalError(format!(
"closed stream must be handled before"
)));
}
};
if let Err(e) = headers.validate(RequestOrResponse::Response, headers_place) {
warn!("invalid headers: {:?}: {:?}", e, headers);
self.send_rst_stream(stream_id, ErrorCode::ProtocolError)?;
return Ok(None);
}
let status_1xx = match headers_place {
HeadersPlace::Initial => {
let status = headers.status();
let status_1xx = status >= 100 && status <= 199;
if status_1xx && end_stream == EndStream::Yes {
warn!("1xx headers and end stream: {}", stream_id);
self.send_rst_stream(stream_id, ErrorCode::ProtocolError)?;
return Ok(None);
}
status_1xx
}
HeadersPlace::Trailing => {
if end_stream == EndStream::No {
warn!("headers without end stream after data: {}", stream_id);
self.send_rst_stream(stream_id, ErrorCode::ProtocolError)?;
return Ok(None);
}
false
}
};
let mut stream = self.streams.get_mut(stream_id).unwrap();
if let Some(in_rem_content_length) = headers.content_length() {
stream.stream().in_rem_content_length = Some(in_rem_content_length);
}
stream.stream().in_message_stage = match (headers_place, status_1xx) {
(HeadersPlace::Initial, false) => InMessageStage::AfterInitialHeaders,
(HeadersPlace::Initial, true) => InMessageStage::Initial,
(HeadersPlace::Trailing, _) => InMessageStage::AfterTrailingHeaders,
};
// Ignore 1xx headers
if !status_1xx {
if let Some(ref mut response_handler) = stream.stream().peer_tx {
// TODO: reset stream on error
drop(
response_handler.send(ResultOrEof::Item(DataOrHeadersWithFlag {
content: DataOrHeaders::Headers(headers),
last: end_stream == EndStream::Yes,
})),
);
} else {
// TODO: reset stream
}
}
Ok(Some(stream))
}
} | use solicit::header::*;
use solicit::StreamId;
use solicit::DEFAULT_SETTINGS;
use service::Service; | random_line_split |
client_conn.rs | //! Single client connection
use std::io;
use std::result::Result as std_Result;
use std::sync::Arc;
use error;
use error::Error;
use result;
use exec::CpuPoolOption;
use solicit::end_stream::EndStream;
use solicit::frame::settings::*;
use solicit::header::*;
use solicit::StreamId;
use solicit::DEFAULT_SETTINGS;
use service::Service;
use futures::future::Future;
use futures::stream::Stream;
use futures::sync::mpsc::unbounded;
use futures::sync::mpsc::UnboundedSender;
use futures::sync::oneshot;
use tls_api::TlsConnector;
use tokio_core::reactor;
use tokio_io::AsyncRead;
use tokio_io::AsyncWrite;
use tokio_timer::Timer;
use tokio_tls_api;
use solicit_async::*;
use common::*;
use data_or_trailers::*;
use socket::*;
use client_died_error_holder::ClientDiedErrorHolder;
use common::client_or_server::ClientOrServer;
use data_or_headers::DataOrHeaders;
use data_or_headers_with_flag::DataOrHeadersWithFlag;
use headers_place::HeadersPlace;
use req_resp::RequestOrResponse;
use result_or_eof::ResultOrEof;
use std::marker;
use ClientConf;
use ClientTlsOption;
use ErrorCode;
struct ClientTypes<I>(marker::PhantomData<I>);
impl<I> Types for ClientTypes<I>
where
I: AsyncWrite + AsyncRead + Send +'static,
{
type Io = I;
type HttpStreamData = ClientStream<I>;
type HttpStreamSpecific = ClientStreamData;
type ConnSpecific = ClientConnData;
type ToWriteMessage = ClientToWriteMessage;
const OUT_REQUEST_OR_RESPONSE: RequestOrResponse = RequestOrResponse::Request;
const CLIENT_OR_SERVER: ClientOrServer = ClientOrServer::Client;
}
pub struct ClientStreamData {}
impl HttpStreamDataSpecific for ClientStreamData {}
type ClientStream<I> = HttpStreamCommon<ClientTypes<I>>;
impl<I> HttpStreamData for ClientStream<I>
where
I: AsyncWrite + AsyncRead + Send +'static,
{
type Types = ClientTypes<I>;
}
pub struct ClientConnData {
_callbacks: Box<ClientConnCallbacks>,
}
impl ConnSpecific for ClientConnData {}
pub struct ClientConn {
write_tx: UnboundedSender<ClientToWriteMessage>,
}
unsafe impl Sync for ClientConn {}
pub struct StartRequestMessage {
pub headers: Headers,
pub body: HttpStreamAfterHeaders,
pub resp_tx: oneshot::Sender<Response>,
}
enum ClientToWriteMessage {
Start(StartRequestMessage),
WaitForHandshake(oneshot::Sender<result::Result<()>>),
Common(CommonToWriteMessage),
}
impl From<CommonToWriteMessage> for ClientToWriteMessage {
fn from(m: CommonToWriteMessage) -> Self {
ClientToWriteMessage::Common(m)
}
}
impl<I> ConnWriteSideCustom for Conn<ClientTypes<I>>
where
I: AsyncWrite + AsyncRead + Send +'static,
{
type Types = ClientTypes<I>;
fn process_message(&mut self, message: ClientToWriteMessage) -> result::Result<()> {
match message {
ClientToWriteMessage::Start(start) => self.process_start(start),
ClientToWriteMessage::Common(common) => self.process_common_message(common),
ClientToWriteMessage::WaitForHandshake(tx) => {
// ignore error
drop(tx.send(Ok(())));
Ok(())
}
}
}
}
impl<I> Conn<ClientTypes<I>>
where
I: AsyncWrite + AsyncRead + Send +'static,
{
fn process_start(&mut self, start: StartRequestMessage) -> result::Result<()> {
let StartRequestMessage {
headers,
body,
resp_tx,
} = start;
let stream_id = self.next_local_stream_id();
let out_window = {
let (mut http_stream, resp_stream, out_window) = self.new_stream_data(
stream_id,
None,
InMessageStage::Initial,
ClientStreamData {},
);
if let Err(_) = resp_tx.send(Response::from_stream(resp_stream)) {
warn!("caller died");
}
http_stream.push_back(DataOrHeaders::Headers(headers));
out_window
};
self.pump_stream_to_write_loop(stream_id, body.into_part_stream(), out_window);
// Also opens latch if necessary
self.buffer_outg_conn()?;
Ok(())
}
}
pub trait ClientConnCallbacks:'static {
// called at most once
fn goaway(&self, stream_id: StreamId, raw_error_code: u32);
}
impl ClientConn {
fn spawn_connected<I, C>(
lh: reactor::Handle,
connect: HttpFutureSend<I>,
conf: ClientConf,
callbacks: C,
) -> Self
where
I: AsyncWrite + AsyncRead + Send +'static,
C: ClientConnCallbacks,
{
let (to_write_tx, to_write_rx) = unbounded();
let to_write_rx = Box::new(
to_write_rx
.map_err(|()| Error::IoError(io::Error::new(io::ErrorKind::Other, "to_write"))),
);
let c = ClientConn {
write_tx: to_write_tx.clone(),
};
let settings_frame = SettingsFrame::from_settings(vec![HttpSetting::EnablePush(false)]);
let mut settings = DEFAULT_SETTINGS;
settings.apply_from_frame(&settings_frame);
let handshake = connect.and_then(|conn| client_handshake(conn, settings_frame));
let conn_died_error_holder = ClientDiedErrorHolder::new();
let conn_died_error_holder_copy = conn_died_error_holder.clone();
let lh_copy = lh.clone();
let future = handshake.and_then(move |conn| {
debug!("handshake done");
let (read, write) = conn.split();
let conn_data = Conn::<ClientTypes<_>>::new(
lh_copy,
CpuPoolOption::SingleThread,
ClientConnData {
_callbacks: Box::new(callbacks),
},
conf.common,
settings,
to_write_tx.clone(),
to_write_rx,
read,
write,
conn_died_error_holder,
);
conn_data.run()
});
let future = conn_died_error_holder_copy.wrap_future(future);
lh.spawn(future);
c
}
pub fn spawn<H, C>(
lh: reactor::Handle,
addr: Box<ToClientStream>,
tls: ClientTlsOption<C>,
conf: ClientConf,
callbacks: H,
) -> Self
where
H: ClientConnCallbacks,
C: TlsConnector + Sync,
{
match tls {
ClientTlsOption::Plain => ClientConn::spawn_plain(lh.clone(), addr, conf, callbacks),
ClientTlsOption::Tls(domain, connector) => {
ClientConn::spawn_tls(lh.clone(), &domain, connector, addr, conf, callbacks)
}
}
}
pub fn spawn_plain<C>(
lh: reactor::Handle,
addr: Box<ToClientStream>,
conf: ClientConf,
callbacks: C,
) -> Self
where
C: ClientConnCallbacks,
{
let no_delay = conf.no_delay.unwrap_or(true);
let connect = addr.connect(&lh).map_err(Into::into);
let map_callback = move |socket: Box<StreamItem>| {
info!("connected to {}", addr);
if socket.is_tcp() {
socket
.set_nodelay(no_delay)
.expect("failed to set TCP_NODELAY");
}
socket
};
let connect: Box<Future<Item = _, Error = _> + Send> =
if let Some(timeout) = conf.connection_timeout {
let timer = Timer::default();
Box::new(timer.timeout(connect, timeout).map(map_callback))
} else {
Box::new(connect.map(map_callback))
};
ClientConn::spawn_connected(lh, connect, conf, callbacks)
}
pub fn spawn_tls<H, C>(
lh: reactor::Handle,
domain: &str,
connector: Arc<C>,
addr: Box<ToClientStream>,
conf: ClientConf,
callbacks: H,
) -> Self
where
H: ClientConnCallbacks,
C: TlsConnector + Sync,
{
let domain = domain.to_owned();
let connect = addr
.connect(&lh)
.map(move |c| {
info!("connected to {}", addr);
c
}).map_err(|e| e.into());
let tls_conn = connect.and_then(move |conn| {
tokio_tls_api::connect_async(&*connector, &domain, conn)
.map_err(|e| Error::IoError(io::Error::new(io::ErrorKind::Other, e)))
});
let tls_conn = tls_conn.map_err(Error::from);
ClientConn::spawn_connected(lh, Box::new(tls_conn), conf, callbacks)
}
pub fn start_request_with_resp_sender(
&self,
start: StartRequestMessage,
) -> Result<(), StartRequestMessage> {
self.write_tx
.unbounded_send(ClientToWriteMessage::Start(start))
.map_err(|send_error| match send_error.into_inner() {
ClientToWriteMessage::Start(start) => start,
_ => unreachable!(),
})
}
pub fn dump_state_with_resp_sender(&self, tx: oneshot::Sender<ConnStateSnapshot>) {
let message = ClientToWriteMessage::Common(CommonToWriteMessage::DumpState(tx));
// ignore error
drop(self.write_tx.unbounded_send(message));
}
/// For tests
#[doc(hidden)]
pub fn _dump_state(&self) -> HttpFutureSend<ConnStateSnapshot> {
let (tx, rx) = oneshot::channel();
self.dump_state_with_resp_sender(tx);
let rx =
rx.map_err(|_| Error::from(io::Error::new(io::ErrorKind::Other, "oneshot canceled")));
Box::new(rx)
}
pub fn wait_for_connect_with_resp_sender(
&self,
tx: oneshot::Sender<result::Result<()>>,
) -> std_Result<(), oneshot::Sender<result::Result<()>>> {
self.write_tx
.unbounded_send(ClientToWriteMessage::WaitForHandshake(tx))
.map_err(|send_error| match send_error.into_inner() {
ClientToWriteMessage::WaitForHandshake(tx) => tx,
_ => unreachable!(),
})
}
}
impl Service for ClientConn {
// TODO: copy-paste with Client::start_request
fn start_request(&self, headers: Headers, body: HttpStreamAfterHeaders) -> Response {
let (resp_tx, resp_rx) = oneshot::channel();
let start = StartRequestMessage {
headers: headers,
body: body,
resp_tx: resp_tx,
};
if let Err(_) = self.start_request_with_resp_sender(start) {
return Response::err(error::Error::Other("client died"));
}
let resp_rx =
resp_rx.map_err(|oneshot::Canceled| error::Error::Other("client likely died"));
let resp_rx = resp_rx.map(|r| r.into_stream_flag());
let resp_rx = resp_rx.flatten_stream();
Response::from_stream(resp_rx)
}
}
impl<I> ConnReadSideCustom for Conn<ClientTypes<I>>
where
I: AsyncWrite + AsyncRead + Send +'static,
{
type Types = ClientTypes<I>;
fn process_headers(
&mut self,
stream_id: StreamId,
end_stream: EndStream,
headers: Headers,
) -> result::Result<Option<HttpStreamRef<ClientTypes<I>>>> {
let existing_stream = self
.get_stream_for_headers_maybe_send_error(stream_id)?
.is_some();
if !existing_stream {
return Ok(None);
}
let in_message_stage = self
.streams
.get_mut(stream_id)
.unwrap()
.stream()
.in_message_stage;
let headers_place = match in_message_stage {
InMessageStage::Initial => HeadersPlace::Initial,
InMessageStage::AfterInitialHeaders => HeadersPlace::Trailing,
InMessageStage::AfterTrailingHeaders => {
return Err(error::Error::InternalError(format!(
"closed stream must be handled before"
)));
}
};
if let Err(e) = headers.validate(RequestOrResponse::Response, headers_place) |
let status_1xx = match headers_place {
HeadersPlace::Initial => {
let status = headers.status();
let status_1xx = status >= 100 && status <= 199;
if status_1xx && end_stream == EndStream::Yes {
warn!("1xx headers and end stream: {}", stream_id);
self.send_rst_stream(stream_id, ErrorCode::ProtocolError)?;
return Ok(None);
}
status_1xx
}
HeadersPlace::Trailing => {
if end_stream == EndStream::No {
warn!("headers without end stream after data: {}", stream_id);
self.send_rst_stream(stream_id, ErrorCode::ProtocolError)?;
return Ok(None);
}
false
}
};
let mut stream = self.streams.get_mut(stream_id).unwrap();
if let Some(in_rem_content_length) = headers.content_length() {
stream.stream().in_rem_content_length = Some(in_rem_content_length);
}
stream.stream().in_message_stage = match (headers_place, status_1xx) {
(HeadersPlace::Initial, false) => InMessageStage::AfterInitialHeaders,
(HeadersPlace::Initial, true) => InMessageStage::Initial,
(HeadersPlace::Trailing, _) => InMessageStage::AfterTrailingHeaders,
};
// Ignore 1xx headers
if !status_1xx {
if let Some(ref mut response_handler) = stream.stream().peer_tx {
// TODO: reset stream on error
drop(
response_handler.send(ResultOrEof::Item(DataOrHeadersWithFlag {
content: DataOrHeaders::Headers(headers),
last: end_stream == EndStream::Yes,
})),
);
} else {
// TODO: reset stream
}
}
Ok(Some(stream))
}
}
| {
warn!("invalid headers: {:?}: {:?}", e, headers);
self.send_rst_stream(stream_id, ErrorCode::ProtocolError)?;
return Ok(None);
} | conditional_block |
mod.rs | //! Kit for creating a a handler for batches of events
//!
//! Start here if you want to implement a handler for processing of events
use std::fmt;
use std::time::{Duration, Instant};
pub use bytes::Bytes;
use futures::future::BoxFuture;
pub type BatchHandlerFuture<'a> = BoxFuture<'a, BatchPostAction>;
use crate::nakadi_types::{
event_type::EventTypeName,
partition::PartitionId,
subscription::{EventTypePartition, StreamId, SubscriptionCursor},
};
pub use crate::nakadi_types::Error;
mod typed;
pub use typed::*;
/// Information on the current batch passed to a `BatchHandler`.
///
/// The `frame_id` is monotonically increasing for each `BatchHandler`
/// within a stream (same `StreamId`)
/// as long as a dispatch strategy which keeps the ordering of
/// events is chosen. There may be gaps between the ids.
#[derive(Debug)]
#[non_exhaustive]
pub struct BatchMeta<'a> {
pub stream_id: StreamId,
pub cursor: &'a SubscriptionCursor,
/// Timestamp when the first byte was received
pub frame_started_at: Instant,
/// Timestamp when the frame was completed
pub frame_completed_at: Instant,
pub frame_id: usize,
pub n_events: usize,
}
/// Returned by a `BatchHandler` to tell `Nakadion`
/// how to continue.
#[derive(Debug, Clone)]
pub enum BatchPostAction {
/// Commit the batch
Commit(BatchStats),
/// Do not commit the batch and continue
///
/// Use if committed "manually" within the handler
DoNotCommit(BatchStats),
/// Abort the current stream and reconnect
AbortStream(String),
/// Abort the consumption and shut down
ShutDown(String),
}
impl BatchPostAction {
pub fn commit_no_stats() -> Self {
BatchPostAction::Commit(BatchStats::default())
}
pub fn commit(t_deserialize: Duration) -> Self {
BatchPostAction::Commit(BatchStats {
t_deserialize: Some(t_deserialize),
})
}
pub fn do_not_commit_no_stats() -> Self {
BatchPostAction::DoNotCommit(BatchStats::default())
}
pub fn do_not_commit(t_deserialize: Duration) -> Self {
BatchPostAction::DoNotCommit(BatchStats {
t_deserialize: Some(t_deserialize),
})
}
}
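// Example (sketch) of reporting the deserialization time back to Nakadion from
// within a handler; `MyEvent` and `bytes` are hypothetical:
//
// let started = std::time::Instant::now();
// let events: Vec<MyEvent> = serde_json::from_slice(&bytes).expect("deserialize batch");
// return BatchPostAction::commit(started.elapsed());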
/// Statistics on the processed batch
#[derive(Default, Debug, Clone, PartialEq, Eq)]
#[non_exhaustive]
pub struct BatchStats {
/// The time it took to deserialize the batch
pub t_deserialize: Option<Duration>,
}
/// Returned by a `BatchHandler` when queried
/// on inactivity.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum InactivityAnswer {
KeepMeAlive,
KillMe,
}
impl InactivityAnswer {
/// Returns `true` if the `BatchHandler` should be killed.
pub fn should_kill(self) -> bool {
self == InactivityAnswer::KillMe
}
/// Returns `true` if the `BatchHandler` should stay alive.
pub fn should_stay_alive(self) -> bool {
self == InactivityAnswer::KeepMeAlive
}
}
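// Example (sketch): a `BatchHandler` (see the trait below) can override
// `on_inactive` to request its own shutdown after a quiet period:
//
// fn on_inactive(&mut self, inactive_for: Duration, _last_activity: Instant) -> InactivityAnswer {
//     if inactive_for > Duration::from_secs(300) {
//         InactivityAnswer::KillMe
//     } else {
//         InactivityAnswer::KeepMeAlive
//     }
// }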
/// A handler that implements batch processing logic.
///
/// This trait will be called by Nakadion when a batch has to
/// be processed. The `BatchHandler` only receives an `EventType`
/// and a slice of bytes that contains the batch.
///
/// The `events` slice always contains a JSON encoded array of events.
///
/// # Hint
///
/// The `handle` method gets called on `&mut self`.
///
/// # Example
///
/// ```rust
/// use futures::FutureExt;
///
/// use nakadion::handler::{BatchHandler, BatchPostAction, BatchMeta, Bytes, BatchHandlerFuture};
/// use nakadion::nakadi_types::subscription::EventTypeName;
///
/// // Use a struct to maintain state
/// struct MyHandler {
/// pub count: i32,
/// }
///
/// // Implement the processing logic by implementing `BatchHandler`
/// impl BatchHandler for MyHandler {
/// fn handle(&mut self, _events: Bytes, _meta: BatchMeta) -> BatchHandlerFuture {
/// async move {
/// self.count += 1;
/// BatchPostAction::commit_no_stats()
/// }.boxed()
/// }
/// }
/// ```
pub trait BatchHandler: Send {
/// Handle a batch of bytes
fn handle<'a>(&'a mut self, events: Bytes, meta: BatchMeta<'a>) -> BatchHandlerFuture<'a>;
/// Periodically called if there were no events for a given time.
///
/// This method will only be called if the parameter `handler_inactivity_timeout_secs`
/// was set for the `Consumer`
fn on_inactive(
&mut self,
_inactive_for: Duration,
_last_activity: Instant,
) -> InactivityAnswer {
InactivityAnswer::KeepMeAlive
}
}
/// Simple wrapper for `BatchHandlers` from closures
pub struct HandlerFn<F>(pub F);
impl<F> BatchHandler for HandlerFn<F>
where
F: for<'a> FnMut(Bytes, BatchMeta<'a>) -> BatchHandlerFuture<'a> + Send,
{
fn handle<'a>(&'a mut self, events: Bytes, meta: BatchMeta<'a>) -> BatchHandlerFuture<'a> {
(self.0)(events, meta)
}
}
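// Example (sketch) of building a `BatchHandler` from a closure via `HandlerFn`
// instead of defining a struct:
//
// use futures::FutureExt;
//
// let mut handler = HandlerFn(|_events: Bytes, meta: BatchMeta| {
//     async move {
//         println!("batch with {} events", meta.n_events);
//         BatchPostAction::commit_no_stats()
//     }
//     .boxed()
// });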
/// Defines what a `BatchHandler` will receive.
///
/// This value should be the same for the whole lifetime of the
/// `BatchHandler`. "Should" because in the end it is the
/// `BatchHandlerFactory` which returns `BatchHandler`s. But it
/// is guaranteed that `Nakadion` will only pass events to a `BatchHandler`
/// as defined by the `DispatchStrategy`.
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum HandlerAssignment {
/// Everything can be passed to the `BatchHandler`.
Unspecified,
/// The `BatchHandler` will only receive events
/// of the given event type but from any partition.
EventType(EventTypeName),
/// The `BatchHandler` will only receive events
/// of the given event type on the given partition.
EventTypePartition(EventTypePartition),
}
impl HandlerAssignment {
pub fn event_type(&self) -> Option<&EventTypeName> {
self.event_type_and_partition().0
}
pub fn partition(&self) -> Option<&PartitionId> {
self.event_type_and_partition().1
}
pub fn event_type_and_partition(&self) -> (Option<&EventTypeName>, Option<&PartitionId>) {
match self {
HandlerAssignment::Unspecified => (None, None),
HandlerAssignment::EventType(event_type) => (Some(&event_type), None),
HandlerAssignment::EventTypePartition(ref etp) => {
(Some(etp.event_type()), Some(etp.partition()))
}
}
}
pub fn into_event_type_and_partition(self) -> (Option<EventTypeName>, Option<PartitionId>) {
match self {
HandlerAssignment::Unspecified => (None, None),
HandlerAssignment::EventType(event_type) => (Some(event_type), None),
HandlerAssignment::EventTypePartition(etp) => {
let (a, b) = etp.split();
(Some(a), Some(b))
}
}
}
}
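// Example (sketch): a `BatchHandlerFactory` (further below) can branch on the
// assignment it receives when deciding which handler to build:
//
// match assignment.event_type_and_partition() {
//     (Some(_event_type), Some(_partition)) => { /* handler bound to one partition */ }
//     (Some(_event_type), None) => { /* handler bound to one event type */ }
//     _ => { /* catch-all handler */ }
// }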
impl fmt::Display for HandlerAssignment {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
HandlerAssignment::Unspecified => write!(f, "[unspecified]")?,
HandlerAssignment::EventType(ref event_type) => {
write!(f, "[event_type={}]", event_type)?
}
HandlerAssignment::EventTypePartition(ref event_type_partition) => write!(
f,
"[event_type={}, partition={}]",
event_type_partition.event_type(), | )?,
}
Ok(())
}
}
/// A factory that creates `BatchHandler`s.
///
/// # Usage
///
/// A `BatchHandlerFactory` can be used in two ways:
///
/// * It does not contain any state it shares with the created `BatchHandler`s.
/// This is useful when incoming data is partitioned in a way that all
/// `BatchHandler`s act only on data that never appears on another partition.
///
/// * It contains state that is shared with the `BatchHandler`s. E.g. a cache
/// that contains data that can appear on other partitions.
/// # Example
///
/// ```rust
/// use std::sync::{Arc, Mutex};
/// use futures::{FutureExt, future::BoxFuture};
///
/// use nakadion::handler::*;
///
/// // Use a struct to maintain state
/// struct MyHandler(Arc<Mutex<i32>>);
///
/// // Implement the processing logic by implementing `BatchHandler`
/// impl BatchHandler for MyHandler {
/// fn handle(&mut self, _events: Bytes, _meta: BatchMeta) -> BatchHandlerFuture {
/// async move {
/// *self.0.lock().unwrap() += 1;
/// BatchPostAction::commit_no_stats()
/// }.boxed()
/// }
/// }
///
/// // We keep shared state for all handlers in the `BatchHandlerFactory`
/// struct MyBatchHandlerFactory(Arc<Mutex<i32>>);
///
/// // Now we implement the trait `BatchHandlerFactory` to control how
/// // our `BatchHandler`s are created
/// impl BatchHandlerFactory for MyBatchHandlerFactory {
/// fn handler(
/// &self,
/// _assignment: &HandlerAssignment,
/// ) -> BoxFuture<Result<Box<dyn BatchHandler>, Error>> {
/// async move {
/// Ok(Box::new(MyHandler(self.0.clone())) as Box<_>)
/// }.boxed()
/// }
/// }
///
/// let count = Arc::new(Mutex::new(0));
///
/// let factory = MyBatchHandlerFactory(count.clone());
/// ```
pub trait BatchHandlerFactory: Send + Sync +'static {
/// New `BatchHandler` was requested.
///
/// `assignment` defines for what event types and partitions the returned
/// `BatchHandler` will be used. `Nakadion` guarantees that this will stay true
/// over the whole lifetime of the `BatchHandler`.
///
/// Returning an `Error` aborts the `Consumer`.
///
/// It is up to the `BatchHandlerFactory` on whether it respects `assignment`.
fn handler<'a>(
&'a self,
assignment: &'a HandlerAssignment,
) -> BoxFuture<'a, Result<Box<dyn BatchHandler>, Error>>;
}
impl<T> BatchHandlerFactory for T
where
T: for<'a> Fn(&'a HandlerAssignment) -> BoxFuture<'a, Result<Box<dyn BatchHandler>, Error>>
+ Send
+ Sync
+'static,
{
fn handler<'a>(
&'a self,
assignment: &'a HandlerAssignment,
) -> BoxFuture<'a, Result<Box<dyn BatchHandler>, Error>> {
self(assignment)
}
} | event_type_partition.partition() | random_line_split |
mod.rs | //! Kit for creating a a handler for batches of events
//!
//! Start here if you want to implement a handler for processing of events
use std::fmt;
use std::time::{Duration, Instant};
pub use bytes::Bytes;
use futures::future::BoxFuture;
pub type BatchHandlerFuture<'a> = BoxFuture<'a, BatchPostAction>;
use crate::nakadi_types::{
event_type::EventTypeName,
partition::PartitionId,
subscription::{EventTypePartition, StreamId, SubscriptionCursor},
};
pub use crate::nakadi_types::Error;
mod typed;
pub use typed::*;
/// Information on the current batch passed to a `BatchHandler`.
///
/// The `frame_id` is monotonically increasing for each `BatchHandler`
/// within a stream (same `StreamId`)
/// as long as a dispatch strategy which keeps the ordering of
/// events is chosen. There may be gaps between the ids.
#[derive(Debug)]
#[non_exhaustive]
pub struct BatchMeta<'a> {
pub stream_id: StreamId,
pub cursor: &'a SubscriptionCursor,
/// Timestamp when the first byte was received
pub frame_started_at: Instant,
/// Timestamp when the frame was completed
pub frame_completed_at: Instant,
pub frame_id: usize,
pub n_events: usize,
}
/// Returned by a `BatchHandler` to tell `Nakadion`
/// how to continue.
#[derive(Debug, Clone)]
pub enum BatchPostAction {
/// Commit the batch
Commit(BatchStats),
/// Do not commit the batch and continue
///
/// Use if committed "manually" within the handler
DoNotCommit(BatchStats),
/// Abort the current stream and reconnect
AbortStream(String),
/// Abort the consumption and shut down
ShutDown(String),
}
impl BatchPostAction {
pub fn commit_no_stats() -> Self {
BatchPostAction::Commit(BatchStats::default())
}
pub fn commit(t_deserialize: Duration) -> Self {
BatchPostAction::Commit(BatchStats {
t_deserialize: Some(t_deserialize),
})
}
pub fn do_not_commit_no_stats() -> Self {
BatchPostAction::DoNotCommit(BatchStats::default())
}
pub fn do_not_commit(t_deserialize: Duration) -> Self {
BatchPostAction::DoNotCommit(BatchStats {
t_deserialize: Some(t_deserialize),
})
}
}
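// Sketch (not in the original source): a handler can report how long deserialization
// took by committing with stats instead of `commit_no_stats`. The function name is
// illustrative only.
#[allow(dead_code)]
fn commit_with_stats(deserialize_started: Instant) -> BatchPostAction {
    BatchPostAction::commit(deserialize_started.elapsed())
}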
/// Statistics on the processed batch
#[derive(Default, Debug, Clone, PartialEq, Eq)]
#[non_exhaustive]
pub struct BatchStats {
/// The time it took to deserialize the batch
pub t_deserialize: Option<Duration>,
}
/// Returned by a `BatchHandler` when queried
/// on inactivity.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum InactivityAnswer {
KeepMeAlive,
KillMe,
}
impl InactivityAnswer {
/// Returns `true` if the `BatchHandler` should be killed.
pub fn should_kill(self) -> bool {
self == InactivityAnswer::KillMe
}
/// Returns `true` if the `BatchHandler` should stay alive.
pub fn should_stay_alive(self) -> bool {
self == InactivityAnswer::KeepMeAlive
}
}
/// A handler that implements batch processing logic.
///
/// This trait will be called by Nakadion when a batch has to
/// be processed. The `BatchHandler` only receives an `EventType`
/// and a slice of bytes that contains the batch.
///
/// The `events` slice always contains a JSON encoded array of events.
///
/// # Hint
///
/// The `handle` method gets called on `&mut self`.
///
/// # Example
///
/// ```rust
/// use futures::FutureExt;
///
/// use nakadion::handler::{BatchHandler, BatchPostAction, BatchMeta, Bytes, BatchHandlerFuture};
/// use nakadion::nakadi_types::subscription::EventTypeName;
///
/// // Use a struct to maintain state
/// struct MyHandler {
/// pub count: i32,
/// }
///
/// // Implement the processing logic by implementing `BatchHandler`
/// impl BatchHandler for MyHandler {
/// fn handle(&mut self, _events: Bytes, _meta: BatchMeta) -> BatchHandlerFuture {
/// async move {
/// self.count += 1;
/// BatchPostAction::commit_no_stats()
/// }.boxed()
/// }
/// }
/// ```
pub trait BatchHandler: Send {
/// Handle a batch of bytes
fn handle<'a>(&'a mut self, events: Bytes, meta: BatchMeta<'a>) -> BatchHandlerFuture<'a>;
/// Periodically called if there were no events for a given time.
///
/// This method will only be called if the parameter `handler_inactivity_timeout_secs`
/// was set for the `Consumer`
fn on_inactive(
&mut self,
_inactive_for: Duration,
_last_activity: Instant,
) -> InactivityAnswer {
InactivityAnswer::KeepMeAlive
}
}
/// Simple wrapper for `BatchHandlers` from closures
pub struct HandlerFn<F>(pub F);
impl<F> BatchHandler for HandlerFn<F>
where
F: for<'a> FnMut(Bytes, BatchMeta<'a>) -> BatchHandlerFuture<'a> + Send,
{
fn handle<'a>(&'a mut self, events: Bytes, meta: BatchMeta<'a>) -> BatchHandlerFuture<'a> {
(self.0)(events, meta)
}
}
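// Sketch (not part of the original source): a plain `fn` with exactly the signature
// required above satisfies the bound, so it can be wrapped in `HandlerFn`. `ack_all`
// and `ack_all_handler` are illustrative names, not nakadion APIs.
#[allow(dead_code)]
fn ack_all<'a>(_events: Bytes, _meta: BatchMeta<'a>) -> BatchHandlerFuture<'a> {
    use futures::FutureExt;
    async move { BatchPostAction::commit_no_stats() }.boxed()
}
#[allow(dead_code)]
fn ack_all_handler() -> impl BatchHandler {
    HandlerFn(ack_all)
}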
/// Defines what a `BatchHandler` will receive.
///
/// This value should be the same for the whole lifetime of the
/// `BatchHandler`. "Should" because in the end it is the
/// `BatchHandlerFactory` which returns `BatchHandler`s. But it
/// is guaranteed that `Nakadion` will only pass events to a `BatchHandler`
/// as defined by the `DispatchStrategy`.
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum HandlerAssignment {
/// Everything can be passed to the `BatchHandler`.
Unspecified,
/// The `BatchHandler` will only receive events
/// of the given event type but from any partition.
EventType(EventTypeName),
/// The `BatchHandler` will only receive events
/// of the given event type on the given partition.
EventTypePartition(EventTypePartition),
}
impl HandlerAssignment {
pub fn | (&self) -> Option<&EventTypeName> {
self.event_type_and_partition().0
}
pub fn partition(&self) -> Option<&PartitionId> {
self.event_type_and_partition().1
}
pub fn event_type_and_partition(&self) -> (Option<&EventTypeName>, Option<&PartitionId>) {
match self {
HandlerAssignment::Unspecified => (None, None),
HandlerAssignment::EventType(event_type) => (Some(&event_type), None),
HandlerAssignment::EventTypePartition(ref etp) => {
(Some(etp.event_type()), Some(etp.partition()))
}
}
}
pub fn into_event_type_and_partition(self) -> (Option<EventTypeName>, Option<PartitionId>) {
match self {
HandlerAssignment::Unspecified => (None, None),
HandlerAssignment::EventType(event_type) => (Some(event_type), None),
HandlerAssignment::EventTypePartition(etp) => {
let (a, b) = etp.split();
(Some(a), Some(b))
}
}
}
}
impl fmt::Display for HandlerAssignment {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
HandlerAssignment::Unspecified => write!(f, "[unspecified]")?,
HandlerAssignment::EventType(ref event_type) => {
write!(f, "[event_type={}]", event_type)?
}
HandlerAssignment::EventTypePartition(ref event_type_partition) => write!(
f,
"[event_type={}, partition={}]",
event_type_partition.event_type(),
event_type_partition.partition()
)?,
}
Ok(())
}
}
/// A factory that creates `BatchHandler`s.
///
/// # Usage
///
/// A `BatchHandlerFactory` can be used in two ways:
///
/// * It does not contain any state it shares with the created `BatchHandler`s.
/// This is useful when incoming data is partitioned in a way that all
/// `BatchHandler`s act only on data that never appears on another partition.
///
/// * It contains state that is shared with the `BatchHandler`s. E.g. a cache
/// that contains data that can appear on other partitions.
/// # Example
///
/// ```rust
/// use std::sync::{Arc, Mutex};
/// use futures::{FutureExt, future::BoxFuture};
///
/// use nakadion::handler::*;
///
/// // Use a struct to maintain state
/// struct MyHandler(Arc<Mutex<i32>>);
///
/// // Implement the processing logic by implementing `BatchHandler`
/// impl BatchHandler for MyHandler {
/// fn handle(&mut self, _events: Bytes, _meta: BatchMeta) -> BatchHandlerFuture {
/// async move {
/// *self.0.lock().unwrap() += 1;
/// BatchPostAction::commit_no_stats()
/// }.boxed()
/// }
/// }
///
/// // We keep shared state for all handlers in the `BatchHandlerFactory`
/// struct MyBatchHandlerFactory(Arc<Mutex<i32>>);
///
/// // Now we implement the trait `BatchHandlerFactory` to control how
/// // our `BatchHandler`s are created
/// impl BatchHandlerFactory for MyBatchHandlerFactory {
/// fn handler(
/// &self,
/// _assignment: &HandlerAssignment,
/// ) -> BoxFuture<Result<Box<dyn BatchHandler>, Error>> {
/// async move {
/// Ok(Box::new(MyHandler(self.0.clone())) as Box<_>)
/// }.boxed()
/// }
/// }
///
/// let count = Arc::new(Mutex::new(0));
///
/// let factory = MyBatchHandlerFactory(count.clone());
/// ```
pub trait BatchHandlerFactory: Send + Sync + 'static {
/// New `BatchHandler` was requested.
///
/// `assignment` defines for what event types and partitions the returned
/// `BatchHandler` will be used. `Nakadion` guarantees that this will stay true
/// over the whole lifetime of the `BatchHandler`.
///
/// Returning an `Error` aborts the `Consumer`.
///
/// It is up to the `BatchHandlerFactory` whether it respects `assignment`.
fn handler<'a>(
&'a self,
assignment: &'a HandlerAssignment,
) -> BoxFuture<'a, Result<Box<dyn BatchHandler>, Error>>;
}
impl<T> BatchHandlerFactory for T
where
T: for<'a> Fn(&'a HandlerAssignment) -> BoxFuture<'a, Result<Box<dyn BatchHandler>, Error>>
+ Send
+ Sync
+ 'static,
{
fn handler<'a>(
&'a self,
assignment: &'a HandlerAssignment,
) -> BoxFuture<'a, Result<Box<dyn BatchHandler>, Error>> {
self(assignment)
}
}
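// Sketch (not part of the original source): thanks to the blanket impl above, a free
// function returning a boxed future can serve as a `BatchHandlerFactory`. `ack_all`
// refers to the illustrative handler sketched earlier in this file.
#[allow(dead_code)]
fn ack_all_factory(
    _assignment: &HandlerAssignment,
) -> BoxFuture<'_, Result<Box<dyn BatchHandler>, Error>> {
    use futures::FutureExt;
    async move { Ok(Box::new(HandlerFn(ack_all)) as Box<dyn BatchHandler>) }.boxed()
}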
| event_type | identifier_name |
lib.rs | use std::num::ParseIntError;
use std::{error, iter};
use std::fmt;
use serde_json;
use serde_with::{ serde_as, DefaultOnError };
use crate::ParseError::MissingNode;
use lazy_static::lazy_static; // 1.3.0
use regex::Regex;
use serde::{Deserializer, Deserialize, Serialize, de};
use serde_json::{Error, Value};
use web_sys::console;
use std::collections::HashMap;
use serde::export::PhantomData;
use crate::utils::set_panic_hook;
use std::collections::hash_map::RandomState;
// When the `wee_alloc` feature is enabled, use `wee_alloc` as the global
// allocator.
#[cfg(feature = "wee_alloc")]
#[global_allocator]
static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;
// Regexes
lazy_static! {
static ref COUNTRY_TAG: Regex = Regex::new(r"^[A-Z]{3}$").unwrap();
static ref PROVINCE_TAG: Regex = Regex::new(r"^[0-9]*$").unwrap();
}
#[wasm_bindgen]
extern {
fn alert(s: &str);
}
struct Product<'a> {
name: &'a str,
world_quantity: f64,
price: f64,
price_history: Vec<f64>,
// assert discovered good true
}
#[derive(Deserialize, Debug, PartialEq)]
pub struct Pop {
/// Presumably the money deposited in the national bank
#[serde(default)]
bank: f64,
/// Presumably the money on-hand
money: f64,
/// The pop size
size: i64,
/// The pop ID
id: i32,
}
impl Pop {
pub fn new(bank: f64, money: f64, size: i64, id: i32) -> Self {
Pop { bank, money, size, id }
}
}
#[serde_as]
#[derive(Deserialize, Debug, PartialEq)]
pub struct Province {
name: String,
#[serde(default)]
owner: Option<String>,
/// Small hack: make the remainder pops.
/// This, shockingly, actually works for any subfield we can think of,
/// so it's actually the magic backtracking we were looking for all along
#[serde(flatten)]
#[serde_as(as="HashMap<DefaultOnError, DefaultOnError>")]
pops: HashMap<String, SingleOrMany<Pop>>,
}
impl Province {
pub fn new(name: String, owner: Option<String>, pops: HashMap<String, SingleOrMany<Pop>, RandomState>) -> Self {
Province { name, owner, pops }
}
}
#[derive(Deserialize, Debug)]
struct Building {
#[serde(rename = "building")]
name: String,
money: f64,
}
#[derive(Deserialize, Debug)]
struct StateID {
// Name in a localization file
id: i32,
#[serde(rename = "type")]
state_type: i32,
}
/// A state owned by a country
#[derive(Deserialize, Debug)]
struct State {
#[serde(rename = "state_buildings", default)]
buildings: SingleOrMany<Building>,
// What are these?
#[serde(default)]
savings: f64,
#[serde(default)]
interest: f64,
id: StateID,
#[serde(rename = "provinces")]
province_ids: SingleOrMany<i32>,
}
#[derive(Deserialize, Debug)]
struct Country {
money: f64,
tax_base: f64,
// Don't count single-state countries rn
#[serde(rename="state", default)]
states: SingleOrMany<State>,
}
#[derive(Deserialize, Debug, PartialEq)]
#[serde(untagged)]
pub enum SingleOrMany<T> {
Single(T),
Many(Vec<T>),
None,
}
impl<T> Default for SingleOrMany<T> {
fn default() -> Self {
SingleOrMany::None
}
}
impl<T> SingleOrMany<T> {
// https://stackoverflow.com/a/30220832/998335
fn values(&self) -> Box<dyn Iterator<Item = &T> + '_> {
match self {
SingleOrMany::None => Box::new(iter::empty()),
SingleOrMany::Single(elem) => Box::new(iter::once(elem)),
SingleOrMany::Many(elems) => Box::new(elems.iter()),
}
}
}
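// Sketch (not in the original source): because of `#[serde(untagged)]`, a lone value
// and an array both deserialize into `SingleOrMany`, which is what lets the save's
// "one or many" entries work.
#[allow(dead_code)]
fn single_or_many_example() {
    let one: SingleOrMany<i32> = serde_json::from_str("5").unwrap();
    let many: SingleOrMany<i32> = serde_json::from_str("[1, 2]").unwrap();
    assert_eq!(one.values().count(), 1);
    assert_eq!(many.values().count(), 2);
}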
#[wasm_bindgen]
#[derive(Deserialize, Debug)]
pub struct Save {
#[serde(deserialize_with = "vicky_date_serialize_serde")]
date: NaiveDate,
#[serde(rename = "player")]
player_tag: String,
// USA: Country,
/// Hack:
/// we know we want all aliases that are country tags,
/// so we'll accept all all uppercase sequences of characters of size two or three
/// (26^2 + 26^3) = 18252. Not great. I actually tried this and it killed the compiler. Sad!
/// The problem is around line 1168 on serde-rs's de.rs. It does explicit checking, not pattern
/// matching against valid rust patterns (we could use that to our advantage as we did with the
/// PEG parser). Additionally, it wouldn't populate a hashmap like we want - just a vec.
/// This is surmountable (can infer country from other tags) but irrelevant because we can't actually do that.
/// Solution: create an artificial countries tag somewhere else to do what we want.
countries: HashMap<String, Country>,
/// Same hack as countries
provinces: HashMap<i32, Province>,
}
#[wasm_bindgen]
impl Save {
pub fn js_forex_position(&self) -> D3Node {
let mut generator = (0u64..);
let forex = self.forex_position();
D3Node::parent(generator.nth(0).unwrap(), "Forex".to_string(),
forex.iter().map(|(countryname, (treasury, statewealth))| {
D3Node::parent(generator.nth(0).unwrap(), countryname.to_string(),
vec![
D3Node::leaf(generator.nth(0).unwrap(), "Treasury".to_string(), *treasury),
D3Node::parent(generator.nth(0).unwrap(), "States".to_string(),
statewealth.iter().map(|(state_id, (factories, provinces))| {
D3Node::parent(generator.nth(0).unwrap(), state_id.to_string(), vec![
D3Node::parent(generator.nth(0).unwrap(), "Factories".to_string(), factories.iter().map(|(x, y)|D3Node::leaf(generator.nth(0).unwrap(), x.to_string(), *y)).collect()),
D3Node::parent(generator.nth(0).unwrap(), "Provinces".to_string(), provinces.iter().map(|(province, pop)| {
D3Node::parent(generator.nth(0).unwrap(), province.to_string(), pop.iter().map(|(title, wealth)| {
D3Node::leaf(generator.nth(0).unwrap(), title.to_string(), *wealth)
}).collect())
}).collect())
])
}).collect())
]
)
}).collect()
)
}
}
#[wasm_bindgen]
#[derive(Serialize, Clone, Debug)]
pub struct D3Node {
id: u64,
name: String,
#[serde(flatten)]
atom: D3Atomic,
}
impl D3Node {
// For tests
pub fn parent(id: u64, name: String, children: Vec<D3Node>) -> Self {
D3Node { id, name, atom: D3Atomic::Parent{ children } }
}
pub fn leaf(id: u64, name: String, atom: f64) -> Self {
D3Node { id, name, atom: D3Atomic::Leaf{ size: atom } }
}
pub fn atom(&self) -> &D3Atomic {
&self.atom
}
pub fn name(&self) -> &str {
&self.name
}
// Actually useful
pub fn children_value(&self) -> f64 {
match &self.atom {
D3Atomic::Parent { children } => children.iter().map(D3Node::children_value).sum(),
D3Atomic::Leaf { size: loc } => *loc,
}
}
pub fn cauterize(&self, depth: u32) -> D3Node {
if depth == 0 {
D3Node::leaf(self.id, self.name.to_string(), self.children_value())
} else {
match &self.atom {
D3Atomic::Parent { children } => {
// https://github.com/plouc/nivo/issues/942
// For now, remove anything < 1% of the total
let stream = children.iter().map(|x| x.cauterize(depth - 1)).collect::<Vec<D3Node>>();
let values = stream.iter().map(|x| x.children_value()).collect::<Vec<f64>>();
let total: f64 = values.iter().sum();
let mut keptTotal: f64 = 0.0;
let mut kept: Vec<D3Node> = stream.iter().enumerate().filter(|(idx, _)| values[*idx] > (total * 0.01)).map(|(idx, y)| {
keptTotal += values[idx];
y.clone()
}).collect();
// kept.push(D3Node::leaf(depth as u64 + 1 * keptTotal as u64, "Other".to_string(), keptTotal));
D3Node::parent(self.id, self.name.to_string(), kept)
}
// gdi I can't borrow anything 'cause of that one stupid int parse
D3Atomic::Leaf { size: loc } => D3Node::leaf(self.id, self.name.to_string(), *loc )
}
}
}
// Everything from the end of the keypath down to depth, as truncated
// For forex -> chi -> states -> etc
// keypath = [CHI], depth = 1 => chi at root, all of the states under it, and nothing else
fn subtree_for_node<T: AsRef<str>>(&self, key_path: &[T], depth: u32) -> Result<D3Node, String> {
match key_path.first() {
None => {
// Navigate down depth
Ok(self.cauterize(depth))
}
Some(name) => {
// Navigate down keypath
let name = name.as_ref();
match &self.atom {
D3Atomic::Parent {children: child} => {
match child.iter().find(|x| x.name.as_str() == name) {
Some(element) => element.subtree_for_node(&key_path[1..], depth),
None => Err(format!("Expected to find {} in {} (found {:?})", name, &self.name, child))
}
}
_ => Err(format!("Expected {} to be a parent", &self.name))
}
}
}
}
}
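// Sketch (not in the original source): `subtree_for_node` first walks the named key
// path, then truncates whatever lies below it via `cauterize`. The tree here is a
// hypothetical two-level example.
#[allow(dead_code)]
fn subtree_example() {
    let tree = D3Node::parent(
        0,
        "Forex".to_string(),
        vec![D3Node::parent(
            1,
            "CHI".to_string(),
            vec![D3Node::leaf(2, "Treasury".to_string(), 10.0)],
        )],
    );
    let sub = tree.subtree_for_node(&["CHI"], 1).unwrap();
    assert_eq!(sub.name(), "CHI");
}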
#[wasm_bindgen]
impl D3Node {
pub fn js_subtree_for_node(&self, key_path: JsValue, depth: u32) -> Result<JsValue, JsValue> {
let keypath = key_path.into_serde::<Vec<String>>().map_err(|x| JsValue::from(x.to_string()))?;
let subtree = self.subtree_for_node(&keypath, depth).map_err(|x| JsValue::from(x.to_string()))?;
JsValue::from_serde(&subtree).map_err(|x| JsValue::from(x.to_string()))
}
}
#[derive(Serialize, Clone, Debug)]
#[serde(untagged)]
pub enum D3Atomic {
Parent { children: Vec<D3Node> },
Leaf { size: f64 },
}
impl Save {
/// Just return country -> (treasury, state ID -> (factory name -> money, province name -> pop title -> wealth))
pub fn forex_position(&self) -> HashMap<&str, (f64, HashMap<i32, (HashMap<&str, f64>, HashMap<&str, HashMap<String, f64>>)>)> {
self.countries.iter().map(|(name, country)| {
(name.as_str(), (country.money, country.states.values()
.map(|state| {
(state.id.id, (
state.buildings.values().map(|building| (building.name.as_str(), building.money)).collect::<HashMap<&str, f64>>(),
state.province_ids.values()
.map(|x| self.provinces.get(x).unwrap())
.filter(|x| x.owner.as_ref().map(|unwrapper| unquote(unwrapper) == name).unwrap_or(false))
.map(|x| {
(x.name.as_str(), x.pops.iter()
.flat_map(|(title, pop)| {
pop.values().enumerate().map(move |(index, x)| (numerate(index, title.to_string()), x.bank + x.money))
})
.collect::<HashMap<String, f64>>())
}).collect::<HashMap<&str, HashMap<String, f64>>>()
))
}
).collect()))
}).collect()
}
}
fn numerate(index: usize, thing: String) -> String {
if index == 0 {
thing
} else {
thing + (index + 1).to_string().as_str()
}
}
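// Sketch: `numerate` disambiguates repeated pop entries of the same type within a
// province, e.g. a second "craftsmen" entry becomes "craftsmen2".
#[allow(dead_code)]
fn numerate_example() {
    assert_eq!(numerate(0, "craftsmen".to_string()), "craftsmen");
    assert_eq!(numerate(1, "craftsmen".to_string()), "craftsmen2");
}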
fn vicky_date_serialize_serde<'de, D>(
deserializer: D,
) -> Result<NaiveDate, D::Error>
where
D: Deserializer<'de>,
{
let s = String::deserialize(deserializer)?;
parse_victoria_date(&*s).map_err(serde::de::Error::custom)
}
#[derive(Debug, Eq, PartialEq, Clone)]
pub enum ParseError {
InvalidDate,
Integer(ParseIntError),
MissingNode,
}
impl fmt::Display for ParseError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
ParseError::MissingNode => write!(f, "Missing node"),
ParseError::InvalidDate =>
write!(f, "Invalid date"),
// The wrapped error contains additional information and is available
// via the source() method.
ParseError::Integer(ref e) =>
e.fmt(f)
//write!(f, "the provided string could not be parsed as int"),
}
}
}
impl error::Error for ParseError {
fn source(&self) -> Option<&(dyn error::Error + 'static)> {
match *self {
ParseError::InvalidDate | ParseError::MissingNode => None,
// The cause is the underlying implementation error type. Is implicitly
// cast to the trait object `&error::Error`. This works because the
// underlying type already implements the `Error` trait.
ParseError::Integer(ref e) => Some(e),
}
}
}
// Implement the conversion from `ParseIntError` to `ParseError`.
// This will be automatically called by `?` if a `ParseIntError`
// needs to be converted into a `ParseError`.
impl From<ParseIntError> for ParseError {
fn from(err: ParseIntError) -> ParseError {
ParseError::Integer(err)
}
}
// Until rust gets negative slice semantics, have to make do with this
pub fn unquote(thing: &str) -> &str {
assert_eq!(thing.chars().nth(0), Some('"'));
assert_eq!(thing.chars().nth(thing.len() - 1), Some('"'));
return &thing[1..= thing.len() - 2];
}
pub fn parse_victoria_date(text: &str) -> Result<NaiveDate, ParseError> {
let text = unquote(text);
let textiter = text.char_indices();
let dots: Vec<usize> = textiter.filter_map(|(x, y)| match y {
'.' => Some(x),
_ => None,
}).take(2).collect();
match (text[0..dots[0]].parse(),
text[(dots[0] + 1)..dots[1]].parse(),
text[(dots[1] + 1)..].parse(),
) {
(Ok(y), Ok(m), Ok(d)) => {
match NaiveDate::from_ymd_opt(y, m, d) {
Some(date) => Ok(date),
None => Err(ParseError::InvalidDate),
}
},
(y, m, d) => {
Err([y.err(), m.err(), d.err()]
.iter()
.find_map(|x| x.clone())
.map_or(ParseError::InvalidDate, |x| ParseError::Integer(x)))
},
}
}
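// Sketch (not in the original source): dates in the save file are quoted, e.g.
// "1836.1.1", which is why `unquote` runs before the split on '.'.
#[allow(dead_code)]
fn parse_date_example() {
    let date = parse_victoria_date("\"1836.1.1\"").unwrap();
    assert_eq!(date, NaiveDate::from_ymd_opt(1836, 1, 1).unwrap());
}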
impl Save {
pub fn new(list: Node) -> Result<Save, Error> {
serde_json::from_value(list.to_json())
}
}
// https://stackoverflow.com/questions/32571441/what-is-the-difference-between-storing-a-vec-vs-a-slice
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum Node<'a> {
Line((&'a str, Vec<Node<'a >>)),
SingleElementLine((&'a str, &'a str)),
List(Vec<Node<'a >>),
Leaf(&'a str),
}
impl<'a> Node<'a> {
fn insert_or_listify(name: &'a str, object: &serde_json::Value, map: &mut serde_json::Map<String, serde_json::Value>, seen: &mut Vec<&'a str>) {
if let Some(prior) = map.get(name) {
// if we already have an entry in the map for this element,
// convert it to a list of this element with the name as a key
// for now, means we can't invert unless we make this nicer
if seen.contains(&name) {
// append to list
if let Some(serde_json::Value::Array(elements)) = map.get_mut(name) {
elements.push(object.clone());
} else {
unreachable!()
}
} else {
// create list
seen.push(name);
map.insert(name.to_string(), serde_json::Value::Array(vec![prior.clone(), object.clone()]));
}
} else {
map.insert(name.to_string(), object.clone());
}
}
/// In-place modify to be parseable.
/// See the comment above for countries for rationale.
/// Call on root.
pub fn raise(&mut self) {
if let Node::List(nodes) = self {
// Get the first country index
for (name, tag) in [("provinces", &*PROVINCE_TAG), ("countries", &*COUNTRY_TAG)].iter() {
if let Some(country_index) = nodes.iter().position(|x| x.is_matching(tag)) {
// Drain all countries
let country_list: Vec<Node> = nodes.drain_filter(|x| x.is_matching(tag)).collect();
nodes.insert(country_index, Node::Line((name, country_list)));
}
}
}
}
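// Sketch (not in the original source): before `raise`, country entries such as "ENG"
// sit directly at the top level; afterwards they are grouped under an artificial
// "countries" node so serde can read them as a map.
#[allow(dead_code)]
fn raise_example() {
    let mut root = Node::List(vec![
        Node::SingleElementLine(("date", "\"1836.1.1\"")),
        Node::Line(("ENG", vec![Node::SingleElementLine(("money", "1000.0"))])),
    ]);
    root.raise();
    // root now contains a Line named "countries" wrapping the ENG entry.
}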
fn is_matching(&self, re: &Regex) -> bool {
match self {
Node::Line((name, _)) => re.is_match(name),
_ => false,
}
}
/// convert this node to serde's json
pub fn to_json(&self) -> serde_json::Value {
match self {
Node::Line((_, arr)) | Node::List(arr) => {
// Object if any element has a child
// List if none do
// Undefined if both
if let Some(thing) = arr.first() {
match thing {
// List
Node::Leaf(_) => serde_json::Value::Array(arr.iter().map(|x| x.to_json()).collect()),
// Object
_ => {
let mut map = serde_json::Map::new();
let mut stuff = vec![];
for element in arr.iter() {
match element {
Node::Line((name, innerLineItems)) => {
| use peg;
use chrono::NaiveDate; | random_line_split |
|
lib.rs | id }
}
}
#[serde_as]
#[derive(Deserialize, Debug, PartialEq)]
pub struct Province {
name: String,
#[serde(default)]
owner: Option<String>,
/// Small hack: make the remainder pops.
/// This, shockingly, actually works for any subfield we can think of,
/// so it's actually the magic backtracking we were looking for all along
#[serde(flatten)]
#[serde_as(as="HashMap<DefaultOnError, DefaultOnError>")]
pops: HashMap<String, SingleOrMany<Pop>>,
}
impl Province {
pub fn new(name: String, owner: Option<String>, pops: HashMap<String, SingleOrMany<Pop>, RandomState>) -> Self {
Province { name, owner, pops }
}
}
#[derive(Deserialize, Debug)]
struct Building {
#[serde(rename = "building")]
name: String,
money: f64,
}
#[derive(Deserialize, Debug)]
struct StateID {
// Name in a localization file
id: i32,
#[serde(rename = "type")]
state_type: i32,
}
/// A state owned by a country
#[derive(Deserialize, Debug)]
struct State {
#[serde(rename = "state_buildings", default)]
buildings: SingleOrMany<Building>,
// What are these?
#[serde(default)]
savings: f64,
#[serde(default)]
interest: f64,
id: StateID,
#[serde(rename = "provinces")]
province_ids: SingleOrMany<i32>,
}
#[derive(Deserialize, Debug)]
struct Country {
money: f64,
tax_base: f64,
// Don't count single-state countries rn
#[serde(rename="state", default)]
states: SingleOrMany<State>,
}
#[derive(Deserialize, Debug, PartialEq)]
#[serde(untagged)]
pub enum SingleOrMany<T> {
Single(T),
Many(Vec<T>),
None,
}
impl<T> Default for SingleOrMany<T> {
fn default() -> Self {
SingleOrMany::None
}
}
impl<T> SingleOrMany<T> {
// https://stackoverflow.com/a/30220832/998335
fn values(&self) -> Box<dyn Iterator<Item = &T> + '_> {
match self {
SingleOrMany::None => Box::new(iter::empty()),
SingleOrMany::Single(elem) => Box::new(iter::once(elem)),
SingleOrMany::Many(elems) => Box::new(elems.iter()),
}
}
}
#[wasm_bindgen]
#[derive(Deserialize, Debug)]
pub struct Save {
#[serde(deserialize_with = "vicky_date_serialize_serde")]
date: NaiveDate,
#[serde(rename = "player")]
player_tag: String,
// USA: Country,
/// Hack:
/// we know we want all aliases that are country tags,
/// so we'll accept all uppercase sequences of characters of size two or three
/// (26^2 + 26^3) = 18252. Not great. I actually tried this and it killed the compiler. Sad!
/// The problem is around line 1168 on serde-rs's de.rs. It does explicit checking, not pattern
/// matching against valid rust patterns (we could use that to our advantage as we did with the
/// PEG parser). Additionally, it wouldn't populate a hashmap like we want - just a vec.
/// This is surmountable (can infer country from other tags) but irrelevant because we can't actually do that.
/// Solution: create an artificial countries tag somewhere else to do what we want.
countries: HashMap<String, Country>,
/// Same hack as countries
provinces: HashMap<i32, Province>,
}
#[wasm_bindgen]
impl Save {
pub fn js_forex_position(&self) -> D3Node {
let mut generator = (0u64..);
let forex = self.forex_position();
D3Node::parent(generator.nth(0).unwrap(), "Forex".to_string(),
forex.iter().map(|(countryname, (treasury, statewealth))| {
D3Node::parent(generator.nth(0).unwrap(), countryname.to_string(),
vec![
D3Node::leaf(generator.nth(0).unwrap(), "Treasury".to_string(), *treasury),
D3Node::parent(generator.nth(0).unwrap(), "States".to_string(),
statewealth.iter().map(|(state_id, (factories, provinces))| {
D3Node::parent(generator.nth(0).unwrap(), state_id.to_string(), vec![
D3Node::parent(generator.nth(0).unwrap(), "Factories".to_string(), factories.iter().map(|(x, y)|D3Node::leaf(generator.nth(0).unwrap(), x.to_string(), *y)).collect()),
D3Node::parent(generator.nth(0).unwrap(), "Provinces".to_string(), provinces.iter().map(|(province, pop)| {
D3Node::parent(generator.nth(0).unwrap(), province.to_string(), pop.iter().map(|(title, wealth)| {
D3Node::leaf(generator.nth(0).unwrap(), title.to_string(), *wealth)
}).collect())
}).collect())
])
}).collect())
]
)
}).collect()
)
}
}
#[wasm_bindgen]
#[derive(Serialize, Clone, Debug)]
pub struct D3Node {
id: u64,
name: String,
#[serde(flatten)]
atom: D3Atomic,
}
impl D3Node {
// For tests
pub fn parent(id: u64, name: String, children: Vec<D3Node>) -> Self {
D3Node { id, name, atom: D3Atomic::Parent{ children } }
}
pub fn leaf(id: u64, name: String, atom: f64) -> Self {
D3Node { id, name, atom: D3Atomic::Leaf{ size: atom } }
}
pub fn atom(&self) -> &D3Atomic {
&self.atom
}
pub fn name(&self) -> &str {
&self.name
}
// Actually useful
pub fn children_value(&self) -> f64 {
match &self.atom {
D3Atomic::Parent { children } => children.iter().map(D3Node::children_value).sum(),
D3Atomic::Leaf { size: loc } => *loc,
}
}
pub fn cauterize(&self, depth: u32) -> D3Node {
if depth == 0 {
D3Node::leaf(self.id, self.name.to_string(), self.children_value())
} else {
match &self.atom {
D3Atomic::Parent { children } => {
// https://github.com/plouc/nivo/issues/942
// For now, remove anything < 1% of the total
let stream = children.iter().map(|x| x.cauterize(depth - 1)).collect::<Vec<D3Node>>();
let values = stream.iter().map(|x| x.children_value()).collect::<Vec<f64>>();
let total: f64 = values.iter().sum();
let mut keptTotal: f64 = 0.0;
let mut kept: Vec<D3Node> = stream.iter().enumerate().filter(|(idx, _)| values[*idx] > (total * 0.01)).map(|(idx, y)| {
keptTotal += values[idx];
y.clone()
}).collect();
// kept.push(D3Node::leaf(depth as u64 + 1 * keptTotal as u64, "Other".to_string(), keptTotal));
D3Node::parent(self.id, self.name.to_string(), kept)
}
// gdi I can't borrow anything 'cause of that one stupid int parse
D3Atomic::Leaf { size: loc } => D3Node::leaf(self.id, self.name.to_string(), *loc )
}
}
}
// Everything from the end of the keypath down to depth, as truncated
// For forex -> chi -> states -> etc
// keypath = [CHI], depth = 1 => chi at root, all of the states under it, and nothing else
fn subtree_for_node<T: AsRef<str>>(&self, key_path: &[T], depth: u32) -> Result<D3Node, String> {
match key_path.first() {
None => {
// Navigate down depth
Ok(self.cauterize(depth))
}
Some(name) => {
// Navigate down keypath
let name = name.as_ref();
match &self.atom {
D3Atomic::Parent {children: child} => {
match child.iter().find(|x| x.name.as_str() == name) {
Some(element) => element.subtree_for_node(&key_path[1..], depth),
None => Err(format!("Expected to find {} in {} (found {:?})", name, &self.name, child))
}
}
_ => Err(format!("Expected {} to be a parent", &self.name))
}
}
}
}
}
#[wasm_bindgen]
impl D3Node {
pub fn js_subtree_for_node(&self, key_path: JsValue, depth: u32) -> Result<JsValue, JsValue> {
let keypath = key_path.into_serde::<Vec<String>>().map_err(|x| JsValue::from(x.to_string()))?;
let subtree = self.subtree_for_node(&keypath, depth).map_err(|x| JsValue::from(x.to_string()))?;
JsValue::from_serde(&subtree).map_err(|x| JsValue::from(x.to_string()))
}
}
#[derive(Serialize, Clone, Debug)]
#[serde(untagged)]
pub enum D3Atomic {
Parent { children: Vec<D3Node> },
Leaf { size: f64 },
}
impl Save {
/// Just return country -> (treasury, state ID -> (factory name -> money, province name -> pop title -> wealth))
pub fn forex_position(&self) -> HashMap<&str, (f64, HashMap<i32, (HashMap<&str, f64>, HashMap<&str, HashMap<String, f64>>)>)> {
self.countries.iter().map(|(name, country)| {
(name.as_str(), (country.money, country.states.values()
.map(|state| {
(state.id.id, (
state.buildings.values().map(|building| (building.name.as_str(), building.money)).collect::<HashMap<&str, f64>>(),
state.province_ids.values()
.map(|x| self.provinces.get(x).unwrap())
.filter(|x| x.owner.as_ref().map(|unwrapper| unquote(unwrapper) == name).unwrap_or(false))
.map(|x| {
(x.name.as_str(), x.pops.iter()
.flat_map(|(title, pop)| {
pop.values().enumerate().map(move |(index, x)| (numerate(index, title.to_string()), x.bank + x.money))
})
.collect::<HashMap<String, f64>>())
}).collect::<HashMap<&str, HashMap<String, f64>>>()
))
}
).collect()))
}).collect()
}
}
fn numerate(index: usize, thing: String) -> String {
if index == 0 {
thing
} else {
thing + (index + 1).to_string().as_str()
}
}
fn vicky_date_serialize_serde<'de, D>(
deserializer: D,
) -> Result<NaiveDate, D::Error>
where
D: Deserializer<'de>,
{
let s = String::deserialize(deserializer)?;
parse_victoria_date(&*s).map_err(serde::de::Error::custom)
}
#[derive(Debug, Eq, PartialEq, Clone)]
pub enum ParseError {
InvalidDate,
Integer(ParseIntError),
MissingNode,
}
impl fmt::Display for ParseError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
ParseError::MissingNode => write!(f, "Missing node"),
ParseError::InvalidDate =>
write!(f, "Invalid date"),
// The wrapped error contains additional information and is available
// via the source() method.
ParseError::Integer(ref e) =>
e.fmt(f)
//write!(f, "the provided string could not be parsed as int"),
}
}
}
impl error::Error for ParseError {
fn source(&self) -> Option<&(dyn error::Error + 'static)> {
match *self {
ParseError::InvalidDate | ParseError::MissingNode => None,
// The cause is the underlying implementation error type. Is implicitly
// cast to the trait object `&error::Error`. This works because the
// underlying type already implements the `Error` trait.
ParseError::Integer(ref e) => Some(e),
}
}
}
// Implement the conversion from `ParseIntError` to `ParseError`.
// This will be automatically called by `?` if a `ParseIntError`
// needs to be converted into a `ParseError`.
impl From<ParseIntError> for ParseError {
fn from(err: ParseIntError) -> ParseError {
ParseError::Integer(err)
}
}
// Until rust gets negative slice semantics, have to make do with this
pub fn unquote(thing: &str) -> &str {
assert_eq!(thing.chars().nth(0), Some('"'));
assert_eq!(thing.chars().nth(thing.len() - 1), Some('"'));
return &thing[1..= thing.len() - 2];
}
pub fn parse_victoria_date(text: &str) -> Result<NaiveDate, ParseError> {
let text = unquote(text);
let textiter = text.char_indices();
let dots: Vec<usize> = textiter.filter_map(|(x, y)| match y {
'.' => Some(x),
_ => None,
}).take(2).collect();
match (text[0..dots[0]].parse(),
text[(dots[0] + 1)..dots[1]].parse(),
text[(dots[1] + 1)..].parse(),
) {
(Ok(y), Ok(m), Ok(d)) => {
match NaiveDate::from_ymd_opt(y, m, d) {
Some(date) => Ok(date),
None => Err(ParseError::InvalidDate),
}
},
(y, m, d) => {
Err([y.err(), m.err(), d.err()]
.iter()
.find_map(|x| x.clone())
.map_or(ParseError::InvalidDate, |x| ParseError::Integer(x)))
},
}
}
impl Save {
pub fn new(list: Node) -> Result<Save, Error> {
serde_json::from_value(list.to_json())
}
}
// https://stackoverflow.com/questions/32571441/what-is-the-difference-between-storing-a-vec-vs-a-slice
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum Node<'a> {
Line((&'a str, Vec<Node<'a >>)),
SingleElementLine((&'a str, &'a str)),
List(Vec<Node<'a >>),
Leaf(&'a str),
}
impl<'a> Node<'a> {
fn insert_or_listify(name: &'a str, object: &serde_json::Value, map: &mut serde_json::Map<String, serde_json::Value>, seen: &mut Vec<&'a str>) {
if let Some(prior) = map.get(name) {
// if we already have an entry in the map for this element,
// convert it to a list of this element with the name as a key
// for now, means we can't invert unless we make this nicer
if seen.contains(&name) {
// append to list
if let Some(serde_json::Value::Array(elements)) = map.get_mut(name) {
elements.push(object.clone());
} else {
unreachable!()
}
} else {
// create list
seen.push(name);
map.insert(name.to_string(), serde_json::Value::Array(vec![prior.clone(), object.clone()]));
}
} else {
map.insert(name.to_string(), object.clone());
}
}
/// In-place modify to be parseable.
/// See the comment above for countries for rationale.
/// Call on root.
pub fn raise(&mut self) {
if let Node::List(nodes) = self {
// Get the first country index
for (name, tag) in [("provinces", &*PROVINCE_TAG), ("countries", &*COUNTRY_TAG)].iter() {
if let Some(country_index) = nodes.iter().position(|x| x.is_matching(tag)) {
// Drain all countries
let country_list: Vec<Node> = nodes.drain_filter(|x| x.is_matching(tag)).collect();
nodes.insert(country_index, Node::Line((name, country_list)));
}
}
}
}
fn is_matching(&self, re: &Regex) -> bool |
/// convert this node to serde's json
pub fn to_json(&self) -> serde_json::Value {
match self {
Node::Line((_, arr)) | Node::List(arr) => {
// Object if any element has a child
// List if none do
// Undefined if both
if let Some(thing) = arr.first() {
match thing {
// List
Node::Leaf(_) => serde_json::Value::Array(arr.iter().map(|x| x.to_json()).collect()),
// Object
_ => {
let mut map = serde_json::Map::new();
let mut stuff = vec![];
for element in arr.iter() {
match element {
Node::Line((name, innerLineItems)) => {
Node::insert_or_listify(name, &Node::List(innerLineItems.clone()).to_json(), &mut map, &mut stuff);
}
l @ Node::List(_) => {
Node::insert_or_listify("list", &l.to_json(), &mut map, &mut stuff);
}
Node::SingleElementLine((name, object)) => {
Node::insert_or_listify(name, &Node::Leaf(object).to_json(), &mut map, &mut stuff);
}
Node::Leaf(name) => {
//log!("{}", name);
// | {
match self {
Node::Line((name, _)) => re.is_match(name),
_ => false,
}
} | identifier_body |
lib.rs | id }
}
}
#[serde_as]
#[derive(Deserialize, Debug, PartialEq)]
pub struct Province {
name: String,
#[serde(default)]
owner: Option<String>,
/// Small hack: make the remainder pops.
/// This, shockingly, actually works for any subfield we can think of,
/// so it's actually the magic backtracking we were looking for all along
#[serde(flatten)]
#[serde_as(as="HashMap<DefaultOnError, DefaultOnError>")]
pops: HashMap<String, SingleOrMany<Pop>>,
}
impl Province {
pub fn new(name: String, owner: Option<String>, pops: HashMap<String, SingleOrMany<Pop>, RandomState>) -> Self {
Province { name, owner, pops }
}
}
#[derive(Deserialize, Debug)]
struct Building {
#[serde(rename = "building")]
name: String,
money: f64,
}
#[derive(Deserialize, Debug)]
struct StateID {
// Name in a localization file
id: i32,
#[serde(rename = "type")]
state_type: i32,
}
/// A state owned by a country
#[derive(Deserialize, Debug)]
struct State {
#[serde(rename = "state_buildings", default)]
buildings: SingleOrMany<Building>,
// What are these?
#[serde(default)]
savings: f64,
#[serde(default)]
interest: f64,
id: StateID,
#[serde(rename = "provinces")]
province_ids: SingleOrMany<i32>,
}
#[derive(Deserialize, Debug)]
struct Country {
money: f64,
tax_base: f64,
// Don't count single-state countries rn
#[serde(rename="state", default)]
states: SingleOrMany<State>,
}
#[derive(Deserialize, Debug, PartialEq)]
#[serde(untagged)]
pub enum SingleOrMany<T> {
Single(T),
Many(Vec<T>),
None,
}
impl<T> Default for SingleOrMany<T> {
fn default() -> Self {
SingleOrMany::None
}
}
impl<T> SingleOrMany<T> {
// https://stackoverflow.com/a/30220832/998335
fn | (&self) -> Box<dyn Iterator<Item = &T> + '_> {
match self {
SingleOrMany::None => Box::new(iter::empty()),
SingleOrMany::Single(elem) => Box::new(iter::once(elem)),
SingleOrMany::Many(elems) => Box::new(elems.iter()),
}
}
}
#[wasm_bindgen]
#[derive(Deserialize, Debug)]
pub struct Save {
#[serde(deserialize_with = "vicky_date_serialize_serde")]
date: NaiveDate,
#[serde(rename = "player")]
player_tag: String,
// USA: Country,
/// Hack:
/// we know we want all aliases that are country tags,
/// so we'll accept all uppercase sequences of characters of size two or three
/// (26^2 + 26^3) = 18252. Not great. I actually tried this and it killed the compiler. Sad!
/// The problem is around line 1168 on serde-rs's de.rs. It does explicit checking, not pattern
/// matching against valid rust patterns (we could use that to our advantage as we did with the
/// PEG parser). Additionally, it wouldn't populate a hashmap like we want - just a vec.
/// This is surmountable (can infer country from other tags) but irrelevant because we can't actually do that.
/// Solution: create an artificial countries tag somewhere else to do what we want.
countries: HashMap<String, Country>,
/// Same hack as countries
provinces: HashMap<i32, Province>,
}
#[wasm_bindgen]
impl Save {
pub fn js_forex_position(&self) -> D3Node {
let mut generator = (0u64..);
let forex = self.forex_position();
D3Node::parent(generator.nth(0).unwrap(), "Forex".to_string(),
forex.iter().map(|(countryname, (treasury, statewealth))| {
D3Node::parent(generator.nth(0).unwrap(), countryname.to_string(),
vec![
D3Node::leaf(generator.nth(0).unwrap(), "Treasury".to_string(), *treasury),
D3Node::parent(generator.nth(0).unwrap(), "States".to_string(),
statewealth.iter().map(|(state_id, (factories, provinces))| {
D3Node::parent(generator.nth(0).unwrap(), state_id.to_string(), vec![
D3Node::parent(generator.nth(0).unwrap(), "Factories".to_string(), factories.iter().map(|(x, y)|D3Node::leaf(generator.nth(0).unwrap(), x.to_string(), *y)).collect()),
D3Node::parent(generator.nth(0).unwrap(), "Provinces".to_string(), provinces.iter().map(|(province, pop)| {
D3Node::parent(generator.nth(0).unwrap(), province.to_string(), pop.iter().map(|(title, wealth)| {
D3Node::leaf(generator.nth(0).unwrap(), title.to_string(), *wealth)
}).collect())
}).collect())
])
}).collect())
]
)
}).collect()
)
}
}
#[wasm_bindgen]
#[derive(Serialize, Clone, Debug)]
pub struct D3Node {
id: u64,
name: String,
#[serde(flatten)]
atom: D3Atomic,
}
impl D3Node {
// For tests
pub fn parent(id: u64, name: String, children: Vec<D3Node>) -> Self {
D3Node { id, name, atom: D3Atomic::Parent{ children } }
}
pub fn leaf(id: u64, name: String, atom: f64) -> Self {
D3Node { id, name, atom: D3Atomic::Leaf{ size: atom } }
}
pub fn atom(&self) -> &D3Atomic {
&self.atom
}
pub fn name(&self) -> &str {
&self.name
}
// Actually useful
pub fn children_value(&self) -> f64 {
match &self.atom {
D3Atomic::Parent { children } => children.iter().map(D3Node::children_value).sum(),
D3Atomic::Leaf { size: loc } => *loc,
}
}
pub fn cauterize(&self, depth: u32) -> D3Node {
if depth == 0 {
D3Node::leaf(self.id, self.name.to_string(), self.children_value())
} else {
match &self.atom {
D3Atomic::Parent { children } => {
// https://github.com/plouc/nivo/issues/942
// For now, remove anything < 1% of the total
let stream = children.iter().map(|x| x.cauterize(depth - 1)).collect::<Vec<D3Node>>();
let values = stream.iter().map(|x| x.children_value()).collect::<Vec<f64>>();
let total: f64 = values.iter().sum();
let mut keptTotal: f64 = 0.0;
let mut kept: Vec<D3Node> = stream.iter().enumerate().filter(|(idx, _)| values[*idx] > (total * 0.01)).map(|(idx, y)| {
keptTotal += values[idx];
y.clone()
}).collect();
// kept.push(D3Node::leaf(depth as u64 + 1 * keptTotal as u64, "Other".to_string(), keptTotal));
D3Node::parent(self.id, self.name.to_string(), kept)
}
// gdi I can't borrow anything 'cause of that one stupid int parse
D3Atomic::Leaf { size: loc } => D3Node::leaf(self.id, self.name.to_string(), *loc )
}
}
}
// Everything from the end of the keypath down to depth, as truncated
// For forex -> chi -> states -> etc
// keypath = [CHI], depth = 1 => chi at root, all of the states under it, and nothing else
fn subtree_for_node<T: AsRef<str>>(&self, key_path: &[T], depth: u32) -> Result<D3Node, String> {
match key_path.first() {
None => {
// Navigate down depth
Ok(self.cauterize(depth))
}
Some(name) => {
// Navigate down keypath
let name = name.as_ref();
match &self.atom {
D3Atomic::Parent {children: child} => {
match child.iter().find(|x| x.name.as_str() == name) {
Some(element) => element.subtree_for_node(&key_path[1..], depth),
None => Err(format!("Expected to find {} in {} (found {:?})", name, &self.name, child))
}
}
_ => Err(format!("Expected {} to be a parent", &self.name))
}
}
}
}
}
#[wasm_bindgen]
impl D3Node {
pub fn js_subtree_for_node(&self, key_path: JsValue, depth: u32) -> Result<JsValue, JsValue> {
let keypath = key_path.into_serde::<Vec<String>>().map_err(|x| JsValue::from(x.to_string()))?;
let subtree = self.subtree_for_node(&keypath, depth).map_err(|x| JsValue::from(x.to_string()))?;
JsValue::from_serde(&subtree).map_err(|x| JsValue::from(x.to_string()))
}
}
#[derive(Serialize, Clone, Debug)]
#[serde(untagged)]
pub enum D3Atomic {
Parent { children: Vec<D3Node> },
Leaf { size: f64 },
}
impl Save {
/// Just return country -> (treasury, state ID -> (factory name -> money, province name -> pop title -> wealth))
pub fn forex_position(&self) -> HashMap<&str, (f64, HashMap<i32, (HashMap<&str, f64>, HashMap<&str, HashMap<String, f64>>)>)> {
self.countries.iter().map(|(name, country)| {
(name.as_str(), (country.money, country.states.values()
.map(|state| {
(state.id.id, (
state.buildings.values().map(|building| (building.name.as_str(), building.money)).collect::<HashMap<&str, f64>>(),
state.province_ids.values()
.map(|x| self.provinces.get(x).unwrap())
.filter(|x| x.owner.as_ref().map(|unwrapper| unquote(unwrapper) == name).unwrap_or(false))
.map(|x| {
(x.name.as_str(), x.pops.iter()
.flat_map(|(title, pop)| {
pop.values().enumerate().map(move |(index, x)| (numerate(index, title.to_string()), x.bank + x.money))
})
.collect::<HashMap<String, f64>>())
}).collect::<HashMap<&str, HashMap<String, f64>>>()
))
}
).collect()))
}).collect()
}
}
fn numerate(index: usize, thing: String) -> String {
if index == 0 {
thing
} else {
thing + (index + 1).to_string().as_str()
}
}
fn vicky_date_serialize_serde<'de, D>(
deserializer: D,
) -> Result<NaiveDate, D::Error>
where
D: Deserializer<'de>,
{
let s = String::deserialize(deserializer)?;
parse_victoria_date(&*s).map_err(serde::de::Error::custom)
}
#[derive(Debug, Eq, PartialEq, Clone)]
pub enum ParseError {
InvalidDate,
Integer(ParseIntError),
MissingNode,
}
impl fmt::Display for ParseError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
ParseError::MissingNode => write!(f, "Missing node"),
ParseError::InvalidDate =>
write!(f, "Invalid date"),
// The wrapped error contains additional information and is available
// via the source() method.
ParseError::Integer(ref e) =>
e.fmt(f)
//write!(f, "the provided string could not be parsed as int"),
}
}
}
impl error::Error for ParseError {
fn source(&self) -> Option<&(dyn error::Error + 'static)> {
match *self {
ParseError::InvalidDate | ParseError::MissingNode => None,
// The cause is the underlying implementation error type. Is implicitly
// cast to the trait object `&error::Error`. This works because the
// underlying type already implements the `Error` trait.
ParseError::Integer(ref e) => Some(e),
}
}
}
// Implement the conversion from `ParseIntError` to `ParseError`.
// This will be automatically called by `?` if a `ParseIntError`
// needs to be converted into a `ParseError`.
impl From<ParseIntError> for ParseError {
fn from(err: ParseIntError) -> ParseError {
ParseError::Integer(err)
}
}
// Until rust gets negative slice semantics, have to make do with this
pub fn unquote(thing: &str) -> &str {
assert_eq!(thing.chars().nth(0), Some('"'));
assert_eq!(thing.chars().nth(thing.len() - 1), Some('"'));
return &thing[1..= thing.len() - 2];
}
pub fn parse_victoria_date(text: &str) -> Result<NaiveDate, ParseError> {
let text = unquote(text);
let textiter = text.char_indices();
let dots: Vec<usize> = textiter.filter_map(|(x, y)| match y {
'.' => Some(x),
_ => None,
}).take(2).collect();
match (text[0..dots[0]].parse(),
text[(dots[0] + 1)..dots[1]].parse(),
text[(dots[1] + 1)..].parse(),
) {
(Ok(y), Ok(m), Ok(d)) => {
match NaiveDate::from_ymd_opt(y, m, d) {
Some(date) => Ok(date),
None => Err(ParseError::InvalidDate),
}
},
(y, m, d) => {
Err([y.err(), m.err(), d.err()]
.iter()
.find_map(|x| x.clone())
.map_or(ParseError::InvalidDate, |x| ParseError::Integer(x)))
},
}
}
impl Save {
pub fn new(list: Node) -> Result<Save, Error> {
serde_json::from_value(list.to_json())
}
}
// https://stackoverflow.com/questions/32571441/what-is-the-difference-between-storing-a-vec-vs-a-slice
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum Node<'a> {
Line((&'a str, Vec<Node<'a >>)),
SingleElementLine((&'a str, &'a str)),
List(Vec<Node<'a >>),
Leaf(&'a str),
}
impl<'a> Node<'a> {
fn insert_or_listify(name: &'a str, object: &serde_json::Value, map: &mut serde_json::Map<String, serde_json::Value>, seen: &mut Vec<&'a str>) {
if let Some(prior) = map.get(name) {
// if we already have an entry in the map for this element,
// convert it to a list of this element with the name as a key
// for now, means we can't invert unless we make this nicer
if seen.contains(&name) {
// append to list
if let Some(serde_json::Value::Array(elements)) = map.get_mut(name) {
elements.push(object.clone());
} else {
unreachable!()
}
} else {
// create list
seen.push(name);
map.insert(name.to_string(), serde_json::Value::Array(vec![prior.clone(), object.clone()]));
}
} else {
map.insert(name.to_string(), object.clone());
}
}
/// In-place modify to be parseable.
/// See the comment above for countries for rationale.
/// Call on root.
pub fn raise(&mut self) {
if let Node::List(nodes) = self {
// Get the first country index
for (name, tag) in [("provinces", &*PROVINCE_TAG), ("countries", &*COUNTRY_TAG)].iter() {
if let Some(country_index) = nodes.iter().position(|x| x.is_matching(tag)) {
// Drain all countries
let country_list: Vec<Node> = nodes.drain_filter(|x| x.is_matching(tag)).collect();
nodes.insert(country_index, Node::Line((name, country_list)));
}
}
}
}
fn is_matching(&self, re: &Regex) -> bool {
match self {
Node::Line((name, _)) => re.is_match(name),
_ => false,
}
}
/// convert this node to serde's json
pub fn to_json(&self) -> serde_json::Value {
match self {
Node::Line((_, arr)) | Node::List(arr) => {
// Object if any element has a child
// List if none do
// Undefined if both
if let Some(thing) = arr.first() {
match thing {
// List
Node::Leaf(_) => serde_json::Value::Array(arr.iter().map(|x| x.to_json()).collect()),
// Object
_ => {
let mut map = serde_json::Map::new();
let mut stuff = vec![];
for element in arr.iter() {
match element {
Node::Line((name, innerLineItems)) => {
Node::insert_or_listify(name, &Node::List(innerLineItems.clone()).to_json(), &mut map, &mut stuff);
}
l @ Node::List(_) => {
Node::insert_or_listify("list", &l.to_json(), &mut map, &mut stuff);
}
Node::SingleElementLine((name, object)) => {
Node::insert_or_listify(name, &Node::Leaf(object).to_json(), &mut map, &mut stuff);
}
Node::Leaf(name) => {
//log!("{}", name);
// | values | identifier_name |
lib.rs | id }
}
}
#[serde_as]
#[derive(Deserialize, Debug, PartialEq)]
pub struct Province {
name: String,
#[serde(default)]
owner: Option<String>,
/// Small hack: make the remainder pops.
/// This, shockingly, actually works for any subfield we can think of,
/// so it's actually the magic backtracking we were looking for all along
#[serde(flatten)]
#[serde_as(as="HashMap<DefaultOnError, DefaultOnError>")]
pops: HashMap<String, SingleOrMany<Pop>>,
}
impl Province {
pub fn new(name: String, owner: Option<String>, pops: HashMap<String, SingleOrMany<Pop>, RandomState>) -> Self {
Province { name, owner, pops }
}
}
#[derive(Deserialize, Debug)]
struct Building {
#[serde(rename = "building")]
name: String,
money: f64,
}
#[derive(Deserialize, Debug)]
struct StateID {
// Name in a localization file
id: i32,
#[serde(rename = "type")]
state_type: i32,
}
/// A state owned by a country
#[derive(Deserialize, Debug)]
struct State {
#[serde(rename = "state_buildings", default)]
buildings: SingleOrMany<Building>,
// What are these?
#[serde(default)]
savings: f64,
#[serde(default)]
interest: f64,
id: StateID,
#[serde(rename = "provinces")]
province_ids: SingleOrMany<i32>,
}
#[derive(Deserialize, Debug)]
struct Country {
money: f64,
tax_base: f64,
// Don't count single-state countries rn
#[serde(rename="state", default)]
states: SingleOrMany<State>,
}
#[derive(Deserialize, Debug, PartialEq)]
#[serde(untagged)]
pub enum SingleOrMany<T> {
Single(T),
Many(Vec<T>),
None,
}
impl<T> Default for SingleOrMany<T> {
fn default() -> Self {
SingleOrMany::None
}
}
impl<T> SingleOrMany<T> {
// https://stackoverflow.com/a/30220832/998335
fn values(&self) -> Box<dyn Iterator<Item = &T> + '_> {
match self {
SingleOrMany::None => Box::new(iter::empty()),
SingleOrMany::Single(elem) => Box::new(iter::once(elem)),
SingleOrMany::Many(elems) => Box::new(elems.iter()),
}
}
}
#[wasm_bindgen]
#[derive(Deserialize, Debug)]
pub struct Save {
#[serde(deserialize_with = "vicky_date_serialize_serde")]
date: NaiveDate,
#[serde(rename = "player")]
player_tag: String,
// USA: Country,
/// Hack:
/// we know we want all aliases that are country tags,
/// so we'll accept all uppercase sequences of characters of size two or three
/// (26^2 + 26^3) = 18252. Not great. I actually tried this and it killed the compiler. Sad!
/// The problem is around line 1168 on serde-rs's de.rs. It does explicit checking, not pattern
/// matching against valid rust patterns (we could use that to our advantage as we did with the
/// PEG parser). Additionally, it wouldn't populate a hashmap like we want - just a vec.
/// This is surmountable (can infer country from other tags) but irrelevant because we can't actually do that.
/// Solution: create an artificial countries tag somewhere else to do what we want.
countries: HashMap<String, Country>,
/// Same hack as countries
provinces: HashMap<i32, Province>,
}
#[wasm_bindgen]
impl Save {
pub fn js_forex_position(&self) -> D3Node {
let mut generator = (0u64..);
let forex = self.forex_position();
D3Node::parent(generator.nth(0).unwrap(), "Forex".to_string(),
forex.iter().map(|(countryname, (treasury, statewealth))| {
D3Node::parent(generator.nth(0).unwrap(), countryname.to_string(),
vec![
D3Node::leaf(generator.nth(0).unwrap(), "Treasury".to_string(), *treasury),
D3Node::parent(generator.nth(0).unwrap(), "States".to_string(),
statewealth.iter().map(|(state_id, (factories, provinces))| {
D3Node::parent(generator.nth(0).unwrap(), state_id.to_string(), vec![
D3Node::parent(generator.nth(0).unwrap(), "Factories".to_string(), factories.iter().map(|(x, y)|D3Node::leaf(generator.nth(0).unwrap(), x.to_string(), *y)).collect()),
D3Node::parent(generator.nth(0).unwrap(), "Provinces".to_string(), provinces.iter().map(|(province, pop)| {
D3Node::parent(generator.nth(0).unwrap(), province.to_string(), pop.iter().map(|(title, wealth)| {
D3Node::leaf(generator.nth(0).unwrap(), title.to_string(), *wealth)
}).collect())
}).collect())
])
}).collect())
]
)
}).collect()
)
}
}
#[wasm_bindgen]
#[derive(Serialize, Clone, Debug)]
pub struct D3Node {
id: u64,
name: String,
#[serde(flatten)]
atom: D3Atomic,
}
impl D3Node {
// For tests
pub fn parent(id: u64, name: String, children: Vec<D3Node>) -> Self {
D3Node { id, name, atom: D3Atomic::Parent{ children } }
}
pub fn leaf(id: u64, name: String, atom: f64) -> Self {
D3Node { id, name, atom: D3Atomic::Leaf{ size: atom } }
}
pub fn atom(&self) -> &D3Atomic {
&self.atom
}
pub fn name(&self) -> &str {
&self.name
}
// Actually useful
pub fn children_value(&self) -> f64 {
match &self.atom {
D3Atomic::Parent { children } => children.iter().map(D3Node::children_value).sum(),
D3Atomic::Leaf { size: loc } => *loc,
}
}
pub fn cauterize(&self, depth: u32) -> D3Node {
if depth == 0 {
D3Node::leaf(self.id, self.name.to_string(), self.children_value())
} else {
match &self.atom {
D3Atomic::Parent { children } => {
// https://github.com/plouc/nivo/issues/942
// For now, remove anything < 1% of the total
let stream = children.iter().map(|x| x.cauterize(depth - 1)).collect::<Vec<D3Node>>();
let values = stream.iter().map(|x| x.children_value()).collect::<Vec<f64>>();
let total: f64 = values.iter().sum();
let mut keptTotal: f64 = 0.0;
let mut kept: Vec<D3Node> = stream.iter().enumerate().filter(|(idx, _)| values[*idx] > (total * 0.01)).map(|(idx, y)| {
keptTotal += values[idx];
y.clone()
}).collect();
// kept.push(D3Node::leaf(depth as u64 + 1 * keptTotal as u64, "Other".to_string(), keptTotal));
D3Node::parent(self.id, self.name.to_string(), kept)
}
// gdi I can't borrow anything 'cause of that one stupid int parse
D3Atomic::Leaf { size: loc } => D3Node::leaf(self.id, self.name.to_string(), *loc )
}
}
}
// Everything from the end of the keypath down to depth, as truncated
// For forex -> chi -> states -> etc
// keypath = [CHI], depth = 1 => chi at root, all of the states under it, and nothing else
fn subtree_for_node<T: AsRef<str>>(&self, key_path: &[T], depth: u32) -> Result<D3Node, String> {
match key_path.first() {
None => {
// Navigate down depth
Ok(self.cauterize(depth))
}
Some(name) => {
// Navigate down keypath
let name = name.as_ref();
match &self.atom {
D3Atomic::Parent {children: child} => {
match child.iter().find(|x| x.name.as_str() == name) {
Some(element) => element.subtree_for_node(&key_path[1..], depth),
None => Err(format!("Expected to find {} in {} (found {:?})", name, &self.name, child))
}
}
_ => Err(format!("Expected {} to be a parent", &self.name))
}
}
}
}
}
#[wasm_bindgen]
impl D3Node {
pub fn js_subtree_for_node(&self, key_path: JsValue, depth: u32) -> Result<JsValue, JsValue> {
let keypath = key_path.into_serde::<Vec<String>>().map_err(|x| JsValue::from(x.to_string()))?;
let subtree = self.subtree_for_node(&keypath, depth).map_err(|x| JsValue::from(x.to_string()))?;
JsValue::from_serde(&subtree).map_err(|x| JsValue::from(x.to_string()))
}
}
#[derive(Serialize, Clone, Debug)]
#[serde(untagged)]
pub enum D3Atomic {
Parent { children: Vec<D3Node> },
Leaf { size: f64 },
}
impl Save {
/// Just return country -> (treasury, state ID -> (factory name -> money, province name -> pop title -> wealth))
pub fn forex_position(&self) -> HashMap<&str, (f64, HashMap<i32, (HashMap<&str, f64>, HashMap<&str, HashMap<String, f64>>)>)> {
self.countries.iter().map(|(name, country)| {
(name.as_str(), (country.money, country.states.values()
.map(|state| {
(state.id.id, (
state.buildings.values().map(|building| (building.name.as_str(), building.money)).collect::<HashMap<&str, f64>>(),
state.province_ids.values()
.map(|x| self.provinces.get(x).unwrap())
.filter(|x| x.owner.as_ref().map(|unwrapper| unquote(unwrapper) == name).unwrap_or(false))
.map(|x| {
(x.name.as_str(), x.pops.iter()
.flat_map(|(title, pop)| {
pop.values().enumerate().map(move |(index, x)| (numerate(index, title.to_string()), x.bank + x.money))
})
.collect::<HashMap<String, f64>>())
}).collect::<HashMap<&str, HashMap<String, f64>>>()
))
}
).collect()))
}).collect()
}
}
fn numerate(index: usize, thing: String) -> String {
if index == 0 {
thing
} else {
thing + (index + 1).to_string().as_str()
}
}
fn vicky_date_serialize_serde<'de, D>(
deserializer: D,
) -> Result<NaiveDate, D::Error>
where
D: Deserializer<'de>,
{
let s = String::deserialize(deserializer)?;
parse_victoria_date(&*s).map_err(serde::de::Error::custom)
}
#[derive(Debug, Eq, PartialEq, Clone)]
pub enum ParseError {
InvalidDate,
Integer(ParseIntError),
MissingNode,
}
impl fmt::Display for ParseError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
ParseError::MissingNode => write!(f, "Missing node"),
ParseError::InvalidDate =>
write!(f, "Invalid date"),
// The wrapped error contains additional information and is available
// via the source() method.
ParseError::Integer(ref e) =>
e.fmt(f)
//write!(f, "the provided string could not be parsed as int"),
}
}
}
impl error::Error for ParseError {
fn source(&self) -> Option<&(dyn error::Error + 'static)> {
match *self {
ParseError::InvalidDate | ParseError::MissingNode => None,
// The cause is the underlying implementation error type. Is implicitly
// cast to the trait object `&error::Error`. This works because the
// underlying type already implements the `Error` trait.
ParseError::Integer(ref e) => Some(e),
}
}
}
// Implement the conversion from `ParseIntError` to `ParseError`.
// This will be automatically called by `?` if a `ParseIntError`
// needs to be converted into a `ParseError`.
impl From<ParseIntError> for ParseError {
fn from(err: ParseIntError) -> ParseError {
ParseError::Integer(err)
}
}
// Until rust gets negative slice semantics, have to make do with this
pub fn unquote(thing: &str) -> &str {
assert!(thing.starts_with('"'), "expected a leading quote in {:?}", thing);
assert!(thing.ends_with('"'), "expected a trailing quote in {:?}", thing);
&thing[1..thing.len() - 1]
}
pub fn parse_victoria_date(text: &str) -> Result<NaiveDate, ParseError> {
let text = unquote(text);
let textiter = text.char_indices();
let dots: Vec<usize> = textiter.filter_map(|(x, y)| match y {
'.' => Some(x),
_ => None,
}).take(2).collect();
match (text[0..dots[0]].parse(),
text[(dots[0] + 1)..dots[1]].parse(),
text[(dots[1] + 1)..].parse(),
) {
(Ok(y), Ok(m), Ok(d)) => {
match NaiveDate::from_ymd_opt(y, m, d) {
Some(date) => Ok(date),
None => Err(ParseError::InvalidDate),
}
},
(y, m, d) => {
Err([y.err(), m.err(), d.err()]
.iter()
.find_map(|x| x.clone())
.map_or(ParseError::InvalidDate, |x| ParseError::Integer(x)))
},
}
}
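// A small illustrative test of the expected input shape (assumes chrono's NaiveDate,
// which this module already uses): Victoria-style dates arrive quoted as "Y.M.D".
#[cfg(test)]
mod victoria_date_tests {
    use super::parse_victoria_date;
    use chrono::NaiveDate;

    #[test]
    fn parses_quoted_date() {
        assert_eq!(
            parse_victoria_date("\"1836.1.1\""),
            Ok(NaiveDate::from_ymd_opt(1836, 1, 1).unwrap())
        );
    }
}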
impl Save {
pub fn new(list: Node) -> Result<Save, Error> {
serde_json::from_value(list.to_json())
}
}
// https://stackoverflow.com/questions/32571441/what-is-the-difference-between-storing-a-vec-vs-a-slice
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum Node<'a> {
Line((&'a str, Vec<Node<'a >>)),
SingleElementLine((&'a str, &'a str)),
List(Vec<Node<'a >>),
Leaf(&'a str),
}
impl<'a> Node<'a> {
fn insert_or_listify(name: &'a str, object: &serde_json::Value, map: &mut serde_json::Map<String, serde_json::Value>, seen: &mut Vec<&'a str>) {
if let Some(prior) = map.get(name) {
// if we already have an entry in the map for this element,
// convert it to a list of this element with the name as a key
// for now, means we can't invert unless we make this nicer
if seen.contains(&name) {
// append to list
if let Some(serde_json::Value::Array(elements)) = map.get_mut(name) {
elements.push(object.clone());
} else {
unreachable!()
}
} else |
} else {
map.insert(name.to_string(), object.clone());
}
}
/// In-place modify to be parseable.
/// See the comment above for countries for rationale.
/// Call on root.
pub fn raise(&mut self) {
if let Node::List(nodes) = self {
// Get the first country index
for (name, tag) in [("provinces", &*PROVINCE_TAG), ("countries", &*COUNTRY_TAG)].iter() {
if let Some(country_index) = nodes.iter().position(|x| x.is_matching(tag)) {
// Drain all countries
let country_list: Vec<Node> = nodes.drain_filter(|x| x.is_matching(tag)).collect();
nodes.insert(country_index, Node::Line((name, country_list)));
}
}
}
}
fn is_matching(&self, re: &Regex) -> bool {
match self {
Node::Line((name, _)) => re.is_match(name),
_ => false,
}
}
/// Convert this node into a serde_json value.
pub fn to_json(&self) -> serde_json::Value {
match self {
Node::Line((_, arr)) | Node::List(arr) => {
// Object if any element has a child
// List if none do
// Undefined if both
if let Some(thing) = arr.first() {
match thing {
// List
Node::Leaf(_) => serde_json::Value::Array(arr.iter().map(|x| x.to_json()).collect()),
// Object
_ => {
let mut map = serde_json::Map::new();
let mut stuff = vec![];
for element in arr.iter() {
match element {
Node::Line((name, inner_line_items)) => {
Node::insert_or_listify(name, &Node::List(inner_line_items.clone()).to_json(), &mut map, &mut stuff);
}
l @ Node::List(_) => {
Node::insert_or_listify("list", &l.to_json(), &mut map, &mut stuff);
}
Node::SingleElementLine((name, object)) => {
Node::insert_or_listify(name, &Node::Leaf(object).to_json(), &mut map, &mut stuff);
}
Node::Leaf(name) => {
//log!("{}", name);
// | {
// create list
seen.push(name);
map.insert(name.to_string(), serde_json::Value::Array(vec![prior.clone(), object.clone()]));
} | conditional_block |
iter.rs | ///! source::iter: source code content iterator, with interner
use std::collections::{HashMap, hash_map::DefaultHasher};
use std::fmt;
use std::hash::{Hash, Hasher};
use std::num::NonZeroU32;
use std::path::PathBuf;
use std::ops::{Add, AddAssign};
use super::{SourceContext, SourceFile, FileId};
pub const EOF: char = 0u8 as char;
/// Character location
///
/// - it is a byte index across all source files, e.g. the second file's position starts from the first file's byte length (+1)
/// to reduce memory usage because location info is used extremely widely
/// - it is u32 not usize because it is not reasonable to
/// have a file size over 4GB or all source file total size over 4GB for this toy language (possibly for all languages)
#[derive(Eq, PartialEq, Clone, Copy, Hash)]
pub struct Position(u32);
impl Position {
pub const fn new(v: u32) -> Self {
Self(v)
}
pub fn unwrap(self) -> u32 {
self.0
}
pub fn offset(self, offset: i32) -> Self {
Self(if offset >= 0 { self.0 + offset as u32 } else { self.0 - (-offset) as u32 })
}
}
impl fmt::Debug for Position {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.0)
}
}
impl From<u32> for Position {
fn from(v: u32) -> Self {
Self(v)
}
}
// use `position1 + position2` to merge into span
impl Add for Position {
type Output = Span;
fn add(self, rhs: Position) -> Span {
debug_assert!(rhs.0 >= self.0, "invalid position + position");
Span::new(self, rhs)
}
}
/// Character range location
///
/// construct from 2 Positions,
/// while type name is Span, recommend variable name is `loc` or `location`
#[derive(Eq, PartialEq, Clone, Copy, Hash)]
pub struct Span {
pub start: Position,
pub end: Position,
}
impl Span {
// e.g. Span::new(position1, position2) or Span::new(42, 43)
pub fn new(start: impl Into<Position>, end: impl Into<Position>) -> Span {
Span{ start: start.into(), end: end.into() }
}
}
impl fmt::Debug for Span {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Span({}, {})", self.start.0, self.end.0)
}
}
// use `span1 + span2` to merge span
// ATTENTION: only allow left + right, while gap/overlap both acceptable
impl Add for Span {
type Output = Span;
fn add(self, rhs: Span) -> Span {
debug_assert!(rhs.start.0 >= self.start.0 && rhs.end.0 >= self.end.0, "invalid span + span");
Span{ start: self.start, end: rhs.end }
}
}
// or `span + position`
impl Add<Position> for Span {
type Output = Span;
fn add(self, rhs: Position) -> Span {
debug_assert!(rhs.0 >= self.end.0, "invalid span + position");
Span{ start: self.start, end: rhs }
}
}
// use `span += position` to update span
impl AddAssign<Position> for Span {
fn add_assign(&mut self, rhs: Position) {
debug_assert!(rhs.0 >= self.end.0, "invalid span += position");
self.end = rhs;
}
}
// or use `span1 += span2`
// ATTENTION: only allow left += right, while gap/overlap both acceptable
impl AddAssign<Span> for Span {
fn add_assign(&mut self, rhs: Span) {
debug_assert!(rhs.start.0 >= self.start.0 && rhs.end.0 >= self.end.0, "invalid span += span");
self.end = rhs.end;
}
}
// use `position.into()` to turn position directly into span
impl From<Position> for Span {
fn from(position: Position) -> Span {
Span::new(position, position)
}
}
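// A minimal sketch (not part of the original file) exercising the merge rules documented above.
#[cfg(test)]
mod span_arithmetic_sketch {
    use super::{Position, Span};

    #[test]
    fn merging_positions_and_spans() {
        let span = Position::new(10) + Position::new(14);
        assert_eq!(span, Span::new(10, 14));
        let mut span = span + Position::new(20);
        span += Span::new(25, 30);
        assert_eq!(span, Span::new(10, 30));
        let single: Span = Position::new(7).into();
        assert_eq!(single, Span::new(7, 7));
    }
}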
/// a handle to an interned string
///
/// - IsId means InternedStringID, it is short because it is widely used
/// - it was named SymbolID or SymId or Sym but I found that
/// symbol (in symbol table) in compiler principle means a "name", a name to a variable, function, etc.
/// although I will call that a "Name", or a "TypeId", "VarId" etc. in my semantic analysis, but this name
/// may confuse reader or myself after, for example, 10 years (although I'm not confused after this 5 years)
/// - SymbolID, StringID or InternedStringID is too long,
/// Str or String makes the reader think it is a kind of string (a ptr, cap, len structure)
/// - it is u32 not usize because it is not reasonable to
/// have more than u32::MAX strings in a program, and it is widely used
/// - recommend variable name `id` or `string_id`
#[derive(Eq, PartialEq, Clone, Copy, Hash)]
pub struct IsId(NonZeroU32);
impl IsId {
pub(super) const POSITION_MASK: u32 = 1 << 31;
pub const fn new(v: u32) -> Self {
debug_assert!(v != 0, "isid cannot be 0");
// SAFETY: debug_assert above
Self(unsafe { NonZeroU32::new_unchecked(v) })
}
pub fn unwrap(self) -> u32 {
self.0.get()
}
}
impl From<u32> for IsId {
fn from(v: u32) -> Self {
Self::new(v)
}
}
impl fmt::Debug for IsId {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_tuple("IsId").field(&self.0.get()).finish()
}
}
// this is currently an u128, but I suspect that it can fit in u64
// for current small test until even first bootstrap version, the string id and span should be easily fit in u64
// for "dont-know-whether-exist very large program",
// considering these 2 ids increase accordingly, squash `u32::MAX - id` and span together may still be ok
#[derive(PartialEq, Clone, Copy)]
pub struct IdSpan {
pub id: IsId,
pub span: Span,
}
impl IdSpan {
pub fn new(id: impl Into<IsId>, span: Span) -> Self {
Self{ id: id.into(), span }
}
}
impl fmt::Debug for IdSpan {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_tuple("IdSpan")
.field(&self.id)
.field(&self.span)
.finish()
}
}
fn get_hash(content: &str) -> u64 {
let mut hasher = DefaultHasher::new();
Hash::hash(content, &mut hasher);
hasher.finish()
}
// get LF byte indexes
fn get_endlines(content: &str) -> Vec<usize> {
content.char_indices().filter(|(_, c)| c == &'\n').map(|(i, _)| i).collect()
}
// this iterator is the exact first layer of processing above source code content,
// logically all location information comes from position created by the next function
//
// this iterator also includes the string intern interface, to make the lexical parser simpler
//
// from the source context's perspective, this is also a source file builder: it is returned by the
// entry and import functions, appends to the string table while running, and appends to the file list when finished
#[derive(Debug)]
pub struct SourceChars<'a> {
content: String,
current_index: usize, // current iterating byte index, content bytes[current_index] should be the next returned char
start_index: usize, // starting byte index for this file, i.e. the previous files' total byte length; copied to SourceFile.start_index when finished
// copy to SourceFile
path: PathBuf,
namespace: Vec<IsId>,
request: Option<Span>,
// borrow other part of SourceContext except fs to prevent <F> propagation
files: &'a mut Vec<SourceFile>,
string_hash_to_id: &'a mut HashMap<u64, IsId>,
string_id_to_span: &'a mut Vec<Span>,
string_additional: &'a mut String,
}
impl<'a> SourceChars<'a> {
pub(super) fn new<F>(
mut content: String,
start_index: usize,
path: PathBuf,
namespace: Vec<IsId>,
request: Option<Span>,
context: &'a mut SourceContext<F>,
) -> Self {
// append 3 '\0' char to end of content for the branchless (less branch actually) iterator
content.push_str("\0\0\0");
Self{
content,
start_index,
current_index: 0,
path,
namespace,
request,
files: &mut context.files,
string_hash_to_id: &mut context.string_hash_to_id,
string_id_to_span: &mut context.string_id_to_span,
string_additional: &mut context.string_additional,
}
}
/// iterate return char and byte index
///
/// ignore all bare or not bare CR, return EOF after EOF
pub fn next(&mut self) -> (char, Position) {
loop {
if self.current_index == self.content.len() - 3 {
return (EOF, Position::new((self.start_index + self.current_index) as u32));
}
let bytes = self.content.as_bytes();
match bytes[self.current_index] {
b'\r' => { // ignore \r
self.current_index += 1;
continue;
},
b @ 0..=128 => { // ascii fast path
self.current_index += 1;
return (b as char, Position::new((self.current_index - 1 + self.start_index) as u32));
},
_ => {
let width = get_char_width(&self.content, self.current_index);
if self.current_index + width > self.content.len() - 3 {
// TODO: this should be an error not panic, although unrecoverable
panic!("invalid utf-8 sequence");
}
const MASKS: [u8; 5] = [0, 0, 0x1F, 0x0F, 0x07]; // byte 0 masks
const SHIFTS: [u8; 5] = [0, 0, 12, 6, 0]; // shift back for width 2 and width 3
let bytes = &bytes[self.current_index..];
let r#char = ((((bytes[0] & MASKS[width]) as u32) << 18) | (((bytes[1] & 0x3F) as u32) << 12) | (((bytes[2] & 0x3F) as u32) << 6) | ((bytes[3] & 0x3F) as u32)) >> SHIFTS[width];
// TODO: check more invalid utf8 sequence include following bytes not start with 0b10 and larger than 10FFFF and between D800 and E000
self.current_index += width;
// SAFETY: invalid char should not cause severe issue in lexical parse and syntax parse
return (unsafe { char::from_u32_unchecked(r#char) }, Position::new((self.current_index - width + self.start_index) as u32));
},
}
}
}
pub fn intern(&mut self, value: &str) -> IsId {
// empty string is span 0,0, this span must exist because this function exists in this type
if value.is_empty() {
return IsId::new(1);
}
let hash = get_hash(value);
if let Some(id) = self.string_hash_to_id.get(&hash) {
*id
} else {
let new_id = IsId::new(self.string_id_to_span.len() as u32);
self.string_hash_to_id.insert(hash, new_id);
let start_position = if let Some(index) = self.string_additional.find(value) {
index
} else {
self.string_additional.push_str(value);
self.string_additional.len() - value.len()
} as u32;
// ATTENTION: this type of span's end is the last byte index + 1 (exactly the index you would use in str[begin..end]), not the last char
let span = Span::new(start_position | IsId::POSITION_MASK, start_position + value.len() as u32);
self.string_id_to_span.push(span);
new_id
}
}
// intern string at location
pub fn intern_span(&mut self, location: Span) -> IsId {
let (start, end) = (location.start.0 as usize, location.end.0 as usize);
debug_assert!(start <= end, "invalid span");
debug_assert!(self.start_index <= start, "not this file span");
// does not check position for EOF because it is not expected to intern something include EOF
debug_assert!(end - self.start_index < self.content.len() - 3 && start - self.start_index < self.content.len() - 3, "position overflow");
let end_width = get_char_width(&self.content, end - self.start_index);
let hash = get_hash(&self.content[start - self.start_index..end - self.start_index + end_width]);
if let Some(id) = self.string_hash_to_id.get(&hash) {
*id
} else {
let new_id = IsId::new(self.string_id_to_span.len() as u32);
self.string_hash_to_id.insert(hash, new_id);
self.string_id_to_span.push(location);
new_id
}
}
pub fn | (&self) -> FileId {
FileId::new(self.files.len() as u32 + 1)
}
pub fn finish(mut self) {
let content_length = self.content.len();
self.content.truncate(content_length - 3);
self.files.push(SourceFile{
path: self.path,
endlines: get_endlines(&self.content),
content: self.content,
namespace: self.namespace,
start_index: self.start_index,
request: self.request,
});
}
}
// impl<'a> Drop for SourceChars<'a> {
// fn drop(&mut self) {
// // this cannot be some SourceContext::finish_build because SourceChars
// // does not have a reference to SourceContext to prevent propagating the <F: FileSystem> generic parameter
// let content_length = self.content.len();
// self.content.truncate(content_length - 3);
// self.files.push(SourceFile{
// path: std::mem::take(&mut self.path),
// endlines: get_endlines(&self.content),
// content: std::mem::take(&mut self.content),
// namespace: std::mem::take(&mut self.namespace),
// start_index: self.start_index,
// request: self.request,
// });
// }
// }
// width byte[0] byte[1] byte[2] byte[3]
// 1 0xxxxxxx
// 2 110xxxxx 10xxxxxx
// 3 1110xxxx 10xxxxxx 10xxxxxx
// 4 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
const WIDTH: [usize; 256] = [
// 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, A, B, C, D, E, F
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 0
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 1
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 2
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 3
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 4
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 5
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 6
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 7
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 8
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 9
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // A
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // B
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // C
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // D
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, // E
4, 4, 4, 4, 4, 4, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, // F
];
pub fn get_char_width(content: &str, byte_index: usize) -> usize {
WIDTH[content.as_bytes()[byte_index] as usize]
}
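// Illustrative only: the widths follow the UTF-8 leading-byte table above
// (1-, 2-, 3-, and 4-byte sequences).
#[cfg(test)]
mod char_width_sketch {
    use super::get_char_width;

    #[test]
    fn width_of_leading_bytes() {
        let s = "a\u{e9}\u{4e2d}\u{1f600}";
        assert_eq!(get_char_width(s, 0), 1); // 'a'
        assert_eq!(get_char_width(s, 1), 2); // 'é'
        assert_eq!(get_char_width(s, 3), 3); // '中'
        assert_eq!(get_char_width(s, 6), 4); // '😀'
    }
}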
| get_file_id | identifier_name |
iter.rs | ///! source::iter: source code content iterator, with interner
use std::collections::{HashMap, hash_map::DefaultHasher};
use std::fmt;
use std::hash::{Hash, Hasher};
use std::num::NonZeroU32;
use std::path::PathBuf;
use std::ops::{Add, AddAssign};
use super::{SourceContext, SourceFile, FileId};
pub const EOF: char = 0u8 as char;
/// Character location
///
/// - it is a byte index across all source files, e.g. the second file's position starts from the first file's byte length (+1)
/// to reduce memory usage because location info is used extremely widely
/// - it is u32 not usize because it is not reasonable to
/// have a file size over 4GB or all source file total size over 4GB for this toy language (possibly for all languages)
#[derive(Eq, PartialEq, Clone, Copy, Hash)]
pub struct Position(u32);
impl Position {
pub const fn new(v: u32) -> Self {
Self(v)
}
pub fn unwrap(self) -> u32 {
self.0
}
pub fn offset(self, offset: i32) -> Self {
Self(if offset >= 0 { self.0 + offset as u32 } else { self.0 - (-offset) as u32 })
}
}
impl fmt::Debug for Position {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.0)
}
}
impl From<u32> for Position {
fn from(v: u32) -> Self {
Self(v)
}
}
// use `position1 + position2` to merge into span
impl Add for Position {
type Output = Span;
fn add(self, rhs: Position) -> Span {
debug_assert!(rhs.0 >= self.0, "invalid position + position");
Span::new(self, rhs)
}
}
/// Character range location
///
/// construct from 2 Positions,
/// while type name is Span, recommend variable name is `loc` or `location`
#[derive(Eq, PartialEq, Clone, Copy, Hash)]
pub struct Span {
pub start: Position,
pub end: Position,
}
impl Span {
// e.g. Span::new(position1, position2) or Span::new(42, 43)
pub fn new(start: impl Into<Position>, end: impl Into<Position>) -> Span {
Span{ start: start.into(), end: end.into() }
}
}
impl fmt::Debug for Span {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Span({}, {})", self.start.0, self.end.0)
}
}
// use `span1 + span2` to merge span
// ATTENTION: only allow left + right, while gap/overlap both acceptable
impl Add for Span {
type Output = Span;
fn add(self, rhs: Span) -> Span {
debug_assert!(rhs.start.0 >= self.start.0 && rhs.end.0 >= self.end.0, "invalid span + span");
Span{ start: self.start, end: rhs.end }
}
}
// or `span + position`
impl Add<Position> for Span {
type Output = Span;
fn add(self, rhs: Position) -> Span {
debug_assert!(rhs.0 >= self.end.0, "invalid span + position");
Span{ start: self.start, end: rhs }
}
}
// use `span += position` to update span
impl AddAssign<Position> for Span {
fn add_assign(&mut self, rhs: Position) {
debug_assert!(rhs.0 >= self.end.0, "invalid span += position");
self.end = rhs;
}
}
// or use `span1 += span2`
// ATTENTION: only allow left += right, while gap/overlap both acceptable
impl AddAssign<Span> for Span {
fn add_assign(&mut self, rhs: Span) {
debug_assert!(rhs.start.0 >= self.start.0 && rhs.end.0 >= self.end.0, "invalid span += span");
self.end = rhs.end;
}
}
// use `position.into()` to turn position directly into span
impl From<Position> for Span {
fn from(position: Position) -> Span {
Span::new(position, position)
}
}
/// a handle to an interned string
///
/// - IsId means InternedStringID, it is short because it is widely used
/// - it was named SymbolID or SymId or Sym but I found that
/// symbol (in symbol table) in compiler principle means a "name", a name to a variable, function, etc.
/// although I will call that a "Name", or a "TypeId", "VarId" etc. in my semantic analysis, but this name
/// may confuse reader or myself after, for example, 10 years (although I'm not confused after this 5 years)
/// - SymbolID, StringID or InternedStringID is too long,
/// Str or String makes the reader think it is a kind of string (a ptr, cap, len structure)
/// - it is u32 not usize because it is not reasonable to
/// have more than u32::MAX strings in a program, and it is widely used
/// - recommend variable name `id` or `string_id`
#[derive(Eq, PartialEq, Clone, Copy, Hash)]
pub struct IsId(NonZeroU32);
impl IsId {
pub(super) const POSITION_MASK: u32 = 1 << 31;
pub const fn new(v: u32) -> Self {
debug_assert!(v != 0, "isid cannot be 0");
// SAFETY: debug_assert above
Self(unsafe { NonZeroU32::new_unchecked(v) })
}
pub fn unwrap(self) -> u32 {
self.0.get()
}
}
impl From<u32> for IsId {
fn from(v: u32) -> Self {
Self::new(v)
}
}
impl fmt::Debug for IsId {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_tuple("IsId").field(&self.0.get()).finish()
}
}
// this is currently an u128, but I suspect that it can fit in u64
// for current small test until even first bootstrap version, the string id and span should be easily fit in u64
// for "dont-know-whether-exist very large program",
// considering these 2 ids increase accordingly, squash `u32::MAX - id` and span together may still be ok
#[derive(PartialEq, Clone, Copy)]
pub struct IdSpan {
pub id: IsId,
pub span: Span,
}
impl IdSpan {
pub fn new(id: impl Into<IsId>, span: Span) -> Self {
Self{ id: id.into(), span }
}
}
impl fmt::Debug for IdSpan {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_tuple("IdSpan")
.field(&self.id)
.field(&self.span)
.finish()
}
}
fn get_hash(content: &str) -> u64 {
let mut hasher = DefaultHasher::new();
Hash::hash(content, &mut hasher); | fn get_endlines(content: &str) -> Vec<usize> {
content.char_indices().filter(|(_, c)| c == &'\n').map(|(i, _)| i).collect()
}
// this iterator is the exact first layer of processing above source code content,
// logically all location information comes from position created by the next function
//
// this iterator also includes the string intern interface, to make the lexical parser simpler
//
// from the source context's perspective, this is also a source file builder: it is returned by the
// entry and import functions, appends to the string table while running, and appends to the file list when finished
#[derive(Debug)]
pub struct SourceChars<'a> {
content: String,
current_index: usize, // current iterating byte index, content bytes[current_index] should be the next returned char
start_index: usize, // starting byte index for this file, i.e. the previous files' total byte length; copied to SourceFile.start_index when finished
// copy to SourceFile
path: PathBuf,
namespace: Vec<IsId>,
request: Option<Span>,
// borrow other part of SourceContext except fs to prevent <F> propagation
files: &'a mut Vec<SourceFile>,
string_hash_to_id: &'a mut HashMap<u64, IsId>,
string_id_to_span: &'a mut Vec<Span>,
string_additional: &'a mut String,
}
impl<'a> SourceChars<'a> {
pub(super) fn new<F>(
mut content: String,
start_index: usize,
path: PathBuf,
namespace: Vec<IsId>,
request: Option<Span>,
context: &'a mut SourceContext<F>,
) -> Self {
// append 3 '\0' char to end of content for the branchless (less branch actually) iterator
content.push_str("\0\0\0");
Self{
content,
start_index,
current_index: 0,
path,
namespace,
request,
files: &mut context.files,
string_hash_to_id: &mut context.string_hash_to_id,
string_id_to_span: &mut context.string_id_to_span,
string_additional: &mut context.string_additional,
}
}
/// iterate return char and byte index
///
/// ignore all bare or not bare CR, return EOF after EOF
pub fn next(&mut self) -> (char, Position) {
loop {
if self.current_index == self.content.len() - 3 {
return (EOF, Position::new((self.start_index + self.current_index) as u32));
}
let bytes = self.content.as_bytes();
match bytes[self.current_index] {
b'\r' => { // ignore \r
self.current_index += 1;
continue;
},
b @ 0..=128 => { // ascii fast path
self.current_index += 1;
return (b as char, Position::new((self.current_index - 1 + self.start_index) as u32));
},
_ => {
let width = get_char_width(&self.content, self.current_index);
if self.current_index + width > self.content.len() - 3 {
// TODO: this should be an error not panic, although unrecoverable
panic!("invalid utf-8 sequence");
}
const MASKS: [u8; 5] = [0, 0, 0x1F, 0x0F, 0x07]; // byte 0 masks
const SHIFTS: [u8; 5] = [0, 0, 12, 6, 0]; // shift back for width 2 and width 3
let bytes = &bytes[self.current_index..];
let r#char = ((((bytes[0] & MASKS[width]) as u32) << 18) | (((bytes[1] & 0x3F) as u32) << 12) | (((bytes[2] & 0x3F) as u32) << 6) | ((bytes[3] & 0x3F) as u32)) >> SHIFTS[width];
// TODO: check more invalid utf8 sequence include following bytes not start with 0b10 and larger than 10FFFF and between D800 and E000
self.current_index += width;
// SAFETY: invalid char should not cause severe issue in lexical parse and syntax parse
return (unsafe { char::from_u32_unchecked(r#char) }, Position::new((self.current_index - width + self.start_index) as u32));
},
}
}
}
pub fn intern(&mut self, value: &str) -> IsId {
// empty string is span 0,0, this span must exist because this function exists in this type
if value.is_empty() {
return IsId::new(1);
}
let hash = get_hash(value);
if let Some(id) = self.string_hash_to_id.get(&hash) {
*id
} else {
let new_id = IsId::new(self.string_id_to_span.len() as u32);
self.string_hash_to_id.insert(hash, new_id);
let start_position = if let Some(index) = self.string_additional.find(value) {
index
} else {
self.string_additional.push_str(value);
self.string_additional.len() - value.len()
} as u32;
// ATTENTION: this type of span's end is the last byte index + 1 (exactly the index you would use in str[begin..end]), not the last char
let span = Span::new(start_position | IsId::POSITION_MASK, start_position + value.len() as u32);
self.string_id_to_span.push(span);
new_id
}
}
// intern string at location
pub fn intern_span(&mut self, location: Span) -> IsId {
let (start, end) = (location.start.0 as usize, location.end.0 as usize);
debug_assert!(start <= end, "invalid span");
debug_assert!(self.start_index <= start, "not this file span");
// does not check position for EOF because it is not expected to intern something include EOF
debug_assert!(end - self.start_index < self.content.len() - 3 && start - self.start_index < self.content.len() - 3, "position overflow");
let end_width = get_char_width(&self.content, end - self.start_index);
let hash = get_hash(&self.content[start - self.start_index..end - self.start_index + end_width]);
if let Some(id) = self.string_hash_to_id.get(&hash) {
*id
} else {
let new_id = IsId::new(self.string_id_to_span.len() as u32);
self.string_hash_to_id.insert(hash, new_id);
self.string_id_to_span.push(location);
new_id
}
}
pub fn get_file_id(&self) -> FileId {
FileId::new(self.files.len() as u32 + 1)
}
pub fn finish(mut self) {
let content_length = self.content.len();
self.content.truncate(content_length - 3);
self.files.push(SourceFile{
path: self.path,
endlines: get_endlines(&self.content),
content: self.content,
namespace: self.namespace,
start_index: self.start_index,
request: self.request,
});
}
}
// impl<'a> Drop for SourceChars<'a> {
// fn drop(&mut self) {
// // this cannot be some SourceContext::finish_build because SourceChars
// // does not have a reference to SourceContext to prevent propagating the <F: FileSystem> generic parameter
// let content_length = self.content.len();
// self.content.truncate(content_length - 3);
// self.files.push(SourceFile{
// path: std::mem::take(&mut self.path),
// endlines: get_endlines(&self.content),
// content: std::mem::take(&mut self.content),
// namespace: std::mem::take(&mut self.namespace),
// start_index: self.start_index,
// request: self.request,
// });
// }
// }
// width byte[0] byte[1] byte[2] byte[3]
// 1 0xxxxxxx
// 2 110xxxxx 10xxxxxx
// 3 1110xxxx 10xxxxxx 10xxxxxx
// 4 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
const WIDTH: [usize; 256] = [
// 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, A, B, C, D, E, F
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 0
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 1
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 2
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 3
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 4
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 5
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 6
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 7
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 8
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 9
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // A
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // B
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // C
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // D
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, // E
4, 4, 4, 4, 4, 4, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, // F
];
pub fn get_char_width(content: &str, byte_index: usize) -> usize {
WIDTH[content.as_bytes()[byte_index] as usize]
} | hasher.finish()
}
// get LF byte indexes | random_line_split |
iter.rs | ///! source::iter: source code content iterator, with interner
use std::collections::{HashMap, hash_map::DefaultHasher};
use std::fmt;
use std::hash::{Hash, Hasher};
use std::num::NonZeroU32;
use std::path::PathBuf;
use std::ops::{Add, AddAssign};
use super::{SourceContext, SourceFile, FileId};
pub const EOF: char = 0u8 as char;
/// Character location
///
/// - it is a byte index across all source files, e.g. the second file's position starts from the first file's byte length (+1)
/// to reduce memory usage because location info is used extremely widely
/// - it is u32 not usize because it is not reasonable to
/// have a file size over 4GB or all source file total size over 4GB for this toy language (possibly for all languages)
#[derive(Eq, PartialEq, Clone, Copy, Hash)]
pub struct Position(u32);
impl Position {
pub const fn new(v: u32) -> Self {
Self(v)
}
pub fn unwrap(self) -> u32 {
self.0
}
pub fn offset(self, offset: i32) -> Self {
Self(if offset >= 0 { self.0 + offset as u32 } else { self.0 - (-offset) as u32 })
}
}
impl fmt::Debug for Position {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.0)
}
}
impl From<u32> for Position {
fn from(v: u32) -> Self {
Self(v)
}
}
// use `position1 + position2` to merge into span
impl Add for Position {
type Output = Span;
fn add(self, rhs: Position) -> Span {
debug_assert!(rhs.0 >= self.0, "invalid position + position");
Span::new(self, rhs)
}
}
/// Character range location
///
/// construct from 2 Positions,
/// while type name is Span, recommend variable name is `loc` or `location`
#[derive(Eq, PartialEq, Clone, Copy, Hash)]
pub struct Span {
pub start: Position,
pub end: Position,
}
impl Span {
// e.g. Span::new(position1, position2) or Span::new(42, 43)
pub fn new(start: impl Into<Position>, end: impl Into<Position>) -> Span {
Span{ start: start.into(), end: end.into() }
}
}
impl fmt::Debug for Span {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Span({}, {})", self.start.0, self.end.0)
}
}
// use `span1 + span2` to merge span
// ATTENTION: only allow left + right, while gap/overlap both acceptable
impl Add for Span {
type Output = Span;
fn add(self, rhs: Span) -> Span {
debug_assert!(rhs.start.0 >= self.start.0 && rhs.end.0 >= self.end.0, "invalid span + span");
Span{ start: self.start, end: rhs.end }
}
}
// or `span + position`
impl Add<Position> for Span {
type Output = Span;
fn add(self, rhs: Position) -> Span {
debug_assert!(rhs.0 >= self.end.0, "invalid span + position");
Span{ start: self.start, end: rhs }
}
}
// use `span += position` to update span
impl AddAssign<Position> for Span {
fn add_assign(&mut self, rhs: Position) {
debug_assert!(rhs.0 >= self.end.0, "invalid span += position");
self.end = rhs;
}
}
// or use `span1 += span2`
// ATTENTION: only allow left += right, while gap/overlap both acceptable
impl AddAssign<Span> for Span {
fn add_assign(&mut self, rhs: Span) {
debug_assert!(rhs.start.0 >= self.start.0 && rhs.end.0 >= self.end.0, "invalid span += span");
self.end = rhs.end;
}
}
// use `position.into()` to turn position directly into span
impl From<Position> for Span {
fn from(position: Position) -> Span {
Span::new(position, position)
}
}
/// a handle to an interned string
///
/// - IsId means InternedStringID, it is short because it is widely used
/// - it was named SymbolID or SymId or Sym but I found that
/// symbol (in symbol table) in compiler principle means a "name", a name to a variable, function, etc.
/// although I will call that a "Name", or a "TypeId", "VarId" etc. in my semantic analysis, but this name
/// may confuse reader or myself after, for example, 10 years (although I'm not confused after this 5 years)
/// - SymbolID, StringID or InternedStringID is too long,
/// Str or String makes the reader think it is a kind of string (a ptr, cap, len structure)
/// - it is u32 not usize because it is not reasonable to
/// have more than u32::MAX strings in a program, and it is widely used
/// - recommend variable name `id` or `string_id`
#[derive(Eq, PartialEq, Clone, Copy, Hash)]
pub struct IsId(NonZeroU32);
impl IsId {
pub(super) const POSITION_MASK: u32 = 1 << 31;
pub const fn new(v: u32) -> Self {
debug_assert!(v != 0, "isid cannot be 0");
// SAFETY: debug_assert above
Self(unsafe { NonZeroU32::new_unchecked(v) })
}
pub fn unwrap(self) -> u32 {
self.0.get()
}
}
impl From<u32> for IsId {
fn from(v: u32) -> Self {
Self::new(v)
}
}
impl fmt::Debug for IsId {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result |
}
// this is currently an u128, but I suspect that it can fit in u64
// for current small test until even first bootstrap version, the string id and span should be easily fit in u64
// for "dont-know-whether-exist very large program",
// considering these 2 ids increase accordingly, squash `u32::MAX - id` and span together may still be ok
#[derive(PartialEq, Clone, Copy)]
pub struct IdSpan {
pub id: IsId,
pub span: Span,
}
impl IdSpan {
pub fn new(id: impl Into<IsId>, span: Span) -> Self {
Self{ id: id.into(), span }
}
}
impl fmt::Debug for IdSpan {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_tuple("IdSpan")
.field(&self.id)
.field(&self.span)
.finish()
}
}
fn get_hash(content: &str) -> u64 {
let mut hasher = DefaultHasher::new();
Hash::hash(content, &mut hasher);
hasher.finish()
}
// get LF byte indexes
fn get_endlines(content: &str) -> Vec<usize> {
content.char_indices().filter(|(_, c)| c == &'\n').map(|(i, _)| i).collect()
}
// this iterator is the exact first layer of processing above source code content,
// logically all location information comes from position created by the next function
//
// this iterator also includes the string intern interface, to make leixcal parser simpler
//
// from source context's perspective, this is also a source file builder which returns by
// entry and import function and when running, append to string table and when finished, append to files
#[derive(Debug)]
pub struct SourceChars<'a> {
content: String,
current_index: usize, // current iterating byte index, content bytes[current_index] should be the next returned char
start_index: usize, // starting byte index for this file, or previous files's total byte length, will copy to SourceFile.start_index when finished
// copy to SourceFile
path: PathBuf,
namespace: Vec<IsId>,
request: Option<Span>,
// borrow other part of SourceContext except fs to prevent <F> propagation
files: &'a mut Vec<SourceFile>,
string_hash_to_id: &'a mut HashMap<u64, IsId>,
string_id_to_span: &'a mut Vec<Span>,
string_additional: &'a mut String,
}
impl<'a> SourceChars<'a> {
pub(super) fn new<F>(
mut content: String,
start_index: usize,
path: PathBuf,
namespace: Vec<IsId>,
request: Option<Span>,
context: &'a mut SourceContext<F>,
) -> Self {
// append 3 '\0' char to end of content for the branchless (less branch actually) iterator
content.push_str("\0\0\0");
Self{
content,
start_index,
current_index: 0,
path,
namespace,
request,
files: &mut context.files,
string_hash_to_id: &mut context.string_hash_to_id,
string_id_to_span: &mut context.string_id_to_span,
string_additional: &mut context.string_additional,
}
}
/// iterate return char and byte index
///
/// ignore all bare or not bare CR, return EOF after EOF
pub fn next(&mut self) -> (char, Position) {
loop {
if self.current_index == self.content.len() - 3 {
return (EOF, Position::new((self.start_index + self.current_index) as u32));
}
let bytes = self.content.as_bytes();
match bytes[self.current_index] {
b'\r' => { // ignore \r
self.current_index += 1;
continue;
},
b @ 0..=128 => { // ascii fast path
self.current_index += 1;
return (b as char, Position::new((self.current_index - 1 + self.start_index) as u32));
},
_ => {
let width = get_char_width(&self.content, self.current_index);
if self.current_index + width > self.content.len() - 3 {
// TODO: this should be an error not panic, although unrecoverable
panic!("invalid utf-8 sequence");
}
const MASKS: [u8; 5] = [0, 0, 0x1F, 0x0F, 0x07]; // byte 0 masks
const SHIFTS: [u8; 5] = [0, 0, 12, 6, 0]; // shift back for width 2 and width 3
let bytes = &bytes[self.current_index..];
let r#char = ((((bytes[0] & MASKS[width]) as u32) << 18) | (((bytes[1] & 0x3F) as u32) << 12) | (((bytes[2] & 0x3F) as u32) << 6) | ((bytes[3] & 0x3F) as u32)) >> SHIFTS[width];
// TODO: check more invalid utf8 sequence include following bytes not start with 0b10 and larger than 10FFFF and between D800 and E000
self.current_index += width;
// SAFETY: invalid char should not cause severe issue in lexical parse and syntax parse
return (unsafe { char::from_u32_unchecked(r#char) }, Position::new((self.current_index - width + self.start_index) as u32));
},
}
}
}
pub fn intern(&mut self, value: &str) -> IsId {
// empty string is span 0,0, this span must exist because this function exists in this type
if value.is_empty() {
return IsId::new(1);
}
let hash = get_hash(value);
if let Some(id) = self.string_hash_to_id.get(&hash) {
*id
} else {
let new_id = IsId::new(self.string_id_to_span.len() as u32);
self.string_hash_to_id.insert(hash, new_id);
let start_position = if let Some(index) = self.string_additional.find(value) {
index
} else {
self.string_additional.push_str(value);
self.string_additional.len() - value.len()
} as u32;
// ATTENTION: this type of span's end is the last byte index + 1 (exactly the index you would use in str[begin..end]), not the last char
let span = Span::new(start_position | IsId::POSITION_MASK, start_position + value.len() as u32);
self.string_id_to_span.push(span);
new_id
}
}
// intern string at location
pub fn intern_span(&mut self, location: Span) -> IsId {
let (start, end) = (location.start.0 as usize, location.end.0 as usize);
debug_assert!(start <= end, "invalid span");
debug_assert!(self.start_index <= start, "not this file span");
// does not check position for EOF because it is not expected to intern something include EOF
debug_assert!(end - self.start_index < self.content.len() - 3 && start - self.start_index < self.content.len() - 3, "position overflow");
let end_width = get_char_width(&self.content, end - self.start_index);
let hash = get_hash(&self.content[start - self.start_index..end - self.start_index + end_width]);
if let Some(id) = self.string_hash_to_id.get(&hash) {
*id
} else {
let new_id = IsId::new(self.string_id_to_span.len() as u32);
self.string_hash_to_id.insert(hash, new_id);
self.string_id_to_span.push(location);
new_id
}
}
pub fn get_file_id(&self) -> FileId {
FileId::new(self.files.len() as u32 + 1)
}
pub fn finish(mut self) {
let content_length = self.content.len();
self.content.truncate(content_length - 3);
self.files.push(SourceFile{
path: self.path,
endlines: get_endlines(&self.content),
content: self.content,
namespace: self.namespace,
start_index: self.start_index,
request: self.request,
});
}
}
// impl<'a> Drop for SourceChars<'a> {
// fn drop(&mut self) {
// // this cannot be some SourceContext::finish_build because SourceChars
// // does not have a reference to SourceContext to prevent propagating the <F: FileSystem> generic parameter
// let content_length = self.content.len();
// self.content.truncate(content_length - 3);
// self.files.push(SourceFile{
// path: std::mem::take(&mut self.path),
// endlines: get_endlines(&self.content),
// content: std::mem::take(&mut self.content),
// namespace: std::mem::take(&mut self.namespace),
// start_index: self.start_index,
// request: self.request,
// });
// }
// }
// width byte[0] byte[1] byte[2] byte[3]
// 1 0xxxxxxx
// 2 110xxxxx 10xxxxxx
// 3 1110xxxx 10xxxxxx 10xxxxxx
// 4 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
const WIDTH: [usize; 256] = [
// 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, A, B, C, D, E, F
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 0
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 1
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 2
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 3
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 4
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 5
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 6
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 7
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 8
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 9
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // A
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // B
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // C
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // D
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, // E
4, 4, 4, 4, 4, 4, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, // F
];
pub fn get_char_width(content: &str, byte_index: usize) -> usize {
WIDTH[content.as_bytes()[byte_index] as usize]
}
| {
f.debug_tuple("IsId").field(&self.0.get()).finish()
} | identifier_body |
read_state.rs | use crate::{IncomingRewriter, StreamChangeData};
/// Is a given read operation "peek" or "consume"?
#[derive(Clone, Copy)]
pub enum | {
/// A read operation which advances the buffer, and reads from it.
ConsumingRead,
/// A read operation which reads from the buffer but doesn't advance it.
PeekingRead,
}
impl Default for ReadIsPeek {
fn default() -> Self {
ReadIsPeek::ConsumingRead
}
}
impl ReadIsPeek {
/// Return `PeekingRead` if the [`libc::MSG_PEEK`] bit is set in `flags`.
/// Otherwise, return `ConsumingRead`.
pub fn from_flags(flags: libc::c_int) -> Self {
if (flags & libc::MSG_PEEK) == 0 {
ReadIsPeek::ConsumingRead
} else {
ReadIsPeek::PeekingRead
}
}
}
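// A small sketch of the flag handling above: MSG_PEEK selects the peeking variant,
// anything without that bit is a consuming read.
#[cfg(test)]
mod read_is_peek_sketch {
    use super::ReadIsPeek;

    #[test]
    fn msg_peek_selects_peeking() {
        assert!(matches!(
            ReadIsPeek::from_flags(libc::MSG_PEEK),
            ReadIsPeek::PeekingRead
        ));
        assert!(matches!(
            ReadIsPeek::from_flags(0),
            ReadIsPeek::ConsumingRead
        ));
    }
}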
pub struct ReadState {
rewriter: Box<dyn IncomingRewriter + Send>,
// This buffer should be cleared across reads.
buffer: Vec<u8>,
output_buffer: Vec<u8>,
// This tracks the number of bytes that have been peeked-and-rewritten from the OS's data
// stream, but haven't been consumed by a non-peeking read. Note that because of
// `StreamChangeData` operations during peeking reads, this number can be different from
// `ReadState::rewritten_bytes.len()`.
already_peeked_bytes: usize,
// This buffer stores any rewritten bytes which have either been peeked, or didn't fit in the
// user's buffer and need to be saved for a future call to `rewrite_readv`.
rewritten_bytes: Vec<u8>,
}
impl ReadState {
pub fn new(rewriter: Box<dyn IncomingRewriter + Send>) -> Self {
Self {
rewriter,
buffer: Vec::with_capacity(1024 * 9),
output_buffer: Vec::with_capacity(1024),
already_peeked_bytes: 0,
rewritten_bytes: Vec::with_capacity(1024),
}
}
pub fn rewrite_readv<F>(
&mut self,
input_buffer_size: usize,
read_is_peek: ReadIsPeek,
mut do_read: F,
) -> Result<&[u8], isize>
where
F: FnMut(&mut [u8]) -> isize,
{
// We don't want to keep any data around from a previous call to this function.
self.buffer.clear();
self.output_buffer.clear();
// Size our internal read buffer to match the user-provided buffer.
self.buffer.resize(input_buffer_size, 0);
// Perform the provided read syscall. If we get an error, return immediately so we don't
// overwrite `errno`.
let mut bytes_read = match do_read(&mut self.buffer) {
i if i <= 0 => return Err(i),
i => i as usize, // safe coerce, since we know `i` is positive
};
debug_assert!(bytes_read > 0);
debug_assert!(self.buffer.len() >= bytes_read);
// Shrink the buffer down to the size of the data that was actually read by `do_read`. The
// size of the input `iovecs` could be larger than the amount of data returned by
// `do_read`.
self.buffer.truncate(bytes_read);
/* Run the rewriter. */
// We've already rewritten `self.already_peeked_bytes` bytes in the OS stream (due to
// previous peeking reads), and those bytes (in their un-rewritten state) were just read
// again from `do_read` into the start of `self.buffer`. We don't want to pass those bytes to
// the rewriter.
let start_rewriting_index = self.already_peeked_bytes.min(self.buffer.len());
let buffer_to_rewrite = &mut self.buffer[start_rewriting_index..];
// Run the rewriter on the portion of the buffer that hasn't been rewritten yet.
let mut stream_change_data = {
let start = std::time::Instant::now();
let stream_change_data = self.rewriter.incoming_rewrite(buffer_to_rewrite);
stallone::info!(
"INCOMING REWRITE DURATION",
duration: std::time::Duration = start.elapsed(),
bytes_rewritten: usize = buffer_to_rewrite.len(),
);
stream_change_data
};
// Apply the operations encoded in `stream_change_data`. The indices encoded in
// `stream_change_data` point inside of the buffer that was just rewritten, so we must
// offset them to appropriately point within `self.buffer`.
if let Some((relative_add_index, byte_to_insert)) = stream_change_data.add_byte {
let add_index = start_rewriting_index + relative_add_index;
stallone::debug!(
"Inserting byte into stream",
stream_change_data: StreamChangeData = stream_change_data,
start_rewriting_index: usize = start_rewriting_index,
add_index: usize = add_index,
);
self.buffer.insert(add_index, byte_to_insert);
if let Some(relative_remove_index) = stream_change_data.remove_byte.as_mut() {
// For how we use these fields with TLS 1.3, this invariant should always hold
// (since we remove a byte from the start of a TLS record, and add a byte to the
// end of a TLS record).
assert!(*relative_remove_index > relative_add_index);
// The original remove index is now stale since we inserted an extra byte into this
// stream. Move that index forward to reflect the byte we just added.
*relative_remove_index += 1;
}
}
if let Some(relative_remove_index) = stream_change_data.remove_byte {
let remove_index = start_rewriting_index + relative_remove_index;
stallone::debug!(
"Removing byte from stream",
stream_change_data: StreamChangeData = stream_change_data,
start_rewriting_index: usize = start_rewriting_index,
remove_index: usize = remove_index,
byte_to_remove: Option<&u8> = self.buffer.get(remove_index),
buffer: String = format!("{:02x?}", self.buffer),
// XXX It seems like this `buffer` doesn't match what I'm expecting from
// `mangle_application_data`
);
self.buffer.remove(remove_index);
}
// If the rewrite exhausted the buffer, that means we ran a remove `StreamChangeData`
// operation on a one-byte buffer. We can't return a zero-byte buffer, since the
// application will interpret that as a this-file-descriptor-is-closed message. So, we will
// manufacture an extra read-then-rewrite operation.
if self.buffer.is_empty() {
// The only way that `self.buffer` could be empty is if the only byte in the buffer was
// removed. That means this byte had to have just been run through the rewriter, since
// `StreamChangeData` can only operate on bytes that have been rewritten. This means
// `start_rewriting_index` had to be 0.
debug_assert_eq!(self.already_peeked_bytes, 0);
debug_assert_eq!(start_rewriting_index, 0);
// For a peeking read, we need to read past the single byte we just removed.
let fake_read_size = match read_is_peek {
ReadIsPeek::ConsumingRead => 1,
ReadIsPeek::PeekingRead => 2,
};
stallone::debug!(
"Calling do_read and the rewriter a second time",
fake_read_size: usize = fake_read_size,
);
self.buffer.resize(fake_read_size, 0);
let fake_bytes_read = match do_read(&mut self.buffer) {
i if i <= 0 => return Err(i),
i => i as usize, // safe coerce, since we know `i` is positive
};
if matches!(read_is_peek, ReadIsPeek::PeekingRead) {
// If this fails, then we were only able to peek the byte that was already removed
// from the stream, so we won't be able to return a byte.
assert_eq!(fake_bytes_read, fake_read_size);
// Remove the byte that we already peeked-and-rewrote-and-discarded from the
// stream.
self.buffer.remove(0);
}
// Update the number of bytes we've read from the OS.
bytes_read = match read_is_peek {
ReadIsPeek::ConsumingRead => bytes_read + fake_bytes_read,
ReadIsPeek::PeekingRead => fake_bytes_read,
};
// Call the rewriter again on the result of the fake read. Note that we can pass the
// entire `self.buffer`, since we know `start_rewriting_index` is 0, and we removed the
// redundant first byte in the peeking read case.
let fake_stream_change_data = self.rewriter.incoming_rewrite(&mut self.buffer);
stallone::debug!(
"Discarding fake StreamChangeData",
fake_stream_change_data: StreamChangeData = fake_stream_change_data,
);
debug_assert!(fake_stream_change_data.add_byte.is_none());
debug_assert!(fake_stream_change_data.remove_byte.is_none());
}
// After the above work, this should always be true.
debug_assert!(!self.buffer.is_empty());
self.already_peeked_bytes = match read_is_peek {
// If there were some already-peeked-and-rewritten bytes in the OS's data stream, then
// subtract from that the number of bytes that were just consumed from the OS's data
// stream.
ReadIsPeek::ConsumingRead => self.already_peeked_bytes.saturating_sub(bytes_read),
// If we just peeked more bytes from the OS's data stream, then update our counter of
// already-peeked-and-rewritten bytes.
ReadIsPeek::PeekingRead => self.already_peeked_bytes.max(bytes_read),
};
// We want to replace the bytes that we've previously peeked (AKA all the bytes in
// `self.buffer` that weren't passed to the rewriter) with the contents of
// `self.rewritten_bytes`. Naively, we could assume that's equal to
// `&self.buffer[..start_rewriting_index]`, since the `stream_change_data` operations above
// only operate on `self.buffer` after `start_rewriting_index`. However, previous
// `stream_change_data` operations on peeking reads invalidate that assumption. If a
// previous peeking read happened during a `stream_change_data` operation, then
// `self.rewritten_bytes` stores the peeked data _after_ that `stream_change_data` operation
// was applied, so the length of `self.rewritten_bytes` is unpredictable relative to
// `start_rewriting_index`.
//
// Instead, we'll use all of `self.rewritten_bytes`, and then append onto that all of the
// bytes that were just rewritten, and potentially had `stream_change_data` operations
// applied to them. This new buffer might be larger than the user-provided buffer.
//
// For consuming reads, we'll save all the newly-rewritten bytes that don't fit in the
// user-provided buffer in `self.rewritten_bytes`.
//
// For peeking reads, we'll save all the rewritten-and-`stream_change_data`-applied bytes
// in `self.rewritten_bytes`.
let just_rewritten_bytes = &self.buffer[start_rewriting_index..];
self.output_buffer.extend_from_slice(&self.rewritten_bytes);
self.output_buffer.extend_from_slice(just_rewritten_bytes);
// Note that we're using `input_buffer_size` here rather than `bytes_read`. If the OS returns
// less data than we are able to store in the user's buffer, then take advantage of that.
let output_size = self.output_buffer.len().min(input_buffer_size);
stallone::debug!(
"Preparing rewrite_readv result",
bytes_read: usize = bytes_read,
input_buffer_size: usize = input_buffer_size,
rewritten_bytes_len: usize = self.rewritten_bytes.len(),
just_rewritten_bytes_len: usize = just_rewritten_bytes.len(),
output_buffer_len: usize = self.output_buffer.len(),
output_size: usize = output_size,
);
match read_is_peek {
ReadIsPeek::ConsumingRead => {
// For a consuming read, get rid of all the previously-rewritten bytes that are
// about to be copied into `self.buffer`.
let rewritten_bytes_used = self.rewritten_bytes.len().min(output_size);
if rewritten_bytes_used > 0 {
stallone::debug!(
"Dropping previously-rewritten bytes that have been consumed",
rewritten_bytes_used: usize = rewritten_bytes_used,
);
}
std::mem::drop(self.rewritten_bytes.drain(..rewritten_bytes_used));
// Find the just-rewritten bytes that won't be returned to the user, and that we
// need to save. If we didn't rewrite anything, then of course this is empty. If we
// did some rewriting, then the `output_size` index splits `self.output_buffer` into two
// parts: the part we'll return to the user, and the part we need to save.
let just_rewritten_bytes_to_save = if just_rewritten_bytes.is_empty() {
&[]
} else {
&self.output_buffer[output_size..]
};
if !just_rewritten_bytes_to_save.is_empty() {
stallone::debug!(
"Saving just-rewritten bytes that don't fit in user buffer",
num_just_rewritten_bytes_to_save: usize =
just_rewritten_bytes_to_save.len(),
);
}
// Save all the just-rewritten bytes that won't fit in the user-provided
// buffer.
self.rewritten_bytes
.extend_from_slice(just_rewritten_bytes_to_save);
}
ReadIsPeek::PeekingRead => {
if !just_rewritten_bytes.is_empty() {
stallone::debug!(
"Saving just-rewritten bytes that were peeked",
num_just_rewritten_bytes: usize = just_rewritten_bytes.len(),
);
}
self.rewritten_bytes.extend_from_slice(just_rewritten_bytes);
}
}
Ok(&self.output_buffer[..output_size])
}
}
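// A minimal usage sketch (not part of the original module): `read_rewritten` is a
// hypothetical helper showing how `rewrite_readv` can wrap a raw `libc::read` for a
// consuming read; constructing the `ReadState`/rewriter and error handling are out of scope.
#[allow(dead_code)]
fn read_rewritten(read_state: &mut ReadState, fd: libc::c_int, user_buf: &mut [u8]) -> isize {
    match read_state.rewrite_readv(user_buf.len(), ReadIsPeek::ConsumingRead, |buf| unsafe {
        libc::read(fd, buf.as_mut_ptr() as *mut libc::c_void, buf.len())
    }) {
        Ok(rewritten) => {
            // The rewritten slice is never longer than `input_buffer_size`, so this copy is in bounds.
            user_buf[..rewritten.len()].copy_from_slice(rewritten);
            rewritten.len() as isize
        }
        // Pass the raw syscall return value through unchanged so errno is preserved.
        Err(ret) => ret,
    }
}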
| ReadIsPeek | identifier_name |
read_state.rs | use crate::{IncomingRewriter, StreamChangeData};
/// Is a given read operation "peek" or "consume"?
#[derive(Clone, Copy)]
pub enum ReadIsPeek {
/// A read operation which advances the buffer, and reads from it.
ConsumingRead,
/// A read operation which reads from the buffer but doesn't advance it.
PeekingRead,
}
impl Default for ReadIsPeek {
fn default() -> Self {
ReadIsPeek::ConsumingRead
}
}
impl ReadIsPeek {
/// Return `PeekingRead` if the [`libc::MSG_PEEK`] bit is set in `flags`.
/// Otherwise, return `ConsumingRead`.
pub fn from_flags(flags: libc::c_int) -> Self {
if (flags & libc::MSG_PEEK) == 0 {
ReadIsPeek::ConsumingRead
} else {
ReadIsPeek::PeekingRead
}
}
}
pub struct ReadState {
rewriter: Box<dyn IncomingRewriter + Send>,
// This buffer should be cleared across reads.
buffer: Vec<u8>,
output_buffer: Vec<u8>,
// This tracks the number of bytes that have been peeked-and-rewritten from the OS's data
// stream, but haven't been consumed by a non-peeking read. Note that because of
// `StreamChangeData` operations during peeking reads, this number can be different from
// `ReadState::rewritten_bytes.len()`.
already_peeked_bytes: usize,
// This buffer stores any rewritten bytes which have either been peeked, or didn't fit in the
// user's buffer and need to be saved for a future call to `rewrite_readv`.
rewritten_bytes: Vec<u8>,
}
impl ReadState {
pub fn new(rewriter: Box<dyn IncomingRewriter + Send>) -> Self {
Self {
rewriter,
buffer: Vec::with_capacity(1024 * 9),
output_buffer: Vec::with_capacity(1024),
already_peeked_bytes: 0,
rewritten_bytes: Vec::with_capacity(1024),
}
}
pub fn rewrite_readv<F>(
&mut self,
input_buffer_size: usize,
read_is_peek: ReadIsPeek,
mut do_read: F,
) -> Result<&[u8], isize>
where
F: FnMut(&mut [u8]) -> isize,
{
// We don't want to keep any data around from a previous call to this function.
self.buffer.clear();
self.output_buffer.clear();
// Size our internal read buffer to match the user-provided buffer.
self.buffer.resize(input_buffer_size, 0);
// Perform the provided read syscall. If we get an error, return immediately so we don't
// overwrite `errno`.
let mut bytes_read = match do_read(&mut self.buffer) {
i if i <= 0 => return Err(i),
i => i as usize, // safe coerce, since we know `i` is positive
};
debug_assert!(bytes_read > 0);
debug_assert!(self.buffer.len() >= bytes_read);
// Shrink the buffer down to the size of the data that was actually read by `do_read`. The
// size of the input `iovecs` could be larger than the amount of data returned by
// `do_read`.
self.buffer.truncate(bytes_read);
/* Run the rewriter. */
// We've already rewritten `self.already_peeked_bytes` bytes in the OS stream (due to
// previous peeking reads), and those bytes (in their un-rewritten state) were just read
// again from `do_read` into the start of `self.buffer`. We don't want to pass those bytes to
// the rewriter.
let start_rewriting_index = self.already_peeked_bytes.min(self.buffer.len());
let buffer_to_rewrite = &mut self.buffer[start_rewriting_index..];
// Run the rewriter on the portion of the buffer that hasn't been rewritten yet.
let mut stream_change_data = {
let start = std::time::Instant::now();
let stream_change_data = self.rewriter.incoming_rewrite(buffer_to_rewrite);
stallone::info!(
"INCOMING REWRITE DURATION",
duration: std::time::Duration = start.elapsed(),
bytes_rewritten: usize = buffer_to_rewrite.len(),
);
stream_change_data
};
// Apply the operations encoded in `stream_change_data`. The indices encoded in
// `stream_change_data` point inside of the buffer that was just rewritten, so we must
// offset them to appropriately point within `self.buffer`.
if let Some((relative_add_index, byte_to_insert)) = stream_change_data.add_byte {
let add_index = start_rewriting_index + relative_add_index;
stallone::debug!(
"Inserting byte into stream",
stream_change_data: StreamChangeData = stream_change_data,
start_rewriting_index: usize = start_rewriting_index,
add_index: usize = add_index,
);
self.buffer.insert(add_index, byte_to_insert);
if let Some(relative_remove_index) = stream_change_data.remove_byte.as_mut() {
// For how we use these fields with TLS 1.3, this invariant should always hold
// (since we remove a byte from the start of a TLS record, and add a byte to the
// end of a TLS record).
assert!(*relative_remove_index > relative_add_index);
// The original remove index is now stale since we inserted an extra byte into this
// stream. Move that index forward to reflect the byte we just added.
*relative_remove_index += 1;
}
}
if let Some(relative_remove_index) = stream_change_data.remove_byte {
let remove_index = start_rewriting_index + relative_remove_index;
stallone::debug!(
"Removing byte from stream",
stream_change_data: StreamChangeData = stream_change_data,
start_rewriting_index: usize = start_rewriting_index,
remove_index: usize = remove_index,
byte_to_remove: Option<&u8> = self.buffer.get(remove_index),
buffer: String = format!("{:02x?}", self.buffer),
// XXX It seems like this `buffer` doesn't match what I'm expecting from
// `mangle_application_data`
);
self.buffer.remove(remove_index);
}
// If the rewrite exhausted the buffer, that means we ran a remove `StreamChangeData`
// operation on a one-byte buffer. We can't return a zero-byte buffer, since the
// application will interpret that as a this-file-descriptor-is-closed message. So, we will
// manufacture an extra read-then-rewrite operation.
if self.buffer.is_empty() {
// The only way that `self.buffer` could be empty is if the only byte in the buffer was
// removed. That means this byte had to have just been run through the rewriter, since
// `StreamChangeData` can only operate on bytes that have been rewritten. This means
// `start_rewriting_index` had to be 0.
debug_assert_eq!(self.already_peeked_bytes, 0);
debug_assert_eq!(start_rewriting_index, 0);
// For a peeking read, we need to read past the single byte we just removed.
let fake_read_size = match read_is_peek {
ReadIsPeek::ConsumingRead => 1,
ReadIsPeek::PeekingRead => 2,
};
stallone::debug!(
"Calling do_read and the rewriter a second time",
fake_read_size: usize = fake_read_size,
);
self.buffer.resize(fake_read_size, 0);
let fake_bytes_read = match do_read(&mut self.buffer) {
i if i <= 0 => return Err(i),
i => i as usize, // safe coerce, since we know `i` is positive
};
if matches!(read_is_peek, ReadIsPeek::PeekingRead) {
// If this fails, then we were only able to peek the byte that was already removed
// from the stream, so we won't be able to return a byte.
assert_eq!(fake_bytes_read, fake_read_size);
// Remove the byte that we already peeked-and-rewrote-and-discarded from the
// stream.
self.buffer.remove(0);
}
// Update the number of bytes we've read from the OS.
bytes_read = match read_is_peek {
ReadIsPeek::ConsumingRead => bytes_read + fake_bytes_read,
ReadIsPeek::PeekingRead => fake_bytes_read,
};
// Call the rewriter again on the result of the fake read. Note that we can pass the
// entire `self.buffer`, since we know `start_rewriting_index` is 0, and we removed the
// redundant first byte in the peeking read case.
let fake_stream_change_data = self.rewriter.incoming_rewrite(&mut self.buffer);
stallone::debug!(
"Discarding fake StreamChangeData",
fake_stream_change_data: StreamChangeData = fake_stream_change_data,
);
debug_assert!(fake_stream_change_data.add_byte.is_none());
debug_assert!(fake_stream_change_data.remove_byte.is_none());
}
// After the above work, this should always be true.
debug_assert!(!self.buffer.is_empty());
self.already_peeked_bytes = match read_is_peek {
// If there were some already-peeked-and-rewritten bytes in the OS's data stream, then
// subtract from that the number of bytes that were just consumed from the OS's data
// stream.
ReadIsPeek::ConsumingRead => self.already_peeked_bytes.saturating_sub(bytes_read),
// If we just peeked more bytes from the OS's data stream, then update our counter of
// already-peeked-and-rewritten bytes.
ReadIsPeek::PeekingRead => self.already_peeked_bytes.max(bytes_read),
};
// We want to replace the bytes that we've previously peeked (AKA all the bytes in
// `self.buffer` that weren't passed to the rewriter) with the contents of
// `self.rewritten_bytes`. Naively, we could assume that's equal to
// `&self.buffer[..start_rewriting_index]`, since the `stream_change_data` operations above
// only operate on `self.buffer` after `start_rewriting_index`. However, previous
// `stream_change_data` operations on peeking reads invalidate that assumption. If a
// previous peeking read happened during a `stream_change_data` operation, then
// `self.rewritten_bytes` stores the peeked data _after_ that `stream_change_data` operation
// was applied, so the length of `self.rewritten_bytes` is unpredictable relative to
// `start_rewriting_index`.
//
// Instead, we'll use all of `self.rewritten_bytes`, and then append onto that all of the
// bytes that were just rewritten, and potentially had `stream_change_data` operations
// applied to them. This new buffer might be larger than the user-provided buffer.
//
// For consuming reads, we'll save all the newly-rewritten bytes that don't fit in the
// user-provided buffer in `self.rewritten_bytes`.
//
// For peeking reads, we'll save all the rewritten-and-`stream_change_data`-applied bytes
// in `self.rewritten_bytes`.
let just_rewritten_bytes = &self.buffer[start_rewriting_index..];
self.output_buffer.extend_from_slice(&self.rewritten_bytes);
self.output_buffer.extend_from_slice(just_rewritten_bytes);
// Note that we're using `input_buffer_size` here rather than `bytes_read`. If the OS returns
// less data than we are able to store in the user's buffer, then take advantage of that.
let output_size = self.output_buffer.len().min(input_buffer_size);
stallone::debug!(
"Preparing rewrite_readv result",
bytes_read: usize = bytes_read,
input_buffer_size: usize = input_buffer_size,
rewritten_bytes_len: usize = self.rewritten_bytes.len(),
just_rewritten_bytes_len: usize = just_rewritten_bytes.len(),
output_buffer_len: usize = self.output_buffer.len(),
output_size: usize = output_size,
);
match read_is_peek {
ReadIsPeek::ConsumingRead => {
// For a consuming read, get rid of all the previously-rewritten bytes that are
// about to be copied into `self.buffer`.
let rewritten_bytes_used = self.rewritten_bytes.len().min(output_size);
if rewritten_bytes_used > 0 {
stallone::debug!(
"Dropping previously-rewritten bytes that have been consumed",
rewritten_bytes_used: usize = rewritten_bytes_used,
);
}
std::mem::drop(self.rewritten_bytes.drain(..rewritten_bytes_used));
// Find the just-rewritten bytes that won't be returned to the user, and that we
// need to save. If we didn't rewrite anything, then of course this is empty. If we
// did some rewriting, then the `output_size` index splits `self.output_buffer` into two
// parts: the part we'll return to the user, and the part we need to save.
let just_rewritten_bytes_to_save = if just_rewritten_bytes.is_empty() {
&[]
} else {
&self.output_buffer[output_size..]
};
if !just_rewritten_bytes_to_save.is_empty() {
stallone::debug!(
"Saving just-rewritten bytes that don't fit in user buffer",
num_just_rewritten_bytes_to_save: usize =
just_rewritten_bytes_to_save.len(),
);
}
// Save all the just-rewritten bytes that won't fit in the user-provided
// buffer.
self.rewritten_bytes
.extend_from_slice(just_rewritten_bytes_to_save);
}
ReadIsPeek::PeekingRead => { | stallone::debug!(
"Saving just-rewritten bytes that were peeked",
num_just_rewritten_bytes: usize = just_rewritten_bytes.len(),
);
}
self.rewritten_bytes.extend_from_slice(just_rewritten_bytes);
}
}
Ok(&self.output_buffer[..output_size])
}
} | if !just_rewritten_bytes.is_empty() { | random_line_split |
read_state.rs | use crate::{IncomingRewriter, StreamChangeData};
/// Is a given read operation "peek" or "consume"?
#[derive(Clone, Copy)]
pub enum ReadIsPeek {
/// A read operation which advances the buffer, and reads from it.
ConsumingRead,
/// A read operation which reads from the buffer but doesn't advance it.
PeekingRead,
}
impl Default for ReadIsPeek {
fn default() -> Self |
}
impl ReadIsPeek {
/// Return `PeekingRead` if the [`libc::MSG_PEEK`] bit is set in `flags`.
/// Otherwise, return `ConsumingRead`.
pub fn from_flags(flags: libc::c_int) -> Self {
if (flags & libc::MSG_PEEK) == 0 {
ReadIsPeek::ConsumingRead
} else {
ReadIsPeek::PeekingRead
}
}
}
pub struct ReadState {
rewriter: Box<dyn IncomingRewriter + Send>,
// This buffer should be cleared across reads.
buffer: Vec<u8>,
output_buffer: Vec<u8>,
// This tracks the number of bytes that have been peeked-and-rewritten from the OS's data
// stream, but haven't been consumed by a non-peeking read. Note that because of
// `StreamChangeData` operations during peeking reads, this number can be different from
// `ReadState::rewritten_bytes.len()`.
already_peeked_bytes: usize,
// This buffer stores any rewritten bytes which have either been peeked, or didn't fit in the
// user's buffer and need to be saved for a future call to `rewrite_readv`.
rewritten_bytes: Vec<u8>,
}
impl ReadState {
pub fn new(rewriter: Box<dyn IncomingRewriter + Send>) -> Self {
Self {
rewriter,
buffer: Vec::with_capacity(1024 * 9),
output_buffer: Vec::with_capacity(1024),
already_peeked_bytes: 0,
rewritten_bytes: Vec::with_capacity(1024),
}
}
pub fn rewrite_readv<F>(
&mut self,
input_buffer_size: usize,
read_is_peek: ReadIsPeek,
mut do_read: F,
) -> Result<&[u8], isize>
where
F: FnMut(&mut [u8]) -> isize,
{
// We don't want to keep any data around from a previous call to this function.
self.buffer.clear();
self.output_buffer.clear();
// Size our internal read buffer to match the user-provided buffer.
self.buffer.resize(input_buffer_size, 0);
// Perform the provided read syscall. If we get an error, return immediately so we don't
// overwrite `errno`.
let mut bytes_read = match do_read(&mut self.buffer) {
i if i <= 0 => return Err(i),
i => i as usize, // safe coerce, since we know `i` is positive
};
debug_assert!(bytes_read > 0);
debug_assert!(self.buffer.len() >= bytes_read);
// Shrink the buffer down to the size of the data that was actually read by `do_read`. The
// size of the input `iovecs` could be larger than the amount of data returned by
// `do_read`.
self.buffer.truncate(bytes_read);
/* Run the rewriter. */
// We've already rewritten `self.already_peeked_bytes` bytes in the OS stream (due to
// previous peeking reads), and those bytes (in their un-rewritten state) were just read
// again from `do_read` into the start of `self.buffer`. We don't want to pass those bytes to
// the rewriter.
let start_rewriting_index = self.already_peeked_bytes.min(self.buffer.len());
let buffer_to_rewrite = &mut self.buffer[start_rewriting_index..];
// Run the rewriter on the portion of the buffer that hasn't been rewritten yet.
let mut stream_change_data = {
let start = std::time::Instant::now();
let stream_change_data = self.rewriter.incoming_rewrite(buffer_to_rewrite);
stallone::info!(
"INCOMING REWRITE DURATION",
duration: std::time::Duration = start.elapsed(),
bytes_rewritten: usize = buffer_to_rewrite.len(),
);
stream_change_data
};
// Apply the operations encoded in `stream_change_data`. The indices encoded in
// `stream_change_data` point inside of the buffer that was just rewritten, so we must
// offset them to appropriately point within `self.buffer`.
if let Some((relative_add_index, byte_to_insert)) = stream_change_data.add_byte {
let add_index = start_rewriting_index + relative_add_index;
stallone::debug!(
"Inserting byte into stream",
stream_change_data: StreamChangeData = stream_change_data,
start_rewriting_index: usize = start_rewriting_index,
add_index: usize = add_index,
);
self.buffer.insert(add_index, byte_to_insert);
if let Some(relative_remove_index) = stream_change_data.remove_byte.as_mut() {
// For how we use these fields with TLS 1.3, this invariant should always hold
// (since we remove a byte from the start of a TLS record, and add a byte to the
// end of a TLS record).
assert!(*relative_remove_index > relative_add_index);
// The original remove index is now stale since we inserted an extra byte into this
// stream. Move that index forward to reflect the byte we just added.
*relative_remove_index += 1;
}
}
if let Some(relative_remove_index) = stream_change_data.remove_byte {
let remove_index = start_rewriting_index + relative_remove_index;
stallone::debug!(
"Removing byte from stream",
stream_change_data: StreamChangeData = stream_change_data,
start_rewriting_index: usize = start_rewriting_index,
remove_index: usize = remove_index,
byte_to_remove: Option<&u8> = self.buffer.get(remove_index),
buffer: String = format!("{:02x?}", self.buffer),
// XXX It seems like this `buffer` doesn't match what I'm expecting from
// `mangle_application_data`
);
self.buffer.remove(remove_index);
}
// If the rewrite exhausted the buffer, that means we ran a remove `StreamChangeData`
// operation on a one-byte buffer. We can't return a zero-byte buffer, since the
// application will interpret that as a this-file-descriptor-is-closed message. So, we will
// manufacture an extra read-then-rewrite operation.
if self.buffer.is_empty() {
// The only way that `self.buffer` could be empty is if the only byte in the buffer was
// removed. That means this byte had to have just been run through the rewriter, since
// `StreamChangeData` can only operate on bytes that have been rewritten. This means
// `start_rewriting_index` had to be 0.
debug_assert_eq!(self.already_peeked_bytes, 0);
debug_assert_eq!(start_rewriting_index, 0);
// For a peeking read, we need to read past the single byte we just removed.
let fake_read_size = match read_is_peek {
ReadIsPeek::ConsumingRead => 1,
ReadIsPeek::PeekingRead => 2,
};
stallone::debug!(
"Calling do_read and the rewriter a second time",
fake_read_size: usize = fake_read_size,
);
self.buffer.resize(fake_read_size, 0);
let fake_bytes_read = match do_read(&mut self.buffer) {
i if i <= 0 => return Err(i),
i => i as usize, // safe coerce, since we know `i` is positive
};
if matches!(read_is_peek, ReadIsPeek::PeekingRead) {
// If this fails, then we were only able to peek the byte that was already removed
// from the stream, so we won't be able to return a byte.
assert_eq!(fake_bytes_read, fake_read_size);
// Remove the byte that we already peeked-and-rewrote-and-discarded from the
// stream.
self.buffer.remove(0);
}
// Update the number of bytes we've read from the OS.
bytes_read = match read_is_peek {
ReadIsPeek::ConsumingRead => bytes_read + fake_bytes_read,
ReadIsPeek::PeekingRead => fake_bytes_read,
};
// Call the rewriter again on the result of the fake read. Note that we can pass the
// entire `self.buffer`, since we know `start_rewriting_index` is 0, and we removed the
// redundant first byte in the peeking read case.
let fake_stream_change_data = self.rewriter.incoming_rewrite(&mut self.buffer);
stallone::debug!(
"Discarding fake StreamChangeData",
fake_stream_change_data: StreamChangeData = fake_stream_change_data,
);
debug_assert!(fake_stream_change_data.add_byte.is_none());
debug_assert!(fake_stream_change_data.remove_byte.is_none());
}
// After the above work, this should always be true.
debug_assert!(!self.buffer.is_empty());
self.already_peeked_bytes = match read_is_peek {
// If there were some already-peeked-and-rewritten bytes in the OS's data stream, then
// subtract from that the number of bytes that were just consumed from the OS's data
// stream.
ReadIsPeek::ConsumingRead => self.already_peeked_bytes.saturating_sub(bytes_read),
// If we just peeked more bytes from the OS's data stream, then update our counter of
// already-peeked-and-rewritten bytes.
ReadIsPeek::PeekingRead => self.already_peeked_bytes.max(bytes_read),
};
// We want to replace the bytes that we've previously peeked (AKA all the bytes in
// `self.buffer` that weren't passed to the rewriter) with the contents of
// `self.rewritten_bytes`. Naively, we could assume that's equal to
// `&self.buffer[..start_rewriting_index]`, since the `stream_change_data` operations above
// only operate on `self.buffer` after `start_rewriting_index`. However, previous
// `stream_change_data` operations on peeking reads invalidate that assumption. If a
// previous peeking read happened during a `stream_change_data` operation, then
// `self.rewritten_bytes` stores the peeked data _after_ that `stream_change_data` operation
// was applied, so the length of `self.rewritten_bytes` is unpredictable relative to
// `start_rewriting_index`.
//
// Instead, we'll use all of `self.rewritten_bytes`, and then append onto that all of the
// bytes that were just rewritten, and potentially had `stream_change_data` operations
// applied to them. This new buffer might be larger than the user-provided buffer.
//
// For consuming reads, we'll save all the newly-rewritten bytes that don't fit in the
// user-provided buffer in `self.rewritten_bytes`.
//
// For peeking reads, we'll save all the rewritten-and-`stream_change_data`-applied bytes
// in `self.rewritten_bytes`.
let just_rewritten_bytes = &self.buffer[start_rewriting_index..];
self.output_buffer.extend_from_slice(&self.rewritten_bytes);
self.output_buffer.extend_from_slice(just_rewritten_bytes);
// Note that we're using `input_buffer_size` here rather than `bytes_read`. If the OS returns
// less data than we are able to store in the user's buffer, then take advantage of that.
let output_size = self.output_buffer.len().min(input_buffer_size);
stallone::debug!(
"Preparing rewrite_readv result",
bytes_read: usize = bytes_read,
input_buffer_size: usize = input_buffer_size,
rewritten_bytes_len: usize = self.rewritten_bytes.len(),
just_rewritten_bytes_len: usize = just_rewritten_bytes.len(),
output_buffer_len: usize = self.output_buffer.len(),
output_size: usize = output_size,
);
match read_is_peek {
ReadIsPeek::ConsumingRead => {
// For a consuming read, get rid of all the previously-rewritten bytes that are
// about to be copied into `self.buffer`.
let rewritten_bytes_used = self.rewritten_bytes.len().min(output_size);
if rewritten_bytes_used > 0 {
stallone::debug!(
"Dropping previously-rewritten bytes that have been consumed",
rewritten_bytes_used: usize = rewritten_bytes_used,
);
}
std::mem::drop(self.rewritten_bytes.drain(..rewritten_bytes_used));
// Find the just-rewritten bytes that won't be returned to the user, and that we
// need to save. If we didn't rewrite anything, then of course this is empty. If we
// did some rewriting, then the `output_size` index splits `self.output_buffer` into two
// parts: the part we'll return to the user, and the part we need to save.
let just_rewritten_bytes_to_save = if just_rewritten_bytes.is_empty() {
&[]
} else {
&self.output_buffer[output_size..]
};
if !just_rewritten_bytes_to_save.is_empty() {
stallone::debug!(
"Saving just-rewritten bytes that don't fit in user buffer",
num_just_rewritten_bytes_to_save: usize =
just_rewritten_bytes_to_save.len(),
);
}
// Save all the just-rewritten bytes that won't fit in the user-provided
// buffer.
self.rewritten_bytes
.extend_from_slice(just_rewritten_bytes_to_save);
}
ReadIsPeek::PeekingRead => {
if !just_rewritten_bytes.is_empty() {
stallone::debug!(
"Saving just-rewritten bytes that were peeked",
num_just_rewritten_bytes: usize = just_rewritten_bytes.len(),
);
}
self.rewritten_bytes.extend_from_slice(just_rewritten_bytes);
}
}
Ok(&self.output_buffer[..output_size])
}
}
| {
ReadIsPeek::ConsumingRead
} | identifier_body |
read_state.rs | use crate::{IncomingRewriter, StreamChangeData};
/// Is a given read operation "peek" or "consume"?
#[derive(Clone, Copy)]
pub enum ReadIsPeek {
/// A read operation which advances the buffer, and reads from it.
ConsumingRead,
/// A read operation which reads from the buffer but doesn't advance it.
PeekingRead,
}
impl Default for ReadIsPeek {
fn default() -> Self {
ReadIsPeek::ConsumingRead
}
}
impl ReadIsPeek {
/// Return `PeekingRead` if the [`libc::MSG_PEEK`] bit is set in `flags`.
/// Otherwise, return `ConsumingRead`.
pub fn from_flags(flags: libc::c_int) -> Self {
if (flags & libc::MSG_PEEK) == 0 {
ReadIsPeek::ConsumingRead
} else {
ReadIsPeek::PeekingRead
}
}
}
pub struct ReadState {
rewriter: Box<dyn IncomingRewriter + Send>,
// This buffer should be cleared across reads.
buffer: Vec<u8>,
output_buffer: Vec<u8>,
// This tracks the number of bytes that have been peeked-and-rewritten from the OS's data
// stream, but haven't been consumed by a non-peeking read. Note that because of
// `StreamChangeData` operations during peeking reads, this number can be different from
// `ReadState::rewritten_bytes.len()`.
already_peeked_bytes: usize,
// This buffer stores any rewritten bytes which have either been peeked, or didn't fit in the
// user's buffer and need to be saved for a future call to `rewrite_readv`.
rewritten_bytes: Vec<u8>,
}
impl ReadState {
pub fn new(rewriter: Box<dyn IncomingRewriter + Send>) -> Self {
Self {
rewriter,
buffer: Vec::with_capacity(1024 * 9),
output_buffer: Vec::with_capacity(1024),
already_peeked_bytes: 0,
rewritten_bytes: Vec::with_capacity(1024),
}
}
pub fn rewrite_readv<F>(
&mut self,
input_buffer_size: usize,
read_is_peek: ReadIsPeek,
mut do_read: F,
) -> Result<&[u8], isize>
where
F: FnMut(&mut [u8]) -> isize,
{
// We don't want to keep any data around from a previous call to this function.
self.buffer.clear();
self.output_buffer.clear();
// Size our internal read buffer to match the user-provided buffer.
self.buffer.resize(input_buffer_size, 0);
// Perform the provided read syscall. If we get an error, return immediately so we don't
// overwrite `errno`.
let mut bytes_read = match do_read(&mut self.buffer) {
i if i <= 0 => return Err(i),
i => i as usize, // safe coerce, since we know `i` is positive
};
debug_assert!(bytes_read > 0);
debug_assert!(self.buffer.len() >= bytes_read);
// Shrink the buffer down to the size of the data that was actually read by `do_read`. The
// size of the input `iovecs` could be larger than the amount of data returned by
// `do_read`.
self.buffer.truncate(bytes_read);
/* Run the rewriter. */
// We've already rewritten `self.already_peeked_bytes` bytes in the OS stream (due to
// previous peeking reads), and those bytes (in their un-rewritten state) were just read
// again from `do_read` into the start of `self.buffer`. We don't want to pass those bytes to
// the rewriter.
let start_rewriting_index = self.already_peeked_bytes.min(self.buffer.len());
let buffer_to_rewrite = &mut self.buffer[start_rewriting_index..];
// Run the rewriter on the portion of the buffer that hasn't been rewritten yet.
let mut stream_change_data = {
let start = std::time::Instant::now();
let stream_change_data = self.rewriter.incoming_rewrite(buffer_to_rewrite);
stallone::info!(
"INCOMING REWRITE DURATION",
duration: std::time::Duration = start.elapsed(),
bytes_rewritten: usize = buffer_to_rewrite.len(),
);
stream_change_data
};
// Apply the operations encoded in `stream_change_data`. The indices encoded in
// `stream_change_data` point inside of the buffer that was just rewritten, so we must
// offset them to appropriately point within `self.buffer`.
if let Some((relative_add_index, byte_to_insert)) = stream_change_data.add_byte |
if let Some(relative_remove_index) = stream_change_data.remove_byte {
let remove_index = start_rewriting_index + relative_remove_index;
stallone::debug!(
"Removing byte from stream",
stream_change_data: StreamChangeData = stream_change_data,
start_rewriting_index: usize = start_rewriting_index,
remove_index: usize = remove_index,
byte_to_remove: Option<&u8> = self.buffer.get(remove_index),
buffer: String = format!("{:02x?}", self.buffer),
// XXX It seems like this `buffer` doesn't match what I'm expecting from
// `mangle_application_data`
);
self.buffer.remove(remove_index);
}
// If the rewrite exhausted the buffer, that means we ran a remove `StreamChangeData`
// operation on a one-byte buffer. We can't return a zero-byte buffer, since the
// application will interpret that as a this-file-descriptor-is-closed message. So, we will
// manufacture an extra read-then-rewrite operation.
if self.buffer.is_empty() {
// The only way that `self.buffer` could be empty is if the only byte in the buffer was
// removed. That means this byte had to have just been run through the rewriter, since
// `StreamChangeData` can only operate on bytes that have been rewritten. This means
// `start_rewriting_index` had to be 0.
debug_assert_eq!(self.already_peeked_bytes, 0);
debug_assert_eq!(start_rewriting_index, 0);
// For a peeking read, we need to read past the single byte we just removed.
let fake_read_size = match read_is_peek {
ReadIsPeek::ConsumingRead => 1,
ReadIsPeek::PeekingRead => 2,
};
stallone::debug!(
"Calling do_read and the rewriter a second time",
fake_read_size: usize = fake_read_size,
);
self.buffer.resize(fake_read_size, 0);
let fake_bytes_read = match do_read(&mut self.buffer) {
i if i <= 0 => return Err(i),
i => i as usize, // safe coerce, since we know `i` is positive
};
if matches!(read_is_peek, ReadIsPeek::PeekingRead) {
// If this fails, then we were only able to peek the byte that was already removed
// from the stream, so we won't be able to return a byte.
assert_eq!(fake_bytes_read, fake_read_size);
// Remove the byte that we already peeked-and-rewrote-and-discarded from the
// stream.
self.buffer.remove(0);
}
// Update the number of bytes we've read from the OS.
bytes_read = match read_is_peek {
ReadIsPeek::ConsumingRead => bytes_read + fake_bytes_read,
ReadIsPeek::PeekingRead => fake_bytes_read,
};
// Call the rewriter again on the result of the fake read. Note that we can pass the
// entire `self.buffer`, since we know `start_rewriting_index` is 0, and we removed the
// redundant first byte in the peeking read case.
let fake_stream_change_data = self.rewriter.incoming_rewrite(&mut self.buffer);
stallone::debug!(
"Discarding fake StreamChangeData",
fake_stream_change_data: StreamChangeData = fake_stream_change_data,
);
debug_assert!(fake_stream_change_data.add_byte.is_none());
debug_assert!(fake_stream_change_data.remove_byte.is_none());
}
// After the above work, this should always be true.
debug_assert!(!self.buffer.is_empty());
self.already_peeked_bytes = match read_is_peek {
// If there were some already-peeked-and-rewritten bytes in the OS's data stream, then
// subtract from that the number of bytes that were just consumed from the OS's data
// stream.
ReadIsPeek::ConsumingRead => self.already_peeked_bytes.saturating_sub(bytes_read),
// If we just peeked more bytes from the OS's data stream, then update our counter of
// already-peeked-and-rewritten bytes.
ReadIsPeek::PeekingRead => self.already_peeked_bytes.max(bytes_read),
};
// We want to replace the bytes that we've previously peeked (AKA all the bytes in
// `self.buffer` that weren't passed to the rewriter) with the contents of
// `self.rewritten_bytes`. Naively, we could assume that's equal to
// `&self.buffer[..start_rewriting_index]`, since the `stream_change_data` operations above
// only operate on `self.buffer` after `start_rewriting_index`. However, previous
// `stream_change_data` operations on peeking reads invalidate that assumption. If a
// previous peeking read happened during a `stream_change_data` operation, then
// `self.rewritten_bytes` stores the peeked data _after_ that `stream_change_data` operation
// was applied, so the length of `self.rewritten_bytes` is unpredictable relative to
// `start_rewriting_index`.
//
// Instead, we'll use all of `self.rewritten_bytes`, and then append onto that all of the
// bytes that were just rewritten, and potentially had `stream_change_data` operations
// applied to them. This new buffer might be larger than the user-provided buffer.
//
// For consuming reads, we'll save all the newly-rewritten bytes that don't fit in the
// user-provided buffer in `self.rewritten_bytes`.
//
// For peeking reads, we'll save all the rewritten-and-`stream_change_data`-applied bytes
// in `self.rewritten_bytes`.
let just_rewritten_bytes = &self.buffer[start_rewriting_index..];
self.output_buffer.extend_from_slice(&self.rewritten_bytes);
self.output_buffer.extend_from_slice(just_rewritten_bytes);
// Note that we're using `input_buffer_size` here rather than `bytes_read`. If the OS returns
// less data than we are able to store in the user's buffer, then take advantage of that.
let output_size = self.output_buffer.len().min(input_buffer_size);
stallone::debug!(
"Preparing rewrite_readv result",
bytes_read: usize = bytes_read,
input_buffer_size: usize = input_buffer_size,
rewritten_bytes_len: usize = self.rewritten_bytes.len(),
just_rewritten_bytes_len: usize = just_rewritten_bytes.len(),
output_buffer_len: usize = self.output_buffer.len(),
output_size: usize = output_size,
);
match read_is_peek {
ReadIsPeek::ConsumingRead => {
// For a consuming read, get rid of all the previously-rewritten bytes that are
// about to be copied into `self.buffer`.
let rewritten_bytes_used = self.rewritten_bytes.len().min(output_size);
if rewritten_bytes_used > 0 {
stallone::debug!(
"Dropping previously-rewritten bytes that have been consumed",
rewritten_bytes_used: usize = rewritten_bytes_used,
);
}
std::mem::drop(self.rewritten_bytes.drain(..rewritten_bytes_used));
// Find the just-rewritten bytes that won't be returned to the user, and that we
// need to save. If we didn't rewrite anything, then of course this is empty. If we
// did some rewriting, then the `output_size` index splits `self.output_buffer` into two
// parts: the part we'll return to the user, and the part we need to save.
let just_rewritten_bytes_to_save = if just_rewritten_bytes.is_empty() {
&[]
} else {
&self.output_buffer[output_size..]
};
if !just_rewritten_bytes_to_save.is_empty() {
stallone::debug!(
"Saving just-rewritten bytes that don't fit in user buffer",
num_just_rewritten_bytes_to_save: usize =
just_rewritten_bytes_to_save.len(),
);
}
// Save all the just-rewritten bytes that won't fit in the user-provided
// buffer.
self.rewritten_bytes
.extend_from_slice(just_rewritten_bytes_to_save);
}
ReadIsPeek::PeekingRead => {
if !just_rewritten_bytes.is_empty() {
stallone::debug!(
"Saving just-rewritten bytes that were peeked",
num_just_rewritten_bytes: usize = just_rewritten_bytes.len(),
);
}
self.rewritten_bytes.extend_from_slice(just_rewritten_bytes);
}
}
Ok(&self.output_buffer[..output_size])
}
}
| {
let add_index = start_rewriting_index + relative_add_index;
stallone::debug!(
"Inserting byte into stream",
stream_change_data: StreamChangeData = stream_change_data,
start_rewriting_index: usize = start_rewriting_index,
add_index: usize = add_index,
);
self.buffer.insert(add_index, byte_to_insert);
if let Some(relative_remove_index) = stream_change_data.remove_byte.as_mut() {
// For how we use these fields with TLS 1.3, this invariant should always hold
// (since we remove a byte from the start of a TLS record, and add a byte to the
// end of a TLS record).
assert!(*relative_remove_index > relative_add_index);
// The original remove index is now stale since we inserted an extra byte into this
// stream. Move that index forward to reflect the byte we just added.
*relative_remove_index += 1;
}
} | conditional_block |
demuxer.rs | use crate::error::*;
use crate::buffer::Buffered;
use std::any::Any;
use std::io::SeekFrom;
use std::sync::Arc;
use crate::common::*;
use crate::data::packet::Packet;
use crate::stream::Stream;
/// Events processed by a demuxer analyzing a source.
#[non_exhaustive]
#[derive(Clone, Debug)]
pub enum Event {
/// A new packet is found by a demuxer.
NewPacket(Packet),
/// A new stream is found by a demuxer.
NewStream(Stream),
/// More data are needed by a demuxer to complete its operations.
MoreDataNeeded(usize),
/// Event not processable by a demuxer.
///
/// Demux the next event.
Continue,
/// End of File.
///
/// Stop demuxing data.
Eof,
}
/// Used to implement demuxing operations.
pub trait Demuxer: Send + Sync {
/// Reads stream headers and global information from a data structure
/// implementing the `Buffered` trait.
///
/// Global information is saved into a `GlobalInfo` structure.
fn read_headers(&mut self, buf: &mut dyn Buffered, info: &mut GlobalInfo) -> Result<SeekFrom>;
/// Reads an event from a data structure implementing the `Buffered` trait.
fn read_event(&mut self, buf: &mut dyn Buffered) -> Result<(SeekFrom, Event)>;
}
/// Auxiliary structure to encapsulate a demuxer object and
/// its additional data.
pub struct Context<D: Demuxer, R: Buffered> {
demuxer: D,
reader: R,
/// Global media file information.
pub info: GlobalInfo,
/// User private data.
///
/// This data cannot be cloned.
pub user_private: Option<Arc<dyn Any + Send + Sync>>,
}
impl<D: Demuxer, R: Buffered> Context<D, R> {
/// Creates a new `Context` instance.
pub fn new(demuxer: D, reader: R) -> Self {
Context {
demuxer,
reader,
info: GlobalInfo {
duration: None,
timebase: None,
streams: Vec::with_capacity(2),
},
user_private: None,
}
}
/// Returns the underlying demuxer.
pub fn demuxer(&self) -> &D {
&self.demuxer
}
fn read_headers_internal(&mut self) -> Result<()> {
let demux = &mut self.demuxer;
let res = demux.read_headers(&mut self.reader, &mut self.info);
match res {
Err(e) => Err(e),
Ok(seek) => {
//TODO: handle seeking here
let res = self.reader.seek(seek);
log::trace!("stream now at index: {:?}", res);
Ok(())
}
}
}
/// Reads stream headers and global information from a data source.
pub fn read_headers(&mut self) -> Result<()> {
loop {
// TODO: wrap fill_buf() with a check for Eof
self.reader.fill_buf()?;
match self.read_headers_internal() {
Err(e) => match e {
Error::MoreDataNeeded(needed) => {
self.reader.grow(needed);
}
_ => return Err(e),
},
Ok(_) => return Ok(()),
}
}
}
fn read_event_internal(&mut self) -> Result<Event> {
let demux = &mut self.demuxer;
let res = demux.read_event(&mut self.reader);
match res {
Err(e) => Err(e),
Ok((seek, mut event)) => {
//TODO: handle seeking here
let _ = self.reader.seek(seek)?;
if let Event::NewStream(ref st) = event {
self.info.streams.push(st.clone());
}
if let Event::MoreDataNeeded(size) = event {
return Err(Error::MoreDataNeeded(size));
}
if let Event::NewPacket(ref mut pkt) = event {
if pkt.t.timebase.is_none() {
if let Some(st) = self
.info
.streams
.iter()
.find(|s| s.index as isize == pkt.stream_index)
{
pkt.t.timebase = Some(st.timebase);
}
}
}
Ok(event)
}
}
}
/// Reads an event from a data source.
pub fn read_event(&mut self) -> Result<Event> {
// TODO: guard against infinite loops and maybe factor the loop.
loop {
match self.read_event_internal() {
Err(e) => match e {
Error::MoreDataNeeded(needed) => {
let len = self.reader.data().len();
// we might have sent MoreDataNeeded(0) to request a new call
if len >= needed {
continue;
}
self.reader.grow(needed);
self.reader.fill_buf()?;
if self.reader.data().len() <= len {
return Ok(Event::Eof);
}
}
_ => return Err(e),
},
Ok(ev) => return Ok(ev),
}
}
}
}
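// A minimal sketch of a demuxing loop over `Context`, assuming the caller has
// already picked a concrete demuxer and reader; the helper name `demux_all` is
// an assumption, while the `Context` methods and `Event` variants are the ones
// defined above.
fn demux_all<D: Demuxer, R: Buffered>(mut ctx: Context<D, R>) -> Result<Vec<Packet>> {
let mut packets = Vec::new();
// Populate `ctx.info` (streams, duration, timebase) before demuxing packets.
ctx.read_headers()?;
loop {
match ctx.read_event()? {
Event::NewPacket(pkt) => packets.push(pkt),
// New streams are recorded in `ctx.info` by `read_event` itself.
Event::NewStream(_) | Event::Continue => {}
// `read_event` grows its buffer internally, so this is not expected here.
Event::MoreDataNeeded(_) => {}
Event::Eof => break,
}
}
Ok(packets)
}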
/// Format descriptor.
///
/// Contains information on a format and its own demuxer.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Descr {
/// Format name.
pub name: &'static str,
/// Demuxer name.
pub demuxer: &'static str,
/// Format description.
pub description: &'static str,
/// Format media file extensions.
pub extensions: &'static [&'static str],
/// Format MIME.
pub mime: &'static [&'static str],
}
/// Used to get a format descriptor and create a new demuxer.
pub trait Descriptor {
/// The specific type of the demuxer.
type OutputDemuxer: Demuxer;
/// Creates a new demuxer for the requested format.
fn create(&self) -> Self::OutputDemuxer;
/// Returns the descriptor of a format.
fn describe(&self) -> &Descr;
/// Returns a score which represents how strongly the input data is associated
/// with a format.
fn probe(&self, data: &[u8]) -> u8;
}
/// Maximum data size to probe a format.
pub const PROBE_DATA: usize = 4 * 1024;
/// Data whose probe score is equal to or greater than the value of this constant
/// is certainly associated with the format currently being analyzed.
pub const PROBE_SCORE_EXTENSION: u8 = 50;
/// Used to define different ways to probe a format.
pub trait Probe<T: Descriptor + ?Sized> {
/// Probes whether the input data is associated with a given format.
fn probe(&self, data: &[u8]) -> Option<&'static T>;
}
impl<T: Descriptor + ?Sized> Probe<T> for [&'static T] {
fn probe(&self, data: &[u8]) -> Option<&'static T> {
let mut max = u8::min_value();
let mut candidate: Option<&'static T> = None;
for desc in self {
let score = desc.probe(data);
if score > max {
max = score;
candidate = Some(*desc);
}
}
if max > PROBE_SCORE_EXTENSION {
candidate
} else |
}
}
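// A minimal sketch of probing an input against a list of format descriptors,
// limiting the probe window to `PROBE_DATA` bytes as suggested above; the
// helper name `guess_format` is an assumption made for illustration.
fn guess_format<T: Descriptor + ?Sized>(
descriptors: &[&'static T],
input: &[u8],
) -> Option<&'static T> {
// Only the leading bytes take part in format detection.
let window = &input[..input.len().min(PROBE_DATA)];
descriptors.probe(window)
}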
#[cfg(test)]
mod test {
use super::*;
use crate::data::packet::Packet;
use std::io::SeekFrom;
struct DummyDes {
d: Descr,
}
struct DummyDemuxer {}
impl Demuxer for DummyDemuxer {
fn read_headers(
&mut self,
buf: &mut dyn Buffered,
_info: &mut GlobalInfo,
) -> Result<SeekFrom> {
let len = buf.data().len();
if 9 > len {
let needed = 9 - len;
Err(Error::MoreDataNeeded(needed))
} else {
Ok(SeekFrom::Current(9))
}
}
fn read_event(&mut self, buf: &mut dyn Buffered) -> Result<(SeekFrom, Event)> {
let size = 2;
let len = buf.data().len();
if size > len {
Err(Error::MoreDataNeeded(size - len))
} else {
log::debug!("{:?}", buf.data());
match &buf.data()[..2] {
b"p1" => Ok((SeekFrom::Current(3), Event::NewPacket(Packet::new()))),
b"e1" => Ok((SeekFrom::Current(3), Event::MoreDataNeeded(0))),
_ => Err(Error::InvalidData),
}
}
}
}
impl Descriptor for DummyDes {
type OutputDemuxer = DummyDemuxer;
fn create(&self) -> Self::OutputDemuxer {
DummyDemuxer {}
}
fn describe<'a>(&'_ self) -> &'_ Descr {
&self.d
}
fn probe(&self, data: &[u8]) -> u8 {
match data {
b"dummy" => 100,
_ => 0,
}
}
}
const DUMMY_DES: &dyn Descriptor<OutputDemuxer = DummyDemuxer> = &DummyDes {
d: Descr {
name: "dummy",
demuxer: "dummy",
description: "Dummy dem",
extensions: &["dm", "dum"],
mime: &["application/dummy"],
},
};
#[test]
fn probe() {
let demuxers: &[&'static dyn Descriptor<OutputDemuxer = DummyDemuxer>] = &[DUMMY_DES];
demuxers.probe(b"dummy").unwrap();
}
use crate::buffer::*;
use std::io::Cursor;
#[test]
fn read_headers() {
let buf = b"dummy header";
let r = AccReader::with_capacity(4, Cursor::new(buf));
let d = DUMMY_DES.create();
let mut c = Context::new(d, r);
c.read_headers().unwrap();
}
#[test]
fn read_event() {
let buf = b"dummy header p1 e1 p1 ";
let r = AccReader::with_capacity(4, Cursor::new(buf));
let d = DUMMY_DES.create();
let mut c = Context::new(d, r);
c.read_headers().unwrap();
println!("{:?}", c.read_event());
println!("{:?}", c.read_event());
println!("{:?}", c.read_event());
println!("{:?}", c.read_event());
}
}
| {
None
} | conditional_block |
demuxer.rs | use crate::error::*;
use crate::buffer::Buffered;
use std::any::Any;
use std::io::SeekFrom;
use std::sync::Arc;
use crate::common::*;
use crate::data::packet::Packet;
use crate::stream::Stream;
/// Events processed by a demuxer analyzing a source.
#[non_exhaustive]
#[derive(Clone, Debug)]
pub enum Event {
/// A new packet is found by a demuxer.
NewPacket(Packet),
/// A new stream is found by a demuxer.
NewStream(Stream),
/// More data are needed by a demuxer to complete its operations.
MoreDataNeeded(usize),
/// Event not processable by a demuxer.
///
/// Demux the next event.
Continue,
/// End of File.
///
/// Stop demuxing data.
Eof,
}
/// Used to implement demuxing operations.
pub trait Demuxer: Send + Sync {
/// Reads stream headers and global information from a data structure
/// implementing the `Buffered` trait.
///
/// Global information is saved into a `GlobalInfo` structure.
fn read_headers(&mut self, buf: &mut dyn Buffered, info: &mut GlobalInfo) -> Result<SeekFrom>;
/// Reads an event from a data structure implementing the `Buffered` trait.
fn read_event(&mut self, buf: &mut dyn Buffered) -> Result<(SeekFrom, Event)>;
}
/// Auxiliary structure to encapsulate a demuxer object and
/// its additional data.
pub struct Context<D: Demuxer, R: Buffered> {
demuxer: D,
reader: R,
/// Global media file information.
pub info: GlobalInfo,
/// User private data.
///
/// This data cannot be cloned.
pub user_private: Option<Arc<dyn Any + Send + Sync>>,
}
impl<D: Demuxer, R: Buffered> Context<D, R> {
/// Creates a new `Context` instance.
pub fn new(demuxer: D, reader: R) -> Self {
Context {
demuxer,
reader,
info: GlobalInfo {
duration: None,
timebase: None,
streams: Vec::with_capacity(2),
},
user_private: None,
}
}
/// Returns the underlying demuxer.
pub fn demuxer(&self) -> &D {
&self.demuxer
}
fn read_headers_internal(&mut self) -> Result<()> {
let demux = &mut self.demuxer;
let res = demux.read_headers(&mut self.reader, &mut self.info);
match res {
Err(e) => Err(e),
Ok(seek) => {
//TODO: handle seeking here
let res = self.reader.seek(seek);
log::trace!("stream now at index: {:?}", res);
Ok(())
}
}
}
/// Reads stream headers and global information from a data source.
pub fn read_headers(&mut self) -> Result<()> {
loop {
// TODO: wrap fill_buf() with a check for Eof
self.reader.fill_buf()?;
match self.read_headers_internal() {
Err(e) => match e {
Error::MoreDataNeeded(needed) => {
self.reader.grow(needed);
}
_ => return Err(e),
},
Ok(_) => return Ok(()),
}
}
}
fn read_event_internal(&mut self) -> Result<Event> {
let demux = &mut self.demuxer;
let res = demux.read_event(&mut self.reader);
match res {
Err(e) => Err(e),
Ok((seek, mut event)) => {
//TODO: handle seeking here
let _ = self.reader.seek(seek)?;
if let Event::NewStream(ref st) = event {
self.info.streams.push(st.clone());
}
if let Event::MoreDataNeeded(size) = event {
return Err(Error::MoreDataNeeded(size));
}
if let Event::NewPacket(ref mut pkt) = event {
if pkt.t.timebase.is_none() {
if let Some(st) = self
.info
.streams
.iter()
.find(|s| s.index as isize == pkt.stream_index)
{
pkt.t.timebase = Some(st.timebase);
}
}
}
Ok(event)
}
}
}
/// Reads an event from a data source.
pub fn read_event(&mut self) -> Result<Event> {
// TODO: guard against infinite loops and maybe factor the loop.
loop {
match self.read_event_internal() {
Err(e) => match e {
Error::MoreDataNeeded(needed) => {
let len = self.reader.data().len();
// we might have sent MoreDataNeeded(0) to request a new call
if len >= needed {
continue;
}
self.reader.grow(needed);
self.reader.fill_buf()?;
if self.reader.data().len() <= len {
return Ok(Event::Eof);
}
}
_ => return Err(e),
},
Ok(ev) => return Ok(ev),
}
}
}
}
/// Format descriptor.
///
/// Contains information on a format and its own demuxer.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Descr {
/// Format name.
pub name: &'static str,
/// Demuxer name.
pub demuxer: &'static str,
/// Format description.
pub description: &'static str,
/// Format media file extensions.
pub extensions: &'static [&'static str],
/// Format MIME.
pub mime: &'static [&'static str],
}
/// Used to get a format descriptor and create a new demuxer.
pub trait Descriptor {
/// The specific type of the demuxer.
type OutputDemuxer: Demuxer;
/// Creates a new demuxer for the requested format.
fn create(&self) -> Self::OutputDemuxer;
/// Returns the descriptor of a format.
fn describe(&self) -> &Descr;
/// Returns a score which represents how strongly the input data is associated
/// with a format.
fn probe(&self, data: &[u8]) -> u8;
}
/// Maximum data size to probe a format.
pub const PROBE_DATA: usize = 4 * 1024;
/// Data whose probe score is equal to or greater than the value of this constant
/// is certainly associated with the format currently being analyzed.
pub const PROBE_SCORE_EXTENSION: u8 = 50;
/// Used to define different ways to probe a format.
pub trait Probe<T: Descriptor + ?Sized> {
/// Probes whether the input data is associated with a given format.
fn probe(&self, data: &[u8]) -> Option<&'static T>;
}
impl<T: Descriptor + ?Sized> Probe<T> for [&'static T] {
fn probe(&self, data: &[u8]) -> Option<&'static T> {
let mut max = u8::min_value();
let mut candidate: Option<&'static T> = None;
for desc in self {
let score = desc.probe(data);
if score > max {
max = score;
candidate = Some(*desc);
}
}
if max > PROBE_SCORE_EXTENSION {
candidate
} else {
None
}
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::data::packet::Packet;
use std::io::SeekFrom;
struct | {
d: Descr,
}
struct DummyDemuxer {}
impl Demuxer for DummyDemuxer {
fn read_headers(
&mut self,
buf: &mut dyn Buffered,
_info: &mut GlobalInfo,
) -> Result<SeekFrom> {
let len = buf.data().len();
if 9 > len {
let needed = 9 - len;
Err(Error::MoreDataNeeded(needed))
} else {
Ok(SeekFrom::Current(9))
}
}
fn read_event(&mut self, buf: &mut dyn Buffered) -> Result<(SeekFrom, Event)> {
let size = 2;
let len = buf.data().len();
if size > len {
Err(Error::MoreDataNeeded(size - len))
} else {
log::debug!("{:?}", buf.data());
match &buf.data()[..2] {
b"p1" => Ok((SeekFrom::Current(3), Event::NewPacket(Packet::new()))),
b"e1" => Ok((SeekFrom::Current(3), Event::MoreDataNeeded(0))),
_ => Err(Error::InvalidData),
}
}
}
}
impl Descriptor for DummyDes {
type OutputDemuxer = DummyDemuxer;
fn create(&self) -> Self::OutputDemuxer {
DummyDemuxer {}
}
fn describe<'a>(&'_ self) -> &'_ Descr {
&self.d
}
fn probe(&self, data: &[u8]) -> u8 {
match data {
b"dummy" => 100,
_ => 0,
}
}
}
const DUMMY_DES: &dyn Descriptor<OutputDemuxer = DummyDemuxer> = &DummyDes {
d: Descr {
name: "dummy",
demuxer: "dummy",
description: "Dummy dem",
extensions: &["dm", "dum"],
mime: &["application/dummy"],
},
};
#[test]
fn probe() {
let demuxers: &[&'static dyn Descriptor<OutputDemuxer = DummyDemuxer>] = &[DUMMY_DES];
demuxers.probe(b"dummy").unwrap();
}
use crate::buffer::*;
use std::io::Cursor;
#[test]
fn read_headers() {
let buf = b"dummy header";
let r = AccReader::with_capacity(4, Cursor::new(buf));
let d = DUMMY_DES.create();
let mut c = Context::new(d, r);
c.read_headers().unwrap();
}
#[test]
fn read_event() {
let buf = b"dummy header p1 e1 p1 ";
let r = AccReader::with_capacity(4, Cursor::new(buf));
let d = DUMMY_DES.create();
let mut c = Context::new(d, r);
c.read_headers().unwrap();
println!("{:?}", c.read_event());
println!("{:?}", c.read_event());
println!("{:?}", c.read_event());
println!("{:?}", c.read_event());
}
}
| DummyDes | identifier_name |
demuxer.rs | use crate::error::*;
use crate::buffer::Buffered;
use std::any::Any;
use std::io::SeekFrom;
use std::sync::Arc;
use crate::common::*;
use crate::data::packet::Packet;
use crate::stream::Stream;
/// Events processed by a demuxer analyzing a source.
#[non_exhaustive]
#[derive(Clone, Debug)]
pub enum Event {
/// A new packet is found by a demuxer.
NewPacket(Packet),
/// A new stream is found by a demuxer.
NewStream(Stream),
/// More data are needed by a demuxer to complete its operations.
MoreDataNeeded(usize),
/// Event not processable by a demuxer.
///
/// Demux the next event.
Continue,
/// End of File.
///
/// Stop demuxing data.
Eof,
}
/// Used to implement demuxing operations.
pub trait Demuxer: Send + Sync {
/// Reads stream headers and global information from a data structure
/// implementing the `Buffered` trait.
///
/// Global information is saved into a `GlobalInfo` structure.
fn read_headers(&mut self, buf: &mut dyn Buffered, info: &mut GlobalInfo) -> Result<SeekFrom>;
/// Reads an event from a data structure implementing the `Buffered` trait.
fn read_event(&mut self, buf: &mut dyn Buffered) -> Result<(SeekFrom, Event)>;
}
/// Auxiliary structure to encapsulate a demuxer object and
/// its additional data.
pub struct Context<D: Demuxer, R: Buffered> {
demuxer: D,
reader: R,
/// Global media file information.
pub info: GlobalInfo,
/// User private data.
///
/// This data cannot be cloned.
pub user_private: Option<Arc<dyn Any + Send + Sync>>,
}
impl<D: Demuxer, R: Buffered> Context<D, R> {
/// Creates a new `Context` instance.
pub fn new(demuxer: D, reader: R) -> Self {
Context {
demuxer,
reader,
info: GlobalInfo {
duration: None,
timebase: None,
streams: Vec::with_capacity(2),
},
user_private: None,
}
}
/// Returns the underlying demuxer.
pub fn demuxer(&self) -> &D {
&self.demuxer
}
fn read_headers_internal(&mut self) -> Result<()> {
let demux = &mut self.demuxer;
let res = demux.read_headers(&mut self.reader, &mut self.info);
match res {
Err(e) => Err(e),
Ok(seek) => {
//TODO: handle seeking here
let res = self.reader.seek(seek);
log::trace!("stream now at index: {:?}", res);
Ok(())
}
}
}
/// Reads stream headers and global information from a data source.
pub fn read_headers(&mut self) -> Result<()> {
loop {
// TODO: wrap fill_buf() with a check for Eof
self.reader.fill_buf()?;
match self.read_headers_internal() {
Err(e) => match e {
Error::MoreDataNeeded(needed) => {
self.reader.grow(needed);
}
_ => return Err(e),
},
Ok(_) => return Ok(()),
}
}
}
fn read_event_internal(&mut self) -> Result<Event> {
let demux = &mut self.demuxer;
let res = demux.read_event(&mut self.reader);
match res {
Err(e) => Err(e),
Ok((seek, mut event)) => {
//TODO: handle seeking here
let _ = self.reader.seek(seek)?;
if let Event::NewStream(ref st) = event { | }
if let Event::MoreDataNeeded(size) = event {
return Err(Error::MoreDataNeeded(size));
}
if let Event::NewPacket(ref mut pkt) = event {
if pkt.t.timebase.is_none() {
if let Some(st) = self
.info
.streams
.iter()
.find(|s| s.index as isize == pkt.stream_index)
{
pkt.t.timebase = Some(st.timebase);
}
}
}
Ok(event)
}
}
}
/// Reads an event from a data source.
pub fn read_event(&mut self) -> Result<Event> {
// TODO: guard against infinite loops and maybe factor the loop.
loop {
match self.read_event_internal() {
Err(e) => match e {
Error::MoreDataNeeded(needed) => {
let len = self.reader.data().len();
// we might have sent MoreDataNeeded(0) to request a new call
if len >= needed {
continue;
}
self.reader.grow(needed);
self.reader.fill_buf()?;
if self.reader.data().len() <= len {
return Ok(Event::Eof);
}
}
_ => return Err(e),
},
Ok(ev) => return Ok(ev),
}
}
}
}
/// Format descriptor.
///
/// Contains information on a format and its own demuxer.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Descr {
/// Format name.
pub name: &'static str,
/// Demuxer name.
pub demuxer: &'static str,
/// Format description.
pub description: &'static str,
/// Format media file extensions.
pub extensions: &'static [&'static str],
/// Format MIME.
pub mime: &'static [&'static str],
}
/// Used to get a format descriptor and create a new demuxer.
pub trait Descriptor {
/// The specific type of the demuxer.
type OutputDemuxer: Demuxer;
/// Creates a new demuxer for the requested format.
fn create(&self) -> Self::OutputDemuxer;
/// Returns the descriptor of a format.
fn describe(&self) -> &Descr;
/// Returns a score which represents how strongly the input data is associated
/// with a format.
fn probe(&self, data: &[u8]) -> u8;
}
/// Maximum data size to probe a format.
pub const PROBE_DATA: usize = 4 * 1024;
/// Data whose probe score is equal to or greater than the value of this constant
/// is certainly associated with the format currently being analyzed.
pub const PROBE_SCORE_EXTENSION: u8 = 50;
/// Used to define different ways to probe a format.
pub trait Probe<T: Descriptor + ?Sized> {
/// Probes whether the input data is associated with a given format.
fn probe(&self, data: &[u8]) -> Option<&'static T>;
}
impl<T: Descriptor + ?Sized> Probe<T> for [&'static T] {
fn probe(&self, data: &[u8]) -> Option<&'static T> {
let mut max = u8::min_value();
let mut candidate: Option<&'static T> = None;
for desc in self {
let score = desc.probe(data);
if score > max {
max = score;
candidate = Some(*desc);
}
}
if max > PROBE_SCORE_EXTENSION {
candidate
} else {
None
}
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::data::packet::Packet;
use std::io::SeekFrom;
struct DummyDes {
d: Descr,
}
struct DummyDemuxer {}
impl Demuxer for DummyDemuxer {
fn read_headers(
&mut self,
buf: &mut dyn Buffered,
_info: &mut GlobalInfo,
) -> Result<SeekFrom> {
let len = buf.data().len();
if 9 > len {
let needed = 9 - len;
Err(Error::MoreDataNeeded(needed))
} else {
Ok(SeekFrom::Current(9))
}
}
fn read_event(&mut self, buf: &mut dyn Buffered) -> Result<(SeekFrom, Event)> {
let size = 2;
let len = buf.data().len();
if size > len {
Err(Error::MoreDataNeeded(size - len))
} else {
log::debug!("{:?}", buf.data());
match &buf.data()[..2] {
b"p1" => Ok((SeekFrom::Current(3), Event::NewPacket(Packet::new()))),
b"e1" => Ok((SeekFrom::Current(3), Event::MoreDataNeeded(0))),
_ => Err(Error::InvalidData),
}
}
}
}
impl Descriptor for DummyDes {
type OutputDemuxer = DummyDemuxer;
fn create(&self) -> Self::OutputDemuxer {
DummyDemuxer {}
}
fn describe<'a>(&'_ self) -> &'_ Descr {
&self.d
}
fn probe(&self, data: &[u8]) -> u8 {
match data {
b"dummy" => 100,
_ => 0,
}
}
}
const DUMMY_DES: &dyn Descriptor<OutputDemuxer = DummyDemuxer> = &DummyDes {
d: Descr {
name: "dummy",
demuxer: "dummy",
description: "Dummy dem",
extensions: &["dm", "dum"],
mime: &["application/dummy"],
},
};
#[test]
fn probe() {
let demuxers: &[&'static dyn Descriptor<OutputDemuxer = DummyDemuxer>] = &[DUMMY_DES];
demuxers.probe(b"dummy").unwrap();
}
use crate::buffer::*;
use std::io::Cursor;
#[test]
fn read_headers() {
let buf = b"dummy header";
let r = AccReader::with_capacity(4, Cursor::new(buf));
let d = DUMMY_DES.create();
let mut c = Context::new(d, r);
c.read_headers().unwrap();
}
#[test]
fn read_event() {
let buf = b"dummy header p1 e1 p1 ";
let r = AccReader::with_capacity(4, Cursor::new(buf));
let d = DUMMY_DES.create();
let mut c = Context::new(d, r);
c.read_headers().unwrap();
println!("{:?}", c.read_event());
println!("{:?}", c.read_event());
println!("{:?}", c.read_event());
println!("{:?}", c.read_event());
}
} | self.info.streams.push(st.clone()); | random_line_split |
demuxer.rs | use crate::error::*;
use crate::buffer::Buffered;
use std::any::Any;
use std::io::SeekFrom;
use std::sync::Arc;
use crate::common::*;
use crate::data::packet::Packet;
use crate::stream::Stream;
/// Events processed by a demuxer analyzing a source.
#[non_exhaustive]
#[derive(Clone, Debug)]
pub enum Event {
/// A new packet is found by a demuxer.
NewPacket(Packet),
/// A new stream is found by a demuxer.
NewStream(Stream),
/// More data are needed by a demuxer to complete its operations.
MoreDataNeeded(usize),
/// Event not processable by a demuxer.
///
/// Demux the next event.
Continue,
/// End of File.
///
/// Stop demuxing data.
Eof,
}
/// Used to implement demuxing operations.
pub trait Demuxer: Send + Sync {
/// Reads stream headers and global information from a data structure
/// implementing the `Buffered` trait.
///
/// Global information is saved into a `GlobalInfo` structure.
fn read_headers(&mut self, buf: &mut dyn Buffered, info: &mut GlobalInfo) -> Result<SeekFrom>;
/// Reads an event from a data structure implementing the `Buffered` trait.
fn read_event(&mut self, buf: &mut dyn Buffered) -> Result<(SeekFrom, Event)>;
}
/// Auxiliary structure to encapsulate a demuxer object and
/// its additional data.
pub struct Context<D: Demuxer, R: Buffered> {
demuxer: D,
reader: R,
/// Global media file information.
pub info: GlobalInfo,
/// User private data.
///
/// This data cannot be cloned.
pub user_private: Option<Arc<dyn Any + Send + Sync>>,
}
impl<D: Demuxer, R: Buffered> Context<D, R> {
/// Creates a new `Context` instance.
pub fn new(demuxer: D, reader: R) -> Self {
Context {
demuxer,
reader,
info: GlobalInfo {
duration: None,
timebase: None,
streams: Vec::with_capacity(2),
},
user_private: None,
}
}
/// Returns the underlying demuxer.
pub fn demuxer(&self) -> &D {
&self.demuxer
}
fn read_headers_internal(&mut self) -> Result<()> {
let demux = &mut self.demuxer;
let res = demux.read_headers(&mut self.reader, &mut self.info);
match res {
Err(e) => Err(e),
Ok(seek) => {
//TODO: handle seeking here
let res = self.reader.seek(seek);
log::trace!("stream now at index: {:?}", res);
Ok(())
}
}
}
/// Reads stream headers and global information from a data source.
pub fn read_headers(&mut self) -> Result<()> {
loop {
// TODO: wrap fill_buf() with a check for Eof
self.reader.fill_buf()?;
match self.read_headers_internal() {
Err(e) => match e {
Error::MoreDataNeeded(needed) => {
self.reader.grow(needed);
}
_ => return Err(e),
},
Ok(_) => return Ok(()),
}
}
}
fn read_event_internal(&mut self) -> Result<Event> {
let demux = &mut self.demuxer;
let res = demux.read_event(&mut self.reader);
match res {
Err(e) => Err(e),
Ok((seek, mut event)) => {
//TODO: handle seeking here
let _ = self.reader.seek(seek)?;
if let Event::NewStream(ref st) = event {
self.info.streams.push(st.clone());
}
if let Event::MoreDataNeeded(size) = event {
return Err(Error::MoreDataNeeded(size));
}
if let Event::NewPacket(ref mut pkt) = event {
if pkt.t.timebase.is_none() {
if let Some(st) = self
.info
.streams
.iter()
.find(|s| s.index as isize == pkt.stream_index)
{
pkt.t.timebase = Some(st.timebase);
}
}
}
Ok(event)
}
}
}
/// Reads an event from a data source.
pub fn read_event(&mut self) -> Result<Event> {
// TODO: guard against infinite loops and maybe factor the loop.
loop {
match self.read_event_internal() {
Err(e) => match e {
Error::MoreDataNeeded(needed) => {
let len = self.reader.data().len();
// we might have sent MoreDataNeeded(0) to request a new call
if len >= needed {
continue;
}
self.reader.grow(needed);
self.reader.fill_buf()?;
if self.reader.data().len() <= len {
return Ok(Event::Eof);
}
}
_ => return Err(e),
},
Ok(ev) => return Ok(ev),
}
}
}
}
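// A minimal usage sketch of `Context` (illustrative only, assuming a concrete `Demuxer`
// implementation and a `Buffered` reader such as an `AccReader` over a file): read the
// headers once, then pull events until `Eof` is reported.
#[allow(dead_code)]
fn demux_all<D: Demuxer, R: Buffered>(demuxer: D, reader: R) -> Result<usize> {
    let mut ctx = Context::new(demuxer, reader);
    ctx.read_headers()?;
    let mut packets = 0usize;
    loop {
        match ctx.read_event()? {
            // A real consumer would hand the packet to a decoder; here we only count it.
            Event::NewPacket(_) => packets += 1,
            // New streams are already recorded in `ctx.info` by `read_event` itself.
            Event::NewStream(_) => {}
            Event::Eof => return Ok(packets),
            _ => {}
        }
    }
}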
/// Format descriptor.
///
/// Contains information on a format and its own demuxer.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Descr {
/// Format name.
pub name: &'static str,
/// Demuxer name.
pub demuxer: &'static str,
/// Format description.
pub description: &'static str,
/// Format media file extensions.
pub extensions: &'static [&'static str],
/// Format MIME.
pub mime: &'static [&'static str],
}
/// Used to get a format descriptor and create a new demuxer.
pub trait Descriptor {
/// The specific type of the demuxer.
type OutputDemuxer: Demuxer;
/// Creates a new demuxer for the requested format.
fn create(&self) -> Self::OutputDemuxer;
/// Returns the descriptor of a format.
fn describe(&self) -> &Descr;
/// Returns a score which represents how much the input data are associated
/// to a format.
fn probe(&self, data: &[u8]) -> u8;
}
/// Maximum data size to probe a format.
pub const PROBE_DATA: usize = 4 * 1024;
/// Data whose probe score is equal or greater than the value of this constant
/// surely is associated to the format currently being analyzed.
pub const PROBE_SCORE_EXTENSION: u8 = 50;
/// Used to define different ways to probe a format.
pub trait Probe<T: Descriptor + ?Sized> {
/// Probes whether the input data is associated to a determined format.
fn probe(&self, data: &[u8]) -> Option<&'static T>;
}
impl<T: Descriptor + ?Sized> Probe<T> for [&'static T] {
fn probe(&self, data: &[u8]) -> Option<&'static T> {
let mut max = u8::min_value();
let mut candidate: Option<&'static T> = None;
for desc in self {
let score = desc.probe(data);
if score > max {
max = score;
candidate = Some(*desc);
}
}
if max > PROBE_SCORE_EXTENSION {
candidate
} else {
None
}
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::data::packet::Packet;
use std::io::SeekFrom;
struct DummyDes {
d: Descr,
}
struct DummyDemuxer {}
impl Demuxer for DummyDemuxer {
fn read_headers(
&mut self,
buf: &mut dyn Buffered,
_info: &mut GlobalInfo,
) -> Result<SeekFrom> {
let len = buf.data().len();
if 9 > len {
let needed = 9 - len;
Err(Error::MoreDataNeeded(needed))
} else {
Ok(SeekFrom::Current(9))
}
}
fn read_event(&mut self, buf: &mut dyn Buffered) -> Result<(SeekFrom, Event)> {
let size = 2;
let len = buf.data().len();
if size > len {
Err(Error::MoreDataNeeded(size - len))
} else {
log::debug!("{:?}", buf.data());
match &buf.data()[..2] {
b"p1" => Ok((SeekFrom::Current(3), Event::NewPacket(Packet::new()))),
b"e1" => Ok((SeekFrom::Current(3), Event::MoreDataNeeded(0))),
_ => Err(Error::InvalidData),
}
}
}
}
impl Descriptor for DummyDes {
type OutputDemuxer = DummyDemuxer;
fn create(&self) -> Self::OutputDemuxer {
DummyDemuxer {}
}
fn describe<'a>(&'_ self) -> &'_ Descr {
&self.d
}
fn probe(&self, data: &[u8]) -> u8 {
match data {
b"dummy" => 100,
_ => 0,
}
}
}
const DUMMY_DES: &dyn Descriptor<OutputDemuxer = DummyDemuxer> = &DummyDes {
d: Descr {
name: "dummy",
demuxer: "dummy",
description: "Dummy dem",
extensions: &["dm", "dum"],
mime: &["application/dummy"],
},
};
#[test]
fn probe() |
use crate::buffer::*;
use std::io::Cursor;
#[test]
fn read_headers() {
let buf = b"dummy header";
let r = AccReader::with_capacity(4, Cursor::new(buf));
let d = DUMMY_DES.create();
let mut c = Context::new(d, r);
c.read_headers().unwrap();
}
#[test]
fn read_event() {
let buf = b"dummy header p1 e1 p1 ";
let r = AccReader::with_capacity(4, Cursor::new(buf));
let d = DUMMY_DES.create();
let mut c = Context::new(d, r);
c.read_headers().unwrap();
println!("{:?}", c.read_event());
println!("{:?}", c.read_event());
println!("{:?}", c.read_event());
println!("{:?}", c.read_event());
}
}
| {
let demuxers: &[&'static dyn Descriptor<OutputDemuxer = DummyDemuxer>] = &[DUMMY_DES];
demuxers.probe(b"dummy").unwrap();
} | identifier_body |
db_transaction.rs | // Copyright 2019. The Tari Project
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
// following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
// disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
// following disclaimer in the documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
use crate::{
blocks::{blockheader::BlockHash, Block, BlockHeader},
proof_of_work::Difficulty,
transactions::{
transaction::{TransactionInput, TransactionKernel, TransactionOutput},
types::HashOutput,
},
};
use serde::{Deserialize, Serialize};
use std::fmt::{Display, Error, Formatter};
use strum_macros::Display;
use tari_crypto::tari_utilities::{hex::to_hex, Hashable};
#[derive(Debug)]
pub struct DbTransaction {
pub operations: Vec<WriteOperation>,
}
impl Display for DbTransaction {
fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), std::fmt::Error> {
fmt.write_str("Db transaction: \n")?;
for write_op in &self.operations {
fmt.write_str(&format!("{}\n", write_op))?;
}
Ok(())
}
}
impl Default for DbTransaction {
fn default() -> Self {
DbTransaction {
operations: Vec::with_capacity(128),
}
}
}
impl DbTransaction {
/// Creates a new Database transaction. To commit the transactions call [BlockchainDatabase::execute] with the
/// transaction as a parameter.
pub fn new() -> Self {
DbTransaction::default()
}
/// A general insert request. There are convenience functions for specific insert queries.
pub fn insert(&mut self, insert: DbKeyValuePair) {
self.operations.push(WriteOperation::Insert(insert));
}
/// A general delete request. There are convenience functions for specific delete queries.
pub fn delete(&mut self, delete: DbKey) {
self.operations.push(WriteOperation::Delete(delete));
}
/// Inserts a transaction kernel into the current transaction.
pub fn insert_kernel(&mut self, kernel: TransactionKernel, update_mmr: bool) {
let hash = kernel.hash();
self.insert(DbKeyValuePair::TransactionKernel(hash, Box::new(kernel), update_mmr));
}
/// Inserts a block header into the current transaction.
pub fn insert_header(&mut self, header: BlockHeader) {
let height = header.height;
self.insert(DbKeyValuePair::BlockHeader(height, Box::new(header)));
}
/// Adds a UTXO into the current transaction and updates the TXO MMR.
pub fn insert_utxo(&mut self, utxo: TransactionOutput, update_mmr: bool) {
let hash = utxo.hash();
self.insert(DbKeyValuePair::UnspentOutput(hash, Box::new(utxo), update_mmr));
}
/// Adds a UTXO into the current transaction and updates the TXO MMR. This is a test-only function used to ensure we
/// block duplicate entries. This function does not calculate the hash itself but accepts one as a parameter.
pub fn insert_utxo_with_hash(&mut self, hash: Vec<u8>, utxo: TransactionOutput, update_mmr: bool) {
self.insert(DbKeyValuePair::UnspentOutput(hash, Box::new(utxo), update_mmr));
}
/// Stores an orphan block. No checks are made as to whether this is actually an orphan. That responsibility lies
/// with the calling function.
pub fn insert_orphan(&mut self, orphan: Block) {
let hash = orphan.hash();
self.insert(DbKeyValuePair::OrphanBlock(hash, Box::new(orphan)));
}
/// Moves a UTXO to the STXO set and marks it as spent on the MMR. If the UTXO is not in the UTXO set, the
/// transaction will fail with an `UnspendableOutput` error.
pub fn spend_utxo(&mut self, utxo_hash: HashOutput) {
self.operations
.push(WriteOperation::Spend(DbKey::UnspentOutput(utxo_hash)));
}
/// Moves a STXO to the UTXO set. If the STXO is not in the STXO set, the transaction will fail with an
/// `UnspendError`.
// TODO: unspend_utxo in memory_db doesn't unmark the node in the roaring bitmap.0
pub fn unspend_stxo(&mut self, stxo_hash: HashOutput) {
self.operations
.push(WriteOperation::UnSpend(DbKey::SpentOutput(stxo_hash)));
}
/// Moves the given set of transaction inputs from the UTXO set to the STXO set. All the inputs *must* currently
/// exist in the UTXO set, or the transaction will error with `ChainStorageError::UnspendableOutput`
pub fn spend_inputs(&mut self, inputs: &[TransactionInput]) {
for input in inputs {
let input_hash = input.hash();
self.spend_utxo(input_hash);
}
}
/// Adds a marker operation that allows the database to perform any additional work after adding a new block to
/// the database.
pub fn commit_block(&mut self) {
self.operations
.push(WriteOperation::CreateMmrCheckpoint(MmrTree::Kernel));
self.operations.push(WriteOperation::CreateMmrCheckpoint(MmrTree::Utxo));
self.operations
.push(WriteOperation::CreateMmrCheckpoint(MmrTree::RangeProof));
}
/// Set the horizon beyond which we cannot be guaranteed to provide detailed blockchain information anymore.
/// A value of zero indicates that no pruning should be carried out at all. That is, this state should act as an
/// archival node.
///
/// This operation just sets the new horizon value. No pruning is done at this point.
pub fn set_pruning_horizon(&mut self, new_pruning_horizon: u64) {
self.operations.push(WriteOperation::Insert(DbKeyValuePair::Metadata(
MetadataKey::PruningHorizon,
MetadataValue::PruningHorizon(new_pruning_horizon),
)));
}
/// Rewinds the Kernel MMR state by the given number of Checkpoints.
pub fn rewind_kernel_mmr(&mut self, steps_back: usize) {
self.operations
.push(WriteOperation::RewindMmr(MmrTree::Kernel, steps_back));
}
/// Rewinds the UTXO MMR state by the given number of Checkpoints.
pub fn rewind_utxo_mmr(&mut self, steps_back: usize) {
self.operations
.push(WriteOperation::RewindMmr(MmrTree::Utxo, steps_back));
}
/// Rewinds the RangeProof MMR state by the given number of Checkpoints.
pub fn rewind_rp_mmr(&mut self, steps_back: usize) {
self.operations
.push(WriteOperation::RewindMmr(MmrTree::RangeProof, steps_back));
}
}
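// An illustrative sketch (not part of the original API) of staging a complete block in a single
// transaction; `header`, `outputs` and `kernels` are assumed to come from an already-validated
// block, and the returned `DbTransaction` would then be handed to the backend for execution.
#[allow(dead_code)]
fn stage_block(
    header: BlockHeader,
    outputs: Vec<TransactionOutput>,
    kernels: Vec<TransactionKernel>,
) -> DbTransaction {
    let mut txn = DbTransaction::new();
    txn.insert_header(header);
    for utxo in outputs {
        // `true` asks the backend to also update the UTXO MMR for each output.
        txn.insert_utxo(utxo, true);
    }
    for kernel in kernels {
        txn.insert_kernel(kernel, true);
    }
    // Record the MMR checkpoints that mark the end of this block.
    txn.commit_block();
    txn
}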
#[derive(Debug, Display)]
pub enum WriteOperation {
Insert(DbKeyValuePair),
Delete(DbKey),
Spend(DbKey),
UnSpend(DbKey),
CreateMmrCheckpoint(MmrTree),
RewindMmr(MmrTree, usize),
}
/// A list of key-value pairs that are required for each insert operation
#[derive(Debug)]
pub enum DbKeyValuePair {
Metadata(MetadataKey, MetadataValue),
BlockHeader(u64, Box<BlockHeader>),
UnspentOutput(HashOutput, Box<TransactionOutput>, bool),
TransactionKernel(HashOutput, Box<TransactionKernel>, bool),
OrphanBlock(HashOutput, Box<Block>),
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum MmrTree {
Utxo,
Kernel,
RangeProof,
}
#[derive(Debug, Clone, PartialEq)]
pub enum MetadataKey {
ChainHeight,
BestBlock,
AccumulatedWork,
PruningHorizon,
}
#[derive(Debug, Clone, Deserialize, Serialize)]
pub enum MetadataValue {
ChainHeight(Option<u64>),
BestBlock(Option<BlockHash>),
AccumulatedWork(Option<Difficulty>),
PruningHorizon(u64),
}
#[derive(Debug, Clone, PartialEq)]
pub enum DbKey {
Metadata(MetadataKey),
BlockHeader(u64),
BlockHash(BlockHash),
UnspentOutput(HashOutput),
SpentOutput(HashOutput),
TransactionKernel(HashOutput),
OrphanBlock(HashOutput),
}
#[derive(Debug)]
pub enum DbValue {
Metadata(MetadataValue),
BlockHeader(Box<BlockHeader>),
BlockHash(Box<BlockHeader>),
UnspentOutput(Box<TransactionOutput>),
SpentOutput(Box<TransactionOutput>),
TransactionKernel(Box<TransactionKernel>),
OrphanBlock(Box<Block>),
}
impl Display for DbValue {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
match self {
DbValue::Metadata(MetadataValue::ChainHeight(_)) => f.write_str("Current chain height"),
DbValue::Metadata(MetadataValue::AccumulatedWork(_)) => f.write_str("Total accumulated work"),
DbValue::Metadata(MetadataValue::PruningHorizon(_)) => f.write_str("Pruning horizon"),
DbValue::Metadata(MetadataValue::BestBlock(_)) => f.write_str("Chain tip block hash"),
DbValue::BlockHeader(_) => f.write_str("Block header"),
DbValue::BlockHash(_) => f.write_str("Block hash"),
DbValue::UnspentOutput(_) => f.write_str("Unspent output"),
DbValue::SpentOutput(_) => f.write_str("Spent output"),
DbValue::TransactionKernel(_) => f.write_str("Transaction kernel"),
DbValue::OrphanBlock(_) => f.write_str("Orphan block"),
}
} | impl Display for DbKey {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
match self {
DbKey::Metadata(MetadataKey::ChainHeight) => f.write_str("Current chain height"),
DbKey::Metadata(MetadataKey::AccumulatedWork) => f.write_str("Total accumulated work"),
DbKey::Metadata(MetadataKey::PruningHorizon) => f.write_str("Pruning horizon"),
DbKey::Metadata(MetadataKey::BestBlock) => f.write_str("Chain tip block hash"),
DbKey::BlockHeader(v) => f.write_str(&format!("Block header (#{})", v)),
DbKey::BlockHash(v) => f.write_str(&format!("Block hash (#{})", to_hex(v))),
DbKey::UnspentOutput(v) => f.write_str(&format!("Unspent output ({})", to_hex(v))),
DbKey::SpentOutput(v) => f.write_str(&format!("Spent output ({})", to_hex(v))),
DbKey::TransactionKernel(v) => f.write_str(&format!("Transaction kernel ({})", to_hex(v))),
DbKey::OrphanBlock(v) => f.write_str(&format!("Orphan block hash ({})", to_hex(v))),
}
}
}
impl Display for MmrTree {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
match self {
MmrTree::RangeProof => f.write_str("Range Proof"),
MmrTree::Utxo => f.write_str("UTXO"),
MmrTree::Kernel => f.write_str("Kernel"),
}
}
} | }
| random_line_split |
db_transaction.rs | // Copyright 2019. The Tari Project
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
// following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
// disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
// following disclaimer in the documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
use crate::{
blocks::{blockheader::BlockHash, Block, BlockHeader},
proof_of_work::Difficulty,
transactions::{
transaction::{TransactionInput, TransactionKernel, TransactionOutput},
types::HashOutput,
},
};
use serde::{Deserialize, Serialize};
use std::fmt::{Display, Error, Formatter};
use strum_macros::Display;
use tari_crypto::tari_utilities::{hex::to_hex, Hashable};
#[derive(Debug)]
pub struct DbTransaction {
pub operations: Vec<WriteOperation>,
}
impl Display for DbTransaction {
fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), std::fmt::Error> {
fmt.write_str("Db transaction: \n")?;
for write_op in &self.operations {
fmt.write_str(&format!("{}\n", write_op))?;
}
Ok(())
}
}
impl Default for DbTransaction {
fn default() -> Self {
DbTransaction {
operations: Vec::with_capacity(128),
}
}
}
impl DbTransaction {
/// Creates a new Database transaction. To commit the transactions call [BlockchainDatabase::execute] with the
/// transaction as a parameter.
pub fn new() -> Self {
DbTransaction::default()
}
/// A general insert request. There are convenience functions for specific insert queries.
pub fn insert(&mut self, insert: DbKeyValuePair) {
self.operations.push(WriteOperation::Insert(insert));
}
/// A general delete request. There are convenience functions for specific delete queries.
pub fn | (&mut self, delete: DbKey) {
self.operations.push(WriteOperation::Delete(delete));
}
/// Inserts a transaction kernel into the current transaction.
pub fn insert_kernel(&mut self, kernel: TransactionKernel, update_mmr: bool) {
let hash = kernel.hash();
self.insert(DbKeyValuePair::TransactionKernel(hash, Box::new(kernel), update_mmr));
}
/// Inserts a block header into the current transaction.
pub fn insert_header(&mut self, header: BlockHeader) {
let height = header.height;
self.insert(DbKeyValuePair::BlockHeader(height, Box::new(header)));
}
/// Adds a UTXO into the current transaction and updates the TXO MMR.
pub fn insert_utxo(&mut self, utxo: TransactionOutput, update_mmr: bool) {
let hash = utxo.hash();
self.insert(DbKeyValuePair::UnspentOutput(hash, Box::new(utxo), update_mmr));
}
/// Adds a UTXO into the current transaction and updates the TXO MMR. This is a test-only function used to ensure we
/// block duplicate entries. This function does not calculate the hash itself but accepts one as a parameter.
pub fn insert_utxo_with_hash(&mut self, hash: Vec<u8>, utxo: TransactionOutput, update_mmr: bool) {
self.insert(DbKeyValuePair::UnspentOutput(hash, Box::new(utxo), update_mmr));
}
/// Stores an orphan block. No checks are made as to whether this is actually an orphan. That responsibility lies
/// with the calling function.
pub fn insert_orphan(&mut self, orphan: Block) {
let hash = orphan.hash();
self.insert(DbKeyValuePair::OrphanBlock(hash, Box::new(orphan)));
}
/// Moves a UTXO to the STXO set and marks it as spent on the MMR. If the UTXO is not in the UTXO set, the
/// transaction will fail with an `UnspendableOutput` error.
pub fn spend_utxo(&mut self, utxo_hash: HashOutput) {
self.operations
.push(WriteOperation::Spend(DbKey::UnspentOutput(utxo_hash)));
}
/// Moves a STXO to the UTXO set. If the STXO is not in the STXO set, the transaction will fail with an
/// `UnspendError`.
// TODO: unspend_utxo in memory_db doesn't unmark the node in the roaring bitmap.0
pub fn unspend_stxo(&mut self, stxo_hash: HashOutput) {
self.operations
.push(WriteOperation::UnSpend(DbKey::SpentOutput(stxo_hash)));
}
/// Moves the given set of transaction inputs from the UTXO set to the STXO set. All the inputs *must* currently
/// exist in the UTXO set, or the transaction will error with `ChainStorageError::UnspendableOutput`
pub fn spend_inputs(&mut self, inputs: &[TransactionInput]) {
for input in inputs {
let input_hash = input.hash();
self.spend_utxo(input_hash);
}
}
/// Adds a marker operation that allows the database to perform any additional work after adding a new block to
/// the database.
pub fn commit_block(&mut self) {
self.operations
.push(WriteOperation::CreateMmrCheckpoint(MmrTree::Kernel));
self.operations.push(WriteOperation::CreateMmrCheckpoint(MmrTree::Utxo));
self.operations
.push(WriteOperation::CreateMmrCheckpoint(MmrTree::RangeProof));
}
/// Set the horizon beyond which we cannot be guaranteed to provide detailed blockchain information anymore.
/// A value of zero indicates that no pruning should be carried out at all. That is, this state should act as an
/// archival node.
///
/// This operation just sets the new horizon value. No pruning is done at this point.
pub fn set_pruning_horizon(&mut self, new_pruning_horizon: u64) {
self.operations.push(WriteOperation::Insert(DbKeyValuePair::Metadata(
MetadataKey::PruningHorizon,
MetadataValue::PruningHorizon(new_pruning_horizon),
)));
}
/// Rewinds the Kernel MMR state by the given number of Checkpoints.
pub fn rewind_kernel_mmr(&mut self, steps_back: usize) {
self.operations
.push(WriteOperation::RewindMmr(MmrTree::Kernel, steps_back));
}
/// Rewinds the UTXO MMR state by the given number of Checkpoints.
pub fn rewind_utxo_mmr(&mut self, steps_back: usize) {
self.operations
.push(WriteOperation::RewindMmr(MmrTree::Utxo, steps_back));
}
/// Rewinds the RangeProof MMR state by the given number of Checkpoints.
pub fn rewind_rp_mmr(&mut self, steps_back: usize) {
self.operations
.push(WriteOperation::RewindMmr(MmrTree::RangeProof, steps_back));
}
}
#[derive(Debug, Display)]
pub enum WriteOperation {
Insert(DbKeyValuePair),
Delete(DbKey),
Spend(DbKey),
UnSpend(DbKey),
CreateMmrCheckpoint(MmrTree),
RewindMmr(MmrTree, usize),
}
/// A list of key-value pairs that are required for each insert operation
#[derive(Debug)]
pub enum DbKeyValuePair {
Metadata(MetadataKey, MetadataValue),
BlockHeader(u64, Box<BlockHeader>),
UnspentOutput(HashOutput, Box<TransactionOutput>, bool),
TransactionKernel(HashOutput, Box<TransactionKernel>, bool),
OrphanBlock(HashOutput, Box<Block>),
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum MmrTree {
Utxo,
Kernel,
RangeProof,
}
#[derive(Debug, Clone, PartialEq)]
pub enum MetadataKey {
ChainHeight,
BestBlock,
AccumulatedWork,
PruningHorizon,
}
#[derive(Debug, Clone, Deserialize, Serialize)]
pub enum MetadataValue {
ChainHeight(Option<u64>),
BestBlock(Option<BlockHash>),
AccumulatedWork(Option<Difficulty>),
PruningHorizon(u64),
}
#[derive(Debug, Clone, PartialEq)]
pub enum DbKey {
Metadata(MetadataKey),
BlockHeader(u64),
BlockHash(BlockHash),
UnspentOutput(HashOutput),
SpentOutput(HashOutput),
TransactionKernel(HashOutput),
OrphanBlock(HashOutput),
}
#[derive(Debug)]
pub enum DbValue {
Metadata(MetadataValue),
BlockHeader(Box<BlockHeader>),
BlockHash(Box<BlockHeader>),
UnspentOutput(Box<TransactionOutput>),
SpentOutput(Box<TransactionOutput>),
TransactionKernel(Box<TransactionKernel>),
OrphanBlock(Box<Block>),
}
impl Display for DbValue {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
match self {
DbValue::Metadata(MetadataValue::ChainHeight(_)) => f.write_str("Current chain height"),
DbValue::Metadata(MetadataValue::AccumulatedWork(_)) => f.write_str("Total accumulated work"),
DbValue::Metadata(MetadataValue::PruningHorizon(_)) => f.write_str("Pruning horizon"),
DbValue::Metadata(MetadataValue::BestBlock(_)) => f.write_str("Chain tip block hash"),
DbValue::BlockHeader(_) => f.write_str("Block header"),
DbValue::BlockHash(_) => f.write_str("Block hash"),
DbValue::UnspentOutput(_) => f.write_str("Unspent output"),
DbValue::SpentOutput(_) => f.write_str("Spent output"),
DbValue::TransactionKernel(_) => f.write_str("Transaction kernel"),
DbValue::OrphanBlock(_) => f.write_str("Orphan block"),
}
}
}
impl Display for DbKey {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
match self {
DbKey::Metadata(MetadataKey::ChainHeight) => f.write_str("Current chain height"),
DbKey::Metadata(MetadataKey::AccumulatedWork) => f.write_str("Total accumulated work"),
DbKey::Metadata(MetadataKey::PruningHorizon) => f.write_str("Pruning horizon"),
DbKey::Metadata(MetadataKey::BestBlock) => f.write_str("Chain tip block hash"),
DbKey::BlockHeader(v) => f.write_str(&format!("Block header (#{})", v)),
DbKey::BlockHash(v) => f.write_str(&format!("Block hash (#{})", to_hex(v))),
DbKey::UnspentOutput(v) => f.write_str(&format!("Unspent output ({})", to_hex(v))),
DbKey::SpentOutput(v) => f.write_str(&format!("Spent output ({})", to_hex(v))),
DbKey::TransactionKernel(v) => f.write_str(&format!("Transaction kernel ({})", to_hex(v))),
DbKey::OrphanBlock(v) => f.write_str(&format!("Orphan block hash ({})", to_hex(v))),
}
}
}
impl Display for MmrTree {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
match self {
MmrTree::RangeProof => f.write_str("Range Proof"),
MmrTree::Utxo => f.write_str("UTXO"),
MmrTree::Kernel => f.write_str("Kernel"),
}
}
}
| delete | identifier_name |
db_transaction.rs | // Copyright 2019. The Tari Project
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
// following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
// disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
// following disclaimer in the documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
use crate::{
blocks::{blockheader::BlockHash, Block, BlockHeader},
proof_of_work::Difficulty,
transactions::{
transaction::{TransactionInput, TransactionKernel, TransactionOutput},
types::HashOutput,
},
};
use serde::{Deserialize, Serialize};
use std::fmt::{Display, Error, Formatter};
use strum_macros::Display;
use tari_crypto::tari_utilities::{hex::to_hex, Hashable};
#[derive(Debug)]
pub struct DbTransaction {
pub operations: Vec<WriteOperation>,
}
impl Display for DbTransaction {
fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), std::fmt::Error> {
fmt.write_str("Db transaction: \n")?;
for write_op in &self.operations {
fmt.write_str(&format!("{}\n", write_op))?;
}
Ok(())
}
}
impl Default for DbTransaction {
fn default() -> Self {
DbTransaction {
operations: Vec::with_capacity(128),
}
}
}
impl DbTransaction {
/// Creates a new Database transaction. To commit the transactions call [BlockchainDatabase::execute] with the
/// transaction as a parameter.
pub fn new() -> Self {
DbTransaction::default()
}
/// A general insert request. There are convenience functions for specific insert queries.
pub fn insert(&mut self, insert: DbKeyValuePair) {
self.operations.push(WriteOperation::Insert(insert));
}
/// A general delete request. There are convenience functions for specific delete queries.
pub fn delete(&mut self, delete: DbKey) {
self.operations.push(WriteOperation::Delete(delete));
}
/// Inserts a transaction kernel into the current transaction.
pub fn insert_kernel(&mut self, kernel: TransactionKernel, update_mmr: bool) {
let hash = kernel.hash();
self.insert(DbKeyValuePair::TransactionKernel(hash, Box::new(kernel), update_mmr));
}
/// Inserts a block header into the current transaction.
pub fn insert_header(&mut self, header: BlockHeader) {
let height = header.height;
self.insert(DbKeyValuePair::BlockHeader(height, Box::new(header)));
}
/// Adds a UTXO into the current transaction and updates the TXO MMR.
pub fn insert_utxo(&mut self, utxo: TransactionOutput, update_mmr: bool) {
let hash = utxo.hash();
self.insert(DbKeyValuePair::UnspentOutput(hash, Box::new(utxo), update_mmr));
}
/// Adds a UTXO into the current transaction and updates the TXO MMR. This is a test-only function used to ensure we
/// block duplicate entries. This function does not calculate the hash itself but accepts one as a parameter.
pub fn insert_utxo_with_hash(&mut self, hash: Vec<u8>, utxo: TransactionOutput, update_mmr: bool) {
self.insert(DbKeyValuePair::UnspentOutput(hash, Box::new(utxo), update_mmr));
}
/// Stores an orphan block. No checks are made as to whether this is actually an orphan. That responsibility lies
/// with the calling function.
pub fn insert_orphan(&mut self, orphan: Block) {
let hash = orphan.hash();
self.insert(DbKeyValuePair::OrphanBlock(hash, Box::new(orphan)));
}
/// Moves a UTXO to the STXO set and marks it as spent on the MMR. If the UTXO is not in the UTXO set, the
/// transaction will fail with an `UnspendableOutput` error.
pub fn spend_utxo(&mut self, utxo_hash: HashOutput) {
self.operations
.push(WriteOperation::Spend(DbKey::UnspentOutput(utxo_hash)));
}
/// Moves a STXO to the UTXO set. If the STXO is not in the STXO set, the transaction will fail with an
/// `UnspendError`.
// TODO: unspend_utxo in memory_db doesn't unmark the node in the roaring bitmap.0
pub fn unspend_stxo(&mut self, stxo_hash: HashOutput) {
self.operations
.push(WriteOperation::UnSpend(DbKey::SpentOutput(stxo_hash)));
}
/// Moves the given set of transaction inputs from the UTXO set to the STXO set. All the inputs *must* currently
/// exist in the UTXO set, or the transaction will error with `ChainStorageError::UnspendableOutput`
pub fn spend_inputs(&mut self, inputs: &[TransactionInput]) {
for input in inputs {
let input_hash = input.hash();
self.spend_utxo(input_hash);
}
}
/// Adds a marker operation that allows the database to perform any additional work after adding a new block to
/// the database.
pub fn commit_block(&mut self) {
self.operations
.push(WriteOperation::CreateMmrCheckpoint(MmrTree::Kernel));
self.operations.push(WriteOperation::CreateMmrCheckpoint(MmrTree::Utxo));
self.operations
.push(WriteOperation::CreateMmrCheckpoint(MmrTree::RangeProof));
}
/// Set the horizon beyond which we cannot be guaranteed to provide detailed blockchain information anymore.
/// A value of zero indicates that no pruning should be carried out at all. That is, this state should act as an
/// archival node.
///
/// This operation just sets the new horizon value. No pruning is done at this point.
pub fn set_pruning_horizon(&mut self, new_pruning_horizon: u64) {
self.operations.push(WriteOperation::Insert(DbKeyValuePair::Metadata(
MetadataKey::PruningHorizon,
MetadataValue::PruningHorizon(new_pruning_horizon),
)));
}
/// Rewinds the Kernel MMR state by the given number of Checkpoints.
pub fn rewind_kernel_mmr(&mut self, steps_back: usize) {
self.operations
.push(WriteOperation::RewindMmr(MmrTree::Kernel, steps_back));
}
/// Rewinds the UTXO MMR state by the given number of Checkpoints.
pub fn rewind_utxo_mmr(&mut self, steps_back: usize) {
self.operations
.push(WriteOperation::RewindMmr(MmrTree::Utxo, steps_back));
}
/// Rewinds the RangeProof MMR state by the given number of Checkpoints.
pub fn rewind_rp_mmr(&mut self, steps_back: usize) {
self.operations
.push(WriteOperation::RewindMmr(MmrTree::RangeProof, steps_back));
}
}
#[derive(Debug, Display)]
pub enum WriteOperation {
Insert(DbKeyValuePair),
Delete(DbKey),
Spend(DbKey),
UnSpend(DbKey),
CreateMmrCheckpoint(MmrTree),
RewindMmr(MmrTree, usize),
}
/// A list of key-value pairs that are required for each insert operation
#[derive(Debug)]
pub enum DbKeyValuePair {
Metadata(MetadataKey, MetadataValue),
BlockHeader(u64, Box<BlockHeader>),
UnspentOutput(HashOutput, Box<TransactionOutput>, bool),
TransactionKernel(HashOutput, Box<TransactionKernel>, bool),
OrphanBlock(HashOutput, Box<Block>),
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum MmrTree {
Utxo,
Kernel,
RangeProof,
}
#[derive(Debug, Clone, PartialEq)]
pub enum MetadataKey {
ChainHeight,
BestBlock,
AccumulatedWork,
PruningHorizon,
}
#[derive(Debug, Clone, Deserialize, Serialize)]
pub enum MetadataValue {
ChainHeight(Option<u64>),
BestBlock(Option<BlockHash>),
AccumulatedWork(Option<Difficulty>),
PruningHorizon(u64),
}
#[derive(Debug, Clone, PartialEq)]
pub enum DbKey {
Metadata(MetadataKey),
BlockHeader(u64),
BlockHash(BlockHash),
UnspentOutput(HashOutput),
SpentOutput(HashOutput),
TransactionKernel(HashOutput),
OrphanBlock(HashOutput),
}
#[derive(Debug)]
pub enum DbValue {
Metadata(MetadataValue),
BlockHeader(Box<BlockHeader>),
BlockHash(Box<BlockHeader>),
UnspentOutput(Box<TransactionOutput>),
SpentOutput(Box<TransactionOutput>),
TransactionKernel(Box<TransactionKernel>),
OrphanBlock(Box<Block>),
}
impl Display for DbValue {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> |
}
impl Display for DbKey {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
match self {
DbKey::Metadata(MetadataKey::ChainHeight) => f.write_str("Current chain height"),
DbKey::Metadata(MetadataKey::AccumulatedWork) => f.write_str("Total accumulated work"),
DbKey::Metadata(MetadataKey::PruningHorizon) => f.write_str("Pruning horizon"),
DbKey::Metadata(MetadataKey::BestBlock) => f.write_str("Chain tip block hash"),
DbKey::BlockHeader(v) => f.write_str(&format!("Block header (#{})", v)),
DbKey::BlockHash(v) => f.write_str(&format!("Block hash (#{})", to_hex(v))),
DbKey::UnspentOutput(v) => f.write_str(&format!("Unspent output ({})", to_hex(v))),
DbKey::SpentOutput(v) => f.write_str(&format!("Spent output ({})", to_hex(v))),
DbKey::TransactionKernel(v) => f.write_str(&format!("Transaction kernel ({})", to_hex(v))),
DbKey::OrphanBlock(v) => f.write_str(&format!("Orphan block hash ({})", to_hex(v))),
}
}
}
impl Display for MmrTree {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
match self {
MmrTree::RangeProof => f.write_str("Range Proof"),
MmrTree::Utxo => f.write_str("UTXO"),
MmrTree::Kernel => f.write_str("Kernel"),
}
}
}
| {
match self {
DbValue::Metadata(MetadataValue::ChainHeight(_)) => f.write_str("Current chain height"),
DbValue::Metadata(MetadataValue::AccumulatedWork(_)) => f.write_str("Total accumulated work"),
DbValue::Metadata(MetadataValue::PruningHorizon(_)) => f.write_str("Pruning horizon"),
DbValue::Metadata(MetadataValue::BestBlock(_)) => f.write_str("Chain tip block hash"),
DbValue::BlockHeader(_) => f.write_str("Block header"),
DbValue::BlockHash(_) => f.write_str("Block hash"),
DbValue::UnspentOutput(_) => f.write_str("Unspent output"),
DbValue::SpentOutput(_) => f.write_str("Spent output"),
DbValue::TransactionKernel(_) => f.write_str("Transaction kernel"),
DbValue::OrphanBlock(_) => f.write_str("Orphan block"),
}
} | identifier_body |
lib.rs | #![doc(html_root_url = "https://docs.rs/faimm/0.4.0")]
//! This crate provides indexed fasta access by using a memory mapped file to read the sequence
//! data. It is intended for accessing sequence data on genome sized fasta files and provides
//! random access based on base coordinates. Because an indexed fasta file uses a limited number of
//! bases per line separated by (sometimes platform-specific) newlines you cannot directly use the
//! bytes available from the mmap.
//!
//! Access is provided using a view of the mmap using zero-based base coordinates. This view can
//! then be used to iterate over bases (represented as `u8`) or parsed into a string. Naive gc
//! counting is also available.
//!
//! Access to the sequence data doesn't require the `IndexedFasta` to be mutable. This makes
//! it easy to share.
//!
//! # Example
//! ```
//! use faimm::IndexedFasta;
//! let fa = IndexedFasta::from_file("test/genome.fa").expect("Error opening fa");
//! let chr_index = fa.fai().tid("ACGT-25").expect("Cannot find chr in index");
//! let v = fa.view(chr_index, 0, 50).expect("Cannot get .fa view");
//! //count the bases
//! let counts = v.count_bases();
//! //or print the sequence
//! println!("{}", v.to_string());
//! ```
//! # Limitations
//! The parser uses a simple ascii mask for allowable characters (64..128), does not apply any
//! IUPAC conversion or validation. Anything outside this range is silently skipped. This means that
//! also invalid `fasta` will be parsed. The mere presence of an accompanying `.fai` provides the
//! assumption of a valid fasta.
//! Requires Rust >=1.64
//!
//! # Alternatives
//! [Rust-bio](https://crates.io/crates/bio) provides a competent indexed fasta reader. The major
//! difference is that it has an internal buffer and therefore needs to be mutable when performing
//! read operations. faimm is also faster. If you want record-based access (without an .fai index
//! file) [rust-bio](https://crates.io/crates/bio) or [seq_io](https://crates.io/crates/seq_io)
//! provide this.
//!
//! # Performance
//! Calculating the gc content of target regions of an exome (231_410 regions) on the Human
//! reference (GRCh38) takes about 0.7 seconds (warm cache), slightly faster than bedtools nuc (0.9s probably a more
//! sound implementation) and rust-bio (1.3s same implementation as example)
//! Some tests show counting can also be improved using simd, but nothing has been released.
use std::fs::File;
use std::io::{self, BufRead, BufReader, Read};
use std::path::Path;
use indexmap::IndexSet;
use memmap2::{Mmap, MmapOptions};
/// The object that stores the parsed fasta index file. You can use it to map chromosome names to
/// indexes and lookup offsets for chr-start:end coordinates
#[derive(Debug, Clone)]
pub struct Fai {
chromosomes: Vec<FaiRecord>,
name_map: IndexSet<String>,
}
impl Fai {
/// Open a fasta index file from path `P`.
pub fn from_file<P: AsRef<Path>>(path: P) -> io::Result<Self> {
let f = File::open(path)?;
let br = BufReader::new(f);
let mut name_map = IndexSet::new();
let mut chromosomes = Vec::new();
for l in br.lines() {
let line = l?;
let p: Vec<_> = line.split('\t').collect();
if p.len() != 5 {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"Expected 5 columns in .fai file.",
));
}
name_map.insert(p[0].to_owned());
let ioerr =
|e, msg| io::Error::new(io::ErrorKind::InvalidData, format!("{}:{}", msg, e));
chromosomes.push(FaiRecord {
len: p[1]
.parse()
.map_err(|e| ioerr(e, "Error parsing chr len in .fai"))?,
offset: p[2]
.parse()
.map_err(|e| ioerr(e, "Error parsing chr offset in .fai"))?,
line_bases: p[3]
.parse()
.map_err(|e| ioerr(e, "Error parsing chr line_bases in .fai"))?,
line_width: p[4]
.parse()
.map_err(|e| ioerr(e, "Error parsing chr line_width in .fai"))?,
});
}
Ok(Fai {
chromosomes,
name_map,
})
}
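// Each line of a `.fai` file holds five tab-separated columns: sequence name, sequence length,
// byte offset of the first base, bases per line, and bytes per line (including the newline).
// For example (illustrative values), a line like `chr1  248956422  112  60  61` would be parsed
// into `FaiRecord { len: 248956422, offset: 112, line_bases: 60, line_width: 61 }`.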
/// Calculate the slice coordinates (byte offsets).
/// tid is the index of the chromosome (look it up with `Fai::tid` if necessary).
/// start, end: zero based coordinates of the requested range.
///
/// Returns a tuple (start, end) if successful. `io::Error` otherwise.
#[inline]
pub fn offset(&self, tid: usize, start: usize, stop: usize) -> io::Result<(usize, usize)> {
let chr = &self.chromosomes.get(tid).ok_or_else(|| {
io::Error::new(io::ErrorKind::Other, "Chromosome tid was out of bounds")
})?;
if stop > chr.len {
return Err(io::Error::new(
io::ErrorKind::Other,
"FASTA read interval was out of bounds",
));
}
let start_offset =
chr.offset + (start / chr.line_bases) * chr.line_width + start % chr.line_bases;
let stop_offset =
chr.offset + (stop / chr.line_bases) * chr.line_width + stop % chr.line_bases;
Ok((start_offset, stop_offset))
}
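// A worked example of the offset arithmetic above, with illustrative values: for a chromosome
// stored with `offset = 100`, `line_bases = 60` and `line_width = 61` (one newline byte per
// line), base 130 maps to byte 100 + (130 / 60) * 61 + 130 % 60 = 100 + 122 + 10 = 232, i.e.
// two full lines plus ten bases into the third line.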
/// Calculate the slice coordinates (byte offsets).
/// tid is the index of the chromosome (look it up with `Fai::tid` if necessary).
///
/// Returns a tuple (start, end) if successful. `io::Error` otherwise.
#[inline]
pub fn offset_tid(&self, tid: usize) -> io::Result<(usize, usize)> {
let chr = &self.chromosomes.get(tid).ok_or_else(|| {
io::Error::new(io::ErrorKind::Other, "Chromosome tid was out of bounds")
})?;
let start_offset = chr.offset;
let stop_offset =
chr.offset + (chr.len / chr.line_bases) * chr.line_width + chr.len % chr.line_bases;
Ok((start_offset, stop_offset))
}
/// Return the index of the chromosome by name in the fasta index.
///
/// Returns the position of chr `name` if succesful, None otherwise.
#[inline]
pub fn tid(&self, name: &str) -> Option<usize> {
self.name_map.get_index_of(name)
}
/// Return the size of the chromosome at index `tid` in the fasta index.
///
/// Returns the size in bases as usize.
pub fn size(&self, tid: usize) -> io::Result<usize> {
let chr = &self.chromosomes.get(tid).ok_or_else(|| {
io::Error::new(io::ErrorKind::Other, "Chromosome tid was out of bounds")
})?;
Ok(chr.len)
}
/// Return the name of the chromosome at index tid
pub fn name(&self, tid: usize) -> io::Result<&String> {
self.name_map.get_index(tid).ok_or_else(|| {
io::Error::new(io::ErrorKind::Other, "Chromosome tid was out of bounds")
})
}
/// Return the names of the chromosomes from the fasta index in the same order as in the
/// `.fai`. You can use `Fai::tid` to map it back to an index.
///
/// Returns a `Vec<&str>` with the chromosome names.
pub fn names(&self) -> Vec<&str> {
self.name_map.iter().map(|s| s.as_str()).collect()
}
}
/// FaiRecord stores the length, offset, and fasta file characterics of a single chromosome
#[derive(Debug, Clone)]
pub struct FaiRecord {
len: usize,
offset: usize,
line_bases: usize,
line_width: usize,
}
/// The `IndexFasta` can be used to open a fasta file that has a valid.fai index file.
pub struct IndexedFasta {
mmap: Mmap,
fasta_index: Fai,
}
impl IndexedFasta {
/// Open a fasta file from path `P`. It is assumed that it has a valid .fai index file. The
/// .fai file is created by appending .fai to the fasta file.
pub fn from_file<P: AsRef<Path>>(path: P) -> io::Result<Self> {
let mut fai_path = path.as_ref().as_os_str().to_owned();
fai_path.push(".fai");
let fasta_index = Fai::from_file(&fai_path)?;
let file = File::open(path)?;
let mmap = unsafe { MmapOptions::new().map(&file)? };
Ok(IndexedFasta { mmap, fasta_index })
}
/// Use tid, start and end to calculate a slice on the Fasta file. Use this view to iterate
/// over the bases.
///
/// Returns FastaView for the provided chromosome, start, end if successful, Error otherwise.
pub fn view(&self, tid: usize, start: usize, stop: usize) -> io::Result<FastaView> {
if start > stop |
let (start_byte, stop_byte) = self.fasta_index.offset(tid, start, stop)?;
//println!("offset for chr {}:{}-{} is {}-{}", tid, start, stop, start_byte, stop_byte);
Ok(FastaView(&self.mmap[start_byte..stop_byte]))
}
/// Use tid to return a view of an entire chromosome.
///
/// Returns FastaView for the provided chromosome indicated by tid if successful, Error otherwise.
pub fn view_tid(&self, tid: usize) -> io::Result<FastaView> {
let (start_byte, stop_byte) = self.fasta_index.offset_tid(tid)?;
//println!("offset for chr {}:{}-{} is {}-{}", tid, start, stop, start_byte, stop_byte);
Ok(FastaView(&self.mmap[start_byte..stop_byte]))
}
/// Return a reference to the `Fai` that contains information from the fasta index.
///
/// Returns a reference to `Fai`.
pub fn fai(&self) -> &Fai {
&self.fasta_index
}
}
/// A view of a slice of the fasta file bounded by provided coordinates
pub struct FastaView<'a>(&'a [u8]);
impl<'a> FastaView<'a> {
/// Count the occurrences of A, C, G, T, N, and other in the current view. This function does
/// not differentiate between upper or lower case bases.
///
/// Returns a `BaseCounts` object.
pub fn count_bases(&self) -> BaseCounts {
let mut bc: BaseCounts = Default::default();
for b in self.bases() {
let v: u8 = b << 3;
if v ^ 8 == 0 {
bc.a += 1;
} else if v ^ 24 == 0 {
bc.c += 1;
} else if v ^ 56 == 0 {
bc.g += 1;
} else if v ^ 112 == 0 {
bc.n += 1;
} else if v ^ 160 == 0 {
bc.t += 1;
} else {
bc.other += 1;
}
}
bc
}
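// The mask arithmetic above works because shifting an ASCII byte left by three drops its top
// three bits, including the 0x20 case bit, so upper- and lower-case letters collapse onto the
// same value: 'A'/'a' become 8, 'C'/'c' 24, 'G'/'g' 56, 'N'/'n' 112 and 'T'/'t' 160, which are
// exactly the constants compared against with XOR.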
/// Iterator over the bases in the current view. Bases are returned as `u8` representations of
/// the `char`s in the fasta file. Keeps only the chars between 64 and 128 (effectively
/// skipping newlines).
pub fn bases(&self) -> impl Iterator<Item = &'a u8> {
self.0.iter().filter(|&&b| b & 192 == 64)
}
}
/// Returns a newly allocated, utf8-validated string with the sequence data in `Self`
impl<'a> ToString for FastaView<'a> {
fn to_string(&self) -> String {
String::from_utf8(self.bases().cloned().collect()).unwrap()
}
}
impl<'a> Read for FastaView<'a> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let mut read = 0;
let mut skipped = 0;
for (t, s) in buf.iter_mut().zip(self.0.iter().filter(|&&c| {
let base = c & 192 == 64;
if !base {
skipped += 1;
}
base
})) {
*t = *s;
read += 1;
}
self.0 = &self.0[(skipped + read)..];
Ok(read)
}
}
/// Object that contains the counts of occurrences of the most common bases in DNA genome references: A, C, G,
/// T, N and other.
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct BaseCounts {
pub a: usize,
pub c: usize,
pub g: usize,
pub t: usize,
pub n: usize,
pub other: usize,
}
/// Initialize basecount with zeros
impl Default for BaseCounts {
fn default() -> BaseCounts {
BaseCounts {
a: 0,
c: 0,
g: 0,
t: 0,
n: 0,
other: 0,
}
}
}
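// A small illustrative helper (the name and signature are only a sketch, not part of the crate's
// API) showing one way to turn the counts into a GC fraction, as in the exome example mentioned
// in the crate docs.
#[allow(dead_code)]
fn gc_fraction(counts: &BaseCounts) -> Option<f64> {
    let acgt = counts.a + counts.c + counts.g + counts.t;
    if acgt == 0 {
        // Avoid dividing by zero for views that contain only N or other characters.
        None
    } else {
        Some((counts.c + counts.g) as f64 / acgt as f64)
    }
}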
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn fai() {
let ir = IndexedFasta::from_file("test/genome.fa").unwrap();
assert_eq!(ir.fai().names().len(), 3);
assert_eq!(ir.fai().tid("ACGT-25"), Some(2));
assert_eq!(ir.fai().tid("NotFound"), None);
assert_eq!(ir.fai().size(2).unwrap(), 100);
assert_eq!(ir.fai().name(2).unwrap(), "ACGT-25");
assert!(ir.fai().name(3).is_err());
}
#[test]
fn view() {
let ir = IndexedFasta::from_file("test/genome.fa").unwrap();
assert_eq!(ir.view(0, 0, 10).unwrap().to_string(), "AAAAAAAAAA");
assert!(ir.view(0, 0, 11).is_err());
assert_eq!(
ir.view(2, 38, 62).unwrap().to_string(),
"CCCCCCCCCCCCGGGGGGGGGGGG"
);
assert_eq!(
ir.view(2, 74, 100).unwrap().to_string(),
"GTTTTTTTTTTTTTTTTTTTTTTTTT"
);
assert!(ir.view(0, 120, 130).is_err());
}
#[test]
fn view_tid() {
let ir = IndexedFasta::from_file("test/genome.fa").unwrap();
assert_eq!(ir.view_tid(0).unwrap().to_string(), "AAAAAAAAAA");
assert_eq!(ir.view_tid(1).unwrap().to_string(),
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA");
assert_eq!(ir.view_tid(2).unwrap().to_string(),
"AAAAAAAAAAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCCCCCCCCCCGGGGGGGGGGGGGGGGGGGGGGGGGTTTTTTTTTTTTTTTTTTTTTTTTT");
assert!(ir.view_tid(3).is_err());
}
#[test]
fn view_bases() {
let ir = IndexedFasta::from_file("test/genome.fa").unwrap();
let v = ir.view(2, 48, 52).unwrap();
let mut b = v.bases();
assert_eq!(b.next(), Some(&b'C'));
assert_eq!(b.next(), Some(&b'C'));
assert_eq!(b.next(), Some(&b'G'));
assert_eq!(b.next(), Some(&b'G'));
assert_eq!(b.next(), None);
}
#[test]
fn view_counts() {
let ir = IndexedFasta::from_file("test/genome.fa").unwrap();
assert_eq!(
ir.view(2, 48, 52).unwrap().count_bases(),
BaseCounts {
c: 2,
g: 2,
..Default::default()
}
);
}
#[test]
fn read_view() {
let ir = IndexedFasta::from_file("test/genome.fa").unwrap();
let mut buf = vec![0; 25];
let mut v = ir.view_tid(2).unwrap();
println!("{}", v.to_string());
assert_eq!(v.read(&mut buf).unwrap(), 25);
assert_eq!(buf, vec![b'A'; 25]);
assert_eq!(v.read(&mut buf).unwrap(), 25);
assert_eq!(buf, vec![b'C'; 25]);
assert_eq!(v.read(&mut buf).unwrap(), 25);
assert_eq!(buf, vec![b'G'; 25]);
let mut buf2 = vec![0; 10];
assert_eq!(v.read(&mut buf2).unwrap(), 10);
assert_eq!(buf2, vec![b'T'; 10]);
assert_eq!(v.read(&mut buf2).unwrap(), 10);
assert_eq!(buf2, vec![b'T'; 10]);
assert_eq!(v.read(&mut buf2).unwrap(), 5);
assert_eq!(&buf2[0..5], vec![b'T'; 5].as_slice());
}
}
| {
return Err(io::Error::new(
io::ErrorKind::Other,
"Invalid query interval",
));
} | conditional_block |
lib.rs | #![doc(html_root_url = "https://docs.rs/faimm/0.4.0")]
//! This crate provides indexed fasta access by using a memory mapped file to read the sequence
//! data. It is intended for accessing sequence data on genome sized fasta files and provides
//! random access based on base coordinates. Because an indexed fasta file uses a limited number of
//! bases per line separated by (sometimes platform-specific) newlines you cannot directly use the
//! bytes available from the mmap.
//!
//! Access is provided using a view of the mmap using zero-based base coordinates. This view can
//! then be used to iterate over bases (represented as `u8`) or parsed into a string. Naive gc
//! counting is also available.
//!
//! Access to the sequence data doesn't require the `IndexedFasta` to be mutable. This makes
//! it easy to share.
//!
//! # Example
//! ```
//! use faimm::IndexedFasta;
//! let fa = IndexedFasta::from_file("test/genome.fa").expect("Error opening fa");
//! let chr_index = fa.fai().tid("ACGT-25").expect("Cannot find chr in index");
//! let v = fa.view(chr_index, 0, 50).expect("Cannot get .fa view");
//! //count the bases
//! let counts = v.count_bases();
//! //or print the sequence
//! println!("{}", v.to_string());
//! ```
//! # Limitations
//! The parser uses a simple ascii mask for allowable characters (64..128), does not apply any
//! IUPAC conversion or validation. Anything outside this range is silently skipped. This means that
//! also invalid `fasta` will be parsed. The mere presence of an accompanying `.fai` provides the
//! assumption of a valid fasta.
//! Requires Rust >=1.64
//!
//! # Alternatives
//! [Rust-bio](https://crates.io/crates/bio) provides a competent indexed fasta reader. The major
//! difference is that it has an internal buffer and therefore needs to be mutable when performing
//! read operations. faimm is also faster. If you want record-based access (without an .fai index
//! file) [rust-bio](https://crates.io/crates/bio) or [seq_io](https://crates.io/crates/seq_io)
//! provide this.
//!
//! # Performance
//! Calculating the gc content of target regions of an exome (231_410 regions) on the Human
//! reference (GRCh38) takes about 0.7 seconds (warm cache), slightly faster than bedtools nuc (0.9s probably a more
//! sound implementation) and rust-bio (1.3s same implementation as example)
//! Some tests show counting can also be improved using simd, but nothing has been released.
use std::fs::File;
use std::io::{self, BufRead, BufReader, Read};
use std::path::Path;
use indexmap::IndexSet;
use memmap2::{Mmap, MmapOptions};
/// The object that stores the parsed fasta index file. You can use it to map chromosome names to
/// indexes and lookup offsets for chr-start:end coordinates
#[derive(Debug, Clone)]
pub struct Fai {
chromosomes: Vec<FaiRecord>,
name_map: IndexSet<String>,
}
impl Fai {
/// Open a fasta index file from path `P`.
pub fn from_file<P: AsRef<Path>>(path: P) -> io::Result<Self> {
let f = File::open(path)?;
let br = BufReader::new(f);
let mut name_map = IndexSet::new();
let mut chromosomes = Vec::new();
for l in br.lines() {
let line = l?;
let p: Vec<_> = line.split('\t').collect();
if p.len() != 5 {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"Expected 5 columns in .fai file.",
));
}
name_map.insert(p[0].to_owned());
let ioerr =
|e, msg| io::Error::new(io::ErrorKind::InvalidData, format!("{}:{}", msg, e));
chromosomes.push(FaiRecord {
len: p[1]
.parse()
.map_err(|e| ioerr(e, "Error parsing chr len in .fai"))?,
offset: p[2]
.parse()
.map_err(|e| ioerr(e, "Error parsing chr offset in .fai"))?,
line_bases: p[3]
.parse()
.map_err(|e| ioerr(e, "Error parsing chr line_bases in .fai"))?,
line_width: p[4]
.parse()
.map_err(|e| ioerr(e, "Error parsing chr line_width in .fai"))?,
});
}
Ok(Fai {
chromosomes,
name_map,
})
}
/// Calculate the slice coordinates (byte offsets).
/// tid is the index of the chromosome (look it up with `Fai::tid` if necessary).
/// start, end: zero based coordinates of the requested range.
///
/// Returns a tuple (start, end) if successful. `io::Error` otherwise.
#[inline]
pub fn offset(&self, tid: usize, start: usize, stop: usize) -> io::Result<(usize, usize)> {
let chr = &self.chromosomes.get(tid).ok_or_else(|| {
io::Error::new(io::ErrorKind::Other, "Chromosome tid was out of bounds")
})?;
if stop > chr.len {
return Err(io::Error::new(
io::ErrorKind::Other,
"FASTA read interval was out of bounds",
));
}
let start_offset =
chr.offset + (start / chr.line_bases) * chr.line_width + start % chr.line_bases;
let stop_offset =
chr.offset + (stop / chr.line_bases) * chr.line_width + stop % chr.line_bases;
Ok((start_offset, stop_offset))
}
/// Calculate the slice coordinates (byte offsets).
/// tid is the index of the chromosome (look it up with `Fai::tid` if necessary).
///
/// Returns a tuple (start, end) if successful. `io::Error` otherwise.
#[inline]
pub fn offset_tid(&self, tid: usize) -> io::Result<(usize, usize)> {
let chr = &self.chromosomes.get(tid).ok_or_else(|| {
io::Error::new(io::ErrorKind::Other, "Chromosome tid was out of bounds")
})?;
let start_offset = chr.offset;
let stop_offset =
chr.offset + (chr.len / chr.line_bases) * chr.line_width + chr.len % chr.line_bases;
Ok((start_offset, stop_offset))
}
/// Return the index of the chromosome by name in the fasta index.
///
/// Returns the position of chr `name` if succesful, None otherwise.
#[inline]
pub fn tid(&self, name: &str) -> Option<usize> {
self.name_map.get_index_of(name)
}
/// Return the size of the chromosome at index `tid` in the fasta index.
///
/// Returns the size in bases as usize.
pub fn size(&self, tid: usize) -> io::Result<usize> {
let chr = &self.chromosomes.get(tid).ok_or_else(|| {
io::Error::new(io::ErrorKind::Other, "Chromosome tid was out of bounds")
})?;
Ok(chr.len)
}
/// Return the name of the chromosome at index tid
pub fn name(&self, tid: usize) -> io::Result<&String> {
self.name_map.get_index(tid).ok_or_else(|| {
io::Error::new(io::ErrorKind::Other, "Chromosome tid was out of bounds")
})
}
/// Return the names of the chromosomes from the fasta index in the same order as in the
/// `.fai`. You can use `Fai::tid` to map it back to an index.
///
/// Returns a `Vec<&str>` with the chromosome names.
pub fn names(&self) -> Vec<&str> {
self.name_map.iter().map(|s| s.as_str()).collect()
}
}
/// FaiRecord stores the length, offset, and fasta file characterics of a single chromosome
#[derive(Debug, Clone)]
pub struct FaiRecord {
len: usize,
offset: usize,
line_bases: usize,
line_width: usize,
}
/// The `IndexFasta` can be used to open a fasta file that has a valid.fai index file.
pub struct IndexedFasta {
mmap: Mmap,
fasta_index: Fai,
}
impl IndexedFasta {
/// Open a fasta file from path `P`. It is assumed that it has a valid .fai index file. The
/// .fai file is created by appending .fai to the fasta file.
pub fn from_file<P: AsRef<Path>>(path: P) -> io::Result<Self> {
let mut fai_path = path.as_ref().as_os_str().to_owned();
fai_path.push(".fai");
let fasta_index = Fai::from_file(&fai_path)?;
let file = File::open(path)?;
let mmap = unsafe { MmapOptions::new().map(&file)? };
Ok(IndexedFasta { mmap, fasta_index })
}
/// Use tid, start and end to calculate a slice on the Fasta file. Use this view to iterate
/// over the bases.
///
/// Returns FastaView for the provided chromosome, start, end if successful, Error otherwise.
pub fn view(&self, tid: usize, start: usize, stop: usize) -> io::Result<FastaView> {
if start > stop {
return Err(io::Error::new(
io::ErrorKind::Other,
"Invalid query interval",
));
}
let (start_byte, stop_byte) = self.fasta_index.offset(tid, start, stop)?;
//println!("offset for chr {}:{}-{} is {}-{}", tid, start, stop, start_byte, stop_byte);
Ok(FastaView(&self.mmap[start_byte..stop_byte]))
}
/// Use tid to return a view of an entire chromosome.
///
/// Returns FastaView for the provided chromosome indicated by tid if successful, Error otherwise.
pub fn view_tid(&self, tid: usize) -> io::Result<FastaView> {
let (start_byte, stop_byte) = self.fasta_index.offset_tid(tid)?;
//println!("offset for chr {}:{}-{} is {}-{}", tid, start, stop, start_byte, stop_byte);
Ok(FastaView(&self.mmap[start_byte..stop_byte]))
}
/// Return a reference to the `Fai` that contains information from the fasta index.
///
/// Returns a reference to `Fai`.
pub fn fai(&self) -> &Fai {
&self.fasta_index
}
}
/// A view of a slice of the fasta file bounded by provided coordinates
pub struct FastaView<'a>(&'a [u8]);
impl<'a> FastaView<'a> {
/// Count the occurrences of A, C, G, T, N, and other in the current view. This function does
/// not differentiate between upper or lower case bases.
///
/// Returns a `BaseCounts` object.
pub fn count_bases(&self) -> BaseCounts {
let mut bc: BaseCounts = Default::default();
for b in self.bases() {
let v: u8 = b << 3;
if v ^ 8 == 0 {
bc.a += 1;
} else if v ^ 24 == 0 {
bc.c += 1;
} else if v ^ 56 == 0 {
bc.g += 1;
} else if v ^ 112 == 0 {
bc.n += 1;
} else if v ^ 160 == 0 {
bc.t += 1;
} else {
bc.other += 1;
}
}
bc
}
/// Iterator over the bases in the current view. Bases are returned as `u8` representations of
/// the `char`s in the fasta file. Keep only the chars between 64 and 128 (effectively
/// skipping newlines).
pub fn bases(&self) -> impl Iterator<Item = &'a u8> {
self.0.iter().filter(|&&b| b & 192 == 64)
}
}
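// How the ASCII filtering above works (explanatory note, added): `b & 192 == 64`
// keeps bytes in 64..=127, i.e. letters such as b'A' (65) or b'c' (99), and drops
// newline bytes (b'\n' = 10, b'\r' = 13). In `count_bases`, `b << 3` shifts the
// three high bits out of the u8, so upper- and lower-case letters collapse onto
// the same value: b'A'/b'a' -> 8, b'C'/b'c' -> 24, b'G'/b'g' -> 56,
// b'N'/b'n' -> 112, b'T'/b't' -> 160, which are the constants compared above.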
/// Returns a newly allocated, utf8-validated string with the sequence data in `Self`
impl<'a> ToString for FastaView<'a> {
fn to_string(&self) -> String {
String::from_utf8(self.bases().cloned().collect()).unwrap()
}
}
impl<'a> Read for FastaView<'a> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let mut read = 0;
let mut skipped = 0;
for (t, s) in buf.iter_mut().zip(self.0.iter().filter(|&&c| {
let base = c & 192 == 64;
if !base {
skipped += 1;
}
base
})) {
*t = *s;
read += 1;
}
self.0 = &self.0[(skipped + read)..];
Ok(read)
}
}
/// Object that counts the occurrences of the most common bases in DNA genome references: A, C, G,
/// T, N and other.
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct BaseCounts {
pub a: usize,
pub c: usize,
pub g: usize,
pub t: usize,
pub n: usize,
pub other: usize,
}
/// Initialize basecount with zeros
impl Default for BaseCounts {
fn default() -> BaseCounts |
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn fai() {
let ir = IndexedFasta::from_file("test/genome.fa").unwrap();
assert_eq!(ir.fai().names().len(), 3);
assert_eq!(ir.fai().tid("ACGT-25"), Some(2));
assert_eq!(ir.fai().tid("NotFound"), None);
assert_eq!(ir.fai().size(2).unwrap(), 100);
assert_eq!(ir.fai().name(2).unwrap(), "ACGT-25");
assert!(ir.fai().name(3).is_err());
}
#[test]
fn view() {
let ir = IndexedFasta::from_file("test/genome.fa").unwrap();
assert_eq!(ir.view(0, 0, 10).unwrap().to_string(), "AAAAAAAAAA");
assert!(ir.view(0, 0, 11).is_err());
assert_eq!(
ir.view(2, 38, 62).unwrap().to_string(),
"CCCCCCCCCCCCGGGGGGGGGGGG"
);
assert_eq!(
ir.view(2, 74, 100).unwrap().to_string(),
"GTTTTTTTTTTTTTTTTTTTTTTTTT"
);
assert!(ir.view(0, 120, 130).is_err());
}
#[test]
fn view_tid() {
let ir = IndexedFasta::from_file("test/genome.fa").unwrap();
assert_eq!(ir.view_tid(0).unwrap().to_string(), "AAAAAAAAAA");
assert_eq!(ir.view_tid(1).unwrap().to_string(),
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA");
assert_eq!(ir.view_tid(2).unwrap().to_string(),
"AAAAAAAAAAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCCCCCCCCCCGGGGGGGGGGGGGGGGGGGGGGGGGTTTTTTTTTTTTTTTTTTTTTTTTT");
assert!(ir.view_tid(3).is_err());
}
#[test]
fn view_bases() {
let ir = IndexedFasta::from_file("test/genome.fa").unwrap();
let v = ir.view(2, 48, 52).unwrap();
let mut b = v.bases();
assert_eq!(b.next(), Some(&b'C'));
assert_eq!(b.next(), Some(&b'C'));
assert_eq!(b.next(), Some(&b'G'));
assert_eq!(b.next(), Some(&b'G'));
assert_eq!(b.next(), None);
}
#[test]
fn view_counts() {
let ir = IndexedFasta::from_file("test/genome.fa").unwrap();
assert_eq!(
ir.view(2, 48, 52).unwrap().count_bases(),
BaseCounts {
c: 2,
g: 2,
..Default::default()
}
);
}
#[test]
fn read_view() {
let ir = IndexedFasta::from_file("test/genome.fa").unwrap();
let mut buf = vec![0; 25];
let mut v = ir.view_tid(2).unwrap();
println!("{}", v.to_string());
assert_eq!(v.read(&mut buf).unwrap(), 25);
assert_eq!(buf, vec![b'A'; 25]);
assert_eq!(v.read(&mut buf).unwrap(), 25);
assert_eq!(buf, vec![b'C'; 25]);
assert_eq!(v.read(&mut buf).unwrap(), 25);
assert_eq!(buf, vec![b'G'; 25]);
let mut buf2 = vec![0; 10];
assert_eq!(v.read(&mut buf2).unwrap(), 10);
assert_eq!(buf2, vec![b'T'; 10]);
assert_eq!(v.read(&mut buf2).unwrap(), 10);
assert_eq!(buf2, vec![b'T'; 10]);
assert_eq!(v.read(&mut buf2).unwrap(), 5);
assert_eq!(&buf2[0..5], vec![b'T'; 5].as_slice());
}
}
| {
BaseCounts {
a: 0,
c: 0,
g: 0,
t: 0,
n: 0,
other: 0,
}
} | identifier_body |
lib.rs | #![doc(html_root_url = "https://docs.rs/faimm/0.4.0")]
//! This crate provides indexed fasta access by using a memory mapped file to read the sequence
//! data. It is intended for accessing sequence data on genome sized fasta files and provides
//! random access based on base coordinates. Because an indexed fasta file uses a limited number of
//! bases per line separated by (sometimes platform-specific) newlines you cannot directly use the
//! bytes available from the mmap.
//!
//! Access is provided using a view of the mmap using zero-based base coordinates. This view can
//! then be used to iterate over bases (represented as `u8`) or parsed into a string. Naive gc
//! counting is also available.
//!
//! Access to the sequence data doesn't require the `IndexedFasta` to be mutable. This makes
//! it easy to share.
//!
//! # Example
//! ```
//! use faimm::IndexedFasta;
//! let fa = IndexedFasta::from_file("test/genome.fa").expect("Error opening fa");
//! let chr_index = fa.fai().tid("ACGT-25").expect("Cannot find chr in index");
//! let v = fa.view(chr_index, 0, 50).expect("Cannot get .fa view");
//! //count the bases
//! let counts = v.count_bases();
//! //or print the sequence
//! println!("{}", v.to_string());
//! ```
//! # Limitations
//! The parser uses a simple ascii mask for allowable characters (64..128) and does not apply any
//! IUPAC conversion or validation. Anything outside this range is silently skipped. This means that
//! also invalid `fasta` will be parsed. The mere presence of an accompanying `.fai` provides the
//! assumption of a valid fasta.
//! Requires Rust >=1.64
//!
//! # Alternatives
//! [Rust-bio](https://crates.io/crates/bio) provides a competent indexed fasta reader. The major
//! difference is that it has an internal buffer and therefore needs to be mutable when performing
//! read operations. faimm is also faster. If you want record based access (without an .fai index
//! file) [rust-bio](https://crates.io/crates/bio) or [seq_io](https://crates.io/crates/seq_io)
//! provide this.
//!
//! # Performance
//! Calculating the gc content of target regions of an exome (231_410 regions) on the Human
//! reference (GRCh38) takes about 0.7 seconds (warm cache), slightly faster than bedtools nuc (0.9s, probably a more
//! sound implementation) and rust-bio (1.3s, same implementation as the example).
//! Some tests show counting can also be improved using simd, but nothing has been released.
use std::fs::File;
use std::io::{self, BufRead, BufReader, Read};
use std::path::Path;
use indexmap::IndexSet;
use memmap2::{Mmap, MmapOptions};
/// The object that stores the parsed fasta index file. You can use it to map chromosome names to
/// indexes and lookup offsets for chr-start:end coordinates
#[derive(Debug, Clone)]
pub struct Fai {
chromosomes: Vec<FaiRecord>,
name_map: IndexSet<String>,
}
impl Fai {
/// Open a fasta index file from path `P`.
pub fn from_file<P: AsRef<Path>>(path: P) -> io::Result<Self> {
let f = File::open(path)?;
let br = BufReader::new(f);
let mut name_map = IndexSet::new();
let mut chromosomes = Vec::new();
for l in br.lines() {
let line = l?;
let p: Vec<_> = line.split('\t').collect();
            if p.len() != 5 {
                return Err(io::Error::new(
                    io::ErrorKind::InvalidData,
                    "Expected 5 columns in .fai file.",
                ));
} | let ioerr =
|e, msg| io::Error::new(io::ErrorKind::InvalidData, format!("{}:{}", msg, e));
chromosomes.push(FaiRecord {
len: p[1]
.parse()
                    .map_err(|e| ioerr(e, "Error parsing chr len in .fai"))?,
offset: p[2]
.parse()
                    .map_err(|e| ioerr(e, "Error parsing chr offset in .fai"))?,
line_bases: p[3]
.parse()
                    .map_err(|e| ioerr(e, "Error parsing chr line_bases in .fai"))?,
line_width: p[4]
.parse()
                    .map_err(|e| ioerr(e, "Error parsing chr line_width in .fai"))?,
});
}
Ok(Fai {
chromosomes,
name_map,
})
}
/// Calculate the slice coordinates (byte offsets).
    /// tid is the index of the chromosome (lookup with `Fai::tid` if necessary).
    /// start, end: zero based coordinates of the requested range.
    ///
    /// Returns a tuple (start, end) if successful. `io::Error` otherwise.
#[inline]
pub fn offset(&self, tid: usize, start: usize, stop: usize) -> io::Result<(usize, usize)> {
let chr = &self.chromosomes.get(tid).ok_or_else(|| {
            io::Error::new(io::ErrorKind::Other, "Chromosome tid was out of bounds")
})?;
if stop > chr.len {
return Err(io::Error::new(
io::ErrorKind::Other,
"FASTA read interval was out of bounds",
));
}
let start_offset =
chr.offset + (start / chr.line_bases) * chr.line_width + start % chr.line_bases;
let stop_offset =
chr.offset + (stop / chr.line_bases) * chr.line_width + stop % chr.line_bases;
Ok((start_offset, stop_offset))
}
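    // Note with a small worked case (added for clarity; numbers are hypothetical):
    // the returned byte range is a raw window into the file, so when it spans a
    // line break it also covers the newline bytes, which `FastaView::bases()`
    // filters out later. E.g. with line_bases = 60 and line_width = 61, a request
    // for bases 55..65 yields an 11-byte window holding 10 bases and one '\n'.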
/// Calculate the slice coordinates (byte offsets).
    /// tid is the index of the chromosome (lookup with `Fai::tid` if necessary).
    ///
    /// Returns a tuple (start, end) if successful. `io::Error` otherwise.
#[inline]
pub fn offset_tid(&self, tid: usize) -> io::Result<(usize, usize)> {
let chr = &self.chromosomes.get(tid).ok_or_else(|| {
            io::Error::new(io::ErrorKind::Other, "Chromosome tid was out of bounds")
})?;
let start_offset = chr.offset;
let stop_offset =
chr.offset + (chr.len / chr.line_bases) * chr.line_width + chr.len % chr.line_bases;
Ok((start_offset, stop_offset))
}
/// Return the index of the chromosome by name in the fasta index.
///
    /// Returns the position of chr `name` if successful, None otherwise.
#[inline]
pub fn tid(&self, name: &str) -> Option<usize> {
self.name_map.get_index_of(name)
}
    /// Return the size of a chromosome in the fasta index.
///
/// Returns the size in bases as usize.
pub fn size(&self, tid: usize) -> io::Result<usize> {
let chr = &self.chromosomes.get(tid).ok_or_else(|| {
            io::Error::new(io::ErrorKind::Other, "Chromosome tid was out of bounds")
})?;
Ok(chr.len)
}
    /// Return the name of the chromosome at index tid.
pub fn name(&self, tid: usize) -> io::Result<&String> {
self.name_map.get_index(tid).ok_or_else(|| {
            io::Error::new(io::ErrorKind::Other, "Chromosome tid was out of bounds")
})
}
/// Return the names of the chromosomes from the fasta index in the same order as in the
/// `.fai`. You can use `Fai::tid` to map it back to an index.
///
/// Returns a `Vec<&str>` with the chromosome names.
pub fn names(&self) -> Vec<&str> {
self.name_map.iter().map(|s| s.as_str()).collect()
}
}
/// FaiRecord stores the length, offset, and fasta file characteristics of a single chromosome
#[derive(Debug, Clone)]
pub struct FaiRecord {
len: usize,
offset: usize,
line_bases: usize,
line_width: usize,
}
/// The `IndexedFasta` can be used to open a fasta file that has a valid .fai index file.
pub struct IndexedFasta {
mmap: Mmap,
fasta_index: Fai,
}
impl IndexedFasta {
    /// Open a fasta file from path `P`. It is assumed that it has a valid .fai index file. The
    /// .fai filename is derived by appending .fai to the fasta filename.
pub fn from_file<P: AsRef<Path>>(path: P) -> io::Result<Self> {
let mut fai_path = path.as_ref().as_os_str().to_owned();
fai_path.push(".fai");
let fasta_index = Fai::from_file(&fai_path)?;
let file = File::open(path)?;
let mmap = unsafe { MmapOptions::new().map(&file)? };
Ok(IndexedFasta { mmap, fasta_index })
}
/// Use tid, start and end to calculate a slice on the Fasta file. Use this view to iterate
/// over the bases.
///
    /// Returns FastaView for the provided chromosome, start, end if successful, Error otherwise.
pub fn view(&self, tid: usize, start: usize, stop: usize) -> io::Result<FastaView> {
if start > stop {
return Err(io::Error::new(
io::ErrorKind::Other,
"Invalid query interval",
));
}
let (start_byte, stop_byte) = self.fasta_index.offset(tid, start, stop)?;
//println!("offset for chr {}:{}-{} is {}-{}", tid, start, stop, start_byte, stop_byte);
Ok(FastaView(&self.mmap[start_byte..stop_byte]))
}
/// Use tid to return a view of an entire chromosome.
///
    /// Returns FastaView for the provided chromosome indicated by tid if successful, Error otherwise.
pub fn view_tid(&self, tid: usize) -> io::Result<FastaView> {
let (start_byte, stop_byte) = self.fasta_index.offset_tid(tid)?;
//println!("offset for chr {}:{}-{} is {}-{}", tid, start, stop, start_byte, stop_byte);
Ok(FastaView(&self.mmap[start_byte..stop_byte]))
}
/// Return a reference to the `Fai` that contains information from the fasta index.
///
/// Returns a reference to `Fai`.
pub fn fai(&self) -> &Fai {
&self.fasta_index
}
}
/// A view of a slice of the fasta file bounded by provided coordinates
pub struct FastaView<'a>(&'a [u8]);
impl<'a> FastaView<'a> {
    /// Count the occurrences of A, C, G, T, N, and other in the current view. This function does
/// not differentiate between upper or lower case bases.
///
    /// Returns a `BaseCounts` object.
pub fn count_bases(&self) -> BaseCounts {
let mut bc: BaseCounts = Default::default();
for b in self.bases() {
let v: u8 = b << 3;
if v ^ 8 == 0 {
bc.a += 1;
} else if v ^ 24 == 0 {
bc.c += 1;
} else if v ^ 56 == 0 {
bc.g += 1;
} else if v ^ 112 == 0 {
bc.n += 1;
} else if v ^ 160 == 0 {
bc.t += 1;
} else {
bc.other += 1;
}
}
bc
}
/// Iterator over the bases in the current view. Bases are returned as `u8` representations of
    /// the `char`s in the fasta file. Keep only the chars between 64 and 128 (effectively
    /// skipping newlines).
pub fn bases(&self) -> impl Iterator<Item = &'a u8> {
self.0.iter().filter(|&&b| b & 192 == 64)
}
}
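// Illustrative GC-content sketch (added; assumes `fa: IndexedFasta` and a valid
// `tid` are in scope):
//
//     let view = fa.view_tid(tid)?;
//     let c = view.count_bases();
//     let gc = (c.g + c.c) as f64 / (c.a + c.c + c.g + c.t) as f64;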
/// Returns a newly allocated, utf8-validated string with the sequence data in `Self`
impl<'a> ToString for FastaView<'a> {
fn to_string(&self) -> String {
String::from_utf8(self.bases().cloned().collect()).unwrap()
}
}
impl<'a> Read for FastaView<'a> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let mut read = 0;
let mut skipped = 0;
for (t, s) in buf.iter_mut().zip(self.0.iter().filter(|&&c| {
let base = c & 192 == 64;
            if !base {
skipped += 1;
}
base
})) {
*t = *s;
read += 1;
}
self.0 = &self.0[(skipped + read)..];
Ok(read)
}
}
/// Object that counts the occurrences of the most common bases in DNA genome references: A, C, G,
/// T, N and other.
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct BaseCounts {
pub a: usize,
pub c: usize,
pub g: usize,
pub t: usize,
pub n: usize,
pub other: usize,
}
/// Initialize basecount with zeros
impl Default for BaseCounts {
fn default() -> BaseCounts {
BaseCounts {
a: 0,
c: 0,
g: 0,
t: 0,
n: 0,
other: 0,
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn fai() {
let ir = IndexedFasta::from_file("test/genome.fa").unwrap();
assert_eq!(ir.fai().names().len(), 3);
assert_eq!(ir.fai().tid("ACGT-25"), Some(2));
assert_eq!(ir.fai().tid("NotFound"), None);
assert_eq!(ir.fai().size(2).unwrap(), 100);
assert_eq!(ir.fai().name(2).unwrap(), "ACGT-25");
assert!(ir.fai().name(3).is_err());
}
#[test]
fn view() {
let ir = IndexedFasta::from_file("test/genome.fa").unwrap();
assert_eq!(ir.view(0, 0, 10).unwrap().to_string(), "AAAAAAAAAA");
assert!(ir.view(0, 0, 11).is_err());
assert_eq!(
ir.view(2, 38, 62).unwrap().to_string(),
"CCCCCCCCCCCCGGGGGGGGGGGG"
);
assert_eq!(
ir.view(2, 74, 100).unwrap().to_string(),
"GTTTTTTTTTTTTTTTTTTTTTTTTT"
);
assert!(ir.view(0, 120, 130).is_err());
}
#[test]
fn view_tid() {
let ir = IndexedFasta::from_file("test/genome.fa").unwrap();
assert_eq!(ir.view_tid(0).unwrap().to_string(), "AAAAAAAAAA");
assert_eq!(ir.view_tid(1).unwrap().to_string(),
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA");
assert_eq!(ir.view_tid(2).unwrap().to_string(),
"AAAAAAAAAAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCCCCCCCCCCGGGGGGGGGGGGGGGGGGGGGGGGGTTTTTTTTTTTTTTTTTTTTTTTTT");
assert!(ir.view_tid(3).is_err());
}
#[test]
fn view_bases() {
let ir = IndexedFasta::from_file("test/genome.fa").unwrap();
let v = ir.view(2, 48, 52).unwrap();
let mut b = v.bases();
assert_eq!(b.next(), Some(&b'C'));
assert_eq!(b.next(), Some(&b'C'));
assert_eq!(b.next(), Some(&b'G'));
assert_eq!(b.next(), Some(&b'G'));
assert_eq!(b.next(), None);
}
#[test]
fn view_counts() {
let ir = IndexedFasta::from_file("test/genome.fa").unwrap();
assert_eq!(
ir.view(2, 48, 52).unwrap().count_bases(),
BaseCounts {
c: 2,
g: 2,
..Default::default()
}
);
}
#[test]
fn read_view() {
let ir = IndexedFasta::from_file("test/genome.fa").unwrap();
let mut buf = vec![0; 25];
let mut v = ir.view_tid(2).unwrap();
println!("{}", v.to_string());
assert_eq!(v.read(&mut buf).unwrap(), 25);
assert_eq!(buf, vec![b'A'; 25]);
assert_eq!(v.read(&mut buf).unwrap(), 25);
assert_eq!(buf, vec![b'C'; 25]);
assert_eq!(v.read(&mut buf).unwrap(), 25);
assert_eq!(buf, vec![b'G'; 25]);
let mut buf2 = vec![0; 10];
assert_eq!(v.read(&mut buf2).unwrap(), 10);
assert_eq!(buf2, vec![b'T'; 10]);
assert_eq!(v.read(&mut buf2).unwrap(), 10);
assert_eq!(buf2, vec![b'T'; 10]);
assert_eq!(v.read(&mut buf2).unwrap(), 5);
assert_eq!(&buf2[0..5], vec![b'T'; 5].as_slice());
}
} |
name_map.insert(p[0].to_owned());
| random_line_split |
lib.rs | #![doc(html_root_url = "https://docs.rs/faimm/0.4.0")]
//! This crate provides indexed fasta access by using a memory mapped file to read the sequence
//! data. It is intended for accessing sequence data on genome sized fasta files and provides
//! random access based on base coordinates. Because an indexed fasta file uses a limited number of
//! bases per line separated by (sometimes platform-specific) newlines you cannot directly use the
//! bytes available from the mmap.
//!
//! Access is provided using a view of the mmap using zero-based base coordinates. This view can
//! then be used to iterate over bases (represented as `u8`) or parsed into a string. Naive gc
//! counting is also available.
//!
//! Access to the sequence data doesn't require the `IndexedFasta` to be mutable. This makes
//! it easy to share.
//!
//! # Example
//! ```
//! use faimm::IndexedFasta;
//! let fa = IndexedFasta::from_file("test/genome.fa").expect("Error opening fa");
//! let chr_index = fa.fai().tid("ACGT-25").expect("Cannot find chr in index");
//! let v = fa.view(chr_index, 0, 50).expect("Cannot get .fa view");
//! //count the bases
//! let counts = v.count_bases();
//! //or print the sequence
//! println!("{}", v.to_string());
//! ```
//! # Limitations
//! The parser uses a simple ascii mask for allowable characters (64..128) and does not apply any
//! IUPAC conversion or validation. Anything outside this range is silently skipped. This means that
//! also invalid `fasta` will be parsed. The mere presence of an accompanying `.fai` provides the
//! assumption of a valid fasta.
//! Requires Rust >=1.64
//!
//! # Alternatives
//! [Rust-bio](https://crates.io/crates/bio) provides a competent indexed fasta reader. The major
//! difference is that it has an internal buffer and therefore needs to be mutable when performing
//! read operations. faimm is also faster. If you want record based access (without an .fai index
//! file) [rust-bio](https://crates.io/crates/bio) or [seq_io](https://crates.io/crates/seq_io)
//! provide this.
//!
//! # Performance
//! Calculating the gc content of target regions of an exome (231_410 regions) on the Human
//! reference (GRCh38) takes about 0.7 seconds (warm cache), slightly faster than bedtools nuc (0.9s, probably a more
//! sound implementation) and rust-bio (1.3s, same implementation as the example).
//! Some tests show counting can also be improved using simd, but nothing has been released.
use std::fs::File;
use std::io::{self, BufRead, BufReader, Read};
use std::path::Path;
use indexmap::IndexSet;
use memmap2::{Mmap, MmapOptions};
/// The object that stores the parsed fasta index file. You can use it to map chromosome names to
/// indexes and lookup offsets for chr-start:end coordinates
#[derive(Debug, Clone)]
pub struct Fai {
chromosomes: Vec<FaiRecord>,
name_map: IndexSet<String>,
}
impl Fai {
/// Open a fasta index file from path `P`.
pub fn from_file<P: AsRef<Path>>(path: P) -> io::Result<Self> {
let f = File::open(path)?;
let br = BufReader::new(f);
let mut name_map = IndexSet::new();
let mut chromosomes = Vec::new();
for l in br.lines() {
let line = l?;
let p: Vec<_> = line.split('\t').collect();
        if p.len() != 5 {
            return Err(io::Error::new(
                io::ErrorKind::InvalidData,
                "Expected 5 columns in .fai file.",
            ));
}
name_map.insert(p[0].to_owned());
let ioerr =
|e, msg| io::Error::new(io::ErrorKind::InvalidData, format!("{}:{}", msg, e));
chromosomes.push(FaiRecord {
len: p[1]
.parse()
                .map_err(|e| ioerr(e, "Error parsing chr len in .fai"))?,
offset: p[2]
.parse()
                .map_err(|e| ioerr(e, "Error parsing chr offset in .fai"))?,
line_bases: p[3]
.parse()
                .map_err(|e| ioerr(e, "Error parsing chr line_bases in .fai"))?,
line_width: p[4]
.parse()
                .map_err(|e| ioerr(e, "Error parsing chr line_width in .fai"))?,
});
}
Ok(Fai {
chromosomes,
name_map,
})
}
/// Calculate the slice coordinates (byte offsets).
    /// tid is the index of the chromosome (lookup with `Fai::tid` if necessary).
    /// start, end: zero based coordinates of the requested range.
    ///
    /// Returns a tuple (start, end) if successful. `io::Error` otherwise.
#[inline]
pub fn offset(&self, tid: usize, start: usize, stop: usize) -> io::Result<(usize, usize)> {
let chr = &self.chromosomes.get(tid).ok_or_else(|| {
            io::Error::new(io::ErrorKind::Other, "Chromosome tid was out of bounds")
})?;
if stop > chr.len {
return Err(io::Error::new(
io::ErrorKind::Other,
"FASTA read interval was out of bounds",
));
}
let start_offset =
chr.offset + (start / chr.line_bases) * chr.line_width + start % chr.line_bases;
let stop_offset =
chr.offset + (stop / chr.line_bases) * chr.line_width + stop % chr.line_bases;
Ok((start_offset, stop_offset))
}
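    // Worked example (added; the .fai values are hypothetical): for a chromosome
    // with offset = 8, line_bases = 25 and line_width = 26, offset(tid, 74, 100)
    // returns (8 + 2 * 26 + 24, 8 + 4 * 26 + 0) = (84, 112).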
/// Calculate the slice coordinates (byte offsets).
    /// tid is the index of the chromosome (lookup with `Fai::tid` if necessary).
    ///
    /// Returns a tuple (start, end) if successful. `io::Error` otherwise.
#[inline]
pub fn offset_tid(&self, tid: usize) -> io::Result<(usize, usize)> {
let chr = &self.chromosomes.get(tid).ok_or_else(|| {
            io::Error::new(io::ErrorKind::Other, "Chromosome tid was out of bounds")
})?;
let start_offset = chr.offset;
let stop_offset =
chr.offset + (chr.len / chr.line_bases) * chr.line_width + chr.len % chr.line_bases;
Ok((start_offset, stop_offset))
}
/// Return the index of the chromosome by name in the fasta index.
///
    /// Returns the position of chr `name` if successful, None otherwise.
#[inline]
pub fn tid(&self, name: &str) -> Option<usize> {
self.name_map.get_index_of(name)
}
    /// Return the size of a chromosome in the fasta index.
///
/// Returns the size in bases as usize.
pub fn size(&self, tid: usize) -> io::Result<usize> {
let chr = &self.chromosomes.get(tid).ok_or_else(|| {
            io::Error::new(io::ErrorKind::Other, "Chromosome tid was out of bounds")
})?;
Ok(chr.len)
}
    /// Return the name of the chromosome at index tid.
pub fn | (&self, tid: usize) -> io::Result<&String> {
self.name_map.get_index(tid).ok_or_else(|| {
            io::Error::new(io::ErrorKind::Other, "Chromosome tid was out of bounds")
})
}
/// Return the names of the chromosomes from the fasta index in the same order as in the
/// `.fai`. You can use `Fai::tid` to map it back to an index.
///
/// Returns a `Vec<&str>` with the chromosome names.
pub fn names(&self) -> Vec<&str> {
self.name_map.iter().map(|s| s.as_str()).collect()
}
}
/// FaiRecord stores the length, offset, and fasta file characteristics of a single chromosome
#[derive(Debug, Clone)]
pub struct FaiRecord {
len: usize,
offset: usize,
line_bases: usize,
line_width: usize,
}
/// The `IndexedFasta` can be used to open a fasta file that has a valid .fai index file.
pub struct IndexedFasta {
mmap: Mmap,
fasta_index: Fai,
}
impl IndexedFasta {
    /// Open a fasta file from path `P`. It is assumed that it has a valid .fai index file. The
    /// .fai filename is derived by appending .fai to the fasta filename.
pub fn from_file<P: AsRef<Path>>(path: P) -> io::Result<Self> {
let mut fai_path = path.as_ref().as_os_str().to_owned();
fai_path.push(".fai");
let fasta_index = Fai::from_file(&fai_path)?;
let file = File::open(path)?;
let mmap = unsafe { MmapOptions::new().map(&file)? };
Ok(IndexedFasta { mmap, fasta_index })
}
/// Use tid, start and end to calculate a slice on the Fasta file. Use this view to iterate
/// over the bases.
///
    /// Returns FastaView for the provided chromosome, start, end if successful, Error otherwise.
pub fn view(&self, tid: usize, start: usize, stop: usize) -> io::Result<FastaView> {
if start > stop {
return Err(io::Error::new(
io::ErrorKind::Other,
"Invalid query interval",
));
}
let (start_byte, stop_byte) = self.fasta_index.offset(tid, start, stop)?;
//println!("offset for chr {}:{}-{} is {}-{}", tid, start, stop, start_byte, stop_byte);
Ok(FastaView(&self.mmap[start_byte..stop_byte]))
}
/// Use tid to return a view of an entire chromosome.
///
    /// Returns FastaView for the provided chromosome indicated by tid if successful, Error otherwise.
pub fn view_tid(&self, tid: usize) -> io::Result<FastaView> {
let (start_byte, stop_byte) = self.fasta_index.offset_tid(tid)?;
//println!("offset for chr {}:{}-{} is {}-{}", tid, start, stop, start_byte, stop_byte);
Ok(FastaView(&self.mmap[start_byte..stop_byte]))
}
/// Return a reference to the `Fai` that contains information from the fasta index.
///
/// Returns a reference to `Fai`.
pub fn fai(&self) -> &Fai {
&self.fasta_index
}
}
/// A view of a slice of the fasta file bounded by provided coordinates
pub struct FastaView<'a>(&'a [u8]);
impl<'a> FastaView<'a> {
    /// Count the occurrences of A, C, G, T, N, and other in the current view. This function does
/// not differentiate between upper or lower case bases.
///
    /// Returns a `BaseCounts` object.
pub fn count_bases(&self) -> BaseCounts {
let mut bc: BaseCounts = Default::default();
for b in self.bases() {
let v: u8 = b << 3;
if v ^ 8 == 0 {
bc.a += 1;
} else if v ^ 24 == 0 {
bc.c += 1;
} else if v ^ 56 == 0 {
bc.g += 1;
} else if v ^ 112 == 0 {
bc.n += 1;
} else if v ^ 160 == 0 {
bc.t += 1;
} else {
bc.other += 1;
}
}
bc
}
/// Iterator over the bases in the current view. Bases are returned as `u8` representations of
    /// the `char`s in the fasta file. Keep only the chars between 64 and 128 (effectively
    /// skipping newlines).
pub fn bases(&self) -> impl Iterator<Item = &'a u8> {
self.0.iter().filter(|&&b| b & 192 == 64)
}
}
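// Illustrative example (added; assumes `v: FastaView` obtained from
// `IndexedFasta::view`): both accessors skip newline bytes, so the counts agree:
//
//     let n_bases = v.bases().count();   // iterate without allocating
//     let seq = v.to_string();           // allocate a UTF-8 validated String
//     assert_eq!(n_bases, seq.len());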
/// Returns a newly allocated, utf8-validated string with the sequence data in `Self`
impl<'a> ToString for FastaView<'a> {
fn to_string(&self) -> String {
String::from_utf8(self.bases().cloned().collect()).unwrap()
}
}
impl<'a> Read for FastaView<'a> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let mut read = 0;
let mut skipped = 0;
for (t, s) in buf.iter_mut().zip(self.0.iter().filter(|&&c| {
let base = c & 192 == 64;
            if !base {
skipped += 1;
}
base
})) {
*t = *s;
read += 1;
}
self.0 = &self.0[(skipped + read)..];
Ok(read)
}
}
/// Object that counts the occurrences of the most common bases in DNA genome references: A, C, G,
/// T, N and other.
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct BaseCounts {
pub a: usize,
pub c: usize,
pub g: usize,
pub t: usize,
pub n: usize,
pub other: usize,
}
/// Initialize basecount with zeros
impl Default for BaseCounts {
fn default() -> BaseCounts {
BaseCounts {
a: 0,
c: 0,
g: 0,
t: 0,
n: 0,
other: 0,
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn fai() {
let ir = IndexedFasta::from_file("test/genome.fa").unwrap();
assert_eq!(ir.fai().names().len(), 3);
assert_eq!(ir.fai().tid("ACGT-25"), Some(2));
assert_eq!(ir.fai().tid("NotFound"), None);
assert_eq!(ir.fai().size(2).unwrap(), 100);
assert_eq!(ir.fai().name(2).unwrap(), "ACGT-25");
assert!(ir.fai().name(3).is_err());
}
#[test]
fn view() {
let ir = IndexedFasta::from_file("test/genome.fa").unwrap();
assert_eq!(ir.view(0, 0, 10).unwrap().to_string(), "AAAAAAAAAA");
assert!(ir.view(0, 0, 11).is_err());
assert_eq!(
ir.view(2, 38, 62).unwrap().to_string(),
"CCCCCCCCCCCCGGGGGGGGGGGG"
);
assert_eq!(
ir.view(2, 74, 100).unwrap().to_string(),
"GTTTTTTTTTTTTTTTTTTTTTTTTT"
);
assert!(ir.view(0, 120, 130).is_err());
}
#[test]
fn view_tid() {
let ir = IndexedFasta::from_file("test/genome.fa").unwrap();
assert_eq!(ir.view_tid(0).unwrap().to_string(), "AAAAAAAAAA");
assert_eq!(ir.view_tid(1).unwrap().to_string(),
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA");
assert_eq!(ir.view_tid(2).unwrap().to_string(),
"AAAAAAAAAAAAAAAAAAAAAAAAACCCCCCCCCCCCCCCCCCCCCCCCCGGGGGGGGGGGGGGGGGGGGGGGGGTTTTTTTTTTTTTTTTTTTTTTTTT");
assert!(ir.view_tid(3).is_err());
}
#[test]
fn view_bases() {
let ir = IndexedFasta::from_file("test/genome.fa").unwrap();
let v = ir.view(2, 48, 52).unwrap();
let mut b = v.bases();
assert_eq!(b.next(), Some(&b'C'));
assert_eq!(b.next(), Some(&b'C'));
assert_eq!(b.next(), Some(&b'G'));
assert_eq!(b.next(), Some(&b'G'));
assert_eq!(b.next(), None);
}
#[test]
fn view_counts() {
let ir = IndexedFasta::from_file("test/genome.fa").unwrap();
assert_eq!(
ir.view(2, 48, 52).unwrap().count_bases(),
BaseCounts {
c: 2,
g: 2,
..Default::default()
}
);
}
#[test]
fn read_view() {
let ir = IndexedFasta::from_file("test/genome.fa").unwrap();
let mut buf = vec![0; 25];
let mut v = ir.view_tid(2).unwrap();
println!("{}", v.to_string());
assert_eq!(v.read(&mut buf).unwrap(), 25);
assert_eq!(buf, vec![b'A'; 25]);
assert_eq!(v.read(&mut buf).unwrap(), 25);
assert_eq!(buf, vec![b'C'; 25]);
assert_eq!(v.read(&mut buf).unwrap(), 25);
assert_eq!(buf, vec![b'G'; 25]);
let mut buf2 = vec![0; 10];
assert_eq!(v.read(&mut buf2).unwrap(), 10);
assert_eq!(buf2, vec![b'T'; 10]);
assert_eq!(v.read(&mut buf2).unwrap(), 10);
assert_eq!(buf2, vec![b'T'; 10]);
assert_eq!(v.read(&mut buf2).unwrap(), 5);
assert_eq!(&buf2[0..5], vec![b'T'; 5].as_slice());
}
}
| name | identifier_name |
backend.rs | // Copyright 2017-2019 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
//! State machine backends. These manage the code and storage of contracts.
use std::{error, fmt};
use std::cmp::Ord;
use std::collections::HashMap;
use std::marker::PhantomData;
use log::warn;
use hash_db::Hasher;
use crate::trie_backend::TrieBackend;
use crate::trie_backend_essence::TrieBackendStorage;
use trie::{TrieMut, MemoryDB, child_trie_root, default_child_trie_root, TrieConfiguration};
use trie::trie_types::{TrieDBMut, Layout};
/// A state backend is used to read state data and can have changes committed
/// to it.
///
/// The clone operation (if implemented) should be cheap.
pub trait Backend<H: Hasher> {
/// An error type when fetching data is not possible.
type Error: super::Error;
/// Storage changes to be applied if committing
type Transaction: Consolidate + Default;
/// Type of trie backend storage.
type TrieBackendStorage: TrieBackendStorage<H>;
/// Get keyed storage or None if there is nothing associated.
fn storage(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error>;
/// Get keyed storage value hash or None if there is nothing associated.
fn storage_hash(&self, key: &[u8]) -> Result<Option<H::Out>, Self::Error> {
self.storage(key).map(|v| v.map(|v| H::hash(&v)))
}
/// Get keyed child storage or None if there is nothing associated.
fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error>;
/// Get child keyed storage value hash or None if there is nothing associated.
fn child_storage_hash(&self, storage_key: &[u8], key: &[u8]) -> Result<Option<H::Out>, Self::Error> {
self.child_storage(storage_key, key).map(|v| v.map(|v| H::hash(&v)))
}
/// true if a key exists in storage.
fn exists_storage(&self, key: &[u8]) -> Result<bool, Self::Error> {
Ok(self.storage(key)?.is_some())
}
/// true if a key exists in child storage.
fn exists_child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result<bool, Self::Error> {
Ok(self.child_storage(storage_key, key)?.is_some())
}
/// Retrieve all entry keys of child storage and call `f` for each of those keys.
fn for_keys_in_child_storage<F: FnMut(&[u8])>(&self, storage_key: &[u8], f: F);
/// Retrieve all entry keys which start with the given prefix and
/// call `f` for each of those keys.
fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], f: F);
/// Calculate the storage root, with given delta over what is already stored in
/// the backend, and produce a "transaction" that can be used to commit.
/// Does not include child storage updates.
fn storage_root<I>(&self, delta: I) -> (H::Out, Self::Transaction)
where
I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
H::Out: Ord;
/// Calculate the child storage root, with given delta over what is already stored in
/// the backend, and produce a "transaction" that can be used to commit. The second element
/// of the returned tuple is true if the child storage root equals the default storage root.
fn child_storage_root<I>(&self, storage_key: &[u8], delta: I) -> (Vec<u8>, bool, Self::Transaction)
where
I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
H::Out: Ord;
/// Get all key/value pairs into a Vec.
fn pairs(&self) -> Vec<(Vec<u8>, Vec<u8>)>;
/// Get all keys with given prefix
fn keys(&self, prefix: &[u8]) -> Vec<Vec<u8>> {
let mut all = Vec::new();
self.for_keys_with_prefix(prefix, |k| all.push(k.to_vec()));
all
}
/// Get all keys of child storage with given prefix
fn child_keys(&self, child_storage_key: &[u8], prefix: &[u8]) -> Vec<Vec<u8>> {
let mut all = Vec::new();
self.for_keys_in_child_storage(child_storage_key, |k| {
if k.starts_with(prefix) {
all.push(k.to_vec());
}
});
all
}
/// Try convert into trie backend.
fn as_trie_backend(&mut self) -> Option<&TrieBackend<Self::TrieBackendStorage, H>>;
/// Calculate the storage root, with given delta over what is already stored
/// in the backend, and produce a "transaction" that can be used to commit.
/// Does include child storage updates.
fn full_storage_root<I1, I2i, I2>(
&self,
delta: I1,
child_deltas: I2) | -> (H::Out, Self::Transaction)
where
I1: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
I2i: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
I2: IntoIterator<Item=(Vec<u8>, I2i)>,
<H as Hasher>::Out: Ord,
{
let mut txs: Self::Transaction = Default::default();
let mut child_roots: Vec<_> = Default::default();
// child first
for (storage_key, child_delta) in child_deltas {
let (child_root, empty, child_txs) =
self.child_storage_root(&storage_key[..], child_delta);
txs.consolidate(child_txs);
if empty {
child_roots.push((storage_key, None));
} else {
child_roots.push((storage_key, Some(child_root)));
}
}
let (root, parent_txs) = self.storage_root(
delta.into_iter().chain(child_roots.into_iter())
);
txs.consolidate(parent_txs);
(root, txs)
}
}
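// Illustrative sketch (added; not part of the original trait): a caller could
// compute a combined root roughly like this, assuming some backend value
// `backend: InMemory<H>` is in scope:
//
//     let delta = vec![(b"key".to_vec(), Some(b"value".to_vec()))];
//     let child_deltas = vec![(
//         b":child_storage:default:c1".to_vec(),        // hypothetical child storage key
//         vec![(b"ck".to_vec(), Some(b"cv".to_vec()))],
//     )];
//     let (root, tx) = backend.full_storage_root(delta, child_deltas);
//
// Child roots are computed first and then written into the top trie under their
// storage keys, so `root` commits to every child trie as well.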
/// Trait that allows consolidating two transactions into one.
pub trait Consolidate {
/// Consolidate two transactions into one.
fn consolidate(&mut self, other: Self);
}
impl Consolidate for () {
fn consolidate(&mut self, _: Self) {
()
}
}
impl Consolidate for Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)> {
fn consolidate(&mut self, mut other: Self) {
self.append(&mut other);
}
}
impl<H: Hasher, KF: trie::KeyFunction<H>> Consolidate for trie::GenericMemoryDB<H, KF> {
fn consolidate(&mut self, other: Self) {
trie::GenericMemoryDB::consolidate(self, other)
}
}
/// Error impossible.
// FIXME: use `!` type when stabilized. https://github.com/rust-lang/rust/issues/35121
#[derive(Debug)]
pub enum Void {}
impl fmt::Display for Void {
fn fmt(&self, _: &mut fmt::Formatter) -> fmt::Result {
match *self {}
}
}
impl error::Error for Void {
fn description(&self) -> &str { "unreachable error" }
}
/// In-memory backend. Fully recomputes tries on each commit but useful for
/// tests.
pub struct InMemory<H: Hasher> {
inner: HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>>,
trie: Option<TrieBackend<MemoryDB<H>, H>>,
_hasher: PhantomData<H>,
}
impl<H: Hasher> Default for InMemory<H> {
fn default() -> Self {
InMemory {
inner: Default::default(),
trie: None,
_hasher: PhantomData,
}
}
}
impl<H: Hasher> Clone for InMemory<H> {
fn clone(&self) -> Self {
InMemory {
inner: self.inner.clone(),
trie: None,
_hasher: PhantomData,
}
}
}
impl<H: Hasher> PartialEq for InMemory<H> {
fn eq(&self, other: &Self) -> bool {
self.inner.eq(&other.inner)
}
}
impl<H: Hasher> InMemory<H> {
/// Copy the state, with applied updates
pub fn update(&self, changes: <Self as Backend<H>>::Transaction) -> Self {
let mut inner: HashMap<_, _> = self.inner.clone();
for (storage_key, key, val) in changes {
match val {
Some(v) => { inner.entry(storage_key).or_default().insert(key, v); },
None => { inner.entry(storage_key).or_default().remove(&key); },
}
}
inner.into()
}
}
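// Example transaction applied via `update` (added; an illustrative sketch,
// assuming `mem: InMemory<H>` is in scope):
//
//     let next = mem.update(vec![
//         (None, b"top".to_vec(), Some(b"v".to_vec())),    // insert into the top trie
//         (Some(b"child".to_vec()), b"k".to_vec(), None),  // delete from a child trie
//     ]);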
impl<H: Hasher> From<HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>>> for InMemory<H> {
fn from(inner: HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>>) -> Self {
InMemory {
inner: inner,
trie: None,
_hasher: PhantomData,
}
}
}
impl<H: Hasher> From<HashMap<Vec<u8>, Vec<u8>>> for InMemory<H> {
fn from(inner: HashMap<Vec<u8>, Vec<u8>>) -> Self {
let mut expanded = HashMap::new();
expanded.insert(None, inner);
InMemory {
inner: expanded,
trie: None,
_hasher: PhantomData,
}
}
}
impl<H: Hasher> From<Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)>> for InMemory<H> {
fn from(inner: Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)>) -> Self {
let mut expanded: HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>> = HashMap::new();
for (child_key, key, value) in inner {
if let Some(value) = value {
expanded.entry(child_key).or_default().insert(key, value);
}
}
expanded.into()
}
}
impl super::Error for Void {}
impl<H: Hasher> InMemory<H> {
/// child storage key iterator
pub fn child_storage_keys(&self) -> impl Iterator<Item=&[u8]> {
self.inner.iter().filter_map(|item| item.0.as_ref().map(|v|&v[..]))
}
}
impl<H: Hasher> Backend<H> for InMemory<H> {
type Error = Void;
type Transaction = Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)>;
type TrieBackendStorage = MemoryDB<H>;
fn storage(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
Ok(self.inner.get(&None).and_then(|map| map.get(key).map(Clone::clone)))
}
fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
Ok(self.inner.get(&Some(storage_key.to_vec())).and_then(|map| map.get(key).map(Clone::clone)))
}
fn exists_storage(&self, key: &[u8]) -> Result<bool, Self::Error> {
Ok(self.inner.get(&None).map(|map| map.get(key).is_some()).unwrap_or(false))
}
fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], f: F) {
self.inner.get(&None).map(|map| map.keys().filter(|key| key.starts_with(prefix)).map(|k| &**k).for_each(f));
}
fn for_keys_in_child_storage<F: FnMut(&[u8])>(&self, storage_key: &[u8], mut f: F) {
self.inner.get(&Some(storage_key.to_vec())).map(|map| map.keys().for_each(|k| f(&k)));
}
fn storage_root<I>(&self, delta: I) -> (H::Out, Self::Transaction)
where
I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
<H as Hasher>::Out: Ord,
{
let existing_pairs = self.inner.get(&None)
.into_iter()
.flat_map(|map| map.iter().map(|(k, v)| (k.clone(), Some(v.clone()))));
let transaction: Vec<_> = delta.into_iter().collect();
let root = Layout::<H>::trie_root(existing_pairs.chain(transaction.iter().cloned())
.collect::<HashMap<_, _>>()
.into_iter()
.filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val)))
);
let full_transaction = transaction.into_iter().map(|(k, v)| (None, k, v)).collect();
(root, full_transaction)
}
fn child_storage_root<I>(&self, storage_key: &[u8], delta: I) -> (Vec<u8>, bool, Self::Transaction)
where
I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
H::Out: Ord
{
let storage_key = storage_key.to_vec();
let existing_pairs = self.inner.get(&Some(storage_key.clone()))
.into_iter()
.flat_map(|map| map.iter().map(|(k, v)| (k.clone(), Some(v.clone()))));
let transaction: Vec<_> = delta.into_iter().collect();
let root = child_trie_root::<Layout<H>, _, _, _>(
&storage_key,
existing_pairs.chain(transaction.iter().cloned())
.collect::<HashMap<_, _>>()
.into_iter()
.filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val)))
);
let full_transaction = transaction.into_iter().map(|(k, v)| (Some(storage_key.clone()), k, v)).collect();
let is_default = root == default_child_trie_root::<Layout<H>>(&storage_key);
(root, is_default, full_transaction)
}
fn pairs(&self) -> Vec<(Vec<u8>, Vec<u8>)> {
self.inner.get(&None)
.into_iter()
.flat_map(|map| map.iter().map(|(k, v)| (k.clone(), v.clone())))
.collect()
}
fn keys(&self, prefix: &[u8]) -> Vec<Vec<u8>> {
self.inner.get(&None)
.into_iter()
.flat_map(|map| map.keys().filter(|k| k.starts_with(prefix)).cloned())
.collect()
}
fn child_keys(&self, storage_key: &[u8], prefix: &[u8]) -> Vec<Vec<u8>> {
self.inner.get(&Some(storage_key.to_vec()))
.into_iter()
.flat_map(|map| map.keys().filter(|k| k.starts_with(prefix)).cloned())
.collect()
}
fn as_trie_backend(&mut self) -> Option<&TrieBackend<Self::TrieBackendStorage, H>> {
let mut mdb = MemoryDB::default();
let mut root = None;
let mut new_child_roots = Vec::new();
let mut root_map = None;
for (storage_key, map) in &self.inner {
if let Some(storage_key) = storage_key.as_ref() {
let ch = insert_into_memory_db::<H, _>(&mut mdb, map.clone().into_iter())?;
new_child_roots.push((storage_key.clone(), ch.as_ref().into()));
} else {
root_map = Some(map);
}
}
// root handling
if let Some(map) = root_map.take() {
root = Some(insert_into_memory_db::<H, _>(
&mut mdb,
map.clone().into_iter().chain(new_child_roots.into_iter())
)?);
}
let root = match root {
Some(root) => root,
None => insert_into_memory_db::<H, _>(&mut mdb, ::std::iter::empty())?,
};
self.trie = Some(TrieBackend::new(mdb, root));
self.trie.as_ref()
}
}
/// Insert input pairs into memory db.
pub(crate) fn insert_into_memory_db<H, I>(mdb: &mut MemoryDB<H>, input: I) -> Option<H::Out>
where
H: Hasher,
I: IntoIterator<Item=(Vec<u8>, Vec<u8>)>,
{
let mut root = <H as Hasher>::Out::default();
{
let mut trie = TrieDBMut::<H>::new(mdb, &mut root);
for (key, value) in input {
if let Err(e) = trie.insert(&key, &value) {
warn!(target: "trie", "Failed to write to trie: {}", e);
return None;
}
}
}
Some(root)
} | random_line_split |
|
backend.rs | // Copyright 2017-2019 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
//! State machine backends. These manage the code and storage of contracts.
use std::{error, fmt};
use std::cmp::Ord;
use std::collections::HashMap;
use std::marker::PhantomData;
use log::warn;
use hash_db::Hasher;
use crate::trie_backend::TrieBackend;
use crate::trie_backend_essence::TrieBackendStorage;
use trie::{TrieMut, MemoryDB, child_trie_root, default_child_trie_root, TrieConfiguration};
use trie::trie_types::{TrieDBMut, Layout};
/// A state backend is used to read state data and can have changes committed
/// to it.
///
/// The clone operation (if implemented) should be cheap.
pub trait Backend<H: Hasher> {
/// An error type when fetching data is not possible.
type Error: super::Error;
/// Storage changes to be applied if committing
type Transaction: Consolidate + Default;
/// Type of trie backend storage.
type TrieBackendStorage: TrieBackendStorage<H>;
/// Get keyed storage or None if there is nothing associated.
fn storage(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error>;
/// Get keyed storage value hash or None if there is nothing associated.
fn storage_hash(&self, key: &[u8]) -> Result<Option<H::Out>, Self::Error> {
self.storage(key).map(|v| v.map(|v| H::hash(&v)))
}
/// Get keyed child storage or None if there is nothing associated.
fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error>;
/// Get child keyed storage value hash or None if there is nothing associated.
fn | (&self, storage_key: &[u8], key: &[u8]) -> Result<Option<H::Out>, Self::Error> {
self.child_storage(storage_key, key).map(|v| v.map(|v| H::hash(&v)))
}
/// true if a key exists in storage.
fn exists_storage(&self, key: &[u8]) -> Result<bool, Self::Error> {
Ok(self.storage(key)?.is_some())
}
/// true if a key exists in child storage.
fn exists_child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result<bool, Self::Error> {
Ok(self.child_storage(storage_key, key)?.is_some())
}
/// Retrieve all entry keys of child storage and call `f` for each of those keys.
fn for_keys_in_child_storage<F: FnMut(&[u8])>(&self, storage_key: &[u8], f: F);
/// Retrieve all entry keys which start with the given prefix and
/// call `f` for each of those keys.
fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], f: F);
/// Calculate the storage root, with given delta over what is already stored in
/// the backend, and produce a "transaction" that can be used to commit.
/// Does not include child storage updates.
fn storage_root<I>(&self, delta: I) -> (H::Out, Self::Transaction)
where
I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
H::Out: Ord;
/// Calculate the child storage root, with given delta over what is already stored in
/// the backend, and produce a "transaction" that can be used to commit. The second element
/// of the returned tuple is true if the child storage root equals the default storage root.
fn child_storage_root<I>(&self, storage_key: &[u8], delta: I) -> (Vec<u8>, bool, Self::Transaction)
where
I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
H::Out: Ord;
/// Get all key/value pairs into a Vec.
fn pairs(&self) -> Vec<(Vec<u8>, Vec<u8>)>;
/// Get all keys with given prefix
fn keys(&self, prefix: &[u8]) -> Vec<Vec<u8>> {
let mut all = Vec::new();
self.for_keys_with_prefix(prefix, |k| all.push(k.to_vec()));
all
}
/// Get all keys of child storage with given prefix
fn child_keys(&self, child_storage_key: &[u8], prefix: &[u8]) -> Vec<Vec<u8>> {
let mut all = Vec::new();
self.for_keys_in_child_storage(child_storage_key, |k| {
if k.starts_with(prefix) {
all.push(k.to_vec());
}
});
all
}
/// Try convert into trie backend.
fn as_trie_backend(&mut self) -> Option<&TrieBackend<Self::TrieBackendStorage, H>>;
/// Calculate the storage root, with given delta over what is already stored
/// in the backend, and produce a "transaction" that can be used to commit.
/// Does include child storage updates.
fn full_storage_root<I1, I2i, I2>(
&self,
delta: I1,
child_deltas: I2)
-> (H::Out, Self::Transaction)
where
I1: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
I2i: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
I2: IntoIterator<Item=(Vec<u8>, I2i)>,
<H as Hasher>::Out: Ord,
{
let mut txs: Self::Transaction = Default::default();
let mut child_roots: Vec<_> = Default::default();
// child first
for (storage_key, child_delta) in child_deltas {
let (child_root, empty, child_txs) =
self.child_storage_root(&storage_key[..], child_delta);
txs.consolidate(child_txs);
if empty {
child_roots.push((storage_key, None));
} else {
child_roots.push((storage_key, Some(child_root)));
}
}
let (root, parent_txs) = self.storage_root(
delta.into_iter().chain(child_roots.into_iter())
);
txs.consolidate(parent_txs);
(root, txs)
}
}
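// Note on the default `full_storage_root` above (added for clarity): every child
// delta is folded into its child trie first, and each non-empty child root is
// then appended to the top-level delta under its storage key, conceptually
//
//     top_delta = delta ++ [(child_storage_key, Some(child_root_bytes))]
//
// so the returned root commits to all child tries.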
/// Trait that allows consolidating two transactions into one.
pub trait Consolidate {
/// Consolidate two transactions into one.
fn consolidate(&mut self, other: Self);
}
impl Consolidate for () {
fn consolidate(&mut self, _: Self) {
()
}
}
impl Consolidate for Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)> {
fn consolidate(&mut self, mut other: Self) {
self.append(&mut other);
}
}
impl<H: Hasher, KF: trie::KeyFunction<H>> Consolidate for trie::GenericMemoryDB<H, KF> {
fn consolidate(&mut self, other: Self) {
trie::GenericMemoryDB::consolidate(self, other)
}
}
/// Error impossible.
// FIXME: use `!` type when stabilized. https://github.com/rust-lang/rust/issues/35121
#[derive(Debug)]
pub enum Void {}
impl fmt::Display for Void {
fn fmt(&self, _: &mut fmt::Formatter) -> fmt::Result {
match *self {}
}
}
impl error::Error for Void {
fn description(&self) -> &str { "unreachable error" }
}
/// In-memory backend. Fully recomputes tries on each commit but useful for
/// tests.
pub struct InMemory<H: Hasher> {
inner: HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>>,
trie: Option<TrieBackend<MemoryDB<H>, H>>,
_hasher: PhantomData<H>,
}
impl<H: Hasher> Default for InMemory<H> {
fn default() -> Self {
InMemory {
inner: Default::default(),
trie: None,
_hasher: PhantomData,
}
}
}
impl<H: Hasher> Clone for InMemory<H> {
fn clone(&self) -> Self {
InMemory {
inner: self.inner.clone(),
trie: None,
_hasher: PhantomData,
}
}
}
impl<H: Hasher> PartialEq for InMemory<H> {
fn eq(&self, other: &Self) -> bool {
self.inner.eq(&other.inner)
}
}
impl<H: Hasher> InMemory<H> {
/// Copy the state, with applied updates
pub fn update(&self, changes: <Self as Backend<H>>::Transaction) -> Self {
let mut inner: HashMap<_, _> = self.inner.clone();
for (storage_key, key, val) in changes {
match val {
Some(v) => { inner.entry(storage_key).or_default().insert(key, v); },
None => { inner.entry(storage_key).or_default().remove(&key); },
}
}
inner.into()
}
}
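// Example construction (added; a sketch, the hasher type is an assumption):
//
//     use std::collections::HashMap;
//     let mut top: HashMap<Vec<u8>, Vec<u8>> = HashMap::new();
//     top.insert(b"key".to_vec(), b"value".to_vec());
//     let mem: InMemory<primitives::Blake2Hasher> = top.into();
//
// This populates only the top trie; child tries can be filled through the
// `From<Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)>>` conversion below.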
impl<H: Hasher> From<HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>>> for InMemory<H> {
fn from(inner: HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>>) -> Self {
InMemory {
inner: inner,
trie: None,
_hasher: PhantomData,
}
}
}
impl<H: Hasher> From<HashMap<Vec<u8>, Vec<u8>>> for InMemory<H> {
fn from(inner: HashMap<Vec<u8>, Vec<u8>>) -> Self {
let mut expanded = HashMap::new();
expanded.insert(None, inner);
InMemory {
inner: expanded,
trie: None,
_hasher: PhantomData,
}
}
}
impl<H: Hasher> From<Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)>> for InMemory<H> {
fn from(inner: Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)>) -> Self {
let mut expanded: HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>> = HashMap::new();
for (child_key, key, value) in inner {
if let Some(value) = value {
expanded.entry(child_key).or_default().insert(key, value);
}
}
expanded.into()
}
}
impl super::Error for Void {}
impl<H: Hasher> InMemory<H> {
/// child storage key iterator
pub fn child_storage_keys(&self) -> impl Iterator<Item=&[u8]> {
self.inner.iter().filter_map(|item| item.0.as_ref().map(|v|&v[..]))
}
}
impl<H: Hasher> Backend<H> for InMemory<H> {
type Error = Void;
type Transaction = Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)>;
type TrieBackendStorage = MemoryDB<H>;
fn storage(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
Ok(self.inner.get(&None).and_then(|map| map.get(key).map(Clone::clone)))
}
fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
Ok(self.inner.get(&Some(storage_key.to_vec())).and_then(|map| map.get(key).map(Clone::clone)))
}
fn exists_storage(&self, key: &[u8]) -> Result<bool, Self::Error> {
Ok(self.inner.get(&None).map(|map| map.get(key).is_some()).unwrap_or(false))
}
fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], f: F) {
self.inner.get(&None).map(|map| map.keys().filter(|key| key.starts_with(prefix)).map(|k| &**k).for_each(f));
}
fn for_keys_in_child_storage<F: FnMut(&[u8])>(&self, storage_key: &[u8], mut f: F) {
self.inner.get(&Some(storage_key.to_vec())).map(|map| map.keys().for_each(|k| f(&k)));
}
fn storage_root<I>(&self, delta: I) -> (H::Out, Self::Transaction)
where
I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
<H as Hasher>::Out: Ord,
{
let existing_pairs = self.inner.get(&None)
.into_iter()
.flat_map(|map| map.iter().map(|(k, v)| (k.clone(), Some(v.clone()))));
let transaction: Vec<_> = delta.into_iter().collect();
let root = Layout::<H>::trie_root(existing_pairs.chain(transaction.iter().cloned())
.collect::<HashMap<_, _>>()
.into_iter()
.filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val)))
);
let full_transaction = transaction.into_iter().map(|(k, v)| (None, k, v)).collect();
(root, full_transaction)
}
fn child_storage_root<I>(&self, storage_key: &[u8], delta: I) -> (Vec<u8>, bool, Self::Transaction)
where
I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
H::Out: Ord
{
let storage_key = storage_key.to_vec();
let existing_pairs = self.inner.get(&Some(storage_key.clone()))
.into_iter()
.flat_map(|map| map.iter().map(|(k, v)| (k.clone(), Some(v.clone()))));
let transaction: Vec<_> = delta.into_iter().collect();
let root = child_trie_root::<Layout<H>, _, _, _>(
&storage_key,
existing_pairs.chain(transaction.iter().cloned())
.collect::<HashMap<_, _>>()
.into_iter()
.filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val)))
);
let full_transaction = transaction.into_iter().map(|(k, v)| (Some(storage_key.clone()), k, v)).collect();
let is_default = root == default_child_trie_root::<Layout<H>>(&storage_key);
(root, is_default, full_transaction)
}
fn pairs(&self) -> Vec<(Vec<u8>, Vec<u8>)> {
self.inner.get(&None)
.into_iter()
.flat_map(|map| map.iter().map(|(k, v)| (k.clone(), v.clone())))
.collect()
}
fn keys(&self, prefix: &[u8]) -> Vec<Vec<u8>> {
self.inner.get(&None)
.into_iter()
.flat_map(|map| map.keys().filter(|k| k.starts_with(prefix)).cloned())
.collect()
}
fn child_keys(&self, storage_key: &[u8], prefix: &[u8]) -> Vec<Vec<u8>> {
self.inner.get(&Some(storage_key.to_vec()))
.into_iter()
.flat_map(|map| map.keys().filter(|k| k.starts_with(prefix)).cloned())
.collect()
}
fn as_trie_backend(&mut self) -> Option<&TrieBackend<Self::TrieBackendStorage, H>> {
let mut mdb = MemoryDB::default();
let mut root = None;
let mut new_child_roots = Vec::new();
let mut root_map = None;
for (storage_key, map) in &self.inner {
if let Some(storage_key) = storage_key.as_ref() {
let ch = insert_into_memory_db::<H, _>(&mut mdb, map.clone().into_iter())?;
new_child_roots.push((storage_key.clone(), ch.as_ref().into()));
} else {
root_map = Some(map);
}
}
// root handling
if let Some(map) = root_map.take() {
root = Some(insert_into_memory_db::<H, _>(
&mut mdb,
map.clone().into_iter().chain(new_child_roots.into_iter())
)?);
}
let root = match root {
Some(root) => root,
None => insert_into_memory_db::<H, _>(&mut mdb, ::std::iter::empty())?,
};
self.trie = Some(TrieBackend::new(mdb, root));
self.trie.as_ref()
}
}
/// Insert input pairs into memory db.
pub(crate) fn insert_into_memory_db<H, I>(mdb: &mut MemoryDB<H>, input: I) -> Option<H::Out>
where
H: Hasher,
I: IntoIterator<Item=(Vec<u8>, Vec<u8>)>,
{
let mut root = <H as Hasher>::Out::default();
{
let mut trie = TrieDBMut::<H>::new(mdb, &mut root);
for (key, value) in input {
if let Err(e) = trie.insert(&key, &value) {
warn!(target: "trie", "Failed to write to trie: {}", e);
return None;
}
}
}
Some(root)
}
| child_storage_hash | identifier_name |
backend.rs | // Copyright 2017-2019 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
//! State machine backends. These manage the code and storage of contracts.
use std::{error, fmt};
use std::cmp::Ord;
use std::collections::HashMap;
use std::marker::PhantomData;
use log::warn;
use hash_db::Hasher;
use crate::trie_backend::TrieBackend;
use crate::trie_backend_essence::TrieBackendStorage;
use trie::{TrieMut, MemoryDB, child_trie_root, default_child_trie_root, TrieConfiguration};
use trie::trie_types::{TrieDBMut, Layout};
/// A state backend is used to read state data and can have changes committed
/// to it.
///
/// The clone operation (if implemented) should be cheap.
pub trait Backend<H: Hasher> {
/// An error type when fetching data is not possible.
type Error: super::Error;
/// Storage changes to be applied if committing
type Transaction: Consolidate + Default;
/// Type of trie backend storage.
type TrieBackendStorage: TrieBackendStorage<H>;
/// Get keyed storage or None if there is nothing associated.
fn storage(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error>;
/// Get keyed storage value hash or None if there is nothing associated.
fn storage_hash(&self, key: &[u8]) -> Result<Option<H::Out>, Self::Error> {
self.storage(key).map(|v| v.map(|v| H::hash(&v)))
}
/// Get keyed child storage or None if there is nothing associated.
fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error>;
/// Get child keyed storage value hash or None if there is nothing associated.
fn child_storage_hash(&self, storage_key: &[u8], key: &[u8]) -> Result<Option<H::Out>, Self::Error> {
self.child_storage(storage_key, key).map(|v| v.map(|v| H::hash(&v)))
}
/// true if a key exists in storage.
fn exists_storage(&self, key: &[u8]) -> Result<bool, Self::Error> {
Ok(self.storage(key)?.is_some())
}
/// true if a key exists in child storage.
fn exists_child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result<bool, Self::Error> {
Ok(self.child_storage(storage_key, key)?.is_some())
}
	/// Retrieve all child storage entry keys and call `f` for each of those keys.
fn for_keys_in_child_storage<F: FnMut(&[u8])>(&self, storage_key: &[u8], f: F);
	/// Retrieve all entry keys that start with the given prefix and
	/// call `f` for each of those keys.
fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], f: F);
/// Calculate the storage root, with given delta over what is already stored in
/// the backend, and produce a "transaction" that can be used to commit.
/// Does not include child storage updates.
fn storage_root<I>(&self, delta: I) -> (H::Out, Self::Transaction)
where
I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
H::Out: Ord;
/// Calculate the child storage root, with given delta over what is already stored in
/// the backend, and produce a "transaction" that can be used to commit. The second argument
/// is true if child storage root equals default storage root.
fn child_storage_root<I>(&self, storage_key: &[u8], delta: I) -> (Vec<u8>, bool, Self::Transaction)
where
I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
H::Out: Ord;
/// Get all key/value pairs into a Vec.
fn pairs(&self) -> Vec<(Vec<u8>, Vec<u8>)>;
/// Get all keys with given prefix
fn keys(&self, prefix: &[u8]) -> Vec<Vec<u8>> {
let mut all = Vec::new();
self.for_keys_with_prefix(prefix, |k| all.push(k.to_vec()));
all
}
/// Get all keys of child storage with given prefix
fn child_keys(&self, child_storage_key: &[u8], prefix: &[u8]) -> Vec<Vec<u8>> {
let mut all = Vec::new();
self.for_keys_in_child_storage(child_storage_key, |k| {
if k.starts_with(prefix) {
all.push(k.to_vec());
}
});
all
}
/// Try convert into trie backend.
fn as_trie_backend(&mut self) -> Option<&TrieBackend<Self::TrieBackendStorage, H>>;
/// Calculate the storage root, with given delta over what is already stored
/// in the backend, and produce a "transaction" that can be used to commit.
/// Does include child storage updates.
fn full_storage_root<I1, I2i, I2>(
&self,
delta: I1,
child_deltas: I2)
-> (H::Out, Self::Transaction)
where
I1: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
I2i: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
I2: IntoIterator<Item=(Vec<u8>, I2i)>,
<H as Hasher>::Out: Ord,
{
let mut txs: Self::Transaction = Default::default();
let mut child_roots: Vec<_> = Default::default();
// child first
for (storage_key, child_delta) in child_deltas {
let (child_root, empty, child_txs) =
self.child_storage_root(&storage_key[..], child_delta);
txs.consolidate(child_txs);
if empty {
child_roots.push((storage_key, None));
} else {
child_roots.push((storage_key, Some(child_root)));
}
}
let (root, parent_txs) = self.storage_root(
delta.into_iter().chain(child_roots.into_iter())
);
txs.consolidate(parent_txs);
(root, txs)
}
}
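// NOTE: illustrative call shape only; the child storage key below is a made-up
// example. `full_storage_root` commits every child delta first and then feeds
// each child's new root (or a deletion when the child trie became empty) into
// the top-trie delta, so the caller gets back a single root plus one
// consolidated transaction.
#[allow(dead_code)]
fn example_full_root<H: Hasher, B: Backend<H>>(
	backend: &B,
	top_delta: Vec<(Vec<u8>, Option<Vec<u8>>)>,
	child_delta: Vec<(Vec<u8>, Option<Vec<u8>>)>,
) -> (H::Out, B::Transaction)
where
	H::Out: Ord,
{
	backend.full_storage_root(
		top_delta,
		vec![(b":child_storage:example".to_vec(), child_delta)],
	)
}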
/// Trait that allows consolidate two transactions together.
pub trait Consolidate {
/// Consolidate two transactions into one.
fn consolidate(&mut self, other: Self);
}
impl Consolidate for () {
fn consolidate(&mut self, _: Self) {
()
}
}
impl Consolidate for Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)> {
fn consolidate(&mut self, mut other: Self) {
self.append(&mut other);
}
}
impl<H: Hasher, KF: trie::KeyFunction<H>> Consolidate for trie::GenericMemoryDB<H, KF> {
fn consolidate(&mut self, other: Self) {
trie::GenericMemoryDB::consolidate(self, other)
}
}
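// NOTE: illustrative example only. For the Vec-based transaction type used by
// `InMemory`, `consolidate` is a plain append, so entries keep their relative
// order across the two transactions.
#[allow(dead_code)]
fn example_consolidate() {
	let mut txs: Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)> =
		vec![(None, b"a".to_vec(), Some(b"1".to_vec()))];
	txs.consolidate(vec![(None, b"b".to_vec(), None)]);
	assert_eq!(txs.len(), 2);
}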
/// Error impossible.
// FIXME: use `!` type when stabilized. https://github.com/rust-lang/rust/issues/35121
#[derive(Debug)]
pub enum Void {}
impl fmt::Display for Void {
fn fmt(&self, _: &mut fmt::Formatter) -> fmt::Result {
match *self {}
}
}
impl error::Error for Void {
fn description(&self) -> &str { "unreachable error" }
}
/// In-memory backend. Fully recomputes tries on each commit but useful for
/// tests.
pub struct InMemory<H: Hasher> {
inner: HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>>,
trie: Option<TrieBackend<MemoryDB<H>, H>>,
_hasher: PhantomData<H>,
}
impl<H: Hasher> Default for InMemory<H> {
fn default() -> Self {
InMemory {
inner: Default::default(),
trie: None,
_hasher: PhantomData,
}
}
}
impl<H: Hasher> Clone for InMemory<H> {
fn clone(&self) -> Self {
InMemory {
inner: self.inner.clone(),
trie: None,
_hasher: PhantomData,
}
}
}
impl<H: Hasher> PartialEq for InMemory<H> {
fn eq(&self, other: &Self) -> bool {
self.inner.eq(&other.inner)
}
}
impl<H: Hasher> InMemory<H> {
/// Copy the state, with applied updates
pub fn update(&self, changes: <Self as Backend<H>>::Transaction) -> Self {
let mut inner: HashMap<_, _> = self.inner.clone();
for (storage_key, key, val) in changes {
match val {
Some(v) => { inner.entry(storage_key).or_default().insert(key, v); },
None => { inner.entry(storage_key).or_default().remove(&key); },
}
}
inner.into()
}
}
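// NOTE: illustrative usage only (keys and values are arbitrary). A transaction
// entry's `Option` value is what drives `update`: `Some` inserts or
// overwrites, `None` removes; the leading `None` selects the top trie rather
// than a child trie.
#[allow(dead_code)]
fn example_update<H: Hasher>(backend: &InMemory<H>) -> InMemory<H> {
	backend.update(vec![
		(None, b"key".to_vec(), Some(b"value".to_vec())),
		(None, b"stale".to_vec(), None),
	])
}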
impl<H: Hasher> From<HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>>> for InMemory<H> {
fn from(inner: HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>>) -> Self {
InMemory {
inner: inner,
trie: None,
_hasher: PhantomData,
}
}
}
impl<H: Hasher> From<HashMap<Vec<u8>, Vec<u8>>> for InMemory<H> {
fn from(inner: HashMap<Vec<u8>, Vec<u8>>) -> Self {
let mut expanded = HashMap::new();
expanded.insert(None, inner);
InMemory {
inner: expanded,
trie: None,
_hasher: PhantomData,
}
}
}
impl<H: Hasher> From<Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)>> for InMemory<H> {
fn from(inner: Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)>) -> Self {
let mut expanded: HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>> = HashMap::new();
for (child_key, key, value) in inner {
if let Some(value) = value {
expanded.entry(child_key).or_default().insert(key, value);
}
}
expanded.into()
}
}
impl super::Error for Void {}
impl<H: Hasher> InMemory<H> {
/// child storage key iterator
pub fn child_storage_keys(&self) -> impl Iterator<Item=&[u8]> {
self.inner.iter().filter_map(|item| item.0.as_ref().map(|v|&v[..]))
}
}
impl<H: Hasher> Backend<H> for InMemory<H> {
type Error = Void;
type Transaction = Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)>;
type TrieBackendStorage = MemoryDB<H>;
fn storage(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
Ok(self.inner.get(&None).and_then(|map| map.get(key).map(Clone::clone)))
}
fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
Ok(self.inner.get(&Some(storage_key.to_vec())).and_then(|map| map.get(key).map(Clone::clone)))
}
fn exists_storage(&self, key: &[u8]) -> Result<bool, Self::Error> {
Ok(self.inner.get(&None).map(|map| map.get(key).is_some()).unwrap_or(false))
}
fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], f: F) {
self.inner.get(&None).map(|map| map.keys().filter(|key| key.starts_with(prefix)).map(|k| &**k).for_each(f));
}
fn for_keys_in_child_storage<F: FnMut(&[u8])>(&self, storage_key: &[u8], mut f: F) {
self.inner.get(&Some(storage_key.to_vec())).map(|map| map.keys().for_each(|k| f(&k)));
}
fn storage_root<I>(&self, delta: I) -> (H::Out, Self::Transaction)
where
I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
<H as Hasher>::Out: Ord,
{
let existing_pairs = self.inner.get(&None)
.into_iter()
.flat_map(|map| map.iter().map(|(k, v)| (k.clone(), Some(v.clone()))));
let transaction: Vec<_> = delta.into_iter().collect();
let root = Layout::<H>::trie_root(existing_pairs.chain(transaction.iter().cloned())
.collect::<HashMap<_, _>>()
.into_iter()
.filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val)))
);
let full_transaction = transaction.into_iter().map(|(k, v)| (None, k, v)).collect();
(root, full_transaction)
}
fn child_storage_root<I>(&self, storage_key: &[u8], delta: I) -> (Vec<u8>, bool, Self::Transaction)
where
I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
H::Out: Ord
{
let storage_key = storage_key.to_vec();
let existing_pairs = self.inner.get(&Some(storage_key.clone()))
.into_iter()
.flat_map(|map| map.iter().map(|(k, v)| (k.clone(), Some(v.clone()))));
let transaction: Vec<_> = delta.into_iter().collect();
let root = child_trie_root::<Layout<H>, _, _, _>(
&storage_key,
existing_pairs.chain(transaction.iter().cloned())
.collect::<HashMap<_, _>>()
.into_iter()
.filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val)))
);
let full_transaction = transaction.into_iter().map(|(k, v)| (Some(storage_key.clone()), k, v)).collect();
let is_default = root == default_child_trie_root::<Layout<H>>(&storage_key);
(root, is_default, full_transaction)
}
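	// NOTE (illustrative): the `bool` returned above is `true` exactly when the
	// resulting child trie is empty, i.e. its root equals
	// `default_child_trie_root::<Layout<H>>(&storage_key)`; `full_storage_root`
	// uses that flag to delete the child-root entry from the top trie instead
	// of storing a root for an empty child trie.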
fn pairs(&self) -> Vec<(Vec<u8>, Vec<u8>)> {
self.inner.get(&None)
.into_iter()
.flat_map(|map| map.iter().map(|(k, v)| (k.clone(), v.clone())))
.collect()
}
fn keys(&self, prefix: &[u8]) -> Vec<Vec<u8>> {
self.inner.get(&None)
.into_iter()
.flat_map(|map| map.keys().filter(|k| k.starts_with(prefix)).cloned())
.collect()
}
fn child_keys(&self, storage_key: &[u8], prefix: &[u8]) -> Vec<Vec<u8>> {
self.inner.get(&Some(storage_key.to_vec()))
.into_iter()
.flat_map(|map| map.keys().filter(|k| k.starts_with(prefix)).cloned())
.collect()
}
	fn as_trie_backend(&mut self) -> Option<&TrieBackend<Self::TrieBackendStorage, H>> {
let mut mdb = MemoryDB::default();
let mut root = None;
let mut new_child_roots = Vec::new();
let mut root_map = None;
for (storage_key, map) in &self.inner {
if let Some(storage_key) = storage_key.as_ref() | else {
root_map = Some(map);
}
}
// root handling
if let Some(map) = root_map.take() {
root = Some(insert_into_memory_db::<H, _>(
&mut mdb,
map.clone().into_iter().chain(new_child_roots.into_iter())
)?);
}
let root = match root {
Some(root) => root,
None => insert_into_memory_db::<H, _>(&mut mdb, ::std::iter::empty())?,
};
self.trie = Some(TrieBackend::new(mdb, root));
self.trie.as_ref()
}
}
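// NOTE: illustrative usage only. `as_trie_backend` re-merklizes every child
// trie plus the top trie into one `MemoryDB`, caches the result in
// `self.trie`, and returns a reference to it; it is typically called once
// before running code that needs a real trie (e.g. proof generation).
#[allow(dead_code)]
fn example_as_trie_backend<H: Hasher>(backend: &mut InMemory<H>) -> bool {
	backend.as_trie_backend().is_some()
}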
/// Insert input pairs into memory db.
pub(crate) fn insert_into_memory_db<H, I>(mdb: &mut MemoryDB<H>, input: I) -> Option<H::Out>
where
H: Hasher,
I: IntoIterator<Item=(Vec<u8>, Vec<u8>)>,
{
let mut root = <H as Hasher>::Out::default();
{
let mut trie = TrieDBMut::<H>::new(mdb, &mut root);
for (key, value) in input {
if let Err(e) = trie.insert(&key, &value) {
warn!(target: "trie", "Failed to write to trie: {}", e);
return None;
}
}
}
Some(root)
}
| {
let ch = insert_into_memory_db::<H, _>(&mut mdb, map.clone().into_iter())?;
new_child_roots.push((storage_key.clone(), ch.as_ref().into()));
} | conditional_block |
backend.rs | // Copyright 2017-2019 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
//! State machine backends. These manage the code and storage of contracts.
use std::{error, fmt};
use std::cmp::Ord;
use std::collections::HashMap;
use std::marker::PhantomData;
use log::warn;
use hash_db::Hasher;
use crate::trie_backend::TrieBackend;
use crate::trie_backend_essence::TrieBackendStorage;
use trie::{TrieMut, MemoryDB, child_trie_root, default_child_trie_root, TrieConfiguration};
use trie::trie_types::{TrieDBMut, Layout};
/// A state backend is used to read state data and can have changes committed
/// to it.
///
/// The clone operation (if implemented) should be cheap.
pub trait Backend<H: Hasher> {
/// An error type when fetching data is not possible.
type Error: super::Error;
/// Storage changes to be applied if committing
type Transaction: Consolidate + Default;
/// Type of trie backend storage.
type TrieBackendStorage: TrieBackendStorage<H>;
/// Get keyed storage or None if there is nothing associated.
fn storage(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error>;
/// Get keyed storage value hash or None if there is nothing associated.
fn storage_hash(&self, key: &[u8]) -> Result<Option<H::Out>, Self::Error> {
self.storage(key).map(|v| v.map(|v| H::hash(&v)))
}
/// Get keyed child storage or None if there is nothing associated.
fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error>;
/// Get child keyed storage value hash or None if there is nothing associated.
fn child_storage_hash(&self, storage_key: &[u8], key: &[u8]) -> Result<Option<H::Out>, Self::Error> {
self.child_storage(storage_key, key).map(|v| v.map(|v| H::hash(&v)))
}
/// true if a key exists in storage.
fn exists_storage(&self, key: &[u8]) -> Result<bool, Self::Error> {
Ok(self.storage(key)?.is_some())
}
/// true if a key exists in child storage.
fn exists_child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result<bool, Self::Error> {
Ok(self.child_storage(storage_key, key)?.is_some())
}
	/// Retrieve all child storage entry keys and call `f` for each of those keys.
fn for_keys_in_child_storage<F: FnMut(&[u8])>(&self, storage_key: &[u8], f: F);
	/// Retrieve all entry keys that start with the given prefix and
	/// call `f` for each of those keys.
fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], f: F);
/// Calculate the storage root, with given delta over what is already stored in
/// the backend, and produce a "transaction" that can be used to commit.
/// Does not include child storage updates.
fn storage_root<I>(&self, delta: I) -> (H::Out, Self::Transaction)
where
I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
H::Out: Ord;
/// Calculate the child storage root, with given delta over what is already stored in
/// the backend, and produce a "transaction" that can be used to commit. The second argument
/// is true if child storage root equals default storage root.
fn child_storage_root<I>(&self, storage_key: &[u8], delta: I) -> (Vec<u8>, bool, Self::Transaction)
where
I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
H::Out: Ord;
/// Get all key/value pairs into a Vec.
fn pairs(&self) -> Vec<(Vec<u8>, Vec<u8>)>;
/// Get all keys with given prefix
fn keys(&self, prefix: &[u8]) -> Vec<Vec<u8>> {
let mut all = Vec::new();
self.for_keys_with_prefix(prefix, |k| all.push(k.to_vec()));
all
}
/// Get all keys of child storage with given prefix
fn child_keys(&self, child_storage_key: &[u8], prefix: &[u8]) -> Vec<Vec<u8>> {
let mut all = Vec::new();
self.for_keys_in_child_storage(child_storage_key, |k| {
if k.starts_with(prefix) {
all.push(k.to_vec());
}
});
all
}
/// Try convert into trie backend.
fn as_trie_backend(&mut self) -> Option<&TrieBackend<Self::TrieBackendStorage, H>>;
/// Calculate the storage root, with given delta over what is already stored
/// in the backend, and produce a "transaction" that can be used to commit.
/// Does include child storage updates.
fn full_storage_root<I1, I2i, I2>(
&self,
delta: I1,
child_deltas: I2)
-> (H::Out, Self::Transaction)
where
I1: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
I2i: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
I2: IntoIterator<Item=(Vec<u8>, I2i)>,
<H as Hasher>::Out: Ord,
{
let mut txs: Self::Transaction = Default::default();
let mut child_roots: Vec<_> = Default::default();
// child first
for (storage_key, child_delta) in child_deltas {
let (child_root, empty, child_txs) =
self.child_storage_root(&storage_key[..], child_delta);
txs.consolidate(child_txs);
if empty {
child_roots.push((storage_key, None));
} else {
child_roots.push((storage_key, Some(child_root)));
}
}
let (root, parent_txs) = self.storage_root(
delta.into_iter().chain(child_roots.into_iter())
);
txs.consolidate(parent_txs);
(root, txs)
}
}
/// Trait that allows consolidate two transactions together.
pub trait Consolidate {
/// Consolidate two transactions into one.
fn consolidate(&mut self, other: Self);
}
impl Consolidate for () {
fn consolidate(&mut self, _: Self) {
()
}
}
impl Consolidate for Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)> {
fn consolidate(&mut self, mut other: Self) {
self.append(&mut other);
}
}
impl<H: Hasher, KF: trie::KeyFunction<H>> Consolidate for trie::GenericMemoryDB<H, KF> {
fn consolidate(&mut self, other: Self) {
trie::GenericMemoryDB::consolidate(self, other)
}
}
/// Error impossible.
// FIXME: use `!` type when stabilized. https://github.com/rust-lang/rust/issues/35121
#[derive(Debug)]
pub enum Void {}
impl fmt::Display for Void {
fn fmt(&self, _: &mut fmt::Formatter) -> fmt::Result {
match *self {}
}
}
impl error::Error for Void {
fn description(&self) -> &str { "unreachable error" }
}
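// NOTE: illustrative example only. Because `Void` has no variants it can never
// be constructed, so an `InMemory` result can be unwrapped without a reachable
// panic path; matching on the error with an empty match makes that explicit.
#[allow(dead_code)]
fn example_void_is_unreachable(res: Result<Option<Vec<u8>>, Void>) -> Option<Vec<u8>> {
	match res {
		Ok(value) => value,
		Err(void) => match void {}, // uninhabited: this arm can never execute
	}
}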
/// In-memory backend. Fully recomputes tries on each commit but useful for
/// tests.
pub struct InMemory<H: Hasher> {
inner: HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>>,
trie: Option<TrieBackend<MemoryDB<H>, H>>,
_hasher: PhantomData<H>,
}
impl<H: Hasher> Default for InMemory<H> {
fn default() -> Self {
InMemory {
inner: Default::default(),
trie: None,
_hasher: PhantomData,
}
}
}
impl<H: Hasher> Clone for InMemory<H> {
fn clone(&self) -> Self {
InMemory {
inner: self.inner.clone(),
trie: None,
_hasher: PhantomData,
}
}
}
impl<H: Hasher> PartialEq for InMemory<H> {
fn eq(&self, other: &Self) -> bool {
self.inner.eq(&other.inner)
}
}
impl<H: Hasher> InMemory<H> {
/// Copy the state, with applied updates
pub fn update(&self, changes: <Self as Backend<H>>::Transaction) -> Self {
let mut inner: HashMap<_, _> = self.inner.clone();
for (storage_key, key, val) in changes {
match val {
Some(v) => { inner.entry(storage_key).or_default().insert(key, v); },
None => { inner.entry(storage_key).or_default().remove(&key); },
}
}
inner.into()
}
}
impl<H: Hasher> From<HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>>> for InMemory<H> {
fn from(inner: HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>>) -> Self {
InMemory {
inner: inner,
trie: None,
_hasher: PhantomData,
}
}
}
impl<H: Hasher> From<HashMap<Vec<u8>, Vec<u8>>> for InMemory<H> {
fn from(inner: HashMap<Vec<u8>, Vec<u8>>) -> Self {
let mut expanded = HashMap::new();
expanded.insert(None, inner);
InMemory {
inner: expanded,
trie: None,
_hasher: PhantomData,
}
}
}
impl<H: Hasher> From<Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)>> for InMemory<H> {
fn from(inner: Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)>) -> Self {
let mut expanded: HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>> = HashMap::new();
for (child_key, key, value) in inner {
if let Some(value) = value {
expanded.entry(child_key).or_default().insert(key, value);
}
}
expanded.into()
}
}
impl super::Error for Void {}
impl<H: Hasher> InMemory<H> {
/// child storage key iterator
pub fn child_storage_keys(&self) -> impl Iterator<Item=&[u8]> {
self.inner.iter().filter_map(|item| item.0.as_ref().map(|v|&v[..]))
}
}
impl<H: Hasher> Backend<H> for InMemory<H> {
type Error = Void;
type Transaction = Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)>;
type TrieBackendStorage = MemoryDB<H>;
fn storage(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
Ok(self.inner.get(&None).and_then(|map| map.get(key).map(Clone::clone)))
}
fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
Ok(self.inner.get(&Some(storage_key.to_vec())).and_then(|map| map.get(key).map(Clone::clone)))
}
fn exists_storage(&self, key: &[u8]) -> Result<bool, Self::Error> |
fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], f: F) {
self.inner.get(&None).map(|map| map.keys().filter(|key| key.starts_with(prefix)).map(|k| &**k).for_each(f));
}
fn for_keys_in_child_storage<F: FnMut(&[u8])>(&self, storage_key: &[u8], mut f: F) {
self.inner.get(&Some(storage_key.to_vec())).map(|map| map.keys().for_each(|k| f(&k)));
}
fn storage_root<I>(&self, delta: I) -> (H::Out, Self::Transaction)
where
I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
<H as Hasher>::Out: Ord,
{
let existing_pairs = self.inner.get(&None)
.into_iter()
.flat_map(|map| map.iter().map(|(k, v)| (k.clone(), Some(v.clone()))));
let transaction: Vec<_> = delta.into_iter().collect();
let root = Layout::<H>::trie_root(existing_pairs.chain(transaction.iter().cloned())
.collect::<HashMap<_, _>>()
.into_iter()
.filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val)))
);
let full_transaction = transaction.into_iter().map(|(k, v)| (None, k, v)).collect();
(root, full_transaction)
}
fn child_storage_root<I>(&self, storage_key: &[u8], delta: I) -> (Vec<u8>, bool, Self::Transaction)
where
I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
H::Out: Ord
{
let storage_key = storage_key.to_vec();
let existing_pairs = self.inner.get(&Some(storage_key.clone()))
.into_iter()
.flat_map(|map| map.iter().map(|(k, v)| (k.clone(), Some(v.clone()))));
let transaction: Vec<_> = delta.into_iter().collect();
let root = child_trie_root::<Layout<H>, _, _, _>(
&storage_key,
existing_pairs.chain(transaction.iter().cloned())
.collect::<HashMap<_, _>>()
.into_iter()
.filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val)))
);
let full_transaction = transaction.into_iter().map(|(k, v)| (Some(storage_key.clone()), k, v)).collect();
let is_default = root == default_child_trie_root::<Layout<H>>(&storage_key);
(root, is_default, full_transaction)
}
fn pairs(&self) -> Vec<(Vec<u8>, Vec<u8>)> {
self.inner.get(&None)
.into_iter()
.flat_map(|map| map.iter().map(|(k, v)| (k.clone(), v.clone())))
.collect()
}
fn keys(&self, prefix: &[u8]) -> Vec<Vec<u8>> {
self.inner.get(&None)
.into_iter()
.flat_map(|map| map.keys().filter(|k| k.starts_with(prefix)).cloned())
.collect()
}
fn child_keys(&self, storage_key: &[u8], prefix: &[u8]) -> Vec<Vec<u8>> {
self.inner.get(&Some(storage_key.to_vec()))
.into_iter()
.flat_map(|map| map.keys().filter(|k| k.starts_with(prefix)).cloned())
.collect()
}
	fn as_trie_backend(&mut self) -> Option<&TrieBackend<Self::TrieBackendStorage, H>> {
let mut mdb = MemoryDB::default();
let mut root = None;
let mut new_child_roots = Vec::new();
let mut root_map = None;
for (storage_key, map) in &self.inner {
if let Some(storage_key) = storage_key.as_ref() {
let ch = insert_into_memory_db::<H, _>(&mut mdb, map.clone().into_iter())?;
new_child_roots.push((storage_key.clone(), ch.as_ref().into()));
} else {
root_map = Some(map);
}
}
// root handling
if let Some(map) = root_map.take() {
root = Some(insert_into_memory_db::<H, _>(
&mut mdb,
map.clone().into_iter().chain(new_child_roots.into_iter())
)?);
}
let root = match root {
Some(root) => root,
None => insert_into_memory_db::<H, _>(&mut mdb, ::std::iter::empty())?,
};
self.trie = Some(TrieBackend::new(mdb, root));
self.trie.as_ref()
}
}
/// Insert input pairs into memory db.
pub(crate) fn insert_into_memory_db<H, I>(mdb: &mut MemoryDB<H>, input: I) -> Option<H::Out>
where
H: Hasher,
I: IntoIterator<Item=(Vec<u8>, Vec<u8>)>,
{
let mut root = <H as Hasher>::Out::default();
{
let mut trie = TrieDBMut::<H>::new(mdb, &mut root);
for (key, value) in input {
if let Err(e) = trie.insert(&key, &value) {
warn!(target: "trie", "Failed to write to trie: {}", e);
return None;
}
}
}
Some(root)
}
| {
Ok(self.inner.get(&None).map(|map| map.get(key).is_some()).unwrap_or(false))
} | identifier_body |
mod.rs | // High-level Internal Representation of GObject artifacts
//
// Here we provide a view of the world in terms of what GObject knows:
// classes, interfaces, signals, etc.
//
// We construct this view of the world from the raw Abstract Syntax
// Tree (AST) from the previous stage.
use std::collections::HashMap;
use proc_macro::TokenStream;
use proc_macro2::{Delimiter, Span, TokenNode, TokenTree};
use quote::{Tokens, ToTokens};
use syn::{self, Ident, Path, Block, ReturnType};
use syn::punctuated::Punctuated;
use syn::synom::Synom;
use syn::buffer::TokenBuffer;
use super::ast;
use super::checking::*;
use super::errors::*;
use super::glib_utils::*;
pub struct Program<'ast> {
pub classes: Classes<'ast>,
}
pub struct Classes<'ast> {
items: HashMap<Ident, Class<'ast>>,
}
pub struct Class<'ast> {
pub name: Ident, // Foo
pub gobject_parent: bool,
pub parent: Tokens, // Parent
pub parent_ffi: Tokens, // ffi::Parent
pub parent_class_ffi: Tokens, // ffi::ParentClass
pub implements: Vec<Path>, // names of GTypeInterfaces
pub instance_private: Option<&'ast Path>,
// pub class_private: Option<&'ast ast::PrivateStruct>
// The order of these is important; it's the order of the slots in FooClass
pub slots: Vec<Slot<'ast>>,
// pub n_reserved_slots: usize,
// pub properties: Vec<Property>,
pub overrides: HashMap<Ident, Vec<Method<'ast>>>
}
pub enum Slot<'ast> {
Method(Method<'ast>),
VirtualMethod(VirtualMethod<'ast>),
Signal(Signal)
}
pub struct Method<'ast> {
pub public: bool,
pub sig: FnSig<'ast>,
pub body: &'ast Block,
}
pub struct VirtualMethod<'ast> {
pub sig: FnSig<'ast>,
pub body: Option<&'ast Block>,
}
pub struct FnSig<'ast> {
pub name: Ident,
pub inputs: Vec<FnArg<'ast>>,
pub output: Ty<'ast>,
}
pub enum FnArg<'ast> {
SelfRef(Token!(&), Token!(self)),
Arg {
mutbl: Option<Token![mut]>,
name: Ident,
ty: Ty<'ast>,
}
}
pub struct Signal {
// FIXME: signal flags
}
pub enum Ty<'ast> {
Unit,
Char(Ident),
Bool(Ident),
Borrowed(Box<Ty<'ast>>),
Integer(Ident),
Owned(&'ast syn::Path),
}
impl<'ast> Program<'ast> {
pub fn from_ast_program(ast: &'ast ast::Program) -> Result<Program<'ast>> {
check_program(ast)?;
let mut classes = Classes::new();
for class in ast.classes() {
classes.add(class)?;
}
for impl_ in ast.impls() {
classes.add_impl(impl_)?;
}
Ok(Program {
classes: classes,
})
}
}
impl<'ast> Classes<'ast> {
fn new() -> Classes<'ast> {
Classes {
items: HashMap::new(),
}
}
pub fn len(&self) -> usize {
self.items.len()
}
pub fn get(&self, name: &str) -> &Class {
self.items.iter().find(|c| c.1.name == name).unwrap().1
}
fn add(&mut self, ast_class: &'ast ast::Class) -> Result<()>
{
let prev = self.items.insert(ast_class.name, Class {
name: ast_class.name,
gobject_parent: ast_class.extends.is_none(),
parent: tokens_ParentInstance(ast_class),
parent_ffi: tokens_ParentInstanceFfi(ast_class),
parent_class_ffi: tokens_ParentClassFfi(ast_class),
implements: Vec::new(),
instance_private: ast_class.items.iter().filter_map(|i| {
match *i {
ast::ClassItem::InstancePrivate(ref ip) => Some(&ip.path),
}
}).next(),
slots: Vec::new(),
overrides: HashMap::new(),
});
if prev.is_some() {
bail!("redefinition of class `{}`", ast_class.name);
}
Ok(())
}
fn add_impl(&mut self, impl_: &'ast ast::Impl) -> Result<()> {
let class = match self.items.get_mut(&impl_.self_path) {
Some(class) => class,
None => bail!("impl for class that doesn't exist: {}", impl_.self_path),
};
match impl_.trait_ {
Some(parent_class) => {
for item in impl_.items.iter() {
let item = match item.node {
ast::ImplItemKind::Method(ref m) => m,
ast::ImplItemKind::ReserveSlots(_) => {
bail!("can't reserve slots in a parent class impl");
}
};
if item.signal {
bail!("can't implement signals for parent classes")
}
                    if !item.virtual_ {
bail!("can only implement virtual functions for parent classes")
}
if item.public {
bail!("overrides are always public, no `pub` needed")
}
let method = match class.translate_method(item)? {
Slot::VirtualMethod(VirtualMethod { sig, body: Some(body) }) => {
Method { public: false, sig, body }
}
                        Slot::VirtualMethod(VirtualMethod { .. }) => {
bail!("overrides must provide a body for virtual \
methods");
}
_ => unreachable!(),
};
class.overrides
.entry(parent_class)
.or_insert(Vec::new())
.push(method);
}
}
None => {
for item in impl_.items.iter() {
let slot = class.translate_slot(item)?;
class.slots.push(slot);
}
}
}
Ok(())
}
pub fn iter<'a>(&'a self) -> impl Iterator<Item = &'a Class> + 'a {
self.items.values()
}
}
impl<'ast> Class<'ast> {
fn translate_slot(&mut self, item: &'ast ast::ImplItem) -> Result<Slot<'ast>> {
assert_eq!(item.attrs.len(), 0); // attributes unimplemented
match item.node {
ast::ImplItemKind::Method(ref method) => self.translate_method(method),
ast::ImplItemKind::ReserveSlots(ref _slots) => {
panic!("reserve slots not implemented");
}
}
}
fn translate_method(&mut self, method: &'ast ast::ImplItemMethod)
-> Result<Slot<'ast>>
{
if method.signal {
panic!("signals not implemented");
}
if method.virtual_ {
if method.public {
bail!("function `{}` is virtual so it doesn't need to be public",
method.name)
}
let sig = self.extract_sig(method)?;
Ok(Slot::VirtualMethod(VirtualMethod {
sig,
body: method.body.as_ref(),
}))
} else {
let sig = self.extract_sig(method)?;
Ok(Slot::Method(Method {
sig,
public: method.public,
body: method.body.as_ref().ok_or_else(|| {
format!("function `{}` requires a body", method.name)
})?,
}))
}
}
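    // NOTE (illustrative sketch, not normative macro syntax): the rules above
    // mean a class impl in the macro input looks roughly like
    //
    //     impl Foo {
    //         virtual fn frobnicate(&self) {}  // Slot::VirtualMethod, never `pub`
    //         pub fn activate(&self) {}        // Slot::Method, body required
    //     }
    //
    // while `pub virtual fn`, a body-less plain `fn`, or any signal item is
    // rejected (signals currently panic as unimplemented).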
fn extract_sig(&mut self, method: &'ast ast::ImplItemMethod) -> Result<FnSig<'ast>> {
Ok(FnSig {
output: self.extract_output(&method.output)?,
inputs: self.extract_inputs(&method.inputs)?,
name: method.name,
})
}
fn extract_output(&mut self, output: &'ast ReturnType) -> Result<Ty<'ast>> |
fn extract_inputs(&mut self, punc: &'ast Punctuated<syn::FnArg, Token!(,)>) -> Result<Vec<FnArg<'ast>>> {
punc.iter().map(|arg| {
match *arg {
syn::FnArg::Captured(syn::ArgCaptured { ref pat, ref ty,.. }) => {
let (name, mutbl) = match *pat {
syn::Pat::Ident(syn::PatIdent {
by_ref: None,
mutability: m,
ident,
subpat: None,
}) => {
(ident, m)
}
_ => bail!("only bare identifiers are allowed as \
argument patterns"),
};
Ok(FnArg::Arg {
mutbl,
name,
ty: self.extract_ty(ty)?,
})
}
syn::FnArg::SelfRef(syn::ArgSelfRef {
and_token,
lifetime: None,
mutability: None,
self_token,
}) => {
Ok(FnArg::SelfRef(and_token, self_token))
}
syn::FnArg::SelfRef(syn::ArgSelfRef {
mutability: Some(..),
..
}) => {
bail!("&mut self not implemented yet")
}
syn::FnArg::SelfRef(syn::ArgSelfRef {
lifetime: Some(..),
..
}) => {
bail!("lifetime arguments on self not implemented yet")
}
syn::FnArg::SelfValue(_) => bail!("by-value self not implemented"),
syn::FnArg::Inferred(_) => bail!("cannot have inferred function arguments"),
syn::FnArg::Ignored(_) => bail!("cannot have ignored function arguments"),
}
}).collect()
}
fn extract_ty(&mut self, t: &'ast syn::Type) -> Result<Ty<'ast>> {
match *t {
syn::Type::Slice(_) => bail!("slice types not implemented yet"),
syn::Type::Array(_) => bail!("array types not implemented yet"),
syn::Type::Ptr(_) => bail!("ptr types not implemented yet"),
syn::Type::Reference(syn::TypeReference { lifetime: Some(_),.. }) => {
bail!("borrowed types with lifetimes not implemented yet")
}
syn::Type::Reference(syn::TypeReference { lifetime: None, ref elem, ref mutability,.. }) => {
if let Some(_) = *mutability {
bail!("mutable borrowed pointers not implemented");
}
let path = match **elem {
syn::Type::Path(syn::TypePath { qself: None, ref path }) => path,
_ => bail!("only borrowed pointers to paths supported"),
};
let ty = self.extract_ty_path(path)?;
Ok(Ty::Borrowed(Box::new(ty)))
}
syn::Type::BareFn(_) => bail!("function pointer types not implemented yet"),
syn::Type::Never(_) => bail!("never not implemented yet"),
syn::Type::Tuple(syn::TypeTuple { ref elems,.. }) => {
if elems.len() == 0 {
Ok(Ty::Unit)
} else {
bail!("tuple types not implemented yet")
}
}
syn::Type::Path(syn::TypePath { qself: Some(_),.. }) => {
bail!("path types with qualified self (`as` syntax) not allowed")
}
syn::Type::Path(syn::TypePath { qself: None, ref path }) => {
self.extract_ty_path(path)
}
syn::Type::TraitObject(_) => bail!("trait objects not implemented yet"),
syn::Type::ImplTrait(_) => bail!("trait objects not implemented yet"),
syn::Type::Paren(syn::TypeParen { ref elem,.. }) => self.extract_ty(elem),
syn::Type::Group(syn::TypeGroup { ref elem,.. }) => self.extract_ty(elem),
syn::Type::Infer(_) => bail!("underscore types not allowed"),
syn::Type::Macro(_) => bail!("type macros not allowed"),
syn::Type::Verbatim(_) => bail!("type macros not allowed"),
}
}
fn extract_ty_path(&mut self, t: &'ast syn::Path) -> Result<Ty<'ast>> {
if t.segments.iter().any(|segment| {
match segment.arguments {
syn::PathArguments::None => false,
_ => true,
}
}) {
bail!("type or lifetime parameters not allowed")
}
if t.leading_colon.is_some() || t.segments.len() > 1 {
return Ok(Ty::Owned(t))
}
// let ident = t.segments.get(0).item().ident;
let ident = t.segments.first().unwrap().value().ident;
match ident.as_ref() {
"char" => Ok(Ty::Char(ident)),
"bool" => Ok(Ty::Bool(ident)),
"i8" |
"i16" |
"i32" |
"i64" |
"isize" |
"u8" |
"u16" |
"u32" |
"u64" |
"usize" => {
Ok(Ty::Integer(ident))
}
_other => Ok(Ty::Owned(t)),
}
}
}
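// NOTE (illustrative): `extract_ty_path` keeps the mapping deliberately small —
// single-segment primitive names get dedicated variants and everything else
// stays owned, roughly:
//
//     bool -> Ty::Bool      char -> Ty::Char      u8..usize / i8..isize -> Ty::Integer
//     Foo  -> Ty::Owned     glib::Object -> Ty::Owned (multi-segment)
//
// and any path with generic or lifetime arguments is rejected outright.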
fn make_path_glib_object() -> Path {
let tokens = quote_cs! { glib::Object };
let token_stream = TokenStream::from(tokens);
let buffer = TokenBuffer::new(token_stream);
let cursor = buffer.begin();
Path::parse(cursor).unwrap().0
}
impl<'a> ToTokens for FnArg<'a> {
fn to_tokens(&self, tokens: &mut Tokens) {
match *self {
FnArg::SelfRef(and, self_) => {
and.to_tokens(tokens);
self_.to_tokens(tokens);
}
FnArg::Arg { name, ref ty, mutbl } => {
mutbl.to_tokens(tokens);
name.to_tokens(tokens);
Token!(:)([Span::def_site()]).to_tokens(tokens);
ty.to_tokens(tokens);
}
}
}
}
impl<'a> ToTokens for Ty<'a> {
fn to_tokens(&self, tokens: &mut Tokens) {
match *self {
Ty::Unit => tokens.append(TokenTree {
span: Span::call_site(),
kind: TokenNode::Group(Delimiter::Parenthesis, quote!{ () }.into()),
}),
Ty::Char(tok) => tok.to_tokens(tokens),
Ty::Bool(tok) => tok.to_tokens(tokens),
Ty::Integer(t) => t.to_tokens(tokens),
Ty::Borrowed(ref t) => {
Token!(&)([Span::def_site()]).to_tokens(tokens);
t.to_tokens(tokens)
}
Ty::Owned(t) => t.to_tokens(tokens),
}
}
}
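// NOTE (illustrative, names are hypothetical): with `FnArg` and `Ty`
// implementing `ToTokens`, a reconstructed `FnSig` can be spliced straight
// into generated code, e.g. `quote_cs! { fn #name(#(#inputs),*) -> #output }`
// where `name`, `inputs` and `output` are the fields built above.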
pub mod tests {
use super::*;
pub fn run() {
creates_trivial_class();
creates_class_with_superclass();
}
fn test_class_and_superclass (raw: &str, class_name: &str, superclass_name: &str) {
let token_stream = raw.parse::<TokenStream>().unwrap();
let buffer = TokenBuffer::new(token_stream);
let cursor = buffer.begin();
let ast_program = ast::Program::parse(cursor).unwrap().0;
let program = Program::from_ast_program(&ast_program).unwrap();
assert!(program.classes.len() == 1);
let class = program.classes.get(class_name);
assert_eq!(class.name.as_ref(), class_name);
assert_eq!(class.parent.to_string(), superclass_name);
}
fn creates_trivial_class() {
let raw = "class Foo {}";
test_class_and_superclass(raw, "Foo", "glib :: Object");
}
fn creates_class_with_superclass() {
let raw = "class Foo: Bar {}";
test_class_and_superclass(raw, "Foo", "Bar");
}
}
| {
match *output {
ReturnType::Type(_, ref boxt) => self.extract_ty(boxt),
ReturnType::Default => Ok(Ty::Unit),
}
} | identifier_body |
mod.rs | // High-level Internal Representation of GObject artifacts
//
// Here we provide a view of the world in terms of what GObject knows:
// classes, interfaces, signals, etc.
//
// We construct this view of the world from the raw Abstract Syntax
// Tree (AST) from the previous stage.
use std::collections::HashMap;
use proc_macro::TokenStream;
use proc_macro2::{Delimiter, Span, TokenNode, TokenTree};
use quote::{Tokens, ToTokens};
use syn::{self, Ident, Path, Block, ReturnType};
use syn::punctuated::Punctuated;
use syn::synom::Synom;
use syn::buffer::TokenBuffer;
use super::ast;
use super::checking::*;
use super::errors::*;
use super::glib_utils::*;
pub struct Program<'ast> {
pub classes: Classes<'ast>,
}
pub struct Classes<'ast> {
items: HashMap<Ident, Class<'ast>>,
}
pub struct Class<'ast> {
pub name: Ident, // Foo
pub gobject_parent: bool,
pub parent: Tokens, // Parent
pub parent_ffi: Tokens, // ffi::Parent
pub parent_class_ffi: Tokens, // ffi::ParentClass
pub implements: Vec<Path>, // names of GTypeInterfaces
pub instance_private: Option<&'ast Path>,
// pub class_private: Option<&'ast ast::PrivateStruct>
// The order of these is important; it's the order of the slots in FooClass
pub slots: Vec<Slot<'ast>>,
// pub n_reserved_slots: usize,
// pub properties: Vec<Property>,
pub overrides: HashMap<Ident, Vec<Method<'ast>>>
}
pub enum Slot<'ast> {
Method(Method<'ast>),
VirtualMethod(VirtualMethod<'ast>),
Signal(Signal)
}
pub struct Method<'ast> {
pub public: bool,
pub sig: FnSig<'ast>,
pub body: &'ast Block,
}
pub struct VirtualMethod<'ast> {
pub sig: FnSig<'ast>,
pub body: Option<&'ast Block>,
}
pub struct FnSig<'ast> {
pub name: Ident,
pub inputs: Vec<FnArg<'ast>>,
pub output: Ty<'ast>,
}
pub enum FnArg<'ast> {
SelfRef(Token!(&), Token!(self)),
Arg {
mutbl: Option<Token![mut]>,
name: Ident,
ty: Ty<'ast>,
}
}
pub struct Signal {
// FIXME: signal flags
}
pub enum Ty<'ast> {
Unit,
Char(Ident),
Bool(Ident),
Borrowed(Box<Ty<'ast>>),
Integer(Ident),
Owned(&'ast syn::Path),
}
impl<'ast> Program<'ast> {
pub fn from_ast_program(ast: &'ast ast::Program) -> Result<Program<'ast>> {
check_program(ast)?;
let mut classes = Classes::new();
for class in ast.classes() {
classes.add(class)?;
}
for impl_ in ast.impls() {
classes.add_impl(impl_)?;
}
Ok(Program {
classes: classes,
})
}
}
impl<'ast> Classes<'ast> {
fn new() -> Classes<'ast> {
Classes {
items: HashMap::new(),
}
}
pub fn len(&self) -> usize {
self.items.len()
}
pub fn get(&self, name: &str) -> &Class {
self.items.iter().find(|c| c.1.name == name).unwrap().1
}
fn add(&mut self, ast_class: &'ast ast::Class) -> Result<()>
{
let prev = self.items.insert(ast_class.name, Class {
name: ast_class.name,
gobject_parent: ast_class.extends.is_none(),
parent: tokens_ParentInstance(ast_class),
parent_ffi: tokens_ParentInstanceFfi(ast_class),
parent_class_ffi: tokens_ParentClassFfi(ast_class),
implements: Vec::new(),
instance_private: ast_class.items.iter().filter_map(|i| {
match *i {
ast::ClassItem::InstancePrivate(ref ip) => Some(&ip.path),
}
}).next(),
slots: Vec::new(),
overrides: HashMap::new(),
});
if prev.is_some() {
bail!("redefinition of class `{}`", ast_class.name);
}
Ok(())
}
fn add_impl(&mut self, impl_: &'ast ast::Impl) -> Result<()> {
let class = match self.items.get_mut(&impl_.self_path) {
Some(class) => class,
None => bail!("impl for class that doesn't exist: {}", impl_.self_path),
};
match impl_.trait_ {
Some(parent_class) => {
for item in impl_.items.iter() {
let item = match item.node {
ast::ImplItemKind::Method(ref m) => m,
ast::ImplItemKind::ReserveSlots(_) => {
bail!("can't reserve slots in a parent class impl");
}
};
if item.signal {
bail!("can't implement signals for parent classes")
}
                    if !item.virtual_ {
bail!("can only implement virtual functions for parent classes")
}
if item.public {
bail!("overrides are always public, no `pub` needed")
}
let method = match class.translate_method(item)? {
Slot::VirtualMethod(VirtualMethod { sig, body: Some(body) }) => {
Method { public: false, sig, body }
}
                        Slot::VirtualMethod(VirtualMethod { .. }) => {
bail!("overrides must provide a body for virtual \
methods");
}
_ => unreachable!(),
};
class.overrides
.entry(parent_class)
.or_insert(Vec::new())
.push(method);
}
}
None => {
for item in impl_.items.iter() {
let slot = class.translate_slot(item)?;
class.slots.push(slot); | }
}
}
Ok(())
}
pub fn iter<'a>(&'a self) -> impl Iterator<Item = &'a Class> + 'a {
self.items.values()
}
}
impl<'ast> Class<'ast> {
fn translate_slot(&mut self, item: &'ast ast::ImplItem) -> Result<Slot<'ast>> {
assert_eq!(item.attrs.len(), 0); // attributes unimplemented
match item.node {
ast::ImplItemKind::Method(ref method) => self.translate_method(method),
ast::ImplItemKind::ReserveSlots(ref _slots) => {
panic!("reserve slots not implemented");
}
}
}
fn translate_method(&mut self, method: &'ast ast::ImplItemMethod)
-> Result<Slot<'ast>>
{
if method.signal {
panic!("signals not implemented");
}
if method.virtual_ {
if method.public {
bail!("function `{}` is virtual so it doesn't need to be public",
method.name)
}
let sig = self.extract_sig(method)?;
Ok(Slot::VirtualMethod(VirtualMethod {
sig,
body: method.body.as_ref(),
}))
} else {
let sig = self.extract_sig(method)?;
Ok(Slot::Method(Method {
sig,
public: method.public,
body: method.body.as_ref().ok_or_else(|| {
format!("function `{}` requires a body", method.name)
})?,
}))
}
}
fn extract_sig(&mut self, method: &'ast ast::ImplItemMethod) -> Result<FnSig<'ast>> {
Ok(FnSig {
output: self.extract_output(&method.output)?,
inputs: self.extract_inputs(&method.inputs)?,
name: method.name,
})
}
fn extract_output(&mut self, output: &'ast ReturnType) -> Result<Ty<'ast>> {
match *output {
ReturnType::Type(_, ref boxt) => self.extract_ty(boxt),
ReturnType::Default => Ok(Ty::Unit),
}
}
fn extract_inputs(&mut self, punc: &'ast Punctuated<syn::FnArg, Token!(,)>) -> Result<Vec<FnArg<'ast>>> {
punc.iter().map(|arg| {
match *arg {
syn::FnArg::Captured(syn::ArgCaptured { ref pat, ref ty,.. }) => {
let (name, mutbl) = match *pat {
syn::Pat::Ident(syn::PatIdent {
by_ref: None,
mutability: m,
ident,
subpat: None,
}) => {
(ident, m)
}
_ => bail!("only bare identifiers are allowed as \
argument patterns"),
};
Ok(FnArg::Arg {
mutbl,
name,
ty: self.extract_ty(ty)?,
})
}
syn::FnArg::SelfRef(syn::ArgSelfRef {
and_token,
lifetime: None,
mutability: None,
self_token,
}) => {
Ok(FnArg::SelfRef(and_token, self_token))
}
syn::FnArg::SelfRef(syn::ArgSelfRef {
mutability: Some(..),
..
}) => {
bail!("&mut self not implemented yet")
}
syn::FnArg::SelfRef(syn::ArgSelfRef {
lifetime: Some(..),
..
}) => {
bail!("lifetime arguments on self not implemented yet")
}
syn::FnArg::SelfValue(_) => bail!("by-value self not implemented"),
syn::FnArg::Inferred(_) => bail!("cannot have inferred function arguments"),
syn::FnArg::Ignored(_) => bail!("cannot have ignored function arguments"),
}
}).collect()
}
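    // NOTE (illustrative): for a signature like `fn set_label(&self, label: &str)`
    // the mapping above yields `FnArg::SelfRef(..)` followed by
    // `FnArg::Arg { name: label, ty: Ty::Borrowed(..), mutbl: None }`; by-value
    // `self`, `&mut self`, lifetimes on `self`, and non-identifier patterns all
    // bail with an error instead of being skipped.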
fn extract_ty(&mut self, t: &'ast syn::Type) -> Result<Ty<'ast>> {
match *t {
syn::Type::Slice(_) => bail!("slice types not implemented yet"),
syn::Type::Array(_) => bail!("array types not implemented yet"),
syn::Type::Ptr(_) => bail!("ptr types not implemented yet"),
syn::Type::Reference(syn::TypeReference { lifetime: Some(_),.. }) => {
bail!("borrowed types with lifetimes not implemented yet")
}
syn::Type::Reference(syn::TypeReference { lifetime: None, ref elem, ref mutability,.. }) => {
if let Some(_) = *mutability {
bail!("mutable borrowed pointers not implemented");
}
let path = match **elem {
syn::Type::Path(syn::TypePath { qself: None, ref path }) => path,
_ => bail!("only borrowed pointers to paths supported"),
};
let ty = self.extract_ty_path(path)?;
Ok(Ty::Borrowed(Box::new(ty)))
}
syn::Type::BareFn(_) => bail!("function pointer types not implemented yet"),
syn::Type::Never(_) => bail!("never not implemented yet"),
syn::Type::Tuple(syn::TypeTuple { ref elems,.. }) => {
if elems.len() == 0 {
Ok(Ty::Unit)
} else {
bail!("tuple types not implemented yet")
}
}
syn::Type::Path(syn::TypePath { qself: Some(_),.. }) => {
bail!("path types with qualified self (`as` syntax) not allowed")
}
syn::Type::Path(syn::TypePath { qself: None, ref path }) => {
self.extract_ty_path(path)
}
syn::Type::TraitObject(_) => bail!("trait objects not implemented yet"),
syn::Type::ImplTrait(_) => bail!("trait objects not implemented yet"),
syn::Type::Paren(syn::TypeParen { ref elem,.. }) => self.extract_ty(elem),
syn::Type::Group(syn::TypeGroup { ref elem,.. }) => self.extract_ty(elem),
syn::Type::Infer(_) => bail!("underscore types not allowed"),
syn::Type::Macro(_) => bail!("type macros not allowed"),
syn::Type::Verbatim(_) => bail!("type macros not allowed"),
}
}
fn extract_ty_path(&mut self, t: &'ast syn::Path) -> Result<Ty<'ast>> {
if t.segments.iter().any(|segment| {
match segment.arguments {
syn::PathArguments::None => false,
_ => true,
}
}) {
bail!("type or lifetime parameters not allowed")
}
if t.leading_colon.is_some() || t.segments.len() > 1 {
return Ok(Ty::Owned(t))
}
// let ident = t.segments.get(0).item().ident;
let ident = t.segments.first().unwrap().value().ident;
match ident.as_ref() {
"char" => Ok(Ty::Char(ident)),
"bool" => Ok(Ty::Bool(ident)),
"i8" |
"i16" |
"i32" |
"i64" |
"isize" |
"u8" |
"u16" |
"u32" |
"u64" |
"usize" => {
Ok(Ty::Integer(ident))
}
_other => Ok(Ty::Owned(t)),
}
}
}
fn make_path_glib_object() -> Path {
let tokens = quote_cs! { glib::Object };
let token_stream = TokenStream::from(tokens);
let buffer = TokenBuffer::new(token_stream);
let cursor = buffer.begin();
Path::parse(cursor).unwrap().0
}
impl<'a> ToTokens for FnArg<'a> {
fn to_tokens(&self, tokens: &mut Tokens) {
match *self {
FnArg::SelfRef(and, self_) => {
and.to_tokens(tokens);
self_.to_tokens(tokens);
}
FnArg::Arg { name, ref ty, mutbl } => {
mutbl.to_tokens(tokens);
name.to_tokens(tokens);
Token!(:)([Span::def_site()]).to_tokens(tokens);
ty.to_tokens(tokens);
}
}
}
}
impl<'a> ToTokens for Ty<'a> {
fn to_tokens(&self, tokens: &mut Tokens) {
match *self {
Ty::Unit => tokens.append(TokenTree {
span: Span::call_site(),
kind: TokenNode::Group(Delimiter::Parenthesis, quote!{ () }.into()),
}),
Ty::Char(tok) => tok.to_tokens(tokens),
Ty::Bool(tok) => tok.to_tokens(tokens),
Ty::Integer(t) => t.to_tokens(tokens),
Ty::Borrowed(ref t) => {
Token!(&)([Span::def_site()]).to_tokens(tokens);
t.to_tokens(tokens)
}
Ty::Owned(t) => t.to_tokens(tokens),
}
}
}
pub mod tests {
use super::*;
pub fn run() {
creates_trivial_class();
creates_class_with_superclass();
}
fn test_class_and_superclass (raw: &str, class_name: &str, superclass_name: &str) {
let token_stream = raw.parse::<TokenStream>().unwrap();
let buffer = TokenBuffer::new(token_stream);
let cursor = buffer.begin();
let ast_program = ast::Program::parse(cursor).unwrap().0;
let program = Program::from_ast_program(&ast_program).unwrap();
assert!(program.classes.len() == 1);
let class = program.classes.get(class_name);
assert_eq!(class.name.as_ref(), class_name);
assert_eq!(class.parent.to_string(), superclass_name);
}
fn creates_trivial_class() {
let raw = "class Foo {}";
test_class_and_superclass(raw, "Foo", "glib :: Object");
}
fn creates_class_with_superclass() {
let raw = "class Foo: Bar {}";
test_class_and_superclass(raw, "Foo", "Bar");
}
} | random_line_split |
|
mod.rs | // High-level Internal Representation of GObject artifacts
//
// Here we provide a view of the world in terms of what GObject knows:
// classes, interfaces, signals, etc.
//
// We construct this view of the world from the raw Abstract Syntax
// Tree (AST) from the previous stage.
use std::collections::HashMap;
use proc_macro::TokenStream;
use proc_macro2::{Delimiter, Span, TokenNode, TokenTree};
use quote::{Tokens, ToTokens};
use syn::{self, Ident, Path, Block, ReturnType};
use syn::punctuated::Punctuated;
use syn::synom::Synom;
use syn::buffer::TokenBuffer;
use super::ast;
use super::checking::*;
use super::errors::*;
use super::glib_utils::*;
pub struct Program<'ast> {
pub classes: Classes<'ast>,
}
pub struct Classes<'ast> {
items: HashMap<Ident, Class<'ast>>,
}
pub struct Class<'ast> {
pub name: Ident, // Foo
pub gobject_parent: bool,
pub parent: Tokens, // Parent
pub parent_ffi: Tokens, // ffi::Parent
pub parent_class_ffi: Tokens, // ffi::ParentClass
pub implements: Vec<Path>, // names of GTypeInterfaces
pub instance_private: Option<&'ast Path>,
// pub class_private: Option<&'ast ast::PrivateStruct>
// The order of these is important; it's the order of the slots in FooClass
pub slots: Vec<Slot<'ast>>,
// pub n_reserved_slots: usize,
// pub properties: Vec<Property>,
pub overrides: HashMap<Ident, Vec<Method<'ast>>>
}
pub enum Slot<'ast> {
Method(Method<'ast>),
VirtualMethod(VirtualMethod<'ast>),
Signal(Signal)
}
pub struct Method<'ast> {
pub public: bool,
pub sig: FnSig<'ast>,
pub body: &'ast Block,
}
pub struct VirtualMethod<'ast> {
pub sig: FnSig<'ast>,
pub body: Option<&'ast Block>,
}
pub struct FnSig<'ast> {
pub name: Ident,
pub inputs: Vec<FnArg<'ast>>,
pub output: Ty<'ast>,
}
pub enum FnArg<'ast> {
SelfRef(Token!(&), Token!(self)),
Arg {
mutbl: Option<Token![mut]>,
name: Ident,
ty: Ty<'ast>,
}
}
pub struct Signal {
// FIXME: signal flags
}
pub enum Ty<'ast> {
Unit,
Char(Ident),
Bool(Ident),
Borrowed(Box<Ty<'ast>>),
Integer(Ident),
Owned(&'ast syn::Path),
}
impl<'ast> Program<'ast> {
pub fn from_ast_program(ast: &'ast ast::Program) -> Result<Program<'ast>> {
check_program(ast)?;
let mut classes = Classes::new();
for class in ast.classes() {
classes.add(class)?;
}
for impl_ in ast.impls() {
classes.add_impl(impl_)?;
}
Ok(Program {
classes: classes,
})
}
}
impl<'ast> Classes<'ast> {
fn new() -> Classes<'ast> {
Classes {
items: HashMap::new(),
}
}
pub fn | (&self) -> usize {
self.items.len()
}
pub fn get(&self, name: &str) -> &Class {
self.items.iter().find(|c| c.1.name == name).unwrap().1
}
fn add(&mut self, ast_class: &'ast ast::Class) -> Result<()>
{
let prev = self.items.insert(ast_class.name, Class {
name: ast_class.name,
gobject_parent: ast_class.extends.is_none(),
parent: tokens_ParentInstance(ast_class),
parent_ffi: tokens_ParentInstanceFfi(ast_class),
parent_class_ffi: tokens_ParentClassFfi(ast_class),
implements: Vec::new(),
instance_private: ast_class.items.iter().filter_map(|i| {
match *i {
ast::ClassItem::InstancePrivate(ref ip) => Some(&ip.path),
}
}).next(),
slots: Vec::new(),
overrides: HashMap::new(),
});
if prev.is_some() {
bail!("redefinition of class `{}`", ast_class.name);
}
Ok(())
}
fn add_impl(&mut self, impl_: &'ast ast::Impl) -> Result<()> {
let class = match self.items.get_mut(&impl_.self_path) {
Some(class) => class,
None => bail!("impl for class that doesn't exist: {}", impl_.self_path),
};
match impl_.trait_ {
Some(parent_class) => {
for item in impl_.items.iter() {
let item = match item.node {
ast::ImplItemKind::Method(ref m) => m,
ast::ImplItemKind::ReserveSlots(_) => {
bail!("can't reserve slots in a parent class impl");
}
};
if item.signal {
bail!("can't implement signals for parent classes")
}
                    if !item.virtual_ {
bail!("can only implement virtual functions for parent classes")
}
if item.public {
bail!("overrides are always public, no `pub` needed")
}
let method = match class.translate_method(item)? {
Slot::VirtualMethod(VirtualMethod { sig, body: Some(body) }) => {
Method { public: false, sig, body }
}
                        Slot::VirtualMethod(VirtualMethod { .. }) => {
bail!("overrides must provide a body for virtual \
methods");
}
_ => unreachable!(),
};
class.overrides
.entry(parent_class)
.or_insert(Vec::new())
.push(method);
}
}
None => {
for item in impl_.items.iter() {
let slot = class.translate_slot(item)?;
class.slots.push(slot);
}
}
}
Ok(())
}
pub fn iter<'a>(&'a self) -> impl Iterator<Item = &'a Class> + 'a {
self.items.values()
}
}
impl<'ast> Class<'ast> {
fn translate_slot(&mut self, item: &'ast ast::ImplItem) -> Result<Slot<'ast>> {
assert_eq!(item.attrs.len(), 0); // attributes unimplemented
match item.node {
ast::ImplItemKind::Method(ref method) => self.translate_method(method),
ast::ImplItemKind::ReserveSlots(ref _slots) => {
panic!("reserve slots not implemented");
}
}
}
fn translate_method(&mut self, method: &'ast ast::ImplItemMethod)
-> Result<Slot<'ast>>
{
if method.signal {
panic!("signals not implemented");
}
if method.virtual_ {
if method.public {
bail!("function `{}` is virtual so it doesn't need to be public",
method.name)
}
let sig = self.extract_sig(method)?;
Ok(Slot::VirtualMethod(VirtualMethod {
sig,
body: method.body.as_ref(),
}))
} else {
let sig = self.extract_sig(method)?;
Ok(Slot::Method(Method {
sig,
public: method.public,
body: method.body.as_ref().ok_or_else(|| {
format!("function `{}` requires a body", method.name)
})?,
}))
}
}
fn extract_sig(&mut self, method: &'ast ast::ImplItemMethod) -> Result<FnSig<'ast>> {
Ok(FnSig {
output: self.extract_output(&method.output)?,
inputs: self.extract_inputs(&method.inputs)?,
name: method.name,
})
}
fn extract_output(&mut self, output: &'ast ReturnType) -> Result<Ty<'ast>> {
match *output {
ReturnType::Type(_, ref boxt) => self.extract_ty(boxt),
ReturnType::Default => Ok(Ty::Unit),
}
}
fn extract_inputs(&mut self, punc: &'ast Punctuated<syn::FnArg, Token!(,)>) -> Result<Vec<FnArg<'ast>>> {
punc.iter().map(|arg| {
match *arg {
syn::FnArg::Captured(syn::ArgCaptured { ref pat, ref ty,.. }) => {
let (name, mutbl) = match *pat {
syn::Pat::Ident(syn::PatIdent {
by_ref: None,
mutability: m,
ident,
subpat: None,
}) => {
(ident, m)
}
_ => bail!("only bare identifiers are allowed as \
argument patterns"),
};
Ok(FnArg::Arg {
mutbl,
name,
ty: self.extract_ty(ty)?,
})
}
syn::FnArg::SelfRef(syn::ArgSelfRef {
and_token,
lifetime: None,
mutability: None,
self_token,
}) => {
Ok(FnArg::SelfRef(and_token, self_token))
}
syn::FnArg::SelfRef(syn::ArgSelfRef {
mutability: Some(..),
..
}) => {
bail!("&mut self not implemented yet")
}
syn::FnArg::SelfRef(syn::ArgSelfRef {
lifetime: Some(..),
..
}) => {
bail!("lifetime arguments on self not implemented yet")
}
syn::FnArg::SelfValue(_) => bail!("by-value self not implemented"),
syn::FnArg::Inferred(_) => bail!("cannot have inferred function arguments"),
syn::FnArg::Ignored(_) => bail!("cannot have ignored function arguments"),
}
}).collect()
}
fn extract_ty(&mut self, t: &'ast syn::Type) -> Result<Ty<'ast>> {
match *t {
syn::Type::Slice(_) => bail!("slice types not implemented yet"),
syn::Type::Array(_) => bail!("array types not implemented yet"),
syn::Type::Ptr(_) => bail!("ptr types not implemented yet"),
syn::Type::Reference(syn::TypeReference { lifetime: Some(_),.. }) => {
bail!("borrowed types with lifetimes not implemented yet")
}
syn::Type::Reference(syn::TypeReference { lifetime: None, ref elem, ref mutability,.. }) => {
if let Some(_) = *mutability {
bail!("mutable borrowed pointers not implemented");
}
let path = match **elem {
syn::Type::Path(syn::TypePath { qself: None, ref path }) => path,
_ => bail!("only borrowed pointers to paths supported"),
};
let ty = self.extract_ty_path(path)?;
Ok(Ty::Borrowed(Box::new(ty)))
}
syn::Type::BareFn(_) => bail!("function pointer types not implemented yet"),
syn::Type::Never(_) => bail!("never not implemented yet"),
syn::Type::Tuple(syn::TypeTuple { ref elems,.. }) => {
if elems.len() == 0 {
Ok(Ty::Unit)
} else {
bail!("tuple types not implemented yet")
}
}
syn::Type::Path(syn::TypePath { qself: Some(_),.. }) => {
bail!("path types with qualified self (`as` syntax) not allowed")
}
syn::Type::Path(syn::TypePath { qself: None, ref path }) => {
self.extract_ty_path(path)
}
syn::Type::TraitObject(_) => bail!("trait objects not implemented yet"),
syn::Type::ImplTrait(_) => bail!("trait objects not implemented yet"),
syn::Type::Paren(syn::TypeParen { ref elem,.. }) => self.extract_ty(elem),
syn::Type::Group(syn::TypeGroup { ref elem,.. }) => self.extract_ty(elem),
syn::Type::Infer(_) => bail!("underscore types not allowed"),
syn::Type::Macro(_) => bail!("type macros not allowed"),
syn::Type::Verbatim(_) => bail!("type macros not allowed"),
}
}
fn extract_ty_path(&mut self, t: &'ast syn::Path) -> Result<Ty<'ast>> {
if t.segments.iter().any(|segment| {
match segment.arguments {
syn::PathArguments::None => false,
_ => true,
}
}) {
bail!("type or lifetime parameters not allowed")
}
if t.leading_colon.is_some() || t.segments.len() > 1 {
return Ok(Ty::Owned(t))
}
// let ident = t.segments.get(0).item().ident;
let ident = t.segments.first().unwrap().value().ident;
match ident.as_ref() {
"char" => Ok(Ty::Char(ident)),
"bool" => Ok(Ty::Bool(ident)),
"i8" |
"i16" |
"i32" |
"i64" |
"isize" |
"u8" |
"u16" |
"u32" |
"u64" |
"usize" => {
Ok(Ty::Integer(ident))
}
_other => Ok(Ty::Owned(t)),
}
}
}
fn make_path_glib_object() -> Path {
let tokens = quote_cs! { glib::Object };
let token_stream = TokenStream::from(tokens);
let buffer = TokenBuffer::new(token_stream);
let cursor = buffer.begin();
Path::parse(cursor).unwrap().0
}
impl<'a> ToTokens for FnArg<'a> {
fn to_tokens(&self, tokens: &mut Tokens) {
match *self {
FnArg::SelfRef(and, self_) => {
and.to_tokens(tokens);
self_.to_tokens(tokens);
}
FnArg::Arg { name, ref ty, mutbl } => {
mutbl.to_tokens(tokens);
name.to_tokens(tokens);
Token!(:)([Span::def_site()]).to_tokens(tokens);
ty.to_tokens(tokens);
}
}
}
}
impl<'a> ToTokens for Ty<'a> {
fn to_tokens(&self, tokens: &mut Tokens) {
match *self {
Ty::Unit => tokens.append(TokenTree {
span: Span::call_site(),
kind: TokenNode::Group(Delimiter::Parenthesis, quote!{ () }.into()),
}),
Ty::Char(tok) => tok.to_tokens(tokens),
Ty::Bool(tok) => tok.to_tokens(tokens),
Ty::Integer(t) => t.to_tokens(tokens),
Ty::Borrowed(ref t) => {
Token!(&)([Span::def_site()]).to_tokens(tokens);
t.to_tokens(tokens)
}
Ty::Owned(t) => t.to_tokens(tokens),
}
}
}
pub mod tests {
use super::*;
pub fn run() {
creates_trivial_class();
creates_class_with_superclass();
}
    fn test_class_and_superclass(raw: &str, class_name: &str, superclass_name: &str) {
let token_stream = raw.parse::<TokenStream>().unwrap();
let buffer = TokenBuffer::new(token_stream);
let cursor = buffer.begin();
let ast_program = ast::Program::parse(cursor).unwrap().0;
let program = Program::from_ast_program(&ast_program).unwrap();
assert!(program.classes.len() == 1);
let class = program.classes.get(class_name);
assert_eq!(class.name.as_ref(), class_name);
assert_eq!(class.parent.to_string(), superclass_name);
}
fn creates_trivial_class() {
let raw = "class Foo {}";
test_class_and_superclass(raw, "Foo", "glib :: Object");
}
fn creates_class_with_superclass() {
let raw = "class Foo: Bar {}";
test_class_and_superclass(raw, "Foo", "Bar");
}
}
| len | identifier_name |
merkle.rs | // LNP/BP client-side-validation foundation libraries implementing LNPBP
// specifications & standards (LNPBP-4, 7, 8, 9, 42, 81)
//
// Written in 2019-2021 by
// Dr. Maxim Orlovsky <[email protected]>
//
// To the extent possible under law, the author(s) have dedicated all
// copyright and related and neighboring rights to this software to
// the public domain worldwide. This software is distributed without
// any warranty.
//
// You should have received a copy of the Apache 2.0 License along with this
// software. If not, see <https://opensource.org/licenses/Apache-2.0>.
//! Merklization procedures for client-side-validation according to [LNPBP-81]
//! standard.
//!
//! [LNPBP-81]: https://github.com/LNP-BP/LNPBPs/blob/master/lnpbp-0081.md
use std::io;
use std::iter::FromIterator;
use bitcoin_hashes::{sha256, Hash, HashEngine};
use crate::{commit_encode, CommitEncode, CommitVerify, ConsensusCommit};
/// Marker trait for types that require merklization of the underlying data
/// during the [`ConsensusCommit`] procedure. Allows specifying a custom tag
/// for the tagged hash used in the merklization (see [`merklize`]).
pub trait ConsensusMerkleCommit:
ConsensusCommit<Commitment = MerkleNode>
{
/// The tag prefix which will be used in the merklization process (see
/// [`merklize`])
const MERKLE_NODE_PREFIX: &'static str;
}
hash_newtype!(
MerkleNode,
sha256::Hash,
32,
doc = "A hash type for LNPBP-81 Merkle tree leaves, branches and root",
false // We do not reverse displaying MerkleNodes in hexadecimal
);
impl strict_encoding::Strategy for MerkleNode {
type Strategy = strict_encoding::strategies::HashFixedBytes;
}
impl commit_encode::Strategy for MerkleNode {
type Strategy = commit_encode::strategies::UsingStrict;
}
impl<MSG> CommitVerify<MSG> for MerkleNode
where
MSG: AsRef<[u8]>,
{
#[inline]
fn commit(msg: &MSG) -> MerkleNode { MerkleNode::hash(msg.as_ref()) }
}
impl<A, B> ConsensusCommit for (A, B)
where
A: CommitEncode,
B: CommitEncode,
{
type Commitment = MerkleNode;
}
impl<A, B, C> ConsensusCommit for (A, B, C)
where
A: CommitEncode,
B: CommitEncode,
C: CommitEncode,
{
type Commitment = MerkleNode;
}
/// Merklization procedure that uses tagged hashes with depth commitments
/// according to [LNPBP-81] standard of client-side-validation merklization
///
/// [LNPBP-81]: https://github.com/LNP-BP/LNPBPs/blob/master/lnpbp-0081.md
pub fn merklize<I>(prefix: &str, data: I) -> (MerkleNode, u8)
where
I: IntoIterator<Item = MerkleNode>,
<I as IntoIterator>::IntoIter: ExactSizeIterator<Item = MerkleNode>,
{
let mut tag_engine = sha256::Hash::engine();
tag_engine.input(prefix.as_bytes());
tag_engine.input(":merkle:".as_bytes());
let iter = data.into_iter();
let width = iter.len();
// Tagging merkle tree root
let (root, height) = merklize_inner(&tag_engine, iter, 0, false, None);
tag_engine.input("root:height=".as_bytes());
tag_engine.input(&height.to_string().into_bytes());
tag_engine.input(":width=".as_bytes());
tag_engine.input(&width.to_string().into_bytes());
let tag_hash = sha256::Hash::hash(&sha256::Hash::from_engine(tag_engine));
let mut engine = MerkleNode::engine();
engine.input(&tag_hash[..]);
engine.input(&tag_hash[..]);
root.commit_encode(&mut engine);
let tagged_root = MerkleNode::from_engine(engine);
(tagged_root, height)
}
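// Illustrative sketch (added for clarity; not part of the upstream file):
// drives `merklize` directly with pre-computed leaves. The module, test and
// prefix names below are assumptions made up for this example.
#[cfg(test)]
mod merklize_usage_sketch {
    use super::*;

    #[test]
    fn merklize_three_leaves() {
        // Hash three arbitrary messages into Merkle tree leaves.
        let leaves: Vec<MerkleNode> = ["a", "b", "c"]
            .iter()
            .map(|msg| MerkleNode::hash(msg.as_bytes()))
            .collect();
        // Merklization under a protocol-specific tag prefix returns the
        // tagged root together with the height of the tree: three leaves
        // produce a two-level tree, the odd leaf being paired with the
        // empty-node placeholder.
        let (root, height) = merklize("example", leaves);
        assert_eq!(height, 2);
        // The root is an ordinary `MerkleNode` that the caller can commit to.
        let _ = root;
    }
}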
// TODO: Optimize to avoid allocations
// In current rust generic iterators do not work with recursion :(
fn merklize_inner(
engine_proto: &sha256::HashEngine,
mut iter: impl ExactSizeIterator<Item = MerkleNode>,
depth: u8,
extend: bool,
empty_node: Option<MerkleNode>,
) -> (MerkleNode, u8) {
let len = iter.len() + extend as usize;
let empty_node = empty_node.unwrap_or_else(|| MerkleNode::hash(&[0xFF]));
// Computing tagged hash as per BIP-340
let mut tag_engine = engine_proto.clone();
tag_engine.input("depth=".as_bytes());
tag_engine.input(depth.to_string().as_bytes());
tag_engine.input(":width=".as_bytes());
tag_engine.input(len.to_string().as_bytes());
tag_engine.input(":height=".as_bytes());
let mut engine = MerkleNode::engine();
if len <= 2 {
tag_engine.input("0:".as_bytes());
let tag_hash =
sha256::Hash::hash(&sha256::Hash::from_engine(tag_engine));
engine.input(&tag_hash[..]);
engine.input(&tag_hash[..]);
let mut leaf_tag_engine = engine_proto.clone();
leaf_tag_engine.input("leaf".as_bytes());
let leaf_tag =
sha256::Hash::hash(&sha256::Hash::from_engine(leaf_tag_engine));
let mut leaf_engine = MerkleNode::engine();
leaf_engine.input(&leaf_tag[..]);
leaf_engine.input(&leaf_tag[..]);
let mut leaf1 = leaf_engine.clone();
leaf1.input(
iter.next()
.as_ref()
.map(|d| d.as_ref())
.unwrap_or_else(|| empty_node.as_ref()),
);
MerkleNode::from_engine(leaf1).commit_encode(&mut engine);
leaf_engine.input(
iter.next()
.as_ref()
.map(|d| d.as_ref())
.unwrap_or_else(|| empty_node.as_ref()),
);
MerkleNode::from_engine(leaf_engine).commit_encode(&mut engine);
(MerkleNode::from_engine(engine), 1)
} else | engine_proto,
iter,
depth + 1,
(div % 2 + len % 2) / 2 == 1,
Some(empty_node),
);
assert_eq!(
height1,
height2,
"merklization algorithm failure: height of subtrees is not equal \
(width = {}, depth = {}, prev_extend = {}, next_extend = {})",
len,
depth,
extend,
div % 2 == 1 && len % 2 == 1
);
tag_engine.input(height1.to_string().as_bytes());
tag_engine.input(":".as_bytes());
let tag_hash =
sha256::Hash::hash(&sha256::Hash::from_engine(tag_engine));
engine.input(&tag_hash[..]);
engine.input(&tag_hash[..]);
node1.commit_encode(&mut engine);
node2.commit_encode(&mut engine);
(MerkleNode::from_engine(engine), height1 + 1)
}
}
/// The source data for the [LNPBP-81] merklization process.
///
/// [LNPBP-81]: https://github.com/LNP-BP/LNPBPs/blob/master/lnpbp-0081.md
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Default)]
pub struct MerkleSource<T>(
/// Array of the data which will be merklized
pub Vec<T>,
);
impl<L, I> From<I> for MerkleSource<L>
where
I: IntoIterator<Item = L>,
L: CommitEncode,
{
fn from(collection: I) -> Self { Self(collection.into_iter().collect()) }
}
impl<L> FromIterator<L> for MerkleSource<L>
where
L: CommitEncode,
{
fn from_iter<T: IntoIterator<Item = L>>(iter: T) -> Self {
iter.into_iter().collect::<Vec<_>>().into()
}
}
impl<L> CommitEncode for MerkleSource<L>
where
L: ConsensusMerkleCommit,
{
fn commit_encode<E: io::Write>(&self, e: E) -> usize {
let leafs = self.0.iter().map(L::consensus_commit);
merklize(L::MERKLE_NODE_PREFIX, leafs).0.commit_encode(e)
}
}
impl<L> ConsensusCommit for MerkleSource<L>
where
L: ConsensusMerkleCommit + CommitEncode,
{
type Commitment = MerkleNode;
#[inline]
fn consensus_commit(&self) -> Self::Commitment {
MerkleNode::from_slice(&self.commit_serialize())
.expect("MerkleSource::commit_serialize must produce MerkleNode")
}
#[inline]
fn consensus_verify(&self, commitment: &Self::Commitment) -> bool {
self.consensus_commit() == *commitment
}
}
/// Converts a given piece of client-side-validated data into a structure
/// which can be used in the merklization process.
///
/// This dedicated structure is required since with
/// `impl From<_> for MerkleSource` we would not be able to specify a concrete
/// tagged hash, which we require in [LNPBP-81] merklization and which we
/// provide here via the [`ToMerkleSource::Leaf`] associated type holding the
/// [`ConsensusMerkleCommit::MERKLE_NODE_PREFIX`] prefix value.
///
/// [LNPBP-81]: https://github.com/LNP-BP/LNPBPs/blob/master/lnpbp-0081.md
pub trait ToMerkleSource {
    /// The type of the commitment produced during the merklization process
    type Leaf: ConsensusMerkleCommit;
    /// Transforms the data type into merklizable data
fn to_merkle_source(&self) -> MerkleSource<Self::Leaf>;
}
#[cfg(test)]
mod test {
use std::collections::BTreeMap;
use amplify::{bmap, s};
use bitcoin_hashes::hex::ToHex;
use bitcoin_hashes::{sha256d, Hash};
use strict_encoding::StrictEncode;
use super::*;
use crate::commit_encode::{strategies, Strategy};
use crate::CommitConceal;
#[test]
fn collections() {
// First, we define a data type
#[derive(
Clone,
PartialEq,
Eq,
PartialOrd,
Ord,
Hash,
Debug,
StrictEncode,
StrictDecode
)]
struct Item(pub String);
// Next, we say that it should be concealed using some function
// (double SHA256 hash in this case)
impl CommitConceal for Item {
type ConcealedCommitment = sha256d::Hash;
fn commit_conceal(&self) -> Self::ConcealedCommitment {
sha256d::Hash::hash(self.0.as_bytes())
}
}
// Next, we need to specify how the concealed data should be
// commit-encoded: this time we strict-serialize the hash
impl Strategy for sha256d::Hash {
type Strategy = strategies::UsingStrict;
}
// Now, we define commitment encoding for our concealable type: it
// should conceal the data
impl Strategy for Item {
type Strategy = strategies::UsingConceal;
}
// Now, we need to say that consensus commit procedure should produce
// a final commitment from commit-encoded data (equal to the
// strict encoding of the conceal result) using `CommitVerify` type.
// Here, we use another round of hashing, producing merkle node hash
// from the concealed data.
impl ConsensusCommit for Item {
type Commitment = MerkleNode;
}
// Next, we need to provide merkle node tags for each type of the tree
impl ConsensusMerkleCommit for Item {
const MERKLE_NODE_PREFIX: &'static str = "item";
}
impl ConsensusMerkleCommit for (usize, Item) {
const MERKLE_NODE_PREFIX: &'static str = "usize->item";
}
impl ToMerkleSource for BTreeMap<usize, Item> {
type Leaf = (usize, Item);
fn to_merkle_source(&self) -> MerkleSource<Self::Leaf> {
self.iter().map(|(k, v)| (*k, v.clone())).collect()
}
}
let large = vec![Item(s!("none")); 3];
let vec: MerkleSource<Item> = large.clone().into();
assert_eq!(
vec.commit_serialize().to_hex(),
"71ea45868fbd924061c4deb84f37ed82b0ac808de12aa7659afda7d9303e7a71"
);
let large = vec![Item(s!("none")); 5];
let vec: MerkleSource<Item> = large.clone().into();
assert_eq!(
vec.commit_serialize().to_hex(),
"e255e0124efe0555fde0d932a0bc0042614129e1a02f7b8c0bf608b81af3eb94"
);
let large = vec![Item(s!("none")); 9];
let vec: MerkleSource<Item> = large.clone().into();
assert_eq!(
vec.commit_serialize().to_hex(),
"6cd2d5345a654af4720bdcc637183ded8e432dc88f778b7d27c8d5a0e342c65f"
);
let large = vec![Item(s!("none")); 13];
let vec: MerkleSource<Item> = large.clone().into();
assert_eq!(
vec.commit_serialize().to_hex(),
"3714c08c7c94a4ef769ad2cb7df9aaca1e1252d6599a02aff281c37e7242797d"
);
let large = vec![Item(s!("none")); 17];
let vec: MerkleSource<Item> = large.clone().into();
assert_eq!(
vec.commit_serialize().to_hex(),
"6093dec47e5bdd706da01e4479cb65632eac426eb59c8c28c4e6c199438c8b6f"
);
let item = Item(s!("Some text"));
assert_eq!(&b"\x09\x00Some text"[..], item.strict_serialize().unwrap());
assert_eq!(
"6680bbec0d05d3eaac9c8b658c40f28d2f0cb0f245c7b1cabf5a61c35bd03d8e",
item.commit_serialize().to_hex()
);
assert_eq!(
"3e4b2dcf9bca33400028c8947565c1ff421f6d561e9ec48f88f0c9a24ebc8c30",
item.consensus_commit().to_hex()
);
assert_ne!(item.commit_serialize(), item.strict_serialize().unwrap());
assert_eq!(
MerkleNode::hash(&item.commit_serialize()),
item.consensus_commit()
);
let original = bmap! {
0usize => Item(s!("My first case")),
1usize => Item(s!("My second case with a very long string")),
3usize => Item(s!("My third case to make the Merkle tree two layered"))
};
let collection = original.to_merkle_source();
assert_eq!(
&b"\x03\x00\
\x00\x00\
\x0d\x00\
My first case\
\x01\x00\
\x26\x00\
My second case with a very long string\
\x03\x00\
\x31\x00\
My third case to make the Merkle tree two layered"[..],
original.strict_serialize().unwrap()
);
assert_eq!(
"d911717b8dfbbcef68495c93c0a5e69df618f5dcc194d69e80b6fafbfcd6ed5d",
collection.commit_serialize().to_hex()
);
assert_eq!(
"d911717b8dfbbcef68495c93c0a5e69df618f5dcc194d69e80b6fafbfcd6ed5d",
collection.consensus_commit().to_hex()
);
assert_ne!(
collection.commit_serialize(),
original.strict_serialize().unwrap()
);
assert_eq!(
MerkleNode::from_slice(&collection.commit_serialize()).unwrap(),
collection.consensus_commit()
);
let original = vec![
Item(s!("My first case")),
Item(s!("My second case with a very long string")),
Item(s!("My third case to make the Merkle tree two layered")),
];
let vec: MerkleSource<Item> = original.clone().into();
assert_eq!(
&b"\x03\x00\
\x0d\x00\
My first case\
\x26\x00\
My second case with a very long string\
\x31\x00\
My third case to make the Merkle tree two layered"[..],
original.strict_serialize().unwrap()
);
assert_eq!(
"fd72061e26055fb907aa512a591b4291e739f15198eb72027c4dd6506f14f469",
vec.commit_serialize().to_hex()
);
assert_eq!(
"fd72061e26055fb907aa512a591b4291e739f15198eb72027c4dd6506f14f469",
vec.consensus_commit().to_hex()
);
assert_ne!(
vec.commit_serialize(),
original.strict_serialize().unwrap()
);
assert_eq!(
MerkleNode::from_slice(&vec.commit_serialize()).unwrap(),
vec.consensus_commit()
);
assert_ne!(vec.consensus_commit(), collection.consensus_commit());
}
}
| {
let div = len / 2 + len % 2;
let (node1, height1) = merklize_inner(
engine_proto,
// Normally we should use `iter.by_ref().take(div)`, but currently
            // the Rust compiler is unable to parse recursion with generic types
iter.by_ref().take(div).collect::<Vec<_>>().into_iter(),
depth + 1,
false,
Some(empty_node),
);
let iter = if extend {
iter.chain(vec![empty_node]).collect::<Vec<_>>().into_iter()
} else {
iter.collect::<Vec<_>>().into_iter()
};
let (node2, height2) = merklize_inner( | conditional_block |
merkle.rs | // LNP/BP client-side-validation foundation libraries implementing LNPBP
// specifications & standards (LNPBP-4, 7, 8, 9, 42, 81)
//
// Written in 2019-2021 by
// Dr. Maxim Orlovsky <[email protected]>
//
// To the extent possible under law, the author(s) have dedicated all
// copyright and related and neighboring rights to this software to
// the public domain worldwide. This software is distributed without
// any warranty.
//
// You should have received a copy of the Apache 2.0 License along with this
// software. If not, see <https://opensource.org/licenses/Apache-2.0>.
//! Merklization procedures for client-side-validation according to [LNPBP-81]
//! standard.
//!
//! [LNPBP-81]: https://github.com/LNP-BP/LNPBPs/blob/master/lnpbp-0081.md
use std::io;
use std::iter::FromIterator;
use bitcoin_hashes::{sha256, Hash, HashEngine};
use crate::{commit_encode, CommitEncode, CommitVerify, ConsensusCommit};
/// Marker trait for types that require merklization of the underlying data
/// during the [`ConsensusCommit`] procedure. Allows specifying a custom tag
/// for the tagged hash used in the merklization (see [`merklize`]).
pub trait ConsensusMerkleCommit:
ConsensusCommit<Commitment = MerkleNode>
{
/// The tag prefix which will be used in the merklization process (see
/// [`merklize`])
const MERKLE_NODE_PREFIX: &'static str;
}
hash_newtype!(
MerkleNode,
sha256::Hash,
32,
doc = "A hash type for LNPBP-81 Merkle tree leaves, branches and root",
false // We do not reverse displaying MerkleNodes in hexadecimal
);
impl strict_encoding::Strategy for MerkleNode {
type Strategy = strict_encoding::strategies::HashFixedBytes;
}
impl commit_encode::Strategy for MerkleNode {
type Strategy = commit_encode::strategies::UsingStrict;
}
impl<MSG> CommitVerify<MSG> for MerkleNode
where
MSG: AsRef<[u8]>,
{
#[inline]
fn commit(msg: &MSG) -> MerkleNode { MerkleNode::hash(msg.as_ref()) }
}
impl<A, B> ConsensusCommit for (A, B)
where
A: CommitEncode,
B: CommitEncode,
{
type Commitment = MerkleNode;
}
impl<A, B, C> ConsensusCommit for (A, B, C)
where
A: CommitEncode,
B: CommitEncode,
C: CommitEncode,
{
type Commitment = MerkleNode;
}
/// Merklization procedure that uses tagged hashes with depth commitments
/// according to [LNPBP-81] standard of client-side-validation merklization
///
/// [LNPBP-81]: https://github.com/LNP-BP/LNPBPs/blob/master/lnpbp-0081.md
pub fn merklize<I>(prefix: &str, data: I) -> (MerkleNode, u8)
where
I: IntoIterator<Item = MerkleNode>,
<I as IntoIterator>::IntoIter: ExactSizeIterator<Item = MerkleNode>,
{
let mut tag_engine = sha256::Hash::engine();
tag_engine.input(prefix.as_bytes());
tag_engine.input(":merkle:".as_bytes());
let iter = data.into_iter();
let width = iter.len();
// Tagging merkle tree root
let (root, height) = merklize_inner(&tag_engine, iter, 0, false, None);
tag_engine.input("root:height=".as_bytes());
tag_engine.input(&height.to_string().into_bytes());
tag_engine.input(":width=".as_bytes());
tag_engine.input(&width.to_string().into_bytes());
let tag_hash = sha256::Hash::hash(&sha256::Hash::from_engine(tag_engine));
let mut engine = MerkleNode::engine();
engine.input(&tag_hash[..]);
engine.input(&tag_hash[..]);
root.commit_encode(&mut engine);
let tagged_root = MerkleNode::from_engine(engine);
(tagged_root, height)
}
// TODO: Optimize to avoid allocations
// In current rust generic iterators do not work with recursion :(
fn merklize_inner(
engine_proto: &sha256::HashEngine,
mut iter: impl ExactSizeIterator<Item = MerkleNode>,
depth: u8,
extend: bool,
empty_node: Option<MerkleNode>,
) -> (MerkleNode, u8) {
let len = iter.len() + extend as usize;
let empty_node = empty_node.unwrap_or_else(|| MerkleNode::hash(&[0xFF]));
// Computing tagged hash as per BIP-340
let mut tag_engine = engine_proto.clone();
tag_engine.input("depth=".as_bytes());
tag_engine.input(depth.to_string().as_bytes());
tag_engine.input(":width=".as_bytes());
tag_engine.input(len.to_string().as_bytes());
tag_engine.input(":height=".as_bytes());
let mut engine = MerkleNode::engine();
if len <= 2 {
tag_engine.input("0:".as_bytes());
let tag_hash =
sha256::Hash::hash(&sha256::Hash::from_engine(tag_engine));
engine.input(&tag_hash[..]);
engine.input(&tag_hash[..]);
let mut leaf_tag_engine = engine_proto.clone();
leaf_tag_engine.input("leaf".as_bytes());
let leaf_tag =
sha256::Hash::hash(&sha256::Hash::from_engine(leaf_tag_engine));
let mut leaf_engine = MerkleNode::engine();
leaf_engine.input(&leaf_tag[..]);
leaf_engine.input(&leaf_tag[..]);
let mut leaf1 = leaf_engine.clone();
leaf1.input(
iter.next()
.as_ref()
.map(|d| d.as_ref())
.unwrap_or_else(|| empty_node.as_ref()),
);
MerkleNode::from_engine(leaf1).commit_encode(&mut engine);
leaf_engine.input(
iter.next()
.as_ref()
.map(|d| d.as_ref())
.unwrap_or_else(|| empty_node.as_ref()),
);
MerkleNode::from_engine(leaf_engine).commit_encode(&mut engine);
(MerkleNode::from_engine(engine), 1)
} else {
let div = len / 2 + len % 2;
let (node1, height1) = merklize_inner(
engine_proto,
// Normally we should use `iter.by_ref().take(div)`, but currently
            // the Rust compiler is unable to parse recursion with generic types
iter.by_ref().take(div).collect::<Vec<_>>().into_iter(),
depth + 1,
false,
Some(empty_node),
);
let iter = if extend {
iter.chain(vec![empty_node]).collect::<Vec<_>>().into_iter()
} else {
iter.collect::<Vec<_>>().into_iter()
};
let (node2, height2) = merklize_inner(
engine_proto,
iter,
depth + 1,
(div % 2 + len % 2) / 2 == 1,
Some(empty_node),
);
assert_eq!(
height1,
height2,
"merklization algorithm failure: height of subtrees is not equal \
(width = {}, depth = {}, prev_extend = {}, next_extend = {})",
len,
depth,
extend,
div % 2 == 1 && len % 2 == 1
);
tag_engine.input(height1.to_string().as_bytes());
tag_engine.input(":".as_bytes());
let tag_hash =
sha256::Hash::hash(&sha256::Hash::from_engine(tag_engine));
engine.input(&tag_hash[..]);
engine.input(&tag_hash[..]);
node1.commit_encode(&mut engine);
node2.commit_encode(&mut engine);
(MerkleNode::from_engine(engine), height1 + 1)
}
}
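// Illustrative sketch (added for clarity; not part of the upstream file):
// shows how the recursion above keeps both subtrees at equal height by
// padding uneven widths with empty nodes. All names are made up.
#[cfg(test)]
mod merklize_height_sketch {
    use super::*;

    #[test]
    fn heights_grow_with_width() {
        let leaves = |n: usize| -> Vec<MerkleNode> {
            (0..n).map(|i| MerkleNode::hash(&[i as u8])).collect()
        };
        // Up to two leaves fit under a single branch node.
        assert_eq!(merklize("sketch", leaves(2)).1, 1);
        // A third leaf forces an extra level; the odd slot is filled with
        // the empty-node placeholder.
        assert_eq!(merklize("sketch", leaves(3)).1, 2);
        // Five leaves add one more level on top.
        assert_eq!(merklize("sketch", leaves(5)).1, 3);
    }
}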
/// The source data for the [LNPBP-81] merklization process.
///
/// [LNPBP-81]: https://github.com/LNP-BP/LNPBPs/blob/master/lnpbp-0081.md
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Default)]
pub struct MerkleSource<T>(
/// Array of the data which will be merklized
pub Vec<T>,
);
impl<L, I> From<I> for MerkleSource<L>
where
I: IntoIterator<Item = L>,
L: CommitEncode,
{
fn from(collection: I) -> Self |
}
impl<L> FromIterator<L> for MerkleSource<L>
where
L: CommitEncode,
{
fn from_iter<T: IntoIterator<Item = L>>(iter: T) -> Self {
iter.into_iter().collect::<Vec<_>>().into()
}
}
impl<L> CommitEncode for MerkleSource<L>
where
L: ConsensusMerkleCommit,
{
fn commit_encode<E: io::Write>(&self, e: E) -> usize {
let leafs = self.0.iter().map(L::consensus_commit);
merklize(L::MERKLE_NODE_PREFIX, leafs).0.commit_encode(e)
}
}
impl<L> ConsensusCommit for MerkleSource<L>
where
L: ConsensusMerkleCommit + CommitEncode,
{
type Commitment = MerkleNode;
#[inline]
fn consensus_commit(&self) -> Self::Commitment {
MerkleNode::from_slice(&self.commit_serialize())
.expect("MerkleSource::commit_serialize must produce MerkleNode")
}
#[inline]
fn consensus_verify(&self, commitment: &Self::Commitment) -> bool {
self.consensus_commit() == *commitment
}
}
/// Converts a given piece of client-side-validated data into a structure
/// which can be used in the merklization process.
///
/// This dedicated structure is required since with
/// `impl From<_> for MerkleSource` we would not be able to specify a concrete
/// tagged hash, which we require in [LNPBP-81] merklization and which we
/// provide here via the [`ToMerkleSource::Leaf`] associated type holding the
/// [`ConsensusMerkleCommit::MERKLE_NODE_PREFIX`] prefix value.
///
/// [LNPBP-81]: https://github.com/LNP-BP/LNPBPs/blob/master/lnpbp-0081.md
pub trait ToMerkleSource {
    /// The type of the commitment produced during the merklization process
    type Leaf: ConsensusMerkleCommit;
    /// Transforms the data type into merklizable data
fn to_merkle_source(&self) -> MerkleSource<Self::Leaf>;
}
#[cfg(test)]
mod test {
use std::collections::BTreeMap;
use amplify::{bmap, s};
use bitcoin_hashes::hex::ToHex;
use bitcoin_hashes::{sha256d, Hash};
use strict_encoding::StrictEncode;
use super::*;
use crate::commit_encode::{strategies, Strategy};
use crate::CommitConceal;
#[test]
fn collections() {
// First, we define a data type
#[derive(
Clone,
PartialEq,
Eq,
PartialOrd,
Ord,
Hash,
Debug,
StrictEncode,
StrictDecode
)]
struct Item(pub String);
// Next, we say that it should be concealed using some function
// (double SHA256 hash in this case)
impl CommitConceal for Item {
type ConcealedCommitment = sha256d::Hash;
fn commit_conceal(&self) -> Self::ConcealedCommitment {
sha256d::Hash::hash(self.0.as_bytes())
}
}
// Next, we need to specify how the concealed data should be
// commit-encoded: this time we strict-serialize the hash
impl Strategy for sha256d::Hash {
type Strategy = strategies::UsingStrict;
}
// Now, we define commitment encoding for our concealable type: it
// should conceal the data
impl Strategy for Item {
type Strategy = strategies::UsingConceal;
}
// Now, we need to say that consensus commit procedure should produce
// a final commitment from commit-encoded data (equal to the
// strict encoding of the conceal result) using `CommitVerify` type.
// Here, we use another round of hashing, producing merkle node hash
// from the concealed data.
impl ConsensusCommit for Item {
type Commitment = MerkleNode;
}
// Next, we need to provide merkle node tags for each type of the tree
impl ConsensusMerkleCommit for Item {
const MERKLE_NODE_PREFIX: &'static str = "item";
}
impl ConsensusMerkleCommit for (usize, Item) {
const MERKLE_NODE_PREFIX: &'static str = "usize->item";
}
impl ToMerkleSource for BTreeMap<usize, Item> {
type Leaf = (usize, Item);
fn to_merkle_source(&self) -> MerkleSource<Self::Leaf> {
self.iter().map(|(k, v)| (*k, v.clone())).collect()
}
}
let large = vec![Item(s!("none")); 3];
let vec: MerkleSource<Item> = large.clone().into();
assert_eq!(
vec.commit_serialize().to_hex(),
"71ea45868fbd924061c4deb84f37ed82b0ac808de12aa7659afda7d9303e7a71"
);
let large = vec![Item(s!("none")); 5];
let vec: MerkleSource<Item> = large.clone().into();
assert_eq!(
vec.commit_serialize().to_hex(),
"e255e0124efe0555fde0d932a0bc0042614129e1a02f7b8c0bf608b81af3eb94"
);
let large = vec![Item(s!("none")); 9];
let vec: MerkleSource<Item> = large.clone().into();
assert_eq!(
vec.commit_serialize().to_hex(),
"6cd2d5345a654af4720bdcc637183ded8e432dc88f778b7d27c8d5a0e342c65f"
);
let large = vec![Item(s!("none")); 13];
let vec: MerkleSource<Item> = large.clone().into();
assert_eq!(
vec.commit_serialize().to_hex(),
"3714c08c7c94a4ef769ad2cb7df9aaca1e1252d6599a02aff281c37e7242797d"
);
let large = vec![Item(s!("none")); 17];
let vec: MerkleSource<Item> = large.clone().into();
assert_eq!(
vec.commit_serialize().to_hex(),
"6093dec47e5bdd706da01e4479cb65632eac426eb59c8c28c4e6c199438c8b6f"
);
let item = Item(s!("Some text"));
assert_eq!(&b"\x09\x00Some text"[..], item.strict_serialize().unwrap());
assert_eq!(
"6680bbec0d05d3eaac9c8b658c40f28d2f0cb0f245c7b1cabf5a61c35bd03d8e",
item.commit_serialize().to_hex()
);
assert_eq!(
"3e4b2dcf9bca33400028c8947565c1ff421f6d561e9ec48f88f0c9a24ebc8c30",
item.consensus_commit().to_hex()
);
assert_ne!(item.commit_serialize(), item.strict_serialize().unwrap());
assert_eq!(
MerkleNode::hash(&item.commit_serialize()),
item.consensus_commit()
);
let original = bmap! {
0usize => Item(s!("My first case")),
1usize => Item(s!("My second case with a very long string")),
3usize => Item(s!("My third case to make the Merkle tree two layered"))
};
let collection = original.to_merkle_source();
assert_eq!(
&b"\x03\x00\
\x00\x00\
\x0d\x00\
My first case\
\x01\x00\
\x26\x00\
My second case with a very long string\
\x03\x00\
\x31\x00\
My third case to make the Merkle tree two layered"[..],
original.strict_serialize().unwrap()
);
assert_eq!(
"d911717b8dfbbcef68495c93c0a5e69df618f5dcc194d69e80b6fafbfcd6ed5d",
collection.commit_serialize().to_hex()
);
assert_eq!(
"d911717b8dfbbcef68495c93c0a5e69df618f5dcc194d69e80b6fafbfcd6ed5d",
collection.consensus_commit().to_hex()
);
assert_ne!(
collection.commit_serialize(),
original.strict_serialize().unwrap()
);
assert_eq!(
MerkleNode::from_slice(&collection.commit_serialize()).unwrap(),
collection.consensus_commit()
);
let original = vec![
Item(s!("My first case")),
Item(s!("My second case with a very long string")),
Item(s!("My third case to make the Merkle tree two layered")),
];
let vec: MerkleSource<Item> = original.clone().into();
assert_eq!(
&b"\x03\x00\
\x0d\x00\
My first case\
\x26\x00\
My second case with a very long string\
\x31\x00\
My third case to make the Merkle tree two layered"[..],
original.strict_serialize().unwrap()
);
assert_eq!(
"fd72061e26055fb907aa512a591b4291e739f15198eb72027c4dd6506f14f469",
vec.commit_serialize().to_hex()
);
assert_eq!(
"fd72061e26055fb907aa512a591b4291e739f15198eb72027c4dd6506f14f469",
vec.consensus_commit().to_hex()
);
assert_ne!(
vec.commit_serialize(),
original.strict_serialize().unwrap()
);
assert_eq!(
MerkleNode::from_slice(&vec.commit_serialize()).unwrap(),
vec.consensus_commit()
);
assert_ne!(vec.consensus_commit(), collection.consensus_commit());
}
}
| { Self(collection.into_iter().collect()) } | identifier_body |
merkle.rs | // LNP/BP client-side-validation foundation libraries implementing LNPBP
// specifications & standards (LNPBP-4, 7, 8, 9, 42, 81)
//
// Written in 2019-2021 by
// Dr. Maxim Orlovsky <[email protected]>
//
// To the extent possible under law, the author(s) have dedicated all
// copyright and related and neighboring rights to this software to
// the public domain worldwide. This software is distributed without
// any warranty.
//
// You should have received a copy of the Apache 2.0 License along with this
// software. If not, see <https://opensource.org/licenses/Apache-2.0>.
//! Merklization procedures for client-side-validation according to [LNPBP-81]
//! standard.
//!
//! [LNPBP-81]: https://github.com/LNP-BP/LNPBPs/blob/master/lnpbp-0081.md
use std::io;
use std::iter::FromIterator;
use bitcoin_hashes::{sha256, Hash, HashEngine};
use crate::{commit_encode, CommitEncode, CommitVerify, ConsensusCommit};
/// Marker trait for types that require merklization of the underlying data
/// during the [`ConsensusCommit`] procedure. Allows specifying a custom tag
/// for the tagged hash used in the merklization (see [`merklize`]).
pub trait ConsensusMerkleCommit:
ConsensusCommit<Commitment = MerkleNode>
{
/// The tag prefix which will be used in the merklization process (see
/// [`merklize`])
const MERKLE_NODE_PREFIX: &'static str;
}
hash_newtype!(
MerkleNode,
sha256::Hash,
32,
doc = "A hash type for LNPBP-81 Merkle tree leaves, branches and root",
false // We do not reverse displaying MerkleNodes in hexadecimal
);
impl strict_encoding::Strategy for MerkleNode {
type Strategy = strict_encoding::strategies::HashFixedBytes;
}
impl commit_encode::Strategy for MerkleNode {
type Strategy = commit_encode::strategies::UsingStrict;
}
impl<MSG> CommitVerify<MSG> for MerkleNode
where
MSG: AsRef<[u8]>,
{
#[inline]
fn commit(msg: &MSG) -> MerkleNode { MerkleNode::hash(msg.as_ref()) }
}
impl<A, B> ConsensusCommit for (A, B)
where
A: CommitEncode,
B: CommitEncode,
{
type Commitment = MerkleNode;
}
impl<A, B, C> ConsensusCommit for (A, B, C)
where
A: CommitEncode,
B: CommitEncode,
C: CommitEncode,
{
type Commitment = MerkleNode;
}
/// Merklization procedure that uses tagged hashes with depth commitments
/// according to [LNPBP-81] standard of client-side-validation merklization
///
/// [LNPBP-81]: https://github.com/LNP-BP/LNPBPs/blob/master/lnpbp-0081.md
pub fn merklize<I>(prefix: &str, data: I) -> (MerkleNode, u8)
where
I: IntoIterator<Item = MerkleNode>,
<I as IntoIterator>::IntoIter: ExactSizeIterator<Item = MerkleNode>,
{
let mut tag_engine = sha256::Hash::engine();
tag_engine.input(prefix.as_bytes());
tag_engine.input(":merkle:".as_bytes());
let iter = data.into_iter();
let width = iter.len();
// Tagging merkle tree root
let (root, height) = merklize_inner(&tag_engine, iter, 0, false, None);
tag_engine.input("root:height=".as_bytes());
tag_engine.input(&height.to_string().into_bytes());
tag_engine.input(":width=".as_bytes());
tag_engine.input(&width.to_string().into_bytes());
let tag_hash = sha256::Hash::hash(&sha256::Hash::from_engine(tag_engine));
let mut engine = MerkleNode::engine();
engine.input(&tag_hash[..]);
engine.input(&tag_hash[..]);
root.commit_encode(&mut engine);
let tagged_root = MerkleNode::from_engine(engine);
(tagged_root, height)
}
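// Illustrative sketch (added for clarity; not part of the upstream file):
// the tag prefix is mixed into every node hash, so the same leaves
// merklized under different protocol prefixes commit to different roots.
// Prefix names are made up for the example.
#[cfg(test)]
mod merklize_prefix_sketch {
    use super::*;

    #[test]
    fn prefix_separates_domains() {
        let leaves: Vec<MerkleNode> =
            (0u8..4).map(|i| MerkleNode::hash(&[i])).collect();
        let (root_a, height_a) = merklize("protocol-a", leaves.clone());
        let (root_b, height_b) = merklize("protocol-b", leaves);
        // Same structure...
        assert_eq!(height_a, height_b);
        // ...but domain-separated commitments.
        assert_ne!(root_a, root_b);
    }
}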
// TODO: Optimize to avoid allocations
// In current rust generic iterators do not work with recursion :(
fn merklize_inner(
engine_proto: &sha256::HashEngine,
mut iter: impl ExactSizeIterator<Item = MerkleNode>,
depth: u8,
extend: bool,
empty_node: Option<MerkleNode>,
) -> (MerkleNode, u8) {
let len = iter.len() + extend as usize;
let empty_node = empty_node.unwrap_or_else(|| MerkleNode::hash(&[0xFF]));
// Computing tagged hash as per BIP-340
let mut tag_engine = engine_proto.clone();
tag_engine.input("depth=".as_bytes());
tag_engine.input(depth.to_string().as_bytes());
tag_engine.input(":width=".as_bytes());
tag_engine.input(len.to_string().as_bytes());
tag_engine.input(":height=".as_bytes());
let mut engine = MerkleNode::engine();
if len <= 2 {
tag_engine.input("0:".as_bytes());
let tag_hash =
sha256::Hash::hash(&sha256::Hash::from_engine(tag_engine));
engine.input(&tag_hash[..]);
engine.input(&tag_hash[..]);
let mut leaf_tag_engine = engine_proto.clone();
leaf_tag_engine.input("leaf".as_bytes());
let leaf_tag =
sha256::Hash::hash(&sha256::Hash::from_engine(leaf_tag_engine));
let mut leaf_engine = MerkleNode::engine();
leaf_engine.input(&leaf_tag[..]);
leaf_engine.input(&leaf_tag[..]);
let mut leaf1 = leaf_engine.clone();
leaf1.input(
iter.next()
.as_ref()
.map(|d| d.as_ref())
.unwrap_or_else(|| empty_node.as_ref()),
);
MerkleNode::from_engine(leaf1).commit_encode(&mut engine);
leaf_engine.input(
iter.next()
.as_ref()
.map(|d| d.as_ref())
.unwrap_or_else(|| empty_node.as_ref()),
);
MerkleNode::from_engine(leaf_engine).commit_encode(&mut engine);
(MerkleNode::from_engine(engine), 1)
} else {
let div = len / 2 + len % 2;
let (node1, height1) = merklize_inner(
engine_proto,
// Normally we should use `iter.by_ref().take(div)`, but currently
            // the Rust compiler is unable to parse recursion with generic types
iter.by_ref().take(div).collect::<Vec<_>>().into_iter(), | let iter = if extend {
iter.chain(vec![empty_node]).collect::<Vec<_>>().into_iter()
} else {
iter.collect::<Vec<_>>().into_iter()
};
let (node2, height2) = merklize_inner(
engine_proto,
iter,
depth + 1,
(div % 2 + len % 2) / 2 == 1,
Some(empty_node),
);
assert_eq!(
height1,
height2,
"merklization algorithm failure: height of subtrees is not equal \
(width = {}, depth = {}, prev_extend = {}, next_extend = {})",
len,
depth,
extend,
div % 2 == 1 && len % 2 == 1
);
tag_engine.input(height1.to_string().as_bytes());
tag_engine.input(":".as_bytes());
let tag_hash =
sha256::Hash::hash(&sha256::Hash::from_engine(tag_engine));
engine.input(&tag_hash[..]);
engine.input(&tag_hash[..]);
node1.commit_encode(&mut engine);
node2.commit_encode(&mut engine);
(MerkleNode::from_engine(engine), height1 + 1)
}
}
/// The source data for the [LNPBP-81] merklization process.
///
/// [LNPBP-81]: https://github.com/LNP-BP/LNPBPs/blob/master/lnpbp-0081.md
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Default)]
pub struct MerkleSource<T>(
/// Array of the data which will be merklized
pub Vec<T>,
);
impl<L, I> From<I> for MerkleSource<L>
where
I: IntoIterator<Item = L>,
L: CommitEncode,
{
fn from(collection: I) -> Self { Self(collection.into_iter().collect()) }
}
impl<L> FromIterator<L> for MerkleSource<L>
where
L: CommitEncode,
{
fn from_iter<T: IntoIterator<Item = L>>(iter: T) -> Self {
iter.into_iter().collect::<Vec<_>>().into()
}
}
impl<L> CommitEncode for MerkleSource<L>
where
L: ConsensusMerkleCommit,
{
fn commit_encode<E: io::Write>(&self, e: E) -> usize {
let leafs = self.0.iter().map(L::consensus_commit);
merklize(L::MERKLE_NODE_PREFIX, leafs).0.commit_encode(e)
}
}
impl<L> ConsensusCommit for MerkleSource<L>
where
L: ConsensusMerkleCommit + CommitEncode,
{
type Commitment = MerkleNode;
#[inline]
fn consensus_commit(&self) -> Self::Commitment {
MerkleNode::from_slice(&self.commit_serialize())
.expect("MerkleSource::commit_serialize must produce MerkleNode")
}
#[inline]
fn consensus_verify(&self, commitment: &Self::Commitment) -> bool {
self.consensus_commit() == *commitment
}
}
/// Converts a given piece of client-side-validated data into a structure
/// which can be used in the merklization process.
///
/// This dedicated structure is required since with
/// `impl From<_> for MerkleSource` we would not be able to specify a concrete
/// tagged hash, which we require in [LNPBP-81] merklization and which we
/// provide here via the [`ToMerkleSource::Leaf`] associated type holding the
/// [`ConsensusMerkleCommit::MERKLE_NODE_PREFIX`] prefix value.
///
/// [LNPBP-81]: https://github.com/LNP-BP/LNPBPs/blob/master/lnpbp-0081.md
pub trait ToMerkleSource {
    /// The type of the commitment produced during the merklization process
    type Leaf: ConsensusMerkleCommit;
    /// Transforms the data type into merklizable data
fn to_merkle_source(&self) -> MerkleSource<Self::Leaf>;
}
#[cfg(test)]
mod test {
use std::collections::BTreeMap;
use amplify::{bmap, s};
use bitcoin_hashes::hex::ToHex;
use bitcoin_hashes::{sha256d, Hash};
use strict_encoding::StrictEncode;
use super::*;
use crate::commit_encode::{strategies, Strategy};
use crate::CommitConceal;
#[test]
fn collections() {
// First, we define a data type
#[derive(
Clone,
PartialEq,
Eq,
PartialOrd,
Ord,
Hash,
Debug,
StrictEncode,
StrictDecode
)]
struct Item(pub String);
// Next, we say that it should be concealed using some function
// (double SHA256 hash in this case)
impl CommitConceal for Item {
type ConcealedCommitment = sha256d::Hash;
fn commit_conceal(&self) -> Self::ConcealedCommitment {
sha256d::Hash::hash(self.0.as_bytes())
}
}
// Next, we need to specify how the concealed data should be
// commit-encoded: this time we strict-serialize the hash
impl Strategy for sha256d::Hash {
type Strategy = strategies::UsingStrict;
}
// Now, we define commitment encoding for our concealable type: it
// should conceal the data
impl Strategy for Item {
type Strategy = strategies::UsingConceal;
}
// Now, we need to say that consensus commit procedure should produce
// a final commitment from commit-encoded data (equal to the
// strict encoding of the conceal result) using `CommitVerify` type.
// Here, we use another round of hashing, producing merkle node hash
// from the concealed data.
impl ConsensusCommit for Item {
type Commitment = MerkleNode;
}
// Next, we need to provide merkle node tags for each type of the tree
impl ConsensusMerkleCommit for Item {
const MERKLE_NODE_PREFIX: &'static str = "item";
}
impl ConsensusMerkleCommit for (usize, Item) {
const MERKLE_NODE_PREFIX: &'static str = "usize->item";
}
impl ToMerkleSource for BTreeMap<usize, Item> {
type Leaf = (usize, Item);
fn to_merkle_source(&self) -> MerkleSource<Self::Leaf> {
self.iter().map(|(k, v)| (*k, v.clone())).collect()
}
}
let large = vec![Item(s!("none")); 3];
let vec: MerkleSource<Item> = large.clone().into();
assert_eq!(
vec.commit_serialize().to_hex(),
"71ea45868fbd924061c4deb84f37ed82b0ac808de12aa7659afda7d9303e7a71"
);
let large = vec![Item(s!("none")); 5];
let vec: MerkleSource<Item> = large.clone().into();
assert_eq!(
vec.commit_serialize().to_hex(),
"e255e0124efe0555fde0d932a0bc0042614129e1a02f7b8c0bf608b81af3eb94"
);
let large = vec![Item(s!("none")); 9];
let vec: MerkleSource<Item> = large.clone().into();
assert_eq!(
vec.commit_serialize().to_hex(),
"6cd2d5345a654af4720bdcc637183ded8e432dc88f778b7d27c8d5a0e342c65f"
);
let large = vec![Item(s!("none")); 13];
let vec: MerkleSource<Item> = large.clone().into();
assert_eq!(
vec.commit_serialize().to_hex(),
"3714c08c7c94a4ef769ad2cb7df9aaca1e1252d6599a02aff281c37e7242797d"
);
let large = vec![Item(s!("none")); 17];
let vec: MerkleSource<Item> = large.clone().into();
assert_eq!(
vec.commit_serialize().to_hex(),
"6093dec47e5bdd706da01e4479cb65632eac426eb59c8c28c4e6c199438c8b6f"
);
let item = Item(s!("Some text"));
assert_eq!(&b"\x09\x00Some text"[..], item.strict_serialize().unwrap());
assert_eq!(
"6680bbec0d05d3eaac9c8b658c40f28d2f0cb0f245c7b1cabf5a61c35bd03d8e",
item.commit_serialize().to_hex()
);
assert_eq!(
"3e4b2dcf9bca33400028c8947565c1ff421f6d561e9ec48f88f0c9a24ebc8c30",
item.consensus_commit().to_hex()
);
assert_ne!(item.commit_serialize(), item.strict_serialize().unwrap());
assert_eq!(
MerkleNode::hash(&item.commit_serialize()),
item.consensus_commit()
);
let original = bmap! {
0usize => Item(s!("My first case")),
1usize => Item(s!("My second case with a very long string")),
3usize => Item(s!("My third case to make the Merkle tree two layered"))
};
let collection = original.to_merkle_source();
assert_eq!(
&b"\x03\x00\
\x00\x00\
\x0d\x00\
My first case\
\x01\x00\
\x26\x00\
My second case with a very long string\
\x03\x00\
\x31\x00\
My third case to make the Merkle tree two layered"[..],
original.strict_serialize().unwrap()
);
assert_eq!(
"d911717b8dfbbcef68495c93c0a5e69df618f5dcc194d69e80b6fafbfcd6ed5d",
collection.commit_serialize().to_hex()
);
assert_eq!(
"d911717b8dfbbcef68495c93c0a5e69df618f5dcc194d69e80b6fafbfcd6ed5d",
collection.consensus_commit().to_hex()
);
assert_ne!(
collection.commit_serialize(),
original.strict_serialize().unwrap()
);
assert_eq!(
MerkleNode::from_slice(&collection.commit_serialize()).unwrap(),
collection.consensus_commit()
);
let original = vec![
Item(s!("My first case")),
Item(s!("My second case with a very long string")),
Item(s!("My third case to make the Merkle tree two layered")),
];
let vec: MerkleSource<Item> = original.clone().into();
assert_eq!(
&b"\x03\x00\
\x0d\x00\
My first case\
\x26\x00\
My second case with a very long string\
\x31\x00\
My third case to make the Merkle tree two layered"[..],
original.strict_serialize().unwrap()
);
assert_eq!(
"fd72061e26055fb907aa512a591b4291e739f15198eb72027c4dd6506f14f469",
vec.commit_serialize().to_hex()
);
assert_eq!(
"fd72061e26055fb907aa512a591b4291e739f15198eb72027c4dd6506f14f469",
vec.consensus_commit().to_hex()
);
assert_ne!(
vec.commit_serialize(),
original.strict_serialize().unwrap()
);
assert_eq!(
MerkleNode::from_slice(&vec.commit_serialize()).unwrap(),
vec.consensus_commit()
);
assert_ne!(vec.consensus_commit(), collection.consensus_commit());
}
} | depth + 1,
false,
Some(empty_node),
);
| random_line_split |
merkle.rs | // LNP/BP client-side-validation foundation libraries implementing LNPBP
// specifications & standards (LNPBP-4, 7, 8, 9, 42, 81)
//
// Written in 2019-2021 by
// Dr. Maxim Orlovsky <[email protected]>
//
// To the extent possible under law, the author(s) have dedicated all
// copyright and related and neighboring rights to this software to
// the public domain worldwide. This software is distributed without
// any warranty.
//
// You should have received a copy of the Apache 2.0 License along with this
// software. If not, see <https://opensource.org/licenses/Apache-2.0>.
//! Merklization procedures for client-side-validation according to [LNPBP-81]
//! standard.
//!
//! [LNPBP-81]: https://github.com/LNP-BP/LNPBPs/blob/master/lnpbp-0081.md
use std::io;
use std::iter::FromIterator;
use bitcoin_hashes::{sha256, Hash, HashEngine};
use crate::{commit_encode, CommitEncode, CommitVerify, ConsensusCommit};
/// Marker trait for types that require merklization of the underlying data
/// during the [`ConsensusCommit`] procedure. Allows specifying a custom tag
/// for the tagged hash used in the merklization (see [`merklize`]).
pub trait ConsensusMerkleCommit:
ConsensusCommit<Commitment = MerkleNode>
{
/// The tag prefix which will be used in the merklization process (see
/// [`merklize`])
const MERKLE_NODE_PREFIX: &'static str;
}
hash_newtype!(
MerkleNode,
sha256::Hash,
32,
doc = "A hash type for LNPBP-81 Merkle tree leaves, branches and root",
false // We do not reverse displaying MerkleNodes in hexadecimal
);
impl strict_encoding::Strategy for MerkleNode {
type Strategy = strict_encoding::strategies::HashFixedBytes;
}
impl commit_encode::Strategy for MerkleNode {
type Strategy = commit_encode::strategies::UsingStrict;
}
impl<MSG> CommitVerify<MSG> for MerkleNode
where
MSG: AsRef<[u8]>,
{
#[inline]
fn commit(msg: &MSG) -> MerkleNode { MerkleNode::hash(msg.as_ref()) }
}
impl<A, B> ConsensusCommit for (A, B)
where
A: CommitEncode,
B: CommitEncode,
{
type Commitment = MerkleNode;
}
impl<A, B, C> ConsensusCommit for (A, B, C)
where
A: CommitEncode,
B: CommitEncode,
C: CommitEncode,
{
type Commitment = MerkleNode;
}
/// Merklization procedure that uses tagged hashes with depth commitments
/// according to [LNPBP-81] standard of client-side-validation merklization
///
/// [LNPBP-81]: https://github.com/LNP-BP/LNPBPs/blob/master/lnpbp-0081.md
pub fn merklize<I>(prefix: &str, data: I) -> (MerkleNode, u8)
where
I: IntoIterator<Item = MerkleNode>,
<I as IntoIterator>::IntoIter: ExactSizeIterator<Item = MerkleNode>,
{
let mut tag_engine = sha256::Hash::engine();
tag_engine.input(prefix.as_bytes());
tag_engine.input(":merkle:".as_bytes());
let iter = data.into_iter();
let width = iter.len();
// Tagging merkle tree root
let (root, height) = merklize_inner(&tag_engine, iter, 0, false, None);
tag_engine.input("root:height=".as_bytes());
tag_engine.input(&height.to_string().into_bytes());
tag_engine.input(":width=".as_bytes());
tag_engine.input(&width.to_string().into_bytes());
let tag_hash = sha256::Hash::hash(&sha256::Hash::from_engine(tag_engine));
let mut engine = MerkleNode::engine();
engine.input(&tag_hash[..]);
engine.input(&tag_hash[..]);
root.commit_encode(&mut engine);
let tagged_root = MerkleNode::from_engine(engine);
(tagged_root, height)
}
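// Illustrative sketch (added for clarity; not part of the upstream file):
// widths 0, 1 and 2 all collapse into a single branch level, because any
// missing position is filled with the empty-node placeholder before
// hashing. Names are made up for the example.
#[cfg(test)]
mod merklize_padding_sketch {
    use super::*;

    #[test]
    fn small_widths_have_height_one() {
        for width in 0usize..3 {
            let leaves: Vec<MerkleNode> =
                (0..width).map(|i| MerkleNode::hash(&[i as u8])).collect();
            let (_root, height) = merklize("sketch", leaves);
            assert_eq!(height, 1);
        }
    }
}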
// TODO: Optimize to avoid allocations
// In current rust generic iterators do not work with recursion :(
fn merklize_inner(
engine_proto: &sha256::HashEngine,
mut iter: impl ExactSizeIterator<Item = MerkleNode>,
depth: u8,
extend: bool,
empty_node: Option<MerkleNode>,
) -> (MerkleNode, u8) {
let len = iter.len() + extend as usize;
let empty_node = empty_node.unwrap_or_else(|| MerkleNode::hash(&[0xFF]));
// Computing tagged hash as per BIP-340
let mut tag_engine = engine_proto.clone();
tag_engine.input("depth=".as_bytes());
tag_engine.input(depth.to_string().as_bytes());
tag_engine.input(":width=".as_bytes());
tag_engine.input(len.to_string().as_bytes());
tag_engine.input(":height=".as_bytes());
let mut engine = MerkleNode::engine();
if len <= 2 {
tag_engine.input("0:".as_bytes());
let tag_hash =
sha256::Hash::hash(&sha256::Hash::from_engine(tag_engine));
engine.input(&tag_hash[..]);
engine.input(&tag_hash[..]);
let mut leaf_tag_engine = engine_proto.clone();
leaf_tag_engine.input("leaf".as_bytes());
let leaf_tag =
sha256::Hash::hash(&sha256::Hash::from_engine(leaf_tag_engine));
let mut leaf_engine = MerkleNode::engine();
leaf_engine.input(&leaf_tag[..]);
leaf_engine.input(&leaf_tag[..]);
let mut leaf1 = leaf_engine.clone();
leaf1.input(
iter.next()
.as_ref()
.map(|d| d.as_ref())
.unwrap_or_else(|| empty_node.as_ref()),
);
MerkleNode::from_engine(leaf1).commit_encode(&mut engine);
leaf_engine.input(
iter.next()
.as_ref()
.map(|d| d.as_ref())
.unwrap_or_else(|| empty_node.as_ref()),
);
MerkleNode::from_engine(leaf_engine).commit_encode(&mut engine);
(MerkleNode::from_engine(engine), 1)
} else {
let div = len / 2 + len % 2;
let (node1, height1) = merklize_inner(
engine_proto,
// Normally we should use `iter.by_ref().take(div)`, but currently
            // the Rust compiler is unable to parse recursion with generic types
iter.by_ref().take(div).collect::<Vec<_>>().into_iter(),
depth + 1,
false,
Some(empty_node),
);
let iter = if extend {
iter.chain(vec![empty_node]).collect::<Vec<_>>().into_iter()
} else {
iter.collect::<Vec<_>>().into_iter()
};
let (node2, height2) = merklize_inner(
engine_proto,
iter,
depth + 1,
(div % 2 + len % 2) / 2 == 1,
Some(empty_node),
);
assert_eq!(
height1,
height2,
"merklization algorithm failure: height of subtrees is not equal \
(width = {}, depth = {}, prev_extend = {}, next_extend = {})",
len,
depth,
extend,
div % 2 == 1 && len % 2 == 1
);
tag_engine.input(height1.to_string().as_bytes());
tag_engine.input(":".as_bytes());
let tag_hash =
sha256::Hash::hash(&sha256::Hash::from_engine(tag_engine));
engine.input(&tag_hash[..]);
engine.input(&tag_hash[..]);
node1.commit_encode(&mut engine);
node2.commit_encode(&mut engine);
(MerkleNode::from_engine(engine), height1 + 1)
}
}
/// The source data for the [LNPBP-81] merklization process.
///
/// [LNPBP-81]: https://github.com/LNP-BP/LNPBPs/blob/master/lnpbp-0081.md
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Default)]
pub struct MerkleSource<T>(
/// Array of the data which will be merklized
pub Vec<T>,
);
impl<L, I> From<I> for MerkleSource<L>
where
I: IntoIterator<Item = L>,
L: CommitEncode,
{
fn from(collection: I) -> Self { Self(collection.into_iter().collect()) }
}
impl<L> FromIterator<L> for MerkleSource<L>
where
L: CommitEncode,
{
fn from_iter<T: IntoIterator<Item = L>>(iter: T) -> Self {
iter.into_iter().collect::<Vec<_>>().into()
}
}
impl<L> CommitEncode for MerkleSource<L>
where
L: ConsensusMerkleCommit,
{
fn | <E: io::Write>(&self, e: E) -> usize {
let leafs = self.0.iter().map(L::consensus_commit);
merklize(L::MERKLE_NODE_PREFIX, leafs).0.commit_encode(e)
}
}
impl<L> ConsensusCommit for MerkleSource<L>
where
L: ConsensusMerkleCommit + CommitEncode,
{
type Commitment = MerkleNode;
#[inline]
fn consensus_commit(&self) -> Self::Commitment {
MerkleNode::from_slice(&self.commit_serialize())
.expect("MerkleSource::commit_serialize must produce MerkleNode")
}
#[inline]
fn consensus_verify(&self, commitment: &Self::Commitment) -> bool {
self.consensus_commit() == *commitment
}
}
/// Converts a given piece of client-side-validated data into a structure
/// which can be used in the merklization process.
///
/// This dedicated structure is required since with
/// `impl From<_> for MerkleSource` we would not be able to specify a concrete
/// tagged hash, which we require in [LNPBP-81] merklization and which we
/// provide here via the [`ToMerkleSource::Leaf`] associated type holding the
/// [`ConsensusMerkleCommit::MERKLE_NODE_PREFIX`] prefix value.
///
/// [LNPBP-81]: https://github.com/LNP-BP/LNPBPs/blob/master/lnpbp-0081.md
pub trait ToMerkleSource {
    /// The type of the commitment produced during the merklization process
    type Leaf: ConsensusMerkleCommit;
    /// Transforms the data type into merklizable data
fn to_merkle_source(&self) -> MerkleSource<Self::Leaf>;
}
#[cfg(test)]
mod test {
use std::collections::BTreeMap;
use amplify::{bmap, s};
use bitcoin_hashes::hex::ToHex;
use bitcoin_hashes::{sha256d, Hash};
use strict_encoding::StrictEncode;
use super::*;
use crate::commit_encode::{strategies, Strategy};
use crate::CommitConceal;
#[test]
fn collections() {
// First, we define a data type
#[derive(
Clone,
PartialEq,
Eq,
PartialOrd,
Ord,
Hash,
Debug,
StrictEncode,
StrictDecode
)]
struct Item(pub String);
// Next, we say that it should be concealed using some function
// (double SHA256 hash in this case)
impl CommitConceal for Item {
type ConcealedCommitment = sha256d::Hash;
fn commit_conceal(&self) -> Self::ConcealedCommitment {
sha256d::Hash::hash(self.0.as_bytes())
}
}
// Next, we need to specify how the concealed data should be
// commit-encoded: this time we strict-serialize the hash
impl Strategy for sha256d::Hash {
type Strategy = strategies::UsingStrict;
}
// Now, we define commitment encoding for our concealable type: it
// should conceal the data
impl Strategy for Item {
type Strategy = strategies::UsingConceal;
}
// Now, we need to say that consensus commit procedure should produce
// a final commitment from commit-encoded data (equal to the
// strict encoding of the conceal result) using `CommitVerify` type.
// Here, we use another round of hashing, producing merkle node hash
// from the concealed data.
impl ConsensusCommit for Item {
type Commitment = MerkleNode;
}
// Next, we need to provide merkle node tags for each type of the tree
impl ConsensusMerkleCommit for Item {
const MERKLE_NODE_PREFIX: &'static str = "item";
}
impl ConsensusMerkleCommit for (usize, Item) {
const MERKLE_NODE_PREFIX: &'static str = "usize->item";
}
impl ToMerkleSource for BTreeMap<usize, Item> {
type Leaf = (usize, Item);
fn to_merkle_source(&self) -> MerkleSource<Self::Leaf> {
self.iter().map(|(k, v)| (*k, v.clone())).collect()
}
}
let large = vec![Item(s!("none")); 3];
let vec: MerkleSource<Item> = large.clone().into();
assert_eq!(
vec.commit_serialize().to_hex(),
"71ea45868fbd924061c4deb84f37ed82b0ac808de12aa7659afda7d9303e7a71"
);
let large = vec![Item(s!("none")); 5];
let vec: MerkleSource<Item> = large.clone().into();
assert_eq!(
vec.commit_serialize().to_hex(),
"e255e0124efe0555fde0d932a0bc0042614129e1a02f7b8c0bf608b81af3eb94"
);
let large = vec![Item(s!("none")); 9];
let vec: MerkleSource<Item> = large.clone().into();
assert_eq!(
vec.commit_serialize().to_hex(),
"6cd2d5345a654af4720bdcc637183ded8e432dc88f778b7d27c8d5a0e342c65f"
);
let large = vec![Item(s!("none")); 13];
let vec: MerkleSource<Item> = large.clone().into();
assert_eq!(
vec.commit_serialize().to_hex(),
"3714c08c7c94a4ef769ad2cb7df9aaca1e1252d6599a02aff281c37e7242797d"
);
let large = vec![Item(s!("none")); 17];
let vec: MerkleSource<Item> = large.clone().into();
assert_eq!(
vec.commit_serialize().to_hex(),
"6093dec47e5bdd706da01e4479cb65632eac426eb59c8c28c4e6c199438c8b6f"
);
let item = Item(s!("Some text"));
assert_eq!(&b"\x09\x00Some text"[..], item.strict_serialize().unwrap());
assert_eq!(
"6680bbec0d05d3eaac9c8b658c40f28d2f0cb0f245c7b1cabf5a61c35bd03d8e",
item.commit_serialize().to_hex()
);
assert_eq!(
"3e4b2dcf9bca33400028c8947565c1ff421f6d561e9ec48f88f0c9a24ebc8c30",
item.consensus_commit().to_hex()
);
assert_ne!(item.commit_serialize(), item.strict_serialize().unwrap());
assert_eq!(
MerkleNode::hash(&item.commit_serialize()),
item.consensus_commit()
);
let original = bmap! {
0usize => Item(s!("My first case")),
1usize => Item(s!("My second case with a very long string")),
3usize => Item(s!("My third case to make the Merkle tree two layered"))
};
let collection = original.to_merkle_source();
assert_eq!(
&b"\x03\x00\
\x00\x00\
\x0d\x00\
My first case\
\x01\x00\
\x26\x00\
My second case with a very long string\
\x03\x00\
\x31\x00\
My third case to make the Merkle tree two layered"[..],
original.strict_serialize().unwrap()
);
assert_eq!(
"d911717b8dfbbcef68495c93c0a5e69df618f5dcc194d69e80b6fafbfcd6ed5d",
collection.commit_serialize().to_hex()
);
assert_eq!(
"d911717b8dfbbcef68495c93c0a5e69df618f5dcc194d69e80b6fafbfcd6ed5d",
collection.consensus_commit().to_hex()
);
assert_ne!(
collection.commit_serialize(),
original.strict_serialize().unwrap()
);
assert_eq!(
MerkleNode::from_slice(&collection.commit_serialize()).unwrap(),
collection.consensus_commit()
);
let original = vec![
Item(s!("My first case")),
Item(s!("My second case with a very long string")),
Item(s!("My third case to make the Merkle tree two layered")),
];
let vec: MerkleSource<Item> = original.clone().into();
assert_eq!(
&b"\x03\x00\
\x0d\x00\
My first case\
\x26\x00\
My second case with a very long string\
\x31\x00\
My third case to make the Merkle tree two layered"[..],
original.strict_serialize().unwrap()
);
assert_eq!(
"fd72061e26055fb907aa512a591b4291e739f15198eb72027c4dd6506f14f469",
vec.commit_serialize().to_hex()
);
assert_eq!(
"fd72061e26055fb907aa512a591b4291e739f15198eb72027c4dd6506f14f469",
vec.consensus_commit().to_hex()
);
assert_ne!(
vec.commit_serialize(),
original.strict_serialize().unwrap()
);
assert_eq!(
MerkleNode::from_slice(&vec.commit_serialize()).unwrap(),
vec.consensus_commit()
);
assert_ne!(vec.consensus_commit(), collection.consensus_commit());
}
}
| commit_encode | identifier_name |
mod.rs | // This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Substrate service tasks management module.
use crate::{config::TaskType, Error};
use exit_future::Signal;
use futures::{
future::{pending, select, try_join_all, BoxFuture, Either},
Future, FutureExt, StreamExt,
};
use parking_lot::Mutex;
use prometheus_endpoint::{
exponential_buckets, register, CounterVec, HistogramOpts, HistogramVec, Opts, PrometheusError,
Registry, U64,
};
use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender};
use std::{
collections::{hash_map::Entry, HashMap},
panic,
pin::Pin,
result::Result,
sync::Arc,
};
use tokio::runtime::Handle;
use tracing_futures::Instrument;
mod prometheus_future;
#[cfg(test)]
mod tests;
/// Default task group name.
pub const DEFAULT_GROUP_NAME: &str = "default";
/// The name of a group a task belongs to.
///
/// This name is passed alongside the task name to the Prometheus metrics and can be used
/// to group tasks.
pub enum GroupName {
/// Sets the group name to `default`.
Default,
/// Use the specifically given name as group name.
Specific(&'static str),
}
impl From<Option<&'static str>> for GroupName {
fn from(name: Option<&'static str>) -> Self {
match name {
Some(name) => Self::Specific(name),
None => Self::Default,
}
}
}
impl From<&'static str> for GroupName {
fn from(name: &'static str) -> Self {
Self::Specific(name)
}
}
/// A handle for spawning tasks in the service.
#[derive(Clone)]
pub struct SpawnTaskHandle {
on_exit: exit_future::Exit,
tokio_handle: Handle,
metrics: Option<Metrics>,
task_registry: TaskRegistry,
}
impl SpawnTaskHandle {
/// Spawns the given task with the given name and a group name.
/// If group is not specified `DEFAULT_GROUP_NAME` will be used.
///
/// Note that the `name` is a `&'static str`. The reason for this choice is that
/// statistics about this task are getting reported to the Prometheus endpoint (if enabled), and
/// that therefore the set of possible task names must be bounded.
///
/// In other words, it would be a bad idea for someone to do for example
/// `spawn(format!("{:?}", some_public_key))`.
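///
/// A minimal usage sketch (assuming a `SpawnTaskHandle` named `spawn_handle`; the task name,
/// group, and body here are hypothetical):
///
/// ```ignore
/// spawn_handle.spawn("background-worker", "networking", async move {
///     // Long-running background work goes here.
/// });
/// ```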
pub fn spawn(
&self,
name: &'static str,
group: impl Into<GroupName>,
task: impl Future<Output = ()> + Send +'static,
) {
self.spawn_inner(name, group, task, TaskType::Async)
}
/// Spawns the blocking task with the given name. See also `spawn`.
pub fn spawn_blocking(
&self,
name: &'static str,
group: impl Into<GroupName>,
task: impl Future<Output = ()> + Send +'static,
) {
self.spawn_inner(name, group, task, TaskType::Blocking)
}
/// Helper function that implements the spawning logic. See `spawn` and `spawn_blocking`.
fn spawn_inner(
&self,
name: &'static str,
group: impl Into<GroupName>,
task: impl Future<Output = ()> + Send +'static,
task_type: TaskType,
) {
let on_exit = self.on_exit.clone();
let metrics = self.metrics.clone();
let registry = self.task_registry.clone();
let group = match group.into() {
GroupName::Specific(var) => var,
// If no group is specified use default.
GroupName::Default => DEFAULT_GROUP_NAME,
};
let task_type_label = match task_type {
TaskType::Blocking => "blocking",
TaskType::Async => "async",
};
// Note that we increase the started counter here and not within the future. This way,
// we could properly visualize on Prometheus situations where the spawning doesn't work.
if let Some(metrics) = &self.metrics {
metrics.tasks_spawned.with_label_values(&[name, group, task_type_label]).inc();
// We do a dummy increase in order for the task to show up in metrics.
metrics
.tasks_ended
.with_label_values(&[name, "finished", group, task_type_label])
.inc_by(0);
}
let future = async move {
// Register the task and keep the "token" alive until the task is ended. Then this
// "token" will unregister this task.
let _registry_token = registry.register_task(name, group);
if let Some(metrics) = metrics {
// Add some wrappers around `task`.
let task = { | prometheus_future::with_poll_durations(poll_duration, poll_start, task);
// The logic of `AssertUnwindSafe` here is ok considering that we throw
// away the `Future` after it has panicked.
panic::AssertUnwindSafe(inner).catch_unwind()
};
futures::pin_mut!(task);
match select(on_exit, task).await {
Either::Right((Err(payload), _)) => {
metrics
.tasks_ended
.with_label_values(&[name, "panic", group, task_type_label])
.inc();
panic::resume_unwind(payload)
},
Either::Right((Ok(()), _)) => {
metrics
.tasks_ended
.with_label_values(&[name, "finished", group, task_type_label])
.inc();
},
Either::Left(((), _)) => {
// The `on_exit` has triggered.
metrics
.tasks_ended
.with_label_values(&[name, "interrupted", group, task_type_label])
.inc();
},
}
} else {
futures::pin_mut!(task);
let _ = select(on_exit, task).await;
}
}
.in_current_span();
match task_type {
TaskType::Async => {
self.tokio_handle.spawn(future);
},
TaskType::Blocking => {
let handle = self.tokio_handle.clone();
self.tokio_handle.spawn_blocking(move || {
handle.block_on(future);
});
},
}
}
}
impl sp_core::traits::SpawnNamed for SpawnTaskHandle {
fn spawn_blocking(
&self,
name: &'static str,
group: Option<&'static str>,
future: BoxFuture<'static, ()>,
) {
self.spawn_inner(name, group, future, TaskType::Blocking)
}
fn spawn(
&self,
name: &'static str,
group: Option<&'static str>,
future: BoxFuture<'static, ()>,
) {
self.spawn_inner(name, group, future, TaskType::Async)
}
}
/// A wrapper over `SpawnTaskHandle` that will notify a receiver whenever any
/// task spawned through it fails. The service should be on the receiver side
/// and will shut itself down whenever it receives any message, i.e. an
/// essential task has failed.
#[derive(Clone)]
pub struct SpawnEssentialTaskHandle {
essential_failed_tx: TracingUnboundedSender<()>,
inner: SpawnTaskHandle,
}
impl SpawnEssentialTaskHandle {
/// Creates a new `SpawnEssentialTaskHandle`.
pub fn new(
essential_failed_tx: TracingUnboundedSender<()>,
spawn_task_handle: SpawnTaskHandle,
) -> SpawnEssentialTaskHandle {
SpawnEssentialTaskHandle { essential_failed_tx, inner: spawn_task_handle }
}
/// Spawns the given task with the given name.
///
/// See also [`SpawnTaskHandle::spawn`].
pub fn spawn(
&self,
name: &'static str,
group: impl Into<GroupName>,
task: impl Future<Output = ()> + Send +'static,
) {
self.spawn_inner(name, group, task, TaskType::Async)
}
/// Spawns the blocking task with the given name.
///
/// See also [`SpawnTaskHandle::spawn_blocking`].
pub fn spawn_blocking(
&self,
name: &'static str,
group: impl Into<GroupName>,
task: impl Future<Output = ()> + Send +'static,
) {
self.spawn_inner(name, group, task, TaskType::Blocking)
}
fn spawn_inner(
&self,
name: &'static str,
group: impl Into<GroupName>,
task: impl Future<Output = ()> + Send +'static,
task_type: TaskType,
) {
let essential_failed = self.essential_failed_tx.clone();
let essential_task = std::panic::AssertUnwindSafe(task).catch_unwind().map(move |_| {
log::error!("Essential task `{}` failed. Shutting down service.", name);
let _ = essential_failed.close();
});
let _ = self.inner.spawn_inner(name, group, essential_task, task_type);
}
}
impl sp_core::traits::SpawnEssentialNamed for SpawnEssentialTaskHandle {
fn spawn_essential_blocking(
&self,
name: &'static str,
group: Option<&'static str>,
future: BoxFuture<'static, ()>,
) {
self.spawn_blocking(name, group, future);
}
fn spawn_essential(
&self,
name: &'static str,
group: Option<&'static str>,
future: BoxFuture<'static, ()>,
) {
self.spawn(name, group, future);
}
}
/// Helper struct to manage background/async tasks in Service.
pub struct TaskManager {
/// A future that resolves when the service has exited, this is useful to
/// make sure any internally spawned futures stop when the service does.
on_exit: exit_future::Exit,
/// A signal that makes the exit future above resolve, fired on drop.
_signal: Signal,
/// Tokio runtime handle that is used to spawn futures.
tokio_handle: Handle,
/// Prometheus metrics used to report the polling times.
metrics: Option<Metrics>,
/// Send a signal when a spawned essential task has concluded. The next time
/// the service future is polled it should complete with an error.
essential_failed_tx: TracingUnboundedSender<()>,
/// A receiver for spawned essential-tasks concluding.
essential_failed_rx: TracingUnboundedReceiver<()>,
/// Things to keep alive until the task manager is dropped.
keep_alive: Box<dyn std::any::Any + Send>,
/// A list of other `TaskManager`s to terminate and gracefully shut down when the parent
/// terminates and gracefully shuts down. Also ends the parent `future()` if a child's essential
/// task fails.
children: Vec<TaskManager>,
/// The registry of all running tasks.
task_registry: TaskRegistry,
}
impl TaskManager {
/// If a Prometheus registry is passed, it will be used to report statistics about the
/// service tasks.
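///
/// A minimal construction sketch (assumes an existing tokio runtime `rt`; no Prometheus
/// registry is passed here):
///
/// ```ignore
/// let task_manager = TaskManager::new(rt.handle().clone(), None)
///     .expect("failed to create task manager");
/// let spawn_handle = task_manager.spawn_handle();
/// ```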
pub fn new(
tokio_handle: Handle,
prometheus_registry: Option<&Registry>,
) -> Result<Self, PrometheusError> {
let (signal, on_exit) = exit_future::signal();
// A side-channel for essential tasks to communicate shutdown.
let (essential_failed_tx, essential_failed_rx) =
tracing_unbounded("mpsc_essential_tasks", 100);
let metrics = prometheus_registry.map(Metrics::register).transpose()?;
Ok(Self {
on_exit,
_signal: signal,
tokio_handle,
metrics,
essential_failed_tx,
essential_failed_rx,
keep_alive: Box::new(()),
children: Vec::new(),
task_registry: Default::default(),
})
}
/// Get a handle for spawning tasks.
pub fn spawn_handle(&self) -> SpawnTaskHandle {
SpawnTaskHandle {
on_exit: self.on_exit.clone(),
tokio_handle: self.tokio_handle.clone(),
metrics: self.metrics.clone(),
task_registry: self.task_registry.clone(),
}
}
/// Get a handle for spawning essential tasks.
pub fn spawn_essential_handle(&self) -> SpawnEssentialTaskHandle {
SpawnEssentialTaskHandle::new(self.essential_failed_tx.clone(), self.spawn_handle())
}
/// Return a future that will end with success if the signal to terminate was sent
/// (`self.terminate()`) or with an error if an essential task fails.
///
/// # Warning
///
/// This function will not wait until the end of the remaining tasks.
pub fn future<'a>(
&'a mut self,
) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send + 'a>> {
Box::pin(async move {
let mut t1 = self.essential_failed_rx.next().fuse();
let mut t2 = self.on_exit.clone().fuse();
let mut t3 = try_join_all(
self.children
.iter_mut()
.map(|x| x.future())
// Never end this future if there is no error, because if there are no children,
// it must not stop
.chain(std::iter::once(pending().boxed())),
)
.fuse();
futures::select! {
_ = t1 => Err(Error::Other("Essential task failed.".into())),
_ = t2 => Ok(()),
res = t3 => Err(res.map(|_| ()).expect_err("this future never ends; qed")),
}
})
}
/// Set what the task manager should keep alive, can be called multiple times.
pub fn keep_alive<T:'static + Send>(&mut self, to_keep_alive: T) {
// allows this fn to be safely called multiple times.
use std::mem;
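// Each call nests the previously kept values inside a fresh tuple, so everything
// registered so far stays alive until the task manager itself is dropped.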
let old = mem::replace(&mut self.keep_alive, Box::new(()));
self.keep_alive = Box::new((to_keep_alive, old));
}
/// Register another TaskManager to terminate and gracefully shut down when the parent
/// terminates and gracefully shuts down. Also ends the parent `future()` if a child's essential
/// task fails. (But don't end the parent if a child's normal task fails.)
pub fn add_child(&mut self, child: TaskManager) {
self.children.push(child);
}
/// Consume `self` and return the [`TaskRegistry`].
///
/// This [`TaskRegistry`] can be used to check for still running tasks after this task manager
/// was dropped.
pub fn into_task_registry(self) -> TaskRegistry {
self.task_registry
}
}
#[derive(Clone)]
struct Metrics {
// This list is ordered alphabetically
poll_duration: HistogramVec,
poll_start: CounterVec<U64>,
tasks_spawned: CounterVec<U64>,
tasks_ended: CounterVec<U64>,
}
impl Metrics {
fn register(registry: &Registry) -> Result<Self, PrometheusError> {
Ok(Self {
poll_duration: register(HistogramVec::new(
HistogramOpts {
common_opts: Opts::new(
"substrate_tasks_polling_duration",
"Duration in seconds of each invocation of Future::poll"
),
buckets: exponential_buckets(0.001, 4.0, 9)
.expect("function parameters are constant and always valid; qed"),
},
&["task_name", "task_group", "kind"]
)?, registry)?,
poll_start: register(CounterVec::new(
Opts::new(
"substrate_tasks_polling_started_total",
"Total number of times we started invoking Future::poll"
),
&["task_name", "task_group", "kind"]
)?, registry)?,
tasks_spawned: register(CounterVec::new(
Opts::new(
"substrate_tasks_spawned_total",
"Total number of tasks that have been spawned on the Service"
),
&["task_name", "task_group", "kind"]
)?, registry)?,
tasks_ended: register(CounterVec::new(
Opts::new(
"substrate_tasks_ended_total",
"Total number of tasks for which Future::poll has returned Ready(()) or panicked"
),
&["task_name", "reason", "task_group", "kind"]
)?, registry)?,
})
}
}
/// Ensures that a [`Task`] is unregistered when this object is dropped.
struct UnregisterOnDrop {
task: Task,
registry: TaskRegistry,
}
impl Drop for UnregisterOnDrop {
fn drop(&mut self) {
let mut tasks = self.registry.tasks.lock();
if let Entry::Occupied(mut entry) = (*tasks).entry(self.task.clone()) {
*entry.get_mut() -= 1;
if *entry.get() == 0 {
entry.remove();
}
}
}
}
/// Represents a running async task in the [`TaskManager`].
///
/// As a task is identified by a name and a group, it is entirely valid that there exist multiple
/// tasks with the same name and group.
#[derive(Clone, Hash, Eq, PartialEq)]
pub struct Task {
/// The name of the task.
pub name: &'static str,
/// The group this task is associated to.
pub group: &'static str,
}
impl Task {
/// Returns if the `group` is the [`DEFAULT_GROUP_NAME`].
pub fn is_default_group(&self) -> bool {
self.group == DEFAULT_GROUP_NAME
}
}
/// Keeps track of all running [`Task`]s in [`TaskManager`].
#[derive(Clone, Default)]
pub struct TaskRegistry {
tasks: Arc<Mutex<HashMap<Task, usize>>>,
}
impl TaskRegistry {
/// Register a task with the given `name` and `group`.
///
/// Returns [`UnregisterOnDrop`] that ensures that the task is unregistered when this value is
/// dropped.
fn register_task(&self, name: &'static str, group: &'static str) -> UnregisterOnDrop {
let task = Task { name, group };
{
let mut tasks = self.tasks.lock();
*(*tasks).entry(task.clone()).or_default() += 1;
}
UnregisterOnDrop { task, registry: self.clone() }
}
/// Returns the running tasks.
///
/// As a task is only identified by its `name` and `group`, there can be duplicate tasks. The
/// number per task represents the concurrently running tasks with the same identifier.
pub fn running_tasks(&self) -> HashMap<Task, usize> {
(*self.tasks.lock()).clone()
}
} | let poll_duration =
metrics.poll_duration.with_label_values(&[name, group, task_type_label]);
let poll_start =
metrics.poll_start.with_label_values(&[name, group, task_type_label]);
let inner = | random_line_split |
mod.rs | // This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Substrate service tasks management module.
use crate::{config::TaskType, Error};
use exit_future::Signal;
use futures::{
future::{pending, select, try_join_all, BoxFuture, Either},
Future, FutureExt, StreamExt,
};
use parking_lot::Mutex;
use prometheus_endpoint::{
exponential_buckets, register, CounterVec, HistogramOpts, HistogramVec, Opts, PrometheusError,
Registry, U64,
};
use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender};
use std::{
collections::{hash_map::Entry, HashMap},
panic,
pin::Pin,
result::Result,
sync::Arc,
};
use tokio::runtime::Handle;
use tracing_futures::Instrument;
mod prometheus_future;
#[cfg(test)]
mod tests;
/// Default task group name.
pub const DEFAULT_GROUP_NAME: &str = "default";
/// The name of a group a task belongs to.
///
/// This name is passed alongside the task name to the Prometheus metrics and can be used
/// to group tasks.
pub enum GroupName {
/// Sets the group name to `default`.
Default,
/// Use the specifically given name as group name.
Specific(&'static str),
}
impl From<Option<&'static str>> for GroupName {
fn from(name: Option<&'static str>) -> Self {
match name {
Some(name) => Self::Specific(name),
None => Self::Default,
}
}
}
impl From<&'static str> for GroupName {
fn from(name: &'static str) -> Self {
Self::Specific(name)
}
}
/// A handle for spawning tasks in the service.
#[derive(Clone)]
pub struct SpawnTaskHandle {
on_exit: exit_future::Exit,
tokio_handle: Handle,
metrics: Option<Metrics>,
task_registry: TaskRegistry,
}
impl SpawnTaskHandle {
/// Spawns the given task with the given name and a group name.
/// If group is not specified `DEFAULT_GROUP_NAME` will be used.
///
/// Note that the `name` is a `&'static str`. The reason for this choice is that
/// statistics about this task are getting reported to the Prometheus endpoint (if enabled), and
/// that therefore the set of possible task names must be bounded.
///
/// In other words, it would be a bad idea for someone to do for example
/// `spawn(format!("{:?}", some_public_key))`.
pub fn spawn(
&self,
name: &'static str,
group: impl Into<GroupName>,
task: impl Future<Output = ()> + Send +'static,
) {
self.spawn_inner(name, group, task, TaskType::Async)
}
/// Spawns the blocking task with the given name. See also `spawn`.
pub fn spawn_blocking(
&self,
name: &'static str,
group: impl Into<GroupName>,
task: impl Future<Output = ()> + Send +'static,
) {
self.spawn_inner(name, group, task, TaskType::Blocking)
}
/// Helper function that implements the spawning logic. See `spawn` and `spawn_blocking`.
fn spawn_inner(
&self,
name: &'static str,
group: impl Into<GroupName>,
task: impl Future<Output = ()> + Send +'static,
task_type: TaskType,
) {
let on_exit = self.on_exit.clone();
let metrics = self.metrics.clone();
let registry = self.task_registry.clone();
let group = match group.into() {
GroupName::Specific(var) => var,
// If no group is specified use default.
GroupName::Default => DEFAULT_GROUP_NAME,
};
let task_type_label = match task_type {
TaskType::Blocking => "blocking",
TaskType::Async => "async",
};
// Note that we increase the started counter here and not within the future. This way,
// we could properly visualize on Prometheus situations where the spawning doesn't work.
if let Some(metrics) = &self.metrics {
metrics.tasks_spawned.with_label_values(&[name, group, task_type_label]).inc();
// We do a dummy increase in order for the task to show up in metrics.
metrics
.tasks_ended
.with_label_values(&[name, "finished", group, task_type_label])
.inc_by(0);
}
let future = async move {
// Register the task and keep the "token" alive until the task is ended. Then this
// "token" will unregister this task.
let _registry_token = registry.register_task(name, group);
if let Some(metrics) = metrics {
// Add some wrappers around `task`.
let task = {
let poll_duration =
metrics.poll_duration.with_label_values(&[name, group, task_type_label]);
let poll_start =
metrics.poll_start.with_label_values(&[name, group, task_type_label]);
let inner =
prometheus_future::with_poll_durations(poll_duration, poll_start, task);
// The logic of `AssertUnwindSafe` here is ok considering that we throw
// away the `Future` after it has panicked.
panic::AssertUnwindSafe(inner).catch_unwind()
};
futures::pin_mut!(task);
match select(on_exit, task).await {
Either::Right((Err(payload), _)) => {
metrics
.tasks_ended
.with_label_values(&[name, "panic", group, task_type_label])
.inc();
panic::resume_unwind(payload)
},
Either::Right((Ok(()), _)) => {
metrics
.tasks_ended
.with_label_values(&[name, "finished", group, task_type_label])
.inc();
},
Either::Left(((), _)) => {
// The `on_exit` has triggered.
metrics
.tasks_ended
.with_label_values(&[name, "interrupted", group, task_type_label])
.inc();
},
}
} else {
futures::pin_mut!(task);
let _ = select(on_exit, task).await;
}
}
.in_current_span();
match task_type {
TaskType::Async => | ,
TaskType::Blocking => {
let handle = self.tokio_handle.clone();
self.tokio_handle.spawn_blocking(move || {
handle.block_on(future);
});
},
}
}
}
impl sp_core::traits::SpawnNamed for SpawnTaskHandle {
fn spawn_blocking(
&self,
name: &'static str,
group: Option<&'static str>,
future: BoxFuture<'static, ()>,
) {
self.spawn_inner(name, group, future, TaskType::Blocking)
}
fn spawn(
&self,
name: &'static str,
group: Option<&'static str>,
future: BoxFuture<'static, ()>,
) {
self.spawn_inner(name, group, future, TaskType::Async)
}
}
/// A wrapper over `SpawnTaskHandle` that will notify a receiver whenever any
/// task spawned through it fails. The service should be on the receiver side
/// and will shut itself down whenever it receives any message, i.e. an
/// essential task has failed.
#[derive(Clone)]
pub struct SpawnEssentialTaskHandle {
essential_failed_tx: TracingUnboundedSender<()>,
inner: SpawnTaskHandle,
}
impl SpawnEssentialTaskHandle {
/// Creates a new `SpawnEssentialTaskHandle`.
pub fn new(
essential_failed_tx: TracingUnboundedSender<()>,
spawn_task_handle: SpawnTaskHandle,
) -> SpawnEssentialTaskHandle {
SpawnEssentialTaskHandle { essential_failed_tx, inner: spawn_task_handle }
}
/// Spawns the given task with the given name.
///
/// See also [`SpawnTaskHandle::spawn`].
pub fn spawn(
&self,
name: &'static str,
group: impl Into<GroupName>,
task: impl Future<Output = ()> + Send +'static,
) {
self.spawn_inner(name, group, task, TaskType::Async)
}
/// Spawns the blocking task with the given name.
///
/// See also [`SpawnTaskHandle::spawn_blocking`].
pub fn spawn_blocking(
&self,
name: &'static str,
group: impl Into<GroupName>,
task: impl Future<Output = ()> + Send +'static,
) {
self.spawn_inner(name, group, task, TaskType::Blocking)
}
fn spawn_inner(
&self,
name: &'static str,
group: impl Into<GroupName>,
task: impl Future<Output = ()> + Send +'static,
task_type: TaskType,
) {
let essential_failed = self.essential_failed_tx.clone();
let essential_task = std::panic::AssertUnwindSafe(task).catch_unwind().map(move |_| {
log::error!("Essential task `{}` failed. Shutting down service.", name);
let _ = essential_failed.close();
});
let _ = self.inner.spawn_inner(name, group, essential_task, task_type);
}
}
impl sp_core::traits::SpawnEssentialNamed for SpawnEssentialTaskHandle {
fn spawn_essential_blocking(
&self,
name: &'static str,
group: Option<&'static str>,
future: BoxFuture<'static, ()>,
) {
self.spawn_blocking(name, group, future);
}
fn spawn_essential(
&self,
name: &'static str,
group: Option<&'static str>,
future: BoxFuture<'static, ()>,
) {
self.spawn(name, group, future);
}
}
/// Helper struct to manage background/async tasks in Service.
pub struct TaskManager {
/// A future that resolves when the service has exited, this is useful to
/// make sure any internally spawned futures stop when the service does.
on_exit: exit_future::Exit,
/// A signal that makes the exit future above resolve, fired on drop.
_signal: Signal,
/// Tokio runtime handle that is used to spawn futures.
tokio_handle: Handle,
/// Prometheus metrics used to report the polling times.
metrics: Option<Metrics>,
/// Send a signal when a spawned essential task has concluded. The next time
/// the service future is polled it should complete with an error.
essential_failed_tx: TracingUnboundedSender<()>,
/// A receiver for spawned essential-tasks concluding.
essential_failed_rx: TracingUnboundedReceiver<()>,
/// Things to keep alive until the task manager is dropped.
keep_alive: Box<dyn std::any::Any + Send>,
/// A list of other `TaskManager`s to terminate and gracefully shut down when the parent
/// terminates and gracefully shuts down. Also ends the parent `future()` if a child's essential
/// task fails.
children: Vec<TaskManager>,
/// The registry of all running tasks.
task_registry: TaskRegistry,
}
impl TaskManager {
/// If a Prometheus registry is passed, it will be used to report statistics about the
/// service tasks.
pub fn new(
tokio_handle: Handle,
prometheus_registry: Option<&Registry>,
) -> Result<Self, PrometheusError> {
let (signal, on_exit) = exit_future::signal();
// A side-channel for essential tasks to communicate shutdown.
let (essential_failed_tx, essential_failed_rx) =
tracing_unbounded("mpsc_essential_tasks", 100);
let metrics = prometheus_registry.map(Metrics::register).transpose()?;
Ok(Self {
on_exit,
_signal: signal,
tokio_handle,
metrics,
essential_failed_tx,
essential_failed_rx,
keep_alive: Box::new(()),
children: Vec::new(),
task_registry: Default::default(),
})
}
/// Get a handle for spawning tasks.
pub fn spawn_handle(&self) -> SpawnTaskHandle {
SpawnTaskHandle {
on_exit: self.on_exit.clone(),
tokio_handle: self.tokio_handle.clone(),
metrics: self.metrics.clone(),
task_registry: self.task_registry.clone(),
}
}
/// Get a handle for spawning essential tasks.
pub fn spawn_essential_handle(&self) -> SpawnEssentialTaskHandle {
SpawnEssentialTaskHandle::new(self.essential_failed_tx.clone(), self.spawn_handle())
}
/// Return a future that will end with success if the signal to terminate was sent
/// (`self.terminate()`) or with an error if an essential task fails.
///
/// # Warning
///
/// This function will not wait until the end of the remaining tasks.
pub fn future<'a>(
&'a mut self,
) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send + 'a>> {
Box::pin(async move {
let mut t1 = self.essential_failed_rx.next().fuse();
let mut t2 = self.on_exit.clone().fuse();
let mut t3 = try_join_all(
self.children
.iter_mut()
.map(|x| x.future())
// Never end this future if there is no error, because if there are no children,
// it must not stop
.chain(std::iter::once(pending().boxed())),
)
.fuse();
futures::select! {
_ = t1 => Err(Error::Other("Essential task failed.".into())),
_ = t2 => Ok(()),
res = t3 => Err(res.map(|_| ()).expect_err("this future never ends; qed")),
}
})
}
/// Set what the task manager should keep alive, can be called multiple times.
pub fn keep_alive<T:'static + Send>(&mut self, to_keep_alive: T) {
// allows this fn to be safely called multiple times.
use std::mem;
let old = mem::replace(&mut self.keep_alive, Box::new(()));
self.keep_alive = Box::new((to_keep_alive, old));
}
/// Register another TaskManager to terminate and gracefully shut down when the parent
/// terminates and gracefully shuts down. Also ends the parent `future()` if a child's essential
/// task fails. (But don't end the parent if a child's normal task fails.)
pub fn add_child(&mut self, child: TaskManager) {
self.children.push(child);
}
/// Consume `self` and return the [`TaskRegistry`].
///
/// This [`TaskRegistry`] can be used to check for still running tasks after this task manager
/// was dropped.
pub fn into_task_registry(self) -> TaskRegistry {
self.task_registry
}
}
#[derive(Clone)]
struct Metrics {
// This list is ordered alphabetically
poll_duration: HistogramVec,
poll_start: CounterVec<U64>,
tasks_spawned: CounterVec<U64>,
tasks_ended: CounterVec<U64>,
}
impl Metrics {
fn register(registry: &Registry) -> Result<Self, PrometheusError> {
Ok(Self {
poll_duration: register(HistogramVec::new(
HistogramOpts {
common_opts: Opts::new(
"substrate_tasks_polling_duration",
"Duration in seconds of each invocation of Future::poll"
),
buckets: exponential_buckets(0.001, 4.0, 9)
.expect("function parameters are constant and always valid; qed"),
},
&["task_name", "task_group", "kind"]
)?, registry)?,
poll_start: register(CounterVec::new(
Opts::new(
"substrate_tasks_polling_started_total",
"Total number of times we started invoking Future::poll"
),
&["task_name", "task_group", "kind"]
)?, registry)?,
tasks_spawned: register(CounterVec::new(
Opts::new(
"substrate_tasks_spawned_total",
"Total number of tasks that have been spawned on the Service"
),
&["task_name", "task_group", "kind"]
)?, registry)?,
tasks_ended: register(CounterVec::new(
Opts::new(
"substrate_tasks_ended_total",
"Total number of tasks for which Future::poll has returned Ready(()) or panicked"
),
&["task_name", "reason", "task_group", "kind"]
)?, registry)?,
})
}
}
/// Ensures that a [`Task`] is unregistered when this object is dropped.
struct UnregisterOnDrop {
task: Task,
registry: TaskRegistry,
}
impl Drop for UnregisterOnDrop {
fn drop(&mut self) {
let mut tasks = self.registry.tasks.lock();
if let Entry::Occupied(mut entry) = (*tasks).entry(self.task.clone()) {
*entry.get_mut() -= 1;
if *entry.get() == 0 {
entry.remove();
}
}
}
}
/// Represents a running async task in the [`TaskManager`].
///
/// As a task is identified by a name and a group, it is entirely valid that there exist multiple
/// tasks with the same name and group.
#[derive(Clone, Hash, Eq, PartialEq)]
pub struct Task {
/// The name of the task.
pub name: &'static str,
/// The group this task is associated to.
pub group: &'static str,
}
impl Task {
/// Returns if the `group` is the [`DEFAULT_GROUP_NAME`].
pub fn is_default_group(&self) -> bool {
self.group == DEFAULT_GROUP_NAME
}
}
/// Keeps track of all running [`Task`]s in [`TaskManager`].
#[derive(Clone, Default)]
pub struct TaskRegistry {
tasks: Arc<Mutex<HashMap<Task, usize>>>,
}
impl TaskRegistry {
/// Register a task with the given `name` and `group`.
///
/// Returns [`UnregisterOnDrop`] that ensures that the task is unregistered when this value is
/// dropped.
fn register_task(&self, name: &'static str, group: &'static str) -> UnregisterOnDrop {
let task = Task { name, group };
{
let mut tasks = self.tasks.lock();
*(*tasks).entry(task.clone()).or_default() += 1;
}
UnregisterOnDrop { task, registry: self.clone() }
}
/// Returns the running tasks.
///
/// As a task is only identified by its `name` and `group`, there can be duplicate tasks. The
/// number per task represents the concurrently running tasks with the same identifier.
pub fn running_tasks(&self) -> HashMap<Task, usize> {
(*self.tasks.lock()).clone()
}
}
| {
self.tokio_handle.spawn(future);
} | conditional_block |
mod.rs | // This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Substrate service tasks management module.
use crate::{config::TaskType, Error};
use exit_future::Signal;
use futures::{
future::{pending, select, try_join_all, BoxFuture, Either},
Future, FutureExt, StreamExt,
};
use parking_lot::Mutex;
use prometheus_endpoint::{
exponential_buckets, register, CounterVec, HistogramOpts, HistogramVec, Opts, PrometheusError,
Registry, U64,
};
use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender};
use std::{
collections::{hash_map::Entry, HashMap},
panic,
pin::Pin,
result::Result,
sync::Arc,
};
use tokio::runtime::Handle;
use tracing_futures::Instrument;
mod prometheus_future;
#[cfg(test)]
mod tests;
/// Default task group name.
pub const DEFAULT_GROUP_NAME: &str = "default";
/// The name of a group a task belongs to.
///
/// This name is passed alongside the task name to the Prometheus metrics and can be used
/// to group tasks.
pub enum GroupName {
/// Sets the group name to `default`.
Default,
/// Use the specifically given name as group name.
Specific(&'static str),
}
impl From<Option<&'static str>> for GroupName {
fn from(name: Option<&'static str>) -> Self {
match name {
Some(name) => Self::Specific(name),
None => Self::Default,
}
}
}
impl From<&'static str> for GroupName {
fn from(name: &'static str) -> Self {
Self::Specific(name)
}
}
/// A handle for spawning tasks in the service.
#[derive(Clone)]
pub struct SpawnTaskHandle {
on_exit: exit_future::Exit,
tokio_handle: Handle,
metrics: Option<Metrics>,
task_registry: TaskRegistry,
}
impl SpawnTaskHandle {
/// Spawns the given task with the given name and a group name.
/// If group is not specified `DEFAULT_GROUP_NAME` will be used.
///
/// Note that the `name` is a `&'static str`. The reason for this choice is that
/// statistics about this task are getting reported to the Prometheus endpoint (if enabled), and
/// that therefore the set of possible task names must be bounded.
///
/// In other words, it would be a bad idea for someone to do for example
/// `spawn(format!("{:?}", some_public_key))`.
pub fn spawn(
&self,
name: &'static str,
group: impl Into<GroupName>,
task: impl Future<Output = ()> + Send +'static,
) {
self.spawn_inner(name, group, task, TaskType::Async)
}
/// Spawns the blocking task with the given name. See also `spawn`.
pub fn spawn_blocking(
&self,
name: &'static str,
group: impl Into<GroupName>,
task: impl Future<Output = ()> + Send +'static,
) {
self.spawn_inner(name, group, task, TaskType::Blocking)
}
/// Helper function that implements the spawning logic. See `spawn` and `spawn_blocking`.
fn spawn_inner(
&self,
name: &'static str,
group: impl Into<GroupName>,
task: impl Future<Output = ()> + Send +'static,
task_type: TaskType,
) {
let on_exit = self.on_exit.clone();
let metrics = self.metrics.clone();
let registry = self.task_registry.clone();
let group = match group.into() {
GroupName::Specific(var) => var,
// If no group is specified use default.
GroupName::Default => DEFAULT_GROUP_NAME,
};
let task_type_label = match task_type {
TaskType::Blocking => "blocking",
TaskType::Async => "async",
};
// Note that we increase the started counter here and not within the future. This way,
// we could properly visualize on Prometheus situations where the spawning doesn't work.
if let Some(metrics) = &self.metrics {
metrics.tasks_spawned.with_label_values(&[name, group, task_type_label]).inc();
// We do a dummy increase in order for the task to show up in metrics.
metrics
.tasks_ended
.with_label_values(&[name, "finished", group, task_type_label])
.inc_by(0);
}
let future = async move {
// Register the task and keep the "token" alive until the task is ended. Then this
// "token" will unregister this task.
let _registry_token = registry.register_task(name, group);
if let Some(metrics) = metrics {
// Add some wrappers around `task`.
let task = {
let poll_duration =
metrics.poll_duration.with_label_values(&[name, group, task_type_label]);
let poll_start =
metrics.poll_start.with_label_values(&[name, group, task_type_label]);
let inner =
prometheus_future::with_poll_durations(poll_duration, poll_start, task);
// The logic of `AssertUnwindSafe` here is ok considering that we throw
// away the `Future` after it has panicked.
panic::AssertUnwindSafe(inner).catch_unwind()
};
futures::pin_mut!(task);
match select(on_exit, task).await {
Either::Right((Err(payload), _)) => {
metrics
.tasks_ended
.with_label_values(&[name, "panic", group, task_type_label])
.inc();
panic::resume_unwind(payload)
},
Either::Right((Ok(()), _)) => {
metrics
.tasks_ended
.with_label_values(&[name, "finished", group, task_type_label])
.inc();
},
Either::Left(((), _)) => {
// The `on_exit` has triggered.
metrics
.tasks_ended
.with_label_values(&[name, "interrupted", group, task_type_label])
.inc();
},
}
} else {
futures::pin_mut!(task);
let _ = select(on_exit, task).await;
}
}
.in_current_span();
match task_type {
TaskType::Async => {
self.tokio_handle.spawn(future);
},
TaskType::Blocking => {
let handle = self.tokio_handle.clone();
self.tokio_handle.spawn_blocking(move || {
handle.block_on(future);
});
},
}
}
}
impl sp_core::traits::SpawnNamed for SpawnTaskHandle {
fn spawn_blocking(
&self,
name: &'static str,
group: Option<&'static str>,
future: BoxFuture<'static, ()>,
) {
self.spawn_inner(name, group, future, TaskType::Blocking)
}
fn spawn(
&self,
name: &'static str,
group: Option<&'static str>,
future: BoxFuture<'static, ()>,
) {
self.spawn_inner(name, group, future, TaskType::Async)
}
}
/// A wrapper over `SpawnTaskHandle` that will notify a receiver whenever any
/// task spawned through it fails. The service should be on the receiver side
/// and will shut itself down whenever it receives any message, i.e. an
/// essential task has failed.
#[derive(Clone)]
pub struct | {
essential_failed_tx: TracingUnboundedSender<()>,
inner: SpawnTaskHandle,
}
impl SpawnEssentialTaskHandle {
/// Creates a new `SpawnEssentialTaskHandle`.
pub fn new(
essential_failed_tx: TracingUnboundedSender<()>,
spawn_task_handle: SpawnTaskHandle,
) -> SpawnEssentialTaskHandle {
SpawnEssentialTaskHandle { essential_failed_tx, inner: spawn_task_handle }
}
/// Spawns the given task with the given name.
///
/// See also [`SpawnTaskHandle::spawn`].
pub fn spawn(
&self,
name: &'static str,
group: impl Into<GroupName>,
task: impl Future<Output = ()> + Send +'static,
) {
self.spawn_inner(name, group, task, TaskType::Async)
}
/// Spawns the blocking task with the given name.
///
/// See also [`SpawnTaskHandle::spawn_blocking`].
pub fn spawn_blocking(
&self,
name: &'static str,
group: impl Into<GroupName>,
task: impl Future<Output = ()> + Send +'static,
) {
self.spawn_inner(name, group, task, TaskType::Blocking)
}
fn spawn_inner(
&self,
name: &'static str,
group: impl Into<GroupName>,
task: impl Future<Output = ()> + Send +'static,
task_type: TaskType,
) {
let essential_failed = self.essential_failed_tx.clone();
let essential_task = std::panic::AssertUnwindSafe(task).catch_unwind().map(move |_| {
log::error!("Essential task `{}` failed. Shutting down service.", name);
let _ = essential_failed.close();
});
let _ = self.inner.spawn_inner(name, group, essential_task, task_type);
}
}
impl sp_core::traits::SpawnEssentialNamed for SpawnEssentialTaskHandle {
fn spawn_essential_blocking(
&self,
name: &'static str,
group: Option<&'static str>,
future: BoxFuture<'static, ()>,
) {
self.spawn_blocking(name, group, future);
}
fn spawn_essential(
&self,
name: &'static str,
group: Option<&'static str>,
future: BoxFuture<'static, ()>,
) {
self.spawn(name, group, future);
}
}
/// Helper struct to manage background/async tasks in Service.
pub struct TaskManager {
/// A future that resolves when the service has exited, this is useful to
/// make sure any internally spawned futures stop when the service does.
on_exit: exit_future::Exit,
/// A signal that makes the exit future above resolve, fired on drop.
_signal: Signal,
/// Tokio runtime handle that is used to spawn futures.
tokio_handle: Handle,
/// Prometheus metrics used to report the polling times.
metrics: Option<Metrics>,
/// Send a signal when a spawned essential task has concluded. The next time
/// the service future is polled it should complete with an error.
essential_failed_tx: TracingUnboundedSender<()>,
/// A receiver for spawned essential-tasks concluding.
essential_failed_rx: TracingUnboundedReceiver<()>,
/// Things to keep alive until the task manager is dropped.
keep_alive: Box<dyn std::any::Any + Send>,
/// A list of other `TaskManager`s to terminate and gracefully shut down when the parent
/// terminates and gracefully shuts down. Also ends the parent `future()` if a child's essential
/// task fails.
children: Vec<TaskManager>,
/// The registry of all running tasks.
task_registry: TaskRegistry,
}
impl TaskManager {
/// If a Prometheus registry is passed, it will be used to report statistics about the
/// service tasks.
pub fn new(
tokio_handle: Handle,
prometheus_registry: Option<&Registry>,
) -> Result<Self, PrometheusError> {
let (signal, on_exit) = exit_future::signal();
// A side-channel for essential tasks to communicate shutdown.
let (essential_failed_tx, essential_failed_rx) =
tracing_unbounded("mpsc_essential_tasks", 100);
let metrics = prometheus_registry.map(Metrics::register).transpose()?;
Ok(Self {
on_exit,
_signal: signal,
tokio_handle,
metrics,
essential_failed_tx,
essential_failed_rx,
keep_alive: Box::new(()),
children: Vec::new(),
task_registry: Default::default(),
})
}
/// Get a handle for spawning tasks.
pub fn spawn_handle(&self) -> SpawnTaskHandle {
SpawnTaskHandle {
on_exit: self.on_exit.clone(),
tokio_handle: self.tokio_handle.clone(),
metrics: self.metrics.clone(),
task_registry: self.task_registry.clone(),
}
}
/// Get a handle for spawning essential tasks.
pub fn spawn_essential_handle(&self) -> SpawnEssentialTaskHandle {
SpawnEssentialTaskHandle::new(self.essential_failed_tx.clone(), self.spawn_handle())
}
/// Return a future that will end with success if the signal to terminate was sent
/// (`self.terminate()`) or with an error if an essential task fails.
///
/// # Warning
///
/// This function will not wait until the end of the remaining tasks.
pub fn future<'a>(
&'a mut self,
) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send + 'a>> {
Box::pin(async move {
let mut t1 = self.essential_failed_rx.next().fuse();
let mut t2 = self.on_exit.clone().fuse();
let mut t3 = try_join_all(
self.children
.iter_mut()
.map(|x| x.future())
// Never end this future if there is no error, because if there are no children,
// it must not stop
.chain(std::iter::once(pending().boxed())),
)
.fuse();
futures::select! {
_ = t1 => Err(Error::Other("Essential task failed.".into())),
_ = t2 => Ok(()),
res = t3 => Err(res.map(|_| ()).expect_err("this future never ends; qed")),
}
})
}
/// Set what the task manager should keep alive, can be called multiple times.
pub fn keep_alive<T:'static + Send>(&mut self, to_keep_alive: T) {
// allows this fn to be safely called multiple times.
use std::mem;
let old = mem::replace(&mut self.keep_alive, Box::new(()));
self.keep_alive = Box::new((to_keep_alive, old));
}
/// Register another TaskManager to terminate and gracefully shut down when the parent
/// terminates and gracefully shuts down. Also ends the parent `future()` if a child's essential
/// task fails. (But don't end the parent if a child's normal task fails.)
pub fn add_child(&mut self, child: TaskManager) {
self.children.push(child);
}
/// Consume `self` and return the [`TaskRegistry`].
///
/// This [`TaskRegistry`] can be used to check for still running tasks after this task manager
/// was dropped.
pub fn into_task_registry(self) -> TaskRegistry {
self.task_registry
}
}
#[derive(Clone)]
struct Metrics {
// This list is ordered alphabetically
poll_duration: HistogramVec,
poll_start: CounterVec<U64>,
tasks_spawned: CounterVec<U64>,
tasks_ended: CounterVec<U64>,
}
impl Metrics {
fn register(registry: &Registry) -> Result<Self, PrometheusError> {
Ok(Self {
poll_duration: register(HistogramVec::new(
HistogramOpts {
common_opts: Opts::new(
"substrate_tasks_polling_duration",
"Duration in seconds of each invocation of Future::poll"
),
buckets: exponential_buckets(0.001, 4.0, 9)
.expect("function parameters are constant and always valid; qed"),
},
&["task_name", "task_group", "kind"]
)?, registry)?,
poll_start: register(CounterVec::new(
Opts::new(
"substrate_tasks_polling_started_total",
"Total number of times we started invoking Future::poll"
),
&["task_name", "task_group", "kind"]
)?, registry)?,
tasks_spawned: register(CounterVec::new(
Opts::new(
"substrate_tasks_spawned_total",
"Total number of tasks that have been spawned on the Service"
),
&["task_name", "task_group", "kind"]
)?, registry)?,
tasks_ended: register(CounterVec::new(
Opts::new(
"substrate_tasks_ended_total",
"Total number of tasks for which Future::poll has returned Ready(()) or panicked"
),
&["task_name", "reason", "task_group", "kind"]
)?, registry)?,
})
}
}
/// Ensures that a [`Task`] is unregistered when this object is dropped.
struct UnregisterOnDrop {
task: Task,
registry: TaskRegistry,
}
impl Drop for UnregisterOnDrop {
fn drop(&mut self) {
let mut tasks = self.registry.tasks.lock();
if let Entry::Occupied(mut entry) = (*tasks).entry(self.task.clone()) {
*entry.get_mut() -= 1;
if *entry.get() == 0 {
entry.remove();
}
}
}
}
/// Represents a running async task in the [`TaskManager`].
///
/// As a task is identified by a name and a group, it is entirely valid that there exist multiple
/// tasks with the same name and group.
#[derive(Clone, Hash, Eq, PartialEq)]
pub struct Task {
/// The name of the task.
pub name: &'static str,
/// The group this task is associated to.
pub group: &'static str,
}
impl Task {
/// Returns if the `group` is the [`DEFAULT_GROUP_NAME`].
pub fn is_default_group(&self) -> bool {
self.group == DEFAULT_GROUP_NAME
}
}
/// Keeps track of all running [`Task`]s in [`TaskManager`].
#[derive(Clone, Default)]
pub struct TaskRegistry {
tasks: Arc<Mutex<HashMap<Task, usize>>>,
}
impl TaskRegistry {
/// Register a task with the given `name` and `group`.
///
/// Returns [`UnregisterOnDrop`] that ensures that the task is unregistered when this value is
/// dropped.
fn register_task(&self, name: &'static str, group: &'static str) -> UnregisterOnDrop {
let task = Task { name, group };
{
let mut tasks = self.tasks.lock();
*(*tasks).entry(task.clone()).or_default() += 1;
}
UnregisterOnDrop { task, registry: self.clone() }
}
/// Returns the running tasks.
///
/// As a task is only identified by its `name` and `group`, there can be duplicate tasks. The
/// number per task represents the concurrently running tasks with the same identifier.
pub fn running_tasks(&self) -> HashMap<Task, usize> {
(*self.tasks.lock()).clone()
}
}
| SpawnEssentialTaskHandle | identifier_name |
mod.rs | // Copyright © 2020 Brian Merchant.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub mod quantity;
use crate::cell::chemistry::RgtpDistribution;
use crate::math::geometry::{calc_poly_area, BBox};
use crate::math::v2d::V2d;
use crate::parameters::quantity::{
Diffusion, Force, Length, Quantity, Stress, Time, Tinv, Viscosity,
};
use crate::NVERTS;
use modify_derive::Modify;
use rand_distr::num_traits::Pow;
use serde::{Deserialize, Serialize};
use std::f64::consts::PI;
/// Characteristic quantities used for normalization.
#[derive(
Clone, Copy, Deserialize, Serialize, Default, Debug, PartialEq, Modify,
)]
pub struct CharacteristicQuantities {
pub eta: Viscosity,
pub f: Force,
pub l: Length,
pub t: Time,
pub l3d: Length,
pub kgtp: Tinv,
}
impl CharacteristicQuantities {
/// Given a quantity `q`, normalize its units using the primary units `f` (`Force`),
/// `l` (`Length`) and `t` (`Time`) provided in this `CharacteristicQuantities`.
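/// For example, a quantity whose units are `F^a · L^b · T^c` is divided by
/// `f^a · l^b · t^c`, leaving a plain dimensionless number.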
pub fn normalize<T: Quantity>(&self, q: &T) -> f64 {
let q = q.g();
let u = q.units();
(q * self.f.pow(-1.0 * u.f)
* self.l.pow(-1.0 * u.l)
* self.t.pow(-1.0 * u.t))
.number()
}
pub fn time(&self) -> f64 {
self.t.0
}
}
#[derive(
Clone, Copy, Debug, Default, Deserialize, Serialize, PartialEq, Modify,
)]
pub struct RawCloseBounds {
pub zero_at: Length,
pub one_at: Length,
}
impl RawCloseBounds {
pub fn new(zero_at: Length, one_at: Length) -> RawCloseBounds {
RawCloseBounds { zero_at, one_at }
}
}
#[derive(
Copy, Clone, Debug, Default, Deserialize, Serialize, PartialEq, Modify,
)]
pub struct RawPhysicalContactParams {
pub crl_one_at: Length,
pub zero_at: Length,
pub cil_mag: f64,
pub adh_break: Option<Length>,
pub adh_mag: Option<Force>,
pub cal_mag: Option<f64>,
}
impl RawPhysicalContactParams {
pub fn refine(
&self,
cq: &CharacteristicQuantities,
) -> PhysicalContactParams {
let zero_at = cq.normalize(&self.zero_at);
let crl_one_at = cq.normalize(&self.crl_one_at);
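// If no explicit adhesion break distance is supplied, fall back to the CRL
// one-at distance; the adhesion resting length is then half the break distance.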
let adh_break =
cq.normalize(&self.adh_break.unwrap_or(self.crl_one_at));
let adh_rest = 0.5 * adh_break;
PhysicalContactParams {
zero_at,
zero_at_sq: zero_at.pow(2),
crl_one_at,
adh_rest,
adh_break,
adh_mag: self.adh_mag.map(|adh_mag| cq.normalize(&adh_mag)),
cal_mag: self.cal_mag,
cil_mag: self.cil_mag,
}
}
}
#[derive(
Deserialize, Serialize, Clone, Copy, PartialEq, Default, Debug, Modify,
)]
pub struct RawCoaParams {
/// Factor controlling to what extent line-of-sight blockage should be
/// penalized.
pub los_penalty: f64,
/// Distance from point of emission at which COA signal reaches half
/// its maximum value.
pub halfmax_dist: Length,
/// Magnitude of COA. It will be divided by `NVERTS` so that it scales based
/// on the number of vertices.
pub mag: f64,
/// If two vertices are within this distance, then COA cannot occur between them.
pub too_close_dist: Length,
}
impl RawCoaParams {
pub fn refine(&self, bq: &CharacteristicQuantities) -> CoaParams {
let halfmax_dist = bq.normalize(&self.halfmax_dist);
CoaParams {
los_penalty: self.los_penalty,
halfmax_dist,
vertex_mag: self.mag / NVERTS as f64,
// self.mag * exp(distrib_exp * x), where x is distance
// between points.
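// With distrib_exp = ln(0.5) / halfmax_dist, the factor exp(distrib_exp * x)
// equals 0.5 exactly when x == halfmax_dist, i.e. the COA signal falls to half
// its maximum at the half-max distance.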
distrib_exp: 0.5f64.ln() / halfmax_dist,
too_close_dist_sq: bq.normalize(&self.too_close_dist).pow(2),
}
}
}
#[derive(Deserialize, Serialize, Clone, Copy, PartialEq, Default, Debug)]
pub struct RawChemAttrParams {
pub center: [Length; 2],
pub mag: f64,
pub drop_per_char_l: f64,
pub char_l: Length,
}
impl RawChemAttrParams {
pub fn refine(&self, bq: &CharacteristicQuantities) -> ChemAttrParams {
ChemAttrParams {
center: V2d {
x: bq.normalize(&self.center[0]),
y: bq.normalize(&self.center[1]),
},
center_mag: self.mag,
slope: self.drop_per_char_l / bq.normalize(&self.char_l),
}
}
}
#[derive(Deserialize, Serialize, Clone, Copy, PartialEq, Default, Debug)]
pub struct RawBdryParams {
shape: [[Length; 2]; 4],
skip_bb_check: bool,
mag: f64,
}
impl RawBdryParams {
pub fn refine(&self, bq: &CharacteristicQuantities) -> BdryParams {
let shape = self
.shape
.iter()
.map(|p| V2d {
x: bq.normalize(&p[0]),
y: bq.normalize(&p[1]),
})
.collect::<Vec<V2d>>();
let bbox = BBox::from_points(&shape);
BdryParams {
shape,
bbox,
skip_bb_check: self.skip_bb_check,
mag: self.mag,
}
}
}
#[derive(
Deserialize, Serialize, Clone, Copy, PartialEq, Default, Debug, Modify,
)]
pub struct RawInteractionParams {
pub coa: Option<RawCoaParams>,
pub chem_attr: Option<RawChemAttrParams>,
pub bdry: Option<RawBdryParams>,
pub phys_contact: RawPhysicalContactParams,
}
impl RawInteractionParams {
pub fn refine(&self, bq: &CharacteristicQuantities) -> InteractionParams {
InteractionParams {
coa: self.coa.as_ref().map(|coa| coa.refine(bq)),
chem_attr: self
.chem_attr
.as_ref()
.map(|chem_attr| chem_attr.refine(bq)),
bdry: self.bdry.as_ref().map(|bdry| bdry.refine(bq)),
phys_contact: self.phys_contact.refine(bq),
}
}
}
#[derive(
Deserialize, Serialize, Copy, Clone, PartialEq, Default, Debug, Modify,
)]
pub struct RawWorldParameters {
pub vertex_eta: Viscosity,
pub interactions: RawInteractionParams,
}
#[derive(Clone, Copy, Deserialize, Serialize, PartialEq, Default, Debug)]
pub struct PhysicalContactParams {
/// If two points are within this range, then they are considered
/// to be in contact for the purposes of CRL and adhesion.
pub zero_at: f64,
/// The square of `zero_at`.
pub zero_at_sq: f64,
/// If two points are within this range, then they are considered
/// to be in maximal contact, so that there is no smoothing factor
/// applied to CRL (i.e. the smoothing factor is `1.0`).
pub crl_one_at: f64,
/// The resting length of an adhesion. Set to half of `adh_break` in `refine`.
pub adh_rest: f64,
/// This is the distance at which the adhesion bond starts breaking/stops developing.
pub adh_break: f64,
/// Optional adhesion magnitude. If it is `None`, no adhesion
/// will be calculated.
pub adh_mag: Option<f64>,
/// Optional CAL magnitude. If it is `None`, simulation will
/// always execute CIL upon contact.
pub cal_mag: Option<f64>,
/// Magnitude of CIL that acts on Rho GTPase activation/
/// inactivation rates.
pub cil_mag: f64,
}
#[derive(Clone, Copy, Deserialize, Serialize, PartialEq, Debug)]
pub struct CoaParams {
//TODO: Expand upon LOS system.
/// Factor controlling to what extent line-of-sight blockage
/// should be penalized. See SI for further information.
pub los_penalty: f64,
/// The distance at which COA signal reaches half-maximum value.
pub halfmax_dist: f64,
/// Magnitude of COA that acts on Rac1 activation rates.
pub vertex_mag: f64,
//TODO: look up exactly what is being done for this (see where
// parameter is being generated for hint).
/// Factor controlling the shape of the exponential modelling
/// COA interaction (a function shaping parameter). It determines
/// the distance at which two points would sense COA at half-max
/// magnitude.
pub distrib_exp: f64,
/// If the distance between two vertices is less than the square root of this value, then COA
/// cannot occur between them.
pub too_close_dist_sq: f64,
}
#[derive(Clone, Copy, Deserialize, Serialize, PartialEq, Debug)]
pub struct ChemAttrParams {
/// Location of the chemoattractant center.
pub center: V2d,
/// Magnitude of chemoattractant a cell would sense if it were
/// right on top of the chemoattractant source.
pub center_mag: f64,
/// Assuming shallow chemoattractant gradient, which can be
/// modelled using a linear function with slope `slope`.
pub slope: f64,
}
#[derive(Clone, Deserialize, Serialize, PartialEq, Debug)]
pub struct BdryParams {
/// Shape of the boundary.
pub shape: Vec<V2d>,
/// Bounding box of the boundary.
pub bbox: BBox,
/// Should boundary bounding box be checked to see if cell is
/// within the boundary?
pub skip_bb_check: bool,
/// Magnitude of CIL-type interaction.
pub mag: f64,
}
#[derive(Clone, Deserialize, Serialize, PartialEq, Default, Debug)]
pub struct InteractionParams {
pub phys_contact: PhysicalContactParams,
pub coa: Option<CoaParams>,
pub chem_attr: Option<ChemAttrParams>,
pub bdry: Option<BdryParams>,
}
#[derive(Clone, Deserialize, Serialize, PartialEq, Default, Debug)]
pub struct WorldParameters {
/// Viscosity value used to calculate change in position of a
/// vertex due to calculated forces on it.
pub vertex_eta: f64,
pub interactions: InteractionParams,
}
impl RawWorldParameters {
pub fn refine(&self, bq: &CharacteristicQuantities) -> WorldParameters {
WorldParameters {
vertex_eta: bq.normalize(&self.vertex_eta),
interactions: self.interactions.refine(bq),
}
}
}
/// The "raw", unprocessed, parameters that are supplied by the user.
#[derive(Clone, Copy, Modify)]
pub struct RawParameters {
/// Cell diameter.
pub cell_diam: Length,
/// Fraction of max force achieved at `rgtp_act_at_max_f`.
pub halfmax_rgtp_max_f_frac: f64,
/// Stiffness of the membrane-cortex complex.
pub stiffness_cortex: Stress,
/// Typical height of a lamellipod (on the order of 100 nm).
pub lm_h: Length,
/// Halfmax Rho GTPase activity.
pub halfmax_rgtp_frac: f64,
/// Lamellipod stall stress: how much stress can lamellipod exert at most.
pub lm_ss: Stress,
/// Friction force opposing RhoA pulling.
pub rho_friction: f64,
/// Stiffness of cytoplasm.
pub stiffness_cyto: Force,
/// Diffusion rate of Rho GTPase on membrane.
pub diffusion_rgtp: Diffusion,
/// Initial distribution of Rac1.
pub init_rac: RgtpDistribution,
/// Initial distribution of RhoA.
pub init_rho: RgtpDistribution,
/// Baseline Rac1 activation rate.
pub kgtp_rac: Tinv,
/// Rac1 auto-activation rate.
pub kgtp_rac_auto: Tinv,
/// Baseline Rac1 inactivation rate.
pub kdgtp_rac: Tinv,
/// RhoA mediated inhibition of Rac1.
pub kdgtp_rho_on_rac: Tinv,
/// Strain at which Rac1 tension-mediated inhibition is half-strength.
pub halfmax_tension_inhib: f64,
/// Maximum tension-mediated Rac1 inhibition as a multiple of baseline Rac1 inactivation rate.
pub tension_inhib: f64,
/// Rate at which inactive membrane bound Rho GTPase dissociates from the
/// membrane.
pub k_mem_off: Tinv,
/// Rate at which cytosolic Rho GTPase associates with the membrane.
pub k_mem_on: Tinv,
/// Baseline RhoA activation rate.
pub kgtp_rho: Tinv,
/// RhoA auto-activation rate.
pub kgtp_auto_rho: Tinv,
/// Baseline RhoA inactivation rate.
pub kdgtp_rho: Tinv,
/// Rac1 mediated inhibition of RhoA.
pub kdgtp_rac_on_rho: Tinv,
/// Enable randomization of bursts in Rac1 activity?
pub randomization: bool,
/// Average period between randomization events.
pub rand_avg_t: Time,
/// Standard deviation of period between randomization events.
pub rand_std_t: Time,
/// Magnitude of randomly applied factor affecting Rac1 activation rate: how big a burst?
pub rand_mag: f64,
/// Fraction of vertices to be selected for increased Rac1 activation due to random events.
pub rand_vs: f64,
}
#[derive(Copy, Clone, Deserialize, Serialize, Default, Debug, PartialEq)]
pub struct Parameters {
/// Resting cell radius.
pub cell_r: f64,
/// Resting edge length.
pub rest_edge_len: f64,
/// Resting area.
pub rest_area: f64, | pub const_retractive: f64,
/// Stiffness of cytoplasm.
pub stiffness_cyto: f64,
/// Rate of Rho GTPase GDI unbinding and subsequent membrane attachment.
pub k_mem_on_vertex: f64,
    /// Rate of Rho GTPase membrane dissociation.
pub k_mem_off: f64,
/// Diffusion rate of Rho GTPase on membrane.
pub diffusion_rgtp: f64,
/// Initial distribution of Rac1.
pub init_rac: RgtpDistribution,
/// Initial distribution of RhoA.
pub init_rho: RgtpDistribution,
/// Halfmax Rho GTPase activity per vertex.
pub halfmax_vertex_rgtp: f64,
/// Halfmax Rho GTPase activity per vertex as concentration.
pub halfmax_vertex_rgtp_conc: f64,
/// Baseline Rac1 activation rate.
pub kgtp_rac: f64,
/// Rac1 auto-activation rate as a multiple of baseline Rac1 activation rate.
pub kgtp_rac_auto: f64,
/// Baseline Rac1 inactivation rate.
pub kdgtp_rac: f64,
/// RhoA mediated inhibition of Rac1 as a multiple of baseline Rac1 inactivation rate.
pub kdgtp_rho_on_rac: f64,
/// Strain at which Rac1 tension-mediated inhibition is half-strength.
pub halfmax_tension_inhib: f64,
/// Tension-mediated Rac1 inhibition as a multiple of baseline Rac1 inactivation rate.
pub tension_inhib: f64,
/// Baseline RhoA activation rate.
pub kgtp_rho: f64,
/// RhoA auto-activation rate as a multiple of baseline RhoA activation rate.
pub kgtp_rho_auto: f64,
/// Baseline RhoA inactivation rate.
pub kdgtp_rho: f64,
/// Rac1 mediated inhibition of RhoA as a multiple of baseline RhoA inactivation rate.
pub kdgtp_rac_on_rho: f64,
/// Enable randomization of bursts in Rac1 activity?
pub randomization: bool,
/// Average time between random events, in timepoints.
pub rand_avg_t: f64,
/// Standard deviation of time between random events, in timepoints.
pub rand_std_t: f64,
/// Magnitude of factor randomly applied to Rac1 activation rate.
pub rand_mag: f64,
/// Number of vertices to be selected for random Rac1 activity boost.
pub num_rand_vs: u32,
}
impl RawParameters {
pub fn refine(&self, bq: &CharacteristicQuantities) -> Parameters {
let cell_r = self.cell_diam.scale(0.5);
let rel = self.cell_diam.scale((PI / (NVERTS as f64)).sin());
let ra = Length(1.0)
.pow(2.0)
.scale(calc_init_cell_area(cell_r.number()));
let const_protrusive = (self.lm_h.g() * self.lm_ss.g() * rel.g())
.scale(self.halfmax_rgtp_max_f_frac);
let const_retractive = const_protrusive.scale(self.rho_friction);
let halfmax_vertex_rgtp = self.halfmax_rgtp_frac / NVERTS as f64;
let halfmax_vertex_rgtp_conc = rel.pow(-1.0).scale(halfmax_vertex_rgtp);
let stiffness_edge = self.stiffness_cortex.g() * bq.l3d.g();
let stiffness_cyto = self.stiffness_cyto.g().scale(1.0 / NVERTS as f64);
Parameters {
cell_r: bq.normalize(&cell_r),
rest_edge_len: bq.normalize(&rel),
rest_area: bq.normalize(&ra),
stiffness_edge: bq.normalize(&stiffness_edge),
const_protrusive: bq.normalize(&const_protrusive),
const_retractive: bq.normalize(&const_retractive),
stiffness_cyto: bq.normalize(&stiffness_cyto),
k_mem_on_vertex: bq.normalize(&self.k_mem_on) / NVERTS as f64,
k_mem_off: bq.normalize(&self.k_mem_off),
diffusion_rgtp: bq.normalize(&self.diffusion_rgtp),
init_rac: self.init_rac,
init_rho: self.init_rho,
halfmax_vertex_rgtp,
halfmax_vertex_rgtp_conc: bq.normalize(&halfmax_vertex_rgtp_conc),
kgtp_rac: bq.normalize(&self.kgtp_rac),
kgtp_rac_auto: bq.normalize(&self.kgtp_rac_auto),
kdgtp_rac: bq.normalize(&self.kdgtp_rac),
kdgtp_rho_on_rac: bq.normalize(&self.kdgtp_rho_on_rac),
halfmax_tension_inhib: self.halfmax_tension_inhib,
tension_inhib: self.tension_inhib,
kgtp_rho: bq.normalize(&self.kgtp_rho),
kgtp_rho_auto: bq.normalize(&self.kgtp_auto_rho),
kdgtp_rho: bq.normalize(&self.kdgtp_rho),
kdgtp_rac_on_rho: bq.normalize(&self.kdgtp_rac_on_rho),
randomization: self.randomization,
rand_avg_t: bq.normalize(&self.rand_avg_t).ceil(),
rand_std_t: bq.normalize(&self.rand_std_t).ceil(),
rand_mag: self.rand_mag,
num_rand_vs: (self.rand_vs * NVERTS as f64) as u32,
}
}
}
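// Sketch added for illustration (not part of the original source): the
// resting edge length computed in `refine`, `cell_diam * sin(PI / NVERTS)`,
// is the chord between adjacent vertices of a regular `NVERTS`-gon inscribed
// in a circle of diameter `cell_diam`. The standalone check below assumes
// `NVERTS = 16` and a made-up diameter; both are assumptions of the sketch.
#[cfg(test)]
mod rest_edge_len_sketch {
    use std::f64::consts::PI;

    #[test]
    fn chord_length_matches_sine_formula() {
        let n = 16.0_f64; // hypothetical vertex count
        let diam = 40.0_f64; // hypothetical normalized cell diameter
        let r = 0.5 * diam;
        // Two adjacent vertices of the regular polygon.
        let (x0, y0) = (r, 0.0);
        let theta = 2.0 * PI / n;
        let (x1, y1) = (r * theta.cos(), r * theta.sin());
        let chord = ((x1 - x0).powi(2) + (y1 - y0).powi(2)).sqrt();
        assert!((chord - diam * (PI / n).sin()).abs() < 1e-9);
    }
}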
/// Calculate the area of an "ideal" initial cell of radius R, if it has
/// `NVERTS` vertices.
pub fn calc_init_cell_area(r: f64) -> f64 {
let poly_coords = (0..NVERTS)
.map(|vix| {
let theta = (vix as f64) / (NVERTS as f64) * 2.0 * PI;
V2d {
x: r * theta.cos(),
y: r * theta.sin(),
}
})
.collect::<Vec<V2d>>();
calc_poly_area(&poly_coords)
} | /// Stiffness of edge.
pub stiffness_edge: f64,
/// Rac1 mediated protrusive force constant.
pub const_protrusive: f64,
    /// RhoA mediated retractive force constant. | random_line_split
mod.rs | // Copyright © 2020 Brian Merchant.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub mod quantity;
use crate::cell::chemistry::RgtpDistribution;
use crate::math::geometry::{calc_poly_area, BBox};
use crate::math::v2d::V2d;
use crate::parameters::quantity::{
Diffusion, Force, Length, Quantity, Stress, Time, Tinv, Viscosity,
};
use crate::NVERTS;
use modify_derive::Modify;
use rand_distr::num_traits::Pow;
use serde::{Deserialize, Serialize};
use std::f64::consts::PI;
/// Characteristic quantities used for normalization.
#[derive(
Clone, Copy, Deserialize, Serialize, Default, Debug, PartialEq, Modify,
)]
pub struct CharacteristicQuantities {
pub eta: Viscosity,
pub f: Force,
pub l: Length,
pub t: Time,
pub l3d: Length,
pub kgtp: Tinv,
}
impl CharacteristicQuantities {
/// Given a quantity `q`, normalize its units using the primary units `f` (Force),
/// `l` (`Length`) and `t` (`Time`) provided in `CharQuants`.
pub fn normalize<T: Quantity>(&self, q: &T) -> f64 {
let q = q.g();
let u = q.units();
(q * self.f.pow(-1.0 * u.f)
* self.l.pow(-1.0 * u.l)
* self.t.pow(-1.0 * u.t))
.number()
}
pub fn time(&self) -> f64 {
self.t.0
}
}
#[derive(
Clone, Copy, Debug, Default, Deserialize, Serialize, PartialEq, Modify,
)]
pub struct RawCloseBounds {
pub zero_at: Length,
pub one_at: Length,
}
impl RawCloseBounds {
pub fn new(zero_at: Length, one_at: Length) -> RawCloseBounds {
RawCloseBounds { zero_at, one_at }
}
}
#[derive(
Copy, Clone, Debug, Default, Deserialize, Serialize, PartialEq, Modify,
)]
pub struct RawPhysicalContactParams {
pub crl_one_at: Length,
pub zero_at: Length,
pub cil_mag: f64,
pub adh_break: Option<Length>,
pub adh_mag: Option<Force>,
pub cal_mag: Option<f64>,
}
impl RawPhysicalContactParams {
pub fn refine(
&self,
cq: &CharacteristicQuantities,
) -> PhysicalContactParams {
let zero_at = cq.normalize(&self.zero_at);
let crl_one_at = cq.normalize(&self.crl_one_at);
let adh_break =
cq.normalize(&self.adh_break.unwrap_or(self.crl_one_at));
let adh_rest = 0.5 * adh_break;
PhysicalContactParams {
zero_at,
zero_at_sq: zero_at.pow(2),
crl_one_at,
adh_rest,
adh_break,
adh_mag: self.adh_mag.map(|adh_mag| cq.normalize(&adh_mag)),
cal_mag: self.cal_mag,
cil_mag: self.cil_mag,
}
}
}
#[derive(
Deserialize, Serialize, Clone, Copy, PartialEq, Default, Debug, Modify,
)]
pub struct RawCoaParams {
/// Factor controlling to what extent line-of-sight blockage should be
/// penalized.
pub los_penalty: f64,
/// Distance from point of emission at which COA signal reaches half
/// its maximum value.
pub halfmax_dist: Length,
/// Magnitude of COA. It will be divided by `NVERTS` so that it scales based
/// on the number of vertices.
pub mag: f64,
/// If two vertices are within this distance, then COA cannot occur between them.
pub too_close_dist: Length,
}
impl RawCoaParams {
pub fn refine(&self, bq: &CharacteristicQuantities) -> CoaParams {
let halfmax_dist = bq.normalize(&self.halfmax_dist);
CoaParams {
los_penalty: self.los_penalty,
halfmax_dist,
vertex_mag: self.mag / NVERTS as f64,
// self.mag * exp(distrib_exp * x), where x is distance
// between points.
distrib_exp: 0.5f64.ln() / halfmax_dist,
too_close_dist_sq: bq.normalize(&self.too_close_dist).pow(2),
}
}
}
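// Sketch added for illustration (not part of the original source): a quick
// numerical check of the falloff defined above, `mag * exp(distrib_exp * x)`
// with `distrib_exp = ln(0.5) / halfmax_dist`. At `x = halfmax_dist` the
// factor is exactly 0.5, i.e. the COA signal is at half its maximum. The
// distance used below is a made-up number for the sketch.
#[cfg(test)]
mod coa_falloff_sketch {
    #[test]
    fn halfmax_dist_gives_half_signal() {
        let halfmax_dist = 110.0_f64; // hypothetical normalized distance
        let distrib_exp = 0.5_f64.ln() / halfmax_dist;
        let factor = (distrib_exp * halfmax_dist).exp();
        assert!((factor - 0.5).abs() < 1e-12);
    }
}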
#[derive(Deserialize, Serialize, Clone, Copy, PartialEq, Default, Debug)]
pub struct RawChemAttrParams {
pub center: [Length; 2],
pub mag: f64,
pub drop_per_char_l: f64,
pub char_l: Length,
}
impl RawChemAttrParams {
pub fn refine(&self, bq: &CharacteristicQuantities) -> ChemAttrParams {
ChemAttrParams {
center: V2d {
x: bq.normalize(&self.center[0]),
y: bq.normalize(&self.center[1]),
},
center_mag: self.mag,
slope: self.drop_per_char_l / bq.normalize(&self.char_l),
}
}
}
#[derive(Deserialize, Serialize, Clone, Copy, PartialEq, Default, Debug)]
pub struct RawBdryParams {
shape: [[Length; 2]; 4],
skip_bb_check: bool,
mag: f64,
}
impl RawBdryParams {
pub fn refine(&self, bq: &CharacteristicQuantities) -> BdryParams {
let shape = self
.shape
.iter()
.map(|p| V2d {
x: bq.normalize(&p[0]),
y: bq.normalize(&p[1]),
})
.collect::<Vec<V2d>>();
let bbox = BBox::from_points(&shape);
BdryParams {
shape,
bbox,
skip_bb_check: self.skip_bb_check,
mag: self.mag,
}
}
}
#[derive(
Deserialize, Serialize, Clone, Copy, PartialEq, Default, Debug, Modify,
)]
pub struct RawInteractionParams {
pub coa: Option<RawCoaParams>,
pub chem_attr: Option<RawChemAttrParams>,
pub bdry: Option<RawBdryParams>,
pub phys_contact: RawPhysicalContactParams,
}
impl RawInteractionParams {
pub fn refine(&self, bq: &CharacteristicQuantities) -> InteractionParams {
InteractionParams {
coa: self.coa.as_ref().map(|coa| coa.refine(bq)),
chem_attr: self
.chem_attr
.as_ref()
.map(|chem_attr| chem_attr.refine(bq)),
bdry: self.bdry.as_ref().map(|bdry| bdry.refine(bq)),
phys_contact: self.phys_contact.refine(bq),
}
}
}
#[derive(
Deserialize, Serialize, Copy, Clone, PartialEq, Default, Debug, Modify,
)]
pub struct RawWorldParameters {
pub vertex_eta: Viscosity,
pub interactions: RawInteractionParams,
}
#[derive(Clone, Copy, Deserialize, Serialize, PartialEq, Default, Debug)]
pub struct PhysicalContactParams {
/// If two points are within this range, then they are considered
/// to be in contact for the purposes of CRL and adhesion.
pub zero_at: f64,
/// The square of `zero_at`.
pub zero_at_sq: f64,
/// If two points are within this range, then they are considered
/// to be in maximal contact, so that there is no smoothing factor
/// applied to CRL (i.e. the smoothing factor is `1.0`).
pub crl_one_at: f64,
    /// The resting length of an adhesion. Set to `0.5 * adh_break`.
pub adh_rest: f64,
    /// The distance at which the adhesion bond starts breaking/stops developing.
pub adh_break: f64,
/// Optional adhesion magnitude. If it is `None`, no adhesion
/// will be calculated.
pub adh_mag: Option<f64>,
/// Optional CAL magnitude. If it is `None`, simulation will
/// always execute CIL upon contact.
pub cal_mag: Option<f64>,
/// Magnitude of CIL that acts on Rho GTPase activation/
/// inactivation rates.
pub cil_mag: f64,
}
#[derive(Clone, Copy, Deserialize, Serialize, PartialEq, Debug)]
pub struct CoaParams {
//TODO: Expand upon LOS system.
/// Factor controlling to what extent line-of-sight blockage
/// should be penalized. See SI for further information.
pub los_penalty: f64,
/// The distance at which COA signal reaches half-maximum value.
pub halfmax_dist: f64,
/// Magnitude of COA that acts on Rac1 activation rates.
pub vertex_mag: f64,
//TODO: look up exactly what is being done for this (see where
// parameter is being generated for hint).
/// Factor controlling the shape of the exponential modelling
/// COA interaction (a function shaping parameter). It determines
/// the distance at which two points would sense COA at half-max
/// magnitude.
pub distrib_exp: f64,
    /// If the squared distance between two vertices is less than this value, then COA cannot
    /// occur between them.
pub too_close_dist_sq: f64,
}
#[derive(Clone, Copy, Deserialize, Serialize, PartialEq, Debug)]
pub struct ChemAttrParams {
/// Location of the chemoattractant center.
pub center: V2d,
/// Magnitude of chemoattractant a cell would sense if it were
/// right on top of the chemoattractant source.
pub center_mag: f64,
/// Assuming shallow chemoattractant gradient, which can be
/// modelled using a linear function with slope `slope`.
pub slope: f64,
}
#[derive(Clone, Deserialize, Serialize, PartialEq, Debug)]
pub struct BdryParams {
/// Shape of the boundary.
pub shape: Vec<V2d>,
/// Bounding box of the boundary.
pub bbox: BBox,
/// Should boundary bounding box be checked to see if cell is
/// within the boundary?
pub skip_bb_check: bool,
/// Magnitude of CIL-type interaction.
pub mag: f64,
}
#[derive(Clone, Deserialize, Serialize, PartialEq, Default, Debug)]
pub struct InteractionParams {
pub phys_contact: PhysicalContactParams,
pub coa: Option<CoaParams>,
pub chem_attr: Option<ChemAttrParams>,
pub bdry: Option<BdryParams>,
}
#[derive(Clone, Deserialize, Serialize, PartialEq, Default, Debug)]
pub struct W | {
/// Viscosity value used to calculate change in position of a
/// vertex due to calculated forces on it.
pub vertex_eta: f64,
pub interactions: InteractionParams,
}
impl RawWorldParameters {
pub fn refine(&self, bq: &CharacteristicQuantities) -> WorldParameters {
WorldParameters {
vertex_eta: bq.normalize(&self.vertex_eta),
interactions: self.interactions.refine(bq),
}
}
}
/// The "raw", unprocessed, parameters that are supplied by the user.
#[derive(Clone, Copy, Modify)]
pub struct RawParameters {
/// Cell diameter.
pub cell_diam: Length,
/// Fraction of max force achieved at `rgtp_act_at_max_f`.
pub halfmax_rgtp_max_f_frac: f64,
/// Stiffness of the membrane-cortex complex.
pub stiffness_cortex: Stress,
    /// Typical lamellipod height (on the order of 100 nm).
pub lm_h: Length,
/// Halfmax Rho GTPase activity.
pub halfmax_rgtp_frac: f64,
/// Lamellipod stall stress: how much stress can lamellipod exert at most.
pub lm_ss: Stress,
/// Friction force opposing RhoA pulling.
pub rho_friction: f64,
/// Stiffness of cytoplasm.
pub stiffness_cyto: Force,
/// Diffusion rate of Rho GTPase on membrane.
pub diffusion_rgtp: Diffusion,
/// Initial distribution of Rac1.
pub init_rac: RgtpDistribution,
/// Initial distribution of RhoA.
pub init_rho: RgtpDistribution,
/// Baseline Rac1 activation rate.
pub kgtp_rac: Tinv,
/// Rac1 auto-activation rate.
pub kgtp_rac_auto: Tinv,
/// Baseline Rac1 inactivation rate.
pub kdgtp_rac: Tinv,
/// RhoA mediated inhibition of Rac1.
pub kdgtp_rho_on_rac: Tinv,
/// Strain at which Rac1 tension-mediated inhibition is half-strength.
pub halfmax_tension_inhib: f64,
/// Maximum tension-mediated Rac1 inhibition as a multiple of baseline Rac1 inactivation rate.
pub tension_inhib: f64,
/// Rate at which inactive membrane bound Rho GTPase dissociates from the
/// membrane.
pub k_mem_off: Tinv,
/// Rate at which cytosolic Rho GTPase associates with the membrane.
pub k_mem_on: Tinv,
/// Baseline RhoA activation rate.
pub kgtp_rho: Tinv,
/// RhoA auto-activation rate.
pub kgtp_auto_rho: Tinv,
/// Baseline RhoA inactivation rate.
pub kdgtp_rho: Tinv,
/// Rac1 mediated inhibition of RhoA.
pub kdgtp_rac_on_rho: Tinv,
/// Enable randomization of bursts in Rac1 activity?
pub randomization: bool,
/// Average period between randomization events.
pub rand_avg_t: Time,
/// Standard deviation of period between randomization events.
pub rand_std_t: Time,
/// Magnitude of randomly applied factor affecting Rac1 activation rate: how big a burst?
pub rand_mag: f64,
/// Fraction of vertices to be selected for increased Rac1 activation due to random events.
pub rand_vs: f64,
}
#[derive(Copy, Clone, Deserialize, Serialize, Default, Debug, PartialEq)]
pub struct Parameters {
/// Resting cell radius.
pub cell_r: f64,
/// Resting edge length.
pub rest_edge_len: f64,
/// Resting area.
pub rest_area: f64,
/// Stiffness of edge.
pub stiffness_edge: f64,
/// Rac1 mediated protrusive force constant.
pub const_protrusive: f64,
    /// RhoA mediated retractive force constant.
pub const_retractive: f64,
/// Stiffness of cytoplasm.
pub stiffness_cyto: f64,
/// Rate of Rho GTPase GDI unbinding and subsequent membrane attachment.
pub k_mem_on_vertex: f64,
    /// Rate of Rho GTPase membrane dissociation.
pub k_mem_off: f64,
/// Diffusion rate of Rho GTPase on membrane.
pub diffusion_rgtp: f64,
/// Initial distribution of Rac1.
pub init_rac: RgtpDistribution,
/// Initial distribution of RhoA.
pub init_rho: RgtpDistribution,
/// Halfmax Rho GTPase activity per vertex.
pub halfmax_vertex_rgtp: f64,
/// Halfmax Rho GTPase activity per vertex as concentration.
pub halfmax_vertex_rgtp_conc: f64,
/// Baseline Rac1 activation rate.
pub kgtp_rac: f64,
/// Rac1 auto-activation rate as a multiple of baseline Rac1 activation rate.
pub kgtp_rac_auto: f64,
/// Baseline Rac1 inactivation rate.
pub kdgtp_rac: f64,
/// RhoA mediated inhibition of Rac1 as a multiple of baseline Rac1 inactivation rate.
pub kdgtp_rho_on_rac: f64,
/// Strain at which Rac1 tension-mediated inhibition is half-strength.
pub halfmax_tension_inhib: f64,
/// Tension-mediated Rac1 inhibition as a multiple of baseline Rac1 inactivation rate.
pub tension_inhib: f64,
/// Baseline RhoA activation rate.
pub kgtp_rho: f64,
/// RhoA auto-activation rate as a multiple of baseline RhoA activation rate.
pub kgtp_rho_auto: f64,
/// Baseline RhoA inactivation rate.
pub kdgtp_rho: f64,
/// Rac1 mediated inhibition of RhoA as a multiple of baseline RhoA inactivation rate.
pub kdgtp_rac_on_rho: f64,
/// Enable randomization of bursts in Rac1 activity?
pub randomization: bool,
/// Average time between random events, in timepoints.
pub rand_avg_t: f64,
/// Standard deviation of time between random events, in timepoints.
pub rand_std_t: f64,
/// Magnitude of factor randomly applied to Rac1 activation rate.
pub rand_mag: f64,
/// Number of vertices to be selected for random Rac1 activity boost.
pub num_rand_vs: u32,
}
impl RawParameters {
pub fn refine(&self, bq: &CharacteristicQuantities) -> Parameters {
let cell_r = self.cell_diam.scale(0.5);
let rel = self.cell_diam.scale((PI / (NVERTS as f64)).sin());
let ra = Length(1.0)
.pow(2.0)
.scale(calc_init_cell_area(cell_r.number()));
let const_protrusive = (self.lm_h.g() * self.lm_ss.g() * rel.g())
.scale(self.halfmax_rgtp_max_f_frac);
let const_retractive = const_protrusive.scale(self.rho_friction);
let halfmax_vertex_rgtp = self.halfmax_rgtp_frac / NVERTS as f64;
let halfmax_vertex_rgtp_conc = rel.pow(-1.0).scale(halfmax_vertex_rgtp);
let stiffness_edge = self.stiffness_cortex.g() * bq.l3d.g();
let stiffness_cyto = self.stiffness_cyto.g().scale(1.0 / NVERTS as f64);
Parameters {
cell_r: bq.normalize(&cell_r),
rest_edge_len: bq.normalize(&rel),
rest_area: bq.normalize(&ra),
stiffness_edge: bq.normalize(&stiffness_edge),
const_protrusive: bq.normalize(&const_protrusive),
const_retractive: bq.normalize(&const_retractive),
stiffness_cyto: bq.normalize(&stiffness_cyto),
k_mem_on_vertex: bq.normalize(&self.k_mem_on) / NVERTS as f64,
k_mem_off: bq.normalize(&self.k_mem_off),
diffusion_rgtp: bq.normalize(&self.diffusion_rgtp),
init_rac: self.init_rac,
init_rho: self.init_rho,
halfmax_vertex_rgtp,
halfmax_vertex_rgtp_conc: bq.normalize(&halfmax_vertex_rgtp_conc),
kgtp_rac: bq.normalize(&self.kgtp_rac),
kgtp_rac_auto: bq.normalize(&self.kgtp_rac_auto),
kdgtp_rac: bq.normalize(&self.kdgtp_rac),
kdgtp_rho_on_rac: bq.normalize(&self.kdgtp_rho_on_rac),
halfmax_tension_inhib: self.halfmax_tension_inhib,
tension_inhib: self.tension_inhib,
kgtp_rho: bq.normalize(&self.kgtp_rho),
kgtp_rho_auto: bq.normalize(&self.kgtp_auto_rho),
kdgtp_rho: bq.normalize(&self.kdgtp_rho),
kdgtp_rac_on_rho: bq.normalize(&self.kdgtp_rac_on_rho),
randomization: self.randomization,
rand_avg_t: bq.normalize(&self.rand_avg_t).ceil(),
rand_std_t: bq.normalize(&self.rand_std_t).ceil(),
rand_mag: self.rand_mag,
num_rand_vs: (self.rand_vs * NVERTS as f64) as u32,
}
}
}
/// Calculate the area of an "ideal" initial cell of radius R, if it has
/// `NVERTS` vertices.
pub fn calc_init_cell_area(r: f64) -> f64 {
let poly_coords = (0..NVERTS)
.map(|vix| {
let theta = (vix as f64) / (NVERTS as f64) * 2.0 * PI;
V2d {
x: r * theta.cos(),
y: r * theta.sin(),
}
})
.collect::<Vec<V2d>>();
calc_poly_area(&poly_coords)
}
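// Sketch added for illustration (not part of the original source): the
// vertices generated above form a regular `NVERTS`-gon of circumradius `r`,
// so the returned area should equal the closed form (n / 2) * r^2 * sin(2*PI / n).
// The check below recomputes the area with the shoelace formula instead of
// calling `calc_poly_area`, so it stays self-contained; `n = 16` and the
// radius are assumptions of the sketch.
#[cfg(test)]
mod init_area_sketch {
    use std::f64::consts::PI;

    #[test]
    fn regular_polygon_area_matches_closed_form() {
        let n = 16usize; // hypothetical vertex count
        let r = 20.0_f64; // hypothetical radius
        let pts: Vec<(f64, f64)> = (0..n)
            .map(|i| {
                let theta = i as f64 / n as f64 * 2.0 * PI;
                (r * theta.cos(), r * theta.sin())
            })
            .collect();
        // Shoelace formula for the polygon area.
        let mut twice_area = 0.0;
        for i in 0..n {
            let (x0, y0) = pts[i];
            let (x1, y1) = pts[(i + 1) % n];
            twice_area += x0 * y1 - x1 * y0;
        }
        let shoelace_area = 0.5 * twice_area.abs();
        let closed_form = 0.5 * n as f64 * r * r * (2.0 * PI / n as f64).sin();
        assert!((shoelace_area - closed_form).abs() < 1e-9);
    }
}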
| orldParameters | identifier_name |
main.rs | eprintln!("ignore reason: {:?}", e);
}
}
}
Ok(StarDict { directories: items })
}
    /// Get the Ifo struct, which is parsed from the .ifo file.
pub fn info(&self) -> Vec<&ifo::Ifo> {
let mut items = Vec::with_capacity(self.directories.len());
for it in &self.directories {
items.push(&it.ifo);
}
items
}
    /// List the neighbor words following `word`, starting from offset `off`.
    /// If `off` is a negative number, start listing from `-off` words before `word`.
pub fn neighbors(&self, word: &[u8], off: i32) -> WordMergeIter<dictionary::DictNeighborIter> {
let mut wordit = Vec::with_capacity(2 * self.directories.len());
let mut cur = Vec::with_capacity(2 * self.directories.len());
for d in self.directories.iter() {
let mut x = d.neighbors(word, off);
let mut s = d.neighbors_syn(word, off);
cur.push(x.next());
cur.push(s.next());
wordit.push(x);
wordit.push(s);
}
WordMergeIter { wordit, cur }
}
    /// Search all dictionaries using the specified regular expression.
    /// To match the beginning of a word, use `^`; for the end of a word, use `$`.
pub fn search<'a>(&'a self, reg: &'a Regex) -> WordMergeIter<dictionary::IdxIter> {
let mut wordit = Vec::with_capacity(2 * self.directories.len());
let mut cur = Vec::with_capacity(2 * self.directories.len());
for d in self.directories.iter() {
//println!("in for {}", d.ifo.name.as_str());
let mut x = d.search_regex(reg);
let mut s = d.search_syn(reg);
//println!("created inner iter");
cur.push(x.next());
cur.push(s.next());
//println!("created 1st value");
wordit.push(x);
wordit.push(s);
}
WordMergeIter { wordit, cur }
}
    /// Look up the word. Search the Idx case-sensitively; if nothing is found, fall back to a
    /// case-insensitive search. Also find all case-insensitive matching words in Syn.
pub fn lookup(&self, word: &[u8]) -> Result<Vec<dictionary::LookupResult>, result::DictError> {
let mut ret: Vec<dictionary::LookupResult> = Vec::with_capacity(self.directories.len());
for d in self.directories.iter() {
if let Ok(x) = d.lookup(word) {
ret.extend(x);
}
}
Ok(ret)
}
}
struct StardictUrl {
path: [u8; 4usize],
word: Vec<u8>,
    offset: i32, // args for offset and length, may use BTreeMap, but it costs too much.
length: usize,
}
impl StardictUrl {
fn new() -> StardictUrl {
StardictUrl {
path: [0; 4],
word: Vec::with_capacity(16),
offset: 0,
length: 0,
}
}
fn byte_to_u8(b: u8) -> u8 {
match b {
b'0'..=b'9' => b - b'0',
b'A'..=b'F' => b - (b'A' - 10),
b'a'..=b'f' => b - (b'a' - 10),
_ => b,
}
}
fn add_path(&mut self, c: u8, idx: usize) {
if idx < self.path.len() {
self.path[idx] = c;
}
}
fn add_byte(&mut self, c: u8) {
self.word.push(c);
}
fn add_arg_offset(&mut self, c: i32) {
self.offset = self.offset * 10 + c;
}
fn add_arg_length(&mut self, c: usize) {
self.length = self.length * 10 + c;
}
}
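// Sketch added for illustration (not part of the original source): how the
// helpers above combine when `handle_connection` decodes a percent-encoded
// byte such as "%C3" from the request path.
#[cfg(test)]
mod url_decode_sketch {
    use super::StardictUrl;

    #[test]
    fn percent_pair_decodes_to_byte() {
        let hi = StardictUrl::byte_to_u8(b'C') << 4;
        let byte = hi | StardictUrl::byte_to_u8(b'3');
        assert_eq!(byte, 0xC3);
    }
}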
fn main() {
let mut host = String::from("0.0.0.0:8888");
//let mut host = String::from("[::]:8888");
let mut dictdir = String::from("/usr/share/stardict/dic");
let dict;
{
let mut _daemon = false;
let mut pendarg = 0u8;
for arg in env::args().skip(1) {
//parse options.
println!("cmd args: {}", &arg);
let a = arg.as_bytes();
match pendarg {
b'h' => {
host.clear();
host.push_str(&arg);
pendarg = 0;
}
b'd' => {
_daemon = true;
pendarg = 0;
}
b'r' => {
dictdir.clear();
dictdir.push_str(&arg);
pendarg = 0;
}
0 => (),
_ => {
println!("parameter: [-d] [-h host:port] [-r dict-root-dir]");
return;
}
}
if a[0] == b'-' {
pendarg = a[1];
}
}
//println!("get arg host={}, daemon={}", host, daemon);
//if daemon {
//}
dict = StarDict::new(&path::PathBuf::from(&dictdir)).unwrap();
}
println!("dict size={}", dict.directories.len());
//for d in dict.info().iter() {
// println!("dict: wordcount:{} {}", d.word_count, d.name);
//}
//webs
let listener = TcpListener::bind(&host).expect("Bind Socket failed!");
//let pool = web::ThreadPool::new(4);
let cr = {
let mut fmtp = path::PathBuf::from(&dictdir);
fmtp.push("rformat.conf");
reformat::ContentReformat::from_config_file(&fmtp)
};
for stream in listener.incoming() {
let stream = stream.expect("accept TCP failed!");
//pool.execute(
if let Err(_) = handle_connection(stream, &dict, &cr, &dictdir) {
println!("communication failed!");
}
//);
}
println!("Shutting down.");
}
fn handle_connection(
mut stream: TcpStream,
dict: &StarDict,
cr: &reformat::ContentReformat,
dictdir: &str,
) -> std::io::Result<()> {
//stream.set_nonblocking(false)?;
//stream.set_nodelay(false)?;
let mut buffer = vec![0u8; 512];
{
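        // Read the request into `buffer` until the blank line ("\r\n\r\n")
        // that ends the HTTP headers, growing the buffer in 512-byte steps;
        // bail out with 417 if the peer closes early, the request is
        // implausibly short, or it exceeds 4096 bytes.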
let mut sz = 0usize;
while let Ok(bn) = stream.read(&mut buffer[sz..]) {
sz += bn;
if bn == 0 || sz <= 4 || sz > 4096 {
stream.write(b"HTTP/1.0 417 Expectation Failed\r\n\r\nFail")?;
return Ok(());
}
if buffer[sz - 4] == b'\r'
&& buffer[sz - 3] == b'\n'
&& buffer[sz - 2] == b'\r'
&& buffer[sz - 1] == b'\n'
{
buffer.resize(sz, 0);
break;
}
if sz >= buffer.len() {
buffer.resize(buffer.len() + 512, 0);
}
}
}
let get = b"GET /";
//("HTTP/1.0 200 OK\r\nConnection: close\r\n", "index.html");
let mut content: Vec<u8> = Vec::new();
let mut surl = StardictUrl::new();
if buffer.starts_with(get) {
let mut state = 0i16; //>=0 path, -1 w, -2 p0w, -3 p1w, -4 argKey, -5 argVal
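        // URL parser states: while `state >= 0` the bytes fill `surl.path`;
        // after the first '/' the remaining bytes form the word (state -1),
        // "%XY" escapes are decoded across states -2/-3, and "?key=value"
        // query arguments (offset `o`/`O`, length `l`) are handled by
        // states -4 (key) and -5 (value).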
let mut w = 0u8;
buffer[5..]
.iter()
            .take_while(|c| **c != b' ')
.for_each(|c| {
if state < 0 {
if *c == b'%' {
state = -2;
} else if *c == b'?' {
// parse args.
state = -4;
} else {
if state == -2 {
w = StardictUrl::byte_to_u8(*c) << 4;
state = -3;
} else if state == -3 {
w |= StardictUrl::byte_to_u8(*c);
surl.add_byte(w);
state = -1;
} else if state == -4 {
if *c == b'=' {
state = -5;
} else {
w = *c;
}
} else if state == -5 {
match *c {
b'&' => {
state = -4;
}
b'-' => {
if w == b'o' {
w = b'O';
} else {
state = -32768;
}
}
b'0'..=b'9' => {
let v: i32 = (*c - b'0') as i32;
if w == b'o' {
surl.add_arg_offset(v);
} else if w == b'O' {
// negative offset
surl.add_arg_offset(-v);
} else if w == b'l' {
// length
surl.add_arg_length(v as usize);
}
}
_ => {
state = -32768;
}
}
} else {
surl.add_byte(*c);
}
}
} else if *c == b'/' {
state = -1;
} else {
surl.add_path(*c, state as usize);
state += 1;
}
});
//println!("get from url path={}, word={}, off={}, len={}", str::from_utf8(&surl.path).unwrap(), str::from_utf8(&surl.word).unwrap(), surl.offset, surl.length);
if surl.length == 0 {
surl.length = 10;
}
if surl.word.len() > 0 {
if surl.path[0] == b'W' {
//word lookup
match dict.lookup(&surl.word) {
Ok(x) => {
content.extend(b"<ol>");
for (i, e) in x.iter().enumerate() {
content.extend(b"<li><a href='#word_");
content.extend(i.to_string().as_bytes());
content.extend(b"'>");
content.extend(&e.word);
content.extend(b" : ");
content.extend(e.dictionary.name.as_bytes());
content.extend(b"</a></li>");
}
content.extend(b"</ol>\n");
for (i, e) in x.iter().enumerate() {
content.extend(b"<div id='word_");
content.extend(i.to_string().as_bytes());
content.extend(b"' class='res_word'>");
content.extend(e.dictionary.name.as_bytes());
content.extend(b" (");
content.extend(&e.word);
content.extend(b") </div><div class='res_definition'>".iter());
for (a, b) in e
.dictionary
.same_type_sequence
.as_bytes()
.iter()
.zip(e.result.split(|c| *c == 0))
{
content.extend(&cr.replace_all(
*a,
e.dictionary.dict_path.as_bytes(),
b,
));
}
content.extend(b"</div>\n");
}
}
Err(e) => println!("err: {:?}", e),
}
} else if surl.path[0] == b'n' {
//neighbor words reference
for s in dict.neighbors(&surl.word, surl.offset).take(surl.length) {
content.extend(s);
content.extend(b"\n");
}
} else if surl.path[0] == b's' {
//search with regex
match str::from_utf8(&surl.word) {
Ok(x) => match Regex::new(x) {
Ok(v) => {
content.extend(b"/~/:<ol>");
dict.search(&v).take(surl.length).for_each(|e| {
content.extend(b"<li><a>");
content.extend(e);
content.extend(b"</a></li>\n");
});
content.extend(b"</ol>");
}
Err(e) => println!("err: {:?}", e),
},
Err(e) => println!("err: {:?}", e),
}
} else if surl.path[0] == b'r' | {
//html js css page etc.
if let Ok(fname) = str::from_utf8(&surl.word) {
let mut pfile = path::PathBuf::from(dictdir);
pfile.push(fname);
if let Ok(mut f) = fs::File::open(pfile) {
if f.read_to_end(&mut content).is_err() {
content.clear();
}
}
}
} | conditional_block |
|
main.rs |
impl StarDict {
    /// Create a StarDict struct from a system path. In the path,
    /// there should be some directories; each directory contains
    /// the dict files, like .ifo, .idx, .dict, etc.
    /// The dictionaries will be sorted by their directory names.
pub fn new(root: &path::Path) -> Result<StarDict, result::DictError> {
let mut sort_dirs = Vec::new();
let mut items = Vec::new();
if root.is_dir() {
for it in fs::read_dir(root)? {
//println!("push direc: {:?}", it);
let it = it?.path();
if it.is_dir() {
sort_dirs.push(it.into_boxed_path());
}
}
}
sort_dirs.sort();
for it in sort_dirs.iter() {
match dictionary::Dictionary::new(&**it, root) {
Ok(d) => {
items.push(d);
}
Err(e) => {
eprintln!("ignore reason: {:?}", e);
}
}
}
Ok(StarDict { directories: items })
}
    /// Get the Ifo struct, which is parsed from the .ifo file.
pub fn info(&self) -> Vec<&ifo::Ifo> {
let mut items = Vec::with_capacity(self.directories.len());
for it in &self.directories {
items.push(&it.ifo);
}
items
}
    /// List the neighbor words following `word`, starting from offset `off`.
    /// If `off` is a negative number, start listing from `-off` words before `word`.
pub fn neighbors(&self, word: &[u8], off: i32) -> WordMergeIter<dictionary::DictNeighborIter> |
    /// Search all dictionaries using the specified regular expression.
    /// To match the beginning of a word, use `^`; for the end of a word, use `$`.
pub fn search<'a>(&'a self, reg: &'a Regex) -> WordMergeIter<dictionary::IdxIter> {
let mut wordit = Vec::with_capacity(2 * self.directories.len());
let mut cur = Vec::with_capacity(2 * self.directories.len());
for d in self.directories.iter() {
//println!("in for {}", d.ifo.name.as_str());
let mut x = d.search_regex(reg);
let mut s = d.search_syn(reg);
//println!("created inner iter");
cur.push(x.next());
cur.push(s.next());
//println!("created 1st value");
wordit.push(x);
wordit.push(s);
}
WordMergeIter { wordit, cur }
}
    /// Look up the word. Search the Idx case-sensitively; if nothing is found, fall back to a
    /// case-insensitive search. Also find all case-insensitive matching words in Syn.
pub fn lookup(&self, word: &[u8]) -> Result<Vec<dictionary::LookupResult>, result::DictError> {
let mut ret: Vec<dictionary::LookupResult> = Vec::with_capacity(self.directories.len());
for d in self.directories.iter() {
if let Ok(x) = d.lookup(word) {
ret.extend(x);
}
}
Ok(ret)
}
}
struct StardictUrl {
path: [u8; 4usize],
word: Vec<u8>,
    offset: i32, // args for offset and length, may use BTreeMap, but it costs too much.
length: usize,
}
impl StardictUrl {
fn new() -> StardictUrl {
StardictUrl {
path: [0; 4],
word: Vec::with_capacity(16),
offset: 0,
length: 0,
}
}
fn byte_to_u8(b: u8) -> u8 {
match b {
b'0'..=b'9' => b - b'0',
b'A'..=b'F' => b - (b'A' - 10),
b'a'..=b'f' => b - (b'a' - 10),
_ => b,
}
}
fn add_path(&mut self, c: u8, idx: usize) {
if idx < self.path.len() {
self.path[idx] = c;
}
}
fn add_byte(&mut self, c: u8) {
self.word.push(c);
}
fn add_arg_offset(&mut self, c: i32) {
self.offset = self.offset * 10 + c;
}
fn add_arg_length(&mut self, c: usize) {
self.length = self.length * 10 + c;
}
}
fn main() {
let mut host = String::from("0.0.0.0:8888");
//let mut host = String::from("[::]:8888");
let mut dictdir = String::from("/usr/share/stardict/dic");
let dict;
{
let mut _daemon = false;
let mut pendarg = 0u8;
for arg in env::args().skip(1) {
//parse options.
println!("cmd args: {}", &arg);
let a = arg.as_bytes();
match pendarg {
b'h' => {
host.clear();
host.push_str(&arg);
pendarg = 0;
}
b'd' => {
_daemon = true;
pendarg = 0;
}
b'r' => {
dictdir.clear();
dictdir.push_str(&arg);
pendarg = 0;
}
0 => (),
_ => {
println!("parameter: [-d] [-h host:port] [-r dict-root-dir]");
return;
}
}
if a[0] == b'-' {
pendarg = a[1];
}
}
//println!("get arg host={}, daemon={}", host, daemon);
//if daemon {
//}
dict = StarDict::new(&path::PathBuf::from(&dictdir)).unwrap();
}
println!("dict size={}", dict.directories.len());
//for d in dict.info().iter() {
// println!("dict: wordcount:{} {}", d.word_count, d.name);
//}
//webs
let listener = TcpListener::bind(&host).expect("Bind Socket failed!");
//let pool = web::ThreadPool::new(4);
let cr = {
let mut fmtp = path::PathBuf::from(&dictdir);
fmtp.push("rformat.conf");
reformat::ContentReformat::from_config_file(&fmtp)
};
for stream in listener.incoming() {
let stream = stream.expect("accept TCP failed!");
//pool.execute(
if let Err(_) = handle_connection(stream, &dict, &cr, &dictdir) {
println!("communication failed!");
}
//);
}
println!("Shutting down.");
}
fn handle_connection(
mut stream: TcpStream,
dict: &StarDict,
cr: &reformat::ContentReformat,
dictdir: &str,
) -> std::io::Result<()> {
//stream.set_nonblocking(false)?;
//stream.set_nodelay(false)?;
let mut buffer = vec![0u8; 512];
{
let mut sz = 0usize;
while let Ok(bn) = stream.read(&mut buffer[sz..]) {
sz += bn;
if bn == 0 || sz <= 4 || sz > 4096 {
stream.write(b"HTTP/1.0 417 Expectation Failed\r\n\r\nFail")?;
return Ok(());
}
if buffer[sz - 4] == b'\r'
&& buffer[sz - 3] == b'\n'
&& buffer[sz - 2] == b'\r'
&& buffer[sz - 1] == b'\n'
{
buffer.resize(sz, 0);
break;
}
if sz >= buffer.len() {
buffer.resize(buffer.len() + 512, 0);
}
}
}
let get = b"GET /";
//("HTTP/1.0 200 OK\r\nConnection: close\r\n", "index.html");
let mut content: Vec<u8> = Vec::new();
let mut surl = StardictUrl::new();
if buffer.starts_with(get) {
let mut state = 0i16; //>=0 path, -1 w, -2 p0w, -3 p1w, -4 argKey, -5 argVal
let mut w = 0u8;
buffer[5..]
.iter()
            .take_while(|c| **c != b' ')
.for_each(|c| {
if state < 0 {
if *c == b'%' {
state = -2;
} else if *c == b'?' {
// parse args.
state = -4;
} else {
if state == -2 {
w = StardictUrl::byte_to_u8(*c) << 4;
state = -3;
} else if state == -3 {
w |= StardictUrl::byte_to_u8(*c);
surl.add_byte(w);
state = -1;
} else if state == -4 {
if *c == b'=' {
state = -5;
} else {
w = *c;
}
} else if state == -5 {
match *c {
b'&' => {
state = -4;
}
b'-' => {
if w == b'o' {
w = b'O';
} else {
state = -32768;
}
}
b'0'..=b'9' => {
let v: i32 = (*c - b'0') as i32;
if w == b'o' {
surl.add_arg_offset(v);
} else if w == b'O' {
// negative offset
surl.add_arg_offset(-v);
} else if w == b'l' {
// length
surl.add_arg_length(v as usize);
}
}
_ => {
state = -32768;
}
}
} else {
surl.add_byte(*c);
}
}
} else if *c == b'/' {
state = -1;
} else {
surl.add_path(*c, state as usize);
state += 1;
}
});
//println!("get from url path={}, word={}, off={}, len={}", str::from_utf8(&surl.path).unwrap(), str::from_utf8(&surl.word).unwrap(), surl.offset, surl.length);
if surl.length == 0 {
surl.length = 10;
}
if surl.word.len() > 0 {
if surl.path[0] == b'W' {
//word lookup
match dict.lookup(&surl.word) {
Ok(x) => {
content.extend(b"<ol>");
for (i, e) in x.iter().enumerate() {
content.extend(b"<li><a href='#word_");
content.extend(i.to_string().as_bytes());
content.extend(b"'>");
content.extend(&e.word);
content.extend(b" : ");
content.extend(e.dictionary.name.as_bytes());
content.extend(b"</a></li>");
}
content.extend(b"</ol>\n");
for (i, e) in x.iter().enumerate() {
content.extend(b"<div id='word_");
content.extend(i.to_string().as_bytes());
content.extend(b"' class='res_word'>");
content.extend(e.dictionary.name.as_bytes());
content.extend(b" (");
content.extend(&e.word);
content.extend(b") </div><div class='res_definition'>".iter());
for (a, b) in e
.dictionary
.same_type_sequence
.as_bytes()
.iter()
.zip(e.result.split(|c| *c == 0))
{
content.extend(&cr.replace_all(
*a,
e.dictionary.dict_path.as_bytes(),
b,
));
}
content.extend(b"</div>\n");
}
}
Err(e) => println!("err: {:?}", e),
}
} else if surl.path[0] == b'n' {
//neighbor words reference
for s in dict.neighbors(&surl.word, surl.offset).take(surl.length) {
content.extend(s);
content.extend(b"\n");
}
} else if surl.path[0] == b's' {
//search with regex
match str::from_utf8(&surl.word) {
Ok(x) => match Regex::new(x) {
Ok(v) => {
content.extend(b"/~/:<ol>");
dict.search(&v).take(surl.length).for_each(|e| {
content.extend(b"<li><a>");
content.extend(e);
content.extend(b"</a></li>\n");
});
content.extend( | {
let mut wordit = Vec::with_capacity(2 * self.directories.len());
let mut cur = Vec::with_capacity(2 * self.directories.len());
for d in self.directories.iter() {
let mut x = d.neighbors(word, off);
let mut s = d.neighbors_syn(word, off);
cur.push(x.next());
cur.push(s.next());
wordit.push(x);
wordit.push(s);
}
WordMergeIter { wordit, cur }
} | identifier_body |
main.rs | <T: Iterator<Item = Vec<u8>>> {
wordit: Vec<T>,
cur: Vec<Option<Vec<u8>>>,
}
impl<'a, T: Iterator<Item = Vec<u8>>> Iterator for WordMergeIter<T> {
type Item = Vec<u8>;
fn next(&mut self) -> Option<Self::Item> {
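        // K-way merge over the per-dictionary cursors: pick the smallest
        // current word (by dictionary order), advance any iterator holding an
        // equal word so duplicates across dictionaries are emitted once, then
        // refill the winning cursor from its iterator before returning it.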
let l = self.cur.len();
if l == 0 {
return None;
}
let mut x = 0usize;
let mut i = 1usize;
while i < l {
x = match (&self.cur[x], &self.cur[i]) {
(None, _) => i,
(_, None) => x,
(Some(a), Some(b)) => match idx::Idx::dict_cmp(&a, &b, false) {
Ordering::Greater => i,
Ordering::Equal => {
self.cur[i] = self.wordit[i].next();
x
}
_ => x,
},
};
i += 1;
}
mem::replace(&mut self.cur[x], self.wordit[x].next())
}
}
impl StarDict {
    /// Create a StarDict struct from a system path. In the path,
    /// there should be some directories; each directory contains
    /// the dict files, like .ifo, .idx, .dict, etc.
    /// The dictionaries will be sorted by their directory names.
pub fn new(root: &path::Path) -> Result<StarDict, result::DictError> {
let mut sort_dirs = Vec::new();
let mut items = Vec::new();
if root.is_dir() {
for it in fs::read_dir(root)? {
//println!("push direc: {:?}", it);
let it = it?.path();
if it.is_dir() {
sort_dirs.push(it.into_boxed_path());
}
}
}
sort_dirs.sort();
for it in sort_dirs.iter() {
match dictionary::Dictionary::new(&**it, root) {
Ok(d) => {
items.push(d);
}
Err(e) => {
eprintln!("ignore reason: {:?}", e);
}
}
}
Ok(StarDict { directories: items })
}
    /// Get the Ifo struct, which is parsed from the .ifo file.
pub fn info(&self) -> Vec<&ifo::Ifo> {
let mut items = Vec::with_capacity(self.directories.len());
for it in &self.directories {
items.push(&it.ifo);
}
items
}
    /// List the neighbor words following `word`, starting from offset `off`.
    /// If `off` is a negative number, start listing from `-off` words before `word`.
pub fn neighbors(&self, word: &[u8], off: i32) -> WordMergeIter<dictionary::DictNeighborIter> {
let mut wordit = Vec::with_capacity(2 * self.directories.len());
let mut cur = Vec::with_capacity(2 * self.directories.len());
for d in self.directories.iter() {
let mut x = d.neighbors(word, off);
let mut s = d.neighbors_syn(word, off);
cur.push(x.next());
cur.push(s.next());
wordit.push(x);
wordit.push(s);
}
WordMergeIter { wordit, cur }
}
    /// Search all dictionaries using the specified regular expression.
    /// To match the beginning of a word, use `^`; for the end of a word, use `$`.
pub fn search<'a>(&'a self, reg: &'a Regex) -> WordMergeIter<dictionary::IdxIter> {
let mut wordit = Vec::with_capacity(2 * self.directories.len());
let mut cur = Vec::with_capacity(2 * self.directories.len());
for d in self.directories.iter() {
//println!("in for {}", d.ifo.name.as_str());
let mut x = d.search_regex(reg);
let mut s = d.search_syn(reg);
//println!("created inner iter");
cur.push(x.next());
cur.push(s.next());
//println!("created 1st value");
wordit.push(x);
wordit.push(s);
}
WordMergeIter { wordit, cur }
}
    /// Look up the word. Search the Idx case-sensitively; if nothing is found, fall back to a
    /// case-insensitive search. Also find all case-insensitive matching words in Syn.
pub fn lookup(&self, word: &[u8]) -> Result<Vec<dictionary::LookupResult>, result::DictError> {
let mut ret: Vec<dictionary::LookupResult> = Vec::with_capacity(self.directories.len());
for d in self.directories.iter() {
if let Ok(x) = d.lookup(word) {
ret.extend(x);
}
}
Ok(ret)
}
}
struct StardictUrl {
path: [u8; 4usize],
word: Vec<u8>,
    offset: i32, // args for offset and length, may use BTreeMap, but it costs too much.
length: usize,
}
impl StardictUrl {
fn new() -> StardictUrl {
StardictUrl {
path: [0; 4],
word: Vec::with_capacity(16),
offset: 0,
length: 0,
}
}
fn byte_to_u8(b: u8) -> u8 {
match b {
b'0'..=b'9' => b - b'0',
b'A'..=b'F' => b - (b'A' - 10),
b'a'..=b'f' => b - (b'a' - 10),
_ => b,
}
}
fn add_path(&mut self, c: u8, idx: usize) {
if idx < self.path.len() {
self.path[idx] = c;
}
}
fn add_byte(&mut self, c: u8) {
self.word.push(c);
}
fn add_arg_offset(&mut self, c: i32) {
self.offset = self.offset * 10 + c;
}
fn add_arg_length(&mut self, c: usize) {
self.length = self.length * 10 + c;
}
}
fn main() {
let mut host = String::from("0.0.0.0:8888");
//let mut host = String::from("[::]:8888");
let mut dictdir = String::from("/usr/share/stardict/dic");
let dict;
{
let mut _daemon = false;
let mut pendarg = 0u8;
for arg in env::args().skip(1) {
//parse options.
println!("cmd args: {}", &arg);
let a = arg.as_bytes();
match pendarg {
b'h' => {
host.clear();
host.push_str(&arg);
pendarg = 0;
}
b'd' => {
_daemon = true;
pendarg = 0;
}
b'r' => {
dictdir.clear();
dictdir.push_str(&arg);
pendarg = 0;
}
0 => (),
_ => {
println!("parameter: [-d] [-h host:port] [-r dict-root-dir]");
return;
}
}
if a[0] == b'-' {
pendarg = a[1];
}
}
//println!("get arg host={}, daemon={}", host, daemon);
//if daemon {
//}
dict = StarDict::new(&path::PathBuf::from(&dictdir)).unwrap();
}
println!("dict size={}", dict.directories.len());
//for d in dict.info().iter() {
// println!("dict: wordcount:{} {}", d.word_count, d.name);
//}
//webs
let listener = TcpListener::bind(&host).expect("Bind Socket failed!");
//let pool = web::ThreadPool::new(4);
let cr = {
let mut fmtp = path::PathBuf::from(&dictdir);
fmtp.push("rformat.conf");
reformat::ContentReformat::from_config_file(&fmtp)
};
for stream in listener.incoming() {
let stream = stream.expect("accept TCP failed!");
//pool.execute(
if let Err(_) = handle_connection(stream, &dict, &cr, &dictdir) {
println!("communication failed!");
}
//);
}
println!("Shutting down.");
}
fn handle_connection(
mut stream: TcpStream,
dict: &StarDict,
cr: &reformat::ContentReformat,
dictdir: &str,
) -> std::io::Result<()> {
//stream.set_nonblocking(false)?;
//stream.set_nodelay(false)?;
let mut buffer = vec![0u8; 512];
{
let mut sz = 0usize;
while let Ok(bn) = stream.read(&mut buffer[sz..]) {
sz += bn;
if bn == 0 || sz <= 4 || sz > 4096 {
stream.write(b"HTTP/1.0 417 Expectation Failed\r\n\r\nFail")?;
return Ok(());
}
if buffer[sz - 4] == b'\r'
&& buffer[sz - 3] == b'\n'
&& buffer[sz - 2] == b'\r'
&& buffer[sz - 1] == b'\n'
{
buffer.resize(sz, 0);
break;
}
if sz >= buffer.len() {
buffer.resize(buffer.len() + 512, 0);
}
}
}
let get = b"GET /";
//("HTTP/1.0 200 OK\r\nConnection: close\r\n", "index.html");
let mut content: Vec<u8> = Vec::new();
let mut surl = StardictUrl::new();
if buffer.starts_with(get) {
let mut state = 0i16; //>=0 path, -1 w, -2 p0w, -3 p1w, -4 argKey, -5 argVal
let mut w = 0u8;
buffer[5..]
.iter()
            .take_while(|c| **c != b' ')
.for_each(|c| {
if state < 0 {
if *c == b'%' {
state = -2;
} else if *c == b'?' {
// parse args.
state = -4;
} else {
if state == -2 {
w = StardictUrl::byte_to_u8(*c) << 4;
state = -3;
} else if state == -3 {
w |= StardictUrl::byte_to_u8(*c);
surl.add_byte(w);
state = -1;
} else if state == -4 {
if *c == b'=' {
state = -5;
} else {
w = *c;
}
} else if state == -5 {
match *c {
b'&' => {
state = -4;
}
b'-' => {
if w == b'o' {
w = b'O';
} else {
state = -32768;
}
}
b'0'..=b'9' => {
let v: i32 = (*c - b'0') as i32;
if w == b'o' {
surl.add_arg_offset(v);
} else if w == b'O' {
// negative offset
surl.add_arg_offset(-v);
} else if w == b'l' {
// length
surl.add_arg_length(v as usize);
}
}
_ => {
state = -32768;
}
}
} else {
surl.add_byte(*c);
}
}
} else if *c == b'/' {
state = -1;
} else {
surl.add_path(*c, state as usize);
state += 1;
}
});
//println!("get from url path={}, word={}, off={}, len={}", str::from_utf8(&surl.path).unwrap(), str::from_utf8(&surl.word).unwrap(), surl.offset, surl.length);
if surl.length == 0 {
surl.length = 10;
}
if surl.word.len() > 0 {
if surl.path[0] == b'W' {
//word lookup
match dict.lookup(&surl.word) {
Ok(x) => {
content.extend(b"<ol>");
for (i, e) in x.iter().enumerate() {
content.extend(b"<li><a href='#word_");
content.extend(i.to_string().as_bytes());
content.extend(b"'>");
content.extend(&e.word);
content.extend(b" : ");
content.extend(e.dictionary.name.as_bytes());
content.extend(b"</a></li>");
}
content.extend(b"</ol>\n");
for (i, e) in x.iter().enumerate() {
content.extend(b"<div id='word_");
content.extend(i.to_string().as_bytes());
content.extend(b"' class='res_word'>");
content.extend(e.dictionary.name.as_bytes());
content.extend(b" (");
content.extend(&e.word);
content.extend(b") </div><div class='res_definition'>".iter());
for (a, b) in e
.dictionary
.same_type_sequence
.as_bytes()
.iter()
.zip(e.result.split(|c| *c == 0))
{
content.extend(&cr.replace_all(
*a,
e.dictionary.dict_path.as_bytes(),
b,
));
}
content.extend(b"</div>\n");
}
}
Err(e) => println!("err: {:?}", e),
}
| WordMergeIter | identifier_name |
|
main.rs |
WordMergeIter { wordit, cur }
}
    /// Search all dictionaries using the specified regular expression.
    /// To match the beginning of a word, use `^`; for the end of a word, use `$`.
pub fn search<'a>(&'a self, reg: &'a Regex) -> WordMergeIter<dictionary::IdxIter> {
let mut wordit = Vec::with_capacity(2 * self.directories.len());
let mut cur = Vec::with_capacity(2 * self.directories.len());
for d in self.directories.iter() {
//println!("in for {}", d.ifo.name.as_str());
let mut x = d.search_regex(reg);
let mut s = d.search_syn(reg);
//println!("created inner iter");
cur.push(x.next());
cur.push(s.next());
//println!("created 1st value");
wordit.push(x);
wordit.push(s);
}
WordMergeIter { wordit, cur }
}
    /// Look up the word. Search the Idx case-sensitively; if nothing is found, fall back to a
    /// case-insensitive search. Also find all case-insensitive matching words in Syn.
pub fn lookup(&self, word: &[u8]) -> Result<Vec<dictionary::LookupResult>, result::DictError> {
let mut ret: Vec<dictionary::LookupResult> = Vec::with_capacity(self.directories.len());
for d in self.directories.iter() {
if let Ok(x) = d.lookup(word) {
ret.extend(x);
}
}
Ok(ret)
}
}
struct StardictUrl {
path: [u8; 4usize],
word: Vec<u8>,
    offset: i32, // args for offset and length, may use BTreeMap, but it costs too much.
length: usize,
}
impl StardictUrl {
fn new() -> StardictUrl {
StardictUrl {
path: [0; 4],
word: Vec::with_capacity(16),
offset: 0,
length: 0,
}
}
fn byte_to_u8(b: u8) -> u8 {
match b {
b'0'..=b'9' => b - b'0',
b'A'..=b'F' => b - (b'A' - 10),
b'a'..=b'f' => b - (b'a' - 10),
_ => b,
}
}
fn add_path(&mut self, c: u8, idx: usize) {
if idx < self.path.len() {
self.path[idx] = c;
}
}
fn add_byte(&mut self, c: u8) {
self.word.push(c);
}
fn add_arg_offset(&mut self, c: i32) {
self.offset = self.offset * 10 + c;
}
fn add_arg_length(&mut self, c: usize) {
self.length = self.length * 10 + c;
}
}
fn main() {
let mut host = String::from("0.0.0.0:8888");
//let mut host = String::from("[::]:8888");
let mut dictdir = String::from("/usr/share/stardict/dic");
let dict;
{
let mut _daemon = false;
let mut pendarg = 0u8;
for arg in env::args().skip(1) {
//parse options.
println!("cmd args: {}", &arg);
let a = arg.as_bytes();
match pendarg {
b'h' => {
host.clear();
host.push_str(&arg);
pendarg = 0;
}
b'd' => {
_daemon = true;
pendarg = 0;
}
b'r' => {
dictdir.clear();
dictdir.push_str(&arg);
pendarg = 0;
}
0 => (),
_ => {
println!("parameter: [-d] [-h host:port] [-r dict-root-dir]");
return;
}
}
if a[0] == b'-' {
pendarg = a[1];
}
}
//println!("get arg host={}, daemon={}", host, daemon);
//if daemon {
//}
dict = StarDict::new(&path::PathBuf::from(&dictdir)).unwrap();
}
println!("dict size={}", dict.directories.len());
//for d in dict.info().iter() {
// println!("dict: wordcount:{} {}", d.word_count, d.name);
//}
//webs
let listener = TcpListener::bind(&host).expect("Bind Socket failed!");
//let pool = web::ThreadPool::new(4);
let cr = {
let mut fmtp = path::PathBuf::from(&dictdir);
fmtp.push("rformat.conf");
reformat::ContentReformat::from_config_file(&fmtp)
};
for stream in listener.incoming() {
let stream = stream.expect("accept TCP failed!");
//pool.execute(
if let Err(_) = handle_connection(stream, &dict, &cr, &dictdir) {
println!("communication failed!");
}
//);
}
println!("Shutting down.");
}
fn handle_connection(
mut stream: TcpStream,
dict: &StarDict,
cr: &reformat::ContentReformat,
dictdir: &str,
) -> std::io::Result<()> {
//stream.set_nonblocking(false)?;
//stream.set_nodelay(false)?;
let mut buffer = vec![0u8; 512];
{
let mut sz = 0usize;
while let Ok(bn) = stream.read(&mut buffer[sz..]) {
sz += bn;
if bn == 0 || sz <= 4 || sz > 4096 {
stream.write(b"HTTP/1.0 417 Expectation Failed\r\n\r\nFail")?;
return Ok(());
}
if buffer[sz - 4] == b'\r'
&& buffer[sz - 3] == b'\n'
&& buffer[sz - 2] == b'\r'
&& buffer[sz - 1] == b'\n'
{
buffer.resize(sz, 0);
break;
}
if sz >= buffer.len() {
buffer.resize(buffer.len() + 512, 0);
}
}
}
let get = b"GET /";
//("HTTP/1.0 200 OK\r\nConnection: close\r\n", "index.html");
let mut content: Vec<u8> = Vec::new();
let mut surl = StardictUrl::new();
if buffer.starts_with(get) {
let mut state = 0i16; //>=0 path, -1 w, -2 p0w, -3 p1w, -4 argKey, -5 argVal
let mut w = 0u8;
buffer[5..]
.iter()
            .take_while(|c| **c != b' ')
.for_each(|c| {
if state < 0 {
if *c == b'%' {
state = -2;
} else if *c == b'?' {
// parse args.
state = -4;
} else {
if state == -2 {
w = StardictUrl::byte_to_u8(*c) << 4;
state = -3;
} else if state == -3 {
w |= StardictUrl::byte_to_u8(*c);
surl.add_byte(w);
state = -1;
} else if state == -4 {
if *c == b'=' {
state = -5;
} else {
w = *c;
}
} else if state == -5 {
match *c {
b'&' => {
state = -4;
}
b'-' => {
if w == b'o' {
w = b'O';
} else {
state = -32768;
}
}
b'0'..=b'9' => {
let v: i32 = (*c - b'0') as i32;
if w == b'o' {
surl.add_arg_offset(v);
} else if w == b'O' {
// negative offset
surl.add_arg_offset(-v);
} else if w == b'l' {
// length
surl.add_arg_length(v as usize);
}
}
_ => {
state = -32768;
}
}
} else {
surl.add_byte(*c);
}
}
} else if *c == b'/' {
state = -1;
} else {
surl.add_path(*c, state as usize);
state += 1;
}
});
//println!("get from url path={}, word={}, off={}, len={}", str::from_utf8(&surl.path).unwrap(), str::from_utf8(&surl.word).unwrap(), surl.offset, surl.length);
if surl.length == 0 {
surl.length = 10;
}
if surl.word.len() > 0 {
if surl.path[0] == b'W' {
//word lookup
match dict.lookup(&surl.word) {
Ok(x) => {
content.extend(b"<ol>");
for (i, e) in x.iter().enumerate() {
content.extend(b"<li><a href='#word_");
content.extend(i.to_string().as_bytes());
content.extend(b"'>");
content.extend(&e.word);
content.extend(b" : ");
content.extend(e.dictionary.name.as_bytes());
content.extend(b"</a></li>");
}
content.extend(b"</ol>\n");
for (i, e) in x.iter().enumerate() {
content.extend(b"<div id='word_");
content.extend(i.to_string().as_bytes());
content.extend(b"' class='res_word'>");
content.extend(e.dictionary.name.as_bytes());
content.extend(b" (");
content.extend(&e.word);
content.extend(b") </div><div class='res_definition'>".iter());
for (a, b) in e
.dictionary
.same_type_sequence
.as_bytes()
.iter()
.zip(e.result.split(|c| *c == 0))
{
content.extend(&cr.replace_all(
*a,
e.dictionary.dict_path.as_bytes(),
b,
));
}
content.extend(b"</div>\n");
}
}
Err(e) => println!("err: {:?}", e),
}
} else if surl.path[0] == b'n' {
//neighbor words reference
for s in dict.neighbors(&surl.word, surl.offset).take(surl.length) {
content.extend(s);
content.extend(b"\n");
}
} else if surl.path[0] == b's' {
//search with regex
match str::from_utf8(&surl.word) {
Ok(x) => match Regex::new(x) {
Ok(v) => {
content.extend(b"/~/:<ol>");
dict.search(&v).take(surl.length).for_each(|e| {
content.extend(b"<li><a>");
content.extend(e);
content.extend(b"</a></li>\n");
});
content.extend(b"</ol>");
}
Err(e) => println!("err: {:?}", e),
},
Err(e) => println!("err: {:?}", e),
}
} else if surl.path[0] == b'r' {
//html js css page etc.
if let Ok(fname) = str::from_utf8(&surl.word) {
let mut pfile = path::PathBuf::from(dictdir);
pfile.push(fname);
if let Ok(mut f) = fs::File::open(pfile) {
if f.read_to_end(&mut content).is_err() {
content.clear();
}
}
}
} else if surl.path[0] == b'w' {
content.extend(HOME_PAGE.as_bytes());
}
} else {
content.extend(HOME_PAGE.as_bytes());
}
}
fn map_by_file(f: &[u8]) -> &'static [u8] {
if let Some(s) = f.rsplit(|c| *c == b'.').next() {
match s {
b"js" => return b"application/javascript",
b"css" => return b"text/css",
b"jpg" => return b"image/jpeg",
b"png" => return b"image/png",
_ => (),
}
}
b"text/html"
}
if content.len() > 0 {
//let mut cg = 0;
//content.iter_mut().for_each(|x|{ *x = if cg % 10 == 0 {b'\n'} else {b'a'}; cg = cg + 1;});
stream.write(b"HTTP/1.0 200 OK\r\nContent-Type: ")?; | if surl.path[0] == b'n' {
stream.write(b"text/plain")?; | random_line_split |
|
lib.rs | // Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
mod additional_cpp_generator;
mod byvalue_checker;
mod byvalue_scanner;
mod conversion;
mod function_wrapper;
mod known_types;
mod parse;
mod rust_pretty_printer;
mod type_database;
mod typedef_analyzer;
mod types;
#[cfg(any(test, feature = "build"))]
mod builder;
#[cfg(test)]
mod integration_tests;
use conversion::BridgeConverter;
use proc_macro2::TokenStream as TokenStream2;
use std::{fmt::Display, path::PathBuf};
use type_database::TypeDatabase;
use quote::ToTokens;
use syn::{
parse::{Parse, ParseStream, Result as ParseResult},
Token,
};
use syn::{parse_quote, ItemMod, Macro};
use additional_cpp_generator::AdditionalCppGenerator;
use itertools::join;
use known_types::KNOWN_TYPES;
use log::{info, warn};
use types::TypeName;
/// We use a forked version of bindgen - for now.
/// We hope to unfork.
use autocxx_bindgen as bindgen;
#[cfg(any(test, feature = "build"))]
pub use builder::{build, expect_build, BuilderError, BuilderResult, BuilderSuccess};
pub use parse::{parse_file, parse_token_stream, ParseError, ParsedFile};
pub use cxx_gen::HEADER;
/// Re-export cxx such that clients can use the same version as
/// us. This doesn't enable clients to avoid depending on the cxx
/// crate too, unfortunately, since generated cxx::bridge code
/// refers explicitly to ::cxx. See
/// https://github.com/google/autocxx/issues/36
pub use cxx;
pub struct CppFilePair {
pub header: Vec<u8>,
pub implementation: Vec<u8>,
pub header_name: String,
}
pub struct GeneratedCpp(pub Vec<CppFilePair>);
/// Errors which may occur in generating bindings for these C++
/// functions.
#[derive(Debug)]
pub enum Error {
/// Any error reported by bindgen, generating the C++ bindings.
/// Any C++ parsing errors, etc. would be reported this way.
Bindgen(()),
/// Any problem parsing the Rust file.
Parsing(syn::Error),
/// No `include_cpp!` macro could be found.
NoAutoCxxInc,
    /// The include directories specified were incorrect.
CouldNotCanoncalizeIncludeDir(PathBuf),
    /// Some error occurred in converting the bindgen-style
/// bindings to safe cxx bindings.
Conversion(conversion::ConvertError),
/// No 'generate' or 'generate_pod' was specified.
/// It might be that in future we can simply let things work
/// without any allowlist, in which case bindgen should generate
/// bindings for everything. That just seems very unlikely to work
/// in the common case right now.
NoGenerationRequested,
}
impl Display for Error {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
            Error::Bindgen(_) => write!(f, "Bindgen was unable to generate the initial .rs bindings for this file. This may indicate a parsing problem with the C++ headers.")?,
            Error::Parsing(err) => write!(f, "The Rust file could not be parsed: {}", err)?,
Error::NoAutoCxxInc => write!(f, "No C++ include directory was provided. Consider setting AUTOCXX_INC.")?,
Error::CouldNotCanoncalizeIncludeDir(pb) => write!(f, "One of the C++ include directories provided ({}) did not appear to exist or could otherwise not be made into a canonical path.", pb.to_string_lossy())?,
Error::Conversion(err) => write!(f, "autocxx could not generate the requested bindings. {}", err)?,
Error::NoGenerationRequested => write!(f, "No 'generate' or 'generate_pod' directives were found, so we would not generate any Rust bindings despite the inclusion of C++ headers.")?,
}
Ok(())
}
}
pub type Result<T, E = Error> = std::result::Result<T, E>;
pub enum CppInclusion {
Define(String),
Header(String),
}
#[allow(clippy::large_enum_variant)] // because this is only used once
enum State {
NotGenerated,
ParseOnly,
NothingGenerated,
Generated(ItemMod, AdditionalCppGenerator),
}
/// Core of the autocxx engine. See `generate` for most details
/// on how this works.
///
/// TODO - consider whether this 'engine' crate should actually be a
/// directory of source symlinked from all the other sub-crates, so that
/// we avoid exposing an external interface from this code.
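/// An illustrative sketch of the directive syntax accepted by
/// `new_from_parse_stream` below (assembled from the parser itself, not from
/// official docs; the `!` after each directive name is optional as parsed):
///
/// ```text
/// include_cpp! {
///     #include "foo.h"
///     unsafe
///     generate!("Bar")
///     generate_pod!("Point")
/// }
/// ```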
pub struct IncludeCpp {
inclusions: Vec<CppInclusion>,
type_database: TypeDatabase,
preconfigured_inc_dirs: Option<std::ffi::OsString>,
exclude_utilities: bool,
state: State,
}
impl Parse for IncludeCpp {
fn parse(input: ParseStream) -> ParseResult<Self> {
Self::new_from_parse_stream(input)
}
}
impl IncludeCpp {
fn new_from_parse_stream(input: ParseStream) -> syn::Result<Self> {
// Takes as inputs:
// 1. List of headers to include
// 2. List of #defines to include
// 3. Allowlist
let mut inclusions = Vec::new();
let mut parse_only = false;
let mut exclude_utilities = false;
let mut type_database = TypeDatabase::new();
let mut unsafe_found = false;
        while !input.is_empty() {
if input.parse::<Option<syn::Token![#]>>()?.is_some() {
let ident: syn::Ident = input.parse()?;
                if ident != "include" {
return Err(syn::Error::new(ident.span(), "expected include"));
}
let hdr: syn::LitStr = input.parse()?;
inclusions.push(CppInclusion::Header(hdr.value()));
} else if input.parse::<Option<Token![unsafe]>>()?.is_some() {
unsafe_found = true;
} else {
let ident: syn::Ident = input.parse()?;
input.parse::<Option<syn::Token![!]>>()?;
if ident == "generate" || ident == "generate_pod" {
let args;
syn::parenthesized!(args in input);
let generate: syn::LitStr = args.parse()?;
type_database.add_to_allowlist(generate.value());
if ident == "generate_pod" {
type_database
.note_pod_request(TypeName::new_from_user_input(&generate.value()));
}
} else if ident == "nested_type" {
let args;
syn::parenthesized!(args in input);
let nested: syn::LitStr = args.parse()?;
args.parse::<syn::Token![,]>()?;
let nested_in: syn::LitStr = args.parse()?;
type_database.note_nested_type(
TypeName::new_from_user_input(&nested.value()),
TypeName::new_from_user_input(&nested_in.value()),
);
} else if ident == "block" {
let args;
syn::parenthesized!(args in input);
let generate: syn::LitStr = args.parse()?;
type_database.add_to_blocklist(generate.value());
} else if ident == "parse_only" {
parse_only = true;
} else if ident == "exclude_utilities" {
exclude_utilities = true;
} else {
return Err(syn::Error::new(
ident.span(),
"expected generate, generate_pod, nested_type or exclude_utilities",
));
}
}
if input.is_empty() {
break;
}
}
        if !exclude_utilities {
type_database.add_to_allowlist("make_string".to_string());
}
        if !unsafe_found {
return Err(syn::Error::new(
input.span(),
"the unsafe keyword must be specified within each include_cpp! block",
));
}
Ok(IncludeCpp {
inclusions,
preconfigured_inc_dirs: None,
exclude_utilities,
type_database,
state: if parse_only {
State::ParseOnly
} else {
State::NotGenerated
},
})
}
pub fn new_from_syn(mac: Macro) -> Result<Self> {
mac.parse_body::<IncludeCpp>().map_err(Error::Parsing)
}
pub fn set_include_dirs<P: AsRef<std::ffi::OsStr>>(&mut self, include_dirs: P) {
self.preconfigured_inc_dirs = Some(include_dirs.as_ref().into());
}
fn build_header(&self) -> String {
join(
self.inclusions.iter().map(|incl| match incl {
CppInclusion::Define(symbol) => format!("#define {}\n", symbol),
CppInclusion::Header(path) => format!("#include \"{}\"\n", path),
}),
"",
)
}
fn determine_incdirs(&self) -> Result<Vec<PathBuf>> {
let inc_dirs = match &self.preconfigured_inc_dirs {
Some(d) => d.clone(),
None => std::env::var_os("AUTOCXX_INC").ok_or(Error::NoAutoCxxInc)?,
}; | // instead of requiring callers always to set AUTOCXX_INC.
// On Windows, the canonical path begins with a UNC prefix that cannot be passed to
// the MSVC compiler, so dunce::canonicalize() is used instead of std::fs::canonicalize()
// See:
// * https://github.com/dtolnay/cxx/pull/41
// * https://github.com/alexcrichton/cc-rs/issues/169
inc_dirs
.map(|p| dunce::canonicalize(&p).map_err(|_| Error::CouldNotCanoncalizeIncludeDir(p)))
.collect()
}
fn make_bindgen_builder(&self) -> Result<bindgen::Builder> {
// TODO support different C++ versions
let mut builder = bindgen::builder()
.clang_args(&["-x", "c++", "-std=c++14"])
.derive_copy(false)
.derive_debug(false)
.default_enum_style(bindgen::EnumVariation::Rust {
non_exhaustive: false,
})
.enable_cxx_namespaces()
.disable_nested_struct_naming()
.generate_inline_functions(true)
.layout_tests(false); // TODO revisit later
for item in known_types::get_initial_blocklist() {
builder = builder.blacklist_item(item);
}
for inc_dir in self.determine_incdirs()? {
            // TODO work with OsStrs here to avoid the .display()
builder = builder.clang_arg(format!("-I{}", inc_dir.display()));
}
// 3. Passes allowlist and other options to the bindgen::Builder equivalent
// to --output-style=cxx --allowlist=<as passed in>
for a in self.type_database.allowlist() {
// TODO - allowlist type/functions/separately
builder = builder
.whitelist_type(a)
.whitelist_function(a)
.whitelist_var(a);
}
Ok(builder)
}
fn inject_header_into_bindgen(&self, mut builder: bindgen::Builder) -> bindgen::Builder {
let full_header = self.build_header();
let full_header = format!("{}\n\n{}", KNOWN_TYPES.get_prelude(), full_header,);
builder = builder.header_contents("example.hpp", &full_header);
builder
}
/// Generate the Rust bindings. Call `generate` first.
pub fn generate_rs(&self) -> TokenStream2 {
match &self.state {
State::NotGenerated => panic!("Call generate() first"),
State::Generated(itemmod, _) => itemmod.to_token_stream(),
State::NothingGenerated | State::ParseOnly => TokenStream2::new(),
}
}
fn parse_bindings(&self, bindings: bindgen::Bindings) -> Result<ItemMod> {
// This bindings object is actually a TokenStream internally and we're wasting
// effort converting to and from string. We could enhance the bindgen API
// in future.
let bindings = bindings.to_string();
// Manually add the mod ffi {} so that we can ask syn to parse
// into a single construct.
let bindings = format!("mod bindgen {{ {} }}", bindings);
info!("Bindings: {}", bindings);
syn::parse_str::<ItemMod>(&bindings).map_err(Error::Parsing)
}
fn generate_include_list(&self) -> Vec<String> {
let mut include_list = Vec::new();
for incl in &self.inclusions {
match incl {
CppInclusion::Header(ref hdr) => {
include_list.push(hdr.clone());
}
CppInclusion::Define(_) => warn!("Currently no way to define! within cxx"),
}
}
include_list
}
/// Actually examine the headers to find out what needs generating.
/// Most errors occur at this stage as we fail to interpret the C++
/// headers properly.
///
/// The basic idea is this. We will run `bindgen` which will spit
/// out a ton of Rust code corresponding to all the types and functions
/// defined in C++. We'll then post-process that bindgen output
/// into a form suitable for ingestion by `cxx`.
/// (It's the `bridge_converter` mod which does that.)
/// Along the way, the `bridge_converter` might tell us of additional
/// C++ code which we should generate, e.g. wrappers to move things
/// into and out of `UniquePtr`s.
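    /// A rough call sequence, as a sketch (the real driver lives outside this
    /// crate): `new_from_syn` or `parse`, optionally `set_include_dirs`, then
    /// `generate`, and finally `generate_rs` and/or `generate_h_and_cxx` to
    /// retrieve the Rust and C++ halves of the bindings.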
pub fn generate(&mut self) -> Result<()> {
// If we are in parse only mode, do nothing. This is used for
// doc tests to ensure the parsing is valid, but we can't expect
// valid C++ header files or linkers to allow a complete build.
match self.state {
State::ParseOnly => return Ok(()),
State::NotGenerated => {}
State::Generated(_, _) | State::NothingGenerated => panic!("Only call generate once"),
}
if self.type_database.allowlist_is_empty() {
return Err(Error::NoGenerationRequested);
}
let builder = self.make_bindgen_builder()?;
let bindings = self
.inject_header_into_bindgen(builder)
.generate()
.map_err(Error::Bindgen)?;
let bindings = self.parse_bindings(bindings)?;
let include_list = self.generate_include_list();
let mut converter = BridgeConverter::new(&include_list, &self.type_database);
let conversion = converter
.convert(bindings, self.exclude_utilities)
.map_err(Error::Conversion)?;
let mut additional_cpp_generator = AdditionalCppGenerator::new(self.build_header());
additional_cpp_generator.add_needs(conversion.additional_cpp_needs, &self.type_database);
let mut items = conversion.items;
let mut new_bindings: ItemMod = parse_quote! {
#[allow(non_snake_case)]
#[allow(dead_code)]
#[allow(non_upper_case_globals)]
#[allow(non_camel_case_types)]
mod ffi {
}
};
new_bindings.content.as_mut().unwrap().1.append(&mut items);
info!(
"New bindings:\n{}",
rust_pretty_printer::pretty_print(&new_bindings.to_token_stream())
);
self.state = State::Generated(new_bindings, additional_cpp_generator);
Ok(())
}
/// Generate C++-side bindings for these APIs. Call `generate` first.
pub fn generate_h_and_cxx(&self) -> Result<GeneratedCpp, cxx_gen::Error> {
let mut files = Vec::new();
match &self.state {
State::ParseOnly => panic!("Cannot generate C++ in parse-only mode"),
State::NotGenerated => panic!("Call generate() first"),
State::NothingGenerated => {}
State::Generated(itemmod, additional_cpp_generator) => {
let rs = itemmod.into_token_stream();
let opt = cxx_gen::Opt::default();
let cxx_generated = cxx_gen::generate_header_and_cc(rs, &opt)?;
files.push(CppFilePair {
header: cxx_generated.header,
header_name: "cxxgen.h".to_string(),
implementation: cxx_generated.implementation,
});
match additional_cpp_generator.generate() {
None => {}
Some(additional_cpp) => {
// TODO should probably replace pragma once below with traditional include guards.
let declarations = format!("#pragma once\n{}", additional_cpp.declarations);
files.push(CppFilePair {
header: declarations.as_bytes().to_vec(),
header_name: "autocxxgen.h".to_string(),
implementation: additional_cpp.definitions.as_bytes().to_vec(),
});
info!("Additional C++ decls:\n{}", declarations);
info!("Additional C++ defs:\n{}", additional_cpp.definitions);
}
}
}
};
Ok(GeneratedCpp(files))
}
/// Get the configured include directories.
pub fn include_dirs(&self) -> Result<Vec<PathBuf>> {
self.determine_incdirs()
}
} | let inc_dirs = std::env::split_paths(&inc_dirs);
// TODO consider if we can or should look up the include path automatically | random_line_split |
lib.rs | // Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
mod additional_cpp_generator;
mod byvalue_checker;
mod byvalue_scanner;
mod conversion;
mod function_wrapper;
mod known_types;
mod parse;
mod rust_pretty_printer;
mod type_database;
mod typedef_analyzer;
mod types;
#[cfg(any(test, feature = "build"))]
mod builder;
#[cfg(test)]
mod integration_tests;
use conversion::BridgeConverter;
use proc_macro2::TokenStream as TokenStream2;
use std::{fmt::Display, path::PathBuf};
use type_database::TypeDatabase;
use quote::ToTokens;
use syn::{
parse::{Parse, ParseStream, Result as ParseResult},
Token,
};
use syn::{parse_quote, ItemMod, Macro};
use additional_cpp_generator::AdditionalCppGenerator;
use itertools::join;
use known_types::KNOWN_TYPES;
use log::{info, warn};
use types::TypeName;
/// We use a forked version of bindgen - for now.
/// We hope to unfork.
use autocxx_bindgen as bindgen;
#[cfg(any(test, feature = "build"))]
pub use builder::{build, expect_build, BuilderError, BuilderResult, BuilderSuccess};
pub use parse::{parse_file, parse_token_stream, ParseError, ParsedFile};
pub use cxx_gen::HEADER;
/// Re-export cxx such that clients can use the same version as
/// us. This doesn't enable clients to avoid depending on the cxx
/// crate too, unfortunately, since generated cxx::bridge code
/// refers explicitly to ::cxx. See
/// https://github.com/google/autocxx/issues/36
pub use cxx;
pub struct CppFilePair {
pub header: Vec<u8>,
pub implementation: Vec<u8>,
pub header_name: String,
}
pub struct GeneratedCpp(pub Vec<CppFilePair>);
/// Errors which may occur in generating bindings for these C++
/// functions.
#[derive(Debug)]
pub enum Error {
/// Any error reported by bindgen, generating the C++ bindings.
/// Any C++ parsing errors, etc. would be reported this way.
Bindgen(()),
/// Any problem parsing the Rust file.
Parsing(syn::Error),
/// No `include_cpp!` macro could be found.
NoAutoCxxInc,
    /// The include directories specified were incorrect.
CouldNotCanoncalizeIncludeDir(PathBuf),
    /// Some error occurred in converting the bindgen-style
/// bindings to safe cxx bindings.
Conversion(conversion::ConvertError),
/// No 'generate' or 'generate_pod' was specified.
/// It might be that in future we can simply let things work
/// without any allowlist, in which case bindgen should generate
/// bindings for everything. That just seems very unlikely to work
/// in the common case right now.
NoGenerationRequested,
}
impl Display for Error {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
            Error::Bindgen(_) => write!(f, "Bindgen was unable to generate the initial .rs bindings for this file. This may indicate a parsing problem with the C++ headers.")?,
            Error::Parsing(err) => write!(f, "The Rust file could not be parsed: {}", err)?,
Error::NoAutoCxxInc => write!(f, "No C++ include directory was provided. Consider setting AUTOCXX_INC.")?,
Error::CouldNotCanoncalizeIncludeDir(pb) => write!(f, "One of the C++ include directories provided ({}) did not appear to exist or could otherwise not be made into a canonical path.", pb.to_string_lossy())?,
Error::Conversion(err) => write!(f, "autocxx could not generate the requested bindings. {}", err)?,
Error::NoGenerationRequested => write!(f, "No 'generate' or 'generate_pod' directives were found, so we would not generate any Rust bindings despite the inclusion of C++ headers.")?,
}
Ok(())
}
}
pub type Result<T, E = Error> = std::result::Result<T, E>;
pub enum CppInclusion {
Define(String),
Header(String),
}
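// Lifecycle sketch: parsing yields `ParseOnly` or `NotGenerated`; a successful
// `generate()` moves `NotGenerated` to `Generated`, and the output methods
// treat `NothingGenerated` and `ParseOnly` as producing no bindings.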
#[allow(clippy::large_enum_variant)] // because this is only used once
enum State {
NotGenerated,
ParseOnly,
NothingGenerated,
Generated(ItemMod, AdditionalCppGenerator),
}
/// Core of the autocxx engine. See `generate` for most details
/// on how this works.
///
/// TODO - consider whether this 'engine' crate should actually be a
/// directory of source symlinked from all the other sub-crates, so that
/// we avoid exposing an external interface from this code.
pub struct IncludeCpp {
inclusions: Vec<CppInclusion>,
type_database: TypeDatabase,
preconfigured_inc_dirs: Option<std::ffi::OsString>,
exclude_utilities: bool,
state: State,
}
impl Parse for IncludeCpp {
fn parse(input: ParseStream) -> ParseResult<Self> {
Self::new_from_parse_stream(input)
}
}
impl IncludeCpp {
fn new_from_parse_stream(input: ParseStream) -> syn::Result<Self> {
// Takes as inputs:
// 1. List of headers to include
// 2. List of #defines to include
// 3. Allowlist
let mut inclusions = Vec::new();
let mut parse_only = false;
let mut exclude_utilities = false;
let mut type_database = TypeDatabase::new();
let mut unsafe_found = false;
        while !input.is_empty() {
if input.parse::<Option<syn::Token![#]>>()?.is_some() {
let ident: syn::Ident = input.parse()?;
                if ident != "include" {
return Err(syn::Error::new(ident.span(), "expected include"));
}
let hdr: syn::LitStr = input.parse()?;
inclusions.push(CppInclusion::Header(hdr.value()));
} else if input.parse::<Option<Token![unsafe]>>()?.is_some() {
unsafe_found = true;
} else {
let ident: syn::Ident = input.parse()?;
input.parse::<Option<syn::Token![!]>>()?;
if ident == "generate" || ident == "generate_pod" {
let args;
syn::parenthesized!(args in input);
let generate: syn::LitStr = args.parse()?;
type_database.add_to_allowlist(generate.value());
if ident == "generate_pod" {
type_database
.note_pod_request(TypeName::new_from_user_input(&generate.value()));
}
} else if ident == "nested_type" {
let args;
syn::parenthesized!(args in input);
let nested: syn::LitStr = args.parse()?;
args.parse::<syn::Token![,]>()?;
let nested_in: syn::LitStr = args.parse()?;
type_database.note_nested_type(
TypeName::new_from_user_input(&nested.value()),
TypeName::new_from_user_input(&nested_in.value()),
);
} else if ident == "block" {
let args;
syn::parenthesized!(args in input);
let generate: syn::LitStr = args.parse()?;
type_database.add_to_blocklist(generate.value());
} else if ident == "parse_only" {
parse_only = true;
} else if ident == "exclude_utilities" {
exclude_utilities = true;
} else {
return Err(syn::Error::new(
ident.span(),
"expected generate, generate_pod, nested_type or exclude_utilities",
));
}
}
if input.is_empty() {
break;
}
}
        if !exclude_utilities {
type_database.add_to_allowlist("make_string".to_string());
}
        if !unsafe_found {
return Err(syn::Error::new(
input.span(),
"the unsafe keyword must be specified within each include_cpp! block",
));
}
Ok(IncludeCpp {
inclusions,
preconfigured_inc_dirs: None,
exclude_utilities,
type_database,
state: if parse_only {
State::ParseOnly
} else {
State::NotGenerated
},
})
}
pub fn new_from_syn(mac: Macro) -> Result<Self> {
mac.parse_body::<IncludeCpp>().map_err(Error::Parsing)
}
pub fn set_include_dirs<P: AsRef<std::ffi::OsStr>>(&mut self, include_dirs: P) {
self.preconfigured_inc_dirs = Some(include_dirs.as_ref().into());
}
fn build_header(&self) -> String {
join(
self.inclusions.iter().map(|incl| match incl {
CppInclusion::Define(symbol) => format!("#define {}\n", symbol),
CppInclusion::Header(path) => format!("#include \"{}\"\n", path),
}),
"",
)
}
fn determine_incdirs(&self) -> Result<Vec<PathBuf>> {
let inc_dirs = match &self.preconfigured_inc_dirs {
Some(d) => d.clone(),
None => std::env::var_os("AUTOCXX_INC").ok_or(Error::NoAutoCxxInc)?,
};
let inc_dirs = std::env::split_paths(&inc_dirs);
// TODO consider if we can or should look up the include path automatically
// instead of requiring callers always to set AUTOCXX_INC.
// On Windows, the canonical path begins with a UNC prefix that cannot be passed to
// the MSVC compiler, so dunce::canonicalize() is used instead of std::fs::canonicalize()
// See:
// * https://github.com/dtolnay/cxx/pull/41
// * https://github.com/alexcrichton/cc-rs/issues/169
inc_dirs
.map(|p| dunce::canonicalize(&p).map_err(|_| Error::CouldNotCanoncalizeIncludeDir(p)))
.collect()
}
fn make_bindgen_builder(&self) -> Result<bindgen::Builder> {
// TODO support different C++ versions
let mut builder = bindgen::builder()
.clang_args(&["-x", "c++", "-std=c++14"])
.derive_copy(false)
.derive_debug(false)
.default_enum_style(bindgen::EnumVariation::Rust {
non_exhaustive: false,
})
.enable_cxx_namespaces()
.disable_nested_struct_naming()
.generate_inline_functions(true)
.layout_tests(false); // TODO revisit later
for item in known_types::get_initial_blocklist() {
builder = builder.blacklist_item(item);
}
for inc_dir in self.determine_incdirs()? {
            // TODO work with OsStrs here to avoid the .display()
builder = builder.clang_arg(format!("-I{}", inc_dir.display()));
}
// 3. Passes allowlist and other options to the bindgen::Builder equivalent
// to --output-style=cxx --allowlist=<as passed in>
for a in self.type_database.allowlist() {
// TODO - allowlist type/functions/separately
builder = builder
.whitelist_type(a)
.whitelist_function(a)
.whitelist_var(a);
}
Ok(builder)
}
fn inject_header_into_bindgen(&self, mut builder: bindgen::Builder) -> bindgen::Builder |
/// Generate the Rust bindings. Call `generate` first.
pub fn generate_rs(&self) -> TokenStream2 {
match &self.state {
State::NotGenerated => panic!("Call generate() first"),
State::Generated(itemmod, _) => itemmod.to_token_stream(),
State::NothingGenerated | State::ParseOnly => TokenStream2::new(),
}
}
fn parse_bindings(&self, bindings: bindgen::Bindings) -> Result<ItemMod> {
// This bindings object is actually a TokenStream internally and we're wasting
// effort converting to and from string. We could enhance the bindgen API
// in future.
let bindings = bindings.to_string();
// Manually add the mod ffi {} so that we can ask syn to parse
// into a single construct.
let bindings = format!("mod bindgen {{ {} }}", bindings);
info!("Bindings: {}", bindings);
syn::parse_str::<ItemMod>(&bindings).map_err(Error::Parsing)
}
fn generate_include_list(&self) -> Vec<String> {
let mut include_list = Vec::new();
for incl in &self.inclusions {
match incl {
CppInclusion::Header(ref hdr) => {
include_list.push(hdr.clone());
}
CppInclusion::Define(_) => warn!("Currently no way to define! within cxx"),
}
}
include_list
}
/// Actually examine the headers to find out what needs generating.
/// Most errors occur at this stage as we fail to interpret the C++
/// headers properly.
///
/// The basic idea is this. We will run `bindgen` which will spit
/// out a ton of Rust code corresponding to all the types and functions
/// defined in C++. We'll then post-process that bindgen output
/// into a form suitable for ingestion by `cxx`.
/// (It's the `bridge_converter` mod which does that.)
/// Along the way, the `bridge_converter` might tell us of additional
/// C++ code which we should generate, e.g. wrappers to move things
/// into and out of `UniquePtr`s.
pub fn generate(&mut self) -> Result<()> {
// If we are in parse only mode, do nothing. This is used for
// doc tests to ensure the parsing is valid, but we can't expect
// valid C++ header files or linkers to allow a complete build.
match self.state {
State::ParseOnly => return Ok(()),
State::NotGenerated => {}
State::Generated(_, _) | State::NothingGenerated => panic!("Only call generate once"),
}
if self.type_database.allowlist_is_empty() {
return Err(Error::NoGenerationRequested);
}
let builder = self.make_bindgen_builder()?;
let bindings = self
.inject_header_into_bindgen(builder)
.generate()
.map_err(Error::Bindgen)?;
let bindings = self.parse_bindings(bindings)?;
let include_list = self.generate_include_list();
let mut converter = BridgeConverter::new(&include_list, &self.type_database);
let conversion = converter
.convert(bindings, self.exclude_utilities)
.map_err(Error::Conversion)?;
let mut additional_cpp_generator = AdditionalCppGenerator::new(self.build_header());
additional_cpp_generator.add_needs(conversion.additional_cpp_needs, &self.type_database);
let mut items = conversion.items;
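        // Wrap the converted items in a fresh `mod ffi` (with the usual
        // bindgen-style naming lints silenced) so callers receive a single module.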
let mut new_bindings: ItemMod = parse_quote! {
#[allow(non_snake_case)]
#[allow(dead_code)]
#[allow(non_upper_case_globals)]
#[allow(non_camel_case_types)]
mod ffi {
}
};
new_bindings.content.as_mut().unwrap().1.append(&mut items);
info!(
"New bindings:\n{}",
rust_pretty_printer::pretty_print(&new_bindings.to_token_stream())
);
self.state = State::Generated(new_bindings, additional_cpp_generator);
Ok(())
}
/// Generate C++-side bindings for these APIs. Call `generate` first.
pub fn generate_h_and_cxx(&self) -> Result<GeneratedCpp, cxx_gen::Error> {
let mut files = Vec::new();
match &self.state {
State::ParseOnly => panic!("Cannot generate C++ in parse-only mode"),
State::NotGenerated => panic!("Call generate() first"),
State::NothingGenerated => {}
State::Generated(itemmod, additional_cpp_generator) => {
let rs = itemmod.into_token_stream();
let opt = cxx_gen::Opt::default();
let cxx_generated = cxx_gen::generate_header_and_cc(rs, &opt)?;
files.push(CppFilePair {
header: cxx_generated.header,
header_name: "cxxgen.h".to_string(),
implementation: cxx_generated.implementation,
});
match additional_cpp_generator.generate() {
None => {}
Some(additional_cpp) => {
// TODO should probably replace pragma once below with traditional include guards.
let declarations = format!("#pragma once\n{}", additional_cpp.declarations);
files.push(CppFilePair {
header: declarations.as_bytes().to_vec(),
header_name: "autocxxgen.h".to_string(),
implementation: additional_cpp.definitions.as_bytes().to_vec(),
});
info!("Additional C++ decls:\n{}", declarations);
info!("Additional C++ defs:\n{}", additional_cpp.definitions);
}
}
}
};
Ok(GeneratedCpp(files))
}
/// Get the configured include directories.
pub fn include_dirs(&self) -> Result<Vec<PathBuf>> {
self.determine_incdirs()
}
}
| {
let full_header = self.build_header();
let full_header = format!("{}\n\n{}", KNOWN_TYPES.get_prelude(), full_header,);
builder = builder.header_contents("example.hpp", &full_header);
builder
} | identifier_body |
lib.rs | // Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
mod additional_cpp_generator;
mod byvalue_checker;
mod byvalue_scanner;
mod conversion;
mod function_wrapper;
mod known_types;
mod parse;
mod rust_pretty_printer;
mod type_database;
mod typedef_analyzer;
mod types;
#[cfg(any(test, feature = "build"))]
mod builder;
#[cfg(test)]
mod integration_tests;
use conversion::BridgeConverter;
use proc_macro2::TokenStream as TokenStream2;
use std::{fmt::Display, path::PathBuf};
use type_database::TypeDatabase;
use quote::ToTokens;
use syn::{
parse::{Parse, ParseStream, Result as ParseResult},
Token,
};
use syn::{parse_quote, ItemMod, Macro};
use additional_cpp_generator::AdditionalCppGenerator;
use itertools::join;
use known_types::KNOWN_TYPES;
use log::{info, warn};
use types::TypeName;
/// We use a forked version of bindgen - for now.
/// We hope to unfork.
use autocxx_bindgen as bindgen;
#[cfg(any(test, feature = "build"))]
pub use builder::{build, expect_build, BuilderError, BuilderResult, BuilderSuccess};
pub use parse::{parse_file, parse_token_stream, ParseError, ParsedFile};
pub use cxx_gen::HEADER;
/// Re-export cxx such that clients can use the same version as
/// us. This doesn't enable clients to avoid depending on the cxx
/// crate too, unfortunately, since generated cxx::bridge code
/// refers explicitly to ::cxx. See
/// https://github.com/google/autocxx/issues/36
pub use cxx;
pub struct CppFilePair {
pub header: Vec<u8>,
pub implementation: Vec<u8>,
pub header_name: String,
}
pub struct GeneratedCpp(pub Vec<CppFilePair>);
/// Errors which may occur in generating bindings for these C++
/// functions.
#[derive(Debug)]
pub enum Error {
/// Any error reported by bindgen, generating the C++ bindings.
/// Any C++ parsing errors, etc. would be reported this way.
Bindgen(()),
/// Any problem parsing the Rust file.
Parsing(syn::Error),
/// No `include_cpp!` macro could be found.
NoAutoCxxInc,
    /// The include directories specified were incorrect.
CouldNotCanoncalizeIncludeDir(PathBuf),
    /// Some error occurred in converting the bindgen-style
/// bindings to safe cxx bindings.
Conversion(conversion::ConvertError),
/// No 'generate' or 'generate_pod' was specified.
/// It might be that in future we can simply let things work
/// without any allowlist, in which case bindgen should generate
/// bindings for everything. That just seems very unlikely to work
/// in the common case right now.
NoGenerationRequested,
}
impl Display for Error {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
            Error::Bindgen(_) => write!(f, "Bindgen was unable to generate the initial .rs bindings for this file. This may indicate a parsing problem with the C++ headers.")?,
            Error::Parsing(err) => write!(f, "The Rust file could not be parsed: {}", err)?,
Error::NoAutoCxxInc => write!(f, "No C++ include directory was provided. Consider setting AUTOCXX_INC.")?,
Error::CouldNotCanoncalizeIncludeDir(pb) => write!(f, "One of the C++ include directories provided ({}) did not appear to exist or could otherwise not be made into a canonical path.", pb.to_string_lossy())?,
Error::Conversion(err) => write!(f, "autocxx could not generate the requested bindings. {}", err)?,
Error::NoGenerationRequested => write!(f, "No 'generate' or 'generate_pod' directives were found, so we would not generate any Rust bindings despite the inclusion of C++ headers.")?,
}
Ok(())
}
}
pub type Result<T, E = Error> = std::result::Result<T, E>;
pub enum CppInclusion {
Define(String),
Header(String),
}
#[allow(clippy::large_enum_variant)] // because this is only used once
enum State {
NotGenerated,
ParseOnly,
NothingGenerated,
Generated(ItemMod, AdditionalCppGenerator),
}
/// Core of the autocxx engine. See `generate` for most details
/// on how this works.
///
/// TODO - consider whether this 'engine' crate should actually be a
/// directory of source symlinked from all the other sub-crates, so that
/// we avoid exposing an external interface from this code.
pub struct IncludeCpp {
inclusions: Vec<CppInclusion>,
type_database: TypeDatabase,
preconfigured_inc_dirs: Option<std::ffi::OsString>,
exclude_utilities: bool,
state: State,
}
impl Parse for IncludeCpp {
fn parse(input: ParseStream) -> ParseResult<Self> {
Self::new_from_parse_stream(input)
}
}
impl IncludeCpp {
fn new_from_parse_stream(input: ParseStream) -> syn::Result<Self> {
// Takes as inputs:
// 1. List of headers to include
// 2. List of #defines to include
// 3. Allowlist
let mut inclusions = Vec::new();
let mut parse_only = false;
let mut exclude_utilities = false;
let mut type_database = TypeDatabase::new();
let mut unsafe_found = false;
        while !input.is_empty() {
if input.parse::<Option<syn::Token![#]>>()?.is_some() {
let ident: syn::Ident = input.parse()?;
                if ident != "include" {
return Err(syn::Error::new(ident.span(), "expected include"));
}
let hdr: syn::LitStr = input.parse()?;
inclusions.push(CppInclusion::Header(hdr.value()));
} else if input.parse::<Option<Token![unsafe]>>()?.is_some() {
unsafe_found = true;
} else {
let ident: syn::Ident = input.parse()?;
input.parse::<Option<syn::Token![!]>>()?;
if ident == "generate" || ident == "generate_pod" {
let args;
syn::parenthesized!(args in input);
let generate: syn::LitStr = args.parse()?;
type_database.add_to_allowlist(generate.value());
if ident == "generate_pod" {
type_database
.note_pod_request(TypeName::new_from_user_input(&generate.value()));
}
} else if ident == "nested_type" {
let args;
syn::parenthesized!(args in input);
let nested: syn::LitStr = args.parse()?;
args.parse::<syn::Token![,]>()?;
let nested_in: syn::LitStr = args.parse()?;
type_database.note_nested_type(
TypeName::new_from_user_input(&nested.value()),
TypeName::new_from_user_input(&nested_in.value()),
);
} else if ident == "block" {
let args;
syn::parenthesized!(args in input);
let generate: syn::LitStr = args.parse()?;
type_database.add_to_blocklist(generate.value());
} else if ident == "parse_only" {
parse_only = true;
} else if ident == "exclude_utilities" {
exclude_utilities = true;
} else {
return Err(syn::Error::new(
ident.span(),
"expected generate, generate_pod, nested_type or exclude_utilities",
));
}
}
if input.is_empty() {
break;
}
}
        if !exclude_utilities {
type_database.add_to_allowlist("make_string".to_string());
}
        if !unsafe_found {
return Err(syn::Error::new(
input.span(),
"the unsafe keyword must be specified within each include_cpp! block",
));
}
Ok(IncludeCpp {
inclusions,
preconfigured_inc_dirs: None,
exclude_utilities,
type_database,
state: if parse_only {
State::ParseOnly
} else {
State::NotGenerated
},
})
}
pub fn new_from_syn(mac: Macro) -> Result<Self> {
mac.parse_body::<IncludeCpp>().map_err(Error::Parsing)
}
pub fn set_include_dirs<P: AsRef<std::ffi::OsStr>>(&mut self, include_dirs: P) {
self.preconfigured_inc_dirs = Some(include_dirs.as_ref().into());
}
fn build_header(&self) -> String {
join(
self.inclusions.iter().map(|incl| match incl {
CppInclusion::Define(symbol) => format!("#define {}\n", symbol),
CppInclusion::Header(path) => format!("#include \"{}\"\n", path),
}),
"",
)
}
fn determine_incdirs(&self) -> Result<Vec<PathBuf>> {
let inc_dirs = match &self.preconfigured_inc_dirs {
Some(d) => d.clone(),
None => std::env::var_os("AUTOCXX_INC").ok_or(Error::NoAutoCxxInc)?,
};
let inc_dirs = std::env::split_paths(&inc_dirs);
// TODO consider if we can or should look up the include path automatically
// instead of requiring callers always to set AUTOCXX_INC.
// On Windows, the canonical path begins with a UNC prefix that cannot be passed to
// the MSVC compiler, so dunce::canonicalize() is used instead of std::fs::canonicalize()
// See:
// * https://github.com/dtolnay/cxx/pull/41
// * https://github.com/alexcrichton/cc-rs/issues/169
inc_dirs
.map(|p| dunce::canonicalize(&p).map_err(|_| Error::CouldNotCanoncalizeIncludeDir(p)))
.collect()
}
fn make_bindgen_builder(&self) -> Result<bindgen::Builder> {
// TODO support different C++ versions
let mut builder = bindgen::builder()
.clang_args(&["-x", "c++", "-std=c++14"])
.derive_copy(false)
.derive_debug(false)
.default_enum_style(bindgen::EnumVariation::Rust {
non_exhaustive: false,
})
.enable_cxx_namespaces()
.disable_nested_struct_naming()
.generate_inline_functions(true)
.layout_tests(false); // TODO revisit later
for item in known_types::get_initial_blocklist() {
builder = builder.blacklist_item(item);
}
for inc_dir in self.determine_incdirs()? {
            // TODO work with OsStrs here to avoid the .display()
builder = builder.clang_arg(format!("-I{}", inc_dir.display()));
}
// 3. Passes allowlist and other options to the bindgen::Builder equivalent
// to --output-style=cxx --allowlist=<as passed in>
for a in self.type_database.allowlist() {
// TODO - allowlist type/functions/separately
builder = builder
.whitelist_type(a)
.whitelist_function(a)
.whitelist_var(a);
}
Ok(builder)
}
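    /// Prepends the known-types prelude to the user's `#include` lines and
    /// hands the result to bindgen as an in-memory header; "example.hpp" is
    /// merely the internal name given to that synthetic buffer.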
fn inject_header_into_bindgen(&self, mut builder: bindgen::Builder) -> bindgen::Builder {
let full_header = self.build_header();
let full_header = format!("{}\n\n{}", KNOWN_TYPES.get_prelude(), full_header,);
builder = builder.header_contents("example.hpp", &full_header);
builder
}
/// Generate the Rust bindings. Call `generate` first.
pub fn generate_rs(&self) -> TokenStream2 {
match &self.state {
State::NotGenerated => panic!("Call generate() first"),
State::Generated(itemmod, _) => itemmod.to_token_stream(),
State::NothingGenerated | State::ParseOnly => TokenStream2::new(),
}
}
fn parse_bindings(&self, bindings: bindgen::Bindings) -> Result<ItemMod> {
// This bindings object is actually a TokenStream internally and we're wasting
// effort converting to and from string. We could enhance the bindgen API
// in future.
let bindings = bindings.to_string();
// Manually add the mod ffi {} so that we can ask syn to parse
// into a single construct.
let bindings = format!("mod bindgen {{ {} }}", bindings);
info!("Bindings: {}", bindings);
syn::parse_str::<ItemMod>(&bindings).map_err(Error::Parsing)
}
fn generate_include_list(&self) -> Vec<String> {
let mut include_list = Vec::new();
for incl in &self.inclusions {
match incl {
CppInclusion::Header(ref hdr) => {
include_list.push(hdr.clone());
}
CppInclusion::Define(_) => warn!("Currently no way to define! within cxx"),
}
}
include_list
}
/// Actually examine the headers to find out what needs generating.
/// Most errors occur at this stage as we fail to interpret the C++
/// headers properly.
///
/// The basic idea is this. We will run `bindgen` which will spit
/// out a ton of Rust code corresponding to all the types and functions
/// defined in C++. We'll then post-process that bindgen output
/// into a form suitable for ingestion by `cxx`.
/// (It's the `bridge_converter` mod which does that.)
/// Along the way, the `bridge_converter` might tell us of additional
/// C++ code which we should generate, e.g. wrappers to move things
/// into and out of `UniquePtr`s.
pub fn generate(&mut self) -> Result<()> {
// If we are in parse only mode, do nothing. This is used for
// doc tests to ensure the parsing is valid, but we can't expect
// valid C++ header files or linkers to allow a complete build.
match self.state {
State::ParseOnly => return Ok(()),
State::NotGenerated => {}
State::Generated(_, _) | State::NothingGenerated => panic!("Only call generate once"),
}
if self.type_database.allowlist_is_empty() {
return Err(Error::NoGenerationRequested);
}
let builder = self.make_bindgen_builder()?;
let bindings = self
.inject_header_into_bindgen(builder)
.generate()
.map_err(Error::Bindgen)?;
let bindings = self.parse_bindings(bindings)?;
let include_list = self.generate_include_list();
let mut converter = BridgeConverter::new(&include_list, &self.type_database);
let conversion = converter
.convert(bindings, self.exclude_utilities)
.map_err(Error::Conversion)?;
let mut additional_cpp_generator = AdditionalCppGenerator::new(self.build_header());
additional_cpp_generator.add_needs(conversion.additional_cpp_needs, &self.type_database);
let mut items = conversion.items;
let mut new_bindings: ItemMod = parse_quote! {
#[allow(non_snake_case)]
#[allow(dead_code)]
#[allow(non_upper_case_globals)]
#[allow(non_camel_case_types)]
mod ffi {
}
};
new_bindings.content.as_mut().unwrap().1.append(&mut items);
info!(
"New bindings:\n{}",
rust_pretty_printer::pretty_print(&new_bindings.to_token_stream())
);
self.state = State::Generated(new_bindings, additional_cpp_generator);
Ok(())
}
/// Generate C++-side bindings for these APIs. Call `generate` first.
pub fn generate_h_and_cxx(&self) -> Result<GeneratedCpp, cxx_gen::Error> {
let mut files = Vec::new();
match &self.state {
State::ParseOnly => panic!("Cannot generate C++ in parse-only mode"),
State::NotGenerated => panic!("Call generate() first"),
State::NothingGenerated => {}
State::Generated(itemmod, additional_cpp_generator) => {
let rs = itemmod.into_token_stream();
let opt = cxx_gen::Opt::default();
let cxx_generated = cxx_gen::generate_header_and_cc(rs, &opt)?;
files.push(CppFilePair {
header: cxx_generated.header,
header_name: "cxxgen.h".to_string(),
implementation: cxx_generated.implementation,
});
match additional_cpp_generator.generate() {
None => {}
Some(additional_cpp) => {
// TODO should probably replace pragma once below with traditional include guards.
let declarations = format!("#pragma once\n{}", additional_cpp.declarations);
files.push(CppFilePair {
header: declarations.as_bytes().to_vec(),
header_name: "autocxxgen.h".to_string(),
implementation: additional_cpp.definitions.as_bytes().to_vec(),
});
info!("Additional C++ decls:\n{}", declarations);
info!("Additional C++ defs:\n{}", additional_cpp.definitions);
}
}
}
};
Ok(GeneratedCpp(files))
}
/// Get the configured include directories.
pub fn | (&self) -> Result<Vec<PathBuf>> {
self.determine_incdirs()
}
}
| include_dirs | identifier_name |
lib.rs | //! `hull` is Theseus's shell for basic interactive systems operations.
//!
//! Just as the hull is the outermost layer or "shell" of a boat or ship,
//! this crate `hull` is the shell of the "Ship of Theseus" (this OS).
//!
//! Functionally, this is similar to bash, zsh, fish, etc.
//!
//! This shell will eventually supercede the shell located at
//! `applications/shell`.
//!
//! Terminology used in this file using `sleep 1 | sleep 2 & sleep 3` as an
//! example:
//! - A line is an entire line of user input i.e. `sleep 1 | sleep 2 & sleep 3`.
//! - A task is a subset of a line used to spawn an individual task i.e. `sleep
//! 1`, `sleep 2`, and `sleep 3`.
//! - A job is a list of piped tasks i.e. `sleep 1 | sleep 2`, and `sleep 3`.
//! - A command is the first word in a task i.e. `sleep`.
//! - The arguments are any subsequent words in a task i.e. `1`, `2`, and `3`.
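//! As a rough illustration (mirroring the unit tests at the bottom of this
//! file), `a b | c & d e` parses into one background job
//! `[("a", ["b"]), ("c", [])]` and one foreground job `[("d", ["e"])]`.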
#![cfg_attr(not(test), no_std)]
#![feature(extend_one, let_chains)]
extern crate alloc;
mod builtin;
mod error;
mod job;
mod parse;
mod wrapper;
use crate::{
job::{JobPart, State},
parse::{ParsedJob, ParsedLine, ParsedTask},
};
use alloc::{borrow::ToOwned, format, string::String, sync::Arc, vec::Vec};
use app_io::{println, IoStreams};
use core::fmt::Write;
use hashbrown::HashMap;
use job::Job;
use log::{error, warn};
use noline::{builder::EditorBuilder, sync::embedded::IO as Io};
use path::Path;
use stdio::Stdio;
use sync_block::Mutex;
use task::{ExitValue, KillReason};
use tty::{Event, LineDiscipline};
pub use crate::error::{Error, Result};
pub fn main(_: Vec<String>) -> isize {
let mut shell = Shell {
discipline: app_io::line_discipline().expect("no line discipline"),
jobs: Arc::new(Mutex::new(HashMap::new())),
stop_order: Vec::new(),
history: Vec::new(),
};
let result = shell.run();
shell.set_app_discipline();
if let Err(e) = result {
println!("{e:?}");
-1
} else {
0
}
}
pub struct Shell {
discipline: Arc<LineDiscipline>,
    // TODO: Could use a vec-based data structure like Vec<Option<JoinableTaskRef>>
// Adding a job would iterate over the vec trying to find a None and if it can't, push to the
// end. Removing a job would replace the job with None.
jobs: Arc<Mutex<HashMap<usize, Job>>>,
stop_order: Vec<usize>,
history: Vec<String>,
}
impl Shell {
/// Configures the line discipline for use by the shell.
fn set_shell_discipline(&self) {
self.discipline.set_raw();
}
/// Configures the line discipline for use by applications.
fn set_app_discipline(&self) -> AppDisciplineGuard {
self.discipline.set_sane();
AppDisciplineGuard {
discipline: self.discipline.clone(),
}
}
fn run(&mut self) -> Result<()> {
self.set_shell_discipline();
let wrapper = wrapper::Wrapper {
stdin: app_io::stdin().expect("no stdin"),
stdout: app_io::stdout().expect("no stdout"),
};
let mut io = Io::new(wrapper);
let mut editor = EditorBuilder::new_unbounded()
.with_unbounded_history()
.build_sync(&mut io)
.expect("couldn't instantiate line editor");
loop {
editor.dedup_history();
if let Ok(line) = editor.readline("> ", &mut io) {
match self.execute_line(line) {
Ok(()) => {}
Err(Error::ExitRequested) => return Ok(()),
Err(e) => return Err(e),
};
} else {
write!(io, "failed to read line").expect("failed to write output");
}
}
}
fn execute_line(&mut self, line: &str) -> Result<()> {
let parsed_line = ParsedLine::from(line);
if parsed_line.is_empty() {
return Ok(());
}
// TODO: Use line editor history.
self.history.push(line.to_owned());
for (job_str, job) in parsed_line.background {
if let Err(error) = self.execute_cmd(job, job_str, false) {
error.print()?;
}
}
if let Some((job_str, job)) = parsed_line.foreground {
let app_discipline_guard = self.set_app_discipline();
match self.execute_cmd(job, job_str, true) {
Ok(Some(foreground_id)) => {
if let Err(error) = self.wait_on_job(foreground_id) {
error.print()?;
}
}
Ok(None) => {}
Err(error) => error.print()?,
}
drop(app_discipline_guard);
}
Ok(())
}
/// Executes a command.
fn execute_cmd(
&mut self,
parsed_job: ParsedJob,
job_str: &str,
current: bool,
) -> Result<Option<usize>> {
let shell_streams = app_io::streams().unwrap();
let stderr = shell_streams.stderr;
let mut previous_output = shell_streams.stdin;
let mut iter = parsed_job.into_iter().peekable();
let mut task = iter.next();
let mut jobs = self.jobs.lock();
let mut job_id = 1;
let mut temp_job = Job {
string: job_str.to_owned(),
parts: Vec::new(),
current,
};
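        // Find the lowest unused job number by probing the map: `try_insert`
        // hands the job back on collision, so we retry with the next id.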
loop {
match jobs.try_insert(job_id, temp_job) {
Ok(_) => break,
Err(e) => {
temp_job = e.value;
}
}
job_id += 1;
}
drop(jobs);
while let Some(ParsedTask { command, args }) = task {
if iter.peek().is_none() {
if let Some(result) = self.execute_builtin(command, &args) {
self.jobs.lock().remove(&job_id);
return result.map(|_| None);
} else {
let streams = IoStreams {
// TODO: Technically clone not needed.
stdin: previous_output.clone(),
stdout: shell_streams.stdout.clone(),
stderr: stderr.clone(),
discipline: shell_streams.discipline,
};
let part = self.resolve_external(command, args, streams, job_id)?;
self.jobs.lock().get_mut(&job_id).unwrap().parts.push(part);
return Ok(Some(job_id));
}
}
// TODO: Piped builtin commands.
let pipe = Stdio::new();
let streams = IoStreams {
stdin: previous_output.clone(),
stdout: Arc::new(pipe.get_writer()),
stderr: stderr.clone(),
discipline: None,
};
let part = self.resolve_external(command, args, streams, job_id)?;
self.jobs.lock().get_mut(&job_id).unwrap().parts.push(part);
previous_output = Arc::new(pipe.get_reader());
task = iter.next();
}
unreachable!("called execute_cmd with empty command");
}
fn wait_on_job(&mut self, num: usize) -> Result<()> {
let jobs = self.jobs.lock();
let Some(job) = jobs.get(&num) else {
return Ok(())
};
        if !job.current {
warn!("asked to wait on non-current job");
return Ok(());
}
drop(jobs);
self.discipline.clear_events();
let event_receiver = self.discipline.event_receiver();
loop {
// TODO: Use async futures::select! loop?
if let Ok(event) = event_receiver.try_receive() {
match event {
Event::CtrlC => {
if let Some(mut job) = self.jobs.lock().remove(&num) {
job.kill()?;
} else {
error!("tried to kill a job that doesn't exist");
}
return Err(Error::Command(130));
}
Event::CtrlD => error!("received ctrl+d event"),
Event::CtrlZ => error!("received ctrl+z event"),
}
} else {
let mut jobs = self.jobs.lock();
if let Some(job) = jobs.get_mut(&num)
&& let Some(exit_value) = job.exit_value()
{
jobs.remove(&num);
return match exit_value {
0 => Ok(()),
_ => Err(Error::Command(exit_value)),
};
}
}
scheduler::schedule();
}
}
fn execute_builtin(&mut self, cmd: &str, args: &[&str]) -> Option<Result<()>> {
Some(match cmd {
"" => Ok(()),
"alias" => self.alias(args),
"bg" => self.bg(args),
"cd" => self.cd(args),
"exec" => self.exec(args),
"exit" => self.exit(args),
"export" => self.export(args),
"fc" => self.fc(args),
"fg" => self.fg(args),
"getopts" => self.getopts(args),
"hash" => self.hash(args),
"history" => {
self.history(args);
Ok(())
}
"jobs" => self.jobs(args),
"set" => self.set(args),
"unalias" => self.unalias(args),
"unset" => self.unset(args),
"wait" => self.wait(args),
_ => return None,
})
}
fn resolve_external(
&self,
cmd: &str,
args: Vec<&str>,
streams: IoStreams,
job_id: usize,
) -> Result<JobPart> | .map_err(Error::SpawnFailed)?
.argument(args.into_iter().map(ToOwned::to_owned).collect::<Vec<_>>())
.block()
.spawn()
.unwrap();
let task_ref = task.clone();
let id = task.id;
// TODO: Double arc :(
app_io::insert_child_streams(id, streams);
task.unblock().map_err(Error::UnblockFailed)?;
// Spawn watchdog task.
spawn::new_task_builder(
move |_| {
let task_ref = task.clone();
let exit_value = match task.join().unwrap() {
ExitValue::Completed(status) => {
match status.downcast_ref::<isize>() {
Some(num) => *num,
// FIXME: Document/decide on a number for when app doesn't
// return isize.
None => 210,
}
}
ExitValue::Killed(reason) => match reason {
// FIXME: Document/decide on a number. This is used by bash.
KillReason::Requested => 130,
KillReason::Panic(_) => 1,
KillReason::Exception(num) => num.into(),
},
};
let mut jobs = self.jobs.lock();
if let Some(mut job) = jobs.remove(&job_id) {
for part in job.parts.iter_mut() {
if part.task == task_ref {
part.state = State::Done(exit_value);
break;
}
}
if job.current {
jobs.insert(job_id, job);
}
}
},
(),
)
.spawn()
.map_err(Error::SpawnFailed)?;
Ok(JobPart {
state: State::Running,
task: task_ref,
})
}
}
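/// Guard returned by `set_app_discipline`; restores the shell's raw line
/// discipline when dropped (see the `Drop` impl below).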
struct AppDisciplineGuard {
discipline: Arc<LineDiscipline>,
}
impl Drop for AppDisciplineGuard {
fn drop(&mut self) {
self.discipline.set_raw();
}
}
#[cfg(test)]
mod tests {
use super::*;
use alloc::vec;
#[test]
fn test_split_pipes() {
assert_eq!(
split_pipes("a b c |d e f|g | h | i j"),
vec![
("a", vec!["b", "c"]),
("d", vec!["e", "f"]),
("g", vec![]),
("h", vec![]),
("i", vec!["j"])
]
);
}
#[test]
fn test_parse_line() {
assert_eq!(
parse_line("a b|c &d e f|g | h & i j | k"),
ParsedLine {
background: vec![
vec![("a", vec!["b"]), ("c", vec![])],
vec![("d", vec!["e", "f"]), ("g", vec![]), ("h", vec![])],
],
foreground: Some(vec![("i", vec!["j"]), ("k", vec![])]),
}
);
assert_eq!(
parse_line("a b|c &d e f|g | h & i j | k& "),
ParsedLine {
background: vec![
vec![("a", vec!["b"]), ("c", vec![])],
vec![("d", vec!["e", "f"]), ("g", vec![]), ("h", vec![])],
vec![("i", vec!["j"]), ("k", vec![])]
],
foreground: None,
}
);
}
}
| {
let namespace_dir = task::get_my_current_task()
.map(|t| t.get_namespace().dir().clone())
.expect("couldn't get namespace dir");
let crate_name = format!("{cmd}-");
let mut matching_files = namespace_dir
.get_files_starting_with(&crate_name)
.into_iter();
let app_path = match matching_files.next() {
Some(f) => Path::new(f.lock().get_absolute_path()),
None => return Err(Error::CommandNotFound(cmd.to_owned())),
};
if matching_files.next().is_some() {
println!("multiple matching files found, running: {app_path}");
}
let task = spawn::new_application_task_builder(app_path, None) | identifier_body |
lib.rs | //! `hull` is Theseus's shell for basic interactive systems operations.
//!
//! Just as the hull is the outermost layer or "shell" of a boat or ship,
//! this crate `hull` is the shell of the "Ship of Theseus" (this OS).
//!
//! Functionally, this is similar to bash, zsh, fish, etc.
//!
//! This shell will eventually supercede the shell located at
//! `applications/shell`.
//!
//! Terminology used in this file using `sleep 1 | sleep 2 & sleep 3` as an
//! example:
//! - A line is an entire line of user input i.e. `sleep 1 | sleep 2 & sleep 3`.
//! - A task is a subset of a line used to spawn an individual task i.e. `sleep
//! 1`, `sleep 2`, and `sleep 3`.
//! - A job is a list of piped tasks i.e. `sleep 1 | sleep 2`, and `sleep 3`.
//! - A command is the first word in a task i.e. `sleep`.
//! - The arguments are any subsequent words in a task i.e. `1`, `2`, and `3`.
#![cfg_attr(not(test), no_std)]
#![feature(extend_one, let_chains)]
extern crate alloc;
mod builtin;
mod error;
mod job;
mod parse;
mod wrapper;
use crate::{
job::{JobPart, State},
parse::{ParsedJob, ParsedLine, ParsedTask},
};
use alloc::{borrow::ToOwned, format, string::String, sync::Arc, vec::Vec};
use app_io::{println, IoStreams};
use core::fmt::Write;
use hashbrown::HashMap;
use job::Job;
use log::{error, warn};
use noline::{builder::EditorBuilder, sync::embedded::IO as Io};
use path::Path;
use stdio::Stdio;
use sync_block::Mutex;
use task::{ExitValue, KillReason};
use tty::{Event, LineDiscipline};
pub use crate::error::{Error, Result};
pub fn main(_: Vec<String>) -> isize {
let mut shell = Shell {
discipline: app_io::line_discipline().expect("no line discipline"),
jobs: Arc::new(Mutex::new(HashMap::new())),
stop_order: Vec::new(),
history: Vec::new(),
};
let result = shell.run();
shell.set_app_discipline();
if let Err(e) = result {
println!("{e:?}");
-1
} else {
0
}
}
pub struct Shell {
discipline: Arc<LineDiscipline>,
    // TODO: Could use a vec-based data structure like Vec<Option<JoinableTaskRef>>
// Adding a job would iterate over the vec trying to find a None and if it can't, push to the
// end. Removing a job would replace the job with None.
jobs: Arc<Mutex<HashMap<usize, Job>>>,
stop_order: Vec<usize>,
history: Vec<String>,
}
impl Shell {
/// Configures the line discipline for use by the shell.
fn set_shell_discipline(&self) {
self.discipline.set_raw();
}
/// Configures the line discipline for use by applications.
fn set_app_discipline(&self) -> AppDisciplineGuard {
self.discipline.set_sane();
AppDisciplineGuard {
discipline: self.discipline.clone(),
}
}
fn run(&mut self) -> Result<()> {
self.set_shell_discipline();
let wrapper = wrapper::Wrapper {
stdin: app_io::stdin().expect("no stdin"),
stdout: app_io::stdout().expect("no stdout"),
};
let mut io = Io::new(wrapper);
let mut editor = EditorBuilder::new_unbounded()
.with_unbounded_history()
.build_sync(&mut io)
.expect("couldn't instantiate line editor");
loop {
editor.dedup_history();
if let Ok(line) = editor.readline("> ", &mut io) {
match self.execute_line(line) {
Ok(()) => {}
Err(Error::ExitRequested) => return Ok(()),
Err(e) => return Err(e),
};
} else {
write!(io, "failed to read line").expect("failed to write output");
}
}
}
fn execute_line(&mut self, line: &str) -> Result<()> {
let parsed_line = ParsedLine::from(line);
if parsed_line.is_empty() {
return Ok(());
}
// TODO: Use line editor history.
self.history.push(line.to_owned());
for (job_str, job) in parsed_line.background {
if let Err(error) = self.execute_cmd(job, job_str, false) {
error.print()?;
}
}
if let Some((job_str, job)) = parsed_line.foreground {
let app_discipline_guard = self.set_app_discipline();
match self.execute_cmd(job, job_str, true) {
Ok(Some(foreground_id)) => {
if let Err(error) = self.wait_on_job(foreground_id) {
error.print()?;
}
}
Ok(None) => {}
Err(error) => error.print()?,
}
drop(app_discipline_guard);
}
Ok(())
}
/// Executes a command.
fn execute_cmd(
&mut self,
parsed_job: ParsedJob,
job_str: &str,
current: bool,
) -> Result<Option<usize>> {
let shell_streams = app_io::streams().unwrap();
let stderr = shell_streams.stderr;
let mut previous_output = shell_streams.stdin;
let mut iter = parsed_job.into_iter().peekable();
let mut task = iter.next();
let mut jobs = self.jobs.lock();
let mut job_id = 1;
let mut temp_job = Job {
string: job_str.to_owned(),
parts: Vec::new(),
current,
};
loop {
match jobs.try_insert(job_id, temp_job) {
Ok(_) => break,
Err(e) => {
temp_job = e.value;
}
}
job_id += 1; | while let Some(ParsedTask { command, args }) = task {
if iter.peek().is_none() {
if let Some(result) = self.execute_builtin(command, &args) {
self.jobs.lock().remove(&job_id);
return result.map(|_| None);
} else {
let streams = IoStreams {
// TODO: Technically clone not needed.
stdin: previous_output.clone(),
stdout: shell_streams.stdout.clone(),
stderr: stderr.clone(),
discipline: shell_streams.discipline,
};
let part = self.resolve_external(command, args, streams, job_id)?;
self.jobs.lock().get_mut(&job_id).unwrap().parts.push(part);
return Ok(Some(job_id));
}
}
// TODO: Piped builtin commands.
let pipe = Stdio::new();
let streams = IoStreams {
stdin: previous_output.clone(),
stdout: Arc::new(pipe.get_writer()),
stderr: stderr.clone(),
discipline: None,
};
let part = self.resolve_external(command, args, streams, job_id)?;
self.jobs.lock().get_mut(&job_id).unwrap().parts.push(part);
previous_output = Arc::new(pipe.get_reader());
task = iter.next();
}
unreachable!("called execute_cmd with empty command");
}
fn wait_on_job(&mut self, num: usize) -> Result<()> {
let jobs = self.jobs.lock();
let Some(job) = jobs.get(&num) else {
return Ok(())
};
        if !job.current {
warn!("asked to wait on non-current job");
return Ok(());
}
drop(jobs);
self.discipline.clear_events();
let event_receiver = self.discipline.event_receiver();
loop {
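            // Poll for terminal events (ctrl+c/d/z) and for job completion,
            // yielding to the scheduler between iterations.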
// TODO: Use async futures::select! loop?
if let Ok(event) = event_receiver.try_receive() {
match event {
Event::CtrlC => {
if let Some(mut job) = self.jobs.lock().remove(&num) {
job.kill()?;
} else {
error!("tried to kill a job that doesn't exist");
}
return Err(Error::Command(130));
}
Event::CtrlD => error!("received ctrl+d event"),
Event::CtrlZ => error!("received ctrl+z event"),
}
} else {
let mut jobs = self.jobs.lock();
if let Some(job) = jobs.get_mut(&num)
&& let Some(exit_value) = job.exit_value()
{
jobs.remove(&num);
return match exit_value {
0 => Ok(()),
_ => Err(Error::Command(exit_value)),
};
}
}
scheduler::schedule();
}
}
fn execute_builtin(&mut self, cmd: &str, args: &[&str]) -> Option<Result<()>> {
Some(match cmd {
"" => Ok(()),
"alias" => self.alias(args),
"bg" => self.bg(args),
"cd" => self.cd(args),
"exec" => self.exec(args),
"exit" => self.exit(args),
"export" => self.export(args),
"fc" => self.fc(args),
"fg" => self.fg(args),
"getopts" => self.getopts(args),
"hash" => self.hash(args),
"history" => {
self.history(args);
Ok(())
}
"jobs" => self.jobs(args),
"set" => self.set(args),
"unalias" => self.unalias(args),
"unset" => self.unset(args),
"wait" => self.wait(args),
_ => return None,
})
}
fn resolve_external(
&self,
cmd: &str,
args: Vec<&str>,
streams: IoStreams,
job_id: usize,
) -> Result<JobPart> {
let namespace_dir = task::get_my_current_task()
.map(|t| t.get_namespace().dir().clone())
.expect("couldn't get namespace dir");
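        // Resolve the command to an application file in the namespace
        // directory by prefix-matching on "<cmd>-"; if several files match,
        // the first one is run and a note is printed.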
let crate_name = format!("{cmd}-");
let mut matching_files = namespace_dir
.get_files_starting_with(&crate_name)
.into_iter();
let app_path = match matching_files.next() {
Some(f) => Path::new(f.lock().get_absolute_path()),
None => return Err(Error::CommandNotFound(cmd.to_owned())),
};
if matching_files.next().is_some() {
println!("multiple matching files found, running: {app_path}");
}
let task = spawn::new_application_task_builder(app_path, None)
.map_err(Error::SpawnFailed)?
.argument(args.into_iter().map(ToOwned::to_owned).collect::<Vec<_>>())
.block()
.spawn()
.unwrap();
let task_ref = task.clone();
let id = task.id;
// TODO: Double arc :(
app_io::insert_child_streams(id, streams);
task.unblock().map_err(Error::UnblockFailed)?;
// Spawn watchdog task.
spawn::new_task_builder(
move |_| {
let task_ref = task.clone();
let exit_value = match task.join().unwrap() {
ExitValue::Completed(status) => {
match status.downcast_ref::<isize>() {
Some(num) => *num,
// FIXME: Document/decide on a number for when app doesn't
// return isize.
None => 210,
}
}
ExitValue::Killed(reason) => match reason {
// FIXME: Document/decide on a number. This is used by bash.
KillReason::Requested => 130,
KillReason::Panic(_) => 1,
KillReason::Exception(num) => num.into(),
},
};
let mut jobs = self.jobs.lock();
if let Some(mut job) = jobs.remove(&job_id) {
for part in job.parts.iter_mut() {
if part.task == task_ref {
part.state = State::Done(exit_value);
break;
}
}
if job.current {
jobs.insert(job_id, job);
}
}
},
(),
)
.spawn()
.map_err(Error::SpawnFailed)?;
Ok(JobPart {
state: State::Running,
task: task_ref,
})
}
}
struct AppDisciplineGuard {
discipline: Arc<LineDiscipline>,
}
impl Drop for AppDisciplineGuard {
fn drop(&mut self) {
self.discipline.set_raw();
}
}
#[cfg(test)]
mod tests {
use super::*;
use alloc::vec;
#[test]
fn test_split_pipes() {
assert_eq!(
split_pipes("a b c |d e f|g | h | i j"),
vec![
("a", vec!["b", "c"]),
("d", vec!["e", "f"]),
("g", vec![]),
("h", vec![]),
("i", vec!["j"])
]
);
}
#[test]
fn test_parse_line() {
assert_eq!(
parse_line("a b|c &d e f|g | h & i j | k"),
ParsedLine {
background: vec![
vec![("a", vec!["b"]), ("c", vec![])],
vec![("d", vec!["e", "f"]), ("g", vec![]), ("h", vec![])],
],
foreground: Some(vec![("i", vec!["j"]), ("k", vec![])]),
}
);
assert_eq!(
parse_line("a b|c &d e f|g | h & i j | k& "),
ParsedLine {
background: vec![
vec![("a", vec!["b"]), ("c", vec![])],
vec![("d", vec!["e", "f"]), ("g", vec![]), ("h", vec![])],
vec![("i", vec!["j"]), ("k", vec![])]
],
foreground: None,
}
);
}
} | }
drop(jobs);
| random_line_split |
lib.rs | //! `hull` is Theseus's shell for basic interactive systems operations.
//!
//! Just as the hull is the outermost layer or "shell" of a boat or ship,
//! this crate `hull` is the shell of the "Ship of Theseus" (this OS).
//!
//! Functionally, this is similar to bash, zsh, fish, etc.
//!
//! This shell will eventually supersede the shell located at
//! `applications/shell`.
//!
//! Terminology used in this file using `sleep 1 | sleep 2 & sleep 3` as an
//! example:
//! - A line is an entire line of user input i.e. `sleep 1 | sleep 2 & sleep 3`.
//! - A task is a subset of a line used to spawn an individual task i.e. `sleep
//! 1`, `sleep 2`, and `sleep 3`.
//! - A job is a list of piped tasks i.e. `sleep 1 | sleep 2`, and `sleep 3`.
//! - A command is the first word in a task i.e. `sleep`.
//! - The arguments are any subsequent words in a task i.e. `1`, `2`, and `3`.
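//!
//! Reading those definitions against the example line above: `sleep 1 | sleep 2`
//! forms one background job (it is terminated by `&`) and `sleep 3` is the
//! foreground job. This is a restatement of the terminology, not extra
//! documented behaviour.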
#![cfg_attr(not(test), no_std)]
#![feature(extend_one, let_chains)]
extern crate alloc;
mod builtin;
mod error;
mod job;
mod parse;
mod wrapper;
use crate::{
job::{JobPart, State},
parse::{ParsedJob, ParsedLine, ParsedTask},
};
use alloc::{borrow::ToOwned, format, string::String, sync::Arc, vec::Vec};
use app_io::{println, IoStreams};
use core::fmt::Write;
use hashbrown::HashMap;
use job::Job;
use log::{error, warn};
use noline::{builder::EditorBuilder, sync::embedded::IO as Io};
use path::Path;
use stdio::Stdio;
use sync_block::Mutex;
use task::{ExitValue, KillReason};
use tty::{Event, LineDiscipline};
pub use crate::error::{Error, Result};
pub fn main(_: Vec<String>) -> isize {
let mut shell = Shell {
discipline: app_io::line_discipline().expect("no line discipline"),
jobs: Arc::new(Mutex::new(HashMap::new())),
stop_order: Vec::new(),
history: Vec::new(),
};
let result = shell.run();
shell.set_app_discipline();
if let Err(e) = result {
println!("{e:?}");
-1
} else {
0
}
}
pub struct Shell {
discipline: Arc<LineDiscipline>,
// TODO: Could use a vec-based data structure like Vec<Option<JoinableTaskRef>>
// Adding a job would iterate over the vec trying to find a None and if it can't, push to the
// end. Removing a job would replace the job with None.
jobs: Arc<Mutex<HashMap<usize, Job>>>,
stop_order: Vec<usize>,
history: Vec<String>,
}
impl Shell {
/// Configures the line discipline for use by the shell.
fn set_shell_discipline(&self) {
self.discipline.set_raw();
}
/// Configures the line discipline for use by applications.
fn set_app_discipline(&self) -> AppDisciplineGuard {
self.discipline.set_sane();
AppDisciplineGuard {
discipline: self.discipline.clone(),
}
}
fn run(&mut self) -> Result<()> {
self.set_shell_discipline();
let wrapper = wrapper::Wrapper {
stdin: app_io::stdin().expect("no stdin"),
stdout: app_io::stdout().expect("no stdout"),
};
let mut io = Io::new(wrapper);
let mut editor = EditorBuilder::new_unbounded()
.with_unbounded_history()
.build_sync(&mut io)
.expect("couldn't instantiate line editor");
loop {
editor.dedup_history();
if let Ok(line) = editor.readline("> ", &mut io) {
match self.execute_line(line) {
Ok(()) => {}
Err(Error::ExitRequested) => return Ok(()),
Err(e) => return Err(e),
};
} else {
write!(io, "failed to read line").expect("failed to write output");
}
}
}
fn | (&mut self, line: &str) -> Result<()> {
let parsed_line = ParsedLine::from(line);
if parsed_line.is_empty() {
return Ok(());
}
// TODO: Use line editor history.
self.history.push(line.to_owned());
for (job_str, job) in parsed_line.background {
if let Err(error) = self.execute_cmd(job, job_str, false) {
error.print()?;
}
}
if let Some((job_str, job)) = parsed_line.foreground {
let app_discipline_guard = self.set_app_discipline();
match self.execute_cmd(job, job_str, true) {
Ok(Some(foreground_id)) => {
if let Err(error) = self.wait_on_job(foreground_id) {
error.print()?;
}
}
Ok(None) => {}
Err(error) => error.print()?,
}
drop(app_discipline_guard);
}
Ok(())
}
/// Executes a command.
fn execute_cmd(
&mut self,
parsed_job: ParsedJob,
job_str: &str,
current: bool,
) -> Result<Option<usize>> {
let shell_streams = app_io::streams().unwrap();
let stderr = shell_streams.stderr;
let mut previous_output = shell_streams.stdin;
let mut iter = parsed_job.into_iter().peekable();
let mut task = iter.next();
let mut jobs = self.jobs.lock();
let mut job_id = 1;
let mut temp_job = Job {
string: job_str.to_owned(),
parts: Vec::new(),
current,
};
loop {
match jobs.try_insert(job_id, temp_job) {
Ok(_) => break,
Err(e) => {
temp_job = e.value;
}
}
job_id += 1;
}
drop(jobs);
while let Some(ParsedTask { command, args }) = task {
if iter.peek().is_none() {
if let Some(result) = self.execute_builtin(command, &args) {
self.jobs.lock().remove(&job_id);
return result.map(|_| None);
} else {
let streams = IoStreams {
// TODO: Technically clone not needed.
stdin: previous_output.clone(),
stdout: shell_streams.stdout.clone(),
stderr: stderr.clone(),
discipline: shell_streams.discipline,
};
let part = self.resolve_external(command, args, streams, job_id)?;
self.jobs.lock().get_mut(&job_id).unwrap().parts.push(part);
return Ok(Some(job_id));
}
}
// TODO: Piped builtin commands.
let pipe = Stdio::new();
let streams = IoStreams {
stdin: previous_output.clone(),
stdout: Arc::new(pipe.get_writer()),
stderr: stderr.clone(),
discipline: None,
};
let part = self.resolve_external(command, args, streams, job_id)?;
self.jobs.lock().get_mut(&job_id).unwrap().parts.push(part);
previous_output = Arc::new(pipe.get_reader());
task = iter.next();
}
unreachable!("called execute_cmd with empty command");
}
fn wait_on_job(&mut self, num: usize) -> Result<()> {
let jobs = self.jobs.lock();
let Some(job) = jobs.get(&num) else {
return Ok(())
};
if !job.current {
warn!("asked to wait on non-current job");
return Ok(());
}
drop(jobs);
self.discipline.clear_events();
let event_receiver = self.discipline.event_receiver();
loop {
// TODO: Use async futures::select! loop?
if let Ok(event) = event_receiver.try_receive() {
match event {
Event::CtrlC => {
if let Some(mut job) = self.jobs.lock().remove(&num) {
job.kill()?;
} else {
error!("tried to kill a job that doesn't exist");
}
return Err(Error::Command(130));
}
Event::CtrlD => error!("received ctrl+d event"),
Event::CtrlZ => error!("received ctrl+z event"),
}
} else {
let mut jobs = self.jobs.lock();
if let Some(job) = jobs.get_mut(&num)
&& let Some(exit_value) = job.exit_value()
{
jobs.remove(&num);
return match exit_value {
0 => Ok(()),
_ => Err(Error::Command(exit_value)),
};
}
}
scheduler::schedule();
}
}
fn execute_builtin(&mut self, cmd: &str, args: &[&str]) -> Option<Result<()>> {
Some(match cmd {
"" => Ok(()),
"alias" => self.alias(args),
"bg" => self.bg(args),
"cd" => self.cd(args),
"exec" => self.exec(args),
"exit" => self.exit(args),
"export" => self.export(args),
"fc" => self.fc(args),
"fg" => self.fg(args),
"getopts" => self.getopts(args),
"hash" => self.hash(args),
"history" => {
self.history(args);
Ok(())
}
"jobs" => self.jobs(args),
"set" => self.set(args),
"unalias" => self.unalias(args),
"unset" => self.unset(args),
"wait" => self.wait(args),
_ => return None,
})
}
fn resolve_external(
&self,
cmd: &str,
args: Vec<&str>,
streams: IoStreams,
job_id: usize,
) -> Result<JobPart> {
let namespace_dir = task::get_my_current_task()
.map(|t| t.get_namespace().dir().clone())
.expect("couldn't get namespace dir");
let crate_name = format!("{cmd}-");
let mut matching_files = namespace_dir
.get_files_starting_with(&crate_name)
.into_iter();
let app_path = match matching_files.next() {
Some(f) => Path::new(f.lock().get_absolute_path()),
None => return Err(Error::CommandNotFound(cmd.to_owned())),
};
if matching_files.next().is_some() {
println!("multiple matching files found, running: {app_path}");
}
let task = spawn::new_application_task_builder(app_path, None)
.map_err(Error::SpawnFailed)?
.argument(args.into_iter().map(ToOwned::to_owned).collect::<Vec<_>>())
.block()
.spawn()
.unwrap();
let task_ref = task.clone();
let id = task.id;
// TODO: Double arc :(
app_io::insert_child_streams(id, streams);
task.unblock().map_err(Error::UnblockFailed)?;
// Spawn watchdog task.
spawn::new_task_builder(
move |_| {
let task_ref = task.clone();
let exit_value = match task.join().unwrap() {
ExitValue::Completed(status) => {
match status.downcast_ref::<isize>() {
Some(num) => *num,
// FIXME: Document/decide on a number for when app doesn't
// return isize.
None => 210,
}
}
ExitValue::Killed(reason) => match reason {
// FIXME: Document/decide on a number. This is used by bash.
KillReason::Requested => 130,
KillReason::Panic(_) => 1,
KillReason::Exception(num) => num.into(),
},
};
let mut jobs = self.jobs.lock();
if let Some(mut job) = jobs.remove(&job_id) {
for part in job.parts.iter_mut() {
if part.task == task_ref {
part.state = State::Done(exit_value);
break;
}
}
if job.current {
jobs.insert(job_id, job);
}
}
},
(),
)
.spawn()
.map_err(Error::SpawnFailed)?;
Ok(JobPart {
state: State::Running,
task: task_ref,
})
}
}
struct AppDisciplineGuard {
discipline: Arc<LineDiscipline>,
}
impl Drop for AppDisciplineGuard {
fn drop(&mut self) {
self.discipline.set_raw();
}
}
#[cfg(test)]
mod tests {
use super::*;
use alloc::vec;
#[test]
fn test_split_pipes() {
assert_eq!(
split_pipes("a b c |d e f|g | h | i j"),
vec![
("a", vec!["b", "c"]),
("d", vec!["e", "f"]),
("g", vec![]),
("h", vec![]),
("i", vec!["j"])
]
);
}
#[test]
fn test_parse_line() {
assert_eq!(
parse_line("a b|c &d e f|g | h & i j | k"),
ParsedLine {
background: vec![
vec![("a", vec!["b"]), ("c", vec![])],
vec![("d", vec!["e", "f"]), ("g", vec![]), ("h", vec![])],
],
foreground: Some(vec![("i", vec!["j"]), ("k", vec![])]),
}
);
assert_eq!(
parse_line("a b|c &d e f|g | h & i j | k& "),
ParsedLine {
background: vec![
vec![("a", vec!["b"]), ("c", vec![])],
vec![("d", vec!["e", "f"]), ("g", vec![]), ("h", vec![])],
vec![("i", vec!["j"]), ("k", vec![])]
],
foreground: None,
}
);
}
}
| execute_line | identifier_name |
models.rs | /// What does our data look like?
// Main logs contain when the bot started to run and the total log amount
// Keyword logs contain each individual keyword with its own logs
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::RwLock;
type Account = String;
type KeywordId = u64;
type KeywordStats = HashMap<KeywordId, KeywordStatistics>;
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct Statistics {
pub main_stats: MainStats,
pub keyword_stats: KeywordStats,
}
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct KeywordStatistics {
pub stats: KeywordStat,
pub keyword_logs: Vec<Log>,
}
impl Statistics {
pub fn new(account: Account) -> Self {
Statistics {
main_stats: MainStats::new(account),
keyword_stats: HashMap::new(),
}
}
}
pub type Db = Arc<RwLock<HashMap<Account, Statistics>>>;
pub fn blank_db() -> Db {
Arc::new(RwLock::new(HashMap::new()))
}
// Stats is the top-level statistics.
// It contains the inner individual keyword statistics.
// However, every log related to a keyword goes into keyword_db.
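// A rough sketch of how one serialized `Statistics` entry looks (field values
// here are illustrative placeholders, not taken from a real run):
// {
//   "main_stats": { "account_name": "acme", "error_counts": 0, "log_counts": 0, ... },
//   "keyword_stats": {
//     "42": { "stats": { "id": 42, "keyword": "shoes", ... }, "keyword_logs": [ ... ] }
//   }
// }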
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct MainStats {
// Name of Account
pub account_name: String,
// Total error counts. It is keyword error + other errors
pub error_counts: u64,
// Total number of logs. It is main log + all log from keywords
pub log_counts: u64,
// Currently unused
pub running: bool,
// Total api calls made by other bot.
pub no_api_calls: u64,
// API calls used for this bot
pub no_internal_api_calls: u64,
// When the other bot was started?
pub started_at: String,
// When the bot was last updated. This field must be updated whenever new logs or keyword logs arrive
pub last_updated_at: String,
// Logs are cleared out and only top 100 logs are placed if program memory goes beyond
// 1G
pub logs: Vec<Log>,
}
impl MainStats {
pub fn new(account_name: Account) -> Self {
MainStats {
account_name,
error_counts: 0,
running: false,
no_api_calls: 0,
log_counts: 0,
no_internal_api_calls: 0,
started_at: crate::helpers::current_time_string(),
last_updated_at: crate::helpers::current_time_string(),
logs: Vec::new(),
}
}
}
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct UpdateStat {
pub error_counts: Option<u64>,
pub running: Option<bool>,
// How many api calls were made since last updated
pub no_of_api_call_diff: Option<u64>,
}
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct Log {
pub r#type: String,
pub time: String,
pub message: String,
pub meta: Option<Value>,
}
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct KeywordStat {
pub id: u64,
pub last_updated_at: String,
pub error_counts: u64,
pub log_counts: u64,
pub name: Option<String>,
pub keyword: Option<String>,
pub placement: Option<u64>,
pub running: Option<bool>,
pub ads_running: Option<bool>,
pub ads_position: Option<u64>,
pub current_price: Option<f64>,
pub is_max_price_reached: Option<bool>,
pub is_min_price_reached: Option<bool>,
pub max_expense_reached: Option<bool>,
}
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct UpdateKeywordStat {
pub id: u64,
pub name: Option<String>,
pub current_price: Option<f64>,
pub keyword: Option<String>,
pub placement: Option<u64>,
pub running: Option<bool>,
pub error_counts: Option<u64>,
pub ads_running: Option<bool>,
pub ads_position: Option<u64>,
pub logs: Option<Vec<Log>>,
pub is_max_price_reached: Option<bool>,
pub is_min_price_reached: Option<bool>,
pub max_expense_reached: Option<bool>,
}
impl KeywordStatistics {
pub fn update(stats: &mut Statistics, input: &UpdateKeywordStat) {
let main_stats = &mut stats.main_stats;
main_stats.last_updated_at = crate::helpers::current_time_string();
let keyword_stats = &mut stats.keyword_stats;
if let Some(ks) = keyword_stats.get_mut(&input.id) {
ks.stats.last_updated_at = crate::helpers::current_time_string();
if let Some(ru) = input.running {
ks.stats.running = Some(ru);
}
// TODO: use `if let Some(..)` for the remaining fields as well.
if input.ads_running.is_some() {
ks.stats.ads_running = input.ads_running
}
if input.is_max_price_reached.is_some() {
ks.stats.is_max_price_reached = input.is_max_price_reached
}
if input.is_min_price_reached.is_some() {
ks.stats.is_min_price_reached = input.is_min_price_reached
}
if input.ads_position.is_some() {
ks.stats.ads_position = input.ads_position
}
if input.max_expense_reached.is_some() {
ks.stats.max_expense_reached = input.max_expense_reached
}
if let Some(cp) = input.current_price {
ks.stats.current_price = Some(cp);
}
} else {
let keyword_statistics = KeywordStatistics {
stats: KeywordStat {
id: input.id,
error_counts: 0,
log_counts: 0,
name: input.name.to_owned(),
keyword: input.keyword.to_owned(),
placement: input.placement,
last_updated_at: crate::helpers::current_time_string(),
running: input.running,
ads_running: input.ads_running,
ads_position: input.ads_position,
current_price: input.current_price,
is_max_price_reached: None,
is_min_price_reached: None,
max_expense_reached: None,
},
keyword_logs: Vec::with_capacity(1000),
};
keyword_stats.insert(input.id, keyword_statistics);
}
}
pub fn add_logs(stats: &mut Statistics, id: KeywordId, input: Log) {
let main_stats = &mut stats.main_stats;
let keyword_stats = &mut stats.keyword_stats;
if let Some(ks) = keyword_stats.get_mut(&id) {
main_stats.last_updated_at = crate::helpers::current_time_string();
if input.r#type == "error" {
main_stats.error_counts += 1;
ks.stats.error_counts += 1;
}
main_stats.log_counts += 1;
ks.stats.log_counts += 1;
ks.keyword_logs.push(input);
}
}
}
#[derive(Debug, Deserialize, Serialize, Clone)]
struct BackupStatistics {
stats: MainStats,
keyword: HashMap<KeywordId, Vec<Log>>,
}
// // We might want to reanalyze previous record for that we are providing ability to
// // use old database.
// pub async fn load_old_database() -> Option<Statistics> {
// let aa = std::env::var("JSON_FILE_PATH");
// if aa.is_ok() {
// let ff = std::fs::File::open(aa.unwrap()).unwrap();
// let json: BackupStatistics = serde_json::from_reader(ff).unwrap();
// let stats = json.stats;
// let keyword = json.keyword;
// let account = stats.account_name.to_owned();
// let mut stats_hm = HashMap::new();
// stats_hm.insert(account.clone(), stats);
// let mut keywords_hm = HashMap::new();
// keywords_hm.insert(account, keyword);
// let arc_stats = Arc::new(Mutex::new(stats_hm));
// let arc_keywords = Arc::new(Mutex::new(keywords_hm));
// return Some(Statistics {
// stats: arc_stats,
// keyword_stats: arc_keywords,
// });
// }
// None
// }
pub async fn clear_database_periodically(db: Db) {
loop {
println!("Waiting 6 hour to clear DB!");
use tokio::time::Duration;
tokio::time::delay_for(Duration::from_secs(6 * 60 * 60)).await;
println!("Clearing Old Records!");
// As the database keeps growing we must keep memory usage in check.
// For this we will check how much memory the process is using;
// if it's greater than zero we will clear it.
let mut lock = db.write().await;
let vv = lock.values_mut();
for statistics in vv {
clear_db(statistics, 100).await
}
}
}
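// `count` is the number of most-recent log entries to keep per collection;
// passing 0 additionally resets the numeric counters (this is a reading of the
// code below, not separately documented behaviour).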
pub async fn clear_db(statistics: &mut Statistics, count: usize) {
// use std::borrow::Cow;
// #[derive(Debug, Deserialize, Serialize, Clone)]
// struct Backup<'a> {
// stats: Cow<'a, Statistics>,
// };
// {
// let content = serde_json::to_string_pretty(&Backup {
// stats: Cow::Borrowed(&*statistics),
// })
// .unwrap();
// let path = crate::helpers::sanitize(
// &("".to_owned()
// + &statistics.main_stats.account_name
// + "_"
// + &crate::helpers::current_time_string()),
// ) + ".json";
// let mut new_file = File::create(path).unwrap();
// new_file.write_all(&content.into_bytes()).unwrap();
// } | // println!("Backup done");
let mut no_of_main_log_cleared = 0;
{
if count == 0 {
let ms = &mut statistics.main_stats;
ms.error_counts = 0;
ms.log_counts = 0;
ms.no_api_calls = 0;
ms.no_internal_api_calls = 0;
}
let main_logs_len = statistics.main_stats.logs.len();
if main_logs_len > count {
// [1,2,3,4,5,6,7] to keep 2 elem drain 0..(7-2)
statistics.main_stats.logs.drain(0..(main_logs_len - count));
no_of_main_log_cleared += main_logs_len - count;
}
}
println!("Main Lang Cleared");
let mut no_of_keyword_drained = 0;
{
let keyword_stats_hashmap = statistics.keyword_stats.values_mut();
for kstat in keyword_stats_hashmap {
if count == 0 {
let ss = &mut kstat.stats;
ss.error_counts = 0;
ss.log_counts = 0;
ss.last_updated_at = crate::helpers::current_time_string();
}
let log_len = kstat.keyword_logs.len();
if log_len > count {
kstat.keyword_logs.drain(0..(log_len - count));
no_of_keyword_drained += log_len - count;
}
}
}
println!(
"Keyword Static Cleared \n No of log cleared {} \n No of mail log cleared {}",
no_of_keyword_drained, no_of_main_log_cleared
);
} | random_line_split |
|
models.rs | /// What does our data look like?
// Main logs contain when the bot started to run and the total log amount
// Keyword logs contain each individual keyword with its own logs
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::RwLock;
type Account = String;
type KeywordId = u64;
type KeywordStats = HashMap<KeywordId, KeywordStatistics>;
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct Statistics {
pub main_stats: MainStats,
pub keyword_stats: KeywordStats,
}
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct KeywordStatistics {
pub stats: KeywordStat,
pub keyword_logs: Vec<Log>,
}
impl Statistics {
pub fn new(account: Account) -> Self {
Statistics {
main_stats: MainStats::new(account),
keyword_stats: HashMap::new(),
}
}
}
pub type Db = Arc<RwLock<HashMap<Account, Statistics>>>;
pub fn blank_db() -> Db {
Arc::new(RwLock::new(HashMap::new()))
}
// Stats is the top-level statistics.
// It contains the inner individual keyword statistics.
// However, every log related to a keyword goes into keyword_db.
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct MainStats {
// Name of Account
pub account_name: String,
// Total error counts. It is keyword error + other errors
pub error_counts: u64,
// Total number of logs. It is main log + all log from keywords
pub log_counts: u64,
// Currently unused
pub running: bool,
// Total api calls made by other bot.
pub no_api_calls: u64,
// API calls used for this bot
pub no_internal_api_calls: u64,
// When the other bot was started?
pub started_at: String,
// When the bot was last updated. This field must be updated whenever new logs or keyword logs arrive
pub last_updated_at: String,
// Logs are cleared out and only top 100 logs are placed if program memory goes beyond
// 1G
pub logs: Vec<Log>,
}
impl MainStats {
pub fn new(account_name: Account) -> Self {
MainStats {
account_name,
error_counts: 0,
running: false,
no_api_calls: 0,
log_counts: 0,
no_internal_api_calls: 0,
started_at: crate::helpers::current_time_string(),
last_updated_at: crate::helpers::current_time_string(),
logs: Vec::new(),
}
}
}
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct UpdateStat {
pub error_counts: Option<u64>,
pub running: Option<bool>,
// How many api calls were made since last updated
pub no_of_api_call_diff: Option<u64>,
}
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct Log {
pub r#type: String,
pub time: String,
pub message: String,
pub meta: Option<Value>,
}
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct KeywordStat {
pub id: u64,
pub last_updated_at: String,
pub error_counts: u64,
pub log_counts: u64,
pub name: Option<String>,
pub keyword: Option<String>,
pub placement: Option<u64>,
pub running: Option<bool>,
pub ads_running: Option<bool>,
pub ads_position: Option<u64>,
pub current_price: Option<f64>,
pub is_max_price_reached: Option<bool>,
pub is_min_price_reached: Option<bool>,
pub max_expense_reached: Option<bool>,
}
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct UpdateKeywordStat {
pub id: u64,
pub name: Option<String>,
pub current_price: Option<f64>,
pub keyword: Option<String>,
pub placement: Option<u64>,
pub running: Option<bool>,
pub error_counts: Option<u64>,
pub ads_running: Option<bool>,
pub ads_position: Option<u64>,
pub logs: Option<Vec<Log>>,
pub is_max_price_reached: Option<bool>,
pub is_min_price_reached: Option<bool>,
pub max_expense_reached: Option<bool>,
}
impl KeywordStatistics {
pub fn update(stats: &mut Statistics, input: &UpdateKeywordStat) {
let main_stats = &mut stats.main_stats;
main_stats.last_updated_at = crate::helpers::current_time_string();
let keyword_stats = &mut stats.keyword_stats;
if let Some(ks) = keyword_stats.get_mut(&input.id) {
ks.stats.last_updated_at = crate::helpers::current_time_string();
if let Some(ru) = input.running {
ks.stats.running = Some(ru);
}
// TODO: use `if let Some(..)` for the remaining fields as well.
if input.ads_running.is_some() {
ks.stats.ads_running = input.ads_running
}
if input.is_max_price_reached.is_some() {
ks.stats.is_max_price_reached = input.is_max_price_reached
}
if input.is_min_price_reached.is_some() {
ks.stats.is_min_price_reached = input.is_min_price_reached
}
if input.ads_position.is_some() {
ks.stats.ads_position = input.ads_position
}
if input.max_expense_reached.is_some() {
ks.stats.max_expense_reached = input.max_expense_reached
}
if let Some(cp) = input.current_price {
ks.stats.current_price = Some(cp);
}
} else {
let keyword_statistics = KeywordStatistics {
stats: KeywordStat {
id: input.id,
error_counts: 0,
log_counts: 0,
name: input.name.to_owned(),
keyword: input.keyword.to_owned(),
placement: input.placement,
last_updated_at: crate::helpers::current_time_string(),
running: input.running,
ads_running: input.ads_running,
ads_position: input.ads_position,
current_price: input.current_price,
is_max_price_reached: None,
is_min_price_reached: None,
max_expense_reached: None,
},
keyword_logs: Vec::with_capacity(1000),
};
keyword_stats.insert(input.id, keyword_statistics);
}
}
pub fn add_logs(stats: &mut Statistics, id: KeywordId, input: Log) {
let main_stats = &mut stats.main_stats;
let keyword_stats = &mut stats.keyword_stats;
if let Some(ks) = keyword_stats.get_mut(&id) |
}
}
#[derive(Debug, Deserialize, Serialize, Clone)]
struct BackupStatistics {
stats: MainStats,
keyword: HashMap<KeywordId, Vec<Log>>,
}
// // We might want to reanalyze previous record for that we are providing ability to
// // use old database.
// pub async fn load_old_database() -> Option<Statistics> {
// let aa = std::env::var("JSON_FILE_PATH");
// if aa.is_ok() {
// let ff = std::fs::File::open(aa.unwrap()).unwrap();
// let json: BackupStatistics = serde_json::from_reader(ff).unwrap();
// let stats = json.stats;
// let keyword = json.keyword;
// let account = stats.account_name.to_owned();
// let mut stats_hm = HashMap::new();
// stats_hm.insert(account.clone(), stats);
// let mut keywords_hm = HashMap::new();
// keywords_hm.insert(account, keyword);
// let arc_stats = Arc::new(Mutex::new(stats_hm));
// let arc_keywords = Arc::new(Mutex::new(keywords_hm));
// return Some(Statistics {
// stats: arc_stats,
// keyword_stats: arc_keywords,
// });
// }
// None
// }
pub async fn clear_database_periodically(db: Db) {
loop {
println!("Waiting 6 hour to clear DB!");
use tokio::time::Duration;
tokio::time::delay_for(Duration::from_secs(6 * 60 * 60)).await;
println!("Clearing Old Records!");
// As the database keeps growing we must keep memory usage in check.
// For this we will check how much memory the process is using;
// if it's greater than zero we will clear it.
let mut lock = db.write().await;
let vv = lock.values_mut();
for statistics in vv {
clear_db(statistics, 100).await
}
}
}
pub async fn clear_db(statistics: &mut Statistics, count: usize) {
// use std::borrow::Cow;
// #[derive(Debug, Deserialize, Serialize, Clone)]
// struct Backup<'a> {
// stats: Cow<'a, Statistics>,
// };
// {
// let content = serde_json::to_string_pretty(&Backup {
// stats: Cow::Borrowed(&*statistics),
// })
// .unwrap();
// let path = crate::helpers::sanitize(
// &("".to_owned()
// + &statistics.main_stats.account_name
// + "_"
// + &crate::helpers::current_time_string()),
// ) + ".json";
// let mut new_file = File::create(path).unwrap();
// new_file.write_all(&content.into_bytes()).unwrap();
// }
// println!("Backup done");
let mut no_of_main_log_cleared = 0;
{
if count == 0 {
let ms = &mut statistics.main_stats;
ms.error_counts = 0;
ms.log_counts = 0;
ms.no_api_calls = 0;
ms.no_internal_api_calls = 0;
}
let main_logs_len = statistics.main_stats.logs.len();
if main_logs_len > count {
// [1,2,3,4,5,6,7] to keep 2 elem drain 0..(7-2)
statistics.main_stats.logs.drain(0..(main_logs_len - count));
no_of_main_log_cleared += main_logs_len - count;
}
}
println!("Main Lang Cleared");
let mut no_of_keyword_drained = 0;
{
let keyword_stats_hashmap = statistics.keyword_stats.values_mut();
for kstat in keyword_stats_hashmap {
if count == 0 {
let ss = &mut kstat.stats;
ss.error_counts = 0;
ss.log_counts = 0;
ss.last_updated_at = crate::helpers::current_time_string();
}
let log_len = kstat.keyword_logs.len();
if log_len > count {
kstat.keyword_logs.drain(0..(log_len - count));
no_of_keyword_drained += log_len - count;
}
}
}
println!(
"Keyword Static Cleared \n No of log cleared {} \n No of mail log cleared {}",
no_of_keyword_drained, no_of_main_log_cleared
);
}
| {
main_stats.last_updated_at = crate::helpers::current_time_string();
if input.r#type == "error" {
main_stats.error_counts += 1;
ks.stats.error_counts += 1;
}
main_stats.log_counts += 1;
ks.stats.log_counts += 1;
ks.keyword_logs.push(input);
} | conditional_block |
models.rs | /// What does our data look like?
// Main logs contain when the bot started to run and the total log amount
// Keyword logs contain each individual keyword with its own logs
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::RwLock;
type Account = String;
type KeywordId = u64;
type KeywordStats = HashMap<KeywordId, KeywordStatistics>;
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct Statistics {
pub main_stats: MainStats,
pub keyword_stats: KeywordStats,
}
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct KeywordStatistics {
pub stats: KeywordStat,
pub keyword_logs: Vec<Log>,
}
impl Statistics {
pub fn new(account: Account) -> Self {
Statistics {
main_stats: MainStats::new(account),
keyword_stats: HashMap::new(),
}
}
}
pub type Db = Arc<RwLock<HashMap<Account, Statistics>>>;
pub fn blank_db() -> Db {
Arc::new(RwLock::new(HashMap::new()))
}
// Stats is the top-level statistics.
// It contains the inner individual keyword statistics.
// However, every log related to a keyword goes into keyword_db.
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct MainStats {
// Name of Account
pub account_name: String,
// Total error counts. It is keyword error + other errors
pub error_counts: u64,
// Total number of logs. It is main log + all log from keywords
pub log_counts: u64,
// Currently unused
pub running: bool,
// Total api calls made by other bot.
pub no_api_calls: u64,
// API calls used for this bot
pub no_internal_api_calls: u64,
// When the other bot was started?
pub started_at: String,
// When the bot was last updated. This field must be updated whenever new logs or keyword logs arrive
pub last_updated_at: String,
// Logs are cleared out and only top 100 logs are placed if program memory goes beyond
// 1G
pub logs: Vec<Log>,
}
impl MainStats {
pub fn new(account_name: Account) -> Self {
MainStats {
account_name,
error_counts: 0,
running: false,
no_api_calls: 0,
log_counts: 0,
no_internal_api_calls: 0,
started_at: crate::helpers::current_time_string(),
last_updated_at: crate::helpers::current_time_string(),
logs: Vec::new(),
}
}
}
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct UpdateStat {
pub error_counts: Option<u64>,
pub running: Option<bool>,
// How many api calls were made since last updated
pub no_of_api_call_diff: Option<u64>,
}
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct Log {
pub r#type: String,
pub time: String,
pub message: String,
pub meta: Option<Value>,
}
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct | {
pub id: u64,
pub last_updated_at: String,
pub error_counts: u64,
pub log_counts: u64,
pub name: Option<String>,
pub keyword: Option<String>,
pub placement: Option<u64>,
pub running: Option<bool>,
pub ads_running: Option<bool>,
pub ads_position: Option<u64>,
pub current_price: Option<f64>,
pub is_max_price_reached: Option<bool>,
pub is_min_price_reached: Option<bool>,
pub max_expense_reached: Option<bool>,
}
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct UpdateKeywordStat {
pub id: u64,
pub name: Option<String>,
pub current_price: Option<f64>,
pub keyword: Option<String>,
pub placement: Option<u64>,
pub running: Option<bool>,
pub error_counts: Option<u64>,
pub ads_running: Option<bool>,
pub ads_position: Option<u64>,
pub logs: Option<Vec<Log>>,
pub is_max_price_reached: Option<bool>,
pub is_min_price_reached: Option<bool>,
pub max_expense_reached: Option<bool>,
}
impl KeywordStatistics {
pub fn update(stats: &mut Statistics, input: &UpdateKeywordStat) {
let main_stats = &mut stats.main_stats;
main_stats.last_updated_at = crate::helpers::current_time_string();
let keyword_stats = &mut stats.keyword_stats;
if let Some(ks) = keyword_stats.get_mut(&input.id) {
ks.stats.last_updated_at = crate::helpers::current_time_string();
if let Some(ru) = input.running {
ks.stats.running = Some(ru);
}
// TODO: use `if let Some(..)` for the remaining fields as well.
if input.ads_running.is_some() {
ks.stats.ads_running = input.ads_running
}
if input.is_max_price_reached.is_some() {
ks.stats.is_max_price_reached = input.is_max_price_reached
}
if input.is_min_price_reached.is_some() {
ks.stats.is_min_price_reached = input.is_min_price_reached
}
if input.ads_position.is_some() {
ks.stats.ads_position = input.ads_position
}
if input.max_expense_reached.is_some() {
ks.stats.max_expense_reached = input.max_expense_reached
}
if let Some(cp) = input.current_price {
ks.stats.current_price = Some(cp);
}
} else {
let keyword_statistics = KeywordStatistics {
stats: KeywordStat {
id: input.id,
error_counts: 0,
log_counts: 0,
name: input.name.to_owned(),
keyword: input.keyword.to_owned(),
placement: input.placement,
last_updated_at: crate::helpers::current_time_string(),
running: input.running,
ads_running: input.ads_running,
ads_position: input.ads_position,
current_price: input.current_price,
is_max_price_reached: None,
is_min_price_reached: None,
max_expense_reached: None,
},
keyword_logs: Vec::with_capacity(1000),
};
keyword_stats.insert(input.id, keyword_statistics);
}
}
pub fn add_logs(stats: &mut Statistics, id: KeywordId, input: Log) {
let main_stats = &mut stats.main_stats;
let keyword_stats = &mut stats.keyword_stats;
if let Some(ks) = keyword_stats.get_mut(&id) {
main_stats.last_updated_at = crate::helpers::current_time_string();
if input.r#type == "error" {
main_stats.error_counts += 1;
ks.stats.error_counts += 1;
}
main_stats.log_counts += 1;
ks.stats.log_counts += 1;
ks.keyword_logs.push(input);
}
}
}
#[derive(Debug, Deserialize, Serialize, Clone)]
struct BackupStatistics {
stats: MainStats,
keyword: HashMap<KeywordId, Vec<Log>>,
}
// // We might want to reanalyze previous record for that we are providing ability to
// // use old database.
// pub async fn load_old_database() -> Option<Statistics> {
// let aa = std::env::var("JSON_FILE_PATH");
// if aa.is_ok() {
// let ff = std::fs::File::open(aa.unwrap()).unwrap();
// let json: BackupStatistics = serde_json::from_reader(ff).unwrap();
// let stats = json.stats;
// let keyword = json.keyword;
// let account = stats.account_name.to_owned();
// let mut stats_hm = HashMap::new();
// stats_hm.insert(account.clone(), stats);
// let mut keywords_hm = HashMap::new();
// keywords_hm.insert(account, keyword);
// let arc_stats = Arc::new(Mutex::new(stats_hm));
// let arc_keywords = Arc::new(Mutex::new(keywords_hm));
// return Some(Statistics {
// stats: arc_stats,
// keyword_stats: arc_keywords,
// });
// }
// None
// }
pub async fn clear_database_periodically(db: Db) {
loop {
println!("Waiting 6 hour to clear DB!");
use tokio::time::Duration;
tokio::time::delay_for(Duration::from_secs(6 * 60 * 60)).await;
println!("Clearing Old Records!");
// As the database keeps growing we must keep memory usage in check.
// For this we will check how much memory the process is using;
// if it's greater than zero we will clear it.
let mut lock = db.write().await;
let vv = lock.values_mut();
for statistics in vv {
clear_db(statistics, 100).await
}
}
}
pub async fn clear_db(statistics: &mut Statistics, count: usize) {
// use std::borrow::Cow;
// #[derive(Debug, Deserialize, Serialize, Clone)]
// struct Backup<'a> {
// stats: Cow<'a, Statistics>,
// };
// {
// let content = serde_json::to_string_pretty(&Backup {
// stats: Cow::Borrowed(&*statistics),
// })
// .unwrap();
// let path = crate::helpers::sanitize(
// &("".to_owned()
// + &statistics.main_stats.account_name
// + "_"
// + &crate::helpers::current_time_string()),
// ) + ".json";
// let mut new_file = File::create(path).unwrap();
// new_file.write_all(&content.into_bytes()).unwrap();
// }
// println!("Backup done");
let mut no_of_main_log_cleared = 0;
{
if count == 0 {
let ms = &mut statistics.main_stats;
ms.error_counts = 0;
ms.log_counts = 0;
ms.no_api_calls = 0;
ms.no_internal_api_calls = 0;
}
let main_logs_len = statistics.main_stats.logs.len();
if main_logs_len > count {
// [1,2,3,4,5,6,7] to keep 2 elem drain 0..(7-2)
statistics.main_stats.logs.drain(0..(main_logs_len - count));
no_of_main_log_cleared += main_logs_len - count;
}
}
println!("Main Lang Cleared");
let mut no_of_keyword_drained = 0;
{
let keyword_stats_hashmap = statistics.keyword_stats.values_mut();
for kstat in keyword_stats_hashmap {
if count == 0 {
let ss = &mut kstat.stats;
ss.error_counts = 0;
ss.log_counts = 0;
ss.last_updated_at = crate::helpers::current_time_string();
}
let log_len = kstat.keyword_logs.len();
if log_len > count {
kstat.keyword_logs.drain(0..(log_len - count));
no_of_keyword_drained += log_len - count;
}
}
}
println!(
"Keyword Static Cleared \n No of log cleared {} \n No of mail log cleared {}",
no_of_keyword_drained, no_of_main_log_cleared
);
}
| KeywordStat | identifier_name |
main.rs | use std::ops::Deref;
use std::path::PathBuf;
use serde::Deserialize;
use structopt::StructOpt;
use tmux_interface::{AttachSession, NewSession, NewWindow, SelectWindow, SendKeys, SplitWindow, TmuxInterface};
const ORIGINAL_WINDOW_NAME: &str = "__DEFAULT__";
#[derive(Debug, Deserialize)]
struct Setup {
file: Option<String>,
socket_name: Option<String>,
session: Option<Session>,
#[serde(rename = "window")]
windows: Vec<Window>,
rebuild: Option<bool>,
}
#[derive(Debug, Deserialize, Default)]
struct Session {
name: Option<String>,
select: Option<String>,
}
#[derive(Debug, Deserialize)]
struct Window {
name: Option<String>,
layout: String,
#[serde(rename = "pane")]
panes: Option<Vec<Pane>>,
}
#[derive(Debug, Deserialize)]
struct Pane {
name: Option<String>,
command: Option<String>,
}
#[derive(Debug, StructOpt)]
#[structopt(name = "txl", about = "A tmux layout manager.")]
struct Args {
/// TOML file that contains a txl layout.
#[structopt(parse(from_os_str))]
file: PathBuf,
/// If true, txl will destroy the previous session if it exists, and rebuild everything.
#[structopt(long)]
rebuild: bool,
}
macro_rules! handle {
($expr:expr, |$err:ident| $err_handler:expr) => {{
match $expr {
Ok(v) => v,
Err($err) => {
$err_handler
}
}
}};
}
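// Example usage (mirrors the call sites further down): the macro yields the
// `Ok` value or runs the handler on `Err`, e.g.
// let has = handle!(tmux.has_session(name), |err| { eprintln!("{}", err); return; });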
fn main() {
let Args {
file: path,
rebuild,
} = <_>::from_args();
let (path, input) = {
let mut path = path;
macro_rules! not {
($ext:literal) => {{
path.set_extension($ext);
!path.exists()
}};
}
if !path.exists() && not!("txl") && not!("toml") {
path.set_extension("");
eprintln!("Unable to locate file: {}[.txl|.toml]", path.display());
return;
}
match std::fs::read_to_string(&path) {
Ok(value) => (path, value),
Err(err) => {
eprintln!("Unable to read file: {}\n{}", path.display(), err);
return;
}
}
};
let Setup {
file,
socket_name,
session,
windows,
rebuild: rebuilding,
} = handle!(toml::from_str(&input), |err| {
eprintln!("Input file (\"{}\") contains invalid toml: {}", path.display(), err);
return;
});
let rebuild = rebuild || rebuilding.unwrap_or(false);
let file = file.as_ref().map(Deref::deref);
let socket_name = socket_name.as_ref().map(Deref::deref);
let Session {
name: session_name,
select,
} = session.unwrap_or_default();
let session_name = session_name.as_ref().map(Deref::deref);
let select = select.as_ref().map(Deref::deref);
// println!("{:#?}", windows);
let tmux = TmuxInterface {
file,
socket_name,
..Default::default()
};
{ // Setting up the session and whatnot.
let has_session = handle!(tmux.has_session(session_name), |err| {
eprintln!("Unable to check if session already exists: {}", err);
return;
});
if has_session {
if !rebuild {
// Well, we're not allowed to rebuild, so attach ourselves, and we're done.
attach_to(&tmux, session_name);
}
println!("Found session... Destroying..");
handle!(tmux.kill_session(Some(false), None, session_name), |err| {
eprintln!("Unable to kill session with the same name: {}", err);
return;
});
}
let has_session = handle!(tmux.has_session(session_name), |err| {
eprintln!("Unable to check if session already exists: {}", err);
return;
});
if has_session {
// I've had some weird sessions where they just keep on sticking around.
// Stupidest solution I've found is to just kill the server... :|
handle!(tmux.kill_server(), |err| {
eprintln!("Unable to kill server: {}", err);
return;
});
}
let (width, height) = if let Some((w, h)) = term_size::dimensions() {
(Some(w), Some(h))
} else {
(None, None)
};
let new_session = NewSession {
session_name,
detached: Some(true),
width,
height,
..Default::default()
};
match tmux.new_session(&new_session) {
Ok(v) => if !v.is_empty() {
eprintln!("Unable to create new session: {}", v);
return; | }
Err(err) => {
eprintln!("Unable to create new session: {}", err);
return;
}
}
}
// We rename the first window, so we can locate and remove it later.
match tmux.rename_window(None, ORIGINAL_WINDOW_NAME) {
Ok(v) => if !v.status.success() {
eprintln!("Unable to rename default window: {:?}", v);
return;
}
Err(err) => {
eprintln!("Unable to rename default window: {}", err);
return;
}
}
// This is where we need to build the actual layout...
for Window {
name: window_name,
layout,
panes,
} in windows {
let window_name = window_name.as_ref().map(Deref::deref);
let panes = panes.unwrap_or_default();
{ // Tell tmux to create the window
let new_window = NewWindow {
detached: Some(false),
window_name,
..Default::default()
};
match tmux.new_window(new_window) {
Ok(v) => if !v.is_empty() {
eprintln!("Unable to create new window: {}", v);
return;
}
Err(err) => {
eprintln!("Unable to create new window: {}", err);
return;
}
}
}
for Action(direction, target, percentage) in parse_layout(&layout) {
let selected = format!("{}", target + 1);
let percentage = (percentage * 100f32) as usize;
let split_window = SplitWindow {
target_pane: Some(&selected),
vertical: Some(direction == Direction::Vertical),
horizontal: Some(direction == Direction::Horizontal),
percentage: Some(percentage),
..Default::default()
};
match tmux.split_window(&split_window) {
Ok(v) => if !v.is_empty() {
eprintln!("Unable to split window: {}", v);
return;
}
Err(err) => {
eprintln!("Unable to split window: {}", err);
return;
}
}
}
let mut target = 1;
for pane in panes {
let target = {
let old = target;
target += 1;
old
};
let command = if let Some(ref value) = pane.command {
value.deref()
} else {
continue;
};
let selected = format!("{}", target);
let keys = vec![
command,
"C-m"
];
let send_keys = SendKeys {
target_pane: Some(&selected),
key: keys,
..Default::default()
};
match tmux.send_keys(&send_keys) {
Ok(v) => if !v.status.success() {
eprintln!("Unable to send command ({}) to pane {}: {:?}", command, target, v);
}
Err(err) => {
eprintln!("Unable to send command ({}) to pane {}: {}", command, target, err);
}
}
}
}
// Kill the first window, as tmux just adds it, but we don't want or need it.
match tmux.kill_window(None, Some(ORIGINAL_WINDOW_NAME)) {
Ok(v) => if !v.status.success() {
eprintln!("Unable to kill default window: {:?}", v);
return;
}
Err(err) => {
eprintln!("Unable to kill default window: {}", err);
return;
}
}
if let Some(value) = select {
let select_window = SelectWindow {
target_window: Some(value),
..Default::default()
};
match tmux.select_window(&select_window) {
Ok(v) => if !v.status.success() {
eprintln!("Unable to select window: {:?}", v);
return;
}
Err(err) => {
eprintln!("Unable to select window: {}", err);
return;
}
}
}
// We're done here, so we can attach to the session, and promptly fuck off.
attach_to(&tmux, session_name);
}
fn attach_to(tmux: &TmuxInterface, session_name: Option<&str>) ->! {
let attach_session = AttachSession {
target_session: session_name,
detach_other: Some(true),
..Default::default()
};
tmux.attach_session_with(&attach_session, |args| {
println!("{:?}", args);
#[cfg(any(unix, macos))]
{
let program = tmux.tmux.unwrap_or("tmux");
use exec::Command;
let mut command = Command::new(program);
command.args(&args);
let error = command.exec();
panic!("{}", error);
}
#[cfg(not(any(unix, macos)))]
{
compile_error!("Windows doesn't support 'execvp'");
}
});
panic!("Failed to attach to tmux session: {:?}", session_name);
}
#[derive(Debug, PartialEq)]
enum Direction {
Vertical,
Horizontal,
}
#[derive(Debug)]
struct Action(Direction, u8, f32);
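// A hedged trace of the merge algorithm (worked out from the code below, not
// from upstream docs): for the layout "00\n11\n", determine_rectangles yields a
// 2x1 '0' rect above a 2x1 '1' rect; the '1' rect is merged into '0' along the
// shared southern edge, producing Action(Vertical, 0, 0.5), i.e. "split pane 0
// vertically at 50%".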
fn parse_layout(layout: &str) -> Vec<Action> {
let mut rects = determine_rectangles(layout);
let mut actions = vec![];
while let Some((parent_index, child_index, ordinal)) = find_first_pair(&rects) {
let child = rects.remove(child_index);
let parent = &mut rects[parent_index];
// println!("{:?} <= {:?}", parent.c, child.c);
match ordinal {
Ordinal::South => {
let old_height = child.height;
let new_height = parent.height + child.height;
let percentage = old_height as f32 / new_height as f32;
parent.height = new_height;
actions.push(Action(Direction::Vertical, parent_index as u8, percentage));
}
Ordinal::East => {
let old_width = child.width;
let new_width = parent.width + child.width;
let percentage = old_width as f32 / new_width as f32;
parent.width = new_width;
actions.push(Action(Direction::Horizontal, parent_index as u8, percentage));
}
_ => panic!("Someone changed the ORDINALS constant..."),
}
}
actions.reverse();
actions
}
fn find_first_pair(rects: &Vec<Rect>) -> Option<(usize, usize, Ordinal)> {
const ORDINALS: &[Ordinal] = &[
Ordinal::South,
Ordinal::East,
];
for (left_index, rect) in rects.iter().enumerate() {
for ordinal in ORDINALS {
let left_edge = rect.edge(*ordinal);
if let Some(right_index) = rects.iter().position(|r| r != rect && r.edge(ordinal.opposite()) == left_edge) {
return Some((left_index, right_index, *ordinal));
}
}
}
None
}
#[derive(Debug, PartialEq, Copy, Clone)]
enum Ordinal {
North,
South,
East,
West,
}
impl Ordinal {
fn opposite(&self) -> Ordinal {
match self {
Ordinal::North => Ordinal::South,
Ordinal::South => Ordinal::North,
Ordinal::East => Ordinal::West,
Ordinal::West => Ordinal::East,
}
}
}
#[derive(Debug, PartialEq)]
struct Rect {
c: char,
x: u8,
y: u8,
width: u8,
height: u8,
}
impl Rect {
fn edge(&self, direction: Ordinal) -> Edge {
match direction {
Ordinal::North => {
Edge {
x: self.x,
y: self.y,
length: self.width,
}
}
Ordinal::East => {
Edge {
x: self.x + self.width,
y: self.y,
length: self.height,
}
}
Ordinal::South => {
Edge {
x: self.x,
y: self.y + self.height,
length: self.width,
}
}
Ordinal::West => {
Edge {
x: self.x,
y: self.y,
length: self.height,
}
}
}
}
}
#[derive(Debug, PartialEq)]
struct Edge {
x: u8,
y: u8,
length: u8,
}
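// Sketch of the scan's output for a simple side-by-side layout (derived by
// tracing the loop below; assumes the string already passes sanitise_input):
// determine_rectangles("01\n01\n") ==
//     [Rect { c: '0', x: 0, y: 0, width: 1, height: 2 },
//      Rect { c: '1', x: 1, y: 0, width: 1, height: 2 }]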
fn determine_rectangles(layout: &str) -> Vec<Rect> {
let (width, height, chars) = sanitise_input(layout);
macro_rules! point {
($index:ident) => {{
let x = $index % width;
let y = $index / width;
(x, y)
}};
}
let mut index = 0usize;
let mut rects = vec![];
let mut bit_mask = vec![false; chars.len()];
while index < chars.len() {
if bit_mask[index] {
index += 1;
continue;
}
let c = chars[index];
let rect_width = {
let mut rect_width = width;
for offset in 1..width {
let right = index + offset;
if right >= chars.len() || chars[right] != c {
rect_width = offset;
break;
}
};
rect_width
};
let rect_height = {
let mut rect_height = height;
for offset in 1..height {
let below = index + (offset * width);
if below >= chars.len() || chars[below] != c {
rect_height = offset;
break;
}
}
rect_height
};
for y_offset in 0..rect_height {
for x_offset in 0..rect_width {
let bit_index = index + x_offset + (y_offset * width);
if chars[bit_index] != c {
panic!("Invalid character at {:?}. [expected: {:?}, found: {:?}]", point!(bit_index), c, chars[bit_index]);
}
bit_mask[bit_index] = true;
}
}
let (x, y) = point!(index);
rects.push(Rect {
c,
x: x as u8,
y: y as u8,
width: rect_width as u8,
height: rect_height as u8,
});
index += 1;
}
rects
}
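// Note, inferred from the checks below and worth double-checking: height is
// only bumped when whitespace follows a row, so the final row must end in
// whitespace or the width * height == chars.len() assertion panics.
// e.g. sanitise_input("ab\ncd\n") == (2, 2, vec!['a', 'b', 'c', 'd'])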
fn sanitise_input(layout: &str) -> (usize, usize, Vec<char>) {
#[derive(PartialEq)]
enum Mode {
SkipWhitespace,
WidthCounting,
HeightCounting,
}
use Mode::*;
// It basically treats any whitespace as newlines...
// If you give it something like "000\n000 000"
// It'll think you've given it a 3x3 of '0'
let mut mode = SkipWhitespace;
let mut width = 0usize;
let mut running_width = 0usize;
let mut height = 0usize;
let mut chars = vec![];
for c in layout.chars() {
if c.is_ascii_whitespace() {
if mode == WidthCounting {
if width == 0 {
width = running_width;
running_width = 0;
} else {
if width != running_width {
panic!("Width's do not match! (pos:{})", chars.len());
}
running_width = 0;
}
mode = HeightCounting;
}
if mode == HeightCounting {
height += 1;
mode = SkipWhitespace;
}
continue;
}
if mode == SkipWhitespace {
mode = WidthCounting;
}
if mode == WidthCounting {
running_width += 1;
}
chars.push(c);
}
let expected = (width * height) as usize;
if expected != chars.len() {
panic!("Unexpected character count. [expected: {}, got: {}]", expected, chars.len());
}
(width, height, chars)
} | random_line_split |
|
main.rs | use std::ops::Deref;
use std::path::PathBuf;
use serde::Deserialize;
use structopt::StructOpt;
use tmux_interface::{AttachSession, NewSession, NewWindow, SelectWindow, SendKeys, SplitWindow, TmuxInterface};
const ORIGINAL_WINDOW_NAME: &str = "__DEFAULT__";
#[derive(Debug, Deserialize)]
struct Setup {
file: Option<String>,
socket_name: Option<String>,
session: Option<Session>,
#[serde(rename = "window")]
windows: Vec<Window>,
rebuild: Option<bool>,
}
#[derive(Debug, Deserialize, Default)]
struct Session {
name: Option<String>,
select: Option<String>,
}
#[derive(Debug, Deserialize)]
struct Window {
name: Option<String>,
layout: String,
#[serde(rename = "pane")]
panes: Option<Vec<Pane>>,
}
#[derive(Debug, Deserialize)]
struct Pane {
name: Option<String>,
command: Option<String>,
}
#[derive(Debug, StructOpt)]
#[structopt(name = "txl", about = "A tmux layout manager.")]
struct Args {
/// TOML file that contains a txl layout.
#[structopt(parse(from_os_str))]
file: PathBuf,
/// If true, txl will destroy the previous session if it exists, and rebuild everything.
#[structopt(long)]
rebuild: bool,
}
macro_rules! handle {
($expr:expr, |$err:ident| $err_handler:expr) => {{
match $expr {
Ok(v) => v,
Err($err) => {
$err_handler
}
}
}};
}
fn main() {
let Args {
file: path,
rebuild,
} = <_>::from_args();
let (path, input) = {
let mut path = path;
macro_rules! not {
($ext:literal) => {{
path.set_extension($ext);
!path.exists()
}};
}
if !path.exists() && not!("txl") && not!("toml") {
path.set_extension("");
eprintln!("Unable to locate file: {}[.txl|.toml]", path.display());
return;
}
match std::fs::read_to_string(&path) {
Ok(value) => (path, value),
Err(err) => {
eprintln!("Unable to read file: {}\n{}", path.display(), err);
return;
}
}
};
let Setup {
file,
socket_name,
session,
windows,
rebuild: rebuilding,
} = handle!(toml::from_str(&input), |err| {
eprintln!("Input file (\"{}\") contains invalid toml: {}", path.display(), err);
return;
});
let rebuild = rebuild || rebuilding.unwrap_or(false);
let file = file.as_ref().map(Deref::deref);
let socket_name = socket_name.as_ref().map(Deref::deref);
let Session {
name: session_name,
select,
} = session.unwrap_or_default();
let session_name = session_name.as_ref().map(Deref::deref);
let select = select.as_ref().map(Deref::deref);
// println!("{:#?}", windows);
let tmux = TmuxInterface {
file,
socket_name,
..Default::default()
};
{ // Setting up the session and whatnot.
let has_session = handle!(tmux.has_session(session_name), |err| {
eprintln!("Unable to check if session already exists: {}", err);
return;
});
if has_session {
if !rebuild {
// Well, we're not allowed to rebuild, so attach ourselves, and we're done.
attach_to(&tmux, session_name);
}
println!("Found session... Destroying..");
handle!(tmux.kill_session(Some(false), None, session_name), |err| {
eprintln!("Unable to kill session with the same name: {}", err);
return;
});
}
let has_session = handle!(tmux.has_session(session_name), |err| {
eprintln!("Unable to check if session already exists: {}", err);
return;
});
if has_session {
// I've had some weird sessions where they just keep on sticking around.
// Stupidest solution I've found is to just kill the server... :|
handle!(tmux.kill_server(), |err| {
eprintln!("Unable to kill server: {}", err);
return;
});
}
let (width, height) = if let Some((w, h)) = term_size::dimensions() {
(Some(w), Some(h))
} else {
(None, None)
};
let new_session = NewSession {
session_name,
detached: Some(true),
width,
height,
..Default::default()
};
match tmux.new_session(&new_session) {
Ok(v) => if !v.is_empty() {
eprintln!("Unable to create new session: {}", v);
return;
}
Err(err) => {
eprintln!("Unable to create new session: {}", err);
return;
}
}
}
// We rename the first window, so we can locate and remove it later.
match tmux.rename_window(None, ORIGINAL_WINDOW_NAME) {
Ok(v) => if !v.status.success() {
eprintln!("Unable to rename default window: {:?}", v);
return;
}
Err(err) => {
eprintln!("Unable to rename default window: {}", err);
return;
}
}
// This is where we need to build the actual layout...
for Window {
name: window_name,
layout,
panes,
} in windows {
let window_name = window_name.as_ref().map(Deref::deref);
let panes = panes.unwrap_or_default();
{ // Tell tmux to create the window
let new_window = NewWindow {
detached: Some(false),
window_name,
..Default::default()
};
match tmux.new_window(new_window) {
Ok(v) => if !v.is_empty() {
eprintln!("Unable to create new window: {}", v);
return;
}
Err(err) => {
eprintln!("Unable to create new window: {}", err);
return;
}
}
}
for Action(direction, target, percentage) in parse_layout(&layout) {
let selected = format!("{}", target + 1);
let percentage = (percentage * 100f32) as usize;
let split_window = SplitWindow {
target_pane: Some(&selected),
vertical: Some(direction == Direction::Vertical),
horizontal: Some(direction == Direction::Horizontal),
percentage: Some(percentage),
..Default::default()
};
match tmux.split_window(&split_window) {
Ok(v) => if !v.is_empty() {
eprintln!("Unable to split window: {}", v);
return;
}
Err(err) => {
eprintln!("Unable to split window: {}", err);
return;
}
}
}
let mut target = 1;
for pane in panes {
let target = {
let old = target;
target += 1;
old
};
let command = if let Some(ref value) = pane.command {
value.deref()
} else {
continue;
};
let selected = format!("{}", target);
let keys = vec![
command,
"C-m"
];
let send_keys = SendKeys {
target_pane: Some(&selected),
key: keys,
..Default::default()
};
match tmux.send_keys(&send_keys) {
Ok(v) => if !v.status.success() {
eprintln!("Unable to send command ({}) to pane {}: {:?}", command, target, v);
}
Err(err) => {
eprintln!("Unable to send command ({}) to pane {}: {}", command, target, err);
}
}
}
}
// Kill the first window, as tmux just adds it, but we don't want or need it.
match tmux.kill_window(None, Some(ORIGINAL_WINDOW_NAME)) {
Ok(v) => if !v.status.success() {
eprintln!("Unable to kill default window: {:?}", v);
return;
}
Err(err) => {
eprintln!("Unable to kill default window: {}", err);
return;
}
}
if let Some(value) = select {
let select_window = SelectWindow {
target_window: Some(value),
..Default::default()
};
match tmux.select_window(&select_window) {
Ok(v) => if !v.status.success() {
eprintln!("Unable to select window: {:?}", v);
return;
}
Err(err) => {
eprintln!("Unable to select window: {}", err);
return;
}
}
}
// We're done here, so we can attach to the session, and promptly fuck off.
attach_to(&tmux, session_name);
}
fn attach_to(tmux: &TmuxInterface, session_name: Option<&str>) ->! {
let attach_session = AttachSession {
target_session: session_name,
detach_other: Some(true),
..Default::default()
};
tmux.attach_session_with(&attach_session, |args| {
println!("{:?}", args);
#[cfg(any(unix, macos))]
{
let program = tmux.tmux.unwrap_or("tmux");
use exec::Command;
let mut command = Command::new(program);
command.args(&args);
let error = command.exec();
panic!("{}", error);
}
#[cfg(not(any(unix, macos)))]
{
compile_error!("Windows doesn't support 'execvp'");
}
});
panic!("Failed to attach to tmux session: {:?}", session_name);
}
#[derive(Debug, PartialEq)]
enum Direction {
Vertical,
Horizontal,
}
#[derive(Debug)]
struct Action(Direction, u8, f32);
fn parse_layout(layout: &str) -> Vec<Action> {
let mut rects = determine_rectangles(layout);
let mut actions = vec![];
while let Some((parent_index, child_index, ordinal)) = find_first_pair(&rects) {
let child = rects.remove(child_index);
let parent = &mut rects[parent_index];
// println!("{:?} <= {:?}", parent.c, child.c);
match ordinal {
Ordinal::South => {
let old_height = child.height;
let new_height = parent.height + child.height;
let percentage = old_height as f32 / new_height as f32;
parent.height = new_height;
actions.push(Action(Direction::Vertical, parent_index as u8, percentage));
}
Ordinal::East => {
let old_width = child.width;
let new_width = parent.width + child.width;
let percentage = old_width as f32 / new_width as f32;
parent.width = new_width;
actions.push(Action(Direction::Horizontal, parent_index as u8, percentage));
}
_ => panic!("Someone changed the ORDINALS constant..."),
}
}
actions.reverse();
actions
}
fn find_first_pair(rects: &Vec<Rect>) -> Option<(usize, usize, Ordinal)> {
const ORDINALS: &[Ordinal] = &[
Ordinal::South,
Ordinal::East,
];
for (left_index, rect) in rects.iter().enumerate() {
for ordinal in ORDINALS {
let left_edge = rect.edge(*ordinal);
if let Some(right_index) = rects.iter().position(|r| r != rect && r.edge(ordinal.opposite()) == left_edge) |
}
}
None
}
#[derive(Debug, PartialEq, Copy, Clone)]
enum Ordinal {
North,
South,
East,
West,
}
impl Ordinal {
fn opposite(&self) -> Ordinal {
match self {
Ordinal::North => Ordinal::South,
Ordinal::South => Ordinal::North,
Ordinal::East => Ordinal::West,
Ordinal::West => Ordinal::East,
}
}
}
#[derive(Debug, PartialEq)]
struct Rect {
c: char,
x: u8,
y: u8,
width: u8,
height: u8,
}
impl Rect {
fn edge(&self, direction: Ordinal) -> Edge {
match direction {
Ordinal::North => {
Edge {
x: self.x,
y: self.y,
length: self.width,
}
}
Ordinal::East => {
Edge {
x: self.x + self.width,
y: self.y,
length: self.height,
}
}
Ordinal::South => {
Edge {
x: self.x,
y: self.y + self.height,
length: self.width,
}
}
Ordinal::West => {
Edge {
x: self.x,
y: self.y,
length: self.height,
}
}
}
}
}
#[derive(Debug, PartialEq)]
struct Edge {
x: u8,
y: u8,
length: u8,
}
fn determine_rectangles(layout: &str) -> Vec<Rect> {
let (width, height, chars) = sanitise_input(layout);
macro_rules! point {
($index:ident) => {{
let x = $index % width;
let y = $index / width;
(x, y)
}};
}
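// e.g. an index of 5 with width == 3 maps to (x, y) == (2, 1).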
let mut index = 0usize;
let mut rects = vec![];
let mut bit_mask = vec![false; chars.len()];
while index < chars.len() {
if bit_mask[index] {
index += 1;
continue;
}
let c = chars[index];
let rect_width = {
let mut rect_width = width;
for offset in 1..width {
let right = index + offset;
if right >= chars.len() || chars[right] != c {
rect_width = offset;
break;
}
};
rect_width
};
let rect_height = {
let mut rect_height = height;
for offset in 1..height {
let below = index + (offset * width);
if below >= chars.len() || chars[below] != c {
rect_height = offset;
break;
}
}
rect_height
};
for y_offset in 0..rect_height {
for x_offset in 0..rect_width {
let bit_index = index + x_offset + (y_offset * width);
if chars[bit_index] != c {
panic!("Invalid character at {:?}. [expected: {:?}, found: {:?}]", point!(bit_index), c, chars[bit_index]);
}
bit_mask[bit_index] = true;
}
}
let (x, y) = point!(index);
rects.push(Rect {
c,
x: x as u8,
y: y as u8,
width: rect_width as u8,
height: rect_height as u8,
});
index += 1;
}
rects
}
fn sanitise_input(layout: &str) -> (usize, usize, Vec<char>) {
#[derive(PartialEq)]
enum Mode {
SkipWhitespace,
WidthCounting,
HeightCounting,
}
use Mode::*;
// It basically treats any whitespace as newlines...
// If you give it something like "000\n000 000"
// It'll think you've given it a 3x3 of '0'
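// e.g. "00\n01\n11\n" (trailing whitespace included) gives width = 2, height = 3 and six chars.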
let mut mode = SkipWhitespace;
let mut width = 0usize;
let mut running_width = 0usize;
let mut height = 0usize;
let mut chars = vec![];
for c in layout.chars() {
if c.is_ascii_whitespace() {
if mode == WidthCounting {
if width == 0 {
width = running_width;
running_width = 0;
} else {
if width != running_width {
panic!("Widths do not match! (pos:{})", chars.len());
}
running_width = 0;
}
mode = HeightCounting;
}
if mode == HeightCounting {
height += 1;
mode = SkipWhitespace;
}
continue;
}
if mode == SkipWhitespace {
mode = WidthCounting;
}
if mode == WidthCounting {
running_width += 1;
}
chars.push(c);
}
let expected = (width * height) as usize;
if expected != chars.len() {
panic!("Unexpected character count. [expected: {}, got: {}]", expected, chars.len());
}
(width, height, chars)
}
| {
return Some((left_index, right_index, *ordinal));
} | conditional_block |
main.rs | use std::ops::Deref;
use std::path::PathBuf;
use serde::Deserialize;
use structopt::StructOpt;
use tmux_interface::{AttachSession, NewSession, NewWindow, SelectWindow, SendKeys, SplitWindow, TmuxInterface};
const ORIGINAL_WINDOW_NAME: &str = "__DEFAULT__";
#[derive(Debug, Deserialize)]
struct Setup {
file: Option<String>,
socket_name: Option<String>,
session: Option<Session>,
#[serde(rename = "window")]
windows: Vec<Window>,
rebuild: Option<bool>,
}
#[derive(Debug, Deserialize, Default)]
struct Session {
name: Option<String>,
select: Option<String>,
}
#[derive(Debug, Deserialize)]
struct Window {
name: Option<String>,
layout: String,
#[serde(rename = "pane")]
panes: Option<Vec<Pane>>,
}
#[derive(Debug, Deserialize)]
struct Pane {
name: Option<String>,
command: Option<String>,
}
#[derive(Debug, StructOpt)]
#[structopt(name = "txl", about = "A tmux layout manager.")]
struct Args {
/// TOML file that contains a txl layout.
#[structopt(parse(from_os_str))]
file: PathBuf,
/// If true, txl will destroy the previous session if it exists, and rebuild everything.
#[structopt(long)]
rebuild: bool,
}
macro_rules! handle {
($expr:expr, |$err:ident| $err_handler:expr) => {{
match $expr {
Ok(v) => v,
Err($err) => {
$err_handler
}
}
}};
}
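// Expands to a match that yields the Ok value and otherwise runs the error arm,
// which is expected to diverge (see the `toml::from_str` call in `main` below).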
fn main() {
let Args {
file: path,
rebuild,
} = <_>::from_args();
let (path, input) = {
let mut path = path;
macro_rules! not {
($ext:literal) => {{
path.set_extension($ext);
!path.exists()
}};
}
if !path.exists() && not!("txl") && not!("toml") {
path.set_extension("");
eprintln!("Unable to locate file: {}[.txl|.toml]", path.display());
return;
}
match std::fs::read_to_string(&path) {
Ok(value) => (path, value),
Err(err) => {
eprintln!("Unable to read file: {}\n{}", path.display(), err);
return;
}
}
};
let Setup {
file,
socket_name,
session,
windows,
rebuild: rebuilding,
} = handle!(toml::from_str(&input), |err| {
eprintln!("Input file (\"{}\") contains invalid toml: {}", path.display(), err);
return;
});
let rebuild = rebuild || rebuilding.unwrap_or(false);
let file = file.as_ref().map(Deref::deref);
let socket_name = socket_name.as_ref().map(Deref::deref);
let Session {
name: session_name,
select,
} = session.unwrap_or_default();
let session_name = session_name.as_ref().map(Deref::deref);
let select = select.as_ref().map(Deref::deref);
// println!("{:#?}", windows);
let tmux = TmuxInterface {
file,
socket_name,
..Default::default()
};
{ // Setting up the session and whatnot.
let has_session = handle!(tmux.has_session(session_name), |err| {
eprintln!("Unable to check if session already exists: {}", err);
return;
});
if has_session {
if !rebuild {
// Well, we're not allowed to rebuild, so attach ourselves, and we're done.
attach_to(&tmux, session_name);
}
println!("Found session... Destroying..");
handle!(tmux.kill_session(Some(false), None, session_name), |err| {
eprintln!("Unable to kill session with the same name: {}", err);
return;
});
}
let has_session = handle!(tmux.has_session(session_name), |err| {
eprintln!("Unable to check if session already exists: {}", err);
return;
});
if has_session {
// I've had some weird sessions where they just keep on sticking around.
// Stupidest solution I've found is to just kill the server... :|
handle!(tmux.kill_server(), |err| {
eprintln!("Unable to kill server: {}", err);
return;
});
}
let (width, height) = if let Some((w, h)) = term_size::dimensions() {
(Some(w), Some(h))
} else {
(None, None)
};
let new_session = NewSession {
session_name,
detached: Some(true),
width,
height,
..Default::default()
};
match tmux.new_session(&new_session) {
Ok(v) => if !v.is_empty() {
eprintln!("Unable to create new session: {}", v);
return;
}
Err(err) => {
eprintln!("Unable to create new session: {}", err);
return;
}
}
}
// We rename the first window, so we can locate and remove it later.
match tmux.rename_window(None, ORIGINAL_WINDOW_NAME) {
Ok(v) => if !v.status.success() {
eprintln!("Unable to rename default window: {:?}", v);
return;
}
Err(err) => {
eprintln!("Unable to rename default window: {}", err);
return;
}
}
// This is where we need to build the actual layout...
for Window {
name: window_name,
layout,
panes,
} in windows {
let window_name = window_name.as_ref().map(Deref::deref);
let panes = panes.unwrap_or_default();
{ // Tell tmux to create the window
let new_window = NewWindow {
detached: Some(false),
window_name,
..Default::default()
};
match tmux.new_window(new_window) {
Ok(v) => if !v.is_empty() {
eprintln!("Unable to create new window: {}", v);
return;
}
Err(err) => {
eprintln!("Unable to create new window: {}", err);
return;
}
}
}
for Action(direction, target, percentage) in parse_layout(&layout) {
let selected = format!("{}", target + 1);
let percentage = (percentage * 100f32) as usize;
let split_window = SplitWindow {
target_pane: Some(&selected),
vertical: Some(direction == Direction::Vertical),
horizontal: Some(direction == Direction::Horizontal),
percentage: Some(percentage),
..Default::default()
};
match tmux.split_window(&split_window) {
Ok(v) => if !v.is_empty() {
eprintln!("Unable to split window: {}", v);
return;
}
Err(err) => {
eprintln!("Unable to split window: {}", err);
return;
}
}
}
let mut target = 1;
for pane in panes {
let target = {
let old = target;
target += 1;
old
};
let command = if let Some(ref value) = pane.command {
value.deref()
} else {
continue;
};
let selected = format!("{}", target);
let keys = vec![
command,
"C-m"
];
let send_keys = SendKeys {
target_pane: Some(&selected),
key: keys,
..Default::default()
};
match tmux.send_keys(&send_keys) {
Ok(v) => if !v.status.success() {
eprintln!("Unable to send command ({}) to pane {}: {:?}", command, target, v);
}
Err(err) => {
eprintln!("Unable to send command ({}) to pane {}: {}", command, target, err);
}
}
}
}
// Kill the first window, as tmux just adds it, but we don't want or need it.
match tmux.kill_window(None, Some(ORIGINAL_WINDOW_NAME)) {
Ok(v) => if !v.status.success() {
eprintln!("Unable to kill default window: {:?}", v);
return;
}
Err(err) => {
eprintln!("Unable to kill default window: {}", err);
return;
}
}
if let Some(value) = select {
let select_window = SelectWindow {
target_window: Some(value),
..Default::default()
};
match tmux.select_window(&select_window) {
Ok(v) => if !v.status.success() {
eprintln!("Unable to select window: {:?}", v);
return;
}
Err(err) => {
eprintln!("Unable to select window: {}", err);
return;
}
}
}
// We're done here, so we can attach to the session, and promptly fuck off.
attach_to(&tmux, session_name);
}
fn attach_to(tmux: &TmuxInterface, session_name: Option<&str>) -> ! {
let attach_session = AttachSession {
target_session: session_name,
detach_other: Some(true),
..Default::default()
};
tmux.attach_session_with(&attach_session, |args| {
println!("{:?}", args);
#[cfg(any(unix, macos))]
{
let program = tmux.tmux.unwrap_or("tmux");
use exec::Command;
let mut command = Command::new(program);
command.args(&args);
let error = command.exec();
panic!("{}", error);
}
#[cfg(not(any(unix, macos)))]
{
compile_error!("Windows doesn't support 'execvp'");
}
});
panic!("Failed to attach to tmux session: {:?}", session_name);
}
#[derive(Debug, PartialEq)]
enum Direction {
Vertical,
Horizontal,
}
#[derive(Debug)]
struct Action(Direction, u8, f32);
fn parse_layout(layout: &str) -> Vec<Action> {
let mut rects = determine_rectangles(layout);
let mut actions = vec![];
while let Some((parent_index, child_index, ordinal)) = find_first_pair(&rects) {
let child = rects.remove(child_index);
let parent = &mut rects[parent_index];
// println!("{:?} <= {:?}", parent.c, child.c);
match ordinal {
Ordinal::South => {
let old_height = child.height;
let new_height = parent.height + child.height;
let percentage = old_height as f32 / new_height as f32;
parent.height = new_height;
actions.push(Action(Direction::Vertical, parent_index as u8, percentage));
}
Ordinal::East => {
let old_width = child.width;
let new_width = parent.width + child.width;
let percentage = old_width as f32 / new_width as f32;
parent.width = new_width;
actions.push(Action(Direction::Horizontal, parent_index as u8, percentage));
}
_ => panic!("Someone changed the ORDINALS constant..."),
}
}
actions.reverse();
actions
}
fn find_first_pair(rects: &Vec<Rect>) -> Option<(usize, usize, Ordinal)> {
const ORDINALS: &[Ordinal] = &[
Ordinal::South,
Ordinal::East,
];
for (left_index, rect) in rects.iter().enumerate() {
for ordinal in ORDINALS {
let left_edge = rect.edge(*ordinal);
if let Some(right_index) = rects.iter().position(|r| r != rect && r.edge(ordinal.opposite()) == left_edge) {
return Some((left_index, right_index, *ordinal));
}
}
}
None
}
#[derive(Debug, PartialEq, Copy, Clone)]
enum Ordinal {
North,
South,
East,
West,
}
impl Ordinal {
fn opposite(&self) -> Ordinal {
match self {
Ordinal::North => Ordinal::South,
Ordinal::South => Ordinal::North,
Ordinal::East => Ordinal::West,
Ordinal::West => Ordinal::East,
}
}
}
#[derive(Debug, PartialEq)]
struct Rect {
c: char,
x: u8,
y: u8,
width: u8,
height: u8,
}
impl Rect {
fn edge(&self, direction: Ordinal) -> Edge {
match direction {
Ordinal::North => {
Edge {
x: self.x,
y: self.y,
length: self.width,
}
}
Ordinal::East => {
Edge {
x: self.x + self.width,
y: self.y,
length: self.height,
}
}
Ordinal::South => {
Edge {
x: self.x,
y: self.y + self.height,
length: self.width,
}
}
Ordinal::West => {
Edge {
x: self.x,
y: self.y,
length: self.height,
}
}
}
}
}
#[derive(Debug, PartialEq)]
struct | {
x: u8,
y: u8,
length: u8,
}
fn determine_rectangles(layout: &str) -> Vec<Rect> {
let (width, height, chars) = sanitise_input(layout);
macro_rules! point {
($index:ident) => {{
let x = $index % width;
let y = $index / width;
(x, y)
}};
}
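// e.g. an index of 5 with width == 3 maps to (x, y) == (2, 1).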
let mut index = 0usize;
let mut rects = vec![];
let mut bit_mask = vec![false; chars.len()];
while index < chars.len() {
if bit_mask[index] {
index += 1;
continue;
}
let c = chars[index];
let rect_width = {
let mut rect_width = width;
for offset in 1..width {
let right = index + offset;
if right >= chars.len() || chars[right] != c {
rect_width = offset;
break;
}
};
rect_width
};
let rect_height = {
let mut rect_height = height;
for offset in 1..height {
let below = index + (offset * width);
if below >= chars.len() || chars[below] != c {
rect_height = offset;
break;
}
}
rect_height
};
for y_offset in 0..rect_height {
for x_offset in 0..rect_width {
let bit_index = index + x_offset + (y_offset * width);
if chars[bit_index] != c {
panic!("Invalid character at {:?}. [expected: {:?}, found: {:?}]", point!(bit_index), c, chars[bit_index]);
}
bit_mask[bit_index] = true;
}
}
let (x, y) = point!(index);
rects.push(Rect {
c,
x: x as u8,
y: y as u8,
width: rect_width as u8,
height: rect_height as u8,
});
index += 1;
}
rects
}
fn sanitise_input(layout: &str) -> (usize, usize, Vec<char>) {
#[derive(PartialEq)]
enum Mode {
SkipWhitespace,
WidthCounting,
HeightCounting,
}
use Mode::*;
// It basically treats any whitespace as newlines...
// If you give it something like "000\n000 000"
// It'll think you've given it a 3x3 of '0'
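// e.g. "00\n01\n11\n" (trailing whitespace included) gives width = 2, height = 3 and six chars.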
let mut mode = SkipWhitespace;
let mut width = 0usize;
let mut running_width = 0usize;
let mut height = 0usize;
let mut chars = vec![];
for c in layout.chars() {
if c.is_ascii_whitespace() {
if mode == WidthCounting {
if width == 0 {
width = running_width;
running_width = 0;
} else {
if width != running_width {
panic!("Widths do not match! (pos:{})", chars.len());
}
running_width = 0;
}
mode = HeightCounting;
}
if mode == HeightCounting {
height += 1;
mode = SkipWhitespace;
}
continue;
}
if mode == SkipWhitespace {
mode = WidthCounting;
}
if mode == WidthCounting {
running_width += 1;
}
chars.push(c);
}
let expected = (width * height) as usize;
if expected != chars.len() {
panic!("Unexpected character count. [expected: {}, got: {}]", expected, chars.len());
}
(width, height, chars)
}
| Edge | identifier_name |
t_usefulness.rs | #![allow(clippy::excessive_precision)]
use wide::*;
use bytemuck::*;
#[test]
fn unpack_modify_and_repack_rgba_values() {
let mask = u32x4::from(0xFF);
//
let input = u32x4::from([0xFF0000FF, 0x00FF00FF, 0x0000FFFF, 0x000000FF]);
// unpack
let r_actual = cast::<_, i32x4>(input >> 24).round_float();
let g_actual = cast::<_, i32x4>((input >> 16) & mask).round_float();
let b_actual = cast::<_, i32x4>((input >> 8) & mask).round_float();
let a_actual = cast::<_, i32x4>(input & mask).round_float();
let r_expected = f32x4::from([255.0, 0.0, 0.0, 0.0]);
let g_expected = f32x4::from([0.0, 255.0, 0.0, 0.0]);
let b_expected = f32x4::from([0.0, 0.0, 255.0, 0.0]);
let a_expected = f32x4::from([255.0, 255.0, 255.0, 255.0]);
assert_eq!(r_expected, r_actual);
assert_eq!(g_expected, g_actual);
assert_eq!(b_expected, b_actual);
assert_eq!(a_expected, a_actual);
// modify some of the data
let r_new = (r_actual - f32x4::from(1.0)).max(f32x4::from(0.0));
let g_new = (g_actual - f32x4::from(1.0)).max(f32x4::from(0.0));
let b_new = (b_actual - f32x4::from(1.0)).max(f32x4::from(0.0));
let a_new = a_actual;
// repack
let r_u = cast::<i32x4, u32x4>(r_new.round_int());
let g_u = cast::<i32x4, u32x4>(g_new.round_int());
let b_u = cast::<i32x4, u32x4>(b_new.round_int());
let a_u = cast::<i32x4, u32x4>(a_new.round_int());
let output_actual = (r_u << 24) | (g_u << 16) | (b_u << 8) | (a_u);
let output_expected =
u32x4::from([0xFE0000FF, 0x00FE00FF, 0x0000FEFF, 0x000000FF]);
assert_eq!(output_expected, output_actual);
}
/// Implement JPEG IDCT using i16x8. This has slightly different behavior than
/// the normal 32 bit scalar implementation in libjpeg. It's a bit more accurate
/// in some ways (since the constants are encoded in 15 bits instead of 12) but
/// is more subject to hitting saturation during intermediate calculations,
/// although that should normally not be a problem for photographic JPEGs.
///
/// The main downside of this approach is that it is very slow to do saturating
/// math on scalar types on some CPUs, so if you need bit-exact behavior on
/// different architectures this is not the algorithm for you.
#[test]
fn test_dequantize_and_idct_i16() {
fn to_fixed(x: f32) -> i16 |
fn kernel_i16(data: [i16x8; 8]) -> [i16x8; 8] {
// kernel x
let a2 = data[2];
let a6 = data[6];
let b0 = a2.saturating_add(a6).mul_scale_round_n(to_fixed(0.5411961));
let c0 = b0
.saturating_sub(a6)
.saturating_sub(a6.mul_scale_round_n(to_fixed(0.847759065)));
let c1 = b0.saturating_add(a2.mul_scale_round_n(to_fixed(0.765366865)));
let a0 = data[0];
let a4 = data[4];
let b1 = a0.saturating_add(a4);
let b2 = a0.saturating_sub(a4);
let x0 = b1.saturating_add(c1);
let x1 = b2.saturating_add(c0);
let x2 = b2.saturating_sub(c0);
let x3 = b1.saturating_sub(c1);
// kernel t
let t0 = data[7];
let t1 = data[5];
let t2 = data[3];
let t3 = data[1];
let p1 = t0.saturating_add(t3);
let p2 = t1.saturating_add(t2);
let p3 = t0.saturating_add(t2);
let p4 = t1.saturating_add(t3);
let p5t = p3.saturating_add(p4);
let p5 = p5t.saturating_add(p5t.mul_scale_round_n(to_fixed(0.175875602)));
let e0 = t0.mul_scale_round_n(to_fixed(0.298631336));
let e1 = t1
.saturating_add(t1)
.saturating_add(t1.mul_scale_round_n(to_fixed(0.053119869)));
let e2 = t2
.saturating_add(t2)
.saturating_add(t2)
.saturating_add(t2.mul_scale_round_n(to_fixed(0.072711026)));
let e3 = t3.saturating_add(t3.mul_scale_round_n(to_fixed(0.501321110)));
let f0 = p5.saturating_sub(p1.mul_scale_round_n(to_fixed(0.899976223)));
let f1 = p5
.saturating_sub(p2)
.saturating_sub(p2)
.saturating_sub(p2.mul_scale_round_n(to_fixed(0.562915447)));
let f2 = p3.mul_scale_round_n(to_fixed(-0.961570560)).saturating_sub(p3);
let f3 = p4.mul_scale_round_n(to_fixed(-0.390180644));
let t3 = f0.saturating_add(f3).saturating_add(e3);
let t2 = f1.saturating_add(f2).saturating_add(e2);
let t1 = f1.saturating_add(f3).saturating_add(e1);
let t0 = f0.saturating_add(f2).saturating_add(e0);
[
x0.saturating_add(t3),
x1.saturating_add(t2),
x2.saturating_add(t1),
x3.saturating_add(t0),
x3.saturating_sub(t0),
x2.saturating_sub(t1),
x1.saturating_sub(t2),
x0.saturating_sub(t3),
]
}
#[rustfmt::skip]
let coefficients: [i16; 8 * 8] = [
-14, -39, 58, -2, 3, 3, 0, 1,
11, 27, 4, -3, 3, 0, 1, 0,
-6, -13, -9, -1, -2, -1, 0, 0,
-4, 0, -1, -2, 0, 0, 0, 0,
3, 0, 0, 0, 0, 0, 0, 0,
-3, -2, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0
];
#[rustfmt::skip]
let quantization_table: [i16; 8 * 8] = [
8, 6, 5, 8, 12, 20, 26, 31,
6, 6, 7, 10, 13, 29, 30, 28,
7, 7, 8, 12, 20, 29, 35, 28,
7, 9, 11, 15, 26, 44, 40, 31,
9, 11, 19, 28, 34, 55, 52, 39,
12, 18, 28, 32, 41, 52, 57, 46,
25, 32, 39, 44, 52, 61, 60, 51,
36, 46, 48, 49, 56, 50, 52, 50
];
let c: [i16x8; 8] = cast(coefficients);
let q: [i16x8; 8] = cast(quantization_table);
// coefficients normally go up to 1024, shift up by 3 to get extra precision
const SHIFT: i16 = 3;
let data = [
(c[0] * q[0]) << SHIFT,
(c[1] * q[1]) << SHIFT,
(c[2] * q[2]) << SHIFT,
(c[3] * q[3]) << SHIFT,
(c[4] * q[4]) << SHIFT,
(c[5] * q[5]) << SHIFT,
(c[6] * q[6]) << SHIFT,
(c[7] * q[7]) << SHIFT,
];
let pass1 = kernel_i16(data);
let transpose1 = i16x8::transpose(pass1);
let pass2 = kernel_i16(transpose1);
let result = i16x8::transpose(pass2);
// offset to recenter to 0..256 and round properly
const ROUND_FACTOR: i16 = 0x2020;
let round_factor = i16x8::splat(ROUND_FACTOR);
let result_adj = [
result[0].saturating_add(round_factor) >> (2 * SHIFT),
result[1].saturating_add(round_factor) >> (2 * SHIFT),
result[2].saturating_add(round_factor) >> (2 * SHIFT),
result[3].saturating_add(round_factor) >> (2 * SHIFT),
result[4].saturating_add(round_factor) >> (2 * SHIFT),
result[5].saturating_add(round_factor) >> (2 * SHIFT),
result[6].saturating_add(round_factor) >> (2 * SHIFT),
result[7].saturating_add(round_factor) >> (2 * SHIFT),
];
let output: [i16; 64] = cast(result_adj);
#[rustfmt::skip]
let expected_output = [
118, 92, 110, 83, 77, 93, 144, 198,
172, 116, 114, 87, 78, 93, 146, 191,
194, 107, 91, 76, 71, 93, 160, 198,
196, 100, 80, 74, 67, 92, 174, 209,
182, 104, 88, 81, 68, 89, 178, 206,
105, 64, 59, 59, 63, 94, 183, 201,
35, 27, 28, 37, 72, 121, 203, 204,
38, 45, 41, 47, 99, 154, 223, 208
];
assert_eq!(expected_output, output);
}
/// Implement JPEG IDCT using i32x8. This is most similar to the scalar
/// libjpeg version which has slightly different rounding properties than the 16
/// bit version. Some decoders are forced to use this if they want bit-by-bit
/// compatibility across all architectures.
#[test]
fn test_dequantize_and_idct_i32() {
fn to_fixed(x: f32) -> i32 {
(x * 4096.0 + 0.5) as i32
}
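// e.g. to_fixed(0.5411961) ≈ 2217, i.e. the constant with 12 fractional bits (scaled by 4096).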
fn kernel_i32(
[s0, s1, s2, s3, s4, s5, s6, s7]: [i32x8; 8],
rounding_factor: i32,
shift_right: i32,
) -> [i32x8; 8] {
// kernel x
let at = (s2 + s6) * to_fixed(0.5411961);
let a0 = (s0 + s4) << 12; // multiply by 1, ie 4096 in fixed point)
let a1 = (s0 - s4) << 12; // multiply by 1, ie 4096 in fixed point)
let a2 = at + s6 * to_fixed(-1.847759065);
let a3 = at + s2 * to_fixed(0.765366865);
let x0 = a0 + a3 + rounding_factor; // add rounding factor here to avoid extra addition
let x1 = a1 + a2 + rounding_factor;
let x2 = a1 - a2 + rounding_factor;
let x3 = a0 - a3 + rounding_factor;
// kernel t
let b0 = s7 + s1;
let b1 = s5 + s3;
let b2 = s7 + s3;
let b3 = s5 + s1;
let ct = (b2 + b3) * to_fixed(1.175875602);
let c0 = ct + b0 * to_fixed(-0.899976223);
let c1 = ct + b1 * to_fixed(-2.562915447);
let c2 = b2 * to_fixed(-1.961570560);
let c3 = b3 * to_fixed(-0.390180644);
let t0 = s7 * to_fixed(0.298631336) + c0 + c2;
let t1 = s5 * to_fixed(2.053119869) + c1 + c3;
let t2 = s3 * to_fixed(3.072711026) + c1 + c2;
let t3 = s1 * to_fixed(1.501321110) + c0 + c3;
[
(x0 + t3) >> shift_right,
(x1 + t2) >> shift_right,
(x2 + t1) >> shift_right,
(x3 + t0) >> shift_right,
(x3 - t0) >> shift_right,
(x2 - t1) >> shift_right,
(x1 - t2) >> shift_right,
(x0 - t3) >> shift_right,
]
}
#[rustfmt::skip]
let coefficients: [i32; 8 * 8] = [
-14, -39, 58, -2, 3, 3, 0, 1,
11, 27, 4, -3, 3, 0, 1, 0,
-6, -13, -9, -1, -2, -1, 0, 0,
-4, 0, -1, -2, 0, 0, 0, 0,
3, 0, 0, 0, 0, 0, 0, 0,
-3, -2, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0
];
#[rustfmt::skip]
let quantization_table: [i32; 8 * 8] = [
8, 6, 5, 8, 12, 20, 26, 31,
6, 6, 7, 10, 13, 29, 30, 28,
7, 7, 8, 12, 20, 29, 35, 28,
7, 9, 11, 15, 26, 44, 40, 31,
9, 11, 19, 28, 34, 55, 52, 39,
12, 18, 28, 32, 41, 52, 57, 46,
25, 32, 39, 44, 52, 61, 60, 51,
36, 46, 48, 49, 56, 50, 52, 50
];
let c: [i32x8; 8] = cast(coefficients);
let q: [i32x8; 8] = cast(quantization_table);
let scaled = [
c[0] * q[0],
c[1] * q[1],
c[2] * q[2],
c[3] * q[3],
c[4] * q[4],
c[5] * q[5],
c[6] * q[6],
c[7] * q[7],
];
// add rounding factor before shifting right
let pass1 = kernel_i32(scaled, 1 << 9, 10);
let transpose1 = i32x8::transpose(pass1);
// add rounding factor before shifting right (include rebasing from -128..128
// to 0..256)
let pass2 = kernel_i32(transpose1, 65536 + (128 << 17), 17);
let result = i32x8::transpose(pass2);
let output: [i32; 64] = cast(result);
// same as other DCT test with some minor rounding differences
#[rustfmt::skip]
let expected_output = [
118, 92, 110, 83, 77, 93, 144, 198,
172, 116, 114, 87, 78, 93, 146, 191,
194, 107, 91, 76, 71, 93, 160, 198,
196, 100, 80, 74, 67, 92, 174, 209,
182, 104, 88, 81, 68, 89, 178, 206,
105, 64, 59, 59, 63, 94, 183, 201,
35, 27, 28, 37, 72, 121, 203, 204,
37, 45, 41, 47, 98, 154, 223, 208];
assert_eq!(expected_output, output);
}
| {
(x * 32767.0 + 0.5) as i16
} | identifier_body |
t_usefulness.rs | #![allow(clippy::excessive_precision)]
use wide::*;
use bytemuck::*;
#[test]
fn unpack_modify_and_repack_rgba_values() {
let mask = u32x4::from(0xFF);
//
let input = u32x4::from([0xFF0000FF, 0x00FF00FF, 0x0000FFFF, 0x000000FF]);
// unpack
let r_actual = cast::<_, i32x4>(input >> 24).round_float();
let g_actual = cast::<_, i32x4>((input >> 16) & mask).round_float();
let b_actual = cast::<_, i32x4>((input >> 8) & mask).round_float();
let a_actual = cast::<_, i32x4>(input & mask).round_float(); | let a_expected = f32x4::from([255.0, 255.0, 255.0, 255.0]);
assert_eq!(r_expected, r_actual);
assert_eq!(g_expected, g_actual);
assert_eq!(b_expected, b_actual);
assert_eq!(a_expected, a_actual);
// modify some of the data
let r_new = (r_actual - f32x4::from(1.0)).max(f32x4::from(0.0));
let g_new = (g_actual - f32x4::from(1.0)).max(f32x4::from(0.0));
let b_new = (b_actual - f32x4::from(1.0)).max(f32x4::from(0.0));
let a_new = a_actual;
// repack
let r_u = cast::<i32x4, u32x4>(r_new.round_int());
let g_u = cast::<i32x4, u32x4>(g_new.round_int());
let b_u = cast::<i32x4, u32x4>(b_new.round_int());
let a_u = cast::<i32x4, u32x4>(a_new.round_int());
let output_actual = (r_u << 24) | (g_u << 16) | (b_u << 8) | (a_u);
let output_expected =
u32x4::from([0xFE0000FF, 0x00FE00FF, 0x0000FEFF, 0x000000FF]);
assert_eq!(output_expected, output_actual);
}
/// Implement JPEG IDCT using i16x8. This has slightly different behavior than
/// the normal 32 bit scalar implementation in libjpeg. It's a bit more accurate
/// in some ways (since the constants are encoded in 15 bits instead of 12) but
/// is more subject to hitting saturation during intermediate calculations,
/// although that should normally not be a problem for photographic JPEGs.
///
/// The main downside of this approach is that it is very slow to do saturating
/// math on scalar types on some CPUs, so if you need bit-exact behavior on
/// different architectures this is not the algorithm for you.
#[test]
fn test_dequantize_and_idct_i16() {
fn to_fixed(x: f32) -> i16 {
(x * 32767.0 + 0.5) as i16
}
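// e.g. to_fixed(0.5411961) ≈ 17733, i.e. the constant in roughly Q15 fixed point (scaled by 32767).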
fn kernel_i16(data: [i16x8; 8]) -> [i16x8; 8] {
// kernel x
let a2 = data[2];
let a6 = data[6];
let b0 = a2.saturating_add(a6).mul_scale_round_n(to_fixed(0.5411961));
let c0 = b0
.saturating_sub(a6)
.saturating_sub(a6.mul_scale_round_n(to_fixed(0.847759065)));
let c1 = b0.saturating_add(a2.mul_scale_round_n(to_fixed(0.765366865)));
let a0 = data[0];
let a4 = data[4];
let b1 = a0.saturating_add(a4);
let b2 = a0.saturating_sub(a4);
let x0 = b1.saturating_add(c1);
let x1 = b2.saturating_add(c0);
let x2 = b2.saturating_sub(c0);
let x3 = b1.saturating_sub(c1);
// kernel t
let t0 = data[7];
let t1 = data[5];
let t2 = data[3];
let t3 = data[1];
let p1 = t0.saturating_add(t3);
let p2 = t1.saturating_add(t2);
let p3 = t0.saturating_add(t2);
let p4 = t1.saturating_add(t3);
let p5t = p3.saturating_add(p4);
let p5 = p5t.saturating_add(p5t.mul_scale_round_n(to_fixed(0.175875602)));
let e0 = t0.mul_scale_round_n(to_fixed(0.298631336));
let e1 = t1
.saturating_add(t1)
.saturating_add(t1.mul_scale_round_n(to_fixed(0.053119869)));
let e2 = t2
.saturating_add(t2)
.saturating_add(t2)
.saturating_add(t2.mul_scale_round_n(to_fixed(0.072711026)));
let e3 = t3.saturating_add(t3.mul_scale_round_n(to_fixed(0.501321110)));
let f0 = p5.saturating_sub(p1.mul_scale_round_n(to_fixed(0.899976223)));
let f1 = p5
.saturating_sub(p2)
.saturating_sub(p2)
.saturating_sub(p2.mul_scale_round_n(to_fixed(0.562915447)));
let f2 = p3.mul_scale_round_n(to_fixed(-0.961570560)).saturating_sub(p3);
let f3 = p4.mul_scale_round_n(to_fixed(-0.390180644));
let t3 = f0.saturating_add(f3).saturating_add(e3);
let t2 = f1.saturating_add(f2).saturating_add(e2);
let t1 = f1.saturating_add(f3).saturating_add(e1);
let t0 = f0.saturating_add(f2).saturating_add(e0);
[
x0.saturating_add(t3),
x1.saturating_add(t2),
x2.saturating_add(t1),
x3.saturating_add(t0),
x3.saturating_sub(t0),
x2.saturating_sub(t1),
x1.saturating_sub(t2),
x0.saturating_sub(t3),
]
}
#[rustfmt::skip]
let coefficients: [i16; 8 * 8] = [
-14, -39, 58, -2, 3, 3, 0, 1,
11, 27, 4, -3, 3, 0, 1, 0,
-6, -13, -9, -1, -2, -1, 0, 0,
-4, 0, -1, -2, 0, 0, 0, 0,
3, 0, 0, 0, 0, 0, 0, 0,
-3, -2, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0
];
#[rustfmt::skip]
let quantization_table: [i16; 8 * 8] = [
8, 6, 5, 8, 12, 20, 26, 31,
6, 6, 7, 10, 13, 29, 30, 28,
7, 7, 8, 12, 20, 29, 35, 28,
7, 9, 11, 15, 26, 44, 40, 31,
9, 11, 19, 28, 34, 55, 52, 39,
12, 18, 28, 32, 41, 52, 57, 46,
25, 32, 39, 44, 52, 61, 60, 51,
36, 46, 48, 49, 56, 50, 52, 50
];
let c: [i16x8; 8] = cast(coefficients);
let q: [i16x8; 8] = cast(quantization_table);
// coefficients normally go up to 1024, shift up by 3 to get extra precision
const SHIFT: i16 = 3;
let data = [
(c[0] * q[0]) << SHIFT,
(c[1] * q[1]) << SHIFT,
(c[2] * q[2]) << SHIFT,
(c[3] * q[3]) << SHIFT,
(c[4] * q[4]) << SHIFT,
(c[5] * q[5]) << SHIFT,
(c[6] * q[6]) << SHIFT,
(c[7] * q[7]) << SHIFT,
];
let pass1 = kernel_i16(data);
let transpose1 = i16x8::transpose(pass1);
let pass2 = kernel_i16(transpose1);
let result = i16x8::transpose(pass2);
// offset to recenter to 0..256 and round properly
const ROUND_FACTOR: i16 = 0x2020;
let round_factor = i16x8::splat(ROUND_FACTOR);
let result_adj = [
result[0].saturating_add(round_factor) >> (2 * SHIFT),
result[1].saturating_add(round_factor) >> (2 * SHIFT),
result[2].saturating_add(round_factor) >> (2 * SHIFT),
result[3].saturating_add(round_factor) >> (2 * SHIFT),
result[4].saturating_add(round_factor) >> (2 * SHIFT),
result[5].saturating_add(round_factor) >> (2 * SHIFT),
result[6].saturating_add(round_factor) >> (2 * SHIFT),
result[7].saturating_add(round_factor) >> (2 * SHIFT),
];
let output: [i16; 64] = cast(result_adj);
#[rustfmt::skip]
let expected_output = [
118, 92, 110, 83, 77, 93, 144, 198,
172, 116, 114, 87, 78, 93, 146, 191,
194, 107, 91, 76, 71, 93, 160, 198,
196, 100, 80, 74, 67, 92, 174, 209,
182, 104, 88, 81, 68, 89, 178, 206,
105, 64, 59, 59, 63, 94, 183, 201,
35, 27, 28, 37, 72, 121, 203, 204,
38, 45, 41, 47, 99, 154, 223, 208
];
assert_eq!(expected_output, output);
}
/// Implement JPEG IDCT using i32x8. This is most similar to the scalar
/// libjpeg version which has slightly different rounding properties than the 16
/// bit version. Some decoders are forced to use this if they want bit-by-bit
/// compatibility across all architectures.
#[test]
fn test_dequantize_and_idct_i32() {
fn to_fixed(x: f32) -> i32 {
(x * 4096.0 + 0.5) as i32
}
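// e.g. to_fixed(0.5411961) ≈ 2217, i.e. the constant with 12 fractional bits (scaled by 4096).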
fn kernel_i32(
[s0, s1, s2, s3, s4, s5, s6, s7]: [i32x8; 8],
rounding_factor: i32,
shift_right: i32,
) -> [i32x8; 8] {
// kernel x
let at = (s2 + s6) * to_fixed(0.5411961);
let a0 = (s0 + s4) << 12; // multiply by 1, ie 4096 in fixed point)
let a1 = (s0 - s4) << 12; // multiply by 1, ie 4096 in fixed point)
let a2 = at + s6 * to_fixed(-1.847759065);
let a3 = at + s2 * to_fixed(0.765366865);
let x0 = a0 + a3 + rounding_factor; // add rounding factor here to avoid extra addition
let x1 = a1 + a2 + rounding_factor;
let x2 = a1 - a2 + rounding_factor;
let x3 = a0 - a3 + rounding_factor;
// kernel t
let b0 = s7 + s1;
let b1 = s5 + s3;
let b2 = s7 + s3;
let b3 = s5 + s1;
let ct = (b2 + b3) * to_fixed(1.175875602);
let c0 = ct + b0 * to_fixed(-0.899976223);
let c1 = ct + b1 * to_fixed(-2.562915447);
let c2 = b2 * to_fixed(-1.961570560);
let c3 = b3 * to_fixed(-0.390180644);
let t0 = s7 * to_fixed(0.298631336) + c0 + c2;
let t1 = s5 * to_fixed(2.053119869) + c1 + c3;
let t2 = s3 * to_fixed(3.072711026) + c1 + c2;
let t3 = s1 * to_fixed(1.501321110) + c0 + c3;
[
(x0 + t3) >> shift_right,
(x1 + t2) >> shift_right,
(x2 + t1) >> shift_right,
(x3 + t0) >> shift_right,
(x3 - t0) >> shift_right,
(x2 - t1) >> shift_right,
(x1 - t2) >> shift_right,
(x0 - t3) >> shift_right,
]
}
#[rustfmt::skip]
let coefficients: [i32; 8 * 8] = [
-14, -39, 58, -2, 3, 3, 0, 1,
11, 27, 4, -3, 3, 0, 1, 0,
-6, -13, -9, -1, -2, -1, 0, 0,
-4, 0, -1, -2, 0, 0, 0, 0,
3, 0, 0, 0, 0, 0, 0, 0,
-3, -2, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0
];
#[rustfmt::skip]
let quantization_table: [i32; 8 * 8] = [
8, 6, 5, 8, 12, 20, 26, 31,
6, 6, 7, 10, 13, 29, 30, 28,
7, 7, 8, 12, 20, 29, 35, 28,
7, 9, 11, 15, 26, 44, 40, 31,
9, 11, 19, 28, 34, 55, 52, 39,
12, 18, 28, 32, 41, 52, 57, 46,
25, 32, 39, 44, 52, 61, 60, 51,
36, 46, 48, 49, 56, 50, 52, 50
];
let c: [i32x8; 8] = cast(coefficients);
let q: [i32x8; 8] = cast(quantization_table);
let scaled = [
c[0] * q[0],
c[1] * q[1],
c[2] * q[2],
c[3] * q[3],
c[4] * q[4],
c[5] * q[5],
c[6] * q[6],
c[7] * q[7],
];
// add rounding factor before shifting right
let pass1 = kernel_i32(scaled, 1 << 9, 10);
let transpose1 = i32x8::transpose(pass1);
// add rounding factor before shifting right (include rebasing from -128..128
// to 0..256)
let pass2 = kernel_i32(transpose1, 65536 + (128 << 17), 17);
let result = i32x8::transpose(pass2);
let output: [i32; 64] = cast(result);
// same as other DCT test with some minor rounding differences
#[rustfmt::skip]
let expected_output = [
118, 92, 110, 83, 77, 93, 144, 198,
172, 116, 114, 87, 78, 93, 146, 191,
194, 107, 91, 76, 71, 93, 160, 198,
196, 100, 80, 74, 67, 92, 174, 209,
182, 104, 88, 81, 68, 89, 178, 206,
105, 64, 59, 59, 63, 94, 183, 201,
35, 27, 28, 37, 72, 121, 203, 204,
37, 45, 41, 47, 98, 154, 223, 208];
assert_eq!(expected_output, output);
} | let r_expected = f32x4::from([255.0, 0.0, 0.0, 0.0]);
let g_expected = f32x4::from([0.0, 255.0, 0.0, 0.0]);
let b_expected = f32x4::from([0.0, 0.0, 255.0, 0.0]); | random_line_split |
t_usefulness.rs | #![allow(clippy::excessive_precision)]
use wide::*;
use bytemuck::*;
#[test]
fn unpack_modify_and_repack_rgba_values() {
let mask = u32x4::from(0xFF);
//
let input = u32x4::from([0xFF0000FF, 0x00FF00FF, 0x0000FFFF, 0x000000FF]);
// unpack
let r_actual = cast::<_, i32x4>(input >> 24).round_float();
let g_actual = cast::<_, i32x4>((input >> 16) & mask).round_float();
let b_actual = cast::<_, i32x4>((input >> 8) & mask).round_float();
let a_actual = cast::<_, i32x4>(input & mask).round_float();
let r_expected = f32x4::from([255.0, 0.0, 0.0, 0.0]);
let g_expected = f32x4::from([0.0, 255.0, 0.0, 0.0]);
let b_expected = f32x4::from([0.0, 0.0, 255.0, 0.0]);
let a_expected = f32x4::from([255.0, 255.0, 255.0, 255.0]);
assert_eq!(r_expected, r_actual);
assert_eq!(g_expected, g_actual);
assert_eq!(b_expected, b_actual);
assert_eq!(a_expected, a_actual);
// modify some of the data
let r_new = (r_actual - f32x4::from(1.0)).max(f32x4::from(0.0));
let g_new = (g_actual - f32x4::from(1.0)).max(f32x4::from(0.0));
let b_new = (b_actual - f32x4::from(1.0)).max(f32x4::from(0.0));
let a_new = a_actual;
// repack
let r_u = cast::<i32x4, u32x4>(r_new.round_int());
let g_u = cast::<i32x4, u32x4>(g_new.round_int());
let b_u = cast::<i32x4, u32x4>(b_new.round_int());
let a_u = cast::<i32x4, u32x4>(a_new.round_int());
let output_actual = (r_u << 24) | (g_u << 16) | (b_u << 8) | (a_u);
let output_expected =
u32x4::from([0xFE0000FF, 0x00FE00FF, 0x0000FEFF, 0x000000FF]);
assert_eq!(output_expected, output_actual);
}
/// Implement JPEG IDCT using i16x8. This has slightly different behavior than
/// the normal 32 bit scalar implementation in libjpeg. It's a bit more accurate
/// in some ways (since the constants are encoded in 15 bits instead of 12) but
/// is more subject to hitting saturation during intermediate calculations,
/// although that should normally not be a problem for photographic JPEGs.
///
/// The main downside of this approach is that it is very slow to do saturating
/// math on scalar types on some CPUs, so if you need bit-exact behavior on
/// different architectures this is not the algorithm for you.
#[test]
fn test_dequantize_and_idct_i16() {
fn to_fixed(x: f32) -> i16 {
(x * 32767.0 + 0.5) as i16
}
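// e.g. to_fixed(0.5411961) ≈ 17733, i.e. the constant in roughly Q15 fixed point (scaled by 32767).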
fn | (data: [i16x8; 8]) -> [i16x8; 8] {
// kernel x
let a2 = data[2];
let a6 = data[6];
let b0 = a2.saturating_add(a6).mul_scale_round_n(to_fixed(0.5411961));
let c0 = b0
.saturating_sub(a6)
.saturating_sub(a6.mul_scale_round_n(to_fixed(0.847759065)));
let c1 = b0.saturating_add(a2.mul_scale_round_n(to_fixed(0.765366865)));
let a0 = data[0];
let a4 = data[4];
let b1 = a0.saturating_add(a4);
let b2 = a0.saturating_sub(a4);
let x0 = b1.saturating_add(c1);
let x1 = b2.saturating_add(c0);
let x2 = b2.saturating_sub(c0);
let x3 = b1.saturating_sub(c1);
// kernel t
let t0 = data[7];
let t1 = data[5];
let t2 = data[3];
let t3 = data[1];
let p1 = t0.saturating_add(t3);
let p2 = t1.saturating_add(t2);
let p3 = t0.saturating_add(t2);
let p4 = t1.saturating_add(t3);
let p5t = p3.saturating_add(p4);
let p5 = p5t.saturating_add(p5t.mul_scale_round_n(to_fixed(0.175875602)));
let e0 = t0.mul_scale_round_n(to_fixed(0.298631336));
let e1 = t1
.saturating_add(t1)
.saturating_add(t1.mul_scale_round_n(to_fixed(0.053119869)));
let e2 = t2
.saturating_add(t2)
.saturating_add(t2)
.saturating_add(t2.mul_scale_round_n(to_fixed(0.072711026)));
let e3 = t3.saturating_add(t3.mul_scale_round_n(to_fixed(0.501321110)));
let f0 = p5.saturating_sub(p1.mul_scale_round_n(to_fixed(0.899976223)));
let f1 = p5
.saturating_sub(p2)
.saturating_sub(p2)
.saturating_sub(p2.mul_scale_round_n(to_fixed(0.562915447)));
let f2 = p3.mul_scale_round_n(to_fixed(-0.961570560)).saturating_sub(p3);
let f3 = p4.mul_scale_round_n(to_fixed(-0.390180644));
let t3 = f0.saturating_add(f3).saturating_add(e3);
let t2 = f1.saturating_add(f2).saturating_add(e2);
let t1 = f1.saturating_add(f3).saturating_add(e1);
let t0 = f0.saturating_add(f2).saturating_add(e0);
[
x0.saturating_add(t3),
x1.saturating_add(t2),
x2.saturating_add(t1),
x3.saturating_add(t0),
x3.saturating_sub(t0),
x2.saturating_sub(t1),
x1.saturating_sub(t2),
x0.saturating_sub(t3),
]
}
#[rustfmt::skip]
let coefficients: [i16; 8 * 8] = [
-14, -39, 58, -2, 3, 3, 0, 1,
11, 27, 4, -3, 3, 0, 1, 0,
-6, -13, -9, -1, -2, -1, 0, 0,
-4, 0, -1, -2, 0, 0, 0, 0,
3, 0, 0, 0, 0, 0, 0, 0,
-3, -2, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0
];
#[rustfmt::skip]
let quantization_table: [i16; 8 * 8] = [
8, 6, 5, 8, 12, 20, 26, 31,
6, 6, 7, 10, 13, 29, 30, 28,
7, 7, 8, 12, 20, 29, 35, 28,
7, 9, 11, 15, 26, 44, 40, 31,
9, 11, 19, 28, 34, 55, 52, 39,
12, 18, 28, 32, 41, 52, 57, 46,
25, 32, 39, 44, 52, 61, 60, 51,
36, 46, 48, 49, 56, 50, 52, 50
];
let c: [i16x8; 8] = cast(coefficients);
let q: [i16x8; 8] = cast(quantization_table);
// coefficients normally go up to 1024, shift up by 3 to get extra precision
const SHIFT: i16 = 3;
let data = [
(c[0] * q[0]) << SHIFT,
(c[1] * q[1]) << SHIFT,
(c[2] * q[2]) << SHIFT,
(c[3] * q[3]) << SHIFT,
(c[4] * q[4]) << SHIFT,
(c[5] * q[5]) << SHIFT,
(c[6] * q[6]) << SHIFT,
(c[7] * q[7]) << SHIFT,
];
let pass1 = kernel_i16(data);
let transpose1 = i16x8::transpose(pass1);
let pass2 = kernel_i16(transpose1);
let result = i16x8::transpose(pass2);
// offset to recenter to 0..256 and round properly
const ROUND_FACTOR: i16 = 0x2020;
let round_factor = i16x8::splat(ROUND_FACTOR);
let result_adj = [
result[0].saturating_add(round_factor) >> (2 * SHIFT),
result[1].saturating_add(round_factor) >> (2 * SHIFT),
result[2].saturating_add(round_factor) >> (2 * SHIFT),
result[3].saturating_add(round_factor) >> (2 * SHIFT),
result[4].saturating_add(round_factor) >> (2 * SHIFT),
result[5].saturating_add(round_factor) >> (2 * SHIFT),
result[6].saturating_add(round_factor) >> (2 * SHIFT),
result[7].saturating_add(round_factor) >> (2 * SHIFT),
];
let output: [i16; 64] = cast(result_adj);
#[rustfmt::skip]
let expected_output = [
118, 92, 110, 83, 77, 93, 144, 198,
172, 116, 114, 87, 78, 93, 146, 191,
194, 107, 91, 76, 71, 93, 160, 198,
196, 100, 80, 74, 67, 92, 174, 209,
182, 104, 88, 81, 68, 89, 178, 206,
105, 64, 59, 59, 63, 94, 183, 201,
35, 27, 28, 37, 72, 121, 203, 204,
38, 45, 41, 47, 99, 154, 223, 208
];
assert_eq!(expected_output, output);
}
/// Implement JPEG IDCT using i32x8. This is most similar to the scalar
/// libjpeg version which has slightly different rounding properties than the 16
/// bit version. Some decoders are forced to use this if they want bit-by-bit
/// compatibility across all architectures.
#[test]
fn test_dequantize_and_idct_i32() {
fn to_fixed(x: f32) -> i32 {
(x * 4096.0 + 0.5) as i32
}
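// e.g. to_fixed(0.5411961) ≈ 2217, i.e. the constant with 12 fractional bits (scaled by 4096).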
fn kernel_i32(
[s0, s1, s2, s3, s4, s5, s6, s7]: [i32x8; 8],
rounding_factor: i32,
shift_right: i32,
) -> [i32x8; 8] {
// kernel x
let at = (s2 + s6) * to_fixed(0.5411961);
let a0 = (s0 + s4) << 12; // multiply by 1, ie 4096 in fixed point)
let a1 = (s0 - s4) << 12; // multiply by 1, ie 4096 in fixed point)
let a2 = at + s6 * to_fixed(-1.847759065);
let a3 = at + s2 * to_fixed(0.765366865);
let x0 = a0 + a3 + rounding_factor; // add rounding factor here to avoid extra addition
let x1 = a1 + a2 + rounding_factor;
let x2 = a1 - a2 + rounding_factor;
let x3 = a0 - a3 + rounding_factor;
// kernel t
let b0 = s7 + s1;
let b1 = s5 + s3;
let b2 = s7 + s3;
let b3 = s5 + s1;
let ct = (b2 + b3) * to_fixed(1.175875602);
let c0 = ct + b0 * to_fixed(-0.899976223);
let c1 = ct + b1 * to_fixed(-2.562915447);
let c2 = b2 * to_fixed(-1.961570560);
let c3 = b3 * to_fixed(-0.390180644);
let t0 = s7 * to_fixed(0.298631336) + c0 + c2;
let t1 = s5 * to_fixed(2.053119869) + c1 + c3;
let t2 = s3 * to_fixed(3.072711026) + c1 + c2;
let t3 = s1 * to_fixed(1.501321110) + c0 + c3;
[
(x0 + t3) >> shift_right,
(x1 + t2) >> shift_right,
(x2 + t1) >> shift_right,
(x3 + t0) >> shift_right,
(x3 - t0) >> shift_right,
(x2 - t1) >> shift_right,
(x1 - t2) >> shift_right,
(x0 - t3) >> shift_right,
]
}
#[rustfmt::skip]
let coefficients: [i32; 8 * 8] = [
-14, -39, 58, -2, 3, 3, 0, 1,
11, 27, 4, -3, 3, 0, 1, 0,
-6, -13, -9, -1, -2, -1, 0, 0,
-4, 0, -1, -2, 0, 0, 0, 0,
3, 0, 0, 0, 0, 0, 0, 0,
-3, -2, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0
];
#[rustfmt::skip]
let quantization_table: [i32; 8 * 8] = [
8, 6, 5, 8, 12, 20, 26, 31,
6, 6, 7, 10, 13, 29, 30, 28,
7, 7, 8, 12, 20, 29, 35, 28,
7, 9, 11, 15, 26, 44, 40, 31,
9, 11, 19, 28, 34, 55, 52, 39,
12, 18, 28, 32, 41, 52, 57, 46,
25, 32, 39, 44, 52, 61, 60, 51,
36, 46, 48, 49, 56, 50, 52, 50
];
let c: [i32x8; 8] = cast(coefficients);
let q: [i32x8; 8] = cast(quantization_table);
let scaled = [
c[0] * q[0],
c[1] * q[1],
c[2] * q[2],
c[3] * q[3],
c[4] * q[4],
c[5] * q[5],
c[6] * q[6],
c[7] * q[7],
];
// add rounding factor before shifting right
let pass1 = kernel_i32(scaled, 1 << 9, 10);
let transpose1 = i32x8::transpose(pass1);
// add rounding factor before shifting right (include rebasing from -128..128
// to 0..256)
let pass2 = kernel_i32(transpose1, 65536 + (128 << 17), 17);
let result = i32x8::transpose(pass2);
let output: [i32; 64] = cast(result);
// same as other DCT test with some minor rounding differences
#[rustfmt::skip]
let expected_output = [
118, 92, 110, 83, 77, 93, 144, 198,
172, 116, 114, 87, 78, 93, 146, 191,
194, 107, 91, 76, 71, 93, 160, 198,
196, 100, 80, 74, 67, 92, 174, 209,
182, 104, 88, 81, 68, 89, 178, 206,
105, 64, 59, 59, 63, 94, 183, 201,
35, 27, 28, 37, 72, 121, 203, 204,
37, 45, 41, 47, 98, 154, 223, 208];
assert_eq!(expected_output, output);
}
| kernel_i16 | identifier_name |
useful.rs | use rand::{Rng, thread_rng};
use serenity::builder::CreateEmbed;
use serenity::client::Context;
use serenity::model::channel::Message;
use tokio_postgres::{Error, NoTls, types::ToSql};
use crate::format_emojis;
const POSTGRE: &'static str = "host=192.168.1.146 user=postgres";
pub const GRIST_TYPES: (&'static str, &'static str, &'static str, &'static str, &'static str, &'static str, &'static str, &'static str, &'static str, &'static str, &'static str, &'static str, &'static str, &'static str, &'static str, &'static str, &'static str, &'static str, &'static str, &'static str) = ("build","amber","amethyst","caulk","chalk","cobalt","diamond","garnet","gold","iodine","marble","mercury","quartz","ruby","rust","shale","sulfur","tar","uranium","zillium");
#[derive(Debug, Clone)]
pub struct Player {
pub id: i64,
pub sprite: String,
pub class: String,
pub aspect: String,
pub materials: Materials,
pub inventory: Vec<String>,
pub storage: Vec<String>,
pub sylladex_type: String,
}
#[derive(Debug, Clone)]
pub struct Materials {
pub build: i64,
pub amber: i64,
pub amethyst: i64,
pub caulk: i64,
pub chalk: i64,
pub cobalt: i64,
pub diamond: i64,
pub garnet: i64,
pub gold: i64,
pub iodine: i64,
pub marble: i64,
pub mercury: i64,
pub quartz: i64,
pub ruby: i64,
pub rust: i64,
pub shale: i64,
pub sulfur: i64,
pub tar: i64,
pub uranium: i64,
pub zillium: i64,
}
// Useful functions for Player
impl Player {
pub fn empty() -> Self {
return Player {
id: 0,
sprite: "Empty".to_string(),
class: "Bard".to_string(),
aspect: "Light".to_string(),
materials: Materials::empty(),
inventory: vec!["disc".to_string()],
storage: vec![],
sylladex_type: "".to_owned(),
}
}
}
// Useful functions for Materials
impl Materials {
pub fn empty() -> Self {
return Materials {
build: 0,
amber: 0,
amethyst: 0,
caulk: 0,
chalk: 0,
cobalt: 0,
diamond: 0,
garnet: 0,
gold: 0,
iodine: 0,
marble: 0,
mercury: 0,
quartz: 0,
ruby: 0,
rust: 0,
shale: 0,
sulfur: 0,
tar: 0,
uranium: 0,
zillium: 0,
}
}
}
// Makes it so you can iterate through materials
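// Illustrative only: with a `player: Player` in scope, the 20 grist amounts can be
// totalled via `player.materials.clone().into_iter().sum::<i64>()`.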
impl IntoIterator for Materials {
type Item = i64;
type IntoIter = std::array::IntoIter<i64, 20>;
fn into_iter(self) -> Self::IntoIter {
std::array::IntoIter::new([
self.build,
self.amber,
self.amethyst,
self.caulk,
self.chalk,
self.cobalt,
self.diamond,
self.garnet,
self.gold,
self.iodine,
self.marble,
self.mercury,
self.quartz,
self.ruby,
self.rust,
self.shale,
self.sulfur,
self.tar,
self.uranium,
self.zillium
])
}
}
// Easily send a message
pub async fn sendmessage(message: &str, ctx: &Context, msg: &Message) {
// Send a message or direct message the user saying there was an error
if let Err(why) = msg.channel_id.say(&ctx.http, message).await {
if let Err(why2) = msg.author.direct_message(&ctx, |m| {
m.content(
format!("Hello {}, The error I got is `{}`", msg.author, why)
)
}).await {
println!("{} | {}", why, why2)
}
}
}
// Send embed
pub async fn send_embed<F>(ctx: &Context, msg: &Message, closure: F) where F: FnOnce(&mut CreateEmbed) -> &mut CreateEmbed, {
if let Err(why) = msg.channel_id.send_message(&ctx, |m| {
m.embed(closure);
m
}).await {
sendmessage(format!("Error {}", why).as_str(), ctx, msg).await;
}
}
// Executes a sql statement
pub async fn sqlstatement(statement: &str) -> Result<(), Error> {
let (client, connection) = tokio_postgres::connect(POSTGRE, NoTls).await.unwrap();
tokio::spawn(async move {
if let Err(e) = connection.await {
eprintln!("connection error: {}", e);
}
});
let _ = client.execute(statement, &[]).await?;
Ok(())
}
// Executes a update sql statement
pub async fn update_sqlstatement(statement: &str, author_id: u64, params: &[&(dyn ToSql + Sync)],) -> Result<(), Error> {
let (client, connection) = tokio_postgres::connect(POSTGRE, NoTls).await.unwrap();
tokio::spawn(async move {
if let Err(e) = connection.await {
eprintln!("connection error: {}", e);
}
});
let _ = client.execute(format!("UPDATE player SET {} WHERE \"id\"={}", statement, author_id).as_str(), params).await?;
Ok(())
}
// Checks if the user has an entry in the DB
pub async fn check_if_registered(id: u64) -> Result<(), Error> {
// Get player
let result = get_player(id).await;
let player = result.unwrap_or(Player::empty());
// if player.id is 0 then they don't have an entry
// so then create an entry
if player.id == 0 {
let (client, connection) = tokio_postgres::connect(POSTGRE, NoTls).await.unwrap();
tokio::spawn(async move {
if let Err(e) = connection.await {
eprintln!("connection error: {}", e);
}
});
let _ = client.execute("INSERT INTO player
(\"id\") VALUES ($1);", &[&(id as i64)]).await.unwrap();
}
Ok(())
}
// Postgres search statement
pub async fn get_player(author_id: u64) -> Result<Player, Error> {
let (client, connection) = tokio_postgres::connect(POSTGRE, NoTls).await?;
tokio::spawn(async move {
if let Err(e) = connection.await {
eprintln!("connection error: {}", e);
}
});
let mut player = Player::empty();
// Create Player struct
for row in client.query("SELECT * FROM player WHERE \"id\"=$1",&[&(author_id as i64)]).await? {
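// Inventory and storage are persisted as single "ˌ"-delimited strings (the same separator ConvertVec::convert_vec writes below), so split them back into Vec<String>.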
let inventory = row.get::<_, String>(24).split("ˌ").map(str::to_string).collect::<Vec<String>>();
let storage = row.get::<_, String>(25).split("ˌ").map(str::to_string).collect::<Vec<String>>();
player = Player {
id: row.get(0),
sprite: row.get(21),
class: row.get(22),
aspect: row.get(23),
materials: Materials {
build: row.get(1),
amber: row.get(2),
amethyst: row.get(3),
caulk: row.get(4),
chalk: row.get(5),
cobalt: row.get(6),
diamond: row.get(7),
garnet: row.get(8),
gold: row.get(9),
iodine: row.get(10),
marble: row.get(11),
mercury: row.get(12),
quartz: row.get(13),
ruby: row.get(14),
rust: row.get(15),
shale: row.get(16),
sulfur: row.get(17),
tar: row.get(18),
uranium: row.get(19),
zillium: row.get(20),
},
inventory,
storage,
sylladex_type: row.get(26),
}
}
return Ok(player)
}
// Gets exile quote
pub async fn get_exile_quote(ctx: &Context, msg: &Message) {
// Exile quotes
let exile_1: Vec<&str> = vec!["What are you doing", "Good job hero"];
let exile_2: Vec<&str> = vec!["DO YOU HAVE ANY IDEA WHAT YOU ARE DOING?", "YOU ARE DOING GOOD MAGGOT!"];
let exile_3: Vec<&str> = vec!["Good.", "Yes more."];
let exile_4: Vec<&str> = vec!["i could do better than that", "what are you doing loser"];
// Send embed function
async fn send_embed(ctx: &Context, msg: &Message, embed_text: &str) {
let randcolor: u32 = thread_rng().gen_range(0x000000..0xFFFFFF);
if let Err(why) = msg.channel_id.send_message(&ctx.http, |m| {
m.embed(|e| {
e.title(format!("{}'s Exile", msg.author.name).as_str());
e.description(format_emojis!("{}", embed_text));
e.color(randcolor);
e.author(|a| {
a.icon_url(msg.author.avatar_url().unwrap());
a.name(msg.author.name.as_str());
a
});e
});m
}).await {
sendmessage(format!("Error {}", why).as_str(), ctx, msg).await;
}
}
// Random index for exile quote
let rand_index: u32 = thread_rng().gen_range(0..exile_1.len() as u32);
// Send exile quote
let author_exile = (msg.author.id.as_u64() % 4) + 1;
if author_exile == 1 {
send_embed(ctx, msg, exile_1[rand_index as usize]).await;
} else if author_exile == 2 {
send_embed(ctx, msg, exile_2[rand_index as usize]).await;
} else if author_exile == 3 {
| lse if author_exile == 4 {
send_embed(ctx, msg, exile_4[rand_index as usize]).await;
}
}
pub trait InVec: std::cmp::PartialEq + Sized {
fn in_vec(self, vector: Vec<Self>) -> bool {
vector.contains(&self)
}
}
impl<T> InVec for T where T: std::cmp::PartialEq {}
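// Illustrative only: `"disc".to_string().in_vec(vec!["disc".to_string()])` evaluates to true.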
pub trait ConvertCaseToSnake {
fn to_snakecase(&self) -> String;
}
impl ConvertCaseToSnake for String {
fn to_snakecase(&self) -> String {
let part1 = &self.to_uppercase()[0..1];
let part2 = &self.to_lowercase()[1..self.len()];
return format!("{}{}", part1, part2);
}
}
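// Note: despite the name this produces title case rather than snake_case, e.g.
// "BARD".to_snakecase() == "Bard"; the byte-range slicing assumes ASCII input.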
pub trait VecStrToString {
fn vec_to_string(self) -> Vec<String>;
}
impl<T: std::fmt::Display> VecStrToString for Vec<T> {
fn vec_to_string(self) -> Vec<String> {
let mut return_vector = vec![];
for x in 0..self.len() {
return_vector.push(self[x].to_string());
}
return return_vector;
}
}
pub trait FormatVec {
fn format_vec(&self) -> String;
}
impl<T: std::fmt::Display> FormatVec for Vec<T> {
fn format_vec(&self) -> String {
let new_vec = self.into_iter().rev().collect::<Vec<_>>();
let mut return_string = "".to_owned();
for x in new_vec {
return_string = format!("{}\n{}", return_string, x);
}
if return_string.replace("\n", "") == "" {
return "Empty".to_owned()
} else {
return return_string
}
}
}
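// e.g. `vec!["a", "b"].format_vec()` yields "\nb\na" (items reversed, one per line),
// and an empty or all-blank vec renders as "Empty".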
pub trait ConvertVec {
fn convert_vec(&self) -> String;
}
impl<T: std::fmt::Display> ConvertVec for Vec<T> {
fn convert_vec(&self) -> String {
let mut return_string = "".to_owned();
for x in self {
return_string = format!("{}ˌ{}", return_string, x);
}
if return_string.replace("ˌ", "") == "" {
return "".to_owned();
} else {
return return_string;
}
}
} |
send_embed(ctx, msg, exile_3[rand_index as usize]).await;
} e | conditional_block |
useful.rs | use rand::{Rng, thread_rng};
use serenity::builder::CreateEmbed;
use serenity::client::Context;
use serenity::model::channel::Message;
use tokio_postgres::{Error, NoTls, types::ToSql};
use crate::format_emojis;
const POSTGRE: &'static str = "host=192.168.1.146 user=postgres";
pub const GRIST_TYPES: (&'static str, &'static str, &'static str, &'static str, &'static str, &'static str, &'static str, &'static str, &'static str, &'static str, &'static str, &'static str, &'static str, &'static str, &'static str, &'static str, &'static str, &'static str, &'static str, &'static str) = ("build","amber","amethyst","caulk","chalk","cobalt","diamond","garnet","gold","iodine","marble","mercury","quartz","ruby","rust","shale","sulfur","tar","uranium","zillium");
#[derive(Debug, Clone)]
pub struct Player {
pub id: i64,
pub sprite: String,
pub class: String,
pub aspect: String,
pub materials: Materials,
pub inventory: Vec<String>,
pub storage: Vec<String>,
pub sylladex_type: String,
}
#[derive(Debug, Clone)]
pub struct Materials {
pub build: i64,
pub amber: i64,
pub amethyst: i64,
pub caulk: i64,
pub chalk: i64,
pub cobalt: i64,
pub diamond: i64,
pub garnet: i64,
pub gold: i64,
pub iodine: i64,
pub marble: i64,
pub mercury: i64,
pub quartz: i64,
pub ruby: i64,
pub rust: i64,
pub shale: i64,
pub sulfur: i64,
pub tar: i64,
pub uranium: i64,
pub zillium: i64,
}
// Useful functions for Player
impl Player {
pub fn empty() -> Self {
return Player {
id: 0,
sprite: "Empty".to_string(),
class: "Bard".to_string(),
aspect: "Light".to_string(),
materials: Materials::empty(),
inventory: vec!["disc".to_string()],
storage: vec![],
sylladex_type: "".to_owned(),
}
}
}
// Useful functions for Materials
impl Materials {
pub fn empty() -> Self {
return Materials {
build: 0,
amber: 0,
amethyst: 0,
caulk: 0,
chalk: 0,
cobalt: 0,
diamond: 0,
garnet: 0,
gold: 0,
iodine: 0,
marble: 0,
mercury: 0,
quartz: 0,
ruby: 0,
rust: 0,
shale: 0,
sulfur: 0,
tar: 0,
uranium: 0,
zillium: 0,
}
}
}
// Makes it so you can iterate through materials
impl IntoIterator for Materials {
type Item = i64;
type IntoIter = std::array::IntoIter<i64, 20>;
fn into_iter(self) -> Self::IntoIter {
std::array::IntoIter::new([
self.build,
self.amber,
self.amethyst,
self.caulk,
self.chalk,
self.cobalt,
self.diamond,
self.garnet,
self.gold,
self.iodine,
self.marble,
self.mercury,
self.quartz,
self.ruby,
self.rust,
self.shale,
self.sulfur,
self.tar,
self.uranium,
self.zillium
])
}
}
// Easily send a message
pub async fn sendmessage(message: &str, ctx: &Context, msg: &Message) {
// Send a message or direct message the user saying there was an error
if let Err(why) = msg.channel_id.say(&ctx.http, message).await {
if let Err(why2) = msg.author.direct_message(&ctx, |m| {
m.content(
format!("Hello {}, The error I got is `{}`", msg.author, why)
)
}).await {
println!("{} | {}", why, why2)
}
}
}
// Send embed
pub async fn send_embed<F>(ctx: &Context, msg: &Message, closure: F) where F: FnOnce(&mut CreateEmbed) -> &mut CreateEmbed, {
if let Err(why) = msg.channel_id.send_message(&ctx, |m| {
m.embed(closure);
m
}).await {
sendmessage(format!("Error {}", why).as_str(), ctx, msg).await;
}
}
// Executes an SQL statement
pub async fn sqlstatement(statement: &str) -> Result<(), Error> {
let (client, connection) = tokio_postgres::connect(POSTGRE, NoTls).await.unwrap();
tokio::spawn(async move {
if let Err(e) = connection.await {
eprintln!("connection error: {}", e);
}
});
let _ = client.execute(statement, &[]).await?;
Ok(())
}
// Executes an UPDATE SQL statement on the player table
pub async fn update_sqlstatement(statement: &str, author_id: u64, params: &[&(dyn ToSql + Sync)],) -> Result<(), Error> {
let (client, connection) = tokio_postgres::connect(POSTGRE, NoTls).await.unwrap();
tokio::spawn(async move {
if let Err(e) = connection.await {
eprintln!("connection error: {}", e);
}
});
let _ = client.execute(format!("UPDATE player SET {} WHERE \"id\"={}", statement, author_id).as_str(), params).await?;
Ok(())
}
// Checks if the user has an entry in the DB
pub async fn check_if_registered(id: u64) -> Result<(), Error> {
// Get player
let result = get_player(id).await;
let player = result.unwrap_or(Player::empty());
// if player.id is 0 then they don't have an entry
// so then create an entry
if player.id == 0 {
let (client, connection) = tokio_postgres::connect(POSTGRE, NoTls).await.unwrap();
tokio::spawn(async move {
if let Err(e) = connection.await {
eprintln!("connection error: {}", e);
}
});
let _ = client.execute("INSERT INTO player
(\"id\") VALUES ($1);", &[&(id as i64)]).await.unwrap();
}
Ok(())
}
// Postgres SELECT statement: loads the player row for the given id
pub async fn get_player(author_id: u64) -> Result<Player, Error> {
let (client, connection) = tokio_postgres::connect(POSTGRE, NoTls).await?;
tokio::spawn(async move {
if let Err(e) = connection.await {
eprintln!("connection error: {}", e);
}
});
let mut player = Player::empty();
// Create Player struct
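    // Column layout assumed here (inferred from the indices used below): 0 = id,
    // 1-20 = grist amounts in GRIST_TYPES order, 21 = sprite, 22 = class, 23 = aspect,
    // 24 = inventory, 25 = storage (both stored as "ˌ"-separated strings), 26 = sylladex type.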
for row in client.query("SELECT * FROM player WHERE \"id\"=$1",&[&(author_id as i64)]).await? {
let inventory = row.get::<_, String>(24).split("ˌ").map(str::to_string).collect::<Vec<String>>();
let storage = row.get::<_, String>(25).split("ˌ").map(str::to_string).collect::<Vec<String>>();
player = Player {
id: row.get(0),
sprite: row.get(21),
class: row.get(22),
aspect: row.get(23),
materials: Materials {
build: row.get(1),
amber: row.get(2),
amethyst: row.get(3),
caulk: row.get(4),
chalk: row.get(5),
cobalt: row.get(6),
diamond: row.get(7),
garnet: row.get(8),
gold: row.get(9),
iodine: row.get(10),
marble: row.get(11),
mercury: row.get(12),
quartz: row.get(13),
ruby: row.get(14),
rust: row.get(15),
shale: row.get(16),
sulfur: row.get(17),
tar: row.get(18),
uranium: row.get(19),
zillium: row.get(20),
},
inventory,
storage,
sylladex_type: row.get(26),
}
}
return Ok(player)
}
// Gets exile quote
pub async fn get_exile_quote(ctx: &Context, msg: &Message) {
// Exile quotes
let exile_1: Vec<&str> = vec!["What are you doing", "Good job hero"];
let exile_2: Vec<&str> = vec!["DO YOU HAVE ANY IDEA WHAT YOU ARE DOING?", "YOU ARE DOING GOOD MAGGOT!"];
let exile_3: Vec<&str> = vec!["Good.", "Yes more."];
let exile_4: Vec<&str> = vec!["i could do better than that", "what are you doing loser"];
// Send embed function
async fn send_embed(ctx: &Context, msg: &Message, embed_text: &str) {
let randcolor: u32 = thread_rng().gen_range(0x000000..0xFFFFFF);
if let Err(why) = msg.channel_id.send_message(&ctx.http, |m| {
m.embed(|e| {
e.title(format!("{}'s Exile", msg.author.name).as_str());
e.description(format_emojis!("{}", embed_text));
e.color(randcolor);
e.author(|a| {
a.icon_url(msg.author.avatar_url().unwrap());
a.name(msg.author.name.as_str());
a
                });
                e
            });
            m
}).await {
sendmessage(format!("Error {}", why).as_str(), ctx, msg).await;
}
}
// Random index for exile quote
let rand_index: u32 = thread_rng().gen_range(0..exile_1.len() as u32);
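    // (valid for any of the four lists, since they all have the same length as exile_1)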
// Send exile quote
let author_exile = (msg.author.id.as_u64() % 4) + 1;
if author_exile == 1 {
send_embed(ctx, msg, exile_1[rand_index as usize]).await;
} else if author_exile == 2 {
send_embed(ctx, msg, exile_2[rand_index as usize]).await;
} else if author_exile == 3 {
send_embed(ctx, msg, exile_3[rand_index as usize]).await;
} else if author_exile == 4 {
send_embed(ctx, msg, exile_4[rand_index as usize]).await;
}
}
pub trait InVec: std::cmp::PartialEq + Sized {
fn in_vec(self, vector: Vec<Self>) -> bool {
vector.contains(&self)
}
}
impl<T> InVec for T where T: std::cmp::PartialEq {}
pub trait ConvertCaseToSnake {
fn to_snakecase(&self) -> String;
}
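// Note: despite the name, this capitalises the first letter and lowercases the rest
// (e.g. "BUILD" -> "Build"); it assumes a non-empty ASCII string, as it slices by byte index.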
impl ConvertCaseToSnake for String {
fn to_snakecase(&self) -> String {
let part1 = &self.to_uppercase()[0..1];
let part2 = &self.to_lowercase()[1..self.len()];
return format!("{}{}", part1, part2);
}
}
pub trait VecStrToString {
fn vec_to_string(self) -> Vec<String>;
}
impl<T: std::fmt::Display> VecStrToString for Vec<T> {
fn vec_to_string(self) -> Vec<String> {
let mut return_vector = vec![];
for x in 0..self.len() {
return_vector.push(self[x].to_string());
}
return return_vector;
}
}
pub trait FormatVec {
fn format_vec(&self) -> String;
}
| let new_vec = self.into_iter().rev().collect::<Vec<_>>();
let mut return_string = "".to_owned();
for x in new_vec {
return_string = format!("{}\n{}", return_string, x);
}
if return_string.replace("\n", "") == "" {
return "Empty".to_owned()
} else {
return return_string
}
}
}
pub trait ConvertVec {
fn convert_vec(&self) -> String;
}
impl<T: std::fmt::Display> ConvertVec for Vec<T> {
fn convert_vec(&self) -> String {
let mut return_string = "".to_owned();
for x in self {
return_string = format!("{}ˌ{}", return_string, x);
}
if return_string.replace("ˌ", "") == "" {
return "".to_owned();
} else {
return return_string;
}
}
} |
impl<T: std::fmt::Display> FormatVec for Vec<T> {
fn format_vec(&self) -> String {
| random_line_split |
useful.rs | use rand::{Rng, thread_rng};
use serenity::builder::CreateEmbed;
use serenity::client::Context;
use serenity::model::channel::Message;
use tokio_postgres::{Error, NoTls, types::ToSql};
use crate::format_emojis;
const POSTGRE: &'static str = "host=192.168.1.146 user=postgres";
pub const GRIST_TYPES: (&'static str, &'static str, &'static str, &'static str, &'static str, &'static str, &'static str, &'static str, &'static str, &'static str, &'static str, &'static str, &'static str, &'static str, &'static str, &'static str, &'static str, &'static str, &'static str, &'static str) = ("build","amber","amethyst","caulk","chalk","cobalt","diamond","garnet","gold","iodine","marble","mercury","quartz","ruby","rust","shale","sulfur","tar","uranium","zillium");
#[derive(Debug, Clone)]
pub struct Player {
pub id: i64,
pub sprite: String,
pub class: String,
pub aspect: String,
pub materials: Materials,
pub inventory: Vec<String>,
pub storage: Vec<String>,
pub sylladex_type: String,
}
#[derive(Debug, Clone)]
pub struct Materials {
pub build: i64,
pub amber: i64,
pub amethyst: i64,
pub caulk: i64,
pub chalk: i64,
pub cobalt: i64,
pub diamond: i64,
pub garnet: i64,
pub gold: i64,
pub iodine: i64,
pub marble: i64,
pub mercury: i64,
pub quartz: i64,
pub ruby: i64,
pub rust: i64,
pub shale: i64,
pub sulfur: i64,
pub tar: i64,
pub uranium: i64,
pub zillium: i64,
}
// Useful functions for Player
impl Player {
pub fn empty() -> Self {
return Player {
id: 0,
sprite: "Empty".to_string(),
class: "Bard".to_string(),
aspect: "Light".to_string(),
materials: Materials::empty(),
inventory: vec!["disc".to_string()],
storage: vec![],
sylladex_type: "".to_owned(),
}
}
}
// Useful functions for Materials
impl Materials {
pub fn empty() -> Self {
return Materials {
build: 0,
amber: 0,
amethyst: 0,
caulk: 0,
chalk: 0,
cobalt: 0,
diamond: 0,
garnet: 0,
gold: 0,
iodine: 0,
marble: 0,
mercury: 0,
quartz: 0,
ruby: 0,
rust: 0,
shale: 0,
sulfur: 0,
tar: 0,
uranium: 0,
zillium: 0,
}
}
}
// Makes it so you can iterate through materials
impl IntoIterator for Materials {
type Item = i64;
type IntoIter = std::array::IntoIter<i64, 20>;
fn into_iter(self) -> Self::IntoIter {
std::array::IntoIter::new([
self.build,
self.amber,
self.amethyst,
self.caulk,
self.chalk,
self.cobalt,
self.diamond,
self.garnet,
self.gold,
self.iodine,
self.marble,
self.mercury,
self.quartz,
self.ruby,
self.rust,
self.shale,
self.sulfur,
self.tar,
self.uranium,
self.zillium
])
}
}
// Easily send a message
pub async fn sendmessage(message: &str, ctx: &Context, msg: &Message) {
// Send a message or direct message the user saying there was an error
if let Err(why) = msg.channel_id.say(&ctx.http, message).await {
if let Err(why2) = msg.author.direct_message(&ctx, |m| {
m.content(
format!("Hello {}, The error I got is `{}`", msg.author, why)
)
}).await {
println!("{} | {}", why, why2)
}
}
}
// Send embed
pub async fn send_embed<F>(ctx: &Context, msg: &Message, closure: F) where F: FnOnce(&mut CreateEmbed) -> &mut CreateEmbed, {
if let Err(why) = msg.channel_id.send_message(&ctx, |m| {
m.embed(closure);
m
}).await {
sendmessage(format!("Error {}", why).as_str(), ctx, msg).await;
}
}
// Executes an SQL statement
pub async fn | (statement: &str) -> Result<(), Error> {
let (client, connection) = tokio_postgres::connect(POSTGRE, NoTls).await.unwrap();
tokio::spawn(async move {
if let Err(e) = connection.await {
eprintln!("connection error: {}", e);
}
});
let _ = client.execute(statement, &[]).await?;
Ok(())
}
// Executes an UPDATE SQL statement on the player table
pub async fn update_sqlstatement(statement: &str, author_id: u64, params: &[&(dyn ToSql + Sync)],) -> Result<(), Error> {
let (client, connection) = tokio_postgres::connect(POSTGRE, NoTls).await.unwrap();
tokio::spawn(async move {
if let Err(e) = connection.await {
eprintln!("connection error: {}", e);
}
});
let _ = client.execute(format!("UPDATE player SET {} WHERE \"id\"={}", statement, author_id).as_str(), params).await?;
Ok(())
}
// Checks if the user has an entry in the DB
pub async fn check_if_registered(id: u64) -> Result<(), Error> {
// Get player
let result = get_player(id).await;
let player = result.unwrap_or(Player::empty());
// if player.id is 0 then they don't have an entry
// so then create an entry
if player.id == 0 {
let (client, connection) = tokio_postgres::connect(POSTGRE, NoTls).await.unwrap();
tokio::spawn(async move {
if let Err(e) = connection.await {
eprintln!("connection error: {}", e);
}
});
let _ = client.execute("INSERT INTO player
(\"id\") VALUES ($1);", &[&(id as i64)]).await.unwrap();
}
Ok(())
}
// Postgres SELECT statement: loads the player row for the given id
pub async fn get_player(author_id: u64) -> Result<Player, Error> {
let (client, connection) = tokio_postgres::connect(POSTGRE, NoTls).await?;
tokio::spawn(async move {
if let Err(e) = connection.await {
eprintln!("connection error: {}", e);
}
});
let mut player = Player::empty();
// Create Player struct
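    // Column layout assumed here (inferred from the indices used below): 0 = id,
    // 1-20 = grist amounts in GRIST_TYPES order, 21 = sprite, 22 = class, 23 = aspect,
    // 24 = inventory, 25 = storage (both stored as "ˌ"-separated strings), 26 = sylladex type.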
for row in client.query("SELECT * FROM player WHERE \"id\"=$1",&[&(author_id as i64)]).await? {
let inventory = row.get::<_, String>(24).split("ˌ").map(str::to_string).collect::<Vec<String>>();
let storage = row.get::<_, String>(25).split("ˌ").map(str::to_string).collect::<Vec<String>>();
player = Player {
id: row.get(0),
sprite: row.get(21),
class: row.get(22),
aspect: row.get(23),
materials: Materials {
build: row.get(1),
amber: row.get(2),
amethyst: row.get(3),
caulk: row.get(4),
chalk: row.get(5),
cobalt: row.get(6),
diamond: row.get(7),
garnet: row.get(8),
gold: row.get(9),
iodine: row.get(10),
marble: row.get(11),
mercury: row.get(12),
quartz: row.get(13),
ruby: row.get(14),
rust: row.get(15),
shale: row.get(16),
sulfur: row.get(17),
tar: row.get(18),
uranium: row.get(19),
zillium: row.get(20),
},
inventory,
storage,
sylladex_type: row.get(26),
}
}
return Ok(player)
}
// Gets exile quote
pub async fn get_exile_quote(ctx: &Context, msg: &Message) {
// Exile quotes
let exile_1: Vec<&str> = vec!["What are you doing", "Good job hero"];
let exile_2: Vec<&str> = vec!["DO YOU HAVE ANY IDEA WHAT YOU ARE DOING?", "YOU ARE DOING GOOD MAGGOT!"];
let exile_3: Vec<&str> = vec!["Good.", "Yes more."];
let exile_4: Vec<&str> = vec!["i could do better than that", "what are you doing loser"];
// Send embed function
async fn send_embed(ctx: &Context, msg: &Message, embed_text: &str) {
let randcolor: u32 = thread_rng().gen_range(0x000000..0xFFFFFF);
if let Err(why) = msg.channel_id.send_message(&ctx.http, |m| {
m.embed(|e| {
e.title(format!("{}'s Exile", msg.author.name).as_str());
e.description(format_emojis!("{}", embed_text));
e.color(randcolor);
e.author(|a| {
a.icon_url(msg.author.avatar_url().unwrap());
a.name(msg.author.name.as_str());
a
                });
                e
            });
            m
}).await {
sendmessage(format!("Error {}", why).as_str(), ctx, msg).await;
}
}
// Random index for exile quote
let rand_index: u32 = thread_rng().gen_range(0..exile_1.len() as u32);
// Send exile quote
let author_exile = (msg.author.id.as_u64() % 4) + 1;
if author_exile == 1 {
send_embed(ctx, msg, exile_1[rand_index as usize]).await;
} else if author_exile == 2 {
send_embed(ctx, msg, exile_2[rand_index as usize]).await;
} else if author_exile == 3 {
send_embed(ctx, msg, exile_3[rand_index as usize]).await;
} else if author_exile == 4 {
send_embed(ctx, msg, exile_4[rand_index as usize]).await;
}
}
pub trait InVec: std::cmp::PartialEq + Sized {
fn in_vec(self, vector: Vec<Self>) -> bool {
vector.contains(&self)
}
}
impl<T> InVec for T where T: std::cmp::PartialEq {}
pub trait ConvertCaseToSnake {
fn to_snakecase(&self) -> String;
}
impl ConvertCaseToSnake for String {
fn to_snakecase(&self) -> String {
let part1 = &self.to_uppercase()[0..1];
let part2 = &self.to_lowercase()[1..self.len()];
return format!("{}{}", part1, part2);
}
}
pub trait VecStrToString {
fn vec_to_string(self) -> Vec<String>;
}
impl<T: std::fmt::Display> VecStrToString for Vec<T> {
fn vec_to_string(self) -> Vec<String> {
let mut return_vector = vec![];
for x in 0..self.len() {
return_vector.push(self[x].to_string());
}
return return_vector;
}
}
pub trait FormatVec {
fn format_vec(&self) -> String;
}
impl<T: std::fmt::Display> FormatVec for Vec<T> {
fn format_vec(&self) -> String {
let new_vec = self.into_iter().rev().collect::<Vec<_>>();
let mut return_string = "".to_owned();
for x in new_vec {
return_string = format!("{}\n{}", return_string, x);
}
if return_string.replace("\n", "") == "" {
return "Empty".to_owned()
} else {
return return_string
}
}
}
pub trait ConvertVec {
fn convert_vec(&self) -> String;
}
impl<T: std::fmt::Display> ConvertVec for Vec<T> {
fn convert_vec(&self) -> String {
let mut return_string = "".to_owned();
for x in self {
return_string = format!("{}ˌ{}", return_string, x);
}
if return_string.replace("ˌ", "") == "" {
return "".to_owned();
} else {
return return_string;
}
}
} | sqlstatement | identifier_name |
main.rs | use tetra::graphics::text::{Text, Font};
use tetra::graphics::{self, Color, Texture, DrawParams};
use tetra::math::Vec2;
use tetra::input::{self, Key};
use tetra::{Context, ContextBuilder, State};
// visual consts
const SCREEN_WIDTH: i32 = 1280;
const SCREEN_HEIGHT: i32 = 720;
const FONT_SIZE: f32 = 32.0;
const PADDING: f32 = FONT_SIZE;
// gameplay consts
const PADDLE_SPEED: f32 = 14.0;
const BALL_SPEED: f32 = PADDLE_SPEED/2.0;
const PADDLE_SPIN: f32 = 3.0;
const BALL_ACC: f32 = 0.005;
// AI constants
const AI_ENABLED: bool = true;
const AI_MAX_ITERS: u32 = 400; // Experimentation results: around 800 is more than sufficient,
// 400 is quite good, though it is insufficient for a short time after the ball leaves the enemy paddle
const AI_WAIT_FOR_PLAYER_HIT: bool = true; // wait for the player to hit the ball first before calculating solution
// (= will not have to guess the player's angle of attack)
// NB: if waiting for player hit, max iters may be set to a lower value
const EPSILON: f32 = 1.0;
#[derive(Clone)]
struct Paddle {
paddle_texture: Texture,
position: Vec2<f32>,
}
#[derive(Clone)]
struct Ball {
ball_texture: Texture,
position: Vec2<f32>,
velocity: Vec2<f32>,
}
impl Ball {
fn reset(&mut self){
self.position = Vec2::new(
(SCREEN_WIDTH as f32)/2.0 - (self.ball_texture.width() as f32)/2.0,
(SCREEN_HEIGHT as f32)/2.0 - (self.ball_texture.height() as f32)/2.0
);
}
}
struct GameState {
ball: Ball,
player_paddle: Paddle,
player_score: i32,
enemy_paddle: Paddle,
enemy_score: i32,
simulated: bool,
enemy_hit: bool, // used when simulating
}
impl GameState {
fn new(ctx: &mut Context) -> tetra::Result<GameState> {
// init textures
let paddle_texture = Texture::new(ctx, "res/paddle.png")?;
let ball_texture = Texture::new(ctx, "res/ball.png")?;
// init ball
let mut ball = Ball {
ball_texture,
position: Vec2::new(0.0, 0.0),
velocity: Vec2::new(1.0, 1.0),
};
ball.reset(); // initialise ball's position
ball.velocity = ball.velocity.normalized() * BALL_SPEED; // init ball speed
// calculate paddle initial y
let paddle_initial_y = (SCREEN_HEIGHT as f32)/2.0 - (paddle_texture.height() as f32)/2.0;
Ok(GameState {
ball,
player_paddle: Paddle {
paddle_texture: paddle_texture.clone(),
position: Vec2::new(
(SCREEN_WIDTH as f32) - PADDING - (paddle_texture.width() as f32),
paddle_initial_y,
),
},
player_score: 0,
enemy_paddle: Paddle {
paddle_texture: paddle_texture.clone(),
position: Vec2::new(
PADDING,
paddle_initial_y,
),
},
enemy_score: 0,
simulated: false,
enemy_hit: false,
})
}
fn draw_paddle(ctx: &mut Context, paddle: &Paddle){
graphics::draw(ctx, &paddle.paddle_texture, paddle.position)
}
fn handle_inputs(&mut self, ctx: &mut Context){
if input::is_key_down(ctx, Key::W) {
self.player_paddle.position.y -= PADDLE_SPEED;
}
if input::is_key_down(ctx, Key::S) {
self.player_paddle.position.y += PADDLE_SPEED;
}
        // if !AI_ENABLED {
        if !false {
if input::is_key_down(ctx, Key::O) {
self.enemy_paddle.position.y -= PADDLE_SPEED;
}
if input::is_key_down(ctx, Key::L) {
self.enemy_paddle.position.y += PADDLE_SPEED;
}
}
}
/// Check for ball-paddle collision with the given paddle
fn check_intersects(ball: &Ball, paddle: &Paddle) -> bool{
// check if ball's centre point is inside paddle rectangle:
// method adapted from: https://stackoverflow.com/a/2763387/5013267
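        // With paddle corners A (top-left), B (top-right) and C (bottom-right), the ball's centre
        // M lies inside the rectangle iff 0 <= AB·AM <= AB·AB and 0 <= BC·BM <= BC·BC.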
let ab = Vec2::new(paddle.paddle_texture.width() as f32, 0.0); // vector a->b
let bc = Vec2::new(0.0, paddle.paddle_texture.height() as f32); // vector b->c
let m = ball.position + Vec2::new(ball.ball_texture.width() as f32, ball.ball_texture.height() as f32)/2.0;
let ab_dot_am = ab.dot(m - paddle.position);
let bc_dot_bm = bc.dot(m - (paddle.position + (paddle.paddle_texture.width() as f32, 0.0)));
// return value:
0.0 <= ab_dot_am && ab_dot_am <= ab.dot(ab)
&& 0.0 <= bc_dot_bm && bc_dot_bm <= bc.dot(bc)
}
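    // Bounces the ball off a paddle: flips the horizontal velocity (speeding it up by BALL_ACC)
    // and adds "spin" to the vertical velocity based on how far down the paddle the ball struck.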
fn apply_collision_response(ball: &mut Ball, paddle: &Paddle){
ball.velocity.x = -(ball.velocity.x + (BALL_ACC * ball.velocity.x.signum()));
let offset = (paddle.position.y - ball.position.y) / paddle.paddle_texture.height() as f32;
ball.velocity.y += PADDLE_SPIN * -offset;
}
fn update_collision(ball: &mut Ball, paddle: &Paddle){
if GameState::check_intersects(ball, &paddle) |
}
fn update_ball(&mut self, _ctx: &mut Context){
self.update_ai(_ctx);
self.ball.position += self.ball.velocity;
        if !self.simulated {
            GameState::update_collision(&mut self.ball, &self.player_paddle);
            GameState::update_collision(&mut self.ball, &self.enemy_paddle);
        } else {
// if simulated, use simplified calculations
// (always assume ball hits player paddle, otherwise AI would win anyway)
// only need to check player paddle
if self.ball.position.x + ((self.ball.ball_texture.width() as f32)/2.0) >= self.player_paddle.position.x {
GameState::apply_collision_response(&mut self.ball, &mut self.player_paddle);
}
// check reaches enemy's side (so that iteration can be terminated)
if self.ball.position.x <= self.enemy_paddle.position.x + self.enemy_paddle.paddle_texture.width() as f32 {
self.enemy_hit = true;
return; // no need to do rest of update calculations
}
}
// walls
// if bouncing off top or bottom walls...
if (self.ball.position[1] + (self.ball.ball_texture.height() as f32) >= (SCREEN_HEIGHT as f32)) || self.ball.position[1] <= 0.0 {
self.ball.velocity[1] = -self.ball.velocity[1];
}
// if bouncing off either of the side walls...
if self.ball.position[0] + (self.ball.ball_texture.width() as f32) >= (SCREEN_WIDTH as f32) || self.ball.position[0] <= 0.0 {
if self.ball.position[0] <= 0.0 {
// bounced off left wall
self.player_score += 1;
} else {
// bounced off right wall
self.enemy_score += 1;
self.ball.velocity = Vec2::new(1.0, 1.0); // setting direction
}
// reset ball to centre
self.ball.reset();
// reset ball speed (but not direction)
self.ball.velocity = self.ball.velocity.normalized() * BALL_SPEED;
}
}
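    // AI approach: clone the current state into a throwaway "simulated" GameState and step it
    // forward until the ball reaches the enemy's side (or AI_MAX_ITERS runs out), then nudge the
    // real enemy paddle toward the predicted intercept, capped at PADDLE_SPEED per update.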
fn update_ai(&mut self, ctx: &mut Context){
        if self.simulated || !AI_ENABLED {
return;
}
if AI_WAIT_FOR_PLAYER_HIT && self.ball.velocity.x >= 0.0 {
// ball vel.x >= 0.0 implies ball moving towards player still, and has not been returned yet
return;
}
// create a simulation GameState, cloned from real GameState
let mut sim = GameState {
ball: self.ball.clone(),
player_paddle: self.player_paddle.clone(),
player_score: self.player_score,
enemy_paddle: self.enemy_paddle.clone(),
enemy_score: self.enemy_score,
simulated: true,
enemy_hit: false,
};
        for _ in 0..AI_MAX_ITERS {
            if !sim.enemy_hit {
sim.update(ctx).expect("bruh moment when updating sim");
                // sim.draw(ctx).expect("bruh moment when drawing sim"); // NB: only for debug -- rendering here slows down program significantly
} else {
// if enemy_hit, stop iterating: found solution
// TODO: maybe implement solution caching
// (but low prio because solution prediction is variable anyway [depends on other player's angle of attack])
let target_y = sim.ball.position.y + (sim.ball.ball_texture.height() as f32)/2.0
- (self.enemy_paddle.paddle_texture.height() as f32)/2.0;
let delta = target_y - self.enemy_paddle.position.y;
if delta.abs() > EPSILON {
self.enemy_paddle.position.y += (delta.abs()).min(PADDLE_SPEED).copysign(delta);
} else {
self.enemy_paddle.position.y = target_y;
}
break;
}
}
}
}
impl State for GameState {
fn draw(&mut self, ctx: &mut Context) -> tetra::Result {
graphics::clear(ctx, Color::rgb(0.0, 0.0, 0.0));
let text = Text::new(
format!("{}-{}", self.enemy_score, self.player_score),
Font::vector(ctx, "res/vcr_osd_mono.ttf", FONT_SIZE)?,
);
graphics::draw(ctx, &text, Vec2::new((SCREEN_WIDTH/2) as f32, FONT_SIZE));
GameState::draw_paddle(ctx, &self.enemy_paddle);
GameState::draw_paddle(ctx, &self.player_paddle);
        if !self.simulated {
graphics::draw(ctx, &self.ball.ball_texture, self.ball.position);
} else {
// for debugging, render simulated run in different shade
// (visualisation may not be used in final version)
graphics::draw(ctx, &self.ball.ball_texture, DrawParams::new()
.position(self.ball.position)
.color(Color::rgb(1.0, 0.0, 0.0)));
}
Ok(())
}
fn update(&mut self, ctx: &mut Context) -> tetra::Result {
self.handle_inputs(ctx);
self.update_ball(ctx);
Ok(())
}
}
fn main() -> tetra::Result {
ContextBuilder::new("Pong", SCREEN_WIDTH, SCREEN_HEIGHT)
.quit_on_escape(true)
.build()?
.run(GameState::new)
} | {
GameState::apply_collision_response(ball, paddle);
} | conditional_block |
main.rs | use tetra::graphics::text::{Text, Font};
use tetra::graphics::{self, Color, Texture, DrawParams};
use tetra::math::Vec2;
use tetra::input::{self, Key};
use tetra::{Context, ContextBuilder, State};
// visual consts
const SCREEN_WIDTH: i32 = 1280;
const SCREEN_HEIGHT: i32 = 720;
const FONT_SIZE: f32 = 32.0;
const PADDING: f32 = FONT_SIZE;
// gameplay consts
const PADDLE_SPEED: f32 = 14.0;
const BALL_SPEED: f32 = PADDLE_SPEED/2.0;
const PADDLE_SPIN: f32 = 3.0;
const BALL_ACC: f32 = 0.005;
// AI constants
const AI_ENABLED: bool = true;
const AI_MAX_ITERS: u32 = 400; // Experimentation results: around 800 is more than sufficient,
// 400 is quite good, though it is insufficient for a short time after the ball leaves the enemy paddle
const AI_WAIT_FOR_PLAYER_HIT: bool = true; // wait for the player to hit the ball first before calculating solution
// (= will not have to guess the player's angle of attack)
// NB: if waiting for player hit, max iters may be set to a lower value
const EPSILON: f32 = 1.0;
#[derive(Clone)]
struct Paddle {
paddle_texture: Texture,
position: Vec2<f32>,
}
#[derive(Clone)]
struct | {
ball_texture: Texture,
position: Vec2<f32>,
velocity: Vec2<f32>,
}
impl Ball {
fn reset(&mut self){
self.position = Vec2::new(
(SCREEN_WIDTH as f32)/2.0 - (self.ball_texture.width() as f32)/2.0,
(SCREEN_HEIGHT as f32)/2.0 - (self.ball_texture.height() as f32)/2.0
);
}
}
struct GameState {
ball: Ball,
player_paddle: Paddle,
player_score: i32,
enemy_paddle: Paddle,
enemy_score: i32,
simulated: bool,
enemy_hit: bool, // used when simulating
}
impl GameState {
fn new(ctx: &mut Context) -> tetra::Result<GameState> {
// init textures
let paddle_texture = Texture::new(ctx, "res/paddle.png")?;
let ball_texture = Texture::new(ctx, "res/ball.png")?;
// init ball
let mut ball = Ball {
ball_texture,
position: Vec2::new(0.0, 0.0),
velocity: Vec2::new(1.0, 1.0),
};
ball.reset(); // initialise ball's position
ball.velocity = ball.velocity.normalized() * BALL_SPEED; // init ball speed
// calculate paddle initial y
let paddle_initial_y = (SCREEN_HEIGHT as f32)/2.0 - (paddle_texture.height() as f32)/2.0;
Ok(GameState {
ball,
player_paddle: Paddle {
paddle_texture: paddle_texture.clone(),
position: Vec2::new(
(SCREEN_WIDTH as f32) - PADDING - (paddle_texture.width() as f32),
paddle_initial_y,
),
},
player_score: 0,
enemy_paddle: Paddle {
paddle_texture: paddle_texture.clone(),
position: Vec2::new(
PADDING,
paddle_initial_y,
),
},
enemy_score: 0,
simulated: false,
enemy_hit: false,
})
}
fn draw_paddle(ctx: &mut Context, paddle: &Paddle){
graphics::draw(ctx, &paddle.paddle_texture, paddle.position)
}
fn handle_inputs(&mut self, ctx: &mut Context){
if input::is_key_down(ctx, Key::W) {
self.player_paddle.position.y -= PADDLE_SPEED;
}
if input::is_key_down(ctx, Key::S) {
self.player_paddle.position.y += PADDLE_SPEED;
}
        // if !AI_ENABLED {
        if !false {
if input::is_key_down(ctx, Key::O) {
self.enemy_paddle.position.y -= PADDLE_SPEED;
}
if input::is_key_down(ctx, Key::L) {
self.enemy_paddle.position.y += PADDLE_SPEED;
}
}
}
/// Check for ball-paddle collision with the given paddle
fn check_intersects(ball: &Ball, paddle: &Paddle) -> bool{
// check if ball's centre point is inside paddle rectangle:
// method adapted from: https://stackoverflow.com/a/2763387/5013267
let ab = Vec2::new(paddle.paddle_texture.width() as f32, 0.0); // vector a->b
let bc = Vec2::new(0.0, paddle.paddle_texture.height() as f32); // vector b->c
let m = ball.position + Vec2::new(ball.ball_texture.width() as f32, ball.ball_texture.height() as f32)/2.0;
let ab_dot_am = ab.dot(m - paddle.position);
let bc_dot_bm = bc.dot(m - (paddle.position + (paddle.paddle_texture.width() as f32, 0.0)));
// return value:
0.0 <= ab_dot_am && ab_dot_am <= ab.dot(ab)
&& 0.0 <= bc_dot_bm && bc_dot_bm <= bc.dot(bc)
}
fn apply_collision_response(ball: &mut Ball, paddle: &Paddle){
ball.velocity.x = -(ball.velocity.x + (BALL_ACC * ball.velocity.x.signum()));
let offset = (paddle.position.y - ball.position.y) / paddle.paddle_texture.height() as f32;
ball.velocity.y += PADDLE_SPIN * -offset;
}
fn update_collision(ball: &mut Ball, paddle: &Paddle){
if GameState::check_intersects(ball, &paddle){
GameState::apply_collision_response(ball, paddle);
}
}
fn update_ball(&mut self, _ctx: &mut Context){
self.update_ai(_ctx);
self.ball.position += self.ball.velocity;
        if !self.simulated {
            GameState::update_collision(&mut self.ball, &self.player_paddle);
            GameState::update_collision(&mut self.ball, &self.enemy_paddle);
        } else {
// if simulated, use simplified calculations
// (always assume ball hits player paddle, otherwise AI would win anyway)
// only need to check player paddle
if self.ball.position.x + ((self.ball.ball_texture.width() as f32)/2.0) >= self.player_paddle.position.x {
GameState::apply_collision_response(&mut self.ball, &mut self.player_paddle);
}
// check reaches enemy's side (so that iteration can be terminated)
if self.ball.position.x <= self.enemy_paddle.position.x + self.enemy_paddle.paddle_texture.width() as f32 {
self.enemy_hit = true;
return; // no need to do rest of update calculations
}
}
// walls
// if bouncing off top or bottom walls...
if (self.ball.position[1] + (self.ball.ball_texture.height() as f32) >= (SCREEN_HEIGHT as f32)) || self.ball.position[1] <= 0.0 {
self.ball.velocity[1] = -self.ball.velocity[1];
}
// if bouncing off either of the side walls...
if self.ball.position[0] + (self.ball.ball_texture.width() as f32) >= (SCREEN_WIDTH as f32) || self.ball.position[0] <= 0.0 {
if self.ball.position[0] <= 0.0 {
// bounced off left wall
self.player_score += 1;
} else {
// bounced off right wall
self.enemy_score += 1;
self.ball.velocity = Vec2::new(1.0, 1.0); // setting direction
}
// reset ball to centre
self.ball.reset();
// reset ball speed (but not direction)
self.ball.velocity = self.ball.velocity.normalized() * BALL_SPEED;
}
}
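    // AI approach: clone the current state into a throwaway "simulated" GameState and step it
    // forward until the ball reaches the enemy's side (or AI_MAX_ITERS runs out), then nudge the
    // real enemy paddle toward the predicted intercept, capped at PADDLE_SPEED per update.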
fn update_ai(&mut self, ctx: &mut Context){
        if self.simulated || !AI_ENABLED {
return;
}
if AI_WAIT_FOR_PLAYER_HIT && self.ball.velocity.x >= 0.0 {
// ball vel.x >= 0.0 implies ball moving towards player still, and has not been returned yet
return;
}
// create a simulation GameState, cloned from real GameState
let mut sim = GameState {
ball: self.ball.clone(),
player_paddle: self.player_paddle.clone(),
player_score: self.player_score,
enemy_paddle: self.enemy_paddle.clone(),
enemy_score: self.enemy_score,
simulated: true,
enemy_hit: false,
};
        for _ in 0..AI_MAX_ITERS {
            if !sim.enemy_hit {
sim.update(ctx).expect("bruh moment when updating sim");
                // sim.draw(ctx).expect("bruh moment when drawing sim"); // NB: only for debug -- rendering here slows down program significantly
} else {
// if enemy_hit, stop iterating: found solution
// TODO: maybe implement solution caching
// (but low prio because solution prediction is variable anyway [depends on other player's angle of attack])
let target_y = sim.ball.position.y + (sim.ball.ball_texture.height() as f32)/2.0
- (self.enemy_paddle.paddle_texture.height() as f32)/2.0;
let delta = target_y - self.enemy_paddle.position.y;
if delta.abs() > EPSILON {
self.enemy_paddle.position.y += (delta.abs()).min(PADDLE_SPEED).copysign(delta);
} else {
self.enemy_paddle.position.y = target_y;
}
break;
}
}
}
}
impl State for GameState {
fn draw(&mut self, ctx: &mut Context) -> tetra::Result {
graphics::clear(ctx, Color::rgb(0.0, 0.0, 0.0));
let text = Text::new(
format!("{}-{}", self.enemy_score, self.player_score),
Font::vector(ctx, "res/vcr_osd_mono.ttf", FONT_SIZE)?,
);
graphics::draw(ctx, &text, Vec2::new((SCREEN_WIDTH/2) as f32, FONT_SIZE));
GameState::draw_paddle(ctx, &self.enemy_paddle);
GameState::draw_paddle(ctx, &self.player_paddle);
        if !self.simulated {
graphics::draw(ctx, &self.ball.ball_texture, self.ball.position);
} else {
// for debugging, render simulated run in different shade
// (visualisation may not be used in final version)
graphics::draw(ctx, &self.ball.ball_texture, DrawParams::new()
.position(self.ball.position)
.color(Color::rgb(1.0, 0.0, 0.0)));
}
Ok(())
}
fn update(&mut self, ctx: &mut Context) -> tetra::Result {
self.handle_inputs(ctx);
self.update_ball(ctx);
Ok(())
}
}
fn main() -> tetra::Result {
ContextBuilder::new("Pong", SCREEN_WIDTH, SCREEN_HEIGHT)
.quit_on_escape(true)
.build()?
.run(GameState::new)
} | Ball | identifier_name |
main.rs | use tetra::graphics::text::{Text, Font};
use tetra::graphics::{self, Color, Texture, DrawParams};
use tetra::math::Vec2;
use tetra::input::{self, Key};
use tetra::{Context, ContextBuilder, State};
// visual consts
const SCREEN_WIDTH: i32 = 1280;
const SCREEN_HEIGHT: i32 = 720;
const FONT_SIZE: f32 = 32.0;
const PADDING: f32 = FONT_SIZE;
// gameplay consts
const PADDLE_SPEED: f32 = 14.0;
const BALL_SPEED: f32 = PADDLE_SPEED/2.0;
const PADDLE_SPIN: f32 = 3.0;
const BALL_ACC: f32 = 0.005;
// AI constants
const AI_ENABLED: bool = true;
const AI_MAX_ITERS: u32 = 400; // Experimentation results: around 800 is more than sufficient,
// 400 is quite good, though it is insufficient for a short time after the ball leaves the enemy paddle
const AI_WAIT_FOR_PLAYER_HIT: bool = true; // wait for the player to hit the ball first before calculating solution
// (= will not have to guess the player's angle of attack)
// NB: if waiting for player hit, max iters may be set to a lower value
const EPSILON: f32 = 1.0;
#[derive(Clone)]
struct Paddle {
paddle_texture: Texture,
position: Vec2<f32>,
}
#[derive(Clone)]
struct Ball {
ball_texture: Texture,
position: Vec2<f32>,
velocity: Vec2<f32>,
}
impl Ball {
fn reset(&mut self){
self.position = Vec2::new(
(SCREEN_WIDTH as f32)/2.0 - (self.ball_texture.width() as f32)/2.0,
(SCREEN_HEIGHT as f32)/2.0 - (self.ball_texture.height() as f32)/2.0
);
}
}
struct GameState {
ball: Ball,
player_paddle: Paddle,
player_score: i32,
enemy_paddle: Paddle,
enemy_score: i32,
simulated: bool,
enemy_hit: bool, // used when simulating
}
impl GameState {
fn new(ctx: &mut Context) -> tetra::Result<GameState> {
// init textures
let paddle_texture = Texture::new(ctx, "res/paddle.png")?;
let ball_texture = Texture::new(ctx, "res/ball.png")?;
// init ball
let mut ball = Ball {
ball_texture,
position: Vec2::new(0.0, 0.0),
velocity: Vec2::new(1.0, 1.0),
};
ball.reset(); // initialise ball's position
ball.velocity = ball.velocity.normalized() * BALL_SPEED; // init ball speed
// calculate paddle initial y
let paddle_initial_y = (SCREEN_HEIGHT as f32)/2.0 - (paddle_texture.height() as f32)/2.0;
Ok(GameState {
ball,
player_paddle: Paddle {
paddle_texture: paddle_texture.clone(),
position: Vec2::new(
(SCREEN_WIDTH as f32) - PADDING - (paddle_texture.width() as f32),
paddle_initial_y,
),
},
player_score: 0,
enemy_paddle: Paddle {
paddle_texture: paddle_texture.clone(),
position: Vec2::new(
PADDING,
paddle_initial_y,
),
},
enemy_score: 0,
simulated: false,
enemy_hit: false,
}) | fn draw_paddle(ctx: &mut Context, paddle: &Paddle){
graphics::draw(ctx, &paddle.paddle_texture, paddle.position)
}
fn handle_inputs(&mut self, ctx: &mut Context){
if input::is_key_down(ctx, Key::W) {
self.player_paddle.position.y -= PADDLE_SPEED;
}
if input::is_key_down(ctx, Key::S) {
self.player_paddle.position.y += PADDLE_SPEED;
}
        // if !AI_ENABLED {
        if !false {
if input::is_key_down(ctx, Key::O) {
self.enemy_paddle.position.y -= PADDLE_SPEED;
}
if input::is_key_down(ctx, Key::L) {
self.enemy_paddle.position.y += PADDLE_SPEED;
}
}
}
/// Check for ball-paddle collision with the given paddle
fn check_intersects(ball: &Ball, paddle: &Paddle) -> bool{
// check if ball's centre point is inside paddle rectangle:
// method adapted from: https://stackoverflow.com/a/2763387/5013267
let ab = Vec2::new(paddle.paddle_texture.width() as f32, 0.0); // vector a->b
let bc = Vec2::new(0.0, paddle.paddle_texture.height() as f32); // vector b->c
let m = ball.position + Vec2::new(ball.ball_texture.width() as f32, ball.ball_texture.height() as f32)/2.0;
let ab_dot_am = ab.dot(m - paddle.position);
let bc_dot_bm = bc.dot(m - (paddle.position + (paddle.paddle_texture.width() as f32, 0.0)));
// return value:
0.0 <= ab_dot_am && ab_dot_am <= ab.dot(ab)
&& 0.0 <= bc_dot_bm && bc_dot_bm <= bc.dot(bc)
}
fn apply_collision_response(ball: &mut Ball, paddle: &Paddle){
ball.velocity.x = -(ball.velocity.x + (BALL_ACC * ball.velocity.x.signum()));
let offset = (paddle.position.y - ball.position.y) / paddle.paddle_texture.height() as f32;
ball.velocity.y += PADDLE_SPIN * -offset;
}
fn update_collision(ball: &mut Ball, paddle: &Paddle){
if GameState::check_intersects(ball, &paddle){
GameState::apply_collision_response(ball, paddle);
}
}
fn update_ball(&mut self, _ctx: &mut Context){
self.update_ai(_ctx);
self.ball.position += self.ball.velocity;
        if !self.simulated {
            GameState::update_collision(&mut self.ball, &self.player_paddle);
            GameState::update_collision(&mut self.ball, &self.enemy_paddle);
        } else {
// if simulated, use simplified calculations
// (always assume ball hits player paddle, otherwise AI would win anyway)
// only need to check player paddle
if self.ball.position.x + ((self.ball.ball_texture.width() as f32)/2.0) >= self.player_paddle.position.x {
GameState::apply_collision_response(&mut self.ball, &mut self.player_paddle);
}
// check reaches enemy's side (so that iteration can be terminated)
if self.ball.position.x <= self.enemy_paddle.position.x + self.enemy_paddle.paddle_texture.width() as f32 {
self.enemy_hit = true;
return; // no need to do rest of update calculations
}
}
// walls
// if bouncing off top or bottom walls...
if (self.ball.position[1] + (self.ball.ball_texture.height() as f32) >= (SCREEN_HEIGHT as f32)) || self.ball.position[1] <= 0.0 {
self.ball.velocity[1] = -self.ball.velocity[1];
}
// if bouncing off either of the side walls...
if self.ball.position[0] + (self.ball.ball_texture.width() as f32) >= (SCREEN_WIDTH as f32) || self.ball.position[0] <= 0.0 {
if self.ball.position[0] <= 0.0 {
// bounced off left wall
self.player_score += 1;
} else {
// bounced off right wall
self.enemy_score += 1;
self.ball.velocity = Vec2::new(1.0, 1.0); // setting direction
}
// reset ball to centre
self.ball.reset();
// reset ball speed (but not direction)
self.ball.velocity = self.ball.velocity.normalized() * BALL_SPEED;
}
}
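    // AI approach: clone the current state into a throwaway "simulated" GameState and step it
    // forward until the ball reaches the enemy's side (or AI_MAX_ITERS runs out), then nudge the
    // real enemy paddle toward the predicted intercept, capped at PADDLE_SPEED per update.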
fn update_ai(&mut self, ctx: &mut Context){
        if self.simulated || !AI_ENABLED {
return;
}
if AI_WAIT_FOR_PLAYER_HIT && self.ball.velocity.x >= 0.0 {
// ball vel.x >= 0.0 implies ball moving towards player still, and has not been returned yet
return;
}
// create a simulation GameState, cloned from real GameState
let mut sim = GameState {
ball: self.ball.clone(),
player_paddle: self.player_paddle.clone(),
player_score: self.player_score,
enemy_paddle: self.enemy_paddle.clone(),
enemy_score: self.enemy_score,
simulated: true,
enemy_hit: false,
};
        for _ in 0..AI_MAX_ITERS {
            if !sim.enemy_hit {
sim.update(ctx).expect("bruh moment when updating sim");
                // sim.draw(ctx).expect("bruh moment when drawing sim"); // NB: only for debug -- rendering here slows down program significantly
} else {
// if enemy_hit, stop iterating: found solution
// TODO: maybe implement solution caching
// (but low prio because solution prediction is variable anyway [depends on other player's angle of attack])
let target_y = sim.ball.position.y + (sim.ball.ball_texture.height() as f32)/2.0
- (self.enemy_paddle.paddle_texture.height() as f32)/2.0;
let delta = target_y - self.enemy_paddle.position.y;
if delta.abs() > EPSILON {
self.enemy_paddle.position.y += (delta.abs()).min(PADDLE_SPEED).copysign(delta);
} else {
self.enemy_paddle.position.y = target_y;
}
break;
}
}
}
}
impl State for GameState {
fn draw(&mut self, ctx: &mut Context) -> tetra::Result {
graphics::clear(ctx, Color::rgb(0.0, 0.0, 0.0));
let text = Text::new(
format!("{}-{}", self.enemy_score, self.player_score),
Font::vector(ctx, "res/vcr_osd_mono.ttf", FONT_SIZE)?,
);
graphics::draw(ctx, &text, Vec2::new((SCREEN_WIDTH/2) as f32, FONT_SIZE));
GameState::draw_paddle(ctx, &self.enemy_paddle);
GameState::draw_paddle(ctx, &self.player_paddle);
        if !self.simulated {
graphics::draw(ctx, &self.ball.ball_texture, self.ball.position);
} else {
// for debugging, render simulated run in different shade
// (visualisation may not be used in final version)
graphics::draw(ctx, &self.ball.ball_texture, DrawParams::new()
.position(self.ball.position)
.color(Color::rgb(1.0, 0.0, 0.0)));
}
Ok(())
}
fn update(&mut self, ctx: &mut Context) -> tetra::Result {
self.handle_inputs(ctx);
self.update_ball(ctx);
Ok(())
}
}
fn main() -> tetra::Result {
ContextBuilder::new("Pong", SCREEN_WIDTH, SCREEN_HEIGHT)
.quit_on_escape(true)
.build()?
.run(GameState::new)
} | }
| random_line_split |
ffi.rs | // Take a look at the license at the top of the repository in the LICENSE file.
use core_foundation_sys::base::{mach_port_t, CFAllocatorRef};
use core_foundation_sys::dictionary::{CFDictionaryRef, CFMutableDictionaryRef};
use core_foundation_sys::string::CFStringRef;
use libc::{c_char, kern_return_t}; |
#[allow(non_camel_case_types)]
pub type io_object_t = mach_port_t;
#[allow(non_camel_case_types)]
pub type io_iterator_t = io_object_t;
#[allow(non_camel_case_types)]
pub type io_registry_entry_t = io_object_t;
// This is a hack, `io_name_t` should normally be `[c_char; 128]` but Rust makes it very annoying
// to deal with that so we go around it a bit.
#[allow(non_camel_case_types, dead_code)]
pub type io_name = [c_char; 128];
#[allow(non_camel_case_types)]
pub type io_name_t = *const c_char;
pub type IOOptionBits = u32;
#[allow(non_upper_case_globals)]
pub const kIOServicePlane: &[u8] = b"IOService\0";
#[allow(non_upper_case_globals)]
pub const kIOPropertyDeviceCharacteristicsKey: &str = "Device Characteristics";
#[allow(non_upper_case_globals)]
pub const kIOPropertyMediumTypeKey: &str = "Medium Type";
#[allow(non_upper_case_globals)]
pub const kIOPropertyMediumTypeSolidStateKey: &str = "Solid State";
#[allow(non_upper_case_globals)]
pub const kIOPropertyMediumTypeRotationalKey: &str = "Rotational";
// Based on https://github.com/libusb/libusb/blob/bed8d3034eac74a6e1ba123b5c270ea63cb6cf1a/libusb/os/darwin_usb.c#L54-L55,
// we can simply set it to 0 (and is the same value as its replacement `kIOMainPortDefault`).
#[allow(non_upper_case_globals)]
pub const kIOMasterPortDefault: mach_port_t = 0;
// Note: Obtaining information about disks using IOKit is allowed inside the default macOS App Sandbox.
#[link(name = "IOKit", kind = "framework")]
extern "C" {
pub fn IOServiceGetMatchingServices(
mainPort: mach_port_t,
matching: CFMutableDictionaryRef,
existing: *mut io_iterator_t,
) -> kern_return_t;
#[allow(dead_code)]
pub fn IOServiceMatching(a: *const c_char) -> CFMutableDictionaryRef;
pub fn IOIteratorNext(iterator: io_iterator_t) -> io_object_t;
pub fn IOObjectRelease(obj: io_object_t) -> kern_return_t;
pub fn IORegistryEntryCreateCFProperty(
entry: io_registry_entry_t,
key: CFStringRef,
allocator: CFAllocatorRef,
options: IOOptionBits,
) -> CFDictionaryRef;
pub fn IORegistryEntryGetParentEntry(
entry: io_registry_entry_t,
plane: io_name_t,
parent: *mut io_registry_entry_t,
) -> kern_return_t;
#[allow(dead_code)]
pub fn IORegistryEntryGetName(entry: io_registry_entry_t, name: io_name_t) -> kern_return_t;
pub fn IOBSDNameMatching(
mainPort: mach_port_t,
options: u32,
bsdName: *const c_char,
) -> CFMutableDictionaryRef;
}
#[allow(dead_code)]
pub const KIO_RETURN_SUCCESS: i32 = 0;
extern "C" {
    // FIXME: to be removed once a version of core_foundation_sys higher than 0.8.4 is released.
#[allow(dead_code)]
pub fn CFStringCreateWithCStringNoCopy(
alloc: CFAllocatorRef,
cStr: *const c_char,
encoding: core_foundation_sys::string::CFStringEncoding,
contentsDeallocator: CFAllocatorRef,
) -> CFStringRef;
}
#[cfg(all(
not(feature = "apple-sandbox"),
any(target_arch = "x86", target_arch = "x86_64")
))]
mod io_service {
use super::{io_object_t, mach_port_t};
use libc::{kern_return_t, size_t, task_t};
#[allow(non_camel_case_types)]
pub type io_connect_t = io_object_t;
#[allow(non_camel_case_types)]
pub type io_service_t = io_object_t;
#[allow(non_camel_case_types)]
pub type task_port_t = task_t;
extern "C" {
pub fn IOServiceOpen(
device: io_service_t,
owning_task: task_port_t,
type_: u32,
connect: *mut io_connect_t,
) -> kern_return_t;
pub fn IOServiceClose(a: io_connect_t) -> kern_return_t;
#[allow(dead_code)]
pub fn IOConnectCallStructMethod(
connection: mach_port_t,
selector: u32,
inputStruct: *const KeyData_t,
inputStructCnt: size_t,
outputStruct: *mut KeyData_t,
outputStructCnt: *mut size_t,
) -> kern_return_t;
}
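    // The KeyData_* structs below appear to mirror the binary layout the AppleSMC kernel
    // extension expects when driven through IOConnectCallStructMethod with KERNEL_INDEX_SMC.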
#[cfg_attr(feature = "debug", derive(Debug, Eq, Hash, PartialEq))]
#[repr(C)]
pub struct KeyData_vers_t {
pub major: u8,
pub minor: u8,
pub build: u8,
pub reserved: [u8; 1],
pub release: u16,
}
#[cfg_attr(feature = "debug", derive(Debug, Eq, Hash, PartialEq))]
#[repr(C)]
pub struct KeyData_pLimitData_t {
pub version: u16,
pub length: u16,
pub cpu_plimit: u32,
pub gpu_plimit: u32,
pub mem_plimit: u32,
}
#[cfg_attr(feature = "debug", derive(Debug, Eq, Hash, PartialEq))]
#[repr(C)]
pub struct KeyData_keyInfo_t {
pub data_size: u32,
pub data_type: u32,
pub data_attributes: u8,
}
#[cfg_attr(feature = "debug", derive(Debug, Eq, Hash, PartialEq))]
#[repr(C)]
pub struct KeyData_t {
pub key: u32,
pub vers: KeyData_vers_t,
pub p_limit_data: KeyData_pLimitData_t,
pub key_info: KeyData_keyInfo_t,
pub result: u8,
pub status: u8,
pub data8: u8,
pub data32: u32,
pub bytes: [i8; 32], // SMCBytes_t
}
#[allow(dead_code)]
pub const KERNEL_INDEX_SMC: i32 = 2;
#[allow(dead_code)]
pub const SMC_CMD_READ_KEYINFO: u8 = 9;
#[allow(dead_code)]
pub const SMC_CMD_READ_BYTES: u8 = 5;
}
#[cfg(feature = "apple-sandbox")]
mod io_service {}
#[cfg(all(
not(feature = "apple-sandbox"),
any(target_arch = "x86", target_arch = "x86_64")
))]
pub use io_service::*;
#[cfg(all(not(feature = "apple-sandbox"), target_arch = "aarch64"))]
mod io_service {
use std::ptr::null;
use super::CFStringCreateWithCStringNoCopy;
use core_foundation_sys::array::CFArrayRef;
use core_foundation_sys::base::{CFAllocatorRef, CFRelease};
use core_foundation_sys::dictionary::{
kCFTypeDictionaryKeyCallBacks, kCFTypeDictionaryValueCallBacks, CFDictionaryCreate,
CFDictionaryRef,
};
use core_foundation_sys::number::{kCFNumberSInt32Type, CFNumberCreate};
use core_foundation_sys::string::CFStringRef;
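    // Opaque handle types for the private IOHIDEventSystemClient API; they are only ever
    // used behind pointers, hence the single-field #[repr(C)] wrappers around libc::c_void.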
#[repr(C)]
pub struct __IOHIDServiceClient(libc::c_void);
pub type IOHIDServiceClientRef = *const __IOHIDServiceClient;
#[repr(C)]
pub struct __IOHIDEventSystemClient(libc::c_void);
pub type IOHIDEventSystemClientRef = *const __IOHIDEventSystemClient;
#[repr(C)]
pub struct __IOHIDEvent(libc::c_void);
pub type IOHIDEventRef = *const __IOHIDEvent;
#[allow(non_upper_case_globals)]
pub const kIOHIDEventTypeTemperature: i64 = 15;
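    // Appears to correspond to IOKit's IOHIDEventFieldBase macro: a field identifier is the
    // event type packed into the upper 16 bits (with a field offset in the lower bits).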
#[inline]
#[allow(non_snake_case)]
pub fn IOHIDEventFieldBase(event_type: i64) -> i64 {
event_type << 16
}
#[cfg(not(feature = "apple-sandbox"))]
extern "C" {
pub fn IOHIDEventSystemClientCreate(allocator: CFAllocatorRef)
-> IOHIDEventSystemClientRef;
pub fn IOHIDEventSystemClientSetMatching(
client: IOHIDEventSystemClientRef,
matches: CFDictionaryRef,
) -> i32;
pub fn IOHIDEventSystemClientCopyServices(client: IOHIDEventSystemClientRef) -> CFArrayRef;
pub fn IOHIDServiceClientCopyProperty(
service: IOHIDServiceClientRef,
key: CFStringRef,
) -> CFStringRef;
pub fn IOHIDServiceClientCopyEvent(
service: IOHIDServiceClientRef,
v0: i64,
v1: i32,
v2: i64,
) -> IOHIDEventRef;
pub fn IOHIDEventGetFloatValue(event: IOHIDEventRef, field: i64) -> f64;
}
pub(crate) const HID_DEVICE_PROPERTY_PRODUCT: &[u8] = b"Product\0";
pub(crate) const HID_DEVICE_PROPERTY_PRIMARY_USAGE: &[u8] = b"PrimaryUsage\0";
pub(crate) const HID_DEVICE_PROPERTY_PRIMARY_USAGE_PAGE: &[u8] = b"PrimaryUsagePage\0";
#[allow(non_upper_case_globals)]
pub(crate) const kHIDPage_AppleVendor: i32 = 0xff00;
#[allow(non_upper_case_globals)]
pub(crate) const kHIDUsage_AppleVendor_TemperatureSensor: i32 = 0x0005;
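    // Builds a { PrimaryUsagePage: page, PrimaryUsage: usage } dictionary used to filter HID
    // services (e.g. the Apple vendor temperature sensors above). Per the Core Foundation
    // "Create" rule, the caller owns the returned dictionary and should eventually release it.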
pub(crate) fn matching(page: i32, usage: i32) -> CFDictionaryRef {
unsafe {
let keys = [
CFStringCreateWithCStringNoCopy(
null() as *const _,
HID_DEVICE_PROPERTY_PRIMARY_USAGE_PAGE.as_ptr() as *const _,
core_foundation_sys::string::kCFStringEncodingUTF8,
core_foundation_sys::base::kCFAllocatorNull as *mut _,
),
CFStringCreateWithCStringNoCopy(
null() as *const _,
HID_DEVICE_PROPERTY_PRIMARY_USAGE.as_ptr() as *const _,
core_foundation_sys::string::kCFStringEncodingUTF8,
core_foundation_sys::base::kCFAllocatorNull as *mut _,
),
];
let nums = [
CFNumberCreate(null(), kCFNumberSInt32Type, &page as *const _ as *const _),
CFNumberCreate(null(), kCFNumberSInt32Type, &usage as *const _ as *const _),
];
let dict = CFDictionaryCreate(
null(),
&keys as *const _ as *const _,
&nums as *const _ as *const _,
2,
&kCFTypeDictionaryKeyCallBacks,
&kCFTypeDictionaryValueCallBacks,
);
for key in keys {
CFRelease(key as _);
}
for num in nums {
CFRelease(num as _);
}
dict
}
}
}
#[cfg(all(not(feature = "apple-sandbox"), target_arch = "aarch64"))]
pub use io_service::*; |
// Note: IOKit is only available on MacOS up until very recent iOS versions: https://developer.apple.com/documentation/iokit | random_line_split |
ffi.rs | // Take a look at the license at the top of the repository in the LICENSE file.
use core_foundation_sys::base::{mach_port_t, CFAllocatorRef};
use core_foundation_sys::dictionary::{CFDictionaryRef, CFMutableDictionaryRef};
use core_foundation_sys::string::CFStringRef;
use libc::{c_char, kern_return_t};
// Note: IOKit is only available on MacOS up until very recent iOS versions: https://developer.apple.com/documentation/iokit
#[allow(non_camel_case_types)]
pub type io_object_t = mach_port_t;
#[allow(non_camel_case_types)]
pub type io_iterator_t = io_object_t;
#[allow(non_camel_case_types)]
pub type io_registry_entry_t = io_object_t;
// This is a hack, `io_name_t` should normally be `[c_char; 128]` but Rust makes it very annoying
// to deal with that so we go around it a bit.
#[allow(non_camel_case_types, dead_code)]
pub type io_name = [c_char; 128];
#[allow(non_camel_case_types)]
pub type io_name_t = *const c_char;
pub type IOOptionBits = u32;
#[allow(non_upper_case_globals)]
pub const kIOServicePlane: &[u8] = b"IOService\0";
#[allow(non_upper_case_globals)]
pub const kIOPropertyDeviceCharacteristicsKey: &str = "Device Characteristics";
#[allow(non_upper_case_globals)]
pub const kIOPropertyMediumTypeKey: &str = "Medium Type";
#[allow(non_upper_case_globals)]
pub const kIOPropertyMediumTypeSolidStateKey: &str = "Solid State";
#[allow(non_upper_case_globals)]
pub const kIOPropertyMediumTypeRotationalKey: &str = "Rotational";
// Based on https://github.com/libusb/libusb/blob/bed8d3034eac74a6e1ba123b5c270ea63cb6cf1a/libusb/os/darwin_usb.c#L54-L55,
// we can simply set it to 0 (and is the same value as its replacement `kIOMainPortDefault`).
#[allow(non_upper_case_globals)]
pub const kIOMasterPortDefault: mach_port_t = 0;
// Note: Obtaining information about disks using IOKit is allowed inside the default macOS App Sandbox.
#[link(name = "IOKit", kind = "framework")]
extern "C" {
pub fn IOServiceGetMatchingServices(
mainPort: mach_port_t,
matching: CFMutableDictionaryRef,
existing: *mut io_iterator_t,
) -> kern_return_t;
#[allow(dead_code)]
pub fn IOServiceMatching(a: *const c_char) -> CFMutableDictionaryRef;
pub fn IOIteratorNext(iterator: io_iterator_t) -> io_object_t;
pub fn IOObjectRelease(obj: io_object_t) -> kern_return_t;
pub fn IORegistryEntryCreateCFProperty(
entry: io_registry_entry_t,
key: CFStringRef,
allocator: CFAllocatorRef,
options: IOOptionBits,
) -> CFDictionaryRef;
pub fn IORegistryEntryGetParentEntry(
entry: io_registry_entry_t,
plane: io_name_t,
parent: *mut io_registry_entry_t,
) -> kern_return_t;
#[allow(dead_code)]
pub fn IORegistryEntryGetName(entry: io_registry_entry_t, name: io_name_t) -> kern_return_t;
pub fn IOBSDNameMatching(
mainPort: mach_port_t,
options: u32,
bsdName: *const c_char,
) -> CFMutableDictionaryRef;
}
#[allow(dead_code)]
pub const KIO_RETURN_SUCCESS: i32 = 0;
extern "C" {
    // FIXME: to be removed once a version of core_foundation_sys higher than 0.8.4 is released.
#[allow(dead_code)]
pub fn CFStringCreateWithCStringNoCopy(
alloc: CFAllocatorRef,
cStr: *const c_char,
encoding: core_foundation_sys::string::CFStringEncoding,
contentsDeallocator: CFAllocatorRef,
) -> CFStringRef;
}
#[cfg(all(
not(feature = "apple-sandbox"),
any(target_arch = "x86", target_arch = "x86_64")
))]
mod io_service {
use super::{io_object_t, mach_port_t};
use libc::{kern_return_t, size_t, task_t};
#[allow(non_camel_case_types)]
pub type io_connect_t = io_object_t;
#[allow(non_camel_case_types)]
pub type io_service_t = io_object_t;
#[allow(non_camel_case_types)]
pub type task_port_t = task_t;
extern "C" {
pub fn IOServiceOpen(
device: io_service_t,
owning_task: task_port_t,
type_: u32,
connect: *mut io_connect_t,
) -> kern_return_t;
pub fn IOServiceClose(a: io_connect_t) -> kern_return_t;
#[allow(dead_code)]
pub fn IOConnectCallStructMethod(
connection: mach_port_t,
selector: u32,
inputStruct: *const KeyData_t,
inputStructCnt: size_t,
outputStruct: *mut KeyData_t,
outputStructCnt: *mut size_t,
) -> kern_return_t;
}
#[cfg_attr(feature = "debug", derive(Debug, Eq, Hash, PartialEq))]
#[repr(C)]
pub struct KeyData_vers_t {
pub major: u8,
pub minor: u8,
pub build: u8,
pub reserved: [u8; 1],
pub release: u16,
}
#[cfg_attr(feature = "debug", derive(Debug, Eq, Hash, PartialEq))]
#[repr(C)]
pub struct | {
pub version: u16,
pub length: u16,
pub cpu_plimit: u32,
pub gpu_plimit: u32,
pub mem_plimit: u32,
}
#[cfg_attr(feature = "debug", derive(Debug, Eq, Hash, PartialEq))]
#[repr(C)]
pub struct KeyData_keyInfo_t {
pub data_size: u32,
pub data_type: u32,
pub data_attributes: u8,
}
#[cfg_attr(feature = "debug", derive(Debug, Eq, Hash, PartialEq))]
#[repr(C)]
pub struct KeyData_t {
pub key: u32,
pub vers: KeyData_vers_t,
pub p_limit_data: KeyData_pLimitData_t,
pub key_info: KeyData_keyInfo_t,
pub result: u8,
pub status: u8,
pub data8: u8,
pub data32: u32,
pub bytes: [i8; 32], // SMCBytes_t
}
#[allow(dead_code)]
pub const KERNEL_INDEX_SMC: i32 = 2;
#[allow(dead_code)]
pub const SMC_CMD_READ_KEYINFO: u8 = 9;
#[allow(dead_code)]
pub const SMC_CMD_READ_BYTES: u8 = 5;
}
#[cfg(feature = "apple-sandbox")]
mod io_service {}
#[cfg(all(
not(feature = "apple-sandbox"),
any(target_arch = "x86", target_arch = "x86_64")
))]
pub use io_service::*;
#[cfg(all(not(feature = "apple-sandbox"), target_arch = "aarch64"))]
mod io_service {
use std::ptr::null;
use super::CFStringCreateWithCStringNoCopy;
use core_foundation_sys::array::CFArrayRef;
use core_foundation_sys::base::{CFAllocatorRef, CFRelease};
use core_foundation_sys::dictionary::{
kCFTypeDictionaryKeyCallBacks, kCFTypeDictionaryValueCallBacks, CFDictionaryCreate,
CFDictionaryRef,
};
use core_foundation_sys::number::{kCFNumberSInt32Type, CFNumberCreate};
use core_foundation_sys::string::CFStringRef;
#[repr(C)]
pub struct __IOHIDServiceClient(libc::c_void);
pub type IOHIDServiceClientRef = *const __IOHIDServiceClient;
#[repr(C)]
pub struct __IOHIDEventSystemClient(libc::c_void);
pub type IOHIDEventSystemClientRef = *const __IOHIDEventSystemClient;
#[repr(C)]
pub struct __IOHIDEvent(libc::c_void);
pub type IOHIDEventRef = *const __IOHIDEvent;
#[allow(non_upper_case_globals)]
pub const kIOHIDEventTypeTemperature: i64 = 15;
#[inline]
#[allow(non_snake_case)]
pub fn IOHIDEventFieldBase(event_type: i64) -> i64 {
event_type << 16
}
#[cfg(not(feature = "apple-sandbox"))]
extern "C" {
pub fn IOHIDEventSystemClientCreate(allocator: CFAllocatorRef)
-> IOHIDEventSystemClientRef;
pub fn IOHIDEventSystemClientSetMatching(
client: IOHIDEventSystemClientRef,
matches: CFDictionaryRef,
) -> i32;
pub fn IOHIDEventSystemClientCopyServices(client: IOHIDEventSystemClientRef) -> CFArrayRef;
pub fn IOHIDServiceClientCopyProperty(
service: IOHIDServiceClientRef,
key: CFStringRef,
) -> CFStringRef;
pub fn IOHIDServiceClientCopyEvent(
service: IOHIDServiceClientRef,
v0: i64,
v1: i32,
v2: i64,
) -> IOHIDEventRef;
pub fn IOHIDEventGetFloatValue(event: IOHIDEventRef, field: i64) -> f64;
}
pub(crate) const HID_DEVICE_PROPERTY_PRODUCT: &[u8] = b"Product\0";
pub(crate) const HID_DEVICE_PROPERTY_PRIMARY_USAGE: &[u8] = b"PrimaryUsage\0";
pub(crate) const HID_DEVICE_PROPERTY_PRIMARY_USAGE_PAGE: &[u8] = b"PrimaryUsagePage\0";
#[allow(non_upper_case_globals)]
pub(crate) const kHIDPage_AppleVendor: i32 = 0xff00;
#[allow(non_upper_case_globals)]
pub(crate) const kHIDUsage_AppleVendor_TemperatureSensor: i32 = 0x0005;
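    // Builds a { PrimaryUsagePage: page, PrimaryUsage: usage } dictionary used to filter HID
    // services (e.g. the Apple vendor temperature sensors above). Per the Core Foundation
    // "Create" rule, the caller owns the returned dictionary and should eventually release it.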
pub(crate) fn matching(page: i32, usage: i32) -> CFDictionaryRef {
unsafe {
let keys = [
CFStringCreateWithCStringNoCopy(
null() as *const _,
HID_DEVICE_PROPERTY_PRIMARY_USAGE_PAGE.as_ptr() as *const _,
core_foundation_sys::string::kCFStringEncodingUTF8,
core_foundation_sys::base::kCFAllocatorNull as *mut _,
),
CFStringCreateWithCStringNoCopy(
null() as *const _,
HID_DEVICE_PROPERTY_PRIMARY_USAGE.as_ptr() as *const _,
core_foundation_sys::string::kCFStringEncodingUTF8,
core_foundation_sys::base::kCFAllocatorNull as *mut _,
),
];
let nums = [
CFNumberCreate(null(), kCFNumberSInt32Type, &page as *const _ as *const _),
CFNumberCreate(null(), kCFNumberSInt32Type, &usage as *const _ as *const _),
];
let dict = CFDictionaryCreate(
null(),
&keys as *const _ as *const _,
&nums as *const _ as *const _,
2,
&kCFTypeDictionaryKeyCallBacks,
&kCFTypeDictionaryValueCallBacks,
);
for key in keys {
CFRelease(key as _);
}
for num in nums {
CFRelease(num as _);
}
dict
}
}
}
#[cfg(all(not(feature = "apple-sandbox"), target_arch = "aarch64"))]
pub use io_service::*;
| KeyData_pLimitData_t | identifier_name |
lib.rs | //! This crate allows running a process with a timeout, with the option to
//! terminate it automatically afterward. The latter is surprisingly difficult
//! to achieve on Unix, since process identifiers can be arbitrarily reassigned
//! when no longer used. Thus, it would be extremely easy to inadvertently
//! terminate an unexpected process. This crate protects against that
//! possibility.
//!
//! Methods for creating timeouts are available on [`ChildExt`], which is
//! implemented for [`Child`]. They each return a builder of options to
//! configure how the timeout should be applied.
//!
//! # Implementation
//!
//! All traits are [sealed], meaning that they can only be implemented by this
//! crate. Otherwise, backward compatibility would be more difficult to
//! maintain for new features.
//!
//! # Features
//!
//! These features are optional and can be enabled or disabled in a
//! "Cargo.toml" file.
//!
//! ### Optional Features
//!
//! - **crossbeam-channel** -
//! Changes the implementation to use crate [crossbeam-channel] for better
//! performance.
//!
//! # Comparable Crates
//!
//! - [wait-timeout] -
//! Made for a related purpose but does not provide the same functionality.
//! Processes cannot be terminated automatically, and there is no counterpart
//! of [`Child::wait_with_output`] to read output while setting a timeout.
//! This crate aims to fill in those gaps and simplify the implementation,
//! now that [`Receiver::recv_timeout`] exists.
//!
//! # Examples
//!
//! ```
//! use std::io;
//! use std::process::Command;
//! use std::process::Stdio;
//! use std::time::Duration;
//!
//! use process_control::ChildExt;
//! use process_control::Timeout;
//!
//! let process = Command::new("echo")
//! .arg("hello")
//! .stdout(Stdio::piped())
//! .spawn()?;
//!
//! let output = process
//! .with_output_timeout(Duration::from_secs(1))
//! .terminating()
//! .wait()?
//! .ok_or_else(|| {
//! io::Error::new(io::ErrorKind::TimedOut, "Process timed out")
//! })?;
//! assert_eq!(b"hello", &output.stdout[..5]);
//! #
//! # Ok::<_, io::Error>(())
//! ```
//!
//! [crossbeam-channel]: https://crates.io/crates/crossbeam-channel
//! [`Receiver::recv_timeout`]: ::std::sync::mpsc::Receiver::recv_timeout
//! [sealed]: https://rust-lang.github.io/api-guidelines/future-proofing.html#c-sealed
//! [wait-timeout]: https://crates.io/crates/wait-timeout
// Only require a nightly compiler when building documentation for docs.rs.
// This is a private option that should not be used.
// https://github.com/rust-lang/docs.rs/issues/147#issuecomment-389544407
#![cfg_attr(process_control_docs_rs, feature(doc_cfg))]
#![warn(unused_results)]
use std::fmt;
use std::fmt::Display;
use std::fmt::Formatter;
use std::io;
use std::process;
use std::process::Child;
use std::time::Duration;
#[cfg_attr(unix, path = "unix.rs")]
#[cfg_attr(windows, path = "windows.rs")]
mod imp;
mod timeout;
/// A wrapper that stores enough information to terminate a process.
///
/// Instances can only be constructed using [`ChildExt::terminator`].
#[derive(Debug)]
pub struct Terminator(imp::Handle);
impl Terminator {
/// Terminates a process as immediately as the operating system allows.
///
/// Behavior should be equivalent to calling [`Child::kill`] for the same
/// process. However, this method does not require a reference of any kind
/// to the [`Child`] instance of the process, meaning that it can be called
/// even in some unsafe circumstances.
///
/// # Safety
///
/// If the process is no longer running, a different process may be
/// terminated on some operating systems. Reuse of process identifiers
/// makes it impossible for this method to determine if the intended
/// process still exists.
///
/// Thus, this method should not be used in production code, as
/// [`Child::kill`] more safely provides the same functionality. It is only
/// used for testing in this crate and may be used similarly in others.
///
/// # Examples
///
/// ```
/// # use std::io;
/// use std::path::Path;
/// use std::process::Command;
/// use std::thread;
///
/// use process_control::ChildExt;
///
/// let dir = Path::new("hello");
/// let mut process = Command::new("mkdir").arg(dir).spawn()?;
/// let terminator = process.terminator()?;
///
/// let thread = thread::spawn(move || process.wait());
    /// if !dir.exists() {
/// // [process.kill] requires a mutable reference.
/// unsafe { terminator.terminate()? }
/// }
///
/// let exit_status = thread.join().expect("thread panicked")?;
/// println!("exited {}", exit_status);
/// #
/// # Ok::<_, io::Error>(())
/// ```
#[inline]
pub unsafe fn terminate(&self) -> io::Result<()> {
self.0.terminate()
}
}
/// Equivalent to [`process::ExitStatus`] but allows for greater accuracy.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct ExitStatus(imp::ExitStatus);
impl ExitStatus {
/// Equivalent to [`process::ExitStatus::success`].
#[inline]
#[must_use]
pub fn success(self) -> bool {
self.0.success()
}
/// Equivalent to [`process::ExitStatus::code`], but a more accurate value
/// will be returned if possible.
#[inline]
#[must_use]
pub fn code(self) -> Option<i64> {
self.0.code().map(Into::into)
}
/// Equivalent to [`ExitStatusExt::signal`].
///
/// [`ExitStatusExt::signal`]: ::std::os::unix::process::ExitStatusExt::signal
#[cfg(any(unix, doc))]
#[cfg_attr(process_control_docs_rs, doc(cfg(unix)))]
#[inline]
#[must_use]
pub fn signal(self) -> Option<::std::os::raw::c_int> {
self.0.signal()
}
}
impl Display for ExitStatus {
#[inline]
    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {
self.0.fmt(formatter)
}
}
impl From<process::ExitStatus> for ExitStatus {
#[inline]
fn from(value: process::ExitStatus) -> Self {
#[cfg_attr(windows, allow(clippy::useless_conversion))]
Self(value.into())
}
}
/// Equivalent to [`process::Output`] but holds an instance of [`ExitStatus`]
/// from this crate.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Output {
/// Equivalent to [`process::Output::status`].
pub status: ExitStatus,
/// Equivalent to [`process::Output::stdout`].
pub stdout: Vec<u8>,
/// Equivalent to [`process::Output::stderr`].
pub stderr: Vec<u8>,
}
impl From<process::Output> for Output {
#[inline]
fn from(value: process::Output) -> Self {
Self {
status: value.status.into(),
stdout: value.stdout,
stderr: value.stderr,
}
}
}
/// A temporary wrapper for a process timeout.
pub trait Timeout: private::Sealed {
/// The type returned by [`wait`].
///
/// [`wait`]: Self::wait
type Result;
/// Causes [`wait`] to never suppress an error.
///
/// Typically, errors terminating the process will be ignored, as they are
/// often less important than the result. However, when this method is
/// called, those errors will be returned as well.
///
/// [`wait`]: Self::wait
#[must_use]
fn strict_errors(self) -> Self;
/// Causes the process to be terminated if it exceeds the time limit.
///
/// Process identifier reuse by the system will be mitigated. There should
/// never be a scenario that causes an unintended process to be terminated.
#[must_use]
fn terminating(self) -> Self;
/// Runs the process to completion, aborting if it exceeds the time limit.
///
/// At least one thread will be created to wait on the process without
/// blocking the current thread.
///
/// If the time limit is exceeded before the process finishes, `Ok(None)`
/// will be returned. However, the process will not be terminated in that
/// case unless [`terminating`] is called beforehand. It is recommended to
/// always call that method to allow system resources to be freed.
///
/// The stdin handle to the process, if it exists, will be closed before
/// waiting. Otherwise, the process would assuredly time out when reading
/// from that pipe.
///
/// This method cannot guarantee that the same [`io::ErrorKind`] variants
/// will be returned in the future for the same types of failures. Allowing
/// these breakages is required to enable calling [`Child::kill`]
/// internally.
///
/// [`terminating`]: Self::terminating
fn wait(self) -> io::Result<Option<Self::Result>>;
}
/// Extensions to [`Child`] for easily terminating processes.
///
/// For more information, see [the module-level documentation][crate].
pub trait ChildExt<'a>: private::Sealed {
/// The type returned by [`with_timeout`].
///
/// [`with_timeout`]: Self::with_timeout
type ExitStatusTimeout: 'a + Timeout<Result = ExitStatus>;
/// The type returned by [`with_output_timeout`].
///
/// [`with_output_timeout`]: Self::with_output_timeout
type OutputTimeout: Timeout<Result = Output>;
/// Creates an instance of [`Terminator`] for this process.
///
/// # Examples
///
/// ```
/// # use std::io;
/// use std::process::Command;
///
/// use process_control::ChildExt;
///
/// let process = Command::new("echo").spawn()?;
/// let terminator = process.terminator()?;
/// #
/// # Ok::<_, io::Error>(())
/// ```
fn terminator(&self) -> io::Result<Terminator>;
/// Creates an instance of [`Timeout`] that yields [`ExitStatus`] for this
/// process.
///
/// This method parallels [`Child::wait`] when the process must finish
/// within a time limit.
///
/// # Examples
///
/// ```
/// # use std::io;
/// use std::process::Command;
/// use std::time::Duration;
///
/// use process_control::ChildExt;
/// use process_control::Timeout;
///
/// let exit_status = Command::new("echo")
/// .spawn()?
/// .with_timeout(Duration::from_secs(1))
/// .terminating()
/// .wait()?
/// .expect("process timed out");
/// assert!(exit_status.success());
/// #
/// # Ok::<_, io::Error>(())
/// ```
#[must_use]
fn with_timeout(
&'a mut self,
time_limit: Duration,
) -> Self::ExitStatusTimeout;
/// Creates an instance of [`Timeout`] that yields [`Output`] for this
/// process.
///
/// This method parallels [`Child::wait_with_output`] when the process must
/// finish within a time limit.
///
/// # Examples
///
/// ```
/// # use std::io;
/// use std::process::Command;
/// use std::time::Duration;
///
/// use process_control::ChildExt;
/// use process_control::Timeout;
///
/// let output = Command::new("echo")
/// .spawn()?
/// .with_output_timeout(Duration::from_secs(1))
/// .terminating()
/// .wait()?
/// .expect("process timed out");
/// assert!(output.status.success());
/// #
/// # Ok::<_, io::Error>(())
/// ```
#[must_use]
fn with_output_timeout(self, time_limit: Duration) -> Self::OutputTimeout;
}
impl<'a> ChildExt<'a> for Child {
type ExitStatusTimeout = timeout::ExitStatusTimeout<'a>;
type OutputTimeout = timeout::OutputTimeout;
#[inline]
fn terminator(&self) -> io::Result<Terminator> {
imp::Handle::new(self).map(Terminator)
}
#[inline]
fn with_timeout(
&'a mut self,
time_limit: Duration,
) -> Self::ExitStatusTimeout {
Self::ExitStatusTimeout::new(self, time_limit)
}
#[inline]
fn with_output_timeout(self, time_limit: Duration) -> Self::OutputTimeout {
Self::OutputTimeout::new(self, time_limit)
}
}
mod private {
use std::process::Child;
use super::timeout;
pub trait Sealed {}
impl Sealed for Child {}
impl Sealed for timeout::ExitStatusTimeout<'_> {}
impl Sealed for timeout::OutputTimeout {}
}
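// Illustrative sketch (not part of the original file): `strict_errors` combines
// with the other builder methods when termination failures should be reported
// instead of ignored. Hypothetical helper; names mirror the items defined above.
//
// fn wait_strict(child: &mut Child) -> io::Result<Option<ExitStatus>> {
//     child
//         .with_timeout(Duration::from_secs(5))
//         .strict_errors()
//         .terminating()
//         .wait()
// }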
lib.rs | //! This crate allows running a process with a timeout, with the option to
//! terminate it automatically afterward. The latter is surprisingly difficult
//! to achieve on Unix, since process identifiers can be arbitrarily reassigned
//! when no longer used. Thus, it would be extremely easy to inadvertently
//! terminate an unexpected process. This crate protects against that
//! possibility.
//!
//! Methods for creating timeouts are available on [`ChildExt`], which is
//! implemented for [`Child`]. They each return a builder of options to
//! configure how the timeout should be applied.
//!
//! # Implementation
//!
//! All traits are [sealed], meaning that they can only be implemented by this
//! crate. Otherwise, backward compatibility would be more difficult to
//! maintain for new features.
//!
//! # Features
//!
//! These features are optional and can be enabled or disabled in a
//! "Cargo.toml" file.
//!
//! ### Optional Features
//!
//! - **crossbeam-channel** -
//! Changes the implementation to use crate [crossbeam-channel] for better
//! performance.
//!
//! # Comparable Crates
//!
//! - [wait-timeout] -
//! Made for a related purpose but does not provide the same functionality.
//! Processes cannot be terminated automatically, and there is no counterpart
//! of [`Child::wait_with_output`] to read output while setting a timeout.
//! This crate aims to fill in those gaps and simplify the implementation,
//! now that [`Receiver::recv_timeout`] exists.
//!
//! # Examples
//!
//! ```
//! use std::io;
//! use std::process::Command;
//! use std::process::Stdio;
//! use std::time::Duration;
//!
//! use process_control::ChildExt;
//! use process_control::Timeout;
//!
//! let process = Command::new("echo")
//! .arg("hello")
//! .stdout(Stdio::piped())
//! .spawn()?;
//!
//! let output = process
//! .with_output_timeout(Duration::from_secs(1))
//! .terminating()
//! .wait()?
//! .ok_or_else(|| {
//! io::Error::new(io::ErrorKind::TimedOut, "Process timed out")
//! })?;
//! assert_eq!(b"hello", &output.stdout[..5]);
//! #
//! # Ok::<_, io::Error>(())
//! ```
//!
//! [crossbeam-channel]: https://crates.io/crates/crossbeam-channel
//! [`Receiver::recv_timeout`]: ::std::sync::mpsc::Receiver::recv_timeout
//! [sealed]: https://rust-lang.github.io/api-guidelines/future-proofing.html#c-sealed
//! [wait-timeout]: https://crates.io/crates/wait-timeout
// Only require a nightly compiler when building documentation for docs.rs.
// This is a private option that should not be used.
// https://github.com/rust-lang/docs.rs/issues/147#issuecomment-389544407
#![cfg_attr(process_control_docs_rs, feature(doc_cfg))]
#![warn(unused_results)]
use std::fmt;
use std::fmt::Display;
use std::fmt::Formatter;
use std::io;
use std::process;
use std::process::Child;
use std::time::Duration;
#[cfg_attr(unix, path = "unix.rs")]
#[cfg_attr(windows, path = "windows.rs")]
mod imp;
mod timeout;
/// A wrapper that stores enough information to terminate a process.
///
/// Instances can only be constructed using [`ChildExt::terminator`].
#[derive(Debug)]
pub struct Terminator(imp::Handle);
impl Terminator {
/// Terminates a process as immediately as the operating system allows.
///
/// Behavior should be equivalent to calling [`Child::kill`] for the same
/// process. However, this method does not require a reference of any kind
/// to the [`Child`] instance of the process, meaning that it can be called
/// even in some unsafe circumstances.
///
/// # Safety
///
/// If the process is no longer running, a different process may be
/// terminated on some operating systems. Reuse of process identifiers
/// makes it impossible for this method to determine if the intended
/// process still exists.
///
/// Thus, this method should not be used in production code, as
/// [`Child::kill`] more safely provides the same functionality. It is only
/// used for testing in this crate and may be used similarly in others.
///
/// # Examples
///
/// ```
/// # use std::io;
/// use std::path::Path;
/// use std::process::Command;
/// use std::thread;
///
/// use process_control::ChildExt;
///
/// let dir = Path::new("hello");
/// let mut process = Command::new("mkdir").arg(dir).spawn()?;
/// let terminator = process.terminator()?;
///
/// let thread = thread::spawn(move || process.wait());
    /// if !dir.exists() {
/// // [process.kill] requires a mutable reference.
/// unsafe { terminator.terminate()? }
/// }
///
/// let exit_status = thread.join().expect("thread panicked")?;
/// println!("exited {}", exit_status);
/// #
/// # Ok::<_, io::Error>(())
/// ```
#[inline]
pub unsafe fn terminate(&self) -> io::Result<()> {
self.0.terminate()
}
}
/// Equivalent to [`process::ExitStatus`] but allows for greater accuracy.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct ExitStatus(imp::ExitStatus);
impl ExitStatus {
/// Equivalent to [`process::ExitStatus::success`].
#[inline]
#[must_use]
pub fn success(self) -> bool {
self.0.success()
}
/// Equivalent to [`process::ExitStatus::code`], but a more accurate value
/// will be returned if possible.
#[inline]
#[must_use]
pub fn code(self) -> Option<i64> {
self.0.code().map(Into::into)
}
/// Equivalent to [`ExitStatusExt::signal`].
///
/// [`ExitStatusExt::signal`]: ::std::os::unix::process::ExitStatusExt::signal
#[cfg(any(unix, doc))]
#[cfg_attr(process_control_docs_rs, doc(cfg(unix)))]
#[inline]
#[must_use]
pub fn signal(self) -> Option<::std::os::raw::c_int> {
self.0.signal()
}
}
impl Display for ExitStatus {
#[inline]
fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {
self.0.fmt(formatter)
}
}
impl From<process::ExitStatus> for ExitStatus {
#[inline]
fn from(value: process::ExitStatus) -> Self {
#[cfg_attr(windows, allow(clippy::useless_conversion))]
Self(value.into())
}
}
/// Equivalent to [`process::Output`] but holds an instance of [`ExitStatus`]
/// from this crate.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Output {
/// Equivalent to [`process::Output::status`].
pub status: ExitStatus,
/// Equivalent to [`process::Output::stdout`].
pub stdout: Vec<u8>,
/// Equivalent to [`process::Output::stderr`].
pub stderr: Vec<u8>,
}
impl From<process::Output> for Output {
#[inline]
fn from(value: process::Output) -> Self {
Self {
status: value.status.into(),
stdout: value.stdout,
stderr: value.stderr,
}
}
}
/// A temporary wrapper for a process timeout.
pub trait Timeout: private::Sealed {
/// The type returned by [`wait`].
///
/// [`wait`]: Self::wait
type Result;
/// Causes [`wait`] to never suppress an error.
///
/// Typically, errors terminating the process will be ignored, as they are
/// often less important than the result. However, when this method is
/// called, those errors will be returned as well.
///
/// [`wait`]: Self::wait
#[must_use]
fn strict_errors(self) -> Self;
/// Causes the process to be terminated if it exceeds the time limit.
///
/// Process identifier reuse by the system will be mitigated. There should
/// never be a scenario that causes an unintended process to be terminated.
#[must_use]
fn terminating(self) -> Self;
/// Runs the process to completion, aborting if it exceeds the time limit.
///
/// At least one thread will be created to wait on the process without
/// blocking the current thread.
///
/// If the time limit is exceeded before the process finishes, `Ok(None)`
/// will be returned. However, the process will not be terminated in that
/// case unless [`terminating`] is called beforehand. It is recommended to
/// always call that method to allow system resources to be freed.
///
/// The stdin handle to the process, if it exists, will be closed before
/// waiting. Otherwise, the process would assuredly time out when reading
/// from that pipe.
///
/// This method cannot guarantee that the same [`io::ErrorKind`] variants
/// will be returned in the future for the same types of failures. Allowing
/// these breakages is required to enable calling [`Child::kill`]
/// internally.
///
/// [`terminating`]: Self::terminating
fn wait(self) -> io::Result<Option<Self::Result>>;
}
/// Extensions to [`Child`] for easily terminating processes.
///
/// For more information, see [the module-level documentation][crate].
pub trait ChildExt<'a>: private::Sealed {
/// The type returned by [`with_timeout`].
///
/// [`with_timeout`]: Self::with_timeout
type ExitStatusTimeout: 'a + Timeout<Result = ExitStatus>;
/// The type returned by [`with_output_timeout`].
///
/// [`with_output_timeout`]: Self::with_output_timeout
type OutputTimeout: Timeout<Result = Output>;
/// Creates an instance of [`Terminator`] for this process.
///
/// # Examples
///
/// ```
/// # use std::io;
/// use std::process::Command;
///
/// use process_control::ChildExt;
///
/// let process = Command::new("echo").spawn()?;
/// let terminator = process.terminator()?;
/// #
/// # Ok::<_, io::Error>(())
/// ```
fn terminator(&self) -> io::Result<Terminator>;
/// Creates an instance of [`Timeout`] that yields [`ExitStatus`] for this
/// process.
///
/// This method parallels [`Child::wait`] when the process must finish
/// within a time limit.
///
/// # Examples
///
/// ```
/// # use std::io;
/// use std::process::Command;
/// use std::time::Duration;
///
/// use process_control::ChildExt;
/// use process_control::Timeout;
///
/// let exit_status = Command::new("echo")
/// .spawn()?
/// .with_timeout(Duration::from_secs(1))
/// .terminating()
/// .wait()?
/// .expect("process timed out");
/// assert!(exit_status.success());
/// #
/// # Ok::<_, io::Error>(())
/// ```
#[must_use]
fn with_timeout(
&'a mut self,
time_limit: Duration,
) -> Self::ExitStatusTimeout;
/// Creates an instance of [`Timeout`] that yields [`Output`] for this
/// process.
///
/// This method parallels [`Child::wait_with_output`] when the process must
/// finish within a time limit.
///
/// # Examples
///
/// ```
/// # use std::io;
/// use std::process::Command;
/// use std::time::Duration;
///
/// use process_control::ChildExt;
/// use process_control::Timeout;
///
/// let output = Command::new("echo")
/// .spawn()?
/// .with_output_timeout(Duration::from_secs(1))
/// .terminating()
/// .wait()?
/// .expect("process timed out");
/// assert!(output.status.success());
/// #
/// # Ok::<_, io::Error>(())
/// ```
#[must_use]
fn with_output_timeout(self, time_limit: Duration) -> Self::OutputTimeout;
}
impl<'a> ChildExt<'a> for Child {
type ExitStatusTimeout = timeout::ExitStatusTimeout<'a>;
type OutputTimeout = timeout::OutputTimeout;
#[inline]
    fn terminator(&self) -> io::Result<Terminator> {
        imp::Handle::new(self).map(Terminator)
    }
#[inline]
fn with_timeout(
&'a mut self,
time_limit: Duration,
) -> Self::ExitStatusTimeout {
Self::ExitStatusTimeout::new(self, time_limit)
}
#[inline]
fn with_output_timeout(self, time_limit: Duration) -> Self::OutputTimeout {
Self::OutputTimeout::new(self, time_limit)
}
}
mod private {
use std::process::Child;
use super::timeout;
pub trait Sealed {}
impl Sealed for Child {}
impl Sealed for timeout::ExitStatusTimeout<'_> {}
impl Sealed for timeout::OutputTimeout {}
}
lib.rs | //! This crate allows running a process with a timeout, with the option to
//! terminate it automatically afterward. The latter is surprisingly difficult
//! to achieve on Unix, since process identifiers can be arbitrarily reassigned
//! when no longer used. Thus, it would be extremely easy to inadvertently
//! terminate an unexpected process. This crate protects against that
//! possibility.
//!
//! Methods for creating timeouts are available on [`ChildExt`], which is
//! implemented for [`Child`]. They each return a builder of options to
//! configure how the timeout should be applied.
//!
//! # Implementation
//!
//! All traits are [sealed], meaning that they can only be implemented by this
//! crate. Otherwise, backward compatibility would be more difficult to
//! maintain for new features.
//!
//! # Features
//!
//! These features are optional and can be enabled or disabled in a
//! "Cargo.toml" file.
//!
//! ### Optional Features
//!
//! - **crossbeam-channel** -
//! Changes the implementation to use crate [crossbeam-channel] for better
//! performance.
//!
//! # Comparable Crates
//!
//! - [wait-timeout] -
//! Made for a related purpose but does not provide the same functionality.
//! Processes cannot be terminated automatically, and there is no counterpart
//! of [`Child::wait_with_output`] to read output while setting a timeout.
//! This crate aims to fill in those gaps and simplify the implementation,
//! now that [`Receiver::recv_timeout`] exists.
//!
//! # Examples
//!
//! ```
//! use std::io;
//! use std::process::Command;
//! use std::process::Stdio;
//! use std::time::Duration;
//!
//! use process_control::ChildExt;
//! use process_control::Timeout;
//!
//! let process = Command::new("echo")
//! .arg("hello")
//! .stdout(Stdio::piped())
//! .spawn()?;
//!
//! let output = process
//! .with_output_timeout(Duration::from_secs(1))
//! .terminating()
//! .wait()?
//! .ok_or_else(|| {
//! io::Error::new(io::ErrorKind::TimedOut, "Process timed out")
//! })?;
//! assert_eq!(b"hello", &output.stdout[..5]);
//! #
//! # Ok::<_, io::Error>(())
//! ```
//!
//! [crossbeam-channel]: https://crates.io/crates/crossbeam-channel
//! [`Receiver::recv_timeout`]: ::std::sync::mpsc::Receiver::recv_timeout
//! [sealed]: https://rust-lang.github.io/api-guidelines/future-proofing.html#c-sealed
//! [wait-timeout]: https://crates.io/crates/wait-timeout
// Only require a nightly compiler when building documentation for docs.rs.
// This is a private option that should not be used.
// https://github.com/rust-lang/docs.rs/issues/147#issuecomment-389544407
#![cfg_attr(process_control_docs_rs, feature(doc_cfg))]
#![warn(unused_results)]
use std::fmt;
use std::fmt::Display;
use std::fmt::Formatter;
use std::io;
use std::process;
use std::process::Child;
use std::time::Duration;
#[cfg_attr(unix, path = "unix.rs")]
#[cfg_attr(windows, path = "windows.rs")]
mod imp;
mod timeout;
/// A wrapper that stores enough information to terminate a process.
///
/// Instances can only be constructed using [`ChildExt::terminator`].
#[derive(Debug)]
pub struct Terminator(imp::Handle);
impl Terminator {
/// Terminates a process as immediately as the operating system allows.
///
/// Behavior should be equivalent to calling [`Child::kill`] for the same
/// process. However, this method does not require a reference of any kind
/// to the [`Child`] instance of the process, meaning that it can be called
/// even in some unsafe circumstances.
///
/// # Safety
///
/// If the process is no longer running, a different process may be
/// terminated on some operating systems. Reuse of process identifiers
/// makes it impossible for this method to determine if the intended
/// process still exists.
///
/// Thus, this method should not be used in production code, as
/// [`Child::kill`] more safely provides the same functionality. It is only
/// used for testing in this crate and may be used similarly in others.
///
/// # Examples
///
/// ```
/// # use std::io;
/// use std::path::Path;
/// use std::process::Command;
/// use std::thread;
///
/// use process_control::ChildExt;
///
/// let dir = Path::new("hello");
/// let mut process = Command::new("mkdir").arg(dir).spawn()?;
/// let terminator = process.terminator()?;
///
/// let thread = thread::spawn(move || process.wait());
    /// if !dir.exists() {
/// // [process.kill] requires a mutable reference.
/// unsafe { terminator.terminate()? }
/// }
///
/// let exit_status = thread.join().expect("thread panicked")?;
/// println!("exited {}", exit_status);
/// #
/// # Ok::<_, io::Error>(())
/// ```
#[inline]
pub unsafe fn terminate(&self) -> io::Result<()> {
self.0.terminate()
}
}
/// Equivalent to [`process::ExitStatus`] but allows for greater accuracy.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct ExitStatus(imp::ExitStatus);
impl ExitStatus {
/// Equivalent to [`process::ExitStatus::success`].
#[inline]
#[must_use]
pub fn success(self) -> bool {
self.0.success()
}
/// Equivalent to [`process::ExitStatus::code`], but a more accurate value
/// will be returned if possible.
#[inline]
#[must_use]
pub fn code(self) -> Option<i64> {
self.0.code().map(Into::into)
}
/// Equivalent to [`ExitStatusExt::signal`].
///
/// [`ExitStatusExt::signal`]: ::std::os::unix::process::ExitStatusExt::signal
#[cfg(any(unix, doc))]
#[cfg_attr(process_control_docs_rs, doc(cfg(unix)))]
#[inline]
#[must_use]
pub fn signal(self) -> Option<::std::os::raw::c_int> {
self.0.signal()
}
}
impl Display for ExitStatus {
#[inline]
fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {
self.0.fmt(formatter)
}
}
impl From<process::ExitStatus> for ExitStatus {
#[inline]
fn from(value: process::ExitStatus) -> Self {
#[cfg_attr(windows, allow(clippy::useless_conversion))]
Self(value.into())
}
}
/// Equivalent to [`process::Output`] but holds an instance of [`ExitStatus`]
/// from this crate.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Output {
/// Equivalent to [`process::Output::status`].
pub status: ExitStatus,
/// Equivalent to [`process::Output::stdout`].
pub stdout: Vec<u8>,
/// Equivalent to [`process::Output::stderr`].
pub stderr: Vec<u8>,
}
impl From<process::Output> for Output {
#[inline]
fn from(value: process::Output) -> Self {
Self {
status: value.status.into(),
stdout: value.stdout,
stderr: value.stderr,
}
}
}
/// A temporary wrapper for a process timeout.
pub trait Timeout: private::Sealed {
/// The type returned by [`wait`].
///
/// [`wait`]: Self::wait
type Result;
/// Causes [`wait`] to never suppress an error.
///
/// Typically, errors terminating the process will be ignored, as they are
/// often less important than the result. However, when this method is
/// called, those errors will be returned as well.
///
/// [`wait`]: Self::wait
#[must_use]
fn strict_errors(self) -> Self;
/// Causes the process to be terminated if it exceeds the time limit.
///
/// Process identifier reuse by the system will be mitigated. There should
/// never be a scenario that causes an unintended process to be terminated.
#[must_use]
fn terminating(self) -> Self;
/// Runs the process to completion, aborting if it exceeds the time limit.
///
/// At least one thread will be created to wait on the process without
/// blocking the current thread.
///
/// If the time limit is exceeded before the process finishes, `Ok(None)`
/// will be returned. However, the process will not be terminated in that
/// case unless [`terminating`] is called beforehand. It is recommended to
/// always call that method to allow system resources to be freed.
///
/// The stdin handle to the process, if it exists, will be closed before
/// waiting. Otherwise, the process would assuredly time out when reading
/// from that pipe.
///
/// This method cannot guarantee that the same [`io::ErrorKind`] variants
/// will be returned in the future for the same types of failures. Allowing
/// these breakages is required to enable calling [`Child::kill`]
/// internally.
///
/// [`terminating`]: Self::terminating
fn wait(self) -> io::Result<Option<Self::Result>>;
}
/// Extensions to [`Child`] for easily terminating processes.
///
/// For more information, see [the module-level documentation][crate].
pub trait ChildExt<'a>: private::Sealed {
/// The type returned by [`with_timeout`].
///
/// [`with_timeout`]: Self::with_timeout
type ExitStatusTimeout: 'a + Timeout<Result = ExitStatus>;
/// The type returned by [`with_output_timeout`].
///
/// [`with_output_timeout`]: Self::with_output_timeout
type OutputTimeout: Timeout<Result = Output>;
/// Creates an instance of [`Terminator`] for this process.
///
/// # Examples
///
/// ```
/// # use std::io;
/// use std::process::Command;
///
/// use process_control::ChildExt;
///
/// let process = Command::new("echo").spawn()?;
/// let terminator = process.terminator()?;
/// #
/// # Ok::<_, io::Error>(())
/// ```
fn terminator(&self) -> io::Result<Terminator>;
/// Creates an instance of [`Timeout`] that yields [`ExitStatus`] for this
/// process.
///
/// This method parallels [`Child::wait`] when the process must finish
/// within a time limit.
///
/// # Examples
///
/// ```
/// # use std::io;
/// use std::process::Command;
/// use std::time::Duration;
///
/// use process_control::ChildExt;
/// use process_control::Timeout;
///
/// let exit_status = Command::new("echo")
/// .spawn()?
/// .with_timeout(Duration::from_secs(1))
/// .terminating()
/// .wait()?
/// .expect("process timed out");
/// assert!(exit_status.success());
/// #
/// # Ok::<_, io::Error>(())
/// ```
#[must_use]
fn with_timeout(
&'a mut self,
time_limit: Duration,
) -> Self::ExitStatusTimeout;
/// Creates an instance of [`Timeout`] that yields [`Output`] for this
/// process.
///
/// This method parallels [`Child::wait_with_output`] when the process must
/// finish within a time limit.
///
/// # Examples
///
/// ```
/// # use std::io;
/// use std::process::Command;
/// use std::time::Duration;
///
/// use process_control::ChildExt;
/// use process_control::Timeout;
///
/// let output = Command::new("echo")
/// .spawn()?
/// .with_output_timeout(Duration::from_secs(1))
/// .terminating()
/// .wait()?
/// .expect("process timed out");
/// assert!(output.status.success());
/// #
/// # Ok::<_, io::Error>(())
/// ```
#[must_use]
fn with_output_timeout(self, time_limit: Duration) -> Self::OutputTimeout;
}
impl<'a> ChildExt<'a> for Child {
type ExitStatusTimeout = timeout::ExitStatusTimeout<'a>;
type OutputTimeout = timeout::OutputTimeout;
#[inline]
fn terminator(&self) -> io::Result<Terminator> {
imp::Handle::new(self).map(Terminator)
}
#[inline]
fn with_timeout(
&'a mut self,
time_limit: Duration,
) -> Self::ExitStatusTimeout {
Self::ExitStatusTimeout::new(self, time_limit)
}
#[inline]
fn with_output_timeout(self, time_limit: Duration) -> Self::OutputTimeout {
Self::OutputTimeout::new(self, time_limit)
}
}
mod private {
use std::process::Child;
use super::timeout;
pub trait Sealed {}
impl Sealed for Child {}
impl Sealed for timeout::ExitStatusTimeout<'_> {}
impl Sealed for timeout::OutputTimeout {}
}
app.rs | // vim: tw=80
use std::{
collections::{btree_map, BTreeMap},
error::Error,
mem,
num::NonZeroUsize,
ops::AddAssign,
};
use cfg_if::cfg_if;
use ieee754::Ieee754;
use nix::{
sys::time::TimeSpec,
time::{clock_gettime, ClockId},
};
use regex::Regex;
cfg_if! {
if #[cfg(target_os = "freebsd")] {
mod freebsd;
use freebsd::{SnapshotIter};
const CLOCK_UPTIME: ClockId = ClockId::CLOCK_UPTIME;
} else if #[cfg(target_os = "linux")] {
mod linux;
use linux::SnapshotIter;
const CLOCK_UPTIME: ClockId = ClockId::CLOCK_BOOTTIME;
}
}
/// A snapshot in time of a dataset's statistics.
///
/// The various fields are not saved atomically, but ought to be close.
#[derive(Clone, Debug)]
struct Snapshot {
name: String,
nunlinked: u64,
nunlinks: u64,
nread: u64,
reads: u64,
nwritten: u64,
writes: u64,
}
impl Snapshot {
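    /// Turn raw counters into per-second rates. With a previous snapshot the
    /// counter deltas are divided by the elapsed time `etime`; without one the
    /// totals are divided by the time since boot (see `DataSource::iter`).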
fn compute(&self, prev: Option<&Self>, etime: f64) -> Element {
if let Some(prev) = prev {
Element {
name: self.name.clone(),
ops_r: (self.reads - prev.reads) as f64 / etime,
r_s: (self.nread - prev.nread) as f64 / etime,
ops_w: (self.writes - prev.writes) as f64 / etime,
w_s: (self.nwritten - prev.nwritten) as f64 / etime,
ops_d: (self.nunlinks - prev.nunlinks) as f64 / etime,
d_s: (self.nunlinked - prev.nunlinked) as f64 / etime,
}
} else {
Element {
name: self.name.clone(),
ops_r: self.reads as f64 / etime,
r_s: self.nread as f64 / etime,
ops_w: self.writes as f64 / etime,
w_s: self.nwritten as f64 / etime,
ops_d: self.nunlinks as f64 / etime,
d_s: self.nunlinked as f64 / etime,
}
}
}
/// Iterate through ZFS datasets, returning stats for each.
///
/// Iterates through every dataset beneath each of the given pools, or
/// through all datasets if no pool is supplied.
pub fn iter(pool: Option<&str>) -> Result<SnapshotIter, Box<dyn Error>> {
SnapshotIter::new(pool)
}
}
impl AddAssign<&Self> for Snapshot {
fn add_assign(&mut self, other: &Self) {
assert!(
other.name.starts_with(&self.name),
"Why would you want to combine two unrelated datasets?"
);
self.nunlinked += other.nunlinked;
self.nunlinks += other.nunlinks;
self.nread += other.nread;
self.reads += other.reads;
self.nwritten += other.nwritten;
self.writes += other.writes;
}
}
#[derive(Default)]
struct DataSource {
children: bool,
prev: BTreeMap<String, Snapshot>,
prev_ts: Option<TimeSpec>,
cur: BTreeMap<String, Snapshot>,
cur_ts: Option<TimeSpec>,
pools: Vec<String>,
}
impl DataSource {
fn new(children: bool, pools: Vec<String>) -> Self {
DataSource {
children,
pools,
..Default::default()
}
}
/// Iterate through all the datasets, returning current stats
fn iter(&mut self) -> impl Iterator<Item = Element> + '_ {
let etime = if let Some(prev_ts) = self.prev_ts.as_ref() {
let delta = *self.cur_ts.as_ref().unwrap() - *prev_ts;
delta.tv_sec() as f64 + delta.tv_nsec() as f64 * 1e-9
} else {
let boottime = clock_gettime(CLOCK_UPTIME).unwrap();
boottime.tv_sec() as f64 + boottime.tv_nsec() as f64 * 1e-9
};
DataSourceIter {
inner_iter: self.cur.iter(),
ds: self,
etime,
}
}
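    // For example, with_parents("zroot/ROOT/13.0-RELEASE") yields "zroot",
    // "zroot/ROOT", then "zroot/ROOT/13.0-RELEASE"; see the tests at the
    // bottom of this file.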
/// Iterate over all of the names of parent datasets of the argument
fn with_parents(s: &str) -> impl Iterator<Item = &str> {
s.char_indices().filter_map(move |(idx, c)| {
if c == '/' {
Some(s.split_at(idx).0)
} else if idx == s.len() - 1 {
Some(s)
} else {
None
}
})
}
fn refresh(&mut self) -> Result<(), Box<dyn Error>> {
let now = clock_gettime(ClockId::CLOCK_MONOTONIC)?;
self.prev = mem::take(&mut self.cur);
self.prev_ts = self.cur_ts.replace(now);
if self.pools.is_empty() {
for rss in Snapshot::iter(None).unwrap() {
let ss = rss?;
Self::upsert(&mut self.cur, ss, self.children);
}
} else {
for pool in self.pools.iter() {
for rss in Snapshot::iter(Some(pool)).unwrap() {
let ss = rss?;
Self::upsert(&mut self.cur, ss, self.children);
}
}
}
Ok(())
}
fn toggle_children(&mut self) -> Result<(), Box<dyn Error>> {
self.children ^= true;
// Wipe out previous statistics. The next refresh will report stats
// since boot.
self.refresh()?;
mem::take(&mut self.prev);
self.prev_ts = None;
Ok(())
}
/// Insert a snapshot into `cur`, and/or update it and its parents
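    ///
    /// With `children` enabled, the snapshot is stored under its own name and
    /// also accumulated into every ancestor dataset returned by
    /// `with_parents`; otherwise only the dataset's own entry is updated.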
fn upsert(
cur: &mut BTreeMap<String, Snapshot>,
ss: Snapshot,
children: bool,
) {
if children {
for dsname in Self::with_parents(&ss.name) {
match cur.entry(dsname.to_string()) {
btree_map::Entry::Vacant(ve) => {
if ss.name == dsname {
ve.insert(ss.clone());
} else {
let mut parent_ss = ss.clone();
parent_ss.name = dsname.to_string();
ve.insert(parent_ss);
}
}
btree_map::Entry::Occupied(mut oe) => {
*oe.get_mut() += &ss;
}
}
}
} else {
match cur.entry(ss.name.clone()) {
btree_map::Entry::Vacant(ve) => {
ve.insert(ss);
}
btree_map::Entry::Occupied(mut oe) => {
*oe.get_mut() += &ss;
}
}
};
}
}
struct DataSourceIter<'a> {
inner_iter: btree_map::Iter<'a, String, Snapshot>,
ds: &'a DataSource,
etime: f64,
}
impl<'a> Iterator for DataSourceIter<'a> {
type Item = Element;
fn next(&mut self) -> Option<Self::Item> {
self.inner_iter
.next()
.map(|(_, ss)| ss.compute(self.ds.prev.get(&ss.name), self.etime))
}
}
/// One thing to display in the table
#[derive(Clone, Debug)]
pub struct Element {
pub name: String,
/// Read IOPs
pub ops_r: f64,
/// Read B/s
pub r_s: f64,
/// Delete IOPs
pub ops_d: f64,
/// Delete B/s
pub d_s: f64,
/// Write IOPs
pub ops_w: f64,
/// Write B/s
pub w_s: f64,
}
#[derive(Default)]
pub struct App {
auto: bool,
data: DataSource,
depth: Option<NonZeroUsize>,
filter: Option<Regex>,
reverse: bool,
should_quit: bool,
/// 0-based index of the column to sort by, if any
sort_idx: Option<usize>,
}
impl App {
pub fn new(
auto: bool,
children: bool,
pools: Vec<String>,
depth: Option<NonZeroUsize>,
filter: Option<Regex>,
reverse: bool,
sort_idx: Option<usize>,
) -> Self {
let mut data = DataSource::new(children, pools);
data.refresh().unwrap();
App {
auto,
data,
depth,
filter,
reverse,
sort_idx,
..Default::default()
}
}
pub fn clear_filter(&mut self) {
self.filter = None;
}
/// Return the elements that should be displayed, in order
#[rustfmt::skip]
pub fn elements(&mut self) -> Vec<Element> {
let auto = self.auto;
let depth = self.depth;
let filter = &self.filter;
let mut v = self.data.iter()
.filter(move |elem| {
if let Some(limit) = depth {
let edepth = elem.name.split('/').count();
edepth <= limit.get()
} else {
true
}
}).filter(|elem|
filter.as_ref()
.map(|f| f.is_match(&elem.name))
.unwrap_or(true)
        ).filter(|elem| !auto || (elem.r_s + elem.w_s + elem.d_s > 1.0))
.collect::<Vec<_>>();
match (self.reverse, self.sort_idx) {
            // TODO: when the total_cmp feature stabilizes, use f64::total_cmp
// instead.
// https://github.com/rust-lang/rust/issues/72599
(false, Some(0)) => v.sort_by(|x, y| Ieee754::total_cmp(&x.ops_r, &y.ops_r)),
(true, Some(0)) => v.sort_by(|x, y| Ieee754::total_cmp(&y.ops_r, &x.ops_r)),
(false, Some(1)) => v.sort_by(|x, y| Ieee754::total_cmp(&x.r_s, &y.r_s)),
(true, Some(1)) => v.sort_by(|x, y| Ieee754::total_cmp(&y.r_s, &x.r_s)),
(false, Some(2)) => v.sort_by(|x, y| Ieee754::total_cmp(&x.ops_w, &y.ops_w)),
(true, Some(2)) => v.sort_by(|x, y| Ieee754::total_cmp(&y.ops_w, &x.ops_w)),
(false, Some(3)) => v.sort_by(|x, y| Ieee754::total_cmp(&x.w_s, &y.w_s)),
(true, Some(3)) => v.sort_by(|x, y| Ieee754::total_cmp(&y.w_s, &x.w_s)),
(false, Some(4)) => v.sort_by(|x, y| Ieee754::total_cmp(&x.ops_d, &y.ops_d)),
(true, Some(4)) => v.sort_by(|x, y| Ieee754::total_cmp(&y.ops_d, &x.ops_d)),
(false, Some(5)) => v.sort_by(|x, y| Ieee754::total_cmp(&x.d_s, &y.d_s)),
(true, Some(5)) => v.sort_by(|x, y| Ieee754::total_cmp(&y.d_s, &x.d_s)),
(false, Some(6)) => v.sort_by(|x, y| x.name.cmp(&y.name)),
(true, Some(6)) => v.sort_by(|x, y| y.name.cmp(&x.name)),
_ => ()
}
v
}
pub fn on_a(&mut self) {
self.auto ^= true;
}
pub fn on_c(&mut self) -> Result<(), Box<dyn Error>> {
self.data.toggle_children()
}
pub fn on_d(&mut self, more_depth: bool) {
self.depth = if more_depth {
match self.depth {
None => NonZeroUsize::new(1),
Some(x) => NonZeroUsize::new(x.get() + 1),
}
} else {
match self.depth {
None => None,
Some(x) => NonZeroUsize::new(x.get() - 1),
}
}
}
pub fn on_minus(&mut self) {
self.sort_idx = match self.sort_idx {
Some(0) => None,
Some(old) => Some(old - 1),
None => Some(6),
}
}
pub fn on_plus(&mut self) {
self.sort_idx = match self.sort_idx {
Some(old) if old >= 6 => None,
Some(old) => Some(old + 1),
None => Some(0),
}
}
pub fn on_q(&mut self) {
self.should_quit = true;
}
pub fn on_r(&mut self) {
self.reverse ^= true;
}
pub fn on_tick(&mut self) {
self.data.refresh().unwrap();
}
pub fn set_filter(&mut self, filter: Regex) {
self.filter = Some(filter);
}
pub fn should_quit(&self) -> bool {
self.should_quit
}
pub fn sort_idx(&self) -> Option<usize> {
self.sort_idx
}
}
#[cfg(test)]
mod t {
mod with_parents {
use super::super::*;
/// The empty string is not a valid dataset, but make sure nothing bad
/// happens anyway
#[test]
fn empty() {
let ds = "";
let mut actual = DataSource::with_parents(ds);
assert!(actual.next().is_none());
}
#[test]
fn pool() {
let ds = "zroot";
let expected = ["zroot"];
let actual = DataSource::with_parents(ds).collect::<Vec<_>>();
assert_eq!(&expected[..], &actual[..]);
}
#[test]
fn one_level() {
let ds = "zroot/ROOT";
let expected = ["zroot", "zroot/ROOT"];
let actual = DataSource::with_parents(ds).collect::<Vec<_>>();
assert_eq!(&expected[..], &actual[..]);
}
#[test]
fn two_levels() {
let ds = "zroot/ROOT/13.0-RELEASE";
let expected = ["zroot", "zroot/ROOT", "zroot/ROOT/13.0-RELEASE"];
let actual = DataSource::with_parents(ds).collect::<Vec<_>>();
assert_eq!(&expected[..], &actual[..]);
}
}
}
app.rs | // vim: tw=80
use std::{
collections::{btree_map, BTreeMap},
error::Error,
mem,
num::NonZeroUsize,
ops::AddAssign,
};
use cfg_if::cfg_if;
use ieee754::Ieee754;
use nix::{
sys::time::TimeSpec,
time::{clock_gettime, ClockId},
};
use regex::Regex;
cfg_if! {
if #[cfg(target_os = "freebsd")] {
mod freebsd;
use freebsd::{SnapshotIter};
const CLOCK_UPTIME: ClockId = ClockId::CLOCK_UPTIME;
} else if #[cfg(target_os = "linux")] {
mod linux;
use linux::SnapshotIter;
const CLOCK_UPTIME: ClockId = ClockId::CLOCK_BOOTTIME;
}
}
/// A snapshot in time of a dataset's statistics.
///
/// The various fields are not saved atomically, but ought to be close.
#[derive(Clone, Debug)]
struct Snapshot {
name: String,
nunlinked: u64,
nunlinks: u64,
nread: u64,
reads: u64,
nwritten: u64,
writes: u64,
}
impl Snapshot {
fn compute(&self, prev: Option<&Self>, etime: f64) -> Element {
if let Some(prev) = prev {
Element {
name: self.name.clone(),
ops_r: (self.reads - prev.reads) as f64 / etime,
r_s: (self.nread - prev.nread) as f64 / etime,
ops_w: (self.writes - prev.writes) as f64 / etime,
w_s: (self.nwritten - prev.nwritten) as f64 / etime,
ops_d: (self.nunlinks - prev.nunlinks) as f64 / etime,
d_s: (self.nunlinked - prev.nunlinked) as f64 / etime,
}
} else {
Element {
name: self.name.clone(),
ops_r: self.reads as f64 / etime,
r_s: self.nread as f64 / etime,
ops_w: self.writes as f64 / etime,
w_s: self.nwritten as f64 / etime,
ops_d: self.nunlinks as f64 / etime,
d_s: self.nunlinked as f64 / etime,
}
}
}
/// Iterate through ZFS datasets, returning stats for each.
///
/// Iterates through every dataset beneath each of the given pools, or
/// through all datasets if no pool is supplied.
pub fn iter(pool: Option<&str>) -> Result<SnapshotIter, Box<dyn Error>> {
SnapshotIter::new(pool)
}
}
impl AddAssign<&Self> for Snapshot {
fn add_assign(&mut self, other: &Self) {
assert!(
other.name.starts_with(&self.name),
"Why would you want to combine two unrelated datasets?"
);
self.nunlinked += other.nunlinked;
self.nunlinks += other.nunlinks;
self.nread += other.nread;
self.reads += other.reads;
self.nwritten += other.nwritten;
self.writes += other.writes;
}
}
#[derive(Default)]
struct DataSource {
children: bool,
prev: BTreeMap<String, Snapshot>,
prev_ts: Option<TimeSpec>,
cur: BTreeMap<String, Snapshot>,
cur_ts: Option<TimeSpec>,
pools: Vec<String>,
}
impl DataSource {
fn new(children: bool, pools: Vec<String>) -> Self {
DataSource {
children,
pools,
..Default::default()
}
}
/// Iterate through all the datasets, returning current stats
fn iter(&mut self) -> impl Iterator<Item = Element> + '_ {
let etime = if let Some(prev_ts) = self.prev_ts.as_ref() {
let delta = *self.cur_ts.as_ref().unwrap() - *prev_ts;
delta.tv_sec() as f64 + delta.tv_nsec() as f64 * 1e-9
} else {
let boottime = clock_gettime(CLOCK_UPTIME).unwrap();
boottime.tv_sec() as f64 + boottime.tv_nsec() as f64 * 1e-9
};
DataSourceIter {
inner_iter: self.cur.iter(),
ds: self,
etime,
}
}
/// Iterate over all of the names of parent datasets of the argument
fn with_parents(s: &str) -> impl Iterator<Item = &str> {
s.char_indices().filter_map(move |(idx, c)| {
if c == '/' {
Some(s.split_at(idx).0)
} else if idx == s.len() - 1 {
Some(s)
} else {
None
}
})
}
fn refresh(&mut self) -> Result<(), Box<dyn Error>> {
let now = clock_gettime(ClockId::CLOCK_MONOTONIC)?;
self.prev = mem::take(&mut self.cur);
self.prev_ts = self.cur_ts.replace(now);
if self.pools.is_empty() {
for rss in Snapshot::iter(None).unwrap() {
let ss = rss?;
Self::upsert(&mut self.cur, ss, self.children);
}
} else {
for pool in self.pools.iter() {
for rss in Snapshot::iter(Some(pool)).unwrap() {
let ss = rss?;
Self::upsert(&mut self.cur, ss, self.children);
}
}
}
Ok(())
}
fn toggle_children(&mut self) -> Result<(), Box<dyn Error>> {
self.children ^= true;
// Wipe out previous statistics. The next refresh will report stats
// since boot.
self.refresh()?;
mem::take(&mut self.prev);
self.prev_ts = None;
Ok(())
}
/// Insert a snapshot into `cur`, and/or update it and its parents
fn upsert(
cur: &mut BTreeMap<String, Snapshot>,
ss: Snapshot,
children: bool,
) {
if children {
for dsname in Self::with_parents(&ss.name) {
match cur.entry(dsname.to_string()) {
btree_map::Entry::Vacant(ve) => {
if ss.name == dsname {
ve.insert(ss.clone());
} else {
let mut parent_ss = ss.clone();
parent_ss.name = dsname.to_string();
ve.insert(parent_ss);
}
}
btree_map::Entry::Occupied(mut oe) => {
*oe.get_mut() += &ss;
}
}
}
} else {
match cur.entry(ss.name.clone()) {
btree_map::Entry::Vacant(ve) => {
ve.insert(ss);
}
btree_map::Entry::Occupied(mut oe) => {
*oe.get_mut() += &ss;
}
}
};
}
}
struct DataSourceIter<'a> {
inner_iter: btree_map::Iter<'a, String, Snapshot>,
ds: &'a DataSource,
etime: f64,
}
impl<'a> Iterator for DataSourceIter<'a> {
type Item = Element;
fn next(&mut self) -> Option<Self::Item> {
self.inner_iter
.next()
.map(|(_, ss)| ss.compute(self.ds.prev.get(&ss.name), self.etime))
}
}
/// One thing to display in the table
#[derive(Clone, Debug)]
pub struct Element {
pub name: String,
/// Read IOPs
pub ops_r: f64,
/// Read B/s
pub r_s: f64,
/// Delete IOPs
pub ops_d: f64,
/// Delete B/s
pub d_s: f64,
/// Write IOPs
pub ops_w: f64,
/// Write B/s
pub w_s: f64,
}
#[derive(Default)]
pub struct App {
auto: bool,
data: DataSource,
depth: Option<NonZeroUsize>,
filter: Option<Regex>,
reverse: bool,
should_quit: bool,
/// 0-based index of the column to sort by, if any
sort_idx: Option<usize>,
}
impl App {
pub fn new(
auto: bool,
children: bool,
pools: Vec<String>,
depth: Option<NonZeroUsize>,
filter: Option<Regex>,
reverse: bool,
sort_idx: Option<usize>,
) -> Self {
let mut data = DataSource::new(children, pools);
data.refresh().unwrap();
App {
auto,
data,
depth,
filter,
reverse,
sort_idx,
..Default::default()
}
}
pub fn clear_filter(&mut self) {
self.filter = None;
}
/// Return the elements that should be displayed, in order
#[rustfmt::skip]
pub fn elements(&mut self) -> Vec<Element> {
let auto = self.auto;
let depth = self.depth;
let filter = &self.filter;
let mut v = self.data.iter()
.filter(move |elem| {
if let Some(limit) = depth {
let edepth = elem.name.split('/').count();
edepth <= limit.get()
} else {
true
}
}).filter(|elem|
filter.as_ref()
.map(|f| f.is_match(&elem.name))
.unwrap_or(true)
        ).filter(|elem| !auto || (elem.r_s + elem.w_s + elem.d_s > 1.0))
.collect::<Vec<_>>();
match (self.reverse, self.sort_idx) {
            // TODO: when the total_cmp feature stabilizes, use f64::total_cmp
// instead.
// https://github.com/rust-lang/rust/issues/72599
(false, Some(0)) => v.sort_by(|x, y| Ieee754::total_cmp(&x.ops_r, &y.ops_r)),
(true, Some(0)) => v.sort_by(|x, y| Ieee754::total_cmp(&y.ops_r, &x.ops_r)),
(false, Some(1)) => v.sort_by(|x, y| Ieee754::total_cmp(&x.r_s, &y.r_s)),
(true, Some(1)) => v.sort_by(|x, y| Ieee754::total_cmp(&y.r_s, &x.r_s)),
(false, Some(2)) => v.sort_by(|x, y| Ieee754::total_cmp(&x.ops_w, &y.ops_w)),
(true, Some(2)) => v.sort_by(|x, y| Ieee754::total_cmp(&y.ops_w, &x.ops_w)),
(false, Some(3)) => v.sort_by(|x, y| Ieee754::total_cmp(&x.w_s, &y.w_s)),
(true, Some(3)) => v.sort_by(|x, y| Ieee754::total_cmp(&y.w_s, &x.w_s)),
(false, Some(4)) => v.sort_by(|x, y| Ieee754::total_cmp(&x.ops_d, &y.ops_d)),
(true, Some(4)) => v.sort_by(|x, y| Ieee754::total_cmp(&y.ops_d, &x.ops_d)),
(false, Some(5)) => v.sort_by(|x, y| Ieee754::total_cmp(&x.d_s, &y.d_s)),
(true, Some(5)) => v.sort_by(|x, y| Ieee754::total_cmp(&y.d_s, &x.d_s)),
(false, Some(6)) => v.sort_by(|x, y| x.name.cmp(&y.name)),
(true, Some(6)) => v.sort_by(|x, y| y.name.cmp(&x.name)),
_ => ()
}
v
}
pub fn on_a(&mut self) {
self.auto ^= true;
}
pub fn on_c(&mut self) -> Result<(), Box<dyn Error>> {
self.data.toggle_children()
}
pub fn on_d(&mut self, more_depth: bool) {
        self.depth = if more_depth {
            match self.depth {
                None => NonZeroUsize::new(1),
                Some(x) => NonZeroUsize::new(x.get() + 1),
            }
        } else {
None => None,
Some(x) => NonZeroUsize::new(x.get() - 1),
}
}
}
pub fn on_minus(&mut self) {
self.sort_idx = match self.sort_idx {
Some(0) => None,
Some(old) => Some(old - 1),
None => Some(6),
}
}
pub fn on_plus(&mut self) {
self.sort_idx = match self.sort_idx {
Some(old) if old >= 6 => None,
Some(old) => Some(old + 1),
None => Some(0),
}
}
pub fn on_q(&mut self) {
self.should_quit = true;
}
pub fn on_r(&mut self) {
self.reverse ^= true;
}
pub fn on_tick(&mut self) {
self.data.refresh().unwrap();
}
pub fn set_filter(&mut self, filter: Regex) {
self.filter = Some(filter);
}
pub fn should_quit(&self) -> bool {
self.should_quit
}
pub fn sort_idx(&self) -> Option<usize> {
self.sort_idx
}
}
#[cfg(test)]
mod t {
mod with_parents {
use super::super::*;
/// The empty string is not a valid dataset, but make sure nothing bad
/// happens anyway
#[test]
fn empty() {
let ds = "";
let mut actual = DataSource::with_parents(ds);
assert!(actual.next().is_none());
}
#[test]
fn pool() {
let ds = "zroot";
let expected = ["zroot"];
let actual = DataSource::with_parents(ds).collect::<Vec<_>>();
assert_eq!(&expected[..], &actual[..]);
}
#[test]
fn one_level() {
let ds = "zroot/ROOT";
let expected = ["zroot", "zroot/ROOT"];
let actual = DataSource::with_parents(ds).collect::<Vec<_>>();
assert_eq!(&expected[..], &actual[..]);
}
#[test]
fn two_levels() {
let ds = "zroot/ROOT/13.0-RELEASE";
let expected = ["zroot", "zroot/ROOT", "zroot/ROOT/13.0-RELEASE"];
let actual = DataSource::with_parents(ds).collect::<Vec<_>>();
assert_eq!(&expected[..], &actual[..]);
}
}
}
bip32.rs | FromBase58, ToBase58};
/// A chain code
pub struct ChainCode([u8,..32]);
impl_array_newtype!(ChainCode, u8, 32)
impl_array_newtype_show!(ChainCode)
impl_array_newtype_encodable!(ChainCode, u8, 32)
/// A fingerprint
pub struct Fingerprint([u8,..4]);
impl_array_newtype!(Fingerprint, u8, 4)
impl_array_newtype_show!(Fingerprint)
impl_array_newtype_encodable!(Fingerprint, u8, 4)
impl Default for Fingerprint {
fn default() -> Fingerprint { Fingerprint([0, 0, 0, 0]) }
}
/// Extended private key
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Show)]
pub struct ExtendedPrivKey {
/// The network this key is to be used on
pub network: Network,
/// How many derivations this key is from the master (which is 0)
pub depth: uint,
/// Fingerprint of the parent key (0 for master)
pub parent_fingerprint: Fingerprint,
/// Child number of the key used to derive from parent (0 for master)
pub child_number: ChildNumber,
/// Secret key
pub secret_key: SecretKey,
/// Chain code
pub chain_code: ChainCode
}
/// Extended public key
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Show)]
pub struct ExtendedPubKey {
/// The network this key is to be used on
pub network: Network,
/// How many derivations this key is from the master (which is 0)
pub depth: uint,
/// Fingerprint of the parent key
pub parent_fingerprint: Fingerprint,
/// Child number of the key used to derive from parent (0 for master)
pub child_number: ChildNumber,
/// Public key
pub public_key: PublicKey,
/// Chain code
pub chain_code: ChainCode
}
/// A child number for a derived key
#[deriving(Clone, PartialEq, Eq, Show)]
pub enum ChildNumber {
/// Hardened key index, within [0, 2^31 - 1]
Hardened(u32),
/// Non-hardened key, within [0, 2^31 - 1]
Normal(u32),
}
impl<S: Encoder<E>, E> Encodable<S, E> for ChildNumber {
fn encode(&self, s: &mut S) -> Result<(), E> {
match *self {
Hardened(n) => (n + (1 << 31)).encode(s),
Normal(n) => n.encode(s)
}
}
}
impl<D: Decoder<E>, E> Decodable<D, E> for ChildNumber {
fn decode(d: &mut D) -> Result<ChildNumber, E> {
let n: u32 = try!(Decodable::decode(d));
if n < (1 << 31) {
Ok(Normal(n))
} else {
Ok(Hardened(n - (1 << 31)))
}
}
}
/// A BIP32 error
#[deriving(Clone, PartialEq, Eq, Show)]
pub enum Error {
/// A pk->pk derivation was attempted on a hardened key
CannotDeriveFromHardenedKey,
    /// A secp256k1 error occurred
EcdsaError(secp256k1::Error),
/// A child number was provided that was out of range
InvalidChildNumber(ChildNumber),
/// Error creating a master seed --- for application use
RngError(String)
}
impl ExtendedPrivKey {
/// Construct a new master key from a seed value
pub fn new_master(network: Network, seed: &[u8]) -> Result<ExtendedPrivKey, Error> {
let mut result = [0,..64];
let mut hmac = Hmac::new(Sha512::new(), b"Bitcoin seed".as_slice());
hmac.input(seed);
hmac.raw_result(result.as_mut_slice());
Ok(ExtendedPrivKey {
network: network,
depth: 0,
parent_fingerprint: Default::default(),
child_number: Normal(0),
secret_key: try!(SecretKey::from_slice(result.slice_to(32)).map_err(EcdsaError)),
chain_code: ChainCode::from_slice(result.slice_from(32))
})
}
/// Creates a privkey from a path
pub fn from_path(master: &ExtendedPrivKey, path: &[ChildNumber])
-> Result<ExtendedPrivKey, Error> {
let mut sk = *master;
for &num in path.iter() {
sk = try!(sk.ckd_priv(num));
}
Ok(sk)
}
/// Private->Private child key derivation
pub fn ckd_priv(&self, i: ChildNumber) -> Result<ExtendedPrivKey, Error> {
let mut result = [0,..64];
let mut hmac = Hmac::new(Sha512::new(), self.chain_code.as_slice());
match i {
Normal(n) => {
if n >= (1 << 31) { return Err(InvalidChildNumber(i)) }
// Non-hardened key: compute public data and use that
secp256k1::init();
// Note the unwrap: this is fine, we checked the SK when we created it
hmac.input(PublicKey::from_secret_key(&self.secret_key, true).as_slice());
u64_to_be_bytes(n as u64, 4, |raw| hmac.input(raw));
}
Hardened(n) => {
if n >= (1 << 31) { return Err(InvalidChildNumber(i)) }
// Hardened key: use only secret data to prevent public derivation
hmac.input([0]);
hmac.input(self.secret_key.as_slice());
u64_to_be_bytes(n as u64 + (1 << 31), 4, |raw| hmac.input(raw));
}
}
hmac.raw_result(result.as_mut_slice());
let mut sk = try!(SecretKey::from_slice(result.slice_to(32)).map_err(EcdsaError));
try!(sk.add_assign(&self.secret_key).map_err(EcdsaError));
Ok(ExtendedPrivKey {
network: self.network,
depth: self.depth + 1,
parent_fingerprint: self.fingerprint(),
child_number: i,
secret_key: sk,
chain_code: ChainCode::from_slice(result.slice_from(32))
})
}
/// Returns the HASH160 of the chaincode
pub fn identifier(&self) -> [u8,..20] {
let mut sha2_res = [0,..32];
let mut ripemd_res = [0,..20];
// Compute extended public key
let pk = ExtendedPubKey::from_private(self);
// Do SHA256 of just the ECDSA pubkey
let mut sha2 = Sha256::new();
sha2.input(pk.public_key.as_slice());
sha2.result(sha2_res.as_mut_slice());
// do RIPEMD160
let mut ripemd = Ripemd160::new();
ripemd.input(sha2_res.as_slice());
ripemd.result(ripemd_res.as_mut_slice());
// Return
ripemd_res
}
/// Returns the first four bytes of the identifier
pub fn fingerprint(&self) -> Fingerprint {
Fingerprint::from_slice(self.identifier().slice_to(4))
}
}
impl ExtendedPubKey {
/// Derives a public key from a private key
pub fn from_private(sk: &ExtendedPrivKey) -> ExtendedPubKey {
secp256k1::init();
ExtendedPubKey {
network: sk.network,
depth: sk.depth,
parent_fingerprint: sk.parent_fingerprint,
child_number: sk.child_number,
public_key: PublicKey::from_secret_key(&sk.secret_key, true),
chain_code: sk.chain_code
}
}
/// Public->Public child key derivation
pub fn ckd_pub(&self, i: ChildNumber) -> Result<ExtendedPubKey, Error> {
match i {
Hardened(n) => {
if n >= (1 << 31) {
Err(InvalidChildNumber(i))
} else {
Err(CannotDeriveFromHardenedKey)
}
}
Normal(n) => {
let mut hmac = Hmac::new(Sha512::new(), self.chain_code.as_slice());
hmac.input(self.public_key.as_slice());
u64_to_be_bytes(n as u64, 4, |raw| hmac.input(raw));
let mut result = [0,..64];
hmac.raw_result(result.as_mut_slice());
let sk = try!(SecretKey::from_slice(result.slice_to(32)).map_err(EcdsaError));
let mut pk = self.public_key.clone();
try!(pk.add_exp_assign(&sk).map_err(EcdsaError));
Ok(ExtendedPubKey {
network: self.network,
depth: self.depth + 1,
parent_fingerprint: self.fingerprint(),
child_number: i,
public_key: pk,
chain_code: ChainCode::from_slice(result.slice_from(32))
})
}
}
}
    /// Returns the HASH160 of the compressed public key
pub fn identifier(&self) -> [u8,..20] {
let mut sha2_res = [0,..32];
let mut ripemd_res = [0,..20];
// Do SHA256 of just the ECDSA pubkey
let mut sha2 = Sha256::new();
sha2.input(self.public_key.as_slice());
sha2.result(sha2_res.as_mut_slice());
// do RIPEMD160
let mut ripemd = Ripemd160::new();
ripemd.input(sha2_res.as_slice());
ripemd.result(ripemd_res.as_mut_slice());
// Return
ripemd_res
}
/// Returns the first four bytes of the identifier
pub fn fingerprint(&self) -> Fingerprint {
Fingerprint::from_slice(self.identifier().slice_to(4))
}
}
impl ToBase58 for ExtendedPrivKey {
fn base58_layout(&self) -> Vec<u8> {
let mut ret = Vec::with_capacity(78);
ret.push_all(match self.network {
Bitcoin => [0x04, 0x88, 0xAD, 0xE4],
BitcoinTestnet => [0x04, 0x35, 0x83, 0x94]
});
ret.push(self.depth as u8);
ret.push_all(self.parent_fingerprint.as_slice());
match self.child_number {
Hardened(n) => {
u64_to_be_bytes(n as u64 + (1 << 31), 4, |raw| ret.push_all(raw));
}
Normal(n) => {
u64_to_be_bytes(n as u64, 4, |raw| ret.push_all(raw));
}
}
ret.push_all(self.chain_code.as_slice());
ret.push(0);
ret.push_all(self.secret_key.as_slice());
ret
}
}
impl FromBase58 for ExtendedPrivKey {
fn from_base58_layout(data: Vec<u8>) -> Result<ExtendedPrivKey, Base58Error> {
        if data.len() != 78 {
return Err(InvalidLength(data.len()));
}
let cn_int = u64_from_be_bytes(data.as_slice(), 9, 4) as u32;
let child_number = if cn_int < (1 << 31) { Normal(cn_int) }
else { Hardened(cn_int - (1 << 31)) };
Ok(ExtendedPrivKey {
network: match data.slice_to(4) {
[0x04, 0x88, 0xAD, 0xE4] => Bitcoin,
[0x04, 0x35, 0x83, 0x94] => BitcoinTestnet,
_ => { return Err(InvalidVersion(data.slice_to(4).to_vec())); }
},
depth: data[4] as uint,
parent_fingerprint: Fingerprint::from_slice(data.slice(5, 9)),
child_number: child_number,
chain_code: ChainCode::from_slice(data.slice(13, 45)),
secret_key: try!(SecretKey::from_slice(
data.slice(46, 78)).map_err(|e|
OtherBase58Error(e.to_string())))
})
}
}
impl ToBase58 for ExtendedPubKey {
fn base58_layout(&self) -> Vec<u8> {
assert!(self.public_key.is_compressed());
let mut ret = Vec::with_capacity(78);
ret.push_all(match self.network {
Bitcoin => [0x04, 0x88, 0xB2, 0x1E],
BitcoinTestnet => [0x04, 0x35, 0x87, 0xCF]
});
ret.push(self.depth as u8);
ret.push_all(self.parent_fingerprint.as_slice());
match self.child_number {
Hardened(n) => {
u64_to_be_bytes(n as u64 + (1 << 31), 4, |raw| ret.push_all(raw));
}
Normal(n) => {
u64_to_be_bytes(n as u64, 4, |raw| ret.push_all(raw));
}
}
ret.push_all(self.chain_code.as_slice());
ret.push_all(self.public_key.as_slice());
ret
}
}
impl FromBase58 for ExtendedPubKey {
fn from_base58_layout(data: Vec<u8>) -> Result<ExtendedPubKey, Base58Error> | data.slice(45, 78)).map_err(|e|
OtherBase58Error(e.to_string())))
})
}
}
#[cfg(test)]
mod tests {
use serialize::hex::FromHex;
use test::{Bencher, black_box};
use network::constants::{Network, Bitcoin};
use util::base58::{FromBase58, ToBase58};
use super::{ChildNumber, ExtendedPrivKey, ExtendedPubKey, Hardened, Normal};
fn test_path(network: Network,
seed: &[u8],
path: &[ChildNumber],
expected_sk: &str,
expected_pk: &str) {
let mut sk = ExtendedPrivKey::new_master(network, seed).unwrap();
let mut pk = ExtendedPubKey::from_private(&sk);
// Derive keys, checking hardened and non-hardened derivation
for &num in path.iter() {
sk = sk.ckd_priv(num).unwrap();
match num {
Normal(_) => {
let pk2 = pk.ckd_pub(num).unwrap();
pk = ExtendedPubKey::from_private(&sk);
assert_eq!(pk, pk2);
}
Hardened(_) => {
pk = ExtendedPubKey::from_private(&sk);
}
}
}
// Check result against expected base58
assert_eq!(sk.to_base58check().as_slice(), expected_sk);
assert_eq!(pk.to_base58check().as_slice(), expected_pk);
// Check decoded base58 against result
let decoded_sk = FromBase58::from_base58check(expected_sk);
let decoded_pk = FromBase58::from_base58check(expected_pk);
assert_eq!(Ok(sk), decoded_sk);
assert_eq!(Ok(pk), decoded_pk);
}
#[test]
fn test_vector_1() {
let seed = "000102030405060708090a0b0c0d0e0f".from_hex().unwrap();
// m
test_path(Bitcoin, seed.as_slice(), [],
"xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi",
"xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8");
// m/0h
test_path(Bitcoin, seed.as_slice(), [Hardened(0)],
"xprv9uHRZZhk6KAJC1avXpDAp4MDc3sQKNxDiPvvkX8Br5ngLNv1TxvUxt4cV1rGL5hj6KCesnDYUhd7oWgT11eZG7XnxHrnYeSvkzY7d2bhkJ7",
"xpub68Gmy5EdvgibQVfPdqkBBCHxA5htiqg55crXYuXoQRKfDBFA1WEjWgP6LHhwBZeNK1VTsfTFUHCdrfp1bgwQ9xv5ski8PX9rL2dZXvgGDnw");
// m/0h/1
test_path(Bitcoin, seed.as_slice(), [Hardened(0), Normal(1)],
"xprv9wTYmMFdV23N2TdNG573QoEsfRrWKQgWeibmLntzniatZvR9BmLnvSxqu53Kw1UmYPxLgboyZQaXwTCg8MSY3H2EU4pWcQDnRnrVA1xe8fs",
"xpub6ASuArnXKPbfEwhqN6e3mwBcDTgzisQN1wXN9BJcM47sSikHjJf3UFHKkNAWbWMiGj7Wf5uMash7SyYq527Hqck2AxYysAA7xmALppuCkwQ");
// m/0h/1/2h
test_path(Bitcoin, seed.as_slice(), [Hardened(0), Normal(1), Hardened(2)],
"xprv9z4pot5VBttmtdRTWfWQmoH1taj2axGVzFqSb8C9xaxKymcFzXBDptWmT7FwuEzG3ryjH4ktypQSAewRiNMjANTtpgP4mLTj34bhnZX7UiM",
"xpub6D4BDPcP2GT577Vvch3R8wDkScZWzQzMMUm3PWbmWvVJrZwQY4VUNgqFJPMM3No2dFDFGTsxxpG5uJh7n7epu4trkrX7x7DogT5Uv6fcLW5");
// m/0h/1/2h/2
test_path(Bitcoin, seed.as_slice(), [Hardened(0), Normal(1), Hardened(2), Normal(2)],
"xprvA2JDeKCSNNZky6uBCviVfJSKyQ1mDYahRjijr5idH2WwLsEd4Hsb2Tyh8RfQMuPh7f7RtyzTtdrbdqqsunu5Mm3wDvUAKRHSC34sJ7in334",
"xpub6FHa3pjLCk84BayeJxFW2SP4XRrFd1JYnxeLeU8EqN3vDfZmbqBqaGJAyiLjTAwm6ZLRQUMv1ZACTj37sR62cfN7fe5JnJ7dh8zL4fiyLHV");
// m/0h/1/2h/2/1000000000
test_path(Bitcoin, seed.as_slice(), [Hardened(0), Normal(1), Hardened(2), Normal(2), Normal(1000000000)],
"xprvA41z7zogVVwxVSgdKUHDy1SKmdb533PjDz7J6N6mV6uS3ze1ai8FHa8kmHScGpWmj4WggLyQjgPie1rFSruoUihUZREPSL39UNdE3BBDu76",
"xpub6H1LXWLaKsWFhvm6RVpEL9P4KfRZSW7abD2ttkWP3SSQvnyA8FSVqNTEcYFgJS2UaFcxupHiYkro49S8yGasTvXEYBVPamhGW6cFJodrTHy");
}
#[test]
fn test_vector_2() {
let seed = "fffcf9f6f3f0edeae7e4e1dedbd8d5d2cfccc9c6c3c0bdbab7b4b1aeaba8a5a29f9c999693908d8a8784817e7b7875726f6c696663605d5a5754514e4b484542".from_hex().unwrap();
// m
test_path(Bitcoin, seed.as_slice(), [],
"xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U",
"xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB");
// m/0
test_path(Bitcoin, seed.as_slice(), [Normal(0)],
"xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt",
"xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH");
// m/0/2147483647h
test_path(Bitcoin, seed.as_slice(), [Normal(0), Hardened(2147483647)],
"xprv9wSp6B7kry3Vj9m1zSnLvN3xH8RdsPP1Mh7fAaR7aRLcQMKTR2vidYEeEg2mUCTAwCd6vnxVrcjfy2kRgVsFawNzmjuHc2YmYRmagcEPdU9",
"xpub6ASAVgeehLbnwdqV6UKMHVzgqAG8Gr6riv3Fxxpj8ksbH9ebxaEyBLZ85ySDhKiLDBrQSARLq1uNRts8RuJiHjaDMBU4Zn9h8LZNnBC5y4a");
// m/0/2147483647h/1
test_path(Bitcoin, seed.as_slice(), [Normal(0), Hardened(2147483647), Normal(1)],
"xprv9zFnWC6h2cLgpmSA46vutJzBcfJ8yaJGg8cX1e5StJh45BBciYTRXSd25UEPVuesF9yog62tGAQtHjXajPPdbRCHuWS6T8XA2ECKADdw4Ef",
"xpub6DF8uhdarytz3FWdA8TvFSvvAh8dP3283MY7p2V4SeE2wyWmG5mg5EwVvmdMVCQcoNJxGoWaU9DCWh89LojfZ537wTfunKau47EL2dhHKon");
// m/0/2147483647h/1/2147483646h
test_path(Bitcoin, seed.as_slice(), [Normal(0), Hardened(2147483647), Normal(1), Hardened(2147483646)],
"xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc",
"xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL");
// m/0/2147483647h/1/2147483646h/2
test_path(Bitcoin, seed.as_slice(), [Normal(0), Hardened(2147483647), Normal(1), Hardened(2147483646), Normal(2)],
"xprvA2nrNbFZABcdryreWet9Ea4LvTJcGsqrMzxHx98MMrotbir7yrKCEXw7nadnHM8Dq38EGfSh6dqA9QWTyefMLEcBYJUuekgW4BYPJcr9E7j",
"xpub6FnCn6nSzZAw5Tw7cgR9bi15UV96gLZhjDstkXXxvCLsUXBGXPdSnLFbdpq8p9HmGsApME5hQTZ3emM2rnY5agb9rXpVGyy3bdW6EEgAtqt");
}
#[test]
pub fn encode_decode_childnumber | {
if data.len() != 78 {
return Err(InvalidLength(data.len()));
}
let cn_int = u64_from_be_bytes(data.as_slice(), 9, 4) as u32;
let child_number = if cn_int < (1 << 31) { Normal(cn_int) }
else { Hardened(cn_int - (1 << 31)) };
Ok(ExtendedPubKey {
network: match data.slice_to(4) {
[0x04, 0x88, 0xB2, 0x1E] => Bitcoin,
[0x04, 0x35, 0x87, 0xCF] => BitcoinTestnet,
_ => { return Err(InvalidVersion(data.slice_to(4).to_vec())); }
},
depth: data[4] as uint,
parent_fingerprint: Fingerprint::from_slice(data.slice(5, 9)),
child_number: child_number,
chain_code: ChainCode::from_slice(data.slice(13, 45)),
public_key: try!(PublicKey::from_slice( | identifier_body |
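Editorial note, not part of the dataset row above: the from_base58_layout implementations shown in this row decode a fixed 78-byte extended-key payload. The following is a minimal sketch of that layout, with a hypothetical helper name; the offsets come from the BIP32 serialization format and match the slice_to/slice calls in the row.

// Hypothetical illustration of the 78-byte BIP32 extended-key payload.
fn split_bip32_payload(data: &[u8; 78]) -> (&[u8], u8, &[u8], &[u8], &[u8], &[u8]) {
    (
        &data[0..4],   // version bytes, e.g. 0x0488B21E for a mainnet xpub
        data[4],       // depth
        &data[5..9],   // parent fingerprint
        &data[9..13],  // child number, big-endian u32 (values >= 2^31 are hardened)
        &data[13..45], // chain code
        &data[45..78], // key material: 0x00 || secret key, or a compressed public key
    )
}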
bip32.rs | FromBase58, ToBase58};
/// A chain code
pub struct ChainCode([u8,..32]);
impl_array_newtype!(ChainCode, u8, 32)
impl_array_newtype_show!(ChainCode)
impl_array_newtype_encodable!(ChainCode, u8, 32)
/// A fingerprint
pub struct Fingerprint([u8,..4]);
impl_array_newtype!(Fingerprint, u8, 4)
impl_array_newtype_show!(Fingerprint)
impl_array_newtype_encodable!(Fingerprint, u8, 4)
impl Default for Fingerprint {
fn default() -> Fingerprint { Fingerprint([0, 0, 0, 0]) }
}
/// Extended private key
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Show)]
pub struct ExtendedPrivKey {
/// The network this key is to be used on
pub network: Network,
/// How many derivations this key is from the master (which is 0)
pub depth: uint,
/// Fingerprint of the parent key (0 for master)
pub parent_fingerprint: Fingerprint,
/// Child number of the key used to derive from parent (0 for master)
pub child_number: ChildNumber,
/// Secret key
pub secret_key: SecretKey,
/// Chain code
pub chain_code: ChainCode
}
/// Extended public key
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Show)]
pub struct ExtendedPubKey {
/// The network this key is to be used on
pub network: Network,
/// How many derivations this key is from the master (which is 0)
pub depth: uint,
/// Fingerprint of the parent key
pub parent_fingerprint: Fingerprint,
/// Child number of the key used to derive from parent (0 for master)
pub child_number: ChildNumber,
/// Public key
pub public_key: PublicKey,
/// Chain code
pub chain_code: ChainCode
}
/// A child number for a derived key
#[deriving(Clone, PartialEq, Eq, Show)]
pub enum ChildNumber {
/// Hardened key index, within [0, 2^31 - 1]
Hardened(u32),
/// Non-hardened key, within [0, 2^31 - 1]
Normal(u32),
}
impl<S: Encoder<E>, E> Encodable<S, E> for ChildNumber {
fn encode(&self, s: &mut S) -> Result<(), E> {
match *self {
Hardened(n) => (n + (1 << 31)).encode(s),
Normal(n) => n.encode(s)
}
}
}
impl<D: Decoder<E>, E> Decodable<D, E> for ChildNumber {
fn decode(d: &mut D) -> Result<ChildNumber, E> {
let n: u32 = try!(Decodable::decode(d));
if n < (1 << 31) {
Ok(Normal(n))
} else {
Ok(Hardened(n - (1 << 31)))
}
}
}
/// A BIP32 error
#[deriving(Clone, PartialEq, Eq, Show)]
pub enum Error {
/// A pk->pk derivation was attempted on a hardened key
CannotDeriveFromHardenedKey,
    /// A secp256k1 error occurred
EcdsaError(secp256k1::Error),
/// A child number was provided that was out of range
InvalidChildNumber(ChildNumber),
/// Error creating a master seed --- for application use
RngError(String)
}
impl ExtendedPrivKey {
/// Construct a new master key from a seed value
pub fn new_master(network: Network, seed: &[u8]) -> Result<ExtendedPrivKey, Error> {
let mut result = [0,..64];
let mut hmac = Hmac::new(Sha512::new(), b"Bitcoin seed".as_slice());
hmac.input(seed);
hmac.raw_result(result.as_mut_slice());
Ok(ExtendedPrivKey {
network: network,
depth: 0,
parent_fingerprint: Default::default(),
child_number: Normal(0),
secret_key: try!(SecretKey::from_slice(result.slice_to(32)).map_err(EcdsaError)),
chain_code: ChainCode::from_slice(result.slice_from(32))
})
}
/// Creates a privkey from a path
pub fn | (master: &ExtendedPrivKey, path: &[ChildNumber])
-> Result<ExtendedPrivKey, Error> {
let mut sk = *master;
for &num in path.iter() {
sk = try!(sk.ckd_priv(num));
}
Ok(sk)
}
/// Private->Private child key derivation
pub fn ckd_priv(&self, i: ChildNumber) -> Result<ExtendedPrivKey, Error> {
let mut result = [0,..64];
let mut hmac = Hmac::new(Sha512::new(), self.chain_code.as_slice());
match i {
Normal(n) => {
if n >= (1 << 31) { return Err(InvalidChildNumber(i)) }
// Non-hardened key: compute public data and use that
secp256k1::init();
// Note the unwrap: this is fine, we checked the SK when we created it
hmac.input(PublicKey::from_secret_key(&self.secret_key, true).as_slice());
u64_to_be_bytes(n as u64, 4, |raw| hmac.input(raw));
}
Hardened(n) => {
if n >= (1 << 31) { return Err(InvalidChildNumber(i)) }
// Hardened key: use only secret data to prevent public derivation
hmac.input([0]);
hmac.input(self.secret_key.as_slice());
u64_to_be_bytes(n as u64 + (1 << 31), 4, |raw| hmac.input(raw));
}
}
hmac.raw_result(result.as_mut_slice());
let mut sk = try!(SecretKey::from_slice(result.slice_to(32)).map_err(EcdsaError));
try!(sk.add_assign(&self.secret_key).map_err(EcdsaError));
Ok(ExtendedPrivKey {
network: self.network,
depth: self.depth + 1,
parent_fingerprint: self.fingerprint(),
child_number: i,
secret_key: sk,
chain_code: ChainCode::from_slice(result.slice_from(32))
})
}
    /// Returns the HASH160 of the compressed public key
pub fn identifier(&self) -> [u8,..20] {
let mut sha2_res = [0,..32];
let mut ripemd_res = [0,..20];
// Compute extended public key
let pk = ExtendedPubKey::from_private(self);
// Do SHA256 of just the ECDSA pubkey
let mut sha2 = Sha256::new();
sha2.input(pk.public_key.as_slice());
sha2.result(sha2_res.as_mut_slice());
// do RIPEMD160
let mut ripemd = Ripemd160::new();
ripemd.input(sha2_res.as_slice());
ripemd.result(ripemd_res.as_mut_slice());
// Return
ripemd_res
}
/// Returns the first four bytes of the identifier
pub fn fingerprint(&self) -> Fingerprint {
Fingerprint::from_slice(self.identifier().slice_to(4))
}
}
impl ExtendedPubKey {
/// Derives a public key from a private key
pub fn from_private(sk: &ExtendedPrivKey) -> ExtendedPubKey {
secp256k1::init();
ExtendedPubKey {
network: sk.network,
depth: sk.depth,
parent_fingerprint: sk.parent_fingerprint,
child_number: sk.child_number,
public_key: PublicKey::from_secret_key(&sk.secret_key, true),
chain_code: sk.chain_code
}
}
/// Public->Public child key derivation
pub fn ckd_pub(&self, i: ChildNumber) -> Result<ExtendedPubKey, Error> {
match i {
Hardened(n) => {
if n >= (1 << 31) {
Err(InvalidChildNumber(i))
} else {
Err(CannotDeriveFromHardenedKey)
}
}
Normal(n) => {
let mut hmac = Hmac::new(Sha512::new(), self.chain_code.as_slice());
hmac.input(self.public_key.as_slice());
u64_to_be_bytes(n as u64, 4, |raw| hmac.input(raw));
let mut result = [0,..64];
hmac.raw_result(result.as_mut_slice());
let sk = try!(SecretKey::from_slice(result.slice_to(32)).map_err(EcdsaError));
let mut pk = self.public_key.clone();
try!(pk.add_exp_assign(&sk).map_err(EcdsaError));
Ok(ExtendedPubKey {
network: self.network,
depth: self.depth + 1,
parent_fingerprint: self.fingerprint(),
child_number: i,
public_key: pk,
chain_code: ChainCode::from_slice(result.slice_from(32))
})
}
}
}
    /// Returns the HASH160 of the compressed public key
pub fn identifier(&self) -> [u8,..20] {
let mut sha2_res = [0,..32];
let mut ripemd_res = [0,..20];
// Do SHA256 of just the ECDSA pubkey
let mut sha2 = Sha256::new();
sha2.input(self.public_key.as_slice());
sha2.result(sha2_res.as_mut_slice());
// do RIPEMD160
let mut ripemd = Ripemd160::new();
ripemd.input(sha2_res.as_slice());
ripemd.result(ripemd_res.as_mut_slice());
// Return
ripemd_res
}
/// Returns the first four bytes of the identifier
pub fn fingerprint(&self) -> Fingerprint {
Fingerprint::from_slice(self.identifier().slice_to(4))
}
}
impl ToBase58 for ExtendedPrivKey {
fn base58_layout(&self) -> Vec<u8> {
let mut ret = Vec::with_capacity(78);
ret.push_all(match self.network {
Bitcoin => [0x04, 0x88, 0xAD, 0xE4],
BitcoinTestnet => [0x04, 0x35, 0x83, 0x94]
});
ret.push(self.depth as u8);
ret.push_all(self.parent_fingerprint.as_slice());
match self.child_number {
Hardened(n) => {
u64_to_be_bytes(n as u64 + (1 << 31), 4, |raw| ret.push_all(raw));
}
Normal(n) => {
u64_to_be_bytes(n as u64, 4, |raw| ret.push_all(raw));
}
}
ret.push_all(self.chain_code.as_slice());
ret.push(0);
ret.push_all(self.secret_key.as_slice());
ret
}
}
impl FromBase58 for ExtendedPrivKey {
fn from_base58_layout(data: Vec<u8>) -> Result<ExtendedPrivKey, Base58Error> {
        if data.len() != 78 {
return Err(InvalidLength(data.len()));
}
let cn_int = u64_from_be_bytes(data.as_slice(), 9, 4) as u32;
let child_number = if cn_int < (1 << 31) { Normal(cn_int) }
else { Hardened(cn_int - (1 << 31)) };
Ok(ExtendedPrivKey {
network: match data.slice_to(4) {
[0x04, 0x88, 0xAD, 0xE4] => Bitcoin,
[0x04, 0x35, 0x83, 0x94] => BitcoinTestnet,
_ => { return Err(InvalidVersion(data.slice_to(4).to_vec())); }
},
depth: data[4] as uint,
parent_fingerprint: Fingerprint::from_slice(data.slice(5, 9)),
child_number: child_number,
chain_code: ChainCode::from_slice(data.slice(13, 45)),
secret_key: try!(SecretKey::from_slice(
data.slice(46, 78)).map_err(|e|
OtherBase58Error(e.to_string())))
})
}
}
impl ToBase58 for ExtendedPubKey {
fn base58_layout(&self) -> Vec<u8> {
assert!(self.public_key.is_compressed());
let mut ret = Vec::with_capacity(78);
ret.push_all(match self.network {
Bitcoin => [0x04, 0x88, 0xB2, 0x1E],
BitcoinTestnet => [0x04, 0x35, 0x87, 0xCF]
});
ret.push(self.depth as u8);
ret.push_all(self.parent_fingerprint.as_slice());
match self.child_number {
Hardened(n) => {
u64_to_be_bytes(n as u64 + (1 << 31), 4, |raw| ret.push_all(raw));
}
Normal(n) => {
u64_to_be_bytes(n as u64, 4, |raw| ret.push_all(raw));
}
}
ret.push_all(self.chain_code.as_slice());
ret.push_all(self.public_key.as_slice());
ret
}
}
impl FromBase58 for ExtendedPubKey {
fn from_base58_layout(data: Vec<u8>) -> Result<ExtendedPubKey, Base58Error> {
        if data.len() != 78 {
return Err(InvalidLength(data.len()));
}
let cn_int = u64_from_be_bytes(data.as_slice(), 9, 4) as u32;
let child_number = if cn_int < (1 << 31) { Normal(cn_int) }
else { Hardened(cn_int - (1 << 31)) };
Ok(ExtendedPubKey {
network: match data.slice_to(4) {
[0x04, 0x88, 0xB2, 0x1E] => Bitcoin,
[0x04, 0x35, 0x87, 0xCF] => BitcoinTestnet,
_ => { return Err(InvalidVersion(data.slice_to(4).to_vec())); }
},
depth: data[4] as uint,
parent_fingerprint: Fingerprint::from_slice(data.slice(5, 9)),
child_number: child_number,
chain_code: ChainCode::from_slice(data.slice(13, 45)),
public_key: try!(PublicKey::from_slice(
data.slice(45, 78)).map_err(|e|
OtherBase58Error(e.to_string())))
})
}
}
#[cfg(test)]
mod tests {
use serialize::hex::FromHex;
use test::{Bencher, black_box};
use network::constants::{Network, Bitcoin};
use util::base58::{FromBase58, ToBase58};
use super::{ChildNumber, ExtendedPrivKey, ExtendedPubKey, Hardened, Normal};
fn test_path(network: Network,
seed: &[u8],
path: &[ChildNumber],
expected_sk: &str,
expected_pk: &str) {
let mut sk = ExtendedPrivKey::new_master(network, seed).unwrap();
let mut pk = ExtendedPubKey::from_private(&sk);
// Derive keys, checking hardened and non-hardened derivation
for &num in path.iter() {
sk = sk.ckd_priv(num).unwrap();
match num {
Normal(_) => {
let pk2 = pk.ckd_pub(num).unwrap();
pk = ExtendedPubKey::from_private(&sk);
assert_eq!(pk, pk2);
}
Hardened(_) => {
pk = ExtendedPubKey::from_private(&sk);
}
}
}
// Check result against expected base58
assert_eq!(sk.to_base58check().as_slice(), expected_sk);
assert_eq!(pk.to_base58check().as_slice(), expected_pk);
// Check decoded base58 against result
let decoded_sk = FromBase58::from_base58check(expected_sk);
let decoded_pk = FromBase58::from_base58check(expected_pk);
assert_eq!(Ok(sk), decoded_sk);
assert_eq!(Ok(pk), decoded_pk);
}
#[test]
fn test_vector_1() {
let seed = "000102030405060708090a0b0c0d0e0f".from_hex().unwrap();
// m
test_path(Bitcoin, seed.as_slice(), [],
"xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi",
"xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8");
// m/0h
test_path(Bitcoin, seed.as_slice(), [Hardened(0)],
"xprv9uHRZZhk6KAJC1avXpDAp4MDc3sQKNxDiPvvkX8Br5ngLNv1TxvUxt4cV1rGL5hj6KCesnDYUhd7oWgT11eZG7XnxHrnYeSvkzY7d2bhkJ7",
"xpub68Gmy5EdvgibQVfPdqkBBCHxA5htiqg55crXYuXoQRKfDBFA1WEjWgP6LHhwBZeNK1VTsfTFUHCdrfp1bgwQ9xv5ski8PX9rL2dZXvgGDnw");
// m/0h/1
test_path(Bitcoin, seed.as_slice(), [Hardened(0), Normal(1)],
"xprv9wTYmMFdV23N2TdNG573QoEsfRrWKQgWeibmLntzniatZvR9BmLnvSxqu53Kw1UmYPxLgboyZQaXwTCg8MSY3H2EU4pWcQDnRnrVA1xe8fs",
"xpub6ASuArnXKPbfEwhqN6e3mwBcDTgzisQN1wXN9BJcM47sSikHjJf3UFHKkNAWbWMiGj7Wf5uMash7SyYq527Hqck2AxYysAA7xmALppuCkwQ");
// m/0h/1/2h
test_path(Bitcoin, seed.as_slice(), [Hardened(0), Normal(1), Hardened(2)],
"xprv9z4pot5VBttmtdRTWfWQmoH1taj2axGVzFqSb8C9xaxKymcFzXBDptWmT7FwuEzG3ryjH4ktypQSAewRiNMjANTtpgP4mLTj34bhnZX7UiM",
"xpub6D4BDPcP2GT577Vvch3R8wDkScZWzQzMMUm3PWbmWvVJrZwQY4VUNgqFJPMM3No2dFDFGTsxxpG5uJh7n7epu4trkrX7x7DogT5Uv6fcLW5");
// m/0h/1/2h/2
test_path(Bitcoin, seed.as_slice(), [Hardened(0), Normal(1), Hardened(2), Normal(2)],
"xprvA2JDeKCSNNZky6uBCviVfJSKyQ1mDYahRjijr5idH2WwLsEd4Hsb2Tyh8RfQMuPh7f7RtyzTtdrbdqqsunu5Mm3wDvUAKRHSC34sJ7in334",
"xpub6FHa3pjLCk84BayeJxFW2SP4XRrFd1JYnxeLeU8EqN3vDfZmbqBqaGJAyiLjTAwm6ZLRQUMv1ZACTj37sR62cfN7fe5JnJ7dh8zL4fiyLHV");
// m/0h/1/2h/2/1000000000
test_path(Bitcoin, seed.as_slice(), [Hardened(0), Normal(1), Hardened(2), Normal(2), Normal(1000000000)],
"xprvA41z7zogVVwxVSgdKUHDy1SKmdb533PjDz7J6N6mV6uS3ze1ai8FHa8kmHScGpWmj4WggLyQjgPie1rFSruoUihUZREPSL39UNdE3BBDu76",
"xpub6H1LXWLaKsWFhvm6RVpEL9P4KfRZSW7abD2ttkWP3SSQvnyA8FSVqNTEcYFgJS2UaFcxupHiYkro49S8yGasTvXEYBVPamhGW6cFJodrTHy");
}
#[test]
fn test_vector_2() {
let seed = "fffcf9f6f3f0edeae7e4e1dedbd8d5d2cfccc9c6c3c0bdbab7b4b1aeaba8a5a29f9c999693908d8a8784817e7b7875726f6c696663605d5a5754514e4b484542".from_hex().unwrap();
// m
test_path(Bitcoin, seed.as_slice(), [],
"xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U",
"xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB");
// m/0
test_path(Bitcoin, seed.as_slice(), [Normal(0)],
"xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt",
"xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH");
// m/0/2147483647h
test_path(Bitcoin, seed.as_slice(), [Normal(0), Hardened(2147483647)],
"xprv9wSp6B7kry3Vj9m1zSnLvN3xH8RdsPP1Mh7fAaR7aRLcQMKTR2vidYEeEg2mUCTAwCd6vnxVrcjfy2kRgVsFawNzmjuHc2YmYRmagcEPdU9",
"xpub6ASAVgeehLbnwdqV6UKMHVzgqAG8Gr6riv3Fxxpj8ksbH9ebxaEyBLZ85ySDhKiLDBrQSARLq1uNRts8RuJiHjaDMBU4Zn9h8LZNnBC5y4a");
// m/0/2147483647h/1
test_path(Bitcoin, seed.as_slice(), [Normal(0), Hardened(2147483647), Normal(1)],
"xprv9zFnWC6h2cLgpmSA46vutJzBcfJ8yaJGg8cX1e5StJh45BBciYTRXSd25UEPVuesF9yog62tGAQtHjXajPPdbRCHuWS6T8XA2ECKADdw4Ef",
"xpub6DF8uhdarytz3FWdA8TvFSvvAh8dP3283MY7p2V4SeE2wyWmG5mg5EwVvmdMVCQcoNJxGoWaU9DCWh89LojfZ537wTfunKau47EL2dhHKon");
// m/0/2147483647h/1/2147483646h
test_path(Bitcoin, seed.as_slice(), [Normal(0), Hardened(2147483647), Normal(1), Hardened(2147483646)],
"xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc",
"xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL");
// m/0/2147483647h/1/2147483646h/2
test_path(Bitcoin, seed.as_slice(), [Normal(0), Hardened(2147483647), Normal(1), Hardened(2147483646), Normal(2)],
"xprvA2nrNbFZABcdryreWet9Ea4LvTJcGsqrMzxHx98MMrotbir7yrKCEXw7nadnHM8Dq38EGfSh6dqA9QWTyefMLEcBYJUuekgW4BYPJcr9E7j",
"xpub6FnCn6nSzZAw5Tw7cgR9bi15UV96gLZhjDstkXXxvCLsUXBGXPdSnLFbdpq8p9HmGsApME5hQTZ3emM2rnY5agb9rXpVGyy3bdW6EEgAtqt");
}
#[test]
pub fn encode_decode_childnumber | from_path | identifier_name |
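Editorial note, not part of the dataset rows: the Encodable/Decodable impls for ChildNumber and the range checks in ckd_priv/ckd_pub all use the same mapping between a 31-bit child index and its 32-bit wire form. A minimal sketch with hypothetical helper names:

// Hardened indices are serialized as n + 2^31; indices must stay below 2^31.
fn child_number_to_wire(hardened: bool, n: u32) -> Option<u32> {
    if n >= (1 << 31) {
        None // out of range, analogous to InvalidChildNumber above
    } else if hardened {
        Some(n + (1 << 31))
    } else {
        Some(n)
    }
}

fn child_number_from_wire(w: u32) -> (bool, u32) {
    if w >= (1 << 31) { (true, w - (1 << 31)) } else { (false, w) }
}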
bip32.rs | FromBase58, ToBase58};
/// A chain code
pub struct ChainCode([u8,..32]);
impl_array_newtype!(ChainCode, u8, 32)
impl_array_newtype_show!(ChainCode)
impl_array_newtype_encodable!(ChainCode, u8, 32)
/// A fingerprint
pub struct Fingerprint([u8,..4]); | impl Default for Fingerprint {
fn default() -> Fingerprint { Fingerprint([0, 0, 0, 0]) }
}
/// Extended private key
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Show)]
pub struct ExtendedPrivKey {
/// The network this key is to be used on
pub network: Network,
/// How many derivations this key is from the master (which is 0)
pub depth: uint,
/// Fingerprint of the parent key (0 for master)
pub parent_fingerprint: Fingerprint,
/// Child number of the key used to derive from parent (0 for master)
pub child_number: ChildNumber,
/// Secret key
pub secret_key: SecretKey,
/// Chain code
pub chain_code: ChainCode
}
/// Extended public key
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Show)]
pub struct ExtendedPubKey {
/// The network this key is to be used on
pub network: Network,
/// How many derivations this key is from the master (which is 0)
pub depth: uint,
/// Fingerprint of the parent key
pub parent_fingerprint: Fingerprint,
/// Child number of the key used to derive from parent (0 for master)
pub child_number: ChildNumber,
/// Public key
pub public_key: PublicKey,
/// Chain code
pub chain_code: ChainCode
}
/// A child number for a derived key
#[deriving(Clone, PartialEq, Eq, Show)]
pub enum ChildNumber {
/// Hardened key index, within [0, 2^31 - 1]
Hardened(u32),
/// Non-hardened key, within [0, 2^31 - 1]
Normal(u32),
}
impl<S: Encoder<E>, E> Encodable<S, E> for ChildNumber {
fn encode(&self, s: &mut S) -> Result<(), E> {
match *self {
Hardened(n) => (n + (1 << 31)).encode(s),
Normal(n) => n.encode(s)
}
}
}
impl<D: Decoder<E>, E> Decodable<D, E> for ChildNumber {
fn decode(d: &mut D) -> Result<ChildNumber, E> {
let n: u32 = try!(Decodable::decode(d));
if n < (1 << 31) {
Ok(Normal(n))
} else {
Ok(Hardened(n - (1 << 31)))
}
}
}
/// A BIP32 error
#[deriving(Clone, PartialEq, Eq, Show)]
pub enum Error {
/// A pk->pk derivation was attempted on a hardened key
CannotDeriveFromHardenedKey,
    /// A secp256k1 error occurred
EcdsaError(secp256k1::Error),
/// A child number was provided that was out of range
InvalidChildNumber(ChildNumber),
/// Error creating a master seed --- for application use
RngError(String)
}
impl ExtendedPrivKey {
/// Construct a new master key from a seed value
pub fn new_master(network: Network, seed: &[u8]) -> Result<ExtendedPrivKey, Error> {
let mut result = [0,..64];
let mut hmac = Hmac::new(Sha512::new(), b"Bitcoin seed".as_slice());
hmac.input(seed);
hmac.raw_result(result.as_mut_slice());
Ok(ExtendedPrivKey {
network: network,
depth: 0,
parent_fingerprint: Default::default(),
child_number: Normal(0),
secret_key: try!(SecretKey::from_slice(result.slice_to(32)).map_err(EcdsaError)),
chain_code: ChainCode::from_slice(result.slice_from(32))
})
}
/// Creates a privkey from a path
pub fn from_path(master: &ExtendedPrivKey, path: &[ChildNumber])
-> Result<ExtendedPrivKey, Error> {
let mut sk = *master;
for &num in path.iter() {
sk = try!(sk.ckd_priv(num));
}
Ok(sk)
}
/// Private->Private child key derivation
pub fn ckd_priv(&self, i: ChildNumber) -> Result<ExtendedPrivKey, Error> {
let mut result = [0,..64];
let mut hmac = Hmac::new(Sha512::new(), self.chain_code.as_slice());
match i {
Normal(n) => {
if n >= (1 << 31) { return Err(InvalidChildNumber(i)) }
// Non-hardened key: compute public data and use that
secp256k1::init();
// Note the unwrap: this is fine, we checked the SK when we created it
hmac.input(PublicKey::from_secret_key(&self.secret_key, true).as_slice());
u64_to_be_bytes(n as u64, 4, |raw| hmac.input(raw));
}
Hardened(n) => {
if n >= (1 << 31) { return Err(InvalidChildNumber(i)) }
// Hardened key: use only secret data to prevent public derivation
hmac.input([0]);
hmac.input(self.secret_key.as_slice());
u64_to_be_bytes(n as u64 + (1 << 31), 4, |raw| hmac.input(raw));
}
}
hmac.raw_result(result.as_mut_slice());
let mut sk = try!(SecretKey::from_slice(result.slice_to(32)).map_err(EcdsaError));
try!(sk.add_assign(&self.secret_key).map_err(EcdsaError));
Ok(ExtendedPrivKey {
network: self.network,
depth: self.depth + 1,
parent_fingerprint: self.fingerprint(),
child_number: i,
secret_key: sk,
chain_code: ChainCode::from_slice(result.slice_from(32))
})
}
    /// Returns the HASH160 of the compressed public key
pub fn identifier(&self) -> [u8,..20] {
let mut sha2_res = [0,..32];
let mut ripemd_res = [0,..20];
// Compute extended public key
let pk = ExtendedPubKey::from_private(self);
// Do SHA256 of just the ECDSA pubkey
let mut sha2 = Sha256::new();
sha2.input(pk.public_key.as_slice());
sha2.result(sha2_res.as_mut_slice());
// do RIPEMD160
let mut ripemd = Ripemd160::new();
ripemd.input(sha2_res.as_slice());
ripemd.result(ripemd_res.as_mut_slice());
// Return
ripemd_res
}
/// Returns the first four bytes of the identifier
pub fn fingerprint(&self) -> Fingerprint {
Fingerprint::from_slice(self.identifier().slice_to(4))
}
}
impl ExtendedPubKey {
/// Derives a public key from a private key
pub fn from_private(sk: &ExtendedPrivKey) -> ExtendedPubKey {
secp256k1::init();
ExtendedPubKey {
network: sk.network,
depth: sk.depth,
parent_fingerprint: sk.parent_fingerprint,
child_number: sk.child_number,
public_key: PublicKey::from_secret_key(&sk.secret_key, true),
chain_code: sk.chain_code
}
}
/// Public->Public child key derivation
pub fn ckd_pub(&self, i: ChildNumber) -> Result<ExtendedPubKey, Error> {
match i {
Hardened(n) => {
if n >= (1 << 31) {
Err(InvalidChildNumber(i))
} else {
Err(CannotDeriveFromHardenedKey)
}
}
Normal(n) => {
let mut hmac = Hmac::new(Sha512::new(), self.chain_code.as_slice());
hmac.input(self.public_key.as_slice());
u64_to_be_bytes(n as u64, 4, |raw| hmac.input(raw));
let mut result = [0,..64];
hmac.raw_result(result.as_mut_slice());
let sk = try!(SecretKey::from_slice(result.slice_to(32)).map_err(EcdsaError));
let mut pk = self.public_key.clone();
try!(pk.add_exp_assign(&sk).map_err(EcdsaError));
Ok(ExtendedPubKey {
network: self.network,
depth: self.depth + 1,
parent_fingerprint: self.fingerprint(),
child_number: i,
public_key: pk,
chain_code: ChainCode::from_slice(result.slice_from(32))
})
}
}
}
    /// Returns the HASH160 of the compressed public key
pub fn identifier(&self) -> [u8,..20] {
let mut sha2_res = [0,..32];
let mut ripemd_res = [0,..20];
// Do SHA256 of just the ECDSA pubkey
let mut sha2 = Sha256::new();
sha2.input(self.public_key.as_slice());
sha2.result(sha2_res.as_mut_slice());
// do RIPEMD160
let mut ripemd = Ripemd160::new();
ripemd.input(sha2_res.as_slice());
ripemd.result(ripemd_res.as_mut_slice());
// Return
ripemd_res
}
/// Returns the first four bytes of the identifier
pub fn fingerprint(&self) -> Fingerprint {
Fingerprint::from_slice(self.identifier().slice_to(4))
}
}
impl ToBase58 for ExtendedPrivKey {
fn base58_layout(&self) -> Vec<u8> {
let mut ret = Vec::with_capacity(78);
ret.push_all(match self.network {
Bitcoin => [0x04, 0x88, 0xAD, 0xE4],
BitcoinTestnet => [0x04, 0x35, 0x83, 0x94]
});
ret.push(self.depth as u8);
ret.push_all(self.parent_fingerprint.as_slice());
match self.child_number {
Hardened(n) => {
u64_to_be_bytes(n as u64 + (1 << 31), 4, |raw| ret.push_all(raw));
}
Normal(n) => {
u64_to_be_bytes(n as u64, 4, |raw| ret.push_all(raw));
}
}
ret.push_all(self.chain_code.as_slice());
ret.push(0);
ret.push_all(self.secret_key.as_slice());
ret
}
}
impl FromBase58 for ExtendedPrivKey {
fn from_base58_layout(data: Vec<u8>) -> Result<ExtendedPrivKey, Base58Error> {
        if data.len() != 78 {
return Err(InvalidLength(data.len()));
}
let cn_int = u64_from_be_bytes(data.as_slice(), 9, 4) as u32;
let child_number = if cn_int < (1 << 31) { Normal(cn_int) }
else { Hardened(cn_int - (1 << 31)) };
Ok(ExtendedPrivKey {
network: match data.slice_to(4) {
[0x04, 0x88, 0xAD, 0xE4] => Bitcoin,
[0x04, 0x35, 0x83, 0x94] => BitcoinTestnet,
_ => { return Err(InvalidVersion(data.slice_to(4).to_vec())); }
},
depth: data[4] as uint,
parent_fingerprint: Fingerprint::from_slice(data.slice(5, 9)),
child_number: child_number,
chain_code: ChainCode::from_slice(data.slice(13, 45)),
secret_key: try!(SecretKey::from_slice(
data.slice(46, 78)).map_err(|e|
OtherBase58Error(e.to_string())))
})
}
}
impl ToBase58 for ExtendedPubKey {
fn base58_layout(&self) -> Vec<u8> {
assert!(self.public_key.is_compressed());
let mut ret = Vec::with_capacity(78);
ret.push_all(match self.network {
Bitcoin => [0x04, 0x88, 0xB2, 0x1E],
BitcoinTestnet => [0x04, 0x35, 0x87, 0xCF]
});
ret.push(self.depth as u8);
ret.push_all(self.parent_fingerprint.as_slice());
match self.child_number {
Hardened(n) => {
u64_to_be_bytes(n as u64 + (1 << 31), 4, |raw| ret.push_all(raw));
}
Normal(n) => {
u64_to_be_bytes(n as u64, 4, |raw| ret.push_all(raw));
}
}
ret.push_all(self.chain_code.as_slice());
ret.push_all(self.public_key.as_slice());
ret
}
}
impl FromBase58 for ExtendedPubKey {
fn from_base58_layout(data: Vec<u8>) -> Result<ExtendedPubKey, Base58Error> {
        if data.len() != 78 {
return Err(InvalidLength(data.len()));
}
let cn_int = u64_from_be_bytes(data.as_slice(), 9, 4) as u32;
let child_number = if cn_int < (1 << 31) { Normal(cn_int) }
else { Hardened(cn_int - (1 << 31)) };
Ok(ExtendedPubKey {
network: match data.slice_to(4) {
[0x04, 0x88, 0xB2, 0x1E] => Bitcoin,
[0x04, 0x35, 0x87, 0xCF] => BitcoinTestnet,
_ => { return Err(InvalidVersion(data.slice_to(4).to_vec())); }
},
depth: data[4] as uint,
parent_fingerprint: Fingerprint::from_slice(data.slice(5, 9)),
child_number: child_number,
chain_code: ChainCode::from_slice(data.slice(13, 45)),
public_key: try!(PublicKey::from_slice(
data.slice(45, 78)).map_err(|e|
OtherBase58Error(e.to_string())))
})
}
}
#[cfg(test)]
mod tests {
use serialize::hex::FromHex;
use test::{Bencher, black_box};
use network::constants::{Network, Bitcoin};
use util::base58::{FromBase58, ToBase58};
use super::{ChildNumber, ExtendedPrivKey, ExtendedPubKey, Hardened, Normal};
fn test_path(network: Network,
seed: &[u8],
path: &[ChildNumber],
expected_sk: &str,
expected_pk: &str) {
let mut sk = ExtendedPrivKey::new_master(network, seed).unwrap();
let mut pk = ExtendedPubKey::from_private(&sk);
// Derive keys, checking hardened and non-hardened derivation
for &num in path.iter() {
sk = sk.ckd_priv(num).unwrap();
match num {
Normal(_) => {
let pk2 = pk.ckd_pub(num).unwrap();
pk = ExtendedPubKey::from_private(&sk);
assert_eq!(pk, pk2);
}
Hardened(_) => {
pk = ExtendedPubKey::from_private(&sk);
}
}
}
// Check result against expected base58
assert_eq!(sk.to_base58check().as_slice(), expected_sk);
assert_eq!(pk.to_base58check().as_slice(), expected_pk);
// Check decoded base58 against result
let decoded_sk = FromBase58::from_base58check(expected_sk);
let decoded_pk = FromBase58::from_base58check(expected_pk);
assert_eq!(Ok(sk), decoded_sk);
assert_eq!(Ok(pk), decoded_pk);
}
#[test]
fn test_vector_1() {
let seed = "000102030405060708090a0b0c0d0e0f".from_hex().unwrap();
// m
test_path(Bitcoin, seed.as_slice(), [],
"xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi",
"xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8");
// m/0h
test_path(Bitcoin, seed.as_slice(), [Hardened(0)],
"xprv9uHRZZhk6KAJC1avXpDAp4MDc3sQKNxDiPvvkX8Br5ngLNv1TxvUxt4cV1rGL5hj6KCesnDYUhd7oWgT11eZG7XnxHrnYeSvkzY7d2bhkJ7",
"xpub68Gmy5EdvgibQVfPdqkBBCHxA5htiqg55crXYuXoQRKfDBFA1WEjWgP6LHhwBZeNK1VTsfTFUHCdrfp1bgwQ9xv5ski8PX9rL2dZXvgGDnw");
// m/0h/1
test_path(Bitcoin, seed.as_slice(), [Hardened(0), Normal(1)],
"xprv9wTYmMFdV23N2TdNG573QoEsfRrWKQgWeibmLntzniatZvR9BmLnvSxqu53Kw1UmYPxLgboyZQaXwTCg8MSY3H2EU4pWcQDnRnrVA1xe8fs",
"xpub6ASuArnXKPbfEwhqN6e3mwBcDTgzisQN1wXN9BJcM47sSikHjJf3UFHKkNAWbWMiGj7Wf5uMash7SyYq527Hqck2AxYysAA7xmALppuCkwQ");
// m/0h/1/2h
test_path(Bitcoin, seed.as_slice(), [Hardened(0), Normal(1), Hardened(2)],
"xprv9z4pot5VBttmtdRTWfWQmoH1taj2axGVzFqSb8C9xaxKymcFzXBDptWmT7FwuEzG3ryjH4ktypQSAewRiNMjANTtpgP4mLTj34bhnZX7UiM",
"xpub6D4BDPcP2GT577Vvch3R8wDkScZWzQzMMUm3PWbmWvVJrZwQY4VUNgqFJPMM3No2dFDFGTsxxpG5uJh7n7epu4trkrX7x7DogT5Uv6fcLW5");
// m/0h/1/2h/2
test_path(Bitcoin, seed.as_slice(), [Hardened(0), Normal(1), Hardened(2), Normal(2)],
"xprvA2JDeKCSNNZky6uBCviVfJSKyQ1mDYahRjijr5idH2WwLsEd4Hsb2Tyh8RfQMuPh7f7RtyzTtdrbdqqsunu5Mm3wDvUAKRHSC34sJ7in334",
"xpub6FHa3pjLCk84BayeJxFW2SP4XRrFd1JYnxeLeU8EqN3vDfZmbqBqaGJAyiLjTAwm6ZLRQUMv1ZACTj37sR62cfN7fe5JnJ7dh8zL4fiyLHV");
// m/0h/1/2h/2/1000000000
test_path(Bitcoin, seed.as_slice(), [Hardened(0), Normal(1), Hardened(2), Normal(2), Normal(1000000000)],
"xprvA41z7zogVVwxVSgdKUHDy1SKmdb533PjDz7J6N6mV6uS3ze1ai8FHa8kmHScGpWmj4WggLyQjgPie1rFSruoUihUZREPSL39UNdE3BBDu76",
"xpub6H1LXWLaKsWFhvm6RVpEL9P4KfRZSW7abD2ttkWP3SSQvnyA8FSVqNTEcYFgJS2UaFcxupHiYkro49S8yGasTvXEYBVPamhGW6cFJodrTHy");
}
#[test]
fn test_vector_2() {
let seed = "fffcf9f6f3f0edeae7e4e1dedbd8d5d2cfccc9c6c3c0bdbab7b4b1aeaba8a5a29f9c999693908d8a8784817e7b7875726f6c696663605d5a5754514e4b484542".from_hex().unwrap();
// m
test_path(Bitcoin, seed.as_slice(), [],
"xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U",
"xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB");
// m/0
test_path(Bitcoin, seed.as_slice(), [Normal(0)],
"xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt",
"xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH");
// m/0/2147483647h
test_path(Bitcoin, seed.as_slice(), [Normal(0), Hardened(2147483647)],
"xprv9wSp6B7kry3Vj9m1zSnLvN3xH8RdsPP1Mh7fAaR7aRLcQMKTR2vidYEeEg2mUCTAwCd6vnxVrcjfy2kRgVsFawNzmjuHc2YmYRmagcEPdU9",
"xpub6ASAVgeehLbnwdqV6UKMHVzgqAG8Gr6riv3Fxxpj8ksbH9ebxaEyBLZ85ySDhKiLDBrQSARLq1uNRts8RuJiHjaDMBU4Zn9h8LZNnBC5y4a");
// m/0/2147483647h/1
test_path(Bitcoin, seed.as_slice(), [Normal(0), Hardened(2147483647), Normal(1)],
"xprv9zFnWC6h2cLgpmSA46vutJzBcfJ8yaJGg8cX1e5StJh45BBciYTRXSd25UEPVuesF9yog62tGAQtHjXajPPdbRCHuWS6T8XA2ECKADdw4Ef",
"xpub6DF8uhdarytz3FWdA8TvFSvvAh8dP3283MY7p2V4SeE2wyWmG5mg5EwVvmdMVCQcoNJxGoWaU9DCWh89LojfZ537wTfunKau47EL2dhHKon");
// m/0/2147483647h/1/2147483646h
test_path(Bitcoin, seed.as_slice(), [Normal(0), Hardened(2147483647), Normal(1), Hardened(2147483646)],
"xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc",
"xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL");
// m/0/2147483647h/1/2147483646h/2
test_path(Bitcoin, seed.as_slice(), [Normal(0), Hardened(2147483647), Normal(1), Hardened(2147483646), Normal(2)],
"xprvA2nrNbFZABcdryreWet9Ea4LvTJcGsqrMzxHx98MMrotbir7yrKCEXw7nadnHM8Dq38EGfSh6dqA9QWTyefMLEcBYJUuekgW4BYPJcr9E7j",
"xpub6FnCn6nSzZAw5Tw7cgR9bi15UV96gLZhjDstkXXxvCLsUXBGXPdSnLFbdpq8p9HmGsApME5hQTZ3emM2rnY5agb9rXpVGyy3bdW6EEgAtqt");
}
#[test]
pub fn encode_decode_childnumber() { | impl_array_newtype!(Fingerprint, u8, 4)
impl_array_newtype_show!(Fingerprint)
impl_array_newtype_encodable!(Fingerprint, u8, 4)
| random_line_split |
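Editorial note, not part of the dataset rows: the identifier() and fingerprint() methods in the rows above compute HASH160 (SHA256 followed by RIPEMD160) of the compressed public key, and the fingerprint is the first four bytes of that hash. A minimal standalone sketch, assuming the RustCrypto sha2 and ripemd crates; the function names are hypothetical.

use ripemd::Ripemd160;
use sha2::{Digest, Sha256};

// HASH160 of a compressed public key: SHA256 first, then RIPEMD160.
fn hash160(compressed_pubkey: &[u8]) -> [u8; 20] {
    let sha = Sha256::digest(compressed_pubkey);
    let rip = Ripemd160::digest(sha);
    let mut out = [0u8; 20];
    out.copy_from_slice(&rip);
    out
}

// The BIP32 fingerprint is the first four bytes of the identifier.
fn fingerprint(compressed_pubkey: &[u8]) -> [u8; 4] {
    let id = hash160(compressed_pubkey);
    [id[0], id[1], id[2], id[3]]
}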