Dataset columns:
  file_name   large_string   lengths 4 to 69
  prefix      large_string   lengths 0 to 26.7k
  suffix      large_string   lengths 0 to 24.8k
  middle      large_string   lengths 0 to 2.12k
  fim_type    large_string   4 classes
web.rs
use std::path::PathBuf; use std::rc::Rc; use std::cell::RefCell; use std::collections::BTreeMap; use std::fs::File; use std::io::prelude::*; use std::ffi::OsStr; use std::sync::Arc; use std::time::{Duration as SDuration, Instant}; use crate::utils::{print_error_and_causes, FutureExt as _, ResultExt as ResultExt2}; use crate::HeaterControlMode; use failure::{Error, ResultExt, bail}; use crate::Shared; use crate::DataLogEntry; use crate::TSDataLogEntry; use file_db::{create_intervall_filtermap, TimestampedMethods}; use hyper::StatusCode; use hyper::server::{Http, NewService, Request, Response, Server, Service}; use hyper::header; use futures::future::{FutureExt as _, TryFutureExt}; // for conversion use futures01::future::{self, Future}; use futures01; use futures01::Stream; use handlebars::Handlebars; use tokio_inotify::AsyncINotify; use rmp_serde::{Deserializer, Serializer}; use serde::{Deserialize, Serialize}; use serde_derive::{Deserialize, Serialize}; use chrono::NaiveDate; use chrono::NaiveDateTime; use chrono::{Duration, Timelike}; pub fn make_web_server(shared: &Shared) -> Result<Server<HelloWorldSpawner, ::hyper::Body>, Error> { let assets_folder: PathBuf = ::std::fs::canonicalize(std::env::var("WEBASSETS_FOLDER") .context("Environment variable WEBASSETS_FOLDER must be set.")?)?; if!assets_folder.is_dir() { bail!( "WEBASSETS_FOLDER not found ({})", assets_folder.to_string_lossy() ); } let index_html = assets_folder.join("index.html"); if!index_html.is_file() { bail!("Missing index.html in WEBASSETS_FOLDER."); } let template_registry = Rc::new(RefCell::new(Handlebars::new())); let addr = "0.0.0.0:12345".parse().unwrap(); let server = Http::new() .bind( &addr, HelloWorldSpawner { shared: shared.clone(), template_registry: template_registry.clone(), assets_folder: Rc::new(assets_folder.clone()), }, ) .unwrap(); // handlebars template template_registry .borrow_mut() .register_template_file("index.html", &index_html) .with_context(|_e| { format!("Cannot compile {}", &index_html.to_string_lossy()) })?; // todo find all other.html files in the folder // React live on asset changes let path_notify = AsyncINotify::init(&server.handle())?; const IN_CLOSE_WRITE: u32 = 8; path_notify .add_watch(&assets_folder, IN_CLOSE_WRITE) .context("Web server can not watch the webassets folder for changes.")?; let template_registry1 = Rc::clone(&template_registry); let webassets_updater = path_notify.for_each(move |_event| { if _event.name.extension().unwrap_or(OsStr::new("")) == "html" { template_registry1 .try_borrow_mut() .map(|mut registry| { registry .register_template_file( &_event.name.to_string_lossy(), assets_folder.join(&_event.name), ) .with_context(|_e| { format!("Cannot compile {}", &_event.name.to_string_lossy()) }) .print_error_and_causes(); }) .print_error_and_causes(); } future::ok(()) }); server .handle() .spawn(webassets_updater.map_err(|e| print_error_and_causes(e) )); Ok(server) } pub struct HelloWorldSpawner { shared: Shared, template_registry: Rc<RefCell<Handlebars>>, assets_folder: Rc<PathBuf>, } impl NewService for HelloWorldSpawner { type Request = Request; type Response = Response; type Error = ::hyper::Error; type Instance = HelloWorld; fn new_service(&self) -> Result<Self::Instance, ::std::io::Error> { Ok(HelloWorld { shared: async_std::sync::Arc::clone(&self.shared), template_registry: Rc::clone(&self.template_registry), assets_folder: Rc::clone(&self.assets_folder), }) } } pub struct HelloWorld { shared: Shared, template_registry: Rc<RefCell<Handlebars>>, assets_folder: 
Rc<PathBuf>, } type HandlerResult = Box<dyn Future<Item = Response, Error = ::hyper::Error>>; impl Service for HelloWorld { // boilerplate hooking up hyper's server types type Request = Request; type Response = Response; type Error = ::hyper::Error; // The future representing the eventual Response your call will // resolve to. This can change to whatever Future you need. type Future = HandlerResult; fn call(&self, _req: Request) -> Self::Future { let mut path_segments = _req.path().split("/").skip(1); let response_body = match path_segments.next() { Some("") | Some("index.html") => self.indexhtml(), Some("assets") => self.serve_asset(path_segments), Some("history") => self.serve_history(path_segments.next()), Some("dates") => self.serve_available_dates(), Some("current") => self.serve_current_temperatures(), Some("set_heater_control_strategy") if _req.query().is_some() => { self.set_heater_control_strategy(_req.query().unwrap()) } _ => make404(), }; response_body } } impl HelloWorld { fn indexhtml(&self) -> HandlerResult { let template_registry = Rc::clone(&self.template_registry); box_and_convert_error(future::lazy(move || { let data: BTreeMap<String, String> = BTreeMap::new(); let resp = template_registry .borrow() .render("index.html", &data) .map_err(|err| ::failure::Context::new(format!("{}", err)))?; Ok(resp).map(str_to_response) })) } fn serve_asset<'a, I: Iterator<Item = &'a str>>(&self, mut path_segments: I) -> HandlerResult { match path_segments.next() { Some(filename) => { let path = self.assets_folder.join(filename); box_and_convert_error(future::lazy(move || { if path.is_file()
else { Err(::failure::err_msg("Unknown asset")) } })) } None => make404(), } } fn serve_history<'a>(&self, date: Option<&'a str>) -> HandlerResult { match NaiveDate::parse_from_str(date.unwrap_or("nodate"), "%Y-%m-%d") { Ok(date) => { let shared = self.shared.clone(); let every_3_minutes = create_intervall_filtermap( Duration::minutes(3), |data: &TSDataLogEntry| JsData::from(data), 0.25, ); use file_db::Key; struct CachedAndFilteredMarker; impl Key for CachedAndFilteredMarker { type Value = Vec<u8>; } let fut = async move { let serialized = shared .db .custom_cached_by_chunk_key_async::<CachedAndFilteredMarker>( date.into(), Box::new(move |data: &[::file_db::Timestamped<DataLogEntry>]| { let as_vec: Vec<_> = every_3_minutes(data); let mut buf = Vec::with_capacity(0); as_vec .serialize(&mut Serializer::new(&mut buf)) .print_error_and_causes(); buf }), ).await?; let resp = Response::new() .with_header(header::ContentLength(serialized.len() as u64)) .with_header(header::ContentType(::hyper::mime::APPLICATION_MSGPACK)) // TODO: Performance by using stream and without copy .with_body((*serialized).clone()); Ok(resp) }; box_and_convert_error(fut.boxed().compat()) } Err(_err) => make404(), } } fn serve_available_dates(&self) -> HandlerResult { let shared = async_std::sync::Arc::clone(&self.shared); let fut = async move { let datesvec = shared.db.get_non_empty_chunk_keys_async().await?; let json_str = serde_json::to_string(&datesvec)?; let resp = Response::new() .with_header(header::ContentLength(json_str.len() as u64)) .with_header(header::ContentType(::hyper::mime::APPLICATION_JSON)) .with_body(json_str); Ok(resp) }; box_and_convert_error(fut.boxed().compat()) } fn serve_current_temperatures(&self) -> HandlerResult { let shared = async_std::sync::Arc::clone(&self.shared); let fut = async move { let data = DataLogEntry::new_from_current(&shared).await; #[derive(Serialize)] struct Current { block : JsData, control_strategy : String, } let data = Current { block : (&data).into(), // do better control_strategy : format!("{:?}", shared.control_strategy.load()), }; let json_str = serde_json::to_string(&data)?; let resp = Response::new() .with_header(header::ContentLength(json_str.len() as u64)) .with_header(header::ContentType(::hyper::mime::APPLICATION_JSON)) .with_body(json_str); Ok(resp) }; box_and_convert_error(fut.boxed().compat()) } fn set_heater_control_strategy(&self, query: &str) -> HandlerResult { let shared = async_std::sync::Arc::clone(&self.shared); let mut action = None; for k_v in query.split('&') { if!k_v.contains("=") { continue; } let mut k_v = k_v.split("="); if k_v.next() == Some("action") { action = k_v.next(); } } let answer = match action { Some("on") => { shared.control_strategy.store(HeaterControlMode::ForceOn{ until: Instant::now() + SDuration::from_secs(3600 * 12) }); "set: on" } Some("off") => { shared.control_strategy.store(HeaterControlMode::ForceOff{ until: Instant::now() + SDuration::from_secs(3600 * 12) }); "set: off" } Some("auto") => { shared.control_strategy.store(HeaterControlMode::Auto); "set: auto" } _ => { "do nothing" } }; box_and_convert_error(future::lazy(move || Ok(answer.to_string()).map(str_to_response))) } } fn str_to_response(body: String) -> Response { Response::new() .with_header(header::ContentLength(body.len() as u64)) .with_body(body) } fn box_and_convert_error<F>(result: F) -> HandlerResult where F: Future<Item = Response, Error = Error> + Sized +'static, { Box::new(result.then(|result| { let f = match result { Ok(response) => response, Err(err) => 
{ use std::fmt::Write; let mut buf = String::with_capacity(1000); for (i, cause) in err.iter_chain().enumerate() { if i == 0 { write!(buf, "<p>{}</p>", cause).unwrap(); } else { write!(buf, "<p> &gt; caused by: {} </p>", cause).unwrap(); } } write!(buf, "<pre>{}</pre>", err.backtrace()).unwrap(); let body = format!( r#"<!doctype html> <html lang="en"><head> <meta charset="utf-8"> <title>505 Internal Server Error</title> </head> <body> <h1>505 Internal Server Error</h1> {} </body></html>"#, buf ); print_error_and_causes(err); Response::new() .with_status(StatusCode::InternalServerError) .with_header(header::ContentLength(body.len() as u64)) .with_body(body) } }; Ok(f) })) } fn make404() -> HandlerResult { Box::new(future::lazy(|| { let body = format!( r#"<!doctype html> <html lang="en"><head> <meta charset="utf-8"> <title>404 Not Found</title> </head> <body> <h1>404 Not Found</h1> </body></html>"# ); Ok( Response::new() .with_status(StatusCode::NotFound) .with_header(header::ContentLength(body.len() as u64)) .with_body(body), ) })) } #[derive(Serialize, Deserialize, Clone)] pub struct JsData { pub time: String, pub high: f64, pub highmid: f64, pub mid: f64, pub midlow: f64, pub low: f64, pub outside: f64, pub heater_state: u8, pub reference: f64, } impl<'a> From<&'a TSDataLogEntry> for JsData { fn from(d: &TSDataLogEntry) -> JsData { JsData { time: d.time().format("%Y-%m-%dT%H:%M:%S+0000").to_string(), high: d.celsius[0] as f64 / 100.0, highmid: d.celsius[1] as f64 / 100.0, mid: d.celsius[2] as f64 / 100.0, midlow: d.celsius[3] as f64 / 100.0, low: d.celsius[4] as f64 / 100.0, outside: d.celsius[5] as f64 / 100.0, heater_state: if d.heater_state { 1 } else { 0 }, reference: d.reference_celsius.unwrap_or(0) as f64 / 100.0, } } }
{ let mut f = File::open(path).unwrap(); let mut buffer = String::new(); f.read_to_string(&mut buffer).unwrap(); Ok(buffer).map(str_to_response) }
conditional_block
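For orientation: in this row the masked conditional_block is the `if path.is_file()` branch of `serve_asset` — the prefix ends right after the condition, the middle supplies the true-branch body, and the suffix begins with the `else` branch. The helper below is only a standalone sketch of that logic using std (the name `read_asset` and the error mapping are illustrative, not from the row, which uses `unwrap()` and `failure::err_msg`):

    use std::fs::File;
    use std::io::Read;
    use std::path::Path;

    // Sketch of what the masked middle contributes: if the asset path is a
    // regular file, read it into a String; otherwise report "Unknown asset",
    // as the suffix's else-branch does.
    fn read_asset(path: &Path) -> Result<String, String> {
        if path.is_file() {
            let mut f = File::open(path).map_err(|e| e.to_string())?;
            let mut buffer = String::new();
            f.read_to_string(&mut buffer).map_err(|e| e.to_string())?;
            Ok(buffer)
        } else {
            Err("Unknown asset".to_string())
        }
    }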
web.rs
use std::path::PathBuf; use std::rc::Rc; use std::cell::RefCell; use std::collections::BTreeMap; use std::fs::File; use std::io::prelude::*; use std::ffi::OsStr; use std::sync::Arc; use std::time::{Duration as SDuration, Instant}; use crate::utils::{print_error_and_causes, FutureExt as _, ResultExt as ResultExt2}; use crate::HeaterControlMode; use failure::{Error, ResultExt, bail}; use crate::Shared; use crate::DataLogEntry; use crate::TSDataLogEntry; use file_db::{create_intervall_filtermap, TimestampedMethods}; use hyper::StatusCode; use hyper::server::{Http, NewService, Request, Response, Server, Service}; use hyper::header; use futures::future::{FutureExt as _, TryFutureExt}; // for conversion use futures01::future::{self, Future}; use futures01; use futures01::Stream; use handlebars::Handlebars; use tokio_inotify::AsyncINotify; use rmp_serde::{Deserializer, Serializer}; use serde::{Deserialize, Serialize}; use serde_derive::{Deserialize, Serialize}; use chrono::NaiveDate; use chrono::NaiveDateTime; use chrono::{Duration, Timelike}; pub fn make_web_server(shared: &Shared) -> Result<Server<HelloWorldSpawner, ::hyper::Body>, Error> { let assets_folder: PathBuf = ::std::fs::canonicalize(std::env::var("WEBASSETS_FOLDER") .context("Environment variable WEBASSETS_FOLDER must be set.")?)?; if!assets_folder.is_dir() { bail!( "WEBASSETS_FOLDER not found ({})", assets_folder.to_string_lossy() ); } let index_html = assets_folder.join("index.html"); if!index_html.is_file() { bail!("Missing index.html in WEBASSETS_FOLDER."); } let template_registry = Rc::new(RefCell::new(Handlebars::new())); let addr = "0.0.0.0:12345".parse().unwrap(); let server = Http::new() .bind( &addr, HelloWorldSpawner { shared: shared.clone(), template_registry: template_registry.clone(), assets_folder: Rc::new(assets_folder.clone()), }, ) .unwrap(); // handlebars template template_registry .borrow_mut() .register_template_file("index.html", &index_html) .with_context(|_e| { format!("Cannot compile {}", &index_html.to_string_lossy()) })?; // todo find all other.html files in the folder // React live on asset changes let path_notify = AsyncINotify::init(&server.handle())?; const IN_CLOSE_WRITE: u32 = 8; path_notify .add_watch(&assets_folder, IN_CLOSE_WRITE) .context("Web server can not watch the webassets folder for changes.")?; let template_registry1 = Rc::clone(&template_registry); let webassets_updater = path_notify.for_each(move |_event| { if _event.name.extension().unwrap_or(OsStr::new("")) == "html" { template_registry1 .try_borrow_mut() .map(|mut registry| { registry .register_template_file( &_event.name.to_string_lossy(), assets_folder.join(&_event.name), ) .with_context(|_e| { format!("Cannot compile {}", &_event.name.to_string_lossy()) }) .print_error_and_causes(); }) .print_error_and_causes(); } future::ok(()) }); server .handle() .spawn(webassets_updater.map_err(|e| print_error_and_causes(e) )); Ok(server) } pub struct HelloWorldSpawner { shared: Shared, template_registry: Rc<RefCell<Handlebars>>, assets_folder: Rc<PathBuf>, } impl NewService for HelloWorldSpawner { type Request = Request; type Response = Response; type Error = ::hyper::Error; type Instance = HelloWorld; fn new_service(&self) -> Result<Self::Instance, ::std::io::Error> { Ok(HelloWorld { shared: async_std::sync::Arc::clone(&self.shared), template_registry: Rc::clone(&self.template_registry), assets_folder: Rc::clone(&self.assets_folder), }) } } pub struct HelloWorld { shared: Shared, template_registry: Rc<RefCell<Handlebars>>, assets_folder: 
Rc<PathBuf>, } type HandlerResult = Box<dyn Future<Item = Response, Error = ::hyper::Error>>; impl Service for HelloWorld { // boilerplate hooking up hyper's server types type Request = Request; type Response = Response; type Error = ::hyper::Error; // The future representing the eventual Response your call will // resolve to. This can change to whatever Future you need. type Future = HandlerResult; fn call(&self, _req: Request) -> Self::Future { let mut path_segments = _req.path().split("/").skip(1); let response_body = match path_segments.next() { Some("") | Some("index.html") => self.indexhtml(), Some("assets") => self.serve_asset(path_segments), Some("history") => self.serve_history(path_segments.next()), Some("dates") => self.serve_available_dates(), Some("current") => self.serve_current_temperatures(), Some("set_heater_control_strategy") if _req.query().is_some() => { self.set_heater_control_strategy(_req.query().unwrap()) } _ => make404(), }; response_body } } impl HelloWorld { fn indexhtml(&self) -> HandlerResult { let template_registry = Rc::clone(&self.template_registry); box_and_convert_error(future::lazy(move || { let data: BTreeMap<String, String> = BTreeMap::new(); let resp = template_registry .borrow() .render("index.html", &data) .map_err(|err| ::failure::Context::new(format!("{}", err)))?; Ok(resp).map(str_to_response) })) } fn serve_asset<'a, I: Iterator<Item = &'a str>>(&self, mut path_segments: I) -> HandlerResult { match path_segments.next() { Some(filename) => { let path = self.assets_folder.join(filename); box_and_convert_error(future::lazy(move || { if path.is_file() { let mut f = File::open(path).unwrap(); let mut buffer = String::new(); f.read_to_string(&mut buffer).unwrap(); Ok(buffer).map(str_to_response) } else { Err(::failure::err_msg("Unknown asset")) } })) } None => make404(), } } fn serve_history<'a>(&self, date: Option<&'a str>) -> HandlerResult { match NaiveDate::parse_from_str(date.unwrap_or("nodate"), "%Y-%m-%d") { Ok(date) => { let shared = self.shared.clone(); let every_3_minutes = create_intervall_filtermap( Duration::minutes(3), |data: &TSDataLogEntry| JsData::from(data), 0.25, ); use file_db::Key; struct CachedAndFilteredMarker; impl Key for CachedAndFilteredMarker { type Value = Vec<u8>; } let fut = async move { let serialized = shared .db .custom_cached_by_chunk_key_async::<CachedAndFilteredMarker>( date.into(), Box::new(move |data: &[::file_db::Timestamped<DataLogEntry>]| { let as_vec: Vec<_> = every_3_minutes(data); let mut buf = Vec::with_capacity(0); as_vec .serialize(&mut Serializer::new(&mut buf)) .print_error_and_causes(); buf }), ).await?; let resp = Response::new() .with_header(header::ContentLength(serialized.len() as u64)) .with_header(header::ContentType(::hyper::mime::APPLICATION_MSGPACK)) // TODO: Performance by using stream and without copy .with_body((*serialized).clone()); Ok(resp) }; box_and_convert_error(fut.boxed().compat()) } Err(_err) => make404(), } } fn serve_available_dates(&self) -> HandlerResult { let shared = async_std::sync::Arc::clone(&self.shared); let fut = async move { let datesvec = shared.db.get_non_empty_chunk_keys_async().await?; let json_str = serde_json::to_string(&datesvec)?; let resp = Response::new() .with_header(header::ContentLength(json_str.len() as u64)) .with_header(header::ContentType(::hyper::mime::APPLICATION_JSON)) .with_body(json_str); Ok(resp) }; box_and_convert_error(fut.boxed().compat()) } fn serve_current_temperatures(&self) -> HandlerResult { let shared = 
async_std::sync::Arc::clone(&self.shared); let fut = async move { let data = DataLogEntry::new_from_current(&shared).await; #[derive(Serialize)] struct Current { block : JsData, control_strategy : String, } let data = Current { block : (&data).into(), // do better control_strategy : format!("{:?}", shared.control_strategy.load()), }; let json_str = serde_json::to_string(&data)?; let resp = Response::new() .with_header(header::ContentLength(json_str.len() as u64)) .with_header(header::ContentType(::hyper::mime::APPLICATION_JSON)) .with_body(json_str); Ok(resp) }; box_and_convert_error(fut.boxed().compat()) } fn set_heater_control_strategy(&self, query: &str) -> HandlerResult { let shared = async_std::sync::Arc::clone(&self.shared); let mut action = None; for k_v in query.split('&') { if!k_v.contains("=") { continue; } let mut k_v = k_v.split("="); if k_v.next() == Some("action") { action = k_v.next(); } } let answer = match action { Some("on") => { shared.control_strategy.store(HeaterControlMode::ForceOn{ until: Instant::now() + SDuration::from_secs(3600 * 12) }); "set: on" } Some("off") => { shared.control_strategy.store(HeaterControlMode::ForceOff{ until: Instant::now() + SDuration::from_secs(3600 * 12) }); "set: off" } Some("auto") => { shared.control_strategy.store(HeaterControlMode::Auto); "set: auto" } _ => { "do nothing" } }; box_and_convert_error(future::lazy(move || Ok(answer.to_string()).map(str_to_response))) } } fn str_to_response(body: String) -> Response
fn box_and_convert_error<F>(result: F) -> HandlerResult where F: Future<Item = Response, Error = Error> + Sized +'static, { Box::new(result.then(|result| { let f = match result { Ok(response) => response, Err(err) => { use std::fmt::Write; let mut buf = String::with_capacity(1000); for (i, cause) in err.iter_chain().enumerate() { if i == 0 { write!(buf, "<p>{}</p>", cause).unwrap(); } else { write!(buf, "<p> &gt; caused by: {} </p>", cause).unwrap(); } } write!(buf, "<pre>{}</pre>", err.backtrace()).unwrap(); let body = format!( r#"<!doctype html> <html lang="en"><head> <meta charset="utf-8"> <title>505 Internal Server Error</title> </head> <body> <h1>505 Internal Server Error</h1> {} </body></html>"#, buf ); print_error_and_causes(err); Response::new() .with_status(StatusCode::InternalServerError) .with_header(header::ContentLength(body.len() as u64)) .with_body(body) } }; Ok(f) })) } fn make404() -> HandlerResult { Box::new(future::lazy(|| { let body = format!( r#"<!doctype html> <html lang="en"><head> <meta charset="utf-8"> <title>404 Not Found</title> </head> <body> <h1>404 Not Found</h1> </body></html>"# ); Ok( Response::new() .with_status(StatusCode::NotFound) .with_header(header::ContentLength(body.len() as u64)) .with_body(body), ) })) } #[derive(Serialize, Deserialize, Clone)] pub struct JsData { pub time: String, pub high: f64, pub highmid: f64, pub mid: f64, pub midlow: f64, pub low: f64, pub outside: f64, pub heater_state: u8, pub reference: f64, } impl<'a> From<&'a TSDataLogEntry> for JsData { fn from(d: &TSDataLogEntry) -> JsData { JsData { time: d.time().format("%Y-%m-%dT%H:%M:%S+0000").to_string(), high: d.celsius[0] as f64 / 100.0, highmid: d.celsius[1] as f64 / 100.0, mid: d.celsius[2] as f64 / 100.0, midlow: d.celsius[3] as f64 / 100.0, low: d.celsius[4] as f64 / 100.0, outside: d.celsius[5] as f64 / 100.0, heater_state: if d.heater_state { 1 } else { 0 }, reference: d.reference_celsius.unwrap_or(0) as f64 / 100.0, } } }
{ Response::new() .with_header(header::ContentLength(body.len() as u64)) .with_body(body) }
identifier_body
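Here the masked identifier_body is the entire body of `str_to_response`; joining prefix, middle, and suffix reproduces the function below (hyper 0.11 builder API, exactly as it appears in this row — the imports are the ones the file itself uses):

    use hyper::header;
    use hyper::server::Response;

    // Reassembled from this row: the middle column is the function body.
    fn str_to_response(body: String) -> Response {
        Response::new()
            .with_header(header::ContentLength(body.len() as u64))
            .with_body(body)
    }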
lib.rs
#[macro_use] extern crate serde_derive; extern crate argon2; extern crate libc; extern crate liner; #[macro_use] extern crate failure; extern crate pkgutils; extern crate rand; extern crate redoxfs; extern crate syscall; extern crate termion; mod config; mod disk_wrapper; pub use config::Config; pub use config::file::FileConfig; pub use config::package::PackageConfig; use disk_wrapper::DiskWrapper; use failure::{Error, err_msg}; use rand::{RngCore, rngs::OsRng}; use redoxfs::{unmount_path, Disk, DiskIo, FileSystem}; use termion::input::TermRead; use pkgutils::{Repo, Package}; use std::{ collections::BTreeMap, env, fs, io::{self, Seek, SeekFrom, Write}, path::Path, sync::mpsc::channel, time::{SystemTime, UNIX_EPOCH}, thread, }; pub(crate) type Result<T> = std::result::Result<T, Error>; const REMOTE: &'static str = "https://static.redox-os.org/pkg"; fn get_target() -> String { env::var("TARGET").unwrap_or( option_env!("TARGET").map_or( "x86_64-unknown-redox".to_string(), |x| x.to_string() ) ) } /// Converts a password to a serialized argon2rs hash, understandable /// by redox_users. If the password is blank, the hash is blank. fn hash_password(password: &str) -> Result<String> { if password!= "" { let salt = format!("{:X}", OsRng.next_u64()); let config = argon2::Config::default(); let hash = argon2::hash_encoded(password.as_bytes(), salt.as_bytes(), &config)?; Ok(hash) } else { Ok("".to_string()) } } fn syscall_error(err: syscall::Error) -> io::Error { io::Error::from_raw_os_error(err.errno) } /// Returns a password collected from the user (plaintext) fn prompt_password(prompt: &str, confirm_prompt: &str) -> Result<String> { let stdin = io::stdin(); let mut stdin = stdin.lock(); let stdout = io::stdout(); let mut stdout = stdout.lock(); print!("{}", prompt); let password = stdin.read_passwd(&mut stdout)?; print!("\n{}", confirm_prompt); let confirm_password = stdin.read_passwd(&mut stdout)?; // Note: Actually comparing two Option<String> values if confirm_password == password { Ok(password.unwrap_or("".to_string())) } else { Err(err_msg("passwords do not match")) } } //TODO: error handling fn install_packages<S: AsRef<str>>(config: &Config, dest: &str, cookbook: Option<S>) { let target = &get_target(); let mut repo = Repo::new(target); repo.add_remote(REMOTE); if let Some(cookbook) = cookbook { let dest_pkg = format!("{}/pkg", dest); if! 
Path::new(&dest_pkg).exists() { fs::create_dir(&dest_pkg).unwrap(); } for (packagename, package) in &config.packages { let pkgar_path = format!("{}/{}/repo/{}/{}.pkgar", env::current_dir().unwrap().to_string_lossy(), cookbook.as_ref(), target, packagename); let from_remote = match (config.general.cooking, package) { (Some(true), PackageConfig::Empty) => true, (Some(true), PackageConfig::Spec { version: None, git: None, path: None }) => true, _ => false }; if from_remote { println!("Installing package from remote: {}", packagename); repo.fetch(&packagename).unwrap().install(dest).unwrap(); } else if Path::new(&pkgar_path).exists() { println!("Installing package from local repo: {}", packagename); let public_path = format!("{}/{}/build/id_ed25519.pub.toml", env::current_dir().unwrap().to_string_lossy(), cookbook.as_ref()); pkgar::extract(&public_path, &pkgar_path, dest).unwrap(); let head_path = format!("{}/{}.pkgar_head", dest_pkg, packagename); pkgar::split(&public_path, &pkgar_path, &head_path, Option::<&str>::None).unwrap(); } else { println!("Installing package tar.gz from local repo: {}", packagename); let path = format!("{}/{}/repo/{}/{}.tar.gz", env::current_dir().unwrap().to_string_lossy(), cookbook.as_ref(), target, packagename); Package::from_path(&path).unwrap().install(dest).unwrap(); } } } else { for (packagename, _package) in &config.packages { println!("Installing package from remote: {}", packagename); repo.fetch(&packagename).unwrap().install(dest).unwrap(); } } } pub fn install_dir<P: AsRef<Path>, S: AsRef<str>>(config: Config, output_dir: P, cookbook: Option<S>) -> Result<()> { //let mut context = liner::Context::new(); macro_rules! prompt { ($dst:expr, $def:expr, $($arg:tt)*) => (if config.general.prompt { Err(io::Error::new( io::ErrorKind::Other, "prompt not currently supported" )) // match unwrap_or_prompt($dst, &mut context, &format!($($arg)*)) { // Ok(res) => if res.is_empty() { // Ok($def) // } else { // Ok(res) // }, // Err(err) => Err(err) // } } else { Ok($dst.unwrap_or($def)) }) } let output_dir = output_dir.as_ref(); let output_dir = output_dir.to_owned(); install_packages(&config, output_dir.to_str().unwrap(), cookbook); for file in config.files { file.create(&output_dir)?; } let mut passwd = String::new(); let mut shadow = String::new(); let mut next_uid = 1000; for (username, user) in config.users { // plaintext let password = if let Some(password) = user.password { password } else if config.general.prompt { prompt_password( &format!("{}: enter password: ", username), &format!("{}: confirm password: ", username))? 
} else { String::new() }; let uid = user.uid.unwrap_or(next_uid); if uid >= next_uid { next_uid = uid + 1; } let gid = user.gid.unwrap_or(uid); let name = prompt!(user.name, username.clone(), "{}: name (GECOS) [{}]: ", username, username)?; let home = prompt!(user.home, format!("/home/{}", username), "{}: home [/home/{}]: ", username, username)?; let shell = prompt!(user.shell, "/bin/ion".to_string(), "{}: shell [/bin/ion]: ", username)?; println!("Adding user {}:", username); println!("\tPassword: {}", password); println!("\tUID: {}", uid); println!("\tGID: {}", gid); println!("\tName: {}", name); println!("\tHome: {}", home); println!("\tShell: {}", shell); FileConfig { path: home.clone(), data: String::new(), symlink: false, directory: true, mode: Some(0o0700), uid: Some(uid), gid: Some(gid), recursive_chown: true, }.create(&output_dir)?; let password = hash_password(&password)?; passwd.push_str(&format!("{};{};{};{};file:{};file:{}\n", username, uid, gid, name, home, shell)); shadow.push_str(&format!("{};{}\n", username, password)); } if!passwd.is_empty() { FileConfig { path: "/etc/passwd".to_string(), data: passwd, symlink: false, directory: false, // Take defaults mode: None, uid: None, gid: None, recursive_chown: false, }.create(&output_dir)?; } if!shadow.is_empty() { FileConfig { path: "/etc/shadow".to_string(), data: shadow, symlink: false, directory: false, mode: Some(0o0600), uid: Some(0), gid: Some(0), recursive_chown: false, }.create(&output_dir)?; } Ok(()) } pub fn with_redoxfs<D, T, F>(disk: D, password_opt: Option<&[u8]>, callback: F) -> Result<T> where D: Disk + Send +'static, F: FnOnce(&Path) -> Result<T> { let mount_path = if cfg!(target_os = "redox") { "file/redox_installer" } else { "/tmp/redox_installer" }; if cfg!(not(target_os = "redox")) { if! Path::new(mount_path).exists() { fs::create_dir(mount_path)?; } } let ctime = SystemTime::now().duration_since(UNIX_EPOCH)?; let fs = FileSystem::create( disk, password_opt, ctime.as_secs(), ctime.subsec_nanos() ).map_err(syscall_error)?; let (tx, rx) = channel(); let join_handle = thread::spawn(move || { let res = redoxfs::mount( fs, mount_path, |real_path| { tx.send(Ok(real_path.to_owned())).unwrap(); } ); match res { Ok(()) => (), Err(err) => { tx.send(Err(err)).unwrap(); }, }; }); let res = match rx.recv() { Ok(ok) => match ok { Ok(real_path) => callback(&real_path), Err(err) => return Err(err.into()), }, Err(_) => return Err(io::Error::new( io::ErrorKind::NotConnected, "redoxfs thread did not send a result" ).into()), }; unmount_path(mount_path)?; join_handle.join().unwrap(); res } pub fn fetch_bootloaders<S: AsRef<str>>(config: &Config, cookbook: Option<S>, live: bool) -> Result<(Vec<u8>, Vec<u8>)> { //TODO: make it safe to run this concurrently let bootloader_dir = "/tmp/redox_installer_bootloader"; if Path::new(bootloader_dir).exists() { fs::remove_dir_all(&bootloader_dir)?; } fs::create_dir(bootloader_dir)?; let mut bootloader_config = Config::default(); bootloader_config.general = config.general.clone(); bootloader_config.packages.insert("bootloader".to_string(), PackageConfig::default()); install_packages(&bootloader_config, bootloader_dir, cookbook.as_ref()); let boot_dir = Path::new(bootloader_dir).join("boot"); let bios_path = boot_dir.join(if live { "bootloader-live.bios" } else { "bootloader.bios" }); let efi_path = boot_dir.join(if live { "bootloader-live.efi" } else { "bootloader.efi" }); Ok(( if bios_path.exists() { fs::read(bios_path)? } else { Vec::new() }, if efi_path.exists() { fs::read(efi_path)? 
} else { Vec::new() }, )) } //TODO: make bootloaders use Option, dynamically create BIOS and EFI partitions pub fn with_whole_disk<P, F, T>(disk_path: P, bootloader_bios: &[u8], bootloader_efi: &[u8], password_opt: Option<&[u8]>, callback: F) -> Result<T> where P: AsRef<Path>, F: FnOnce(&Path) -> Result<T> { let target = get_target(); let bootloader_efi_name = match target.as_str() { "aarch64-unknown-redox" => "BOOTAA64.EFI", "i686-unknown-redox" => "BOOTIA32.EFI", "x86_64-unknown-redox" => "BOOTX64.EFI", _ => { return Err(format_err!("target '{}' not supported", target)); } }; // Open disk and read metadata eprintln!("Opening disk {}", disk_path.as_ref().display()); let mut disk_file = DiskWrapper::open(disk_path.as_ref())?; let disk_size = disk_file.size(); let block_size = disk_file.block_size() as u64; let gpt_block_size = match block_size { 512 => gpt::disk::LogicalBlockSize::Lb512, _ => { // TODO: support (and test) other block sizes return Err(format_err!("block size {} not supported", block_size)); } }; // Calculate partition offsets let gpt_reserved = 34 * 512; // GPT always reserves 34 512-byte sectors let mibi = 1024 * 1024; // First megabyte of the disk is reserved for BIOS partition, wich includes GPT tables let bios_start = gpt_reserved / block_size; let bios_end = (mibi / block_size) - 1; // Second megabyte of the disk is reserved for EFI partition let efi_start = bios_end + 1; let efi_end = efi_start + (mibi / block_size) - 1; // The rest of the disk is RedoxFS, reserving the GPT table mirror at the end of disk let redoxfs_start = efi_end + 1; let redoxfs_end = ((((disk_size - gpt_reserved) / mibi) * mibi) / block_size) - 1; // Format and install BIOS partition { // Write BIOS bootloader to disk eprintln!("Write bootloader with size {:#x}", bootloader_bios.len()); disk_file.seek(SeekFrom::Start(0))?; disk_file.write_all(&bootloader_bios)?; // Replace MBR tables with protective MBR let mbr_blocks = ((disk_size + block_size - 1) / block_size) - 1; eprintln!("Writing protective MBR with disk blocks {:#x}", mbr_blocks); gpt::mbr::ProtectiveMBR::with_lb_size(mbr_blocks as u32) .update_conservative(&mut disk_file)?; // Open disk, mark it as not initialized let mut gpt_disk = gpt::GptConfig::new() .initialized(false) .writable(true) .logical_block_size(gpt_block_size) .create_from_device(Box::new(&mut disk_file), None)?; // Add BIOS boot partition let mut partitions = BTreeMap::new(); let mut partition_id = 1; partitions.insert(partition_id, gpt::partition::Partition { part_type_guid: gpt::partition_types::BIOS, part_guid: uuid::Uuid::new_v4(), first_lba: bios_start, last_lba: bios_end, flags: 0, // TODO name: "BIOS".to_string(), }); partition_id += 1; // Add EFI boot partition partitions.insert(partition_id, gpt::partition::Partition { part_type_guid: gpt::partition_types::EFI, part_guid: uuid::Uuid::new_v4(), first_lba: efi_start, last_lba: efi_end, flags: 0, // TODO name: "EFI".to_string(), }); partition_id += 1; // Add RedoxFS partition partitions.insert(partition_id, gpt::partition::Partition { //TODO: Use REDOX_REDOXFS type (needs GPT crate changes) part_type_guid: gpt::partition_types::LINUX_FS, part_guid: uuid::Uuid::new_v4(), first_lba: redoxfs_start, last_lba: redoxfs_end, flags: 0, name: "REDOX".to_string(), }); eprintln!("Writing GPT tables: {:#?}", partitions); // Initialize GPT table gpt_disk.update_partitions(partitions)?; // Write partition layout, returning disk file gpt_disk.write()?; } // Format and install EFI partition { let disk_efi_start = efi_start * 
block_size; let disk_efi_end = (efi_end + 1) * block_size; let mut disk_efi = fscommon::StreamSlice::new( &mut disk_file, disk_efi_start, disk_efi_end, )?; eprintln!("Formatting EFI partition with size {:#x}", disk_efi_end - disk_efi_start); fatfs::format_volume(&mut disk_efi, fatfs::FormatVolumeOptions::new())?; eprintln!("Opening EFI partition"); let fs = fatfs::FileSystem::new(&mut disk_efi, fatfs::FsOptions::new())?; eprintln!("Creating EFI directory"); let root_dir = fs.root_dir(); root_dir.create_dir("EFI")?; eprintln!("Creating EFI/BOOT directory"); let efi_dir = root_dir.open_dir("EFI")?; efi_dir.create_dir("BOOT")?; eprintln!("Writing EFI/BOOT/{} file with size {:#x}", bootloader_efi_name, bootloader_efi.len()); let boot_dir = efi_dir.open_dir("BOOT")?; let mut file = boot_dir.create_file(bootloader_efi_name)?; file.truncate()?; file.write_all(&bootloader_efi)?; } // Format and install RedoxFS partition eprintln!("Installing to RedoxFS partition with size {:#x}", (redoxfs_end - redoxfs_start) * block_size); let disk_redoxfs = DiskIo(fscommon::StreamSlice::new( disk_file, redoxfs_start * block_size, (redoxfs_end + 1) * block_size )?);
) } pub fn install<P, S>(config: Config, output: P, cookbook: Option<S>, live: bool) -> Result<()> where P: AsRef<Path>, S: AsRef<str>, { println!("Install {:#?} to {}", config, output.as_ref().display()); if output.as_ref().is_dir() { install_dir(config, output, cookbook) } else { let (bootloader_bios, bootloader_efi) = fetch_bootloaders(&config, cookbook.as_ref(), live)?; with_whole_disk(output, &bootloader_bios, &bootloader_efi, None, move |mount_path| { install_dir(config, mount_path, cookbook) } ) } }
with_redoxfs( disk_redoxfs, password_opt, callback
random_line_split
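In this row the split falls mid-expression: the prefix ends after `disk_redoxfs` is constructed, the middle is the opening of the `with_redoxfs` call, and the suffix begins with the closing parenthesis. Joined around the split point, the tail of `with_whole_disk` reads as follows (a fragment reproduced from the row, not standalone code):

    // Prefix tail + middle + suffix head, joined around the random_line_split:
    let disk_redoxfs = DiskIo(fscommon::StreamSlice::new(
        disk_file,
        redoxfs_start * block_size,
        (redoxfs_end + 1) * block_size,
    )?);
    with_redoxfs(disk_redoxfs, password_opt, callback)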
lib.rs
#[macro_use] extern crate serde_derive; extern crate argon2; extern crate libc; extern crate liner; #[macro_use] extern crate failure; extern crate pkgutils; extern crate rand; extern crate redoxfs; extern crate syscall; extern crate termion; mod config; mod disk_wrapper; pub use config::Config; pub use config::file::FileConfig; pub use config::package::PackageConfig; use disk_wrapper::DiskWrapper; use failure::{Error, err_msg}; use rand::{RngCore, rngs::OsRng}; use redoxfs::{unmount_path, Disk, DiskIo, FileSystem}; use termion::input::TermRead; use pkgutils::{Repo, Package}; use std::{ collections::BTreeMap, env, fs, io::{self, Seek, SeekFrom, Write}, path::Path, sync::mpsc::channel, time::{SystemTime, UNIX_EPOCH}, thread, }; pub(crate) type Result<T> = std::result::Result<T, Error>; const REMOTE: &'static str = "https://static.redox-os.org/pkg"; fn get_target() -> String { env::var("TARGET").unwrap_or( option_env!("TARGET").map_or( "x86_64-unknown-redox".to_string(), |x| x.to_string() ) ) } /// Converts a password to a serialized argon2rs hash, understandable /// by redox_users. If the password is blank, the hash is blank. fn hash_password(password: &str) -> Result<String> { if password!= "" { let salt = format!("{:X}", OsRng.next_u64()); let config = argon2::Config::default(); let hash = argon2::hash_encoded(password.as_bytes(), salt.as_bytes(), &config)?; Ok(hash) } else { Ok("".to_string()) } } fn syscall_error(err: syscall::Error) -> io::Error { io::Error::from_raw_os_error(err.errno) } /// Returns a password collected from the user (plaintext) fn prompt_password(prompt: &str, confirm_prompt: &str) -> Result<String> { let stdin = io::stdin(); let mut stdin = stdin.lock(); let stdout = io::stdout(); let mut stdout = stdout.lock(); print!("{}", prompt); let password = stdin.read_passwd(&mut stdout)?; print!("\n{}", confirm_prompt); let confirm_password = stdin.read_passwd(&mut stdout)?; // Note: Actually comparing two Option<String> values if confirm_password == password { Ok(password.unwrap_or("".to_string())) } else { Err(err_msg("passwords do not match")) } } //TODO: error handling fn install_packages<S: AsRef<str>>(config: &Config, dest: &str, cookbook: Option<S>) { let target = &get_target(); let mut repo = Repo::new(target); repo.add_remote(REMOTE); if let Some(cookbook) = cookbook { let dest_pkg = format!("{}/pkg", dest); if! 
Path::new(&dest_pkg).exists() { fs::create_dir(&dest_pkg).unwrap(); } for (packagename, package) in &config.packages { let pkgar_path = format!("{}/{}/repo/{}/{}.pkgar", env::current_dir().unwrap().to_string_lossy(), cookbook.as_ref(), target, packagename); let from_remote = match (config.general.cooking, package) { (Some(true), PackageConfig::Empty) => true, (Some(true), PackageConfig::Spec { version: None, git: None, path: None }) => true, _ => false }; if from_remote { println!("Installing package from remote: {}", packagename); repo.fetch(&packagename).unwrap().install(dest).unwrap(); } else if Path::new(&pkgar_path).exists() { println!("Installing package from local repo: {}", packagename); let public_path = format!("{}/{}/build/id_ed25519.pub.toml", env::current_dir().unwrap().to_string_lossy(), cookbook.as_ref()); pkgar::extract(&public_path, &pkgar_path, dest).unwrap(); let head_path = format!("{}/{}.pkgar_head", dest_pkg, packagename); pkgar::split(&public_path, &pkgar_path, &head_path, Option::<&str>::None).unwrap(); } else { println!("Installing package tar.gz from local repo: {}", packagename); let path = format!("{}/{}/repo/{}/{}.tar.gz", env::current_dir().unwrap().to_string_lossy(), cookbook.as_ref(), target, packagename); Package::from_path(&path).unwrap().install(dest).unwrap(); } } } else { for (packagename, _package) in &config.packages { println!("Installing package from remote: {}", packagename); repo.fetch(&packagename).unwrap().install(dest).unwrap(); } } } pub fn install_dir<P: AsRef<Path>, S: AsRef<str>>(config: Config, output_dir: P, cookbook: Option<S>) -> Result<()>
} let output_dir = output_dir.as_ref(); let output_dir = output_dir.to_owned(); install_packages(&config, output_dir.to_str().unwrap(), cookbook); for file in config.files { file.create(&output_dir)?; } let mut passwd = String::new(); let mut shadow = String::new(); let mut next_uid = 1000; for (username, user) in config.users { // plaintext let password = if let Some(password) = user.password { password } else if config.general.prompt { prompt_password( &format!("{}: enter password: ", username), &format!("{}: confirm password: ", username))? } else { String::new() }; let uid = user.uid.unwrap_or(next_uid); if uid >= next_uid { next_uid = uid + 1; } let gid = user.gid.unwrap_or(uid); let name = prompt!(user.name, username.clone(), "{}: name (GECOS) [{}]: ", username, username)?; let home = prompt!(user.home, format!("/home/{}", username), "{}: home [/home/{}]: ", username, username)?; let shell = prompt!(user.shell, "/bin/ion".to_string(), "{}: shell [/bin/ion]: ", username)?; println!("Adding user {}:", username); println!("\tPassword: {}", password); println!("\tUID: {}", uid); println!("\tGID: {}", gid); println!("\tName: {}", name); println!("\tHome: {}", home); println!("\tShell: {}", shell); FileConfig { path: home.clone(), data: String::new(), symlink: false, directory: true, mode: Some(0o0700), uid: Some(uid), gid: Some(gid), recursive_chown: true, }.create(&output_dir)?; let password = hash_password(&password)?; passwd.push_str(&format!("{};{};{};{};file:{};file:{}\n", username, uid, gid, name, home, shell)); shadow.push_str(&format!("{};{}\n", username, password)); } if!passwd.is_empty() { FileConfig { path: "/etc/passwd".to_string(), data: passwd, symlink: false, directory: false, // Take defaults mode: None, uid: None, gid: None, recursive_chown: false, }.create(&output_dir)?; } if!shadow.is_empty() { FileConfig { path: "/etc/shadow".to_string(), data: shadow, symlink: false, directory: false, mode: Some(0o0600), uid: Some(0), gid: Some(0), recursive_chown: false, }.create(&output_dir)?; } Ok(()) } pub fn with_redoxfs<D, T, F>(disk: D, password_opt: Option<&[u8]>, callback: F) -> Result<T> where D: Disk + Send +'static, F: FnOnce(&Path) -> Result<T> { let mount_path = if cfg!(target_os = "redox") { "file/redox_installer" } else { "/tmp/redox_installer" }; if cfg!(not(target_os = "redox")) { if! 
Path::new(mount_path).exists() { fs::create_dir(mount_path)?; } } let ctime = SystemTime::now().duration_since(UNIX_EPOCH)?; let fs = FileSystem::create( disk, password_opt, ctime.as_secs(), ctime.subsec_nanos() ).map_err(syscall_error)?; let (tx, rx) = channel(); let join_handle = thread::spawn(move || { let res = redoxfs::mount( fs, mount_path, |real_path| { tx.send(Ok(real_path.to_owned())).unwrap(); } ); match res { Ok(()) => (), Err(err) => { tx.send(Err(err)).unwrap(); }, }; }); let res = match rx.recv() { Ok(ok) => match ok { Ok(real_path) => callback(&real_path), Err(err) => return Err(err.into()), }, Err(_) => return Err(io::Error::new( io::ErrorKind::NotConnected, "redoxfs thread did not send a result" ).into()), }; unmount_path(mount_path)?; join_handle.join().unwrap(); res } pub fn fetch_bootloaders<S: AsRef<str>>(config: &Config, cookbook: Option<S>, live: bool) -> Result<(Vec<u8>, Vec<u8>)> { //TODO: make it safe to run this concurrently let bootloader_dir = "/tmp/redox_installer_bootloader"; if Path::new(bootloader_dir).exists() { fs::remove_dir_all(&bootloader_dir)?; } fs::create_dir(bootloader_dir)?; let mut bootloader_config = Config::default(); bootloader_config.general = config.general.clone(); bootloader_config.packages.insert("bootloader".to_string(), PackageConfig::default()); install_packages(&bootloader_config, bootloader_dir, cookbook.as_ref()); let boot_dir = Path::new(bootloader_dir).join("boot"); let bios_path = boot_dir.join(if live { "bootloader-live.bios" } else { "bootloader.bios" }); let efi_path = boot_dir.join(if live { "bootloader-live.efi" } else { "bootloader.efi" }); Ok(( if bios_path.exists() { fs::read(bios_path)? } else { Vec::new() }, if efi_path.exists() { fs::read(efi_path)? } else { Vec::new() }, )) } //TODO: make bootloaders use Option, dynamically create BIOS and EFI partitions pub fn with_whole_disk<P, F, T>(disk_path: P, bootloader_bios: &[u8], bootloader_efi: &[u8], password_opt: Option<&[u8]>, callback: F) -> Result<T> where P: AsRef<Path>, F: FnOnce(&Path) -> Result<T> { let target = get_target(); let bootloader_efi_name = match target.as_str() { "aarch64-unknown-redox" => "BOOTAA64.EFI", "i686-unknown-redox" => "BOOTIA32.EFI", "x86_64-unknown-redox" => "BOOTX64.EFI", _ => { return Err(format_err!("target '{}' not supported", target)); } }; // Open disk and read metadata eprintln!("Opening disk {}", disk_path.as_ref().display()); let mut disk_file = DiskWrapper::open(disk_path.as_ref())?; let disk_size = disk_file.size(); let block_size = disk_file.block_size() as u64; let gpt_block_size = match block_size { 512 => gpt::disk::LogicalBlockSize::Lb512, _ => { // TODO: support (and test) other block sizes return Err(format_err!("block size {} not supported", block_size)); } }; // Calculate partition offsets let gpt_reserved = 34 * 512; // GPT always reserves 34 512-byte sectors let mibi = 1024 * 1024; // First megabyte of the disk is reserved for BIOS partition, wich includes GPT tables let bios_start = gpt_reserved / block_size; let bios_end = (mibi / block_size) - 1; // Second megabyte of the disk is reserved for EFI partition let efi_start = bios_end + 1; let efi_end = efi_start + (mibi / block_size) - 1; // The rest of the disk is RedoxFS, reserving the GPT table mirror at the end of disk let redoxfs_start = efi_end + 1; let redoxfs_end = ((((disk_size - gpt_reserved) / mibi) * mibi) / block_size) - 1; // Format and install BIOS partition { // Write BIOS bootloader to disk eprintln!("Write bootloader with size {:#x}", 
bootloader_bios.len()); disk_file.seek(SeekFrom::Start(0))?; disk_file.write_all(&bootloader_bios)?; // Replace MBR tables with protective MBR let mbr_blocks = ((disk_size + block_size - 1) / block_size) - 1; eprintln!("Writing protective MBR with disk blocks {:#x}", mbr_blocks); gpt::mbr::ProtectiveMBR::with_lb_size(mbr_blocks as u32) .update_conservative(&mut disk_file)?; // Open disk, mark it as not initialized let mut gpt_disk = gpt::GptConfig::new() .initialized(false) .writable(true) .logical_block_size(gpt_block_size) .create_from_device(Box::new(&mut disk_file), None)?; // Add BIOS boot partition let mut partitions = BTreeMap::new(); let mut partition_id = 1; partitions.insert(partition_id, gpt::partition::Partition { part_type_guid: gpt::partition_types::BIOS, part_guid: uuid::Uuid::new_v4(), first_lba: bios_start, last_lba: bios_end, flags: 0, // TODO name: "BIOS".to_string(), }); partition_id += 1; // Add EFI boot partition partitions.insert(partition_id, gpt::partition::Partition { part_type_guid: gpt::partition_types::EFI, part_guid: uuid::Uuid::new_v4(), first_lba: efi_start, last_lba: efi_end, flags: 0, // TODO name: "EFI".to_string(), }); partition_id += 1; // Add RedoxFS partition partitions.insert(partition_id, gpt::partition::Partition { //TODO: Use REDOX_REDOXFS type (needs GPT crate changes) part_type_guid: gpt::partition_types::LINUX_FS, part_guid: uuid::Uuid::new_v4(), first_lba: redoxfs_start, last_lba: redoxfs_end, flags: 0, name: "REDOX".to_string(), }); eprintln!("Writing GPT tables: {:#?}", partitions); // Initialize GPT table gpt_disk.update_partitions(partitions)?; // Write partition layout, returning disk file gpt_disk.write()?; } // Format and install EFI partition { let disk_efi_start = efi_start * block_size; let disk_efi_end = (efi_end + 1) * block_size; let mut disk_efi = fscommon::StreamSlice::new( &mut disk_file, disk_efi_start, disk_efi_end, )?; eprintln!("Formatting EFI partition with size {:#x}", disk_efi_end - disk_efi_start); fatfs::format_volume(&mut disk_efi, fatfs::FormatVolumeOptions::new())?; eprintln!("Opening EFI partition"); let fs = fatfs::FileSystem::new(&mut disk_efi, fatfs::FsOptions::new())?; eprintln!("Creating EFI directory"); let root_dir = fs.root_dir(); root_dir.create_dir("EFI")?; eprintln!("Creating EFI/BOOT directory"); let efi_dir = root_dir.open_dir("EFI")?; efi_dir.create_dir("BOOT")?; eprintln!("Writing EFI/BOOT/{} file with size {:#x}", bootloader_efi_name, bootloader_efi.len()); let boot_dir = efi_dir.open_dir("BOOT")?; let mut file = boot_dir.create_file(bootloader_efi_name)?; file.truncate()?; file.write_all(&bootloader_efi)?; } // Format and install RedoxFS partition eprintln!("Installing to RedoxFS partition with size {:#x}", (redoxfs_end - redoxfs_start) * block_size); let disk_redoxfs = DiskIo(fscommon::StreamSlice::new( disk_file, redoxfs_start * block_size, (redoxfs_end + 1) * block_size )?); with_redoxfs( disk_redoxfs, password_opt, callback ) } pub fn install<P, S>(config: Config, output: P, cookbook: Option<S>, live: bool) -> Result<()> where P: AsRef<Path>, S: AsRef<str>, { println!("Install {:#?} to {}", config, output.as_ref().display()); if output.as_ref().is_dir() { install_dir(config, output, cookbook) } else { let (bootloader_bios, bootloader_efi) = fetch_bootloaders(&config, cookbook.as_ref(), live)?; with_whole_disk(output, &bootloader_bios, &bootloader_efi, None, move |mount_path| { install_dir(config, mount_path, cookbook) } ) } }
{ //let mut context = liner::Context::new(); macro_rules! prompt { ($dst:expr, $def:expr, $($arg:tt)*) => (if config.general.prompt { Err(io::Error::new( io::ErrorKind::Other, "prompt not currently supported" )) // match unwrap_or_prompt($dst, &mut context, &format!($($arg)*)) { // Ok(res) => if res.is_empty() { // Ok($def) // } else { // Ok(res) // }, // Err(err) => Err(err) // } } else { Ok($dst.unwrap_or($def)) })
identifier_body
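The masked middle here is the opening of `install_dir`'s body, dominated by the `prompt!` macro: when interactive prompting is enabled it returns a "prompt not currently supported" error, otherwise it yields the configured value or the supplied default. A minimal standalone sketch of that fallback pattern (the function name and parameters are illustrative, not from the source):

    use std::io;

    // Illustrative sketch of the prompt! fallback: error out if prompting is
    // requested, otherwise take the configured value or fall back to the default.
    fn value_or_default(prompt_enabled: bool, value: Option<String>, default: String) -> io::Result<String> {
        if prompt_enabled {
            Err(io::Error::new(io::ErrorKind::Other, "prompt not currently supported"))
        } else {
            Ok(value.unwrap_or(default))
        }
    }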
lib.rs
#[macro_use] extern crate serde_derive; extern crate argon2; extern crate libc; extern crate liner; #[macro_use] extern crate failure; extern crate pkgutils; extern crate rand; extern crate redoxfs; extern crate syscall; extern crate termion; mod config; mod disk_wrapper; pub use config::Config; pub use config::file::FileConfig; pub use config::package::PackageConfig; use disk_wrapper::DiskWrapper; use failure::{Error, err_msg}; use rand::{RngCore, rngs::OsRng}; use redoxfs::{unmount_path, Disk, DiskIo, FileSystem}; use termion::input::TermRead; use pkgutils::{Repo, Package}; use std::{ collections::BTreeMap, env, fs, io::{self, Seek, SeekFrom, Write}, path::Path, sync::mpsc::channel, time::{SystemTime, UNIX_EPOCH}, thread, }; pub(crate) type Result<T> = std::result::Result<T, Error>; const REMOTE: &'static str = "https://static.redox-os.org/pkg"; fn get_target() -> String { env::var("TARGET").unwrap_or( option_env!("TARGET").map_or( "x86_64-unknown-redox".to_string(), |x| x.to_string() ) ) } /// Converts a password to a serialized argon2rs hash, understandable /// by redox_users. If the password is blank, the hash is blank. fn hash_password(password: &str) -> Result<String> { if password!= "" { let salt = format!("{:X}", OsRng.next_u64()); let config = argon2::Config::default(); let hash = argon2::hash_encoded(password.as_bytes(), salt.as_bytes(), &config)?; Ok(hash) } else { Ok("".to_string()) } } fn syscall_error(err: syscall::Error) -> io::Error { io::Error::from_raw_os_error(err.errno) } /// Returns a password collected from the user (plaintext) fn prompt_password(prompt: &str, confirm_prompt: &str) -> Result<String> { let stdin = io::stdin(); let mut stdin = stdin.lock(); let stdout = io::stdout(); let mut stdout = stdout.lock(); print!("{}", prompt); let password = stdin.read_passwd(&mut stdout)?; print!("\n{}", confirm_prompt); let confirm_password = stdin.read_passwd(&mut stdout)?; // Note: Actually comparing two Option<String> values if confirm_password == password { Ok(password.unwrap_or("".to_string())) } else { Err(err_msg("passwords do not match")) } } //TODO: error handling fn install_packages<S: AsRef<str>>(config: &Config, dest: &str, cookbook: Option<S>) { let target = &get_target(); let mut repo = Repo::new(target); repo.add_remote(REMOTE); if let Some(cookbook) = cookbook { let dest_pkg = format!("{}/pkg", dest); if! 
Path::new(&dest_pkg).exists() { fs::create_dir(&dest_pkg).unwrap(); } for (packagename, package) in &config.packages { let pkgar_path = format!("{}/{}/repo/{}/{}.pkgar", env::current_dir().unwrap().to_string_lossy(), cookbook.as_ref(), target, packagename); let from_remote = match (config.general.cooking, package) { (Some(true), PackageConfig::Empty) => true, (Some(true), PackageConfig::Spec { version: None, git: None, path: None }) => true, _ => false }; if from_remote { println!("Installing package from remote: {}", packagename); repo.fetch(&packagename).unwrap().install(dest).unwrap(); } else if Path::new(&pkgar_path).exists() { println!("Installing package from local repo: {}", packagename); let public_path = format!("{}/{}/build/id_ed25519.pub.toml", env::current_dir().unwrap().to_string_lossy(), cookbook.as_ref()); pkgar::extract(&public_path, &pkgar_path, dest).unwrap(); let head_path = format!("{}/{}.pkgar_head", dest_pkg, packagename); pkgar::split(&public_path, &pkgar_path, &head_path, Option::<&str>::None).unwrap(); } else { println!("Installing package tar.gz from local repo: {}", packagename); let path = format!("{}/{}/repo/{}/{}.tar.gz", env::current_dir().unwrap().to_string_lossy(), cookbook.as_ref(), target, packagename); Package::from_path(&path).unwrap().install(dest).unwrap(); } } } else { for (packagename, _package) in &config.packages { println!("Installing package from remote: {}", packagename); repo.fetch(&packagename).unwrap().install(dest).unwrap(); } } } pub fn install_dir<P: AsRef<Path>, S: AsRef<str>>(config: Config, output_dir: P, cookbook: Option<S>) -> Result<()> { //let mut context = liner::Context::new(); macro_rules! prompt { ($dst:expr, $def:expr, $($arg:tt)*) => (if config.general.prompt { Err(io::Error::new( io::ErrorKind::Other, "prompt not currently supported" )) // match unwrap_or_prompt($dst, &mut context, &format!($($arg)*)) { // Ok(res) => if res.is_empty() { // Ok($def) // } else { // Ok(res) // }, // Err(err) => Err(err) // } } else { Ok($dst.unwrap_or($def)) }) } let output_dir = output_dir.as_ref(); let output_dir = output_dir.to_owned(); install_packages(&config, output_dir.to_str().unwrap(), cookbook); for file in config.files { file.create(&output_dir)?; } let mut passwd = String::new(); let mut shadow = String::new(); let mut next_uid = 1000; for (username, user) in config.users { // plaintext let password = if let Some(password) = user.password { password } else if config.general.prompt { prompt_password( &format!("{}: enter password: ", username), &format!("{}: confirm password: ", username))? 
} else { String::new() }; let uid = user.uid.unwrap_or(next_uid); if uid >= next_uid { next_uid = uid + 1; } let gid = user.gid.unwrap_or(uid); let name = prompt!(user.name, username.clone(), "{}: name (GECOS) [{}]: ", username, username)?; let home = prompt!(user.home, format!("/home/{}", username), "{}: home [/home/{}]: ", username, username)?; let shell = prompt!(user.shell, "/bin/ion".to_string(), "{}: shell [/bin/ion]: ", username)?; println!("Adding user {}:", username); println!("\tPassword: {}", password); println!("\tUID: {}", uid); println!("\tGID: {}", gid); println!("\tName: {}", name); println!("\tHome: {}", home); println!("\tShell: {}", shell); FileConfig { path: home.clone(), data: String::new(), symlink: false, directory: true, mode: Some(0o0700), uid: Some(uid), gid: Some(gid), recursive_chown: true, }.create(&output_dir)?; let password = hash_password(&password)?; passwd.push_str(&format!("{};{};{};{};file:{};file:{}\n", username, uid, gid, name, home, shell)); shadow.push_str(&format!("{};{}\n", username, password)); } if!passwd.is_empty() { FileConfig { path: "/etc/passwd".to_string(), data: passwd, symlink: false, directory: false, // Take defaults mode: None, uid: None, gid: None, recursive_chown: false, }.create(&output_dir)?; } if!shadow.is_empty() { FileConfig { path: "/etc/shadow".to_string(), data: shadow, symlink: false, directory: false, mode: Some(0o0600), uid: Some(0), gid: Some(0), recursive_chown: false, }.create(&output_dir)?; } Ok(()) } pub fn with_redoxfs<D, T, F>(disk: D, password_opt: Option<&[u8]>, callback: F) -> Result<T> where D: Disk + Send +'static, F: FnOnce(&Path) -> Result<T> { let mount_path = if cfg!(target_os = "redox") { "file/redox_installer" } else { "/tmp/redox_installer" }; if cfg!(not(target_os = "redox")) { if! Path::new(mount_path).exists() { fs::create_dir(mount_path)?; } } let ctime = SystemTime::now().duration_since(UNIX_EPOCH)?; let fs = FileSystem::create( disk, password_opt, ctime.as_secs(), ctime.subsec_nanos() ).map_err(syscall_error)?; let (tx, rx) = channel(); let join_handle = thread::spawn(move || { let res = redoxfs::mount( fs, mount_path, |real_path| { tx.send(Ok(real_path.to_owned())).unwrap(); } ); match res { Ok(()) => (), Err(err) => { tx.send(Err(err)).unwrap(); }, }; }); let res = match rx.recv() { Ok(ok) => match ok { Ok(real_path) => callback(&real_path), Err(err) => return Err(err.into()), }, Err(_) => return Err(io::Error::new( io::ErrorKind::NotConnected, "redoxfs thread did not send a result" ).into()), }; unmount_path(mount_path)?; join_handle.join().unwrap(); res } pub fn fetch_bootloaders<S: AsRef<str>>(config: &Config, cookbook: Option<S>, live: bool) -> Result<(Vec<u8>, Vec<u8>)> { //TODO: make it safe to run this concurrently let bootloader_dir = "/tmp/redox_installer_bootloader"; if Path::new(bootloader_dir).exists() { fs::remove_dir_all(&bootloader_dir)?; } fs::create_dir(bootloader_dir)?; let mut bootloader_config = Config::default(); bootloader_config.general = config.general.clone(); bootloader_config.packages.insert("bootloader".to_string(), PackageConfig::default()); install_packages(&bootloader_config, bootloader_dir, cookbook.as_ref()); let boot_dir = Path::new(bootloader_dir).join("boot"); let bios_path = boot_dir.join(if live { "bootloader-live.bios" } else { "bootloader.bios" }); let efi_path = boot_dir.join(if live { "bootloader-live.efi" } else { "bootloader.efi" }); Ok(( if bios_path.exists() { fs::read(bios_path)? } else { Vec::new() }, if efi_path.exists() { fs::read(efi_path)? 
} else { Vec::new() }, )) } //TODO: make bootloaders use Option, dynamically create BIOS and EFI partitions pub fn
<P, F, T>(disk_path: P, bootloader_bios: &[u8], bootloader_efi: &[u8], password_opt: Option<&[u8]>, callback: F) -> Result<T> where P: AsRef<Path>, F: FnOnce(&Path) -> Result<T> { let target = get_target(); let bootloader_efi_name = match target.as_str() { "aarch64-unknown-redox" => "BOOTAA64.EFI", "i686-unknown-redox" => "BOOTIA32.EFI", "x86_64-unknown-redox" => "BOOTX64.EFI", _ => { return Err(format_err!("target '{}' not supported", target)); } }; // Open disk and read metadata eprintln!("Opening disk {}", disk_path.as_ref().display()); let mut disk_file = DiskWrapper::open(disk_path.as_ref())?; let disk_size = disk_file.size(); let block_size = disk_file.block_size() as u64; let gpt_block_size = match block_size { 512 => gpt::disk::LogicalBlockSize::Lb512, _ => { // TODO: support (and test) other block sizes return Err(format_err!("block size {} not supported", block_size)); } }; // Calculate partition offsets let gpt_reserved = 34 * 512; // GPT always reserves 34 512-byte sectors let mibi = 1024 * 1024; // First megabyte of the disk is reserved for BIOS partition, wich includes GPT tables let bios_start = gpt_reserved / block_size; let bios_end = (mibi / block_size) - 1; // Second megabyte of the disk is reserved for EFI partition let efi_start = bios_end + 1; let efi_end = efi_start + (mibi / block_size) - 1; // The rest of the disk is RedoxFS, reserving the GPT table mirror at the end of disk let redoxfs_start = efi_end + 1; let redoxfs_end = ((((disk_size - gpt_reserved) / mibi) * mibi) / block_size) - 1; // Format and install BIOS partition { // Write BIOS bootloader to disk eprintln!("Write bootloader with size {:#x}", bootloader_bios.len()); disk_file.seek(SeekFrom::Start(0))?; disk_file.write_all(&bootloader_bios)?; // Replace MBR tables with protective MBR let mbr_blocks = ((disk_size + block_size - 1) / block_size) - 1; eprintln!("Writing protective MBR with disk blocks {:#x}", mbr_blocks); gpt::mbr::ProtectiveMBR::with_lb_size(mbr_blocks as u32) .update_conservative(&mut disk_file)?; // Open disk, mark it as not initialized let mut gpt_disk = gpt::GptConfig::new() .initialized(false) .writable(true) .logical_block_size(gpt_block_size) .create_from_device(Box::new(&mut disk_file), None)?; // Add BIOS boot partition let mut partitions = BTreeMap::new(); let mut partition_id = 1; partitions.insert(partition_id, gpt::partition::Partition { part_type_guid: gpt::partition_types::BIOS, part_guid: uuid::Uuid::new_v4(), first_lba: bios_start, last_lba: bios_end, flags: 0, // TODO name: "BIOS".to_string(), }); partition_id += 1; // Add EFI boot partition partitions.insert(partition_id, gpt::partition::Partition { part_type_guid: gpt::partition_types::EFI, part_guid: uuid::Uuid::new_v4(), first_lba: efi_start, last_lba: efi_end, flags: 0, // TODO name: "EFI".to_string(), }); partition_id += 1; // Add RedoxFS partition partitions.insert(partition_id, gpt::partition::Partition { //TODO: Use REDOX_REDOXFS type (needs GPT crate changes) part_type_guid: gpt::partition_types::LINUX_FS, part_guid: uuid::Uuid::new_v4(), first_lba: redoxfs_start, last_lba: redoxfs_end, flags: 0, name: "REDOX".to_string(), }); eprintln!("Writing GPT tables: {:#?}", partitions); // Initialize GPT table gpt_disk.update_partitions(partitions)?; // Write partition layout, returning disk file gpt_disk.write()?; } // Format and install EFI partition { let disk_efi_start = efi_start * block_size; let disk_efi_end = (efi_end + 1) * block_size; let mut disk_efi = fscommon::StreamSlice::new( &mut disk_file, 
disk_efi_start, disk_efi_end, )?; eprintln!("Formatting EFI partition with size {:#x}", disk_efi_end - disk_efi_start); fatfs::format_volume(&mut disk_efi, fatfs::FormatVolumeOptions::new())?; eprintln!("Opening EFI partition"); let fs = fatfs::FileSystem::new(&mut disk_efi, fatfs::FsOptions::new())?; eprintln!("Creating EFI directory"); let root_dir = fs.root_dir(); root_dir.create_dir("EFI")?; eprintln!("Creating EFI/BOOT directory"); let efi_dir = root_dir.open_dir("EFI")?; efi_dir.create_dir("BOOT")?; eprintln!("Writing EFI/BOOT/{} file with size {:#x}", bootloader_efi_name, bootloader_efi.len()); let boot_dir = efi_dir.open_dir("BOOT")?; let mut file = boot_dir.create_file(bootloader_efi_name)?; file.truncate()?; file.write_all(&bootloader_efi)?; } // Format and install RedoxFS partition eprintln!("Installing to RedoxFS partition with size {:#x}", (redoxfs_end - redoxfs_start) * block_size); let disk_redoxfs = DiskIo(fscommon::StreamSlice::new( disk_file, redoxfs_start * block_size, (redoxfs_end + 1) * block_size )?); with_redoxfs( disk_redoxfs, password_opt, callback ) } pub fn install<P, S>(config: Config, output: P, cookbook: Option<S>, live: bool) -> Result<()> where P: AsRef<Path>, S: AsRef<str>, { println!("Install {:#?} to {}", config, output.as_ref().display()); if output.as_ref().is_dir() { install_dir(config, output, cookbook) } else { let (bootloader_bios, bootloader_efi) = fetch_bootloaders(&config, cookbook.as_ref(), live)?; with_whole_disk(output, &bootloader_bios, &bootloader_efi, None, move |mount_path| { install_dir(config, mount_path, cookbook) } ) } }
with_whole_disk
identifier_name
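The partition arithmetic in with_whole_disk is easy to get wrong by one block, so here is the same LBA layout pulled out into a standalone sketch (the Layout struct and function name are illustrative, not part of the installer): the first MiB holds the BIOS boot data together with the 34 reserved GPT sectors, the second MiB holds the EFI system partition, and RedoxFS takes the remainder rounded down to whole MiB so the GPT mirror at the end of the disk is left alone.

// Illustrative sketch of the LBA layout computed in with_whole_disk above.
struct Layout {
    bios: (u64, u64),    // (first_lba, last_lba)
    efi: (u64, u64),
    redoxfs: (u64, u64),
}

fn layout(disk_size: u64, block_size: u64) -> Layout {
    let gpt_reserved = 34 * 512;            // GPT always reserves 34 512-byte sectors
    let mibi = 1024 * 1024;
    let bios_start = gpt_reserved / block_size;
    let bios_end = (mibi / block_size) - 1;
    let efi_start = bios_end + 1;
    let efi_end = efi_start + (mibi / block_size) - 1;
    let redoxfs_start = efi_end + 1;
    let redoxfs_end = ((((disk_size - gpt_reserved) / mibi) * mibi) / block_size) - 1;
    Layout {
        bios: (bios_start, bios_end),
        efi: (efi_start, efi_end),
        redoxfs: (redoxfs_start, redoxfs_end),
    }
}

// For a 1 GiB disk with 512-byte blocks this gives BIOS = (34, 2047),
// EFI = (2048, 4095) and RedoxFS = (4096, 2095103).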
verify.rs
set can require /// overflow storage :(. #[derive(Clone)] pub struct StackSlot { vars: Bitset, code: Atom, expr: Range<usize>, } /// A constructor trait for plugging in to the verifier, to collect extra data during the /// verification pass pub trait ProofBuilder { /// The data type being generated type Item: Clone; /// The hyp gathering type type Accum: Default; /// Add a new hyp to the accumulation type fn push(&mut self, hyps: &mut Self::Accum, hyp: Self::Item); /// Create a proof data node from a statement, the data for the hypotheses, /// and the compressed constant string fn build(&mut self, addr: StatementAddress, hyps: Self::Accum, pool: &[u8], expr: Range<usize>) -> Self::Item; } /// The "null" proof builder, which creates no extra data. This /// is used for one-shot verification, where no extra data beyond the stack /// information is needed. impl ProofBuilder for () { type Item = (); type Accum = (); fn push(&mut self, _: &mut (), _: ()) {} fn build(&mut self, _: StatementAddress, _: (), _: &[u8], _: Range<usize>) -> () {} } /// Working memory used by the verifier on a segment. This expands for the /// first few proofs and the rest can be handled without allocation. struct VerifyState<'a, P: 'a + ProofBuilder> { /// Segment we are working on this_seg: SegmentRef<'a>, /// Segment order oracle order: &'a SegmentOrder, /// Atom name oracle, used for hypotheses nameset: &'a Nameset, /// Used to access previously proved assertions scoper: ScopeReader<'a>, /// Used to produce proof trees as a side effect of verification builder: &'a mut P, /// The extended frame we are working on cur_frame: &'a Frame, /// Steps which can be invoked in the current proof, grows on every Z prepared: Vec<PreparedStep<'a, P::Item>>, /// Stack of active subtrees stack: Vec<(P::Item, StackSlot)>, /// Buffer for math strings of subtrees and hypotheses; shared to reduce /// actual copying when a hypothesis or saved step is recalled stack_buffer: Vec<u8>, /// Scratch space used only when checking the final step temp_buffer: Vec<u8>, /// Scratch space used for a substitution mapping while invoking a prior /// assertion subst_info: Vec<(Range<usize>, Bitset)>, /// Tracks mandatory and optional variables in use in the current proof var2bit: HashMap<Atom, usize>, /// Disjoint variable conditions in the current extended frame dv_map: &'a [Bitset], } type Result<T> = result::Result<T, Diagnostic>; /// Variables are added lazily to the extended frame. All variables which are /// associated with hypotheses or $d constraints are numbered by scopeck, but if /// a dummy variable is used in a proof without a $d constraint it won't be /// discovered until we get here, and a number needs to be assigned to it. /// Unfortunately this does mean that it'll be outside the valid range of dv_map /// and dv_map checks need to guard against that. fn map_var<'a, P: ProofBuilder>(state: &mut VerifyState<'a, P>, token: Atom) -> usize { let nbit = state.var2bit.len(); // actually, it _might not_ break anything to have a single variable index // allocated by scopeck for all non-$d-ed variables. after all, they aren't // observably disjoint. 
*state.var2bit.entry(token).or_insert(nbit) } // the initial hypotheses are accessed directly from the initial extended frame // to avoid having to look up their pseudo-frames by name; also, $e statements // no longer have pseudo-frames, so this is the only way to prepare an $e fn prepare_hypothesis<'a, P: ProofBuilder>(state: &mut VerifyState<P>, hyp: &'a scopeck::Hyp) { let mut vars = Bitset::new(); let tos = state.stack_buffer.len(); match hyp { &Floating(_addr, var_index, _typecode) => { fast_extend(&mut state.stack_buffer, state.nameset.atom_name(state.cur_frame.var_list[var_index])); *state.stack_buffer.last_mut().unwrap() |= 0x80; vars.set_bit(var_index); // and we have prior knowledge it's identity mapped } &Essential(_addr, ref expr) => { // this is the first of many subtle variations on the "interpret an // ExprFragment" theme in this module. for part in &*expr.tail { fast_extend(&mut state.stack_buffer, &state.cur_frame.const_pool[part.prefix.clone()]); fast_extend(&mut state.stack_buffer, state.nameset.atom_name(state.cur_frame.var_list[part.var])); *state.stack_buffer.last_mut().unwrap() |= 0x80; vars.set_bit(part.var); // and we have prior knowledge it's identity mapped } fast_extend(&mut state.stack_buffer, &state.cur_frame.const_pool[expr.rump.clone()]); } }; let ntos = state.stack_buffer.len(); state.prepared .push(Hyp(vars, hyp.typecode(), tos..ntos, state.builder.build(hyp.address(), Default::default(), &state.stack_buffer, tos..ntos))); } /// Adds a named $e hypothesis to the prepared array. These are not kept in the /// frame array due to infrequent use, so other measures are needed. This is /// not normally used by compressed proofs. /// /// This is used as a fallback when looking up a $e in the assertion hashtable /// fails. fn prepare_named_hyp<P: ProofBuilder>(state: &mut VerifyState<P>, label: TokenPtr) -> Result<()> { for hyp in &*state.cur_frame.hypotheses { if let &Essential(addr, _) = hyp { assert!(addr.segment_id == state.this_seg.id); // we don't allow $e statements to be valid across segments, so this // can be done as a local lookup in this_seg. Since we always // invalidate the VerifySegment if the current segment has changed // in any way, we don't even need to track dependencies here. if state.this_seg.statement(addr.index).label() == label { prepare_hypothesis(state, hyp); return Ok(()); } } } // whoops, not in the assertion table _or_ the extended frame return Err(Diagnostic::StepMissing(copy_token(label))); } /// Used for named step references. For NORMAL proofs this is immediately /// before execute_step, but for COMPRESSED proofs all used steps are prepared /// ahead of time, and assigned sequential numbers for later use. fn
<P: ProofBuilder>(state: &mut VerifyState<P>, label: TokenPtr) -> Result<()> { // it's either an assertion or a hypothesis. $f hyps have pseudo-frames // which this function can use, $e don't and need to be looked up in the // local hyp list after the frame lookup fails let frame = match state.scoper.get(label) { Some(fp) => fp, None => return prepare_named_hyp(state, label), }; // disallow circular reasoning let valid = frame.valid; let pos = state.cur_frame.valid.start; try_assert!(state.order.cmp(&pos, &valid.start) == Ordering::Greater, Diagnostic::StepUsedBeforeDefinition(copy_token(label))); try_assert!(valid.end == NO_STATEMENT || pos.segment_id == valid.start.segment_id && pos.index < valid.end, Diagnostic::StepUsedAfterScope(copy_token(label))); if frame.stype == StatementType::Axiom || frame.stype == StatementType::Provable { state.prepared.push(Assert(frame)); } else { let mut vars = Bitset::new(); for &var in &*frame.var_list { vars.set_bit(map_var(state, var)); } let tos = state.stack_buffer.len(); fast_extend(&mut state.stack_buffer, &frame.stub_expr); let ntos = state.stack_buffer.len(); state.prepared .push(Hyp(vars, frame.target.typecode, tos..ntos, state.builder.build(valid.start, Default::default(), &state.stack_buffer, tos..ntos))); } Ok(()) } // perform a substitution after it has been built in `vars`, appending to // `target` #[inline(always)] fn do_substitute(target: &mut Vec<u8>, frame: &Frame, expr: &VerifyExpr, vars: &[(Range<usize>, Bitset)]) { for part in &*expr.tail { fast_extend(target, &frame.const_pool[part.prefix.clone()]); copy_portion(target, vars[part.var].0.clone()); } fast_extend(target, &frame.const_pool[expr.rump.clone()]); } // like a substitution and equality check, but in one pass #[inline(always)] fn do_substitute_eq(mut compare: &[u8], frame: &Frame, expr: &VerifyExpr, vars: &[(Range<usize>, Bitset)], var_buffer: &[u8]) -> bool { fn step(compare: &mut &[u8], slice: &[u8]) -> bool { let len = slice.len(); if (*compare).len() < len { return true; } if slice!= &(*compare)[0..len] { return true; } *compare = &(*compare)[len..]; return false; } for part in &*expr.tail { if step(&mut compare, &frame.const_pool[part.prefix.clone()]) { return false; } if step(&mut compare, &var_buffer[vars[part.var].0.clone()]) { return false; } } if step(&mut compare, &frame.const_pool[expr.rump.clone()]) { return false; } return compare.is_empty(); } // substitute with the _names_ of variables, for the final "did we prove what we // claimed we would" check fn do_substitute_raw(target: &mut Vec<u8>, frame: &Frame, nameset: &Nameset) { for part in &*frame.target.tail { fast_extend(target, &frame.const_pool[part.prefix.clone()]); fast_extend(target, nameset.atom_name(frame.var_list[part.var])); *target.last_mut().unwrap() |= 0x80; } fast_extend(target, &frame.const_pool[frame.target.rump.clone()]); } // generate a bitmask for a substituted expression #[inline(always)] fn do_substitute_vars(expr: &[ExprFragment], vars: &[(Range<usize>, Bitset)]) -> Bitset { let mut out = Bitset::new(); for part in expr { out |= &vars[part.var].1; } out } /// This is the main "VM" function, and responsible for ~30% of CPU time during /// a one-shot verify operation. fn execute_step<P: ProofBuilder>(state: &mut VerifyState<P>, index: usize) -> Result<()> { try_assert!(index < state.prepared.len(), Diagnostic::StepOutOfRange); let fref = match state.prepared[index] { Hyp(ref vars, code, ref expr, ref data) => { // hypotheses/saved steps are the easy case. 
unfortunately, this is // also a very unpredictable branch state.stack.push((data.clone(), StackSlot { vars: vars.clone(), code: code, expr: expr.clone(), })); return Ok(()); } Assert(fref) => fref, }; let sbase = try!(state.stack .len() .checked_sub(fref.hypotheses.len()) .ok_or(Diagnostic::ProofUnderflow)); while state.subst_info.len() < fref.mandatory_count { // this is mildly unhygenic, since slots corresponding to $e hyps won't get cleared, but // scopeck shouldn't generate references to them state.subst_info.push((0..0, Bitset::new())); } let mut datavec = Default::default(); // process the hypotheses of the assertion we're about to apply. $f hyps // allow the caller to define a replacement for a variable; $e hyps are // logical hypotheses that must have been proved; the result is then // substituted and pushed. // // since a variable must be $f-declared before it can appear in an $e (or // else we'll ignore the $e), and that logical file order is reflected in // the stack order of the hypotheses, we can do this in one pass for (ix, hyp) in fref.hypotheses.iter().enumerate() { let (ref data, ref slot) = state.stack[sbase + ix]; state.builder.push(&mut datavec, data.clone()); match hyp { &Floating(_addr, var_index, typecode) => { try_assert!(slot.code == typecode, Diagnostic::StepFloatWrongType); state.subst_info[var_index] = (slot.expr.clone(), slot.vars.clone()); } &Essential(_addr, ref expr) => { try_assert!(slot.code == expr.typecode, Diagnostic::StepEssenWrongType); try_assert!(do_substitute_eq(&state.stack_buffer[slot.expr.clone()], fref, &expr, &state.subst_info, &state.stack_buffer), Diagnostic::StepEssenWrong); } } } // replace the hypotheses on the stack with the substituted target // expression. does not physically remove the hypotheses from the stack // pool, because they might have been saved steps or hypotheses, and // deciding whether we need to move anything would swamp any savings, anyway // - remember that this function is largely a branch predictor benchmark let tos = state.stack_buffer.len(); do_substitute(&mut state.stack_buffer, fref, &fref.target, &state.subst_info); let ntos = state.stack_buffer.len(); state.stack.truncate(sbase); state.stack .push((state.builder.build(fref.valid.start, datavec, &state.stack_buffer, tos..ntos), StackSlot { code: fref.target.typecode, vars: do_substitute_vars(&fref.target.tail, &state.subst_info), expr: tos..ntos, })); // check $d constraints on the used assertion now that the dust has settled. // Remember that we might have variable indexes allocated during the proof // that are out of range for dv_map for &(ix1, ix2) in &*fref.mandatory_dv { for var1 in &state.subst_info[ix1].1 { for var2 in &state.subst_info[ix2].1 { try_assert!(var1 < state.dv_map.len() && state.dv_map[var1].has_bit(var2), Diagnostic::ProofDvViolation); } } } Ok(()) } fn finalize_step<P: ProofBuilder>(state: &mut VerifyState<P>) -> Result<P::Item> { // if we get here, it's a valid proof, but was it the _right_ valid proof? 
try_assert!(state.stack.len() <= 1, Diagnostic::ProofExcessEnd); let &(ref data, ref tos) = try!(state.stack.last().ok_or(Diagnostic::ProofNoSteps)); try_assert!(tos.code == state.cur_frame.target.typecode, Diagnostic::ProofWrongTypeEnd); fast_clear(&mut state.temp_buffer); do_substitute_raw(&mut state.temp_buffer, &state.cur_frame, state.nameset); try_assert!(state.stack_buffer[tos.expr.clone()] == state.temp_buffer[..], Diagnostic::ProofWrongExprEnd); Ok(data.clone()) } fn save_step<P: ProofBuilder>(state: &mut VerifyState<P>) { let &(ref data, ref top) = state.stack.last().expect("can_save should prevent getting here"); state.prepared.push(Hyp(top.vars.clone(), top.code, top.expr.clone(), data.clone())); } // proofs are not self-synchronizing, so it's not likely to get >1 usable error fn verify_proof<'a, P: ProofBuilder>(state: &mut VerifyState<'a, P>, stmt: StatementRef<'a>) -> Result<P::Item> { // clear, but do not free memory state.stack.clear(); fast_clear(&mut state.stack_buffer); state.prepared.clear(); state.var2bit.clear(); state.dv_map = &state.cur_frame.optional_dv; // temp_buffer is cleared before use; subst_info should be overwritten // before use if scopeck is working correctly // use scopeck-assigned numbers for mandatory variables and optional // variables with active $d constraints. optional variables without active // $d constraints are numbered on demand by map_var for (index, &tokr) in state.cur_frame.var_list.iter().enumerate() { state.var2bit.insert(tokr, index); } if stmt.proof_len() > 0 && stmt.proof_slice_at(0) == b"(" { // this is a compressed proof let mut i = 1; // compressed proofs preload the hypotheses so they don't need to (but // are not forbidden to) reference them by name for hyp in &*state.cur_frame.hypotheses { prepare_hypothesis(state, hyp); } // parse and prepare the label list before the ) loop { try_assert!(i < stmt.proof_len(), Diagnostic::ProofUnterminatedRoster); let chunk = stmt.proof_slice_at(i); i += 1; if chunk == b")" { break; } try!(prepare_step(state, chunk)); } // after ) is a packed list of varints. decode them and execute the // corresponding steps. the varint decoder is surprisingly CPU-heavy, // presumably due to branch overhead let mut k = 0usize; let mut can_save = false; while i < stmt.proof_len() { let chunk = stmt.proof_slice_at(i); for &ch in chunk { if ch >= b'A' && ch <= b'T' { k = k * 20 + (ch - b'A') as usize; try!(execute_step(state, k)); k = 0; can_save = true; } else if ch >= b'U' && ch <= b'Y' { k = k * 5 + 1 + (ch - b'U') as usize; try_assert!(k < (u32::max_value() as usize / 20) - 1, Diagnostic::ProofMalformedVarint); can_save = false; } else if ch == b'Z' { try_assert!(can_save, Diagnostic::ProofInvalidSave); save_step(state); can_save = false; } else if ch == b'?' { try_assert!(k == 0, Diagnostic::ProofMalformedVarint); return Err(Diagnostic::ProofIncomplete); } } i += 1; } try_assert!(k == 0, Diagnostic::ProofMalformedVarint); } else { let mut count = 0; // NORMAL mode proofs are just a list of steps, with no saving provision for i in 0..stmt.proof_len() { let chunk = stmt.proof_slice_at(i); try_assert!(chunk!= b"?", Diagnostic::ProofIncomplete); try!(prepare_step(state, chunk)); try!(execute_step(state, count)); count += 1; } } finalize_step(state) } /// Stored result of running the verifier on a segment. struct VerifySegment { source: Arc<Segment>, scope_usage: ScopeUsage, diagnostics: HashMap<StatementAddress, Diagnostic>,
prepare_step
identifier_name
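The packed letter stream that verify_proof decodes after the closing `)` is compact but cryptic: `A`..`T` act as base-20 digits that finish a step number, `U`..`Y` accumulate higher base-5 digits (each offset by one), `Z` saves the previous result for reuse, and `?` marks an incomplete proof. Below is a minimal sketch of that state machine for a single chunk (the real loop keeps k and can_save alive across chunks and raises Diagnostics instead of returning None); the Op enum and decode_chunk name are illustrative only.

#[derive(Debug, PartialEq)]
enum Op {
    Step(usize), // execute prepared step k
    Save,        // 'Z': save the result of the previous step for later reuse
}

fn decode_chunk(chunk: &[u8]) -> Option<Vec<Op>> {
    let mut ops = Vec::new();
    let mut k = 0usize;
    let mut can_save = false;
    for &ch in chunk {
        if ch >= b'A' && ch <= b'T' {
            k = k * 20 + (ch - b'A') as usize; // low digit: finishes the varint
            ops.push(Op::Step(k));
            k = 0;
            can_save = true;
        } else if ch >= b'U' && ch <= b'Y' {
            k = k * 5 + 1 + (ch - b'U') as usize; // high digit, offset by one
            if k >= (u32::max_value() as usize / 20) - 1 {
                return None; // the verifier reports ProofMalformedVarint here
            }
            can_save = false;
        } else if ch == b'Z' {
            if !can_save {
                return None; // ProofInvalidSave: 'Z' must follow a completed step
            }
            ops.push(Op::Save);
            can_save = false;
        } else if ch == b'?' {
            return None; // ProofIncomplete
        }
        // any other byte is skipped, as in the loop above
    }
    Some(ops)
}

// decode_chunk(b"UUAZB") yields [Step(120), Save, Step(1)]:
// the two 'U's build k = 1, then 6; 'A' completes 6 * 20 + 0 = 120.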
verify.rs
bitset can require /// overflow storage :(. #[derive(Clone)] pub struct StackSlot { vars: Bitset, code: Atom, expr: Range<usize>, } /// A constructor trait for plugging in to the verifier, to collect extra data during the /// verification pass pub trait ProofBuilder { /// The data type being generated type Item: Clone; /// The hyp gathering type type Accum: Default; /// Add a new hyp to the accumulation type fn push(&mut self, hyps: &mut Self::Accum, hyp: Self::Item); /// Create a proof data node from a statement, the data for the hypotheses, /// and the compressed constant string fn build(&mut self, addr: StatementAddress, hyps: Self::Accum, pool: &[u8], expr: Range<usize>) -> Self::Item; } /// The "null" proof builder, which creates no extra data. This /// is used for one-shot verification, where no extra data beyond the stack /// information is needed. impl ProofBuilder for () { type Item = (); type Accum = (); fn push(&mut self, _: &mut (), _: ()) {} fn build(&mut self, _: StatementAddress, _: (), _: &[u8], _: Range<usize>) -> () {} } /// Working memory used by the verifier on a segment. This expands for the /// first few proofs and the rest can be handled without allocation. struct VerifyState<'a, P: 'a + ProofBuilder> { /// Segment we are working on this_seg: SegmentRef<'a>, /// Segment order oracle order: &'a SegmentOrder, /// Atom name oracle, used for hypotheses nameset: &'a Nameset, /// Used to access previously proved assertions scoper: ScopeReader<'a>, /// Used to produce proof trees as a side effect of verification builder: &'a mut P, /// The extended frame we are working on cur_frame: &'a Frame, /// Steps which can be invoked in the current proof, grows on every Z prepared: Vec<PreparedStep<'a, P::Item>>, /// Stack of active subtrees stack: Vec<(P::Item, StackSlot)>, /// Buffer for math strings of subtrees and hypotheses; shared to reduce /// actual copying when a hypothesis or saved step is recalled stack_buffer: Vec<u8>, /// Scratch space used only when checking the final step temp_buffer: Vec<u8>, /// Scratch space used for a substitution mapping while invoking a prior /// assertion subst_info: Vec<(Range<usize>, Bitset)>, /// Tracks mandatory and optional variables in use in the current proof var2bit: HashMap<Atom, usize>, /// Disjoint variable conditions in the current extended frame dv_map: &'a [Bitset], } type Result<T> = result::Result<T, Diagnostic>; /// Variables are added lazily to the extended frame. All variables which are /// associated with hypotheses or $d constraints are numbered by scopeck, but if /// a dummy variable is used in a proof without a $d constraint it won't be /// discovered until we get here, and a number needs to be assigned to it. /// Unfortunately this does mean that it'll be outside the valid range of dv_map /// and dv_map checks need to guard against that. fn map_var<'a, P: ProofBuilder>(state: &mut VerifyState<'a, P>, token: Atom) -> usize { let nbit = state.var2bit.len(); // actually, it _might not_ break anything to have a single variable index // allocated by scopeck for all non-$d-ed variables. after all, they aren't // observably disjoint. 
*state.var2bit.entry(token).or_insert(nbit) } // the initial hypotheses are accessed directly from the initial extended frame // to avoid having to look up their pseudo-frames by name; also, $e statements // no longer have pseudo-frames, so this is the only way to prepare an $e fn prepare_hypothesis<'a, P: ProofBuilder>(state: &mut VerifyState<P>, hyp: &'a scopeck::Hyp) { let mut vars = Bitset::new(); let tos = state.stack_buffer.len(); match hyp { &Floating(_addr, var_index, _typecode) => { fast_extend(&mut state.stack_buffer, state.nameset.atom_name(state.cur_frame.var_list[var_index])); *state.stack_buffer.last_mut().unwrap() |= 0x80; vars.set_bit(var_index); // and we have prior knowledge it's identity mapped } &Essential(_addr, ref expr) => { // this is the first of many subtle variations on the "interpret an // ExprFragment" theme in this module. for part in &*expr.tail { fast_extend(&mut state.stack_buffer, &state.cur_frame.const_pool[part.prefix.clone()]); fast_extend(&mut state.stack_buffer, state.nameset.atom_name(state.cur_frame.var_list[part.var])); *state.stack_buffer.last_mut().unwrap() |= 0x80; vars.set_bit(part.var); // and we have prior knowledge it's identity mapped } fast_extend(&mut state.stack_buffer, &state.cur_frame.const_pool[expr.rump.clone()]); } }; let ntos = state.stack_buffer.len(); state.prepared .push(Hyp(vars, hyp.typecode(),
state.builder.build(hyp.address(), Default::default(), &state.stack_buffer, tos..ntos))); } /// Adds a named $e hypothesis to the prepared array. These are not kept in the /// frame array due to infrequent use, so other measures are needed. This is /// not normally used by compressed proofs. /// /// This is used as a fallback when looking up a $e in the assertion hashtable /// fails. fn prepare_named_hyp<P: ProofBuilder>(state: &mut VerifyState<P>, label: TokenPtr) -> Result<()> { for hyp in &*state.cur_frame.hypotheses { if let &Essential(addr, _) = hyp { assert!(addr.segment_id == state.this_seg.id); // we don't allow $e statements to be valid across segments, so this // can be done as a local lookup in this_seg. Since we always // invalidate the VerifySegment if the current segment has changed // in any way, we don't even need to track dependencies here. if state.this_seg.statement(addr.index).label() == label { prepare_hypothesis(state, hyp); return Ok(()); } } } // whoops, not in the assertion table _or_ the extended frame return Err(Diagnostic::StepMissing(copy_token(label))); } /// Used for named step references. For NORMAL proofs this is immediately /// before execute_step, but for COMPRESSED proofs all used steps are prepared /// ahead of time, and assigned sequential numbers for later use. fn prepare_step<P: ProofBuilder>(state: &mut VerifyState<P>, label: TokenPtr) -> Result<()> { // it's either an assertion or a hypothesis. $f hyps have pseudo-frames // which this function can use, $e don't and need to be looked up in the // local hyp list after the frame lookup fails let frame = match state.scoper.get(label) { Some(fp) => fp, None => return prepare_named_hyp(state, label), }; // disallow circular reasoning let valid = frame.valid; let pos = state.cur_frame.valid.start; try_assert!(state.order.cmp(&pos, &valid.start) == Ordering::Greater, Diagnostic::StepUsedBeforeDefinition(copy_token(label))); try_assert!(valid.end == NO_STATEMENT || pos.segment_id == valid.start.segment_id && pos.index < valid.end, Diagnostic::StepUsedAfterScope(copy_token(label))); if frame.stype == StatementType::Axiom || frame.stype == StatementType::Provable { state.prepared.push(Assert(frame)); } else { let mut vars = Bitset::new(); for &var in &*frame.var_list { vars.set_bit(map_var(state, var)); } let tos = state.stack_buffer.len(); fast_extend(&mut state.stack_buffer, &frame.stub_expr); let ntos = state.stack_buffer.len(); state.prepared .push(Hyp(vars, frame.target.typecode, tos..ntos, state.builder.build(valid.start, Default::default(), &state.stack_buffer, tos..ntos))); } Ok(()) } // perform a substitution after it has been built in `vars`, appending to // `target` #[inline(always)] fn do_substitute(target: &mut Vec<u8>, frame: &Frame, expr: &VerifyExpr, vars: &[(Range<usize>, Bitset)]) { for part in &*expr.tail { fast_extend(target, &frame.const_pool[part.prefix.clone()]); copy_portion(target, vars[part.var].0.clone()); } fast_extend(target, &frame.const_pool[expr.rump.clone()]); } // like a substitution and equality check, but in one pass #[inline(always)] fn do_substitute_eq(mut compare: &[u8], frame: &Frame, expr: &VerifyExpr, vars: &[(Range<usize>, Bitset)], var_buffer: &[u8]) -> bool { fn step(compare: &mut &[u8], slice: &[u8]) -> bool { let len = slice.len(); if (*compare).len() < len { return true; } if slice!= &(*compare)[0..len] { return true; } *compare = &(*compare)[len..]; return false; } for part in &*expr.tail { if step(&mut compare, &frame.const_pool[part.prefix.clone()]) { return 
false; } if step(&mut compare, &var_buffer[vars[part.var].0.clone()]) { return false; } } if step(&mut compare, &frame.const_pool[expr.rump.clone()]) { return false; } return compare.is_empty(); } // substitute with the _names_ of variables, for the final "did we prove what we // claimed we would" check fn do_substitute_raw(target: &mut Vec<u8>, frame: &Frame, nameset: &Nameset) { for part in &*frame.target.tail { fast_extend(target, &frame.const_pool[part.prefix.clone()]); fast_extend(target, nameset.atom_name(frame.var_list[part.var])); *target.last_mut().unwrap() |= 0x80; } fast_extend(target, &frame.const_pool[frame.target.rump.clone()]); } // generate a bitmask for a substituted expression #[inline(always)] fn do_substitute_vars(expr: &[ExprFragment], vars: &[(Range<usize>, Bitset)]) -> Bitset { let mut out = Bitset::new(); for part in expr { out |= &vars[part.var].1; } out } /// This is the main "VM" function, and responsible for ~30% of CPU time during /// a one-shot verify operation. fn execute_step<P: ProofBuilder>(state: &mut VerifyState<P>, index: usize) -> Result<()> { try_assert!(index < state.prepared.len(), Diagnostic::StepOutOfRange); let fref = match state.prepared[index] { Hyp(ref vars, code, ref expr, ref data) => { // hypotheses/saved steps are the easy case. unfortunately, this is // also a very unpredictable branch state.stack.push((data.clone(), StackSlot { vars: vars.clone(), code: code, expr: expr.clone(), })); return Ok(()); } Assert(fref) => fref, }; let sbase = try!(state.stack .len() .checked_sub(fref.hypotheses.len()) .ok_or(Diagnostic::ProofUnderflow)); while state.subst_info.len() < fref.mandatory_count { // this is mildly unhygenic, since slots corresponding to $e hyps won't get cleared, but // scopeck shouldn't generate references to them state.subst_info.push((0..0, Bitset::new())); } let mut datavec = Default::default(); // process the hypotheses of the assertion we're about to apply. $f hyps // allow the caller to define a replacement for a variable; $e hyps are // logical hypotheses that must have been proved; the result is then // substituted and pushed. // // since a variable must be $f-declared before it can appear in an $e (or // else we'll ignore the $e), and that logical file order is reflected in // the stack order of the hypotheses, we can do this in one pass for (ix, hyp) in fref.hypotheses.iter().enumerate() { let (ref data, ref slot) = state.stack[sbase + ix]; state.builder.push(&mut datavec, data.clone()); match hyp { &Floating(_addr, var_index, typecode) => { try_assert!(slot.code == typecode, Diagnostic::StepFloatWrongType); state.subst_info[var_index] = (slot.expr.clone(), slot.vars.clone()); } &Essential(_addr, ref expr) => { try_assert!(slot.code == expr.typecode, Diagnostic::StepEssenWrongType); try_assert!(do_substitute_eq(&state.stack_buffer[slot.expr.clone()], fref, &expr, &state.subst_info, &state.stack_buffer), Diagnostic::StepEssenWrong); } } } // replace the hypotheses on the stack with the substituted target // expression. 
does not physically remove the hypotheses from the stack // pool, because they might have been saved steps or hypotheses, and // deciding whether we need to move anything would swamp any savings, anyway // - remember that this function is largely a branch predictor benchmark let tos = state.stack_buffer.len(); do_substitute(&mut state.stack_buffer, fref, &fref.target, &state.subst_info); let ntos = state.stack_buffer.len(); state.stack.truncate(sbase); state.stack .push((state.builder.build(fref.valid.start, datavec, &state.stack_buffer, tos..ntos), StackSlot { code: fref.target.typecode, vars: do_substitute_vars(&fref.target.tail, &state.subst_info), expr: tos..ntos, })); // check $d constraints on the used assertion now that the dust has settled. // Remember that we might have variable indexes allocated during the proof // that are out of range for dv_map for &(ix1, ix2) in &*fref.mandatory_dv { for var1 in &state.subst_info[ix1].1 { for var2 in &state.subst_info[ix2].1 { try_assert!(var1 < state.dv_map.len() && state.dv_map[var1].has_bit(var2), Diagnostic::ProofDvViolation); } } } Ok(()) } fn finalize_step<P: ProofBuilder>(state: &mut VerifyState<P>) -> Result<P::Item> { // if we get here, it's a valid proof, but was it the _right_ valid proof? try_assert!(state.stack.len() <= 1, Diagnostic::ProofExcessEnd); let &(ref data, ref tos) = try!(state.stack.last().ok_or(Diagnostic::ProofNoSteps)); try_assert!(tos.code == state.cur_frame.target.typecode, Diagnostic::ProofWrongTypeEnd); fast_clear(&mut state.temp_buffer); do_substitute_raw(&mut state.temp_buffer, &state.cur_frame, state.nameset); try_assert!(state.stack_buffer[tos.expr.clone()] == state.temp_buffer[..], Diagnostic::ProofWrongExprEnd); Ok(data.clone()) } fn save_step<P: ProofBuilder>(state: &mut VerifyState<P>) { let &(ref data, ref top) = state.stack.last().expect("can_save should prevent getting here"); state.prepared.push(Hyp(top.vars.clone(), top.code, top.expr.clone(), data.clone())); } // proofs are not self-synchronizing, so it's not likely to get >1 usable error fn verify_proof<'a, P: ProofBuilder>(state: &mut VerifyState<'a, P>, stmt: StatementRef<'a>) -> Result<P::Item> { // clear, but do not free memory state.stack.clear(); fast_clear(&mut state.stack_buffer); state.prepared.clear(); state.var2bit.clear(); state.dv_map = &state.cur_frame.optional_dv; // temp_buffer is cleared before use; subst_info should be overwritten // before use if scopeck is working correctly // use scopeck-assigned numbers for mandatory variables and optional // variables with active $d constraints. optional variables without active // $d constraints are numbered on demand by map_var for (index, &tokr) in state.cur_frame.var_list.iter().enumerate() { state.var2bit.insert(tokr, index); } if stmt.proof_len() > 0 && stmt.proof_slice_at(0) == b"(" { // this is a compressed proof let mut i = 1; // compressed proofs preload the hypotheses so they don't need to (but // are not forbidden to) reference them by name for hyp in &*state.cur_frame.hypotheses { prepare_hypothesis(state, hyp); } // parse and prepare the label list before the ) loop { try_assert!(i < stmt.proof_len(), Diagnostic::ProofUnterminatedRoster); let chunk = stmt.proof_slice_at(i); i += 1; if chunk == b")" { break; } try!(prepare_step(state, chunk)); } // after ) is a packed list of varints. decode them and execute the // corresponding steps. 
the varint decoder is surprisingly CPU-heavy, // presumably due to branch overhead let mut k = 0usize; let mut can_save = false; while i < stmt.proof_len() { let chunk = stmt.proof_slice_at(i); for &ch in chunk { if ch >= b'A' && ch <= b'T' { k = k * 20 + (ch - b'A') as usize; try!(execute_step(state, k)); k = 0; can_save = true; } else if ch >= b'U' && ch <= b'Y' { k = k * 5 + 1 + (ch - b'U') as usize; try_assert!(k < (u32::max_value() as usize / 20) - 1, Diagnostic::ProofMalformedVarint); can_save = false; } else if ch == b'Z' { try_assert!(can_save, Diagnostic::ProofInvalidSave); save_step(state); can_save = false; } else if ch == b'?' { try_assert!(k == 0, Diagnostic::ProofMalformedVarint); return Err(Diagnostic::ProofIncomplete); } } i += 1; } try_assert!(k == 0, Diagnostic::ProofMalformedVarint); } else { let mut count = 0; // NORMAL mode proofs are just a list of steps, with no saving provision for i in 0..stmt.proof_len() { let chunk = stmt.proof_slice_at(i); try_assert!(chunk!= b"?", Diagnostic::ProofIncomplete); try!(prepare_step(state, chunk)); try!(execute_step(state, count)); count += 1; } } finalize_step(state) } /// Stored result of running the verifier on a segment. struct VerifySegment { source: Arc<Segment>, scope_usage: ScopeUsage, diagnostics: HashMap<StatementAddress, Diagnostic>, }
tos..ntos,
random_line_split
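The ProofBuilder trait shown above is the verifier's only extension point: push folds one hypothesis result into an accumulator, and build turns an applied statement plus the accumulated hypotheses into a new item. As a minimal sketch assuming the trait exactly as quoted (with StatementAddress from the same crate), a builder that merely counts proof-tree nodes could look like this; the () implementation is the degenerate case that records nothing.

use std::ops::Range;

/// Illustrative builder: Item is the number of nodes in a proof subtree.
struct CountingBuilder;

impl ProofBuilder for CountingBuilder {
    type Item = usize;  // size of the subtree proved by one step
    type Accum = usize; // running total over the hypotheses of that step

    fn push(&mut self, hyps: &mut usize, hyp: usize) {
        *hyps += hyp;
    }

    fn build(&mut self,
             _addr: StatementAddress,
             hyps: usize,
             _pool: &[u8],
             _expr: Range<usize>) -> usize {
        hyps + 1 // this node plus all of its hypothesis subtrees
    }
}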
verify.rs
, Diagnostic>; /// Variables are added lazily to the extended frame. All variables which are /// associated with hypotheses or $d constraints are numbered by scopeck, but if /// a dummy variable is used in a proof without a $d constraint it won't be /// discovered until we get here, and a number needs to be assigned to it. /// Unfortunately this does mean that it'll be outside the valid range of dv_map /// and dv_map checks need to guard against that. fn map_var<'a, P: ProofBuilder>(state: &mut VerifyState<'a, P>, token: Atom) -> usize { let nbit = state.var2bit.len(); // actually, it _might not_ break anything to have a single variable index // allocated by scopeck for all non-$d-ed variables. after all, they aren't // observably disjoint. *state.var2bit.entry(token).or_insert(nbit) } // the initial hypotheses are accessed directly from the initial extended frame // to avoid having to look up their pseudo-frames by name; also, $e statements // no longer have pseudo-frames, so this is the only way to prepare an $e fn prepare_hypothesis<'a, P: ProofBuilder>(state: &mut VerifyState<P>, hyp: &'a scopeck::Hyp) { let mut vars = Bitset::new(); let tos = state.stack_buffer.len(); match hyp { &Floating(_addr, var_index, _typecode) => { fast_extend(&mut state.stack_buffer, state.nameset.atom_name(state.cur_frame.var_list[var_index])); *state.stack_buffer.last_mut().unwrap() |= 0x80; vars.set_bit(var_index); // and we have prior knowledge it's identity mapped } &Essential(_addr, ref expr) => { // this is the first of many subtle variations on the "interpret an // ExprFragment" theme in this module. for part in &*expr.tail { fast_extend(&mut state.stack_buffer, &state.cur_frame.const_pool[part.prefix.clone()]); fast_extend(&mut state.stack_buffer, state.nameset.atom_name(state.cur_frame.var_list[part.var])); *state.stack_buffer.last_mut().unwrap() |= 0x80; vars.set_bit(part.var); // and we have prior knowledge it's identity mapped } fast_extend(&mut state.stack_buffer, &state.cur_frame.const_pool[expr.rump.clone()]); } }; let ntos = state.stack_buffer.len(); state.prepared .push(Hyp(vars, hyp.typecode(), tos..ntos, state.builder.build(hyp.address(), Default::default(), &state.stack_buffer, tos..ntos))); } /// Adds a named $e hypothesis to the prepared array. These are not kept in the /// frame array due to infrequent use, so other measures are needed. This is /// not normally used by compressed proofs. /// /// This is used as a fallback when looking up a $e in the assertion hashtable /// fails. fn prepare_named_hyp<P: ProofBuilder>(state: &mut VerifyState<P>, label: TokenPtr) -> Result<()> { for hyp in &*state.cur_frame.hypotheses { if let &Essential(addr, _) = hyp { assert!(addr.segment_id == state.this_seg.id); // we don't allow $e statements to be valid across segments, so this // can be done as a local lookup in this_seg. Since we always // invalidate the VerifySegment if the current segment has changed // in any way, we don't even need to track dependencies here. if state.this_seg.statement(addr.index).label() == label { prepare_hypothesis(state, hyp); return Ok(()); } } } // whoops, not in the assertion table _or_ the extended frame return Err(Diagnostic::StepMissing(copy_token(label))); } /// Used for named step references. For NORMAL proofs this is immediately /// before execute_step, but for COMPRESSED proofs all used steps are prepared /// ahead of time, and assigned sequential numbers for later use. 
fn prepare_step<P: ProofBuilder>(state: &mut VerifyState<P>, label: TokenPtr) -> Result<()> { // it's either an assertion or a hypothesis. $f hyps have pseudo-frames // which this function can use, $e don't and need to be looked up in the // local hyp list after the frame lookup fails let frame = match state.scoper.get(label) { Some(fp) => fp, None => return prepare_named_hyp(state, label), }; // disallow circular reasoning let valid = frame.valid; let pos = state.cur_frame.valid.start; try_assert!(state.order.cmp(&pos, &valid.start) == Ordering::Greater, Diagnostic::StepUsedBeforeDefinition(copy_token(label))); try_assert!(valid.end == NO_STATEMENT || pos.segment_id == valid.start.segment_id && pos.index < valid.end, Diagnostic::StepUsedAfterScope(copy_token(label))); if frame.stype == StatementType::Axiom || frame.stype == StatementType::Provable { state.prepared.push(Assert(frame)); } else { let mut vars = Bitset::new(); for &var in &*frame.var_list { vars.set_bit(map_var(state, var)); } let tos = state.stack_buffer.len(); fast_extend(&mut state.stack_buffer, &frame.stub_expr); let ntos = state.stack_buffer.len(); state.prepared .push(Hyp(vars, frame.target.typecode, tos..ntos, state.builder.build(valid.start, Default::default(), &state.stack_buffer, tos..ntos))); } Ok(()) } // perform a substitution after it has been built in `vars`, appending to // `target` #[inline(always)] fn do_substitute(target: &mut Vec<u8>, frame: &Frame, expr: &VerifyExpr, vars: &[(Range<usize>, Bitset)]) { for part in &*expr.tail { fast_extend(target, &frame.const_pool[part.prefix.clone()]); copy_portion(target, vars[part.var].0.clone()); } fast_extend(target, &frame.const_pool[expr.rump.clone()]); } // like a substitution and equality check, but in one pass #[inline(always)] fn do_substitute_eq(mut compare: &[u8], frame: &Frame, expr: &VerifyExpr, vars: &[(Range<usize>, Bitset)], var_buffer: &[u8]) -> bool { fn step(compare: &mut &[u8], slice: &[u8]) -> bool { let len = slice.len(); if (*compare).len() < len { return true; } if slice!= &(*compare)[0..len] { return true; } *compare = &(*compare)[len..]; return false; } for part in &*expr.tail { if step(&mut compare, &frame.const_pool[part.prefix.clone()]) { return false; } if step(&mut compare, &var_buffer[vars[part.var].0.clone()]) { return false; } } if step(&mut compare, &frame.const_pool[expr.rump.clone()]) { return false; } return compare.is_empty(); } // substitute with the _names_ of variables, for the final "did we prove what we // claimed we would" check fn do_substitute_raw(target: &mut Vec<u8>, frame: &Frame, nameset: &Nameset) { for part in &*frame.target.tail { fast_extend(target, &frame.const_pool[part.prefix.clone()]); fast_extend(target, nameset.atom_name(frame.var_list[part.var])); *target.last_mut().unwrap() |= 0x80; } fast_extend(target, &frame.const_pool[frame.target.rump.clone()]); } // generate a bitmask for a substituted expression #[inline(always)] fn do_substitute_vars(expr: &[ExprFragment], vars: &[(Range<usize>, Bitset)]) -> Bitset { let mut out = Bitset::new(); for part in expr { out |= &vars[part.var].1; } out } /// This is the main "VM" function, and responsible for ~30% of CPU time during /// a one-shot verify operation. fn execute_step<P: ProofBuilder>(state: &mut VerifyState<P>, index: usize) -> Result<()> { try_assert!(index < state.prepared.len(), Diagnostic::StepOutOfRange); let fref = match state.prepared[index] { Hyp(ref vars, code, ref expr, ref data) => { // hypotheses/saved steps are the easy case. 
unfortunately, this is // also a very unpredictable branch state.stack.push((data.clone(), StackSlot { vars: vars.clone(), code: code, expr: expr.clone(), })); return Ok(()); } Assert(fref) => fref, }; let sbase = try!(state.stack .len() .checked_sub(fref.hypotheses.len()) .ok_or(Diagnostic::ProofUnderflow)); while state.subst_info.len() < fref.mandatory_count { // this is mildly unhygenic, since slots corresponding to $e hyps won't get cleared, but // scopeck shouldn't generate references to them state.subst_info.push((0..0, Bitset::new())); } let mut datavec = Default::default(); // process the hypotheses of the assertion we're about to apply. $f hyps // allow the caller to define a replacement for a variable; $e hyps are // logical hypotheses that must have been proved; the result is then // substituted and pushed. // // since a variable must be $f-declared before it can appear in an $e (or // else we'll ignore the $e), and that logical file order is reflected in // the stack order of the hypotheses, we can do this in one pass for (ix, hyp) in fref.hypotheses.iter().enumerate() { let (ref data, ref slot) = state.stack[sbase + ix]; state.builder.push(&mut datavec, data.clone()); match hyp { &Floating(_addr, var_index, typecode) => { try_assert!(slot.code == typecode, Diagnostic::StepFloatWrongType); state.subst_info[var_index] = (slot.expr.clone(), slot.vars.clone()); } &Essential(_addr, ref expr) => { try_assert!(slot.code == expr.typecode, Diagnostic::StepEssenWrongType); try_assert!(do_substitute_eq(&state.stack_buffer[slot.expr.clone()], fref, &expr, &state.subst_info, &state.stack_buffer), Diagnostic::StepEssenWrong); } } } // replace the hypotheses on the stack with the substituted target // expression. does not physically remove the hypotheses from the stack // pool, because they might have been saved steps or hypotheses, and // deciding whether we need to move anything would swamp any savings, anyway // - remember that this function is largely a branch predictor benchmark let tos = state.stack_buffer.len(); do_substitute(&mut state.stack_buffer, fref, &fref.target, &state.subst_info); let ntos = state.stack_buffer.len(); state.stack.truncate(sbase); state.stack .push((state.builder.build(fref.valid.start, datavec, &state.stack_buffer, tos..ntos), StackSlot { code: fref.target.typecode, vars: do_substitute_vars(&fref.target.tail, &state.subst_info), expr: tos..ntos, })); // check $d constraints on the used assertion now that the dust has settled. // Remember that we might have variable indexes allocated during the proof // that are out of range for dv_map for &(ix1, ix2) in &*fref.mandatory_dv { for var1 in &state.subst_info[ix1].1 { for var2 in &state.subst_info[ix2].1 { try_assert!(var1 < state.dv_map.len() && state.dv_map[var1].has_bit(var2), Diagnostic::ProofDvViolation); } } } Ok(()) } fn finalize_step<P: ProofBuilder>(state: &mut VerifyState<P>) -> Result<P::Item> { // if we get here, it's a valid proof, but was it the _right_ valid proof? 
try_assert!(state.stack.len() <= 1, Diagnostic::ProofExcessEnd); let &(ref data, ref tos) = try!(state.stack.last().ok_or(Diagnostic::ProofNoSteps)); try_assert!(tos.code == state.cur_frame.target.typecode, Diagnostic::ProofWrongTypeEnd); fast_clear(&mut state.temp_buffer); do_substitute_raw(&mut state.temp_buffer, &state.cur_frame, state.nameset); try_assert!(state.stack_buffer[tos.expr.clone()] == state.temp_buffer[..], Diagnostic::ProofWrongExprEnd); Ok(data.clone()) } fn save_step<P: ProofBuilder>(state: &mut VerifyState<P>) { let &(ref data, ref top) = state.stack.last().expect("can_save should prevent getting here"); state.prepared.push(Hyp(top.vars.clone(), top.code, top.expr.clone(), data.clone())); } // proofs are not self-synchronizing, so it's not likely to get >1 usable error fn verify_proof<'a, P: ProofBuilder>(state: &mut VerifyState<'a, P>, stmt: StatementRef<'a>) -> Result<P::Item> { // clear, but do not free memory state.stack.clear(); fast_clear(&mut state.stack_buffer); state.prepared.clear(); state.var2bit.clear(); state.dv_map = &state.cur_frame.optional_dv; // temp_buffer is cleared before use; subst_info should be overwritten // before use if scopeck is working correctly // use scopeck-assigned numbers for mandatory variables and optional // variables with active $d constraints. optional variables without active // $d constraints are numbered on demand by map_var for (index, &tokr) in state.cur_frame.var_list.iter().enumerate() { state.var2bit.insert(tokr, index); } if stmt.proof_len() > 0 && stmt.proof_slice_at(0) == b"(" { // this is a compressed proof let mut i = 1; // compressed proofs preload the hypotheses so they don't need to (but // are not forbidden to) reference them by name for hyp in &*state.cur_frame.hypotheses { prepare_hypothesis(state, hyp); } // parse and prepare the label list before the ) loop { try_assert!(i < stmt.proof_len(), Diagnostic::ProofUnterminatedRoster); let chunk = stmt.proof_slice_at(i); i += 1; if chunk == b")" { break; } try!(prepare_step(state, chunk)); } // after ) is a packed list of varints. decode them and execute the // corresponding steps. the varint decoder is surprisingly CPU-heavy, // presumably due to branch overhead let mut k = 0usize; let mut can_save = false; while i < stmt.proof_len() { let chunk = stmt.proof_slice_at(i); for &ch in chunk { if ch >= b'A' && ch <= b'T' { k = k * 20 + (ch - b'A') as usize; try!(execute_step(state, k)); k = 0; can_save = true; } else if ch >= b'U' && ch <= b'Y' { k = k * 5 + 1 + (ch - b'U') as usize; try_assert!(k < (u32::max_value() as usize / 20) - 1, Diagnostic::ProofMalformedVarint); can_save = false; } else if ch == b'Z' { try_assert!(can_save, Diagnostic::ProofInvalidSave); save_step(state); can_save = false; } else if ch == b'?' { try_assert!(k == 0, Diagnostic::ProofMalformedVarint); return Err(Diagnostic::ProofIncomplete); } } i += 1; } try_assert!(k == 0, Diagnostic::ProofMalformedVarint); } else { let mut count = 0; // NORMAL mode proofs are just a list of steps, with no saving provision for i in 0..stmt.proof_len() { let chunk = stmt.proof_slice_at(i); try_assert!(chunk!= b"?", Diagnostic::ProofIncomplete); try!(prepare_step(state, chunk)); try!(execute_step(state, count)); count += 1; } } finalize_step(state) } /// Stored result of running the verifier on a segment. struct VerifySegment { source: Arc<Segment>, scope_usage: ScopeUsage, diagnostics: HashMap<StatementAddress, Diagnostic>, } /// Analysis pass result for the verifier. 
#[derive(Default,Clone)] pub struct VerifyResult { segments: HashMap<SegmentId, Arc<VerifySegment>>, } impl VerifyResult { /// Report errors found during database verification. pub fn diagnostics(&self) -> Vec<(StatementAddress, Diagnostic)> { let mut out = Vec::new(); for vsr in self.segments.values() { for (&sa, &ref diag) in &vsr.diagnostics { out.push((sa, diag.clone())); } } out } } /// Driver which verifies each statement in a segment. fn verify_segment(sset: &SegmentSet, nset: &Nameset, scopes: &ScopeResult, sid: SegmentId) -> VerifySegment { let mut diagnostics = new_map(); let dummy_frame = Frame::default(); let sref = sset.segment(sid); let mut state = VerifyState { this_seg: sref, scoper: ScopeReader::new(scopes), nameset: nset, builder: &mut (), order: &sset.order, cur_frame: &dummy_frame, stack: Vec::new(), stack_buffer: Vec::new(), prepared: Vec::new(), temp_buffer: Vec::new(), subst_info: Vec::new(), var2bit: new_map(), dv_map: &dummy_frame.optional_dv, }; // use the _same_ VerifyState so that memory can be reused for stmt in sref { // only intend to check $p statements if stmt.statement_type() == StatementType::Provable { // no valid frame -> no use checking // may wish to record a secondary error? if let Some(frame) = state.scoper.get(stmt.label())
{ state.cur_frame = frame; if let Err(diag) = verify_proof(&mut state, stmt) { diagnostics.insert(stmt.address(), diag); } }
conditional_block
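The loop at the end of execute_step enforces the applied assertion's mandatory $d constraints: for each disjoint pair, every variable appearing in the substitution for one side must be declared disjoint from every variable appearing in the substitution for the other, and variables numbered lazily beyond dv_map can never pass. A rough standalone sketch of one such pair check, with HashSet<usize> standing in for the verifier's Bitset (all names here are illustrative):

use std::collections::HashSet;

/// vars_a / vars_b: variables occurring in the two substituted expressions
/// (the Bitsets kept in subst_info above); dv_map[v]: variables declared
/// disjoint from v in the extended frame.
fn dv_pair_ok(vars_a: &HashSet<usize>,
              vars_b: &HashSet<usize>,
              dv_map: &[HashSet<usize>]) -> bool {
    vars_a.iter().all(|&v1| {
        vars_b.iter().all(|&v2| {
            // a variable numbered past dv_map was added lazily with no $d
            // information, so the check fails, just as in the code above
            v1 < dv_map.len() && dv_map[v1].contains(&v2)
        })
    })
}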
instruction.rs
use std::fmt; use std::hash; enum_from_primitive! { #[allow(non_camel_case_types)] #[derive(Clone, Copy, Debug, PartialEq)] pub enum Opcode { // Two-operand opcodes (2OP) OP2_1 = 1, OP2_2 = 2, OP2_3 = 3, OP2_4 = 4, OP2_5 = 5, OP2_6 = 6, OP2_7 = 7, OP2_8 = 8, OP2_9 = 9, OP2_10 = 10, OP2_11 = 11, OP2_12 = 12, OP2_13 = 13, OP2_14 = 14, OP2_15 = 15, OP2_16 = 16, OP2_17 = 17, OP2_18 = 18, OP2_19 = 19, OP2_20 = 20, OP2_21 = 21, OP2_22 = 22, OP2_23 = 23, OP2_24 = 24, OP2_25 = 25, OP2_26 = 26, OP2_27 = 27, OP2_28 = 28, // One-operand opcodes (1OP) OP1_128 = 128, OP1_129 = 129, OP1_130 = 130, OP1_131 = 131, OP1_132 = 132, OP1_133 = 133, OP1_134 = 134, OP1_135 = 135, OP1_136 = 136, OP1_137 = 137, OP1_138 = 138, OP1_139 = 139, OP1_140 = 140, OP1_141 = 141, OP1_142 = 142, OP1_143 = 143, // Zero-operand opcodes (0OP) OP0_176 = 176, OP0_177 = 177, OP0_178 = 178, OP0_179 = 179, OP0_180 = 180, OP0_181 = 181, OP0_182 = 182, OP0_183 = 183, OP0_184 = 184, OP0_185 = 185, OP0_186 = 186, OP0_187 = 187, OP0_188 = 188, OP0_189 = 189, OP0_191 = 191, // Variable-operand opcodes (VAR) VAR_224 = 224, VAR_225 = 225, VAR_226 = 226, VAR_227 = 227, VAR_228 = 228, VAR_229 = 229, VAR_230 = 230, VAR_231 = 231, VAR_232 = 232, VAR_233 = 233, VAR_234 = 234, VAR_235 = 235, VAR_236 = 236, VAR_237 = 237, VAR_238 = 238, VAR_239 = 239, VAR_240 = 240, VAR_241 = 241, VAR_242 = 242, VAR_243 = 243, VAR_244 = 244, VAR_245 = 245, VAR_246 = 246, VAR_247 = 247, VAR_248 = 248, VAR_249 = 249, VAR_250 = 250, VAR_251 = 251, VAR_252 = 252, VAR_253 = 253, VAR_254 = 254, VAR_255 = 255, // Extended opcodes (EXT) EXT_1000 = 1000, EXT_1001 = 1001, EXT_1002 = 1002, EXT_1003 = 1003, EXT_1004 = 1004, EXT_1005 = 1005, EXT_1006 = 1006, EXT_1007 = 1007, EXT_1008 = 1008, EXT_1009 = 1009, EXT_1010 = 1010, EXT_1011 = 1011, EXT_1012 = 1012, EXT_1013 = 1013, EXT_1016 = 1016, EXT_1017 = 1017, EXT_1018 = 1018, EXT_1019 = 1019, EXT_1020 = 1020, EXT_1021 = 1021, EXT_1022 = 1022, EXT_1023 = 1023, EXT_1024 = 1024, EXT_1025 = 1025, EXT_1026 = 1026, EXT_1027 = 1027, EXT_1028 = 1028, EXT_1029 = 1029, } } #[derive(Debug, PartialEq)] pub enum OperandType { Small, Large, Variable, Omitted, } impl OperandType { pub fn from(bytes: &[u8]) -> Vec<OperandType> { bytes .iter() .fold(Vec::new(), |mut acc, n| { acc.push((n & 0b1100_0000) >> 6); acc.push((n & 0b0011_0000) >> 4); acc.push((n & 0b0000_1100) >> 2); acc.push(n & 0b0000_0011); acc }) .into_iter() .map(|b| match b { 0b00 => OperandType::Large, 0b01 => OperandType::Small, 0b10 => OperandType::Variable, 0b11 => OperandType::Omitted, _ => unreachable!("Can't get operand type of: {:08b}", b), }) .take_while(|t| *t!= OperandType::Omitted) .collect() } } #[derive(Debug)] pub enum Operand { Small(u8), Large(u16), Variable(u8), } impl fmt::Display for Operand { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { Operand::Small(x) => write!(f, "#{:02x}", x), Operand::Large(x) => write!(f, "{:04x}", x), Operand::Variable(x) => match x { 0 => write!(f, "sp"), 1...15 => write!(f, "local{}", x - 1), _ => write!(f, "g{}", x - 16), }, } } } #[derive(Debug)] pub struct Branch { pub condition: u16, pub address: Option<usize>, pub returns: Option<u16>, } #[derive(Debug)] pub struct Instruction { pub addr: usize, pub opcode: Opcode, pub name: String, pub operands: Vec<Operand>, pub store: Option<u8>, pub branch: Option<Branch>, pub text: Option<String>, pub next: usize, } impl Instruction { pub fn does_store(opcode: Opcode, version: u8) -> bool { use self::Opcode::*; match opcode { // does a store in any version OP2_8 | 
OP2_9 | OP2_15 | OP2_16 | OP2_17 | OP2_18 | OP2_19 | OP2_20 | OP2_21 | OP2_22 | OP2_23 | OP2_24 | OP2_25 | OP1_129 | OP1_130 | OP1_131 | OP1_132 | OP1_136 | OP1_142 | VAR_224 | VAR_231 | VAR_236 | VAR_246 | VAR_247 | VAR_248 | EXT_1000 | EXT_1001 | EXT_1002 | EXT_1003 | EXT_1004 | EXT_1009 | EXT_1010 | EXT_1019 | EXT_1029 => true, // only stores in certain versions OP1_143 => version < 5, OP0_181 => version == 4, // missing * in spec? OP0_182 => version == 4, // missing * in spec? OP0_185 => version >= 5, VAR_228 => version >= 5, VAR_233 => version == 6, _ => false, } } pub fn does_branch(opcode: Opcode, version: u8) -> bool { use self::Opcode::*; match opcode { // does a branch in any version OP2_1 | OP2_2 | OP2_3 | OP2_4 | OP2_5 | OP2_6 | OP2_7 | OP2_10 | OP1_128 | OP1_129 | OP1_130 | OP0_189 | OP0_191 | VAR_247 | VAR_255 | EXT_1006 | EXT_1024 | EXT_1027 => { true } // only branches in certain versions OP0_181 => version < 4, OP0_182 => version < 4, _ => false, } } pub fn does_text(opcode: Opcode) -> bool { use self::Opcode::*; match opcode { OP0_178 | OP0_179 => true, _ => false, } } pub fn name(opcode: Opcode, version: u8) -> String { use self::Opcode::*; match opcode { OP2_1 => "je", OP2_2 => "jl", OP2_3 => "jg", OP2_4 => "dec_chk", OP2_5 => "inc_chk", OP2_6 => "jin", OP2_7 => "test", OP2_8 => "or", OP2_9 => "and", OP2_10 => "test_attr", OP2_11 => "set_attr", OP2_12 => "clear_attr", OP2_13 => "store", OP2_14 => "insert_obj", OP2_15 => "loadw", OP2_16 => "loadb", OP2_17 => "get_prop", OP2_18 => "get_prop_addr", OP2_19 => "get_next_prop", OP2_20 => "add", OP2_21 => "sub", OP2_22 => "mul", OP2_23 => "div", OP2_24 => "mod", OP2_25 => "call_2s", OP2_26 => "call_2n", OP2_27 => "set_colour", OP2_28 => "throw", OP1_128 => "jz", OP1_129 => "get_sibling", OP1_130 => "get_child", OP1_131 => "get_parent", OP1_132 => "get_prop_len", OP1_133 => "inc", OP1_134 => "dec", OP1_135 => "print_addr", OP1_136 => "call_1s", OP1_137 => "remove_obj", OP1_138 => "print_obj", OP1_139 => "ret", OP1_140 => "jump", OP1_141 => "print_paddr", OP1_142 => "load", // actually 2 different operations: OP1_143 => if version < 4 { "not" } else { "call_1n" }, OP0_176 => "rtrue", OP0_177 => "rfalse", OP0_178 => "print", OP0_179 => "print_ret", OP0_180 => "nop", OP0_181 => "save", OP0_182 => "restore", OP0_183 => "restart", OP0_184 => "ret_popped", // actually 2 different operations: OP0_185 => if version < 4 { "pop" } else { "catch" }, OP0_186 => "quit", OP0_187 => "new_line", OP0_188 => "show_status", OP0_189 => "verify", OP0_191 => "piracy", // "call" is the same as "call_vs" (name changed to remove ambiguity) VAR_224 => if version < 4 { "call" } else { "call_vs" }, VAR_225 => "storew", VAR_226 => "storeb", VAR_227 => "put_prop", // "sread", "aread", plain "read" are really all the same thing: VAR_228 => if version < 4 { "sread" } else { "aread" }, VAR_229 => "print_char", VAR_230 => "print_num", VAR_231 => "random", VAR_232 => "push", VAR_233 => "pull", VAR_234 => "split_window", VAR_235 => "set_window", VAR_236 => "call_vs2", VAR_237 => "erase_window", VAR_238 => "erase_line", VAR_239 => "set_cursor", VAR_240 => "get_cursor", VAR_241 => "set_text_style", VAR_242 => "buffer_mode", VAR_243 => "output_stream", VAR_244 => "input_stream", VAR_245 => "sound_effect", VAR_246 => "read_char", VAR_247 => "scan_table", VAR_248 => "not", VAR_249 => "call_vn", VAR_250 => "call_vn2", VAR_251 => "tokenise", VAR_252 => "encode_text", VAR_253 => "copy_table", VAR_254 => "print_table", VAR_255 => "check_arg_count", EXT_1000 => "save", 
EXT_1001 => "restore", EXT_1002 => "log_shift", EXT_1003 => "art_shift", EXT_1004 => "set_font", EXT_1005 => "draw_picture", EXT_1006 => "picture_data", EXT_1007 => "erase_picture", EXT_1008 => "set_margins", EXT_1009 => "save_undo", EXT_1010 => "restore_undo", EXT_1011 => "print_unicode", EXT_1012 => "check_unicode", EXT_1013 => "set_true_colour", EXT_1016 => "move_window", EXT_1017 => "window_size", EXT_1018 => "window_style", EXT_1019 => "get_wind_prop", EXT_1020 => "scroll_window", EXT_1021 => "pop_stack", EXT_1022 => "read_mouse", EXT_1023 => "mouse_window", EXT_1024 => "push_stack", EXT_1025 => "put_wind_prop", EXT_1026 => "print_form", EXT_1027 => "make_menu", EXT_1028 => "picture_table", EXT_1029 => "buffer_screen", }.to_string() } } impl Instruction { pub fn advances(&self) -> bool { use self::Opcode::*; // Some instructions never advance to the next instruction: // throw, ret, jump, rtrue, rfalse, print_ret, restart, and ret_popped match self.opcode { OP2_28 | OP1_139 | OP1_140 | OP0_176 | OP0_177 | OP0_179 | OP0_183 | OP0_184 | OP0_186 => false, _ => true, } } pub fn does_call(&self, version: u8) -> bool
pub fn should_advance(&self, version: u8) -> bool { !self.does_call(version) && self.opcode!= Opcode::OP0_181 && self.opcode!= Opcode::OP0_182 } } impl hash::Hash for Instruction { fn hash<H>(&self, state: &mut H) where H: hash::Hasher, { state.write_usize(self.addr); state.finish(); } } impl PartialEq for Instruction { fn eq(&self, other: &Instruction) -> bool { self.addr == other.addr } } impl Eq for Instruction {} impl fmt::Display for Instruction { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{:5x}: {:16}", self.addr, self.name)?; for op in &self.operands { write!(f, " {}", op)?; } if let Some(x) = self.store { match x { 0 => write!(f, " -> sp"), 1...15 => write!(f, " -> local{}", x - 1), _ => write!(f, " -> g{}", x - 16), }?; }; if let Some(Branch { address, returns, condition, }) = self.branch { match (address, returns, condition) { (Some(addr), _, 1) => write!(f, "?{:04x}", addr), (Some(addr), _, 0) => write!(f, "?~{:04x}", addr), (None, Some(1), 1) => write!(f, "?rtrue"), (None, Some(1), 0) => write!(f, "?~rtrue"), (None, Some(0), 1) => write!(f, "?rfalse"), (None, Some(0), 0) => write!(f, "?~rfalse"), _ => write!(f, ""), }?; }; if let Some(ref text) = self.text { write!(f, " {}", text)?; }; write!(f, "") } }
{ use self::Opcode::*; match self.opcode { OP2_25 | OP2_26 | OP1_136 | VAR_224 | VAR_236 | VAR_249 | VAR_250 => true, OP1_143 => version >= 4, _ => false, } }
identifier_body
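As an illustrative aside, here is a minimal standalone sketch of the 2-bit operand-type decoding performed by `OperandType::from` in the example above: each type byte packs four operand types, most significant pair first, with 0b00 = Large, 0b01 = Small, 0b10 = Variable and 0b11 = Omitted, and decoding stops at the first Omitted entry. The helper name and the `main` driver are invented for illustration only.

// Illustrative sketch of the 2-bit operand-type decoding shown above.
fn decode_type_byte(n: u8) -> Vec<u8> {
    vec![
        (n & 0b1100_0000) >> 6, // first operand type (most significant pair)
        (n & 0b0011_0000) >> 4,
        (n & 0b0000_1100) >> 2,
        n & 0b0000_0011,
    ]
    .into_iter()
    .take_while(|&b| b != 0b11) // 0b11 marks an omitted operand and ends the list
    .collect()
}

fn main() {
    // 0b00_01_10_11 decodes to Large, Small, Variable; the trailing Omitted is dropped.
    assert_eq!(decode_type_byte(0b0001_1011), vec![0b00, 0b01, 0b10]);
}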
instruction.rs
use std::fmt; use std::hash; enum_from_primitive! { #[allow(non_camel_case_types)] #[derive(Clone, Copy, Debug, PartialEq)] pub enum Opcode { // Two-operand opcodes (2OP) OP2_1 = 1, OP2_2 = 2, OP2_3 = 3, OP2_4 = 4, OP2_5 = 5, OP2_6 = 6, OP2_7 = 7, OP2_8 = 8, OP2_9 = 9, OP2_10 = 10, OP2_11 = 11, OP2_12 = 12, OP2_13 = 13, OP2_14 = 14, OP2_15 = 15, OP2_16 = 16, OP2_17 = 17, OP2_18 = 18, OP2_19 = 19, OP2_20 = 20, OP2_21 = 21, OP2_22 = 22, OP2_23 = 23, OP2_24 = 24, OP2_25 = 25, OP2_26 = 26, OP2_27 = 27, OP2_28 = 28, // One-operand opcodes (1OP) OP1_128 = 128, OP1_129 = 129, OP1_130 = 130, OP1_131 = 131, OP1_132 = 132, OP1_133 = 133, OP1_134 = 134, OP1_135 = 135, OP1_136 = 136, OP1_137 = 137, OP1_138 = 138, OP1_139 = 139, OP1_140 = 140, OP1_141 = 141, OP1_142 = 142, OP1_143 = 143, // Zero-operand opcodes (0OP) OP0_176 = 176, OP0_177 = 177, OP0_178 = 178, OP0_179 = 179, OP0_180 = 180, OP0_181 = 181, OP0_182 = 182, OP0_183 = 183, OP0_184 = 184, OP0_185 = 185, OP0_186 = 186, OP0_187 = 187, OP0_188 = 188, OP0_189 = 189, OP0_191 = 191, // Variable-operand opcodes (VAR) VAR_224 = 224, VAR_225 = 225, VAR_226 = 226, VAR_227 = 227, VAR_228 = 228, VAR_229 = 229, VAR_230 = 230, VAR_231 = 231, VAR_232 = 232, VAR_233 = 233, VAR_234 = 234, VAR_235 = 235, VAR_236 = 236, VAR_237 = 237, VAR_238 = 238, VAR_239 = 239, VAR_240 = 240, VAR_241 = 241, VAR_242 = 242, VAR_243 = 243, VAR_244 = 244, VAR_245 = 245, VAR_246 = 246, VAR_247 = 247, VAR_248 = 248, VAR_249 = 249, VAR_250 = 250, VAR_251 = 251, VAR_252 = 252, VAR_253 = 253, VAR_254 = 254, VAR_255 = 255, // Extended opcodes (EXT) EXT_1000 = 1000, EXT_1001 = 1001, EXT_1002 = 1002, EXT_1003 = 1003, EXT_1004 = 1004, EXT_1005 = 1005, EXT_1006 = 1006, EXT_1007 = 1007, EXT_1008 = 1008, EXT_1009 = 1009, EXT_1010 = 1010, EXT_1011 = 1011, EXT_1012 = 1012, EXT_1013 = 1013, EXT_1016 = 1016, EXT_1017 = 1017, EXT_1018 = 1018, EXT_1019 = 1019, EXT_1020 = 1020, EXT_1021 = 1021, EXT_1022 = 1022, EXT_1023 = 1023, EXT_1024 = 1024, EXT_1025 = 1025, EXT_1026 = 1026, EXT_1027 = 1027, EXT_1028 = 1028, EXT_1029 = 1029, } } #[derive(Debug, PartialEq)] pub enum OperandType { Small, Large, Variable, Omitted, } impl OperandType { pub fn
(bytes: &[u8]) -> Vec<OperandType> { bytes .iter() .fold(Vec::new(), |mut acc, n| { acc.push((n & 0b1100_0000) >> 6); acc.push((n & 0b0011_0000) >> 4); acc.push((n & 0b0000_1100) >> 2); acc.push(n & 0b0000_0011); acc }) .into_iter() .map(|b| match b { 0b00 => OperandType::Large, 0b01 => OperandType::Small, 0b10 => OperandType::Variable, 0b11 => OperandType::Omitted, _ => unreachable!("Can't get operand type of: {:08b}", b), }) .take_while(|t| *t!= OperandType::Omitted) .collect() } } #[derive(Debug)] pub enum Operand { Small(u8), Large(u16), Variable(u8), } impl fmt::Display for Operand { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { Operand::Small(x) => write!(f, "#{:02x}", x), Operand::Large(x) => write!(f, "{:04x}", x), Operand::Variable(x) => match x { 0 => write!(f, "sp"), 1...15 => write!(f, "local{}", x - 1), _ => write!(f, "g{}", x - 16), }, } } } #[derive(Debug)] pub struct Branch { pub condition: u16, pub address: Option<usize>, pub returns: Option<u16>, } #[derive(Debug)] pub struct Instruction { pub addr: usize, pub opcode: Opcode, pub name: String, pub operands: Vec<Operand>, pub store: Option<u8>, pub branch: Option<Branch>, pub text: Option<String>, pub next: usize, } impl Instruction { pub fn does_store(opcode: Opcode, version: u8) -> bool { use self::Opcode::*; match opcode { // does a store in any version OP2_8 | OP2_9 | OP2_15 | OP2_16 | OP2_17 | OP2_18 | OP2_19 | OP2_20 | OP2_21 | OP2_22 | OP2_23 | OP2_24 | OP2_25 | OP1_129 | OP1_130 | OP1_131 | OP1_132 | OP1_136 | OP1_142 | VAR_224 | VAR_231 | VAR_236 | VAR_246 | VAR_247 | VAR_248 | EXT_1000 | EXT_1001 | EXT_1002 | EXT_1003 | EXT_1004 | EXT_1009 | EXT_1010 | EXT_1019 | EXT_1029 => true, // only stores in certain versions OP1_143 => version < 5, OP0_181 => version == 4, // missing * in spec? OP0_182 => version == 4, // missing * in spec? 
OP0_185 => version >= 5, VAR_228 => version >= 5, VAR_233 => version == 6, _ => false, } } pub fn does_branch(opcode: Opcode, version: u8) -> bool { use self::Opcode::*; match opcode { // does a branch in any version OP2_1 | OP2_2 | OP2_3 | OP2_4 | OP2_5 | OP2_6 | OP2_7 | OP2_10 | OP1_128 | OP1_129 | OP1_130 | OP0_189 | OP0_191 | VAR_247 | VAR_255 | EXT_1006 | EXT_1024 | EXT_1027 => { true } // only branches in certain versions OP0_181 => version < 4, OP0_182 => version < 4, _ => false, } } pub fn does_text(opcode: Opcode) -> bool { use self::Opcode::*; match opcode { OP0_178 | OP0_179 => true, _ => false, } } pub fn name(opcode: Opcode, version: u8) -> String { use self::Opcode::*; match opcode { OP2_1 => "je", OP2_2 => "jl", OP2_3 => "jg", OP2_4 => "dec_chk", OP2_5 => "inc_chk", OP2_6 => "jin", OP2_7 => "test", OP2_8 => "or", OP2_9 => "and", OP2_10 => "test_attr", OP2_11 => "set_attr", OP2_12 => "clear_attr", OP2_13 => "store", OP2_14 => "insert_obj", OP2_15 => "loadw", OP2_16 => "loadb", OP2_17 => "get_prop", OP2_18 => "get_prop_addr", OP2_19 => "get_next_prop", OP2_20 => "add", OP2_21 => "sub", OP2_22 => "mul", OP2_23 => "div", OP2_24 => "mod", OP2_25 => "call_2s", OP2_26 => "call_2n", OP2_27 => "set_colour", OP2_28 => "throw", OP1_128 => "jz", OP1_129 => "get_sibling", OP1_130 => "get_child", OP1_131 => "get_parent", OP1_132 => "get_prop_len", OP1_133 => "inc", OP1_134 => "dec", OP1_135 => "print_addr", OP1_136 => "call_1s", OP1_137 => "remove_obj", OP1_138 => "print_obj", OP1_139 => "ret", OP1_140 => "jump", OP1_141 => "print_paddr", OP1_142 => "load", // actually 2 different operations: OP1_143 => if version < 4 { "not" } else { "call_1n" }, OP0_176 => "rtrue", OP0_177 => "rfalse", OP0_178 => "print", OP0_179 => "print_ret", OP0_180 => "nop", OP0_181 => "save", OP0_182 => "restore", OP0_183 => "restart", OP0_184 => "ret_popped", // actually 2 different operations: OP0_185 => if version < 4 { "pop" } else { "catch" }, OP0_186 => "quit", OP0_187 => "new_line", OP0_188 => "show_status", OP0_189 => "verify", OP0_191 => "piracy", // "call" is the same as "call_vs" (name changed to remove ambiguity) VAR_224 => if version < 4 { "call" } else { "call_vs" }, VAR_225 => "storew", VAR_226 => "storeb", VAR_227 => "put_prop", // "sread", "aread", plain "read" are really all the same thing: VAR_228 => if version < 4 { "sread" } else { "aread" }, VAR_229 => "print_char", VAR_230 => "print_num", VAR_231 => "random", VAR_232 => "push", VAR_233 => "pull", VAR_234 => "split_window", VAR_235 => "set_window", VAR_236 => "call_vs2", VAR_237 => "erase_window", VAR_238 => "erase_line", VAR_239 => "set_cursor", VAR_240 => "get_cursor", VAR_241 => "set_text_style", VAR_242 => "buffer_mode", VAR_243 => "output_stream", VAR_244 => "input_stream", VAR_245 => "sound_effect", VAR_246 => "read_char", VAR_247 => "scan_table", VAR_248 => "not", VAR_249 => "call_vn", VAR_250 => "call_vn2", VAR_251 => "tokenise", VAR_252 => "encode_text", VAR_253 => "copy_table", VAR_254 => "print_table", VAR_255 => "check_arg_count", EXT_1000 => "save", EXT_1001 => "restore", EXT_1002 => "log_shift", EXT_1003 => "art_shift", EXT_1004 => "set_font", EXT_1005 => "draw_picture", EXT_1006 => "picture_data", EXT_1007 => "erase_picture", EXT_1008 => "set_margins", EXT_1009 => "save_undo", EXT_1010 => "restore_undo", EXT_1011 => "print_unicode", EXT_1012 => "check_unicode", EXT_1013 => "set_true_colour", EXT_1016 => "move_window", EXT_1017 => "window_size", EXT_1018 => "window_style", EXT_1019 => "get_wind_prop", EXT_1020 => 
"scroll_window", EXT_1021 => "pop_stack", EXT_1022 => "read_mouse", EXT_1023 => "mouse_window", EXT_1024 => "push_stack", EXT_1025 => "put_wind_prop", EXT_1026 => "print_form", EXT_1027 => "make_menu", EXT_1028 => "picture_table", EXT_1029 => "buffer_screen", }.to_string() } } impl Instruction { pub fn advances(&self) -> bool { use self::Opcode::*; // Some instructions never advance to the next instruction: // throw, ret, jump, rtrue, rfalse, print_ret, restart, and ret_popped match self.opcode { OP2_28 | OP1_139 | OP1_140 | OP0_176 | OP0_177 | OP0_179 | OP0_183 | OP0_184 | OP0_186 => false, _ => true, } } pub fn does_call(&self, version: u8) -> bool { use self::Opcode::*; match self.opcode { OP2_25 | OP2_26 | OP1_136 | VAR_224 | VAR_236 | VAR_249 | VAR_250 => true, OP1_143 => version >= 4, _ => false, } } pub fn should_advance(&self, version: u8) -> bool { !self.does_call(version) && self.opcode!= Opcode::OP0_181 && self.opcode!= Opcode::OP0_182 } } impl hash::Hash for Instruction { fn hash<H>(&self, state: &mut H) where H: hash::Hasher, { state.write_usize(self.addr); state.finish(); } } impl PartialEq for Instruction { fn eq(&self, other: &Instruction) -> bool { self.addr == other.addr } } impl Eq for Instruction {} impl fmt::Display for Instruction { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{:5x}: {:16}", self.addr, self.name)?; for op in &self.operands { write!(f, " {}", op)?; } if let Some(x) = self.store { match x { 0 => write!(f, " -> sp"), 1...15 => write!(f, " -> local{}", x - 1), _ => write!(f, " -> g{}", x - 16), }?; }; if let Some(Branch { address, returns, condition, }) = self.branch { match (address, returns, condition) { (Some(addr), _, 1) => write!(f, "?{:04x}", addr), (Some(addr), _, 0) => write!(f, "?~{:04x}", addr), (None, Some(1), 1) => write!(f, "?rtrue"), (None, Some(1), 0) => write!(f, "?~rtrue"), (None, Some(0), 1) => write!(f, "?rfalse"), (None, Some(0), 0) => write!(f, "?~rfalse"), _ => write!(f, ""), }?; }; if let Some(ref text) = self.text { write!(f, " {}", text)?; }; write!(f, "") } }
from
identifier_name
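The held-out span in the example above is just the identifier `from`, which is what the `identifier_name` tag records. As a rough illustrative sketch (the struct and field names below are invented for illustration, not taken from the data), splicing such a held-out span back between its surrounding context reproduces the original source:

// Hypothetical helper, for illustration only.
struct FimExample<'a> {
    before: &'a str, // code up to the hole
    hole: &'a str,   // held-out span, e.g. the identifier `from`
    after: &'a str,  // remainder of the file
    kind: &'a str,   // e.g. "identifier_name"
}

impl FimExample<'_> {
    fn reassemble(&self) -> String {
        format!("{}{}{}", self.before, self.hole, self.after)
    }
}

fn main() {
    let ex = FimExample {
        before: "impl OperandType { pub fn ",
        hole: "from",
        after: "(bytes: &[u8]) -> Vec<OperandType> { /* ... */ } }",
        kind: "identifier_name",
    };
    assert_eq!(
        ex.reassemble(),
        "impl OperandType { pub fn from(bytes: &[u8]) -> Vec<OperandType> { /* ... */ } }"
    );
    assert_eq!(ex.kind, "identifier_name");
}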
instruction.rs
use std::fmt; use std::hash; enum_from_primitive! { #[allow(non_camel_case_types)] #[derive(Clone, Copy, Debug, PartialEq)] pub enum Opcode { // Two-operand opcodes (2OP) OP2_1 = 1, OP2_2 = 2, OP2_3 = 3, OP2_4 = 4, OP2_5 = 5, OP2_6 = 6, OP2_7 = 7, OP2_8 = 8, OP2_9 = 9, OP2_10 = 10, OP2_11 = 11, OP2_12 = 12, OP2_13 = 13, OP2_14 = 14, OP2_15 = 15, OP2_16 = 16, OP2_17 = 17, OP2_18 = 18, OP2_19 = 19, OP2_20 = 20, OP2_21 = 21, OP2_22 = 22, OP2_23 = 23, OP2_24 = 24, OP2_25 = 25, OP2_26 = 26, OP2_27 = 27, OP2_28 = 28, // One-operand opcodes (1OP) OP1_128 = 128, OP1_129 = 129, OP1_130 = 130, OP1_131 = 131, OP1_132 = 132, OP1_133 = 133, OP1_134 = 134, OP1_135 = 135, OP1_136 = 136, OP1_137 = 137, OP1_138 = 138, OP1_139 = 139, OP1_140 = 140, OP1_141 = 141, OP1_142 = 142, OP1_143 = 143, // Zero-operand opcodes (0OP) OP0_176 = 176, OP0_177 = 177, OP0_178 = 178, OP0_179 = 179, OP0_180 = 180, OP0_181 = 181, OP0_182 = 182, OP0_183 = 183, OP0_184 = 184, OP0_185 = 185, OP0_186 = 186, OP0_187 = 187, OP0_188 = 188, OP0_189 = 189, OP0_191 = 191, // Variable-operand opcodes (VAR) VAR_224 = 224, VAR_225 = 225, VAR_226 = 226, VAR_227 = 227, VAR_228 = 228, VAR_229 = 229, VAR_230 = 230, VAR_231 = 231, VAR_232 = 232, VAR_233 = 233, VAR_234 = 234, VAR_235 = 235, VAR_236 = 236, VAR_237 = 237, VAR_238 = 238, VAR_239 = 239, VAR_240 = 240, VAR_241 = 241, VAR_242 = 242, VAR_243 = 243, VAR_244 = 244, VAR_245 = 245, VAR_246 = 246, VAR_247 = 247, VAR_248 = 248, VAR_249 = 249, VAR_250 = 250, VAR_251 = 251, VAR_252 = 252, VAR_253 = 253, VAR_254 = 254, VAR_255 = 255, // Extended opcodes (EXT) EXT_1000 = 1000, EXT_1001 = 1001, EXT_1002 = 1002, EXT_1003 = 1003, EXT_1004 = 1004, EXT_1005 = 1005, EXT_1006 = 1006, EXT_1007 = 1007, EXT_1008 = 1008, EXT_1009 = 1009, EXT_1010 = 1010, EXT_1011 = 1011, EXT_1012 = 1012, EXT_1013 = 1013, EXT_1016 = 1016, EXT_1017 = 1017, EXT_1018 = 1018, EXT_1019 = 1019, EXT_1020 = 1020, EXT_1021 = 1021, EXT_1022 = 1022, EXT_1023 = 1023, EXT_1024 = 1024, EXT_1025 = 1025, EXT_1026 = 1026, EXT_1027 = 1027, EXT_1028 = 1028, EXT_1029 = 1029, } } #[derive(Debug, PartialEq)] pub enum OperandType { Small, Large, Variable, Omitted, } impl OperandType { pub fn from(bytes: &[u8]) -> Vec<OperandType> { bytes .iter() .fold(Vec::new(), |mut acc, n| { acc.push((n & 0b1100_0000) >> 6); acc.push((n & 0b0011_0000) >> 4); acc.push((n & 0b0000_1100) >> 2); acc.push(n & 0b0000_0011); acc }) .into_iter() .map(|b| match b { 0b00 => OperandType::Large, 0b01 => OperandType::Small, 0b10 => OperandType::Variable, 0b11 => OperandType::Omitted, _ => unreachable!("Can't get operand type of: {:08b}", b), }) .take_while(|t| *t!= OperandType::Omitted) .collect() } } #[derive(Debug)] pub enum Operand { Small(u8), Large(u16), Variable(u8), } impl fmt::Display for Operand { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { Operand::Small(x) => write!(f, "#{:02x}", x), Operand::Large(x) => write!(f, "{:04x}", x), Operand::Variable(x) => match x { 0 => write!(f, "sp"), 1...15 => write!(f, "local{}", x - 1), _ => write!(f, "g{}", x - 16), }, } } } #[derive(Debug)] pub struct Branch { pub condition: u16, pub address: Option<usize>, pub returns: Option<u16>, }
pub addr: usize, pub opcode: Opcode, pub name: String, pub operands: Vec<Operand>, pub store: Option<u8>, pub branch: Option<Branch>, pub text: Option<String>, pub next: usize, } impl Instruction { pub fn does_store(opcode: Opcode, version: u8) -> bool { use self::Opcode::*; match opcode { // does a store in any version OP2_8 | OP2_9 | OP2_15 | OP2_16 | OP2_17 | OP2_18 | OP2_19 | OP2_20 | OP2_21 | OP2_22 | OP2_23 | OP2_24 | OP2_25 | OP1_129 | OP1_130 | OP1_131 | OP1_132 | OP1_136 | OP1_142 | VAR_224 | VAR_231 | VAR_236 | VAR_246 | VAR_247 | VAR_248 | EXT_1000 | EXT_1001 | EXT_1002 | EXT_1003 | EXT_1004 | EXT_1009 | EXT_1010 | EXT_1019 | EXT_1029 => true, // only stores in certain versions OP1_143 => version < 5, OP0_181 => version == 4, // missing * in spec? OP0_182 => version == 4, // missing * in spec? OP0_185 => version >= 5, VAR_228 => version >= 5, VAR_233 => version == 6, _ => false, } } pub fn does_branch(opcode: Opcode, version: u8) -> bool { use self::Opcode::*; match opcode { // does a branch in any version OP2_1 | OP2_2 | OP2_3 | OP2_4 | OP2_5 | OP2_6 | OP2_7 | OP2_10 | OP1_128 | OP1_129 | OP1_130 | OP0_189 | OP0_191 | VAR_247 | VAR_255 | EXT_1006 | EXT_1024 | EXT_1027 => { true } // only branches in certain versions OP0_181 => version < 4, OP0_182 => version < 4, _ => false, } } pub fn does_text(opcode: Opcode) -> bool { use self::Opcode::*; match opcode { OP0_178 | OP0_179 => true, _ => false, } } pub fn name(opcode: Opcode, version: u8) -> String { use self::Opcode::*; match opcode { OP2_1 => "je", OP2_2 => "jl", OP2_3 => "jg", OP2_4 => "dec_chk", OP2_5 => "inc_chk", OP2_6 => "jin", OP2_7 => "test", OP2_8 => "or", OP2_9 => "and", OP2_10 => "test_attr", OP2_11 => "set_attr", OP2_12 => "clear_attr", OP2_13 => "store", OP2_14 => "insert_obj", OP2_15 => "loadw", OP2_16 => "loadb", OP2_17 => "get_prop", OP2_18 => "get_prop_addr", OP2_19 => "get_next_prop", OP2_20 => "add", OP2_21 => "sub", OP2_22 => "mul", OP2_23 => "div", OP2_24 => "mod", OP2_25 => "call_2s", OP2_26 => "call_2n", OP2_27 => "set_colour", OP2_28 => "throw", OP1_128 => "jz", OP1_129 => "get_sibling", OP1_130 => "get_child", OP1_131 => "get_parent", OP1_132 => "get_prop_len", OP1_133 => "inc", OP1_134 => "dec", OP1_135 => "print_addr", OP1_136 => "call_1s", OP1_137 => "remove_obj", OP1_138 => "print_obj", OP1_139 => "ret", OP1_140 => "jump", OP1_141 => "print_paddr", OP1_142 => "load", // actually 2 different operations: OP1_143 => if version < 4 { "not" } else { "call_1n" }, OP0_176 => "rtrue", OP0_177 => "rfalse", OP0_178 => "print", OP0_179 => "print_ret", OP0_180 => "nop", OP0_181 => "save", OP0_182 => "restore", OP0_183 => "restart", OP0_184 => "ret_popped", // actually 2 different operations: OP0_185 => if version < 4 { "pop" } else { "catch" }, OP0_186 => "quit", OP0_187 => "new_line", OP0_188 => "show_status", OP0_189 => "verify", OP0_191 => "piracy", // "call" is the same as "call_vs" (name changed to remove ambiguity) VAR_224 => if version < 4 { "call" } else { "call_vs" }, VAR_225 => "storew", VAR_226 => "storeb", VAR_227 => "put_prop", // "sread", "aread", plain "read" are really all the same thing: VAR_228 => if version < 4 { "sread" } else { "aread" }, VAR_229 => "print_char", VAR_230 => "print_num", VAR_231 => "random", VAR_232 => "push", VAR_233 => "pull", VAR_234 => "split_window", VAR_235 => "set_window", VAR_236 => "call_vs2", VAR_237 => "erase_window", VAR_238 => "erase_line", VAR_239 => "set_cursor", VAR_240 => "get_cursor", VAR_241 => "set_text_style", VAR_242 => "buffer_mode", VAR_243 => 
"output_stream", VAR_244 => "input_stream", VAR_245 => "sound_effect", VAR_246 => "read_char", VAR_247 => "scan_table", VAR_248 => "not", VAR_249 => "call_vn", VAR_250 => "call_vn2", VAR_251 => "tokenise", VAR_252 => "encode_text", VAR_253 => "copy_table", VAR_254 => "print_table", VAR_255 => "check_arg_count", EXT_1000 => "save", EXT_1001 => "restore", EXT_1002 => "log_shift", EXT_1003 => "art_shift", EXT_1004 => "set_font", EXT_1005 => "draw_picture", EXT_1006 => "picture_data", EXT_1007 => "erase_picture", EXT_1008 => "set_margins", EXT_1009 => "save_undo", EXT_1010 => "restore_undo", EXT_1011 => "print_unicode", EXT_1012 => "check_unicode", EXT_1013 => "set_true_colour", EXT_1016 => "move_window", EXT_1017 => "window_size", EXT_1018 => "window_style", EXT_1019 => "get_wind_prop", EXT_1020 => "scroll_window", EXT_1021 => "pop_stack", EXT_1022 => "read_mouse", EXT_1023 => "mouse_window", EXT_1024 => "push_stack", EXT_1025 => "put_wind_prop", EXT_1026 => "print_form", EXT_1027 => "make_menu", EXT_1028 => "picture_table", EXT_1029 => "buffer_screen", }.to_string() } } impl Instruction { pub fn advances(&self) -> bool { use self::Opcode::*; // Some instructions never advance to the next instruction: // throw, ret, jump, rtrue, rfalse, print_ret, restart, and ret_popped match self.opcode { OP2_28 | OP1_139 | OP1_140 | OP0_176 | OP0_177 | OP0_179 | OP0_183 | OP0_184 | OP0_186 => false, _ => true, } } pub fn does_call(&self, version: u8) -> bool { use self::Opcode::*; match self.opcode { OP2_25 | OP2_26 | OP1_136 | VAR_224 | VAR_236 | VAR_249 | VAR_250 => true, OP1_143 => version >= 4, _ => false, } } pub fn should_advance(&self, version: u8) -> bool { !self.does_call(version) && self.opcode!= Opcode::OP0_181 && self.opcode!= Opcode::OP0_182 } } impl hash::Hash for Instruction { fn hash<H>(&self, state: &mut H) where H: hash::Hasher, { state.write_usize(self.addr); state.finish(); } } impl PartialEq for Instruction { fn eq(&self, other: &Instruction) -> bool { self.addr == other.addr } } impl Eq for Instruction {} impl fmt::Display for Instruction { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{:5x}: {:16}", self.addr, self.name)?; for op in &self.operands { write!(f, " {}", op)?; } if let Some(x) = self.store { match x { 0 => write!(f, " -> sp"), 1...15 => write!(f, " -> local{}", x - 1), _ => write!(f, " -> g{}", x - 16), }?; }; if let Some(Branch { address, returns, condition, }) = self.branch { match (address, returns, condition) { (Some(addr), _, 1) => write!(f, "?{:04x}", addr), (Some(addr), _, 0) => write!(f, "?~{:04x}", addr), (None, Some(1), 1) => write!(f, "?rtrue"), (None, Some(1), 0) => write!(f, "?~rtrue"), (None, Some(0), 1) => write!(f, "?rfalse"), (None, Some(0), 0) => write!(f, "?~rfalse"), _ => write!(f, ""), }?; }; if let Some(ref text) = self.text { write!(f, " {}", text)?; }; write!(f, "") } }
#[derive(Debug)] pub struct Instruction {
random_line_split
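One convention worth spelling out from the `Display` code in these instruction.rs examples: variable numbers are printed as the stack pointer, a routine local, or a global depending on their range. A minimal standalone sketch of that mapping, with an illustrative helper name:

// Mirrors the `Operand::Variable` and store-target formatting above:
// 0 is the stack pointer, 1..=15 are locals (numbered from 0),
// and 16 and above are globals (also numbered from 0).
fn variable_name(x: u8) -> String {
    match x {
        0 => "sp".to_string(),
        1..=15 => format!("local{}", x - 1),
        _ => format!("g{}", x - 16),
    }
}

fn main() {
    assert_eq!(variable_name(0), "sp");
    assert_eq!(variable_name(1), "local0");
    assert_eq!(variable_name(16), "g0");
}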
peer.rs
use config; use dt::{Set}; use proto::{Request, Response, Transport}; use tokio_core::channel::{channel, Sender, Receiver}; use tokio_core::reactor::Handle; use tokio_core::net::TcpStream; use tokio_service::Service; use tokio_proto::easy::{EasyClient, multiplex}; use tokio_timer::{Timer, Sleep}; use futures::{Future, Poll, Async}; use futures::stream::Stream; use std::{io, mem}; use std::time::Duration; // Handle to the peer task. // // Sending a join message to a peer dispatches a message on `tx` to the task // managing the peer connection and will be processed there. // // See `Task` for details on the peer task. pub struct Peer { tx: Sender<Set<String>>, } // State required for managing a peer connection. // // Connections to MiniDB peers are managed on reactor tasks. When the server // initializes, it spawns one task for each peer in the cluster. The peer task // is responsible for maintaining an open connection to the peer and to send a // `Join` message every time the state is sent to the task. // // If the connection fails, the task will attempt a reconnect after a short // period of time. struct Task { // Receives `Set` values that need to be sent to the peer. rx: Receiver<Set<String>>, // Route information route: config::Route, // Tokio reactor handle. Used to establish tcp connections reactor_handle: Handle, // Handle to the timer. The timer is used to set a re-connect timeout when // the peer tcp connection fails. timer: Timer, // Current tcp connection state, see below state: State, // Pending `Join` message to send. This also tracks in-flight joins. If a // join request to a peer fails, the connection will be re-established. // Once it is re-established, the join request should be sent again. // // However, if while the task is waiting to re-establish a connection, a // new state is replicated, then drop the join request that failed to send // in favor of the newer one. Doing so is safe thanks to CRDTs! pending_message: PendingMessage, // Pending response future. A join was issued to the peer and the task is // currently waiting for the response. pending_response: Option<Box<Future<Item = Response, Error = io::Error>>>, } // Peer connection state. The actual connection to the peer node can be in one // of the following states: enum State { // Waiting to connect, this state is reached after hitting a connect error Waiting(Sleep), // Connecting to the remote. A TCP connect has been issued and the task is // waiting on the connect to complete Connecting(Box<Future<Item = TcpStream, Error = io::Error>>), // A connection is open to the peer. Connected(EasyClient<Request, Response>), } // Tracks the state of replication requests enum PendingMessage { // A replication request is waiting to be sent. Pending(Set<String>), // A replication request is currently in-flight. The value of the message // is saved in case the request fails and must be re-issued later. InFlight(Set<String>), // There are no pending replication requests. None, } impl Peer { /// Establish a connection to a peer node. pub fn connect(route: config::Route, handle: &Handle, timer: &Timer) -> Peer { // Create a channel. The channel will be used to send replication // requests from the server task to the peer task. let (tx, rx) = channel(handle).unwrap(); // Initialize the task state let task = Task {
reactor_handle: handle.clone(), timer: timer.clone(), // Initialize in the "waiting to connect" state but with a 0 length // sleep. This will effectively initiate the connect immediately state: State::Waiting(timer.sleep(Duration::from_millis(0))), // There are no pending messages pending_message: PendingMessage::None, // There are no pending responses pending_response: None, }; // Spawn the task handle.spawn(task); // Return the send half as the peer handle Peer { tx: tx } } // Send a replication request to the task managing the peer connection pub fn send(&self, set: Set<String>) { self.tx.send(set).unwrap(); } } // Implement `Future` for `Task`. All tasks spawned on the I/O reactor must // implement future w/ Item = () and Error = (); impl Future for Task { type Item = (); type Error = (); fn poll(&mut self) -> Poll<(), ()> { // First, process any in-bound replication requests self.process_rx(); // Perform pending work. try!(self.tick()); Ok(Async::NotReady) } } impl Task { fn process_rx(&mut self) { // Read any pending replication request and set `pending_message`. It // is expected that some messages will be dropped. The most important // thing is that the **last** replication request ends up getting // processed. while let Async::Ready(Some(set)) = self.rx.poll().unwrap() { self.pending_message = PendingMessage::Pending(set); } } fn tick(&mut self) -> Poll<(), ()> { trace!("Peer::tick; actor-id={:?}", self.route.destination()); loop { match self.state { State::Waiting(..) => { // Currently waiting a short period of time before // establishing the TCP connection with the peer. try_ready!(self.process_waiting()); } State::Connecting(..) => { // Waiting for the TCP connection finish connecting try_ready!(self.process_connecting()); } State::Connected(..) => { if self.pending_response.is_some() { // A request has been sent, waiting for the response. try_ready!(self.process_response()); } else if self.pending_message.is_some() { // A join request is pending, dispatch it try_ready!(self.process_connected()); } else { // Nothing to do, return ok return Ok(Async::Ready(())); } } } } } fn process_waiting(&mut self) -> Poll<(), ()> { trace!(" --> waiting"); match self.state { // Try polling the sleep future. If `NotReady` `process_waiting` // will return. State::Waiting(ref mut sleep) => try_ready!(sleep.poll()), _ => unreachable!(), } // We are done waiting and can now attempt to establish the connection trace!(" --> sleep complete -- attempting tcp connect"); // Start a tcp connect let socket = TcpStream::connect(&self.route.remote_addr(), &self.reactor_handle); // Set a connect timeout of 5 seconds let socket = self.timer.timeout(socket, Duration::from_secs(5)); // Transition the state to "connecting" self.state = State::Connecting(Box::new(socket)); Ok(Async::Ready(())) } fn process_connecting(&mut self) -> Poll<(), ()> { trace!(" --> connecting"); // Check if the `connecting` future is complete, aka the connection has // been established let socket = match self.state { State::Connecting(ref mut connecting) => { match connecting.poll() { // The connection is not yet established Ok(Async::NotReady) => return Ok(Async::NotReady), // The connection is established Ok(Async::Ready(socket)) => Some(socket), // An error was hit while connecting. A timeout for a short // period of time will be set after which, the connect will // be stablished again. 
Err(err) => { info!("failed to connect to {}; attempting again in 5 seconds; err={:?}", self.route.remote_addr(), err); None } } } _ => unreachable!(), }; if let Some(socket) = socket { trace!(" --> connect success"); info!("established peer connection to {:?}", self.route.remote_addr()); // The connection was successfully established. Now we have a Tcp // socket. Using that, we will build up the MiniDB transport. // // The socket will be wrapped by the length delimited framer, // followed by junkify, and last `Transport`. let transport = Transport::junkified(socket, &self.route, &self.timer); // Using the transport, spawn a task that manages this connection // (vs. the general peer replication task). // // This is done with `tokio-proto`, which takes the transport and // returns a `Service`. Requests can be dispatched directly to the // service. let service = multiplex::connect(transport, &self.reactor_handle); // Update the state self.state = State::Connected(service); } else { trace!(" --> connect failed"); // The connection failed, transition the state to "waiting to // reconnect". We will wait a short bit of time before attempting a // reconnect. self.transition_to_waiting(); } Ok(Async::Ready(())) } fn process_connected(&mut self) -> Poll<(), ()> { trace!(" --> process peer connection"); let service = match self.state { State::Connected(ref mut service) => service, _ => unreachable!(), }; // The connection is currently in the connected state. If there are any // pending replication requests, then they should be dispatched to the // client. // First ensure that the service handle is ready to accept requests, if // not, return `NotReady` and try again later. if!service.poll_ready().is_ready() { trace!(" --> peer socket not ready"); return Ok(Async::NotReady); } // Build the join / replication request let set = self.pending_message.message_to_send().unwrap(); let msg = Request::Join(set); trace!(" --> sending Join message"); // Dispatch the replication request and get back a future repesenting // the response from the peer node. let resp = service.call(msg); // Timeout the response after 5 seconds. If the peer does not // respond to the join within this time, the connection will be // reestablished and the join sent again let resp = self.timer.timeout(resp, Duration::from_secs(5)); // Track the response future self.pending_response = Some(Box::new(resp)); Ok(Async::Ready(())) } fn process_response(&mut self) -> Poll<(), ()> { trace!(" --> process peer response"); // Check the response future. If it is complete, see if it is a // successful response or if the connection needs to be re-established let response = match self.pending_response { Some(ref mut pending_response) => { match pending_response.poll() { Ok(Async::Ready(v)) => Ok(v), Err(e) => Err(e), Ok(Async::NotReady) => return Ok(Async::NotReady), } } _ => unreachable!(), }; // Clear the pending response future self.pending_response = None; // The response has completed, check to see if it was successful match response { Ok(_) => { // The join / replication successfully applied self.pending_message.in_flight_succeeded(); trace!(" --> received response: OK"); } Err(e) => { // The replication failed. Transition the state to waiting to // connect. Also, setup the join request to get redisptached // once the connection is established again. 
warn!("message send failed to remote {:?} -- attempting reconnect in 5 seconds; err={:?}", self.route.remote_addr(), e); self.pending_message.in_flight_failed(); self.transition_to_waiting(); } } Ok(Async::Ready(())) } fn transition_to_waiting(&mut self) { trace!("waiting for 5 seconds before reconnecting; actor-id={:?}", self.route.destination()); // Set a timeout for 5 seconds let sleep = self.timer.sleep(Duration::from_secs(5)); // Update the state to reflect waiting self.state = State::Waiting(sleep); } } impl PendingMessage { fn is_none(&self) -> bool { match *self { PendingMessage::None => true, _ => false, } } fn is_some(&self) -> bool { !self.is_none() } fn message_to_send(&mut self) -> Option<Set<String>> { match mem::replace(self, PendingMessage::None) { PendingMessage::Pending(set) => { *self = PendingMessage::InFlight(set.clone()); Some(set) } _ => None, } } fn in_flight_succeeded(&mut self) { match *self { PendingMessage::Pending(..) => return, _ => *self = PendingMessage::None, } } fn in_flight_failed(&mut self) { match mem::replace(self, PendingMessage::None) { PendingMessage::InFlight(set) => { *self = PendingMessage::Pending(set); } v => *self = v, } } }
rx: rx, route: route,
random_line_split
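The comments in this peer.rs example describe how `process_rx` drains the task's channel and keeps only the most recent replication request, which the code notes is safe thanks to the CRDT join semantics. A simplified standalone sketch of that coalescing, with `&str` standing in for the `Set<String>` the real task replicates:

// Illustrative sketch: draining pending messages overwrites any previously
// pending request, so only the most recent replicated state is kept.
fn process_rx<T>(incoming: Vec<T>, pending: &mut Option<T>) {
    for msg in incoming {
        *pending = Some(msg); // a newer request replaces an older pending one
    }
}

fn main() {
    let mut pending = None;
    process_rx(vec!["a", "ab", "abc"], &mut pending);
    assert_eq!(pending, Some("abc"));
}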
peer.rs
use config; use dt::{Set}; use proto::{Request, Response, Transport}; use tokio_core::channel::{channel, Sender, Receiver}; use tokio_core::reactor::Handle; use tokio_core::net::TcpStream; use tokio_service::Service; use tokio_proto::easy::{EasyClient, multiplex}; use tokio_timer::{Timer, Sleep}; use futures::{Future, Poll, Async}; use futures::stream::Stream; use std::{io, mem}; use std::time::Duration; // Handle to the peer task. // // Sending a join message to a peer dispatches a message on `tx` to the task // managing the peer connection and will be processed there. // // See `Task` for details on the peer task. pub struct Peer { tx: Sender<Set<String>>, } // State required for managing a peer connection. // // Connections to MiniDB peers are managed on reactor tasks. When the server // initializes, it spawns one task for each peer in the cluster. The peer task // is responsible for maintaining an open connection to the peer and to send a // `Join` message every time the state is sent to the task. // // If the connection fails, the task will attempt a reconnect after a short // period of time. struct Task { // Receives `Set` values that need to be sent to the peer. rx: Receiver<Set<String>>, // Route information route: config::Route, // Tokio reactor handle. Used to establish tcp connections reactor_handle: Handle, // Handle to the timer. The timer is used to set a re-connect timeout when // the peer tcp connection fails. timer: Timer, // Current tcp connection state, see below state: State, // Pending `Join` message to send. This also tracks in-flight joins. If a // join request to a peer fails, the connection will be re-established. // Once it is re-established, the join request should be sent again. // // However, if while the task is waiting to re-establish a connection, a // new state is replicated, then drop the join request that failed to send // in favor of the newer one. Doing so is safe thanks to CRDTs! pending_message: PendingMessage, // Pending response future. A join was issued to the peer and the task is // currently waiting for the response. pending_response: Option<Box<Future<Item = Response, Error = io::Error>>>, } // Peer connection state. The actual connection to the peer node can be in one // of the following states: enum State { // Waiting to connect, this state is reached after hitting a connect error Waiting(Sleep), // Connecting to the remote. A TCP connect has been issued and the task is // waiting on the connect to complete Connecting(Box<Future<Item = TcpStream, Error = io::Error>>), // A connection is open to the peer. Connected(EasyClient<Request, Response>), } // Tracks the state of replication requests enum PendingMessage { // A replication request is waiting to be sent. Pending(Set<String>), // A replication request is currently in-flight. The value of the message // is saved in case the request fails and must be re-issued later. InFlight(Set<String>), // There are no pending replication requests. None, } impl Peer { /// Establish a connection to a peer node. pub fn connect(route: config::Route, handle: &Handle, timer: &Timer) -> Peer { // Create a channel. The channel will be used to send replication // requests from the server task to the peer task. let (tx, rx) = channel(handle).unwrap(); // Initialize the task state let task = Task { rx: rx, route: route, reactor_handle: handle.clone(), timer: timer.clone(), // Initialize in the "waiting to connect" state but with a 0 length // sleep. 
This will effectively initiate the connect immediately state: State::Waiting(timer.sleep(Duration::from_millis(0))), // There are no pending messages pending_message: PendingMessage::None, // There are no pending responses pending_response: None, }; // Spawn the task handle.spawn(task); // Return the send half as the peer handle Peer { tx: tx } } // Send a replication request to the task managing the peer connection pub fn send(&self, set: Set<String>) { self.tx.send(set).unwrap(); } } // Implement `Future` for `Task`. All tasks spawned on the I/O reactor must // implement future w/ Item = () and Error = (); impl Future for Task { type Item = (); type Error = (); fn poll(&mut self) -> Poll<(), ()> { // First, process any in-bound replication requests self.process_rx(); // Perform pending work. try!(self.tick()); Ok(Async::NotReady) } } impl Task { fn process_rx(&mut self) { // Read any pending replication request and set `pending_message`. It // is expected that some messages will be dropped. The most important // thing is that the **last** replication request ends up getting // processed. while let Async::Ready(Some(set)) = self.rx.poll().unwrap() { self.pending_message = PendingMessage::Pending(set); } } fn tick(&mut self) -> Poll<(), ()> { trace!("Peer::tick; actor-id={:?}", self.route.destination()); loop { match self.state { State::Waiting(..) => { // Currently waiting a short period of time before // establishing the TCP connection with the peer. try_ready!(self.process_waiting()); } State::Connecting(..) => { // Waiting for the TCP connection finish connecting try_ready!(self.process_connecting()); } State::Connected(..) => { if self.pending_response.is_some() { // A request has been sent, waiting for the response. try_ready!(self.process_response()); } else if self.pending_message.is_some() { // A join request is pending, dispatch it try_ready!(self.process_connected()); } else { // Nothing to do, return ok return Ok(Async::Ready(())); } } } } } fn process_waiting(&mut self) -> Poll<(), ()> { trace!(" --> waiting"); match self.state { // Try polling the sleep future. If `NotReady` `process_waiting` // will return. State::Waiting(ref mut sleep) => try_ready!(sleep.poll()), _ => unreachable!(), } // We are done waiting and can now attempt to establish the connection trace!(" --> sleep complete -- attempting tcp connect"); // Start a tcp connect let socket = TcpStream::connect(&self.route.remote_addr(), &self.reactor_handle); // Set a connect timeout of 5 seconds let socket = self.timer.timeout(socket, Duration::from_secs(5)); // Transition the state to "connecting" self.state = State::Connecting(Box::new(socket)); Ok(Async::Ready(())) } fn process_connecting(&mut self) -> Poll<(), ()> { trace!(" --> connecting"); // Check if the `connecting` future is complete, aka the connection has // been established let socket = match self.state { State::Connecting(ref mut connecting) => { match connecting.poll() { // The connection is not yet established Ok(Async::NotReady) => return Ok(Async::NotReady), // The connection is established Ok(Async::Ready(socket)) => Some(socket), // An error was hit while connecting. A timeout for a short // period of time will be set after which, the connect will // be stablished again. 
Err(err) => { info!("failed to connect to {}; attempting again in 5 seconds; err={:?}", self.route.remote_addr(), err); None } } } _ => unreachable!(), }; if let Some(socket) = socket { trace!(" --> connect success"); info!("established peer connection to {:?}", self.route.remote_addr()); // The connection was successfully established. Now we have a Tcp // socket. Using that, we will build up the MiniDB transport. // // The socket will be wrapped by the length delimited framer, // followed by junkify, and last `Transport`. let transport = Transport::junkified(socket, &self.route, &self.timer); // Using the transport, spawn a task that manages this connection // (vs. the general peer replication task). // // This is done with `tokio-proto`, which takes the transport and // returns a `Service`. Requests can be dispatched directly to the // service. let service = multiplex::connect(transport, &self.reactor_handle); // Update the state self.state = State::Connected(service); } else { trace!(" --> connect failed"); // The connection failed, transition the state to "waiting to // reconnect". We will wait a short bit of time before attempting a // reconnect. self.transition_to_waiting(); } Ok(Async::Ready(())) } fn
(&mut self) -> Poll<(), ()> { trace!(" --> process peer connection"); let service = match self.state { State::Connected(ref mut service) => service, _ => unreachable!(), }; // The connection is currently in the connected state. If there are any // pending replication requests, then they should be dispatched to the // client. // First ensure that the service handle is ready to accept requests, if // not, return `NotReady` and try again later. if!service.poll_ready().is_ready() { trace!(" --> peer socket not ready"); return Ok(Async::NotReady); } // Build the join / replication request let set = self.pending_message.message_to_send().unwrap(); let msg = Request::Join(set); trace!(" --> sending Join message"); // Dispatch the replication request and get back a future repesenting // the response from the peer node. let resp = service.call(msg); // Timeout the response after 5 seconds. If the peer does not // respond to the join within this time, the connection will be // reestablished and the join sent again let resp = self.timer.timeout(resp, Duration::from_secs(5)); // Track the response future self.pending_response = Some(Box::new(resp)); Ok(Async::Ready(())) } fn process_response(&mut self) -> Poll<(), ()> { trace!(" --> process peer response"); // Check the response future. If it is complete, see if it is a // successful response or if the connection needs to be re-established let response = match self.pending_response { Some(ref mut pending_response) => { match pending_response.poll() { Ok(Async::Ready(v)) => Ok(v), Err(e) => Err(e), Ok(Async::NotReady) => return Ok(Async::NotReady), } } _ => unreachable!(), }; // Clear the pending response future self.pending_response = None; // The response has completed, check to see if it was successful match response { Ok(_) => { // The join / replication successfully applied self.pending_message.in_flight_succeeded(); trace!(" --> received response: OK"); } Err(e) => { // The replication failed. Transition the state to waiting to // connect. Also, setup the join request to get redisptached // once the connection is established again. warn!("message send failed to remote {:?} -- attempting reconnect in 5 seconds; err={:?}", self.route.remote_addr(), e); self.pending_message.in_flight_failed(); self.transition_to_waiting(); } } Ok(Async::Ready(())) } fn transition_to_waiting(&mut self) { trace!("waiting for 5 seconds before reconnecting; actor-id={:?}", self.route.destination()); // Set a timeout for 5 seconds let sleep = self.timer.sleep(Duration::from_secs(5)); // Update the state to reflect waiting self.state = State::Waiting(sleep); } } impl PendingMessage { fn is_none(&self) -> bool { match *self { PendingMessage::None => true, _ => false, } } fn is_some(&self) -> bool { !self.is_none() } fn message_to_send(&mut self) -> Option<Set<String>> { match mem::replace(self, PendingMessage::None) { PendingMessage::Pending(set) => { *self = PendingMessage::InFlight(set.clone()); Some(set) } _ => None, } } fn in_flight_succeeded(&mut self) { match *self { PendingMessage::Pending(..) => return, _ => *self = PendingMessage::None, } } fn in_flight_failed(&mut self) { match mem::replace(self, PendingMessage::None) { PendingMessage::InFlight(set) => { *self = PendingMessage::Pending(set); } v => *self = v, } } }
process_connected
identifier_name
peer.rs
use config; use dt::{Set}; use proto::{Request, Response, Transport}; use tokio_core::channel::{channel, Sender, Receiver}; use tokio_core::reactor::Handle; use tokio_core::net::TcpStream; use tokio_service::Service; use tokio_proto::easy::{EasyClient, multiplex}; use tokio_timer::{Timer, Sleep}; use futures::{Future, Poll, Async}; use futures::stream::Stream; use std::{io, mem}; use std::time::Duration; // Handle to the peer task. // // Sending a join message to a peer dispatches a message on `tx` to the task // managing the peer connection and will be processed there. // // See `Task` for details on the peer task. pub struct Peer { tx: Sender<Set<String>>, } // State required for managing a peer connection. // // Connections to MiniDB peers are managed on reactor tasks. When the server // initializes, it spawns one task for each peer in the cluster. The peer task // is responsible for maintaining an open connection to the peer and to send a // `Join` message every time the state is sent to the task. // // If the connection fails, the task will attempt a reconnect after a short // period of time. struct Task { // Receives `Set` values that need to be sent to the peer. rx: Receiver<Set<String>>, // Route information route: config::Route, // Tokio reactor handle. Used to establish tcp connections reactor_handle: Handle, // Handle to the timer. The timer is used to set a re-connect timeout when // the peer tcp connection fails. timer: Timer, // Current tcp connection state, see below state: State, // Pending `Join` message to send. This also tracks in-flight joins. If a // join request to a peer fails, the connection will be re-established. // Once it is re-established, the join request should be sent again. // // However, if while the task is waiting to re-establish a connection, a // new state is replicated, then drop the join request that failed to send // in favor of the newer one. Doing so is safe thanks to CRDTs! pending_message: PendingMessage, // Pending response future. A join was issued to the peer and the task is // currently waiting for the response. pending_response: Option<Box<Future<Item = Response, Error = io::Error>>>, } // Peer connection state. The actual connection to the peer node can be in one // of the following states: enum State { // Waiting to connect, this state is reached after hitting a connect error Waiting(Sleep), // Connecting to the remote. A TCP connect has been issued and the task is // waiting on the connect to complete Connecting(Box<Future<Item = TcpStream, Error = io::Error>>), // A connection is open to the peer. Connected(EasyClient<Request, Response>), } // Tracks the state of replication requests enum PendingMessage { // A replication request is waiting to be sent. Pending(Set<String>), // A replication request is currently in-flight. The value of the message // is saved in case the request fails and must be re-issued later. InFlight(Set<String>), // There are no pending replication requests. None, } impl Peer { /// Establish a connection to a peer node. pub fn connect(route: config::Route, handle: &Handle, timer: &Timer) -> Peer { // Create a channel. The channel will be used to send replication // requests from the server task to the peer task. let (tx, rx) = channel(handle).unwrap(); // Initialize the task state let task = Task { rx: rx, route: route, reactor_handle: handle.clone(), timer: timer.clone(), // Initialize in the "waiting to connect" state but with a 0 length // sleep. 
This will effectively initiate the connect immediately state: State::Waiting(timer.sleep(Duration::from_millis(0))), // There are no pending messages pending_message: PendingMessage::None, // There are no pending responses pending_response: None, }; // Spawn the task handle.spawn(task); // Return the send half as the peer handle Peer { tx: tx } } // Send a replication request to the task managing the peer connection pub fn send(&self, set: Set<String>) { self.tx.send(set).unwrap(); } } // Implement `Future` for `Task`. All tasks spawned on the I/O reactor must // implement future w/ Item = () and Error = (); impl Future for Task { type Item = (); type Error = (); fn poll(&mut self) -> Poll<(), ()> { // First, process any in-bound replication requests self.process_rx(); // Perform pending work. try!(self.tick()); Ok(Async::NotReady) } } impl Task { fn process_rx(&mut self) { // Read any pending replication request and set `pending_message`. It // is expected that some messages will be dropped. The most important // thing is that the **last** replication request ends up getting // processed. while let Async::Ready(Some(set)) = self.rx.poll().unwrap() { self.pending_message = PendingMessage::Pending(set); } } fn tick(&mut self) -> Poll<(), ()> { trace!("Peer::tick; actor-id={:?}", self.route.destination()); loop { match self.state { State::Waiting(..) => { // Currently waiting a short period of time before // establishing the TCP connection with the peer. try_ready!(self.process_waiting()); } State::Connecting(..) => { // Waiting for the TCP connection finish connecting try_ready!(self.process_connecting()); } State::Connected(..) => { if self.pending_response.is_some() { // A request has been sent, waiting for the response. try_ready!(self.process_response()); } else if self.pending_message.is_some() { // A join request is pending, dispatch it try_ready!(self.process_connected()); } else { // Nothing to do, return ok return Ok(Async::Ready(())); } } } } } fn process_waiting(&mut self) -> Poll<(), ()> { trace!(" --> waiting"); match self.state { // Try polling the sleep future. If `NotReady` `process_waiting` // will return. State::Waiting(ref mut sleep) => try_ready!(sleep.poll()), _ => unreachable!(), } // We are done waiting and can now attempt to establish the connection trace!(" --> sleep complete -- attempting tcp connect"); // Start a tcp connect let socket = TcpStream::connect(&self.route.remote_addr(), &self.reactor_handle); // Set a connect timeout of 5 seconds let socket = self.timer.timeout(socket, Duration::from_secs(5)); // Transition the state to "connecting" self.state = State::Connecting(Box::new(socket)); Ok(Async::Ready(())) } fn process_connecting(&mut self) -> Poll<(), ()> { trace!(" --> connecting"); // Check if the `connecting` future is complete, aka the connection has // been established let socket = match self.state { State::Connecting(ref mut connecting) => { match connecting.poll() { // The connection is not yet established Ok(Async::NotReady) => return Ok(Async::NotReady), // The connection is established Ok(Async::Ready(socket)) => Some(socket), // An error was hit while connecting. A timeout for a short // period of time will be set after which, the connect will // be stablished again. 
Err(err) => { info!("failed to connect to {}; attempting again in 5 seconds; err={:?}", self.route.remote_addr(), err); None } } } _ => unreachable!(), }; if let Some(socket) = socket { trace!(" --> connect success"); info!("established peer connection to {:?}", self.route.remote_addr()); // The connection was successfully established. Now we have a Tcp // socket. Using that, we will build up the MiniDB transport. // // The socket will be wrapped by the length delimited framer, // followed by junkify, and last `Transport`. let transport = Transport::junkified(socket, &self.route, &self.timer); // Using the transport, spawn a task that manages this connection // (vs. the general peer replication task). // // This is done with `tokio-proto`, which takes the transport and // returns a `Service`. Requests can be dispatched directly to the // service. let service = multiplex::connect(transport, &self.reactor_handle); // Update the state self.state = State::Connected(service); } else { trace!(" --> connect failed"); // The connection failed, transition the state to "waiting to // reconnect". We will wait a short bit of time before attempting a // reconnect. self.transition_to_waiting(); } Ok(Async::Ready(())) } fn process_connected(&mut self) -> Poll<(), ()>
let set = self.pending_message.message_to_send().unwrap(); let msg = Request::Join(set); trace!(" --> sending Join message"); // Dispatch the replication request and get back a future repesenting // the response from the peer node. let resp = service.call(msg); // Timeout the response after 5 seconds. If the peer does not // respond to the join within this time, the connection will be // reestablished and the join sent again let resp = self.timer.timeout(resp, Duration::from_secs(5)); // Track the response future self.pending_response = Some(Box::new(resp)); Ok(Async::Ready(())) } fn process_response(&mut self) -> Poll<(), ()> { trace!(" --> process peer response"); // Check the response future. If it is complete, see if it is a // successful response or if the connection needs to be re-established let response = match self.pending_response { Some(ref mut pending_response) => { match pending_response.poll() { Ok(Async::Ready(v)) => Ok(v), Err(e) => Err(e), Ok(Async::NotReady) => return Ok(Async::NotReady), } } _ => unreachable!(), }; // Clear the pending response future self.pending_response = None; // The response has completed, check to see if it was successful match response { Ok(_) => { // The join / replication successfully applied self.pending_message.in_flight_succeeded(); trace!(" --> received response: OK"); } Err(e) => { // The replication failed. Transition the state to waiting to // connect. Also, setup the join request to get redisptached // once the connection is established again. warn!("message send failed to remote {:?} -- attempting reconnect in 5 seconds; err={:?}", self.route.remote_addr(), e); self.pending_message.in_flight_failed(); self.transition_to_waiting(); } } Ok(Async::Ready(())) } fn transition_to_waiting(&mut self) { trace!("waiting for 5 seconds before reconnecting; actor-id={:?}", self.route.destination()); // Set a timeout for 5 seconds let sleep = self.timer.sleep(Duration::from_secs(5)); // Update the state to reflect waiting self.state = State::Waiting(sleep); } } impl PendingMessage { fn is_none(&self) -> bool { match *self { PendingMessage::None => true, _ => false, } } fn is_some(&self) -> bool { !self.is_none() } fn message_to_send(&mut self) -> Option<Set<String>> { match mem::replace(self, PendingMessage::None) { PendingMessage::Pending(set) => { *self = PendingMessage::InFlight(set.clone()); Some(set) } _ => None, } } fn in_flight_succeeded(&mut self) { match *self { PendingMessage::Pending(..) => return, _ => *self = PendingMessage::None, } } fn in_flight_failed(&mut self) { match mem::replace(self, PendingMessage::None) { PendingMessage::InFlight(set) => { *self = PendingMessage::Pending(set); } v => *self = v, } } }
{ trace!(" --> process peer connection"); let service = match self.state { State::Connected(ref mut service) => service, _ => unreachable!(), }; // The connection is currently in the connected state. If there are any // pending replication requests, then they should be dispatched to the // client. // First ensure that the service handle is ready to accept requests, if // not, return `NotReady` and try again later. if !service.poll_ready().is_ready() { trace!(" --> peer socket not ready"); return Ok(Async::NotReady); } // Build the join / replication request
identifier_body
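The `process_connected` body filled in here takes the pending set via `message_to_send`, which uses `mem::replace` to move the value out while recording it as in flight; `in_flight_failed` moves it back so the join is retried after reconnecting. A standalone sketch of that ownership dance, with `String` standing in for the replicated `Set<String>`:

// Illustrative sketch of the `mem::replace` pattern used by `PendingMessage` above.
use std::mem;

#[derive(Debug, PartialEq)]
enum PendingMessage {
    Pending(String),
    InFlight(String),
    None,
}

impl PendingMessage {
    // Called only when a message is pending: move it out, remember it as in flight.
    fn message_to_send(&mut self) -> Option<String> {
        match mem::replace(self, PendingMessage::None) {
            PendingMessage::Pending(set) => {
                *self = PendingMessage::InFlight(set.clone());
                Some(set)
            }
            _ => None,
        }
    }

    // A failed request re-queues the in-flight message so it can be retried.
    fn in_flight_failed(&mut self) {
        match mem::replace(self, PendingMessage::None) {
            PendingMessage::InFlight(set) => *self = PendingMessage::Pending(set),
            v => *self = v,
        }
    }
}

fn main() {
    let mut pm = PendingMessage::Pending("replicated state".to_string());
    assert_eq!(pm.message_to_send().as_deref(), Some("replicated state"));
    assert_eq!(pm, PendingMessage::InFlight("replicated state".to_string()));
    pm.in_flight_failed();
    assert_eq!(pm, PendingMessage::Pending("replicated state".to_string()));
}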
lib.rs
#![cfg_attr(docsrs, doc = include_str!("../README.md"))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg, doc_cfg_hide))] #![cfg_attr(docsrs, deny(missing_docs))] #![cfg_attr(not(any(feature = "std", test)), no_std)] #![allow(unused_unsafe)] //! //! ## data structures //! //! `cordyceps` provides implementations of the following data structures: //! //! - **[`List`]: a mutable, doubly-linked list.** //! //! A [`List`] provides *O*(1) insertion and removal at both the head and //! tail of the list. In addition, parts of a [`List`] may be split off to //! form new [`List`]s, and two [`List`]s may be spliced together to form a //! single [`List`], all in *O*(1) time. The [`list`] module also provides //! [`list::Cursor`] and [`list::CursorMut`] types, which allow traversal and //! modification of elements in a list. Finally, elements can remove themselves //! from arbitrary positions in a [`List`], provided that they have mutable //! access to the [`List`] itself. This makes the [`List`] type suitable for //! use in cases where elements must be able to drop themselves while linked //! into a list. //! //! The [`List`] type is **not** a lock-free data structure, and can only be //! modified through `&mut` references. //! //! - **[`MpscQueue`]: a multi-producer, single-consumer (MPSC) lock-free //! last-in, first-out (LIFO) queue.** //! //! A [`MpscQueue`] is a *lock-free* concurrent data structure that allows //! multiple producers to concurrently push elements onto the queue, and a //! single consumer to dequeue elements in the order that they were pushed. //! //! [`MpscQueue`]s can be used to efficiently share data from multiple //! concurrent producers with a consumer. //! //! - **[`Stack`]: a mutable, singly-linked first-in, first-out (FIFO) //! stack.** //! //! This is a simple, singly-linked stack with *O*(1) push and pop //! operations. The pop operation returns the *last* element pushed to the //! stack. A [`Stack`] also implements the [`Iterator`] trait; iterating over //! a stack pops elements from the end of the list. //! //! The [`Stack`] type is **not** a lock-free data structure, and can only be //! modified through `&mut` references. //! //! - **[`TransferStack`]: a lock-free, multi-producer FIFO stack, where //! all elements currently in the stack are popped in a single atomic operation.** //! //! A [`TransferStack`] is a lock-free data structure where multiple producers //! can [concurrently push elements](stack::TransferStack::push) to the end of //! the stack through immutable `&` references. A consumer can [pop all //! elements currently in the `TransferStack`](stack::TransferStack::take_all) //! in a single atomic operation, returning a new [`Stack`]. Pushing an //! element, and taking all elements in the [`TransferStack`] are both *O*(1) //! operations. //! //! A [`TransferStack`] can be used to efficiently transfer ownership of //! resources from multiple producers to a consumer, such as for reuse or //! cleanup. #[cfg(feature = "alloc")] extern crate alloc; #[cfg(test)] extern crate std; #[macro_use] pub(crate) mod util; pub mod list; pub mod mpsc_queue; pub mod stack; #[doc(inline)] pub use list::List; #[doc(inline)] pub use mpsc_queue::MpscQueue; #[doc(inline)] pub use stack::{Stack, TransferStack}; pub(crate) mod loom; use core::ptr::NonNull; /// Trait implemented by types which can be members of an [intrusive collection]. 
/// /// In order to be part of an intrusive collection, a type must contain a /// `Links` type that stores the pointers to other nodes in that collection. For /// example, to be part of a [doubly-linked list], a type must contain the /// [`list::Links`] struct, or to be part of a [MPSC queue], a type must contain /// the [`mpsc_queue::Links`] struct. /// /// # Safety /// /// This is unsafe to implement because it's the implementation's responsibility /// to ensure that types implementing this trait are valid intrusive collection /// nodes. In particular: /// /// - Implementations **must** ensure that implementors are pinned in memory while they /// are in an intrusive collection. While a given `Linked` type is in an intrusive /// data structure, it may not be deallocated or moved to a different memory /// location. /// - The type implementing this trait **must not** implement [`Unpin`]. /// - Additional safety requirements for individual methods on this trait are /// documented on those methods. /// /// Failure to uphold these invariants will result in corruption of the /// intrusive data structure, including dangling pointers. /// /// # Implementing `Linked::links` /// /// The [`Linked::links`] method provides access to a `Linked` type's `Links` /// field through a [`NonNull`] pointer. This is necessary for a type to /// participate in an intrusive structure, as it tells the intrusive structure /// how to access the links to other parts of that data structure. However, this /// method is somewhat difficult to implement correctly.
/// /// Suppose we have an entry type like this: /// ```rust /// use cordyceps::list; /// /// struct Entry { /// links: list::Links<Self>, /// data: usize, /// } /// ``` /// /// The naive implementation of [`links`](Linked::links) for this `Entry` type /// might look like this: /// /// ``` /// use cordyceps::Linked; /// use core::ptr::NonNull; /// /// # use cordyceps::list; /// # struct Entry { /// # links: list::Links<Self>, /// # } /// /// unsafe impl Linked<list::Links<Self>> for Entry { /// # type Handle = NonNull<Self>; /// # fn into_ptr(r: Self::Handle) -> NonNull<Self> { r } /// # unsafe fn from_ptr(ptr: NonNull<Self>) -> Self::Handle { ptr } /// //... /// /// unsafe fn links(mut target: NonNull<Self>) -> NonNull<list::Links<Self>> { /// // Borrow the target's `links` field. /// let links = &mut target.as_mut().links; /// // Convert that reference into a pointer. /// NonNull::from(links) /// } /// } /// ``` /// /// However, this implementation **is not sound** under [Stacked Borrows]! It /// creates a temporary reference from the original raw pointer, and then /// creates a new raw pointer from that temporary reference. Stacked Borrows /// will reject this reborrow as unsound.[^1] /// /// There are two ways we can implement [`Linked::links`] without creating a /// temporary reference in this manner. The recommended one is to use the /// [`core::ptr::addr_of_mut!`] macro, as follows: /// /// ``` /// use core::ptr::{self, NonNull}; /// # use cordyceps::{Linked, list}; /// # struct Entry { /// # links: list::Links<Self>, /// # } /// /// unsafe impl Linked<list::Links<Self>> for Entry { /// # type Handle = NonNull<Self>; /// # fn into_ptr(r: Self::Handle) -> NonNull<Self> { r } /// # unsafe fn from_ptr(ptr: NonNull<Self>) -> Self::Handle { ptr } /// //... /// /// unsafe fn links(target: NonNull<Self>) -> NonNull<list::Links<Self>> { /// let target = target.as_ptr(); /// /// // Using the `ptr::addr_of_mut!` macro, we can offset a raw pointer to a /// // raw pointer to a field *without* creating a temporary reference. /// let links = ptr::addr_of_mut!((*target).links); /// /// // `NonNull::new_unchecked` is safe to use here, because the pointer that /// // we offset was not null, implying that the pointer produced by offsetting /// // it will also not be null. /// NonNull::new_unchecked(links) /// } /// } /// ``` /// /// It is also possible to ensure that the struct implementing `Linked` is laid /// out so that the `Links` field is the first member of the struct, and then /// cast the pointer to a `Links`. Since [Rust's native type representation][repr] /// does not guarantee the layout of struct members, it is **necessary** to ensure /// that any struct that implements the `Linked::links` method in this manner has a /// [`#[repr(C)]` attribute][repr-c], ensuring that its fields are laid out in the /// order that they are defined. /// /// For example: /// /// ``` /// use core::ptr::NonNull; /// use cordyceps::{Linked, list}; /// /// // This `repr(C)` attribute is *mandatory* here, as it ensures that the /// // `links` field will *always* be the first field in the struct's in-memory /// // representation. /// #[repr(C)] /// struct Entry { /// links: list::Links<Self>, /// data: usize, /// } /// /// unsafe impl Linked<list::Links<Self>> for Entry { /// # type Handle = NonNull<Self>; /// # fn into_ptr(r: Self::Handle) -> NonNull<Self> { r } /// # unsafe fn from_ptr(ptr: NonNull<Self>) -> Self::Handle { ptr } /// //... 
/// /// unsafe fn links(target: NonNull<Self>) -> NonNull<list::Links<Self>> { /// // Safety: this performs a layout-dependent cast! it is only sound /// // if the `Entry` type has a `#[repr(C)]` attribute! /// target.cast::<list::Links<Self>>() /// } /// } /// ``` /// /// In general, this approach is not recommended, and using /// [`core::ptr::addr_of_mut!`] should be preferred in almost all cases. In /// particular, the layout-dependent cast is more error-prone, as it requires a /// `#[repr(C)]` attribute to avoid soundness issues. Additionally, the /// layout-based cast does not permit a single struct to contain `Links` fields /// for multiple intrusive data structures, as the `Links` type *must* be the /// struct's first field.[^2] Therefore, [`Linked::links`] should generally be /// implemented using [`addr_of_mut!`](core::ptr::addr_of_mut). /// /// [^1]: Note that code like this is not *currently* known to result in /// miscompiles, but it is rejected by tools like Miri as being unsound. /// Like all undefined behavior, there is no guarantee that future Rust /// compilers will not miscompile code like this, with disastrous results. /// /// [^2]: And two different fields cannot both be the first field at the same /// time...by definition. /// /// [intrusive collection]: crate#intrusive-data-structures /// [`Unpin`]: core::marker::Unpin /// [doubly-linked list]: crate::list /// [MSPC queue]: crate::mpsc_queue /// [Stacked Borrows]: https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md /// [repr]: https://doc.rust-lang.org/nomicon/repr-rust.html /// [repr-c]: https://doc.rust-lang.org/nomicon/other-reprs.html#reprc pub unsafe trait Linked<L> { /// The handle owning nodes in the linked list. /// /// This type must have ownership over a `Self`-typed value. When a `Handle` /// is dropped, it should drop the corresponding `Linked` type. /// /// A quintessential example of a `Handle` is [`Box`]. /// /// [`Box`]: alloc::boxed::Box type Handle; /// Convert a [`Self::Handle`] to a raw pointer to `Self`, taking ownership /// of it in the process. fn into_ptr(r: Self::Handle) -> NonNull<Self>; /// Convert a raw pointer to `Self` into an owning [`Self::Handle`]. /// /// # Safety /// /// This function is safe to call when: /// - It is valid to construct a [`Self::Handle`] from a`raw pointer /// - The pointer points to a valid instance of `Self` (e.g. it does not /// dangle). unsafe fn from_ptr(ptr: NonNull<Self>) -> Self::Handle; /// Return the links of the node pointed to by `ptr`. /// /// # Safety /// /// This function is safe to call when: /// - It is valid to construct a [`Self::Handle`] from a`raw pointer /// - The pointer points to a valid instance of `Self` (e.g. it does not /// dangle). /// /// See [the trait-level documentation](#implementing-linkedlinks) for /// details on how to correctly implement this method. unsafe fn links(ptr: NonNull<Self>) -> NonNull<L>; }
random_line_split
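The `addr_of_mut!`-based `links` implementation documented in the lib.rs excerpt above also scales to a node that participates in more than one intrusive structure at once, which the layout-dependent `repr(C)` cast cannot express. The sketch below follows that documented pattern; the `Entry2` type and its field names are hypothetical, and only the `cordyceps::list` and `cordyceps::mpsc_queue` `Links` types named in the docs are assumed.

```rust
use core::ptr::{self, NonNull};
use cordyceps::{list, mpsc_queue, Linked};

// Hypothetical node that lives in both a `List` and an `MpscQueue`.
struct Entry2 {
    list_links: list::Links<Entry2>,
    queue_links: mpsc_queue::Links<Entry2>,
    data: usize,
}

unsafe impl Linked<list::Links<Entry2>> for Entry2 {
    type Handle = NonNull<Self>;
    fn into_ptr(r: Self::Handle) -> NonNull<Self> { r }
    unsafe fn from_ptr(ptr: NonNull<Self>) -> Self::Handle { ptr }

    unsafe fn links(target: NonNull<Self>) -> NonNull<list::Links<Self>> {
        let target = target.as_ptr();
        // Offset straight to the `list_links` field without materialising a
        // temporary `&mut Entry2`, so Stacked Borrows has nothing to object to.
        NonNull::new_unchecked(ptr::addr_of_mut!((*target).list_links))
    }
}

unsafe impl Linked<mpsc_queue::Links<Entry2>> for Entry2 {
    type Handle = NonNull<Self>;
    fn into_ptr(r: Self::Handle) -> NonNull<Self> { r }
    unsafe fn from_ptr(ptr: NonNull<Self>) -> Self::Handle { ptr }

    unsafe fn links(target: NonNull<Self>) -> NonNull<mpsc_queue::Links<Self>> {
        let target = target.as_ptr();
        // Same pattern, different field: exactly the case the repr(C) cast
        // cannot handle, since only one field can be the first field.
        NonNull::new_unchecked(ptr::addr_of_mut!((*target).queue_links))
    }
}
```

Only the field offset changes between the two impls, which is why the docs recommend `addr_of_mut!` as the default implementation strategy.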
accounts.rs
use nimiq_account::{ Account, Accounts, BlockLogger, BlockState, RevertInfo, TransactionOperationReceipt, }; use nimiq_block::{Block, BlockError, SkipBlockInfo}; use nimiq_blockchain_interface::PushError; use nimiq_database::{traits::Database, TransactionProxy}; use nimiq_keys::Address; use nimiq_primitives::{ key_nibbles::KeyNibbles, trie::{trie_diff::TrieDiff, trie_proof::TrieProof}, }; use nimiq_serde::Deserialize; use nimiq_transaction::extended_transaction::ExtendedTransaction; use nimiq_trie::WriteTransactionProxy; use crate::{blockchain_state::BlockchainState, Blockchain}; /// Subset of the accounts in the accounts tree pub struct AccountsChunk { /// The end of the chunk. The end key is exclusive. /// When set to None it means that it is the last trie chunk. pub end_key: Option<KeyNibbles>, /// The set of accounts retrieved. pub accounts: Vec<(Address, Account)>, } /// Implements methods to handle the accounts. impl Blockchain { /// Updates the accounts given a block. /// Expects a full block with body. pub fn commit_accounts( &self, state: &BlockchainState, block: &Block, diff: Option<TrieDiff>, txn: &mut WriteTransactionProxy, block_logger: &mut BlockLogger, ) -> Result<u64, PushError> { // Get the accounts from the state. let accounts = &state.accounts; let block_state = BlockState::new(block.block_number(), block.timestamp()); // Check the type of the block. match block { Block::Macro(ref macro_block) => { // Initialize a vector to store the inherents. let inherents = self.create_macro_block_inherents(macro_block); // Commit block to AccountsTree. if accounts.is_complete(Some(txn)) { accounts.commit(txn, &[], &inherents, &block_state, block_logger)?; } else if let Some(diff) = diff { accounts.commit_incomplete(txn, diff)?; } else { return Err(PushError::MissingAccountsTrieDiff); } // Macro blocks are final and receipts for the previous batch are no longer necessary // as rebranching across this block is not possible. self.chain_store.clear_revert_infos(txn.raw()); // Store the transactions and the inherents into the History tree. let mut total_tx_size = 0; if state.can_verify_history { let ext_txs = ExtendedTransaction::from( self.network_id, macro_block.header.block_number, macro_block.header.timestamp, vec![], inherents, ); total_tx_size = self .history_store .add_to_history(txn.raw(), macro_block.epoch_number(), &ext_txs) .expect("Failed to store history") .1 }; Ok(total_tx_size) } Block::Micro(ref micro_block) => { // Get the body of the block. let body = micro_block .body .as_ref() .expect("Block body must be present"); let skip_block_info = SkipBlockInfo::from_micro_block(micro_block); // Create the inherents from any forks or skip block info. let inherents = self.create_punishment_inherents( block_state.number, &body.fork_proofs, skip_block_info, Some(txn), ); // Commit block to AccountsTree and create the receipts. let revert_info: RevertInfo = if accounts.is_complete(Some(txn)) { accounts .commit( txn, &body.get_raw_transactions(), &inherents, &block_state, block_logger, )? .into() } else if let Some(diff) = diff { accounts.commit_incomplete(txn, diff)?.into() } else { return Err(PushError::MissingAccountsTrieDiff); }; // Check that the transaction results match the ones in the block. if let RevertInfo::Receipts(receipts) = &revert_info { assert_eq!(receipts.transactions.len(), body.transactions.len()); for (index, receipt) in receipts.transactions.iter().enumerate() { let matches = match receipt { TransactionOperationReceipt::Ok(..) 
=> { body.transactions[index].succeeded() } TransactionOperationReceipt::Err(..) => { body.transactions[index].failed() } }; if!matches { return Err(PushError::InvalidBlock( BlockError::TransactionExecutionMismatch, )); } } } // Store revert info. self.chain_store.put_revert_info( txn.raw(), micro_block.header.block_number, &revert_info, ); // Store the transactions and the inherents into the History tree. let mut total_tx_size = 0; if state.can_verify_history { let ext_txs = ExtendedTransaction::from( self.network_id, micro_block.header.block_number, micro_block.header.timestamp, body.transactions.clone(), inherents, ); total_tx_size = self .history_store .add_to_history(txn.raw(), micro_block.epoch_number(), &ext_txs) .expect("Failed to store history") .1 }; Ok(total_tx_size) } } } /// Reverts the accounts given a block. This only applies to micro blocks and skip blocks, since /// macro blocks are final and can't be reverted. pub(crate) fn revert_accounts( &self, accounts: &Accounts, txn: &mut WriteTransactionProxy, block: &Block, block_logger: &mut BlockLogger, ) -> Result<u64, PushError> { if block.is_macro() { panic!("Can't revert {block} - macro blocks are final"); } let block = block.unwrap_micro_ref(); let body = block.body.as_ref().unwrap(); debug!( block = %block, is_skip = block.is_skip_block(), num_transactions = body.transactions.len(), num_fork_proofs = body.fork_proofs.len(), "Reverting block" ); // Verify accounts hash if the tree is complete or changes only happened in the complete part. if let Some(accounts_hash) = accounts.get_root_hash(Some(txn)) { assert_eq!( block.header.state_root, accounts_hash, "Cannot revert {} - inconsistent state", block, ); } // Create the inherents from any forks or skip block info. let skip_block_info = SkipBlockInfo::from_micro_block(block); let inherents = self.create_punishment_inherents(
block.block_number(), &body.fork_proofs, skip_block_info, Some(txn), ); // Get the revert info for this block. let revert_info = self .chain_store .get_revert_info(block.block_number(), Some(txn)) .expect("Failed to revert - missing revert info"); // Revert the block from AccountsTree. let block_state = BlockState::new(block.block_number(), block.header.timestamp); let result = accounts.revert( txn, &body.get_raw_transactions(), &inherents, &block_state, revert_info, block_logger, ); if let Err(e) = result { panic!("Failed to revert {block} - {e:?}"); } // Remove the transactions from the History tree. For this you only need to calculate the // number of transactions that you want to remove. let num_txs = body.transactions.len() + inherents.len(); let (_, total_size) = self .history_store .remove_partial_history(txn.raw(), block.epoch_number(), num_txs) .expect("Failed to remove partial history"); Ok(total_size) } /// Produces a Merkle proof of the inclusion of the given keys in the /// Merkle Radix Trie. pub fn get_accounts_proof(&self, keys: Vec<&KeyNibbles>) -> Option<TrieProof> { let txn = self.env.read_transaction(); self.state().accounts.get_proof(Some(&txn), keys).ok() } /// Gets an accounts chunk given a start key and a limit pub fn get_accounts_chunk( &self, txn_option: Option<&TransactionProxy>, start: KeyNibbles, limit: usize, ) -> AccountsChunk { let trie_chunk = self.state().accounts.get_chunk(start, limit, txn_option); let end_key = trie_chunk.end_key; let accounts = trie_chunk .items .into_iter() .filter(|item| item.key.to_address().is_some()) .map(|item| { ( item.key.to_address().unwrap(), Account::deserialize_from_vec(&item.value).unwrap(), ) }) .collect(); AccountsChunk { end_key, accounts } } }
random_line_split
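Because `get_accounts_chunk` reports an exclusive `end_key` and returns `None` once the last trie chunk has been served, a caller can page through the whole accounts tree by feeding each `end_key` back in as the next start key. A minimal sketch of that loop, assuming `KeyNibbles::ROOT` denotes the root/empty key; the helper name and chunk size are illustrative only.

```rust
// Illustrative helper: walk the accounts trie in fixed-size chunks.
fn collect_all_accounts(chain: &Blockchain, chunk_size: usize) -> Vec<(Address, Account)> {
    let mut all = Vec::new();
    // Assumption: `KeyNibbles::ROOT` is the empty key at which iteration starts.
    let mut start = KeyNibbles::ROOT;
    loop {
        let chunk = chain.get_accounts_chunk(None, start, chunk_size);
        all.extend(chunk.accounts);
        match chunk.end_key {
            // The end key is exclusive, so it is the correct start for the next page.
            Some(next) => start = next,
            // `None` marks the last trie chunk.
            None => break,
        }
    }
    all
}
```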
accounts.rs
use nimiq_account::{ Account, Accounts, BlockLogger, BlockState, RevertInfo, TransactionOperationReceipt, }; use nimiq_block::{Block, BlockError, SkipBlockInfo}; use nimiq_blockchain_interface::PushError; use nimiq_database::{traits::Database, TransactionProxy}; use nimiq_keys::Address; use nimiq_primitives::{ key_nibbles::KeyNibbles, trie::{trie_diff::TrieDiff, trie_proof::TrieProof}, }; use nimiq_serde::Deserialize; use nimiq_transaction::extended_transaction::ExtendedTransaction; use nimiq_trie::WriteTransactionProxy; use crate::{blockchain_state::BlockchainState, Blockchain}; /// Subset of the accounts in the accounts tree pub struct AccountsChunk { /// The end of the chunk. The end key is exclusive. /// When set to None it means that it is the last trie chunk. pub end_key: Option<KeyNibbles>, /// The set of accounts retrieved. pub accounts: Vec<(Address, Account)>, } /// Implements methods to handle the accounts. impl Blockchain { /// Updates the accounts given a block. /// Expects a full block with body. pub fn commit_accounts( &self, state: &BlockchainState, block: &Block, diff: Option<TrieDiff>, txn: &mut WriteTransactionProxy, block_logger: &mut BlockLogger, ) -> Result<u64, PushError>
// Macro blocks are final and receipts for the previous batch are no longer necessary // as rebranching across this block is not possible. self.chain_store.clear_revert_infos(txn.raw()); // Store the transactions and the inherents into the History tree. let mut total_tx_size = 0; if state.can_verify_history { let ext_txs = ExtendedTransaction::from( self.network_id, macro_block.header.block_number, macro_block.header.timestamp, vec![], inherents, ); total_tx_size = self .history_store .add_to_history(txn.raw(), macro_block.epoch_number(), &ext_txs) .expect("Failed to store history") .1 }; Ok(total_tx_size) } Block::Micro(ref micro_block) => { // Get the body of the block. let body = micro_block .body .as_ref() .expect("Block body must be present"); let skip_block_info = SkipBlockInfo::from_micro_block(micro_block); // Create the inherents from any forks or skip block info. let inherents = self.create_punishment_inherents( block_state.number, &body.fork_proofs, skip_block_info, Some(txn), ); // Commit block to AccountsTree and create the receipts. let revert_info: RevertInfo = if accounts.is_complete(Some(txn)) { accounts .commit( txn, &body.get_raw_transactions(), &inherents, &block_state, block_logger, )? .into() } else if let Some(diff) = diff { accounts.commit_incomplete(txn, diff)?.into() } else { return Err(PushError::MissingAccountsTrieDiff); }; // Check that the transaction results match the ones in the block. if let RevertInfo::Receipts(receipts) = &revert_info { assert_eq!(receipts.transactions.len(), body.transactions.len()); for (index, receipt) in receipts.transactions.iter().enumerate() { let matches = match receipt { TransactionOperationReceipt::Ok(..) => { body.transactions[index].succeeded() } TransactionOperationReceipt::Err(..) => { body.transactions[index].failed() } }; if!matches { return Err(PushError::InvalidBlock( BlockError::TransactionExecutionMismatch, )); } } } // Store revert info. self.chain_store.put_revert_info( txn.raw(), micro_block.header.block_number, &revert_info, ); // Store the transactions and the inherents into the History tree. let mut total_tx_size = 0; if state.can_verify_history { let ext_txs = ExtendedTransaction::from( self.network_id, micro_block.header.block_number, micro_block.header.timestamp, body.transactions.clone(), inherents, ); total_tx_size = self .history_store .add_to_history(txn.raw(), micro_block.epoch_number(), &ext_txs) .expect("Failed to store history") .1 }; Ok(total_tx_size) } } } /// Reverts the accounts given a block. This only applies to micro blocks and skip blocks, since /// macro blocks are final and can't be reverted. pub(crate) fn revert_accounts( &self, accounts: &Accounts, txn: &mut WriteTransactionProxy, block: &Block, block_logger: &mut BlockLogger, ) -> Result<u64, PushError> { if block.is_macro() { panic!("Can't revert {block} - macro blocks are final"); } let block = block.unwrap_micro_ref(); let body = block.body.as_ref().unwrap(); debug!( block = %block, is_skip = block.is_skip_block(), num_transactions = body.transactions.len(), num_fork_proofs = body.fork_proofs.len(), "Reverting block" ); // Verify accounts hash if the tree is complete or changes only happened in the complete part. if let Some(accounts_hash) = accounts.get_root_hash(Some(txn)) { assert_eq!( block.header.state_root, accounts_hash, "Cannot revert {} - inconsistent state", block, ); } // Create the inherents from any forks or skip block info. 
let skip_block_info = SkipBlockInfo::from_micro_block(block); let inherents = self.create_punishment_inherents( block.block_number(), &body.fork_proofs, skip_block_info, Some(txn), ); // Get the revert info for this block. let revert_info = self .chain_store .get_revert_info(block.block_number(), Some(txn)) .expect("Failed to revert - missing revert info"); // Revert the block from AccountsTree. let block_state = BlockState::new(block.block_number(), block.header.timestamp); let result = accounts.revert( txn, &body.get_raw_transactions(), &inherents, &block_state, revert_info, block_logger, ); if let Err(e) = result { panic!("Failed to revert {block} - {e:?}"); } // Remove the transactions from the History tree. For this you only need to calculate the // number of transactions that you want to remove. let num_txs = body.transactions.len() + inherents.len(); let (_, total_size) = self .history_store .remove_partial_history(txn.raw(), block.epoch_number(), num_txs) .expect("Failed to remove partial history"); Ok(total_size) } /// Produces a Merkle proof of the inclusion of the given keys in the /// Merkle Radix Trie. pub fn get_accounts_proof(&self, keys: Vec<&KeyNibbles>) -> Option<TrieProof> { let txn = self.env.read_transaction(); self.state().accounts.get_proof(Some(&txn), keys).ok() } /// Gets an accounts chunk given a start key and a limit pub fn get_accounts_chunk( &self, txn_option: Option<&TransactionProxy>, start: KeyNibbles, limit: usize, ) -> AccountsChunk { let trie_chunk = self.state().accounts.get_chunk(start, limit, txn_option); let end_key = trie_chunk.end_key; let accounts = trie_chunk .items .into_iter() .filter(|item| item.key.to_address().is_some()) .map(|item| { ( item.key.to_address().unwrap(), Account::deserialize_from_vec(&item.value).unwrap(), ) }) .collect(); AccountsChunk { end_key, accounts } } }
{ // Get the accounts from the state. let accounts = &state.accounts; let block_state = BlockState::new(block.block_number(), block.timestamp()); // Check the type of the block. match block { Block::Macro(ref macro_block) => { // Initialize a vector to store the inherents. let inherents = self.create_macro_block_inherents(macro_block); // Commit block to AccountsTree. if accounts.is_complete(Some(txn)) { accounts.commit(txn, &[], &inherents, &block_state, block_logger)?; } else if let Some(diff) = diff { accounts.commit_incomplete(txn, diff)?; } else { return Err(PushError::MissingAccountsTrieDiff); }
identifier_body
accounts.rs
use nimiq_account::{ Account, Accounts, BlockLogger, BlockState, RevertInfo, TransactionOperationReceipt, }; use nimiq_block::{Block, BlockError, SkipBlockInfo}; use nimiq_blockchain_interface::PushError; use nimiq_database::{traits::Database, TransactionProxy}; use nimiq_keys::Address; use nimiq_primitives::{ key_nibbles::KeyNibbles, trie::{trie_diff::TrieDiff, trie_proof::TrieProof}, }; use nimiq_serde::Deserialize; use nimiq_transaction::extended_transaction::ExtendedTransaction; use nimiq_trie::WriteTransactionProxy; use crate::{blockchain_state::BlockchainState, Blockchain}; /// Subset of the accounts in the accounts tree pub struct
{ /// The end of the chunk. The end key is exclusive. /// When set to None it means that it is the last trie chunk. pub end_key: Option<KeyNibbles>, /// The set of accounts retrieved. pub accounts: Vec<(Address, Account)>, } /// Implements methods to handle the accounts. impl Blockchain { /// Updates the accounts given a block. /// Expects a full block with body. pub fn commit_accounts( &self, state: &BlockchainState, block: &Block, diff: Option<TrieDiff>, txn: &mut WriteTransactionProxy, block_logger: &mut BlockLogger, ) -> Result<u64, PushError> { // Get the accounts from the state. let accounts = &state.accounts; let block_state = BlockState::new(block.block_number(), block.timestamp()); // Check the type of the block. match block { Block::Macro(ref macro_block) => { // Initialize a vector to store the inherents. let inherents = self.create_macro_block_inherents(macro_block); // Commit block to AccountsTree. if accounts.is_complete(Some(txn)) { accounts.commit(txn, &[], &inherents, &block_state, block_logger)?; } else if let Some(diff) = diff { accounts.commit_incomplete(txn, diff)?; } else { return Err(PushError::MissingAccountsTrieDiff); } // Macro blocks are final and receipts for the previous batch are no longer necessary // as rebranching across this block is not possible. self.chain_store.clear_revert_infos(txn.raw()); // Store the transactions and the inherents into the History tree. let mut total_tx_size = 0; if state.can_verify_history { let ext_txs = ExtendedTransaction::from( self.network_id, macro_block.header.block_number, macro_block.header.timestamp, vec![], inherents, ); total_tx_size = self .history_store .add_to_history(txn.raw(), macro_block.epoch_number(), &ext_txs) .expect("Failed to store history") .1 }; Ok(total_tx_size) } Block::Micro(ref micro_block) => { // Get the body of the block. let body = micro_block .body .as_ref() .expect("Block body must be present"); let skip_block_info = SkipBlockInfo::from_micro_block(micro_block); // Create the inherents from any forks or skip block info. let inherents = self.create_punishment_inherents( block_state.number, &body.fork_proofs, skip_block_info, Some(txn), ); // Commit block to AccountsTree and create the receipts. let revert_info: RevertInfo = if accounts.is_complete(Some(txn)) { accounts .commit( txn, &body.get_raw_transactions(), &inherents, &block_state, block_logger, )? .into() } else if let Some(diff) = diff { accounts.commit_incomplete(txn, diff)?.into() } else { return Err(PushError::MissingAccountsTrieDiff); }; // Check that the transaction results match the ones in the block. if let RevertInfo::Receipts(receipts) = &revert_info { assert_eq!(receipts.transactions.len(), body.transactions.len()); for (index, receipt) in receipts.transactions.iter().enumerate() { let matches = match receipt { TransactionOperationReceipt::Ok(..) => { body.transactions[index].succeeded() } TransactionOperationReceipt::Err(..) => { body.transactions[index].failed() } }; if!matches { return Err(PushError::InvalidBlock( BlockError::TransactionExecutionMismatch, )); } } } // Store revert info. self.chain_store.put_revert_info( txn.raw(), micro_block.header.block_number, &revert_info, ); // Store the transactions and the inherents into the History tree. 
let mut total_tx_size = 0; if state.can_verify_history { let ext_txs = ExtendedTransaction::from( self.network_id, micro_block.header.block_number, micro_block.header.timestamp, body.transactions.clone(), inherents, ); total_tx_size = self .history_store .add_to_history(txn.raw(), micro_block.epoch_number(), &ext_txs) .expect("Failed to store history") .1 }; Ok(total_tx_size) } } } /// Reverts the accounts given a block. This only applies to micro blocks and skip blocks, since /// macro blocks are final and can't be reverted. pub(crate) fn revert_accounts( &self, accounts: &Accounts, txn: &mut WriteTransactionProxy, block: &Block, block_logger: &mut BlockLogger, ) -> Result<u64, PushError> { if block.is_macro() { panic!("Can't revert {block} - macro blocks are final"); } let block = block.unwrap_micro_ref(); let body = block.body.as_ref().unwrap(); debug!( block = %block, is_skip = block.is_skip_block(), num_transactions = body.transactions.len(), num_fork_proofs = body.fork_proofs.len(), "Reverting block" ); // Verify accounts hash if the tree is complete or changes only happened in the complete part. if let Some(accounts_hash) = accounts.get_root_hash(Some(txn)) { assert_eq!( block.header.state_root, accounts_hash, "Cannot revert {} - inconsistent state", block, ); } // Create the inherents from any forks or skip block info. let skip_block_info = SkipBlockInfo::from_micro_block(block); let inherents = self.create_punishment_inherents( block.block_number(), &body.fork_proofs, skip_block_info, Some(txn), ); // Get the revert info for this block. let revert_info = self .chain_store .get_revert_info(block.block_number(), Some(txn)) .expect("Failed to revert - missing revert info"); // Revert the block from AccountsTree. let block_state = BlockState::new(block.block_number(), block.header.timestamp); let result = accounts.revert( txn, &body.get_raw_transactions(), &inherents, &block_state, revert_info, block_logger, ); if let Err(e) = result { panic!("Failed to revert {block} - {e:?}"); } // Remove the transactions from the History tree. For this you only need to calculate the // number of transactions that you want to remove. let num_txs = body.transactions.len() + inherents.len(); let (_, total_size) = self .history_store .remove_partial_history(txn.raw(), block.epoch_number(), num_txs) .expect("Failed to remove partial history"); Ok(total_size) } /// Produces a Merkle proof of the inclusion of the given keys in the /// Merkle Radix Trie. pub fn get_accounts_proof(&self, keys: Vec<&KeyNibbles>) -> Option<TrieProof> { let txn = self.env.read_transaction(); self.state().accounts.get_proof(Some(&txn), keys).ok() } /// Gets an accounts chunk given a start key and a limit pub fn get_accounts_chunk( &self, txn_option: Option<&TransactionProxy>, start: KeyNibbles, limit: usize, ) -> AccountsChunk { let trie_chunk = self.state().accounts.get_chunk(start, limit, txn_option); let end_key = trie_chunk.end_key; let accounts = trie_chunk .items .into_iter() .filter(|item| item.key.to_address().is_some()) .map(|item| { ( item.key.to_address().unwrap(), Account::deserialize_from_vec(&item.value).unwrap(), ) }) .collect(); AccountsChunk { end_key, accounts } } }
AccountsChunk
identifier_name
refcounteddb.rs
// Copyright 2015-2019 Parity Technologies (UK) Ltd. // This file is part of Parity Ethereum. // Parity Ethereum is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // Parity Ethereum is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>. //! Disk-backed, ref-counted `JournalDB` implementation. use std::{ io, sync::Arc, collections::HashMap, }; use ethereum_types::H256; use hash_db::{HashDB, Prefix, EMPTY_PREFIX}; use keccak_hasher::KeccakHasher; use kvdb::{KeyValueDB, DBTransaction, DBValue}; use log::trace; use malloc_size_of::{MallocSizeOf, allocators::new_malloc_size_ops}; use parity_bytes::Bytes; use rlp::{encode, decode}; use crate::{ overlaydb::OverlayDB, JournalDB, DB_PREFIX_LEN, LATEST_ERA_KEY, util::{DatabaseKey, DatabaseValueView, DatabaseValueRef}, }; /// Implementation of the `HashDB` trait for a disk-backed database with a memory overlay /// and latent-removal semantics. /// /// Like `OverlayDB`, there is a memory overlay; `commit()` must be called in order to /// write operations out to disk. Unlike `OverlayDB`, `remove()` operations do not take effect /// immediately. Rather some age (based on a linear but arbitrary metric) must pass before /// the removals actually take effect. /// /// journal format: /// ```text /// [era, 0] => [ id, [insert_0,...], [remove_0,...] ] /// [era, 1] => [ id, [insert_0,...], [remove_0,...] ] /// [era, n] => [... ] /// ``` /// /// when we make a new commit, we journal the inserts and removes. /// for each `end_era` that we journaled that we are no passing by, /// we remove all of its removes assuming it is canonical and all /// of its inserts otherwise. // TODO: store last_era, reclaim_period. pub struct RefCountedDB { forward: OverlayDB, backing: Arc<dyn KeyValueDB>, latest_era: Option<u64>, inserts: Vec<H256>, removes: Vec<H256>, column: Option<u32>, } impl RefCountedDB { /// Create a new instance given a `backing` database. 
pub fn new(backing: Arc<dyn KeyValueDB>, column: Option<u32>) -> RefCountedDB { let latest_era = backing.get(column, &LATEST_ERA_KEY) .expect("Low-level database error.") .map(|v| decode::<u64>(&v).expect("decoding db value failed")); RefCountedDB { forward: OverlayDB::new(backing.clone(), column), backing, inserts: vec![], removes: vec![], latest_era, column, } } } impl HashDB<KeccakHasher, DBValue> for RefCountedDB { fn get(&self, key: &H256, prefix: Prefix) -> Option<DBValue> { self.forward.get(key, prefix) } fn contains(&self, key: &H256, prefix: Prefix) -> bool { self.forward.contains(key, prefix) } fn insert(&mut self, prefix: Prefix, value: &[u8]) -> H256 { let r = self.forward.insert(prefix, value); self.inserts.push(r.clone()); r } fn emplace(&mut self, key: H256, prefix: Prefix, value: DBValue) { self.inserts.push(key.clone()); self.forward.emplace(key, prefix, value); } fn remove(&mut self, key: &H256, _prefix: Prefix) { self.removes.push(key.clone()); } } impl JournalDB for RefCountedDB { fn boxed_clone(&self) -> Box<dyn JournalDB> { Box::new(RefCountedDB { forward: self.forward.clone(), backing: self.backing.clone(), latest_era: self.latest_era, inserts: self.inserts.clone(), removes: self.removes.clone(), column: self.column.clone(), }) } fn mem_used(&self) -> usize { let mut ops = new_malloc_size_ops(); self.inserts.size_of(&mut ops) + self.removes.size_of(&mut ops) } fn is_empty(&self) -> bool { self.latest_era.is_none() } fn backing(&self) -> &Arc<dyn KeyValueDB> { &self.backing } fn latest_era(&self) -> Option<u64> { self.latest_era } fn state(&self, id: &H256) -> Option<Bytes> { self.backing.get_by_prefix(self.column, &id[0..DB_PREFIX_LEN]).map(|b| b.into_vec()) } fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> io::Result<u32> { // record new commit's details. let mut db_key = DatabaseKey { era: now, index: 0usize, }; let mut last; while self.backing.get(self.column, { last = encode(&db_key); &last })?.is_some() { db_key.index += 1; } { let value_ref = DatabaseValueRef { id, inserts: &self.inserts, deletes: &self.removes, }; batch.put(self.column, &last, &encode(&value_ref)); } let ops = self.inserts.len() + self.removes.len(); trace!(target: "rcdb", "new journal for time #{}.{} => {}: inserts={:?}, removes={:?}", now, db_key.index, id, self.inserts, self.removes); self.inserts.clear(); self.removes.clear(); if self.latest_era.map_or(true, |e| now > e) { batch.put(self.column, &LATEST_ERA_KEY, &encode(&now)); self.latest_era = Some(now); } Ok(ops as u32) } fn mark_canonical(&mut self, batch: &mut DBTransaction, end_era: u64, canon_id: &H256) -> io::Result<u32> { // apply old commits' details let mut db_key = DatabaseKey { era: end_era, index: 0usize, }; let mut last; while let Some(rlp_data) = { self.backing.get(self.column, { last = encode(&db_key); &last })? } { let view = DatabaseValueView::from_rlp(&rlp_data); let our_id = view.id().expect("rlp read from db; qed"); let to_remove = if canon_id == &our_id { view.deletes() } else { view.inserts() }.expect("rlp read from db; qed"); trace!(target: "rcdb", "delete journal for time #{}.{}=>{}, (canon was {}): deleting {:?}", end_era, db_key.index, our_id, canon_id, to_remove); for i in &to_remove { self.forward.remove(i, EMPTY_PREFIX); } batch.delete(self.column, &last); db_key.index += 1; } let r = self.forward.commit_to_batch(batch)?; Ok(r) } fn inject(&mut self, batch: &mut DBTransaction) -> io::Result<u32> { self.inserts.clear(); for remove in self.removes.drain(..) 
{ self.forward.remove(&remove, EMPTY_PREFIX); } self.forward.commit_to_batch(batch) } fn consolidate(&mut self, mut with: super::MemoryDB) { for (key, (value, rc)) in with.drain() { for _ in 0..rc { self.emplace(key, EMPTY_PREFIX, value.clone()); } for _ in rc..0 { self.remove(&key, EMPTY_PREFIX); } } } fn
(&self) -> HashMap<H256, i32> { self.forward.keys() } } #[cfg(test)] mod tests { use keccak_hash::keccak; use hash_db::{HashDB, EMPTY_PREFIX}; use super::*; use kvdb_memorydb; use crate::{JournalDB, inject_batch, commit_batch}; fn new_db() -> RefCountedDB { let backing = Arc::new(kvdb_memorydb::create(0)); RefCountedDB::new(backing, None) } #[test] fn long_history() { // history is 3 let mut jdb = new_db(); let h = jdb.insert(EMPTY_PREFIX, b"foo"); commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.contains(&h, EMPTY_PREFIX)); jdb.remove(&h, EMPTY_PREFIX); commit_batch(&mut jdb, 1, &keccak(b"1"), None).unwrap(); assert!(jdb.contains(&h, EMPTY_PREFIX)); commit_batch(&mut jdb, 2, &keccak(b"2"), None).unwrap(); assert!(jdb.contains(&h, EMPTY_PREFIX)); commit_batch(&mut jdb, 3, &keccak(b"3"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.contains(&h, EMPTY_PREFIX)); commit_batch(&mut jdb, 4, &keccak(b"4"), Some((1, keccak(b"1")))).unwrap(); assert!(!jdb.contains(&h, EMPTY_PREFIX)); } #[test] fn latest_era_should_work() { // history is 3 let mut jdb = new_db(); assert_eq!(jdb.latest_era(), None); let h = jdb.insert(EMPTY_PREFIX, b"foo"); commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert_eq!(jdb.latest_era(), Some(0)); jdb.remove(&h, EMPTY_PREFIX); commit_batch(&mut jdb, 1, &keccak(b"1"), None).unwrap(); assert_eq!(jdb.latest_era(), Some(1)); commit_batch(&mut jdb, 2, &keccak(b"2"), None).unwrap(); assert_eq!(jdb.latest_era(), Some(2)); commit_batch(&mut jdb, 3, &keccak(b"3"), Some((0, keccak(b"0")))).unwrap(); assert_eq!(jdb.latest_era(), Some(3)); commit_batch(&mut jdb, 4, &keccak(b"4"), Some((1, keccak(b"1")))).unwrap(); assert_eq!(jdb.latest_era(), Some(4)); } #[test] fn complex() { // history is 1 let mut jdb = new_db(); let foo = jdb.insert(EMPTY_PREFIX, b"foo"); let bar = jdb.insert(EMPTY_PREFIX, b"bar"); commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(jdb.contains(&bar, EMPTY_PREFIX)); jdb.remove(&foo, EMPTY_PREFIX); jdb.remove(&bar, EMPTY_PREFIX); let baz = jdb.insert(EMPTY_PREFIX, b"baz"); commit_batch(&mut jdb, 1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(jdb.contains(&bar, EMPTY_PREFIX)); assert!(jdb.contains(&baz, EMPTY_PREFIX)); let foo = jdb.insert(EMPTY_PREFIX, b"foo"); jdb.remove(&baz, EMPTY_PREFIX); commit_batch(&mut jdb, 2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(!jdb.contains(&bar, EMPTY_PREFIX)); assert!(jdb.contains(&baz, EMPTY_PREFIX)); jdb.remove(&foo, EMPTY_PREFIX); commit_batch(&mut jdb, 3, &keccak(b"3"), Some((2, keccak(b"2")))).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(!jdb.contains(&bar, EMPTY_PREFIX)); assert!(!jdb.contains(&baz, EMPTY_PREFIX)); commit_batch(&mut jdb, 4, &keccak(b"4"), Some((3, keccak(b"3")))).unwrap(); assert!(!jdb.contains(&foo, EMPTY_PREFIX)); assert!(!jdb.contains(&bar, EMPTY_PREFIX)); assert!(!jdb.contains(&baz, EMPTY_PREFIX)); } #[test] fn fork() { // history is 1 let mut jdb = new_db(); let foo = jdb.insert(EMPTY_PREFIX, b"foo"); let bar = jdb.insert(EMPTY_PREFIX, b"bar"); commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(jdb.contains(&bar, EMPTY_PREFIX)); jdb.remove(&foo, EMPTY_PREFIX); let baz = jdb.insert(EMPTY_PREFIX, b"baz"); commit_batch(&mut jdb, 1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); jdb.remove(&bar, EMPTY_PREFIX); commit_batch(&mut 
jdb, 1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(jdb.contains(&bar, EMPTY_PREFIX)); assert!(jdb.contains(&baz, EMPTY_PREFIX)); commit_batch(&mut jdb, 2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(!jdb.contains(&baz, EMPTY_PREFIX)); assert!(!jdb.contains(&bar, EMPTY_PREFIX)); } #[test] fn inject() { let mut jdb = new_db(); let key = jdb.insert(EMPTY_PREFIX, b"dog"); inject_batch(&mut jdb).unwrap(); assert_eq!(jdb.get(&key, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"dog")); jdb.remove(&key, EMPTY_PREFIX); inject_batch(&mut jdb).unwrap(); assert!(jdb.get(&key, EMPTY_PREFIX).is_none()); } }
keys
identifier_name
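The `journal_under` / `mark_canonical` pair above implements the latent-removal scheme described in the module docs: a `remove()` only takes effect once the era that journalled it is finalised. The toy model below is not the real `RefCountedDB` API and is simplified to the canonical path only; it just illustrates that ordering.

```rust
use std::collections::{HashMap, HashSet};

// Toy model of latent removal: keys stay visible until the era that
// journalled their removal is marked canonical. Illustrative only.
#[derive(Default)]
struct ToyJournal {
    visible: HashSet<u64>,
    // era -> keys whose removal is journalled but not yet applied
    pending_removes: HashMap<u64, Vec<u64>>,
}

impl ToyJournal {
    fn insert(&mut self, key: u64) {
        self.visible.insert(key);
    }

    fn remove(&mut self, era: u64, key: u64) {
        // Latent: nothing disappears yet, the removal is only journalled.
        self.pending_removes.entry(era).or_default().push(key);
    }

    fn mark_canonical(&mut self, era: u64) {
        // Only now do the journalled removals of `era` take effect.
        if let Some(removes) = self.pending_removes.remove(&era) {
            for key in removes {
                self.visible.remove(&key);
            }
        }
    }
}
```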
refcounteddb.rs
// Copyright 2015-2019 Parity Technologies (UK) Ltd. // This file is part of Parity Ethereum. // Parity Ethereum is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // Parity Ethereum is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>. //! Disk-backed, ref-counted `JournalDB` implementation. use std::{ io, sync::Arc, collections::HashMap, }; use ethereum_types::H256; use hash_db::{HashDB, Prefix, EMPTY_PREFIX}; use keccak_hasher::KeccakHasher; use kvdb::{KeyValueDB, DBTransaction, DBValue}; use log::trace; use malloc_size_of::{MallocSizeOf, allocators::new_malloc_size_ops}; use parity_bytes::Bytes; use rlp::{encode, decode}; use crate::{ overlaydb::OverlayDB, JournalDB, DB_PREFIX_LEN, LATEST_ERA_KEY, util::{DatabaseKey, DatabaseValueView, DatabaseValueRef}, }; /// Implementation of the `HashDB` trait for a disk-backed database with a memory overlay /// and latent-removal semantics. /// /// Like `OverlayDB`, there is a memory overlay; `commit()` must be called in order to /// write operations out to disk. Unlike `OverlayDB`, `remove()` operations do not take effect /// immediately. Rather some age (based on a linear but arbitrary metric) must pass before /// the removals actually take effect. /// /// journal format: /// ```text /// [era, 0] => [ id, [insert_0,...], [remove_0,...] ] /// [era, 1] => [ id, [insert_0,...], [remove_0,...] ] /// [era, n] => [... ] /// ``` /// /// when we make a new commit, we journal the inserts and removes. /// for each `end_era` that we journaled that we are no passing by, /// we remove all of its removes assuming it is canonical and all /// of its inserts otherwise. // TODO: store last_era, reclaim_period. pub struct RefCountedDB { forward: OverlayDB, backing: Arc<dyn KeyValueDB>, latest_era: Option<u64>, inserts: Vec<H256>, removes: Vec<H256>, column: Option<u32>, } impl RefCountedDB { /// Create a new instance given a `backing` database. 
pub fn new(backing: Arc<dyn KeyValueDB>, column: Option<u32>) -> RefCountedDB { let latest_era = backing.get(column, &LATEST_ERA_KEY) .expect("Low-level database error.") .map(|v| decode::<u64>(&v).expect("decoding db value failed")); RefCountedDB { forward: OverlayDB::new(backing.clone(), column), backing, inserts: vec![], removes: vec![], latest_era, column, } } } impl HashDB<KeccakHasher, DBValue> for RefCountedDB { fn get(&self, key: &H256, prefix: Prefix) -> Option<DBValue> { self.forward.get(key, prefix) } fn contains(&self, key: &H256, prefix: Prefix) -> bool { self.forward.contains(key, prefix) } fn insert(&mut self, prefix: Prefix, value: &[u8]) -> H256 { let r = self.forward.insert(prefix, value); self.inserts.push(r.clone()); r } fn emplace(&mut self, key: H256, prefix: Prefix, value: DBValue) { self.inserts.push(key.clone()); self.forward.emplace(key, prefix, value); } fn remove(&mut self, key: &H256, _prefix: Prefix) { self.removes.push(key.clone()); } } impl JournalDB for RefCountedDB { fn boxed_clone(&self) -> Box<dyn JournalDB> { Box::new(RefCountedDB { forward: self.forward.clone(), backing: self.backing.clone(), latest_era: self.latest_era, inserts: self.inserts.clone(), removes: self.removes.clone(), column: self.column.clone(), }) } fn mem_used(&self) -> usize { let mut ops = new_malloc_size_ops(); self.inserts.size_of(&mut ops) + self.removes.size_of(&mut ops) } fn is_empty(&self) -> bool { self.latest_era.is_none() } fn backing(&self) -> &Arc<dyn KeyValueDB> { &self.backing } fn latest_era(&self) -> Option<u64> { self.latest_era } fn state(&self, id: &H256) -> Option<Bytes> { self.backing.get_by_prefix(self.column, &id[0..DB_PREFIX_LEN]).map(|b| b.into_vec()) } fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> io::Result<u32> { // record new commit's details. let mut db_key = DatabaseKey { era: now, index: 0usize, }; let mut last; while self.backing.get(self.column, { last = encode(&db_key); &last })?.is_some() { db_key.index += 1; } { let value_ref = DatabaseValueRef { id, inserts: &self.inserts, deletes: &self.removes, }; batch.put(self.column, &last, &encode(&value_ref)); } let ops = self.inserts.len() + self.removes.len(); trace!(target: "rcdb", "new journal for time #{}.{} => {}: inserts={:?}, removes={:?}", now, db_key.index, id, self.inserts, self.removes); self.inserts.clear(); self.removes.clear(); if self.latest_era.map_or(true, |e| now > e) { batch.put(self.column, &LATEST_ERA_KEY, &encode(&now)); self.latest_era = Some(now); } Ok(ops as u32) } fn mark_canonical(&mut self, batch: &mut DBTransaction, end_era: u64, canon_id: &H256) -> io::Result<u32> { // apply old commits' details let mut db_key = DatabaseKey { era: end_era, index: 0usize, }; let mut last; while let Some(rlp_data) = { self.backing.get(self.column, { last = encode(&db_key); &last })? } { let view = DatabaseValueView::from_rlp(&rlp_data); let our_id = view.id().expect("rlp read from db; qed"); let to_remove = if canon_id == &our_id { view.deletes() } else
.expect("rlp read from db; qed"); trace!(target: "rcdb", "delete journal for time #{}.{}=>{}, (canon was {}): deleting {:?}", end_era, db_key.index, our_id, canon_id, to_remove); for i in &to_remove { self.forward.remove(i, EMPTY_PREFIX); } batch.delete(self.column, &last); db_key.index += 1; } let r = self.forward.commit_to_batch(batch)?; Ok(r) } fn inject(&mut self, batch: &mut DBTransaction) -> io::Result<u32> { self.inserts.clear(); for remove in self.removes.drain(..) { self.forward.remove(&remove, EMPTY_PREFIX); } self.forward.commit_to_batch(batch) } fn consolidate(&mut self, mut with: super::MemoryDB) { for (key, (value, rc)) in with.drain() { for _ in 0..rc { self.emplace(key, EMPTY_PREFIX, value.clone()); } for _ in rc..0 { self.remove(&key, EMPTY_PREFIX); } } } fn keys(&self) -> HashMap<H256, i32> { self.forward.keys() } } #[cfg(test)] mod tests { use keccak_hash::keccak; use hash_db::{HashDB, EMPTY_PREFIX}; use super::*; use kvdb_memorydb; use crate::{JournalDB, inject_batch, commit_batch}; fn new_db() -> RefCountedDB { let backing = Arc::new(kvdb_memorydb::create(0)); RefCountedDB::new(backing, None) } #[test] fn long_history() { // history is 3 let mut jdb = new_db(); let h = jdb.insert(EMPTY_PREFIX, b"foo"); commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.contains(&h, EMPTY_PREFIX)); jdb.remove(&h, EMPTY_PREFIX); commit_batch(&mut jdb, 1, &keccak(b"1"), None).unwrap(); assert!(jdb.contains(&h, EMPTY_PREFIX)); commit_batch(&mut jdb, 2, &keccak(b"2"), None).unwrap(); assert!(jdb.contains(&h, EMPTY_PREFIX)); commit_batch(&mut jdb, 3, &keccak(b"3"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.contains(&h, EMPTY_PREFIX)); commit_batch(&mut jdb, 4, &keccak(b"4"), Some((1, keccak(b"1")))).unwrap(); assert!(!jdb.contains(&h, EMPTY_PREFIX)); } #[test] fn latest_era_should_work() { // history is 3 let mut jdb = new_db(); assert_eq!(jdb.latest_era(), None); let h = jdb.insert(EMPTY_PREFIX, b"foo"); commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert_eq!(jdb.latest_era(), Some(0)); jdb.remove(&h, EMPTY_PREFIX); commit_batch(&mut jdb, 1, &keccak(b"1"), None).unwrap(); assert_eq!(jdb.latest_era(), Some(1)); commit_batch(&mut jdb, 2, &keccak(b"2"), None).unwrap(); assert_eq!(jdb.latest_era(), Some(2)); commit_batch(&mut jdb, 3, &keccak(b"3"), Some((0, keccak(b"0")))).unwrap(); assert_eq!(jdb.latest_era(), Some(3)); commit_batch(&mut jdb, 4, &keccak(b"4"), Some((1, keccak(b"1")))).unwrap(); assert_eq!(jdb.latest_era(), Some(4)); } #[test] fn complex() { // history is 1 let mut jdb = new_db(); let foo = jdb.insert(EMPTY_PREFIX, b"foo"); let bar = jdb.insert(EMPTY_PREFIX, b"bar"); commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(jdb.contains(&bar, EMPTY_PREFIX)); jdb.remove(&foo, EMPTY_PREFIX); jdb.remove(&bar, EMPTY_PREFIX); let baz = jdb.insert(EMPTY_PREFIX, b"baz"); commit_batch(&mut jdb, 1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(jdb.contains(&bar, EMPTY_PREFIX)); assert!(jdb.contains(&baz, EMPTY_PREFIX)); let foo = jdb.insert(EMPTY_PREFIX, b"foo"); jdb.remove(&baz, EMPTY_PREFIX); commit_batch(&mut jdb, 2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(!jdb.contains(&bar, EMPTY_PREFIX)); assert!(jdb.contains(&baz, EMPTY_PREFIX)); jdb.remove(&foo, EMPTY_PREFIX); commit_batch(&mut jdb, 3, &keccak(b"3"), Some((2, keccak(b"2")))).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); 
assert!(!jdb.contains(&bar, EMPTY_PREFIX)); assert!(!jdb.contains(&baz, EMPTY_PREFIX)); commit_batch(&mut jdb, 4, &keccak(b"4"), Some((3, keccak(b"3")))).unwrap(); assert!(!jdb.contains(&foo, EMPTY_PREFIX)); assert!(!jdb.contains(&bar, EMPTY_PREFIX)); assert!(!jdb.contains(&baz, EMPTY_PREFIX)); } #[test] fn fork() { // history is 1 let mut jdb = new_db(); let foo = jdb.insert(EMPTY_PREFIX, b"foo"); let bar = jdb.insert(EMPTY_PREFIX, b"bar"); commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(jdb.contains(&bar, EMPTY_PREFIX)); jdb.remove(&foo, EMPTY_PREFIX); let baz = jdb.insert(EMPTY_PREFIX, b"baz"); commit_batch(&mut jdb, 1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); jdb.remove(&bar, EMPTY_PREFIX); commit_batch(&mut jdb, 1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(jdb.contains(&bar, EMPTY_PREFIX)); assert!(jdb.contains(&baz, EMPTY_PREFIX)); commit_batch(&mut jdb, 2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(!jdb.contains(&baz, EMPTY_PREFIX)); assert!(!jdb.contains(&bar, EMPTY_PREFIX)); } #[test] fn inject() { let mut jdb = new_db(); let key = jdb.insert(EMPTY_PREFIX, b"dog"); inject_batch(&mut jdb).unwrap(); assert_eq!(jdb.get(&key, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"dog")); jdb.remove(&key, EMPTY_PREFIX); inject_batch(&mut jdb).unwrap(); assert!(jdb.get(&key, EMPTY_PREFIX).is_none()); } }
{ view.inserts() }
conditional_block
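The `if canon_id == &our_id` branch exercised by this row is the heart of the rule quoted in the doc comment: when an old era entry is passed, apply its journalled removes if it turned out to be canonical, and roll back its inserts otherwise. A standalone restatement of that decision, with purely illustrative types (the real code reads these fields out of an RLP view):

```rust
// Illustrative stand-in for one journalled era entry.
struct JournalEntry {
    id: u64,
    inserts: Vec<u64>,
    removes: Vec<u64>,
}

/// Which keys should be deleted from the backing overlay when `entry`'s era
/// is finalised with `canon_id` as the canonical id.
fn keys_to_delete(entry: &JournalEntry, canon_id: u64) -> &[u64] {
    if entry.id == canon_id {
        // Canonical: its inserts stay, its deferred removes are applied.
        &entry.removes
    } else {
        // Non-canonical sibling: undo its inserts instead.
        &entry.inserts
    }
}
```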
refcounteddb.rs
// Copyright 2015-2019 Parity Technologies (UK) Ltd. // This file is part of Parity Ethereum. // Parity Ethereum is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // Parity Ethereum is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>. //! Disk-backed, ref-counted `JournalDB` implementation. use std::{ io, sync::Arc, collections::HashMap, }; use ethereum_types::H256; use hash_db::{HashDB, Prefix, EMPTY_PREFIX}; use keccak_hasher::KeccakHasher; use kvdb::{KeyValueDB, DBTransaction, DBValue}; use log::trace; use malloc_size_of::{MallocSizeOf, allocators::new_malloc_size_ops}; use parity_bytes::Bytes; use rlp::{encode, decode}; use crate::{ overlaydb::OverlayDB, JournalDB, DB_PREFIX_LEN, LATEST_ERA_KEY, util::{DatabaseKey, DatabaseValueView, DatabaseValueRef}, }; /// Implementation of the `HashDB` trait for a disk-backed database with a memory overlay /// and latent-removal semantics. /// /// Like `OverlayDB`, there is a memory overlay; `commit()` must be called in order to /// write operations out to disk. Unlike `OverlayDB`, `remove()` operations do not take effect /// immediately. Rather some age (based on a linear but arbitrary metric) must pass before /// the removals actually take effect. /// /// journal format: /// ```text /// [era, 0] => [ id, [insert_0,...], [remove_0,...] ] /// [era, 1] => [ id, [insert_0,...], [remove_0,...] ] /// [era, n] => [... ] /// ``` /// /// when we make a new commit, we journal the inserts and removes. /// for each `end_era` that we journaled that we are no passing by, /// we remove all of its removes assuming it is canonical and all /// of its inserts otherwise. // TODO: store last_era, reclaim_period. pub struct RefCountedDB { forward: OverlayDB, backing: Arc<dyn KeyValueDB>, latest_era: Option<u64>, inserts: Vec<H256>, removes: Vec<H256>, column: Option<u32>, } impl RefCountedDB { /// Create a new instance given a `backing` database. 
pub fn new(backing: Arc<dyn KeyValueDB>, column: Option<u32>) -> RefCountedDB { let latest_era = backing.get(column, &LATEST_ERA_KEY) .expect("Low-level database error.") .map(|v| decode::<u64>(&v).expect("decoding db value failed")); RefCountedDB { forward: OverlayDB::new(backing.clone(), column), backing, inserts: vec![], removes: vec![], latest_era, column, } } } impl HashDB<KeccakHasher, DBValue> for RefCountedDB { fn get(&self, key: &H256, prefix: Prefix) -> Option<DBValue> { self.forward.get(key, prefix) } fn contains(&self, key: &H256, prefix: Prefix) -> bool { self.forward.contains(key, prefix) } fn insert(&mut self, prefix: Prefix, value: &[u8]) -> H256 { let r = self.forward.insert(prefix, value); self.inserts.push(r.clone()); r } fn emplace(&mut self, key: H256, prefix: Prefix, value: DBValue) { self.inserts.push(key.clone()); self.forward.emplace(key, prefix, value); } fn remove(&mut self, key: &H256, _prefix: Prefix) { self.removes.push(key.clone()); } } impl JournalDB for RefCountedDB { fn boxed_clone(&self) -> Box<dyn JournalDB> { Box::new(RefCountedDB { forward: self.forward.clone(), backing: self.backing.clone(), latest_era: self.latest_era, inserts: self.inserts.clone(), removes: self.removes.clone(), column: self.column.clone(),
fn mem_used(&self) -> usize { let mut ops = new_malloc_size_ops(); self.inserts.size_of(&mut ops) + self.removes.size_of(&mut ops) } fn is_empty(&self) -> bool { self.latest_era.is_none() } fn backing(&self) -> &Arc<dyn KeyValueDB> { &self.backing } fn latest_era(&self) -> Option<u64> { self.latest_era } fn state(&self, id: &H256) -> Option<Bytes> { self.backing.get_by_prefix(self.column, &id[0..DB_PREFIX_LEN]).map(|b| b.into_vec()) } fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> io::Result<u32> { // record new commit's details. let mut db_key = DatabaseKey { era: now, index: 0usize, }; let mut last; while self.backing.get(self.column, { last = encode(&db_key); &last })?.is_some() { db_key.index += 1; } { let value_ref = DatabaseValueRef { id, inserts: &self.inserts, deletes: &self.removes, }; batch.put(self.column, &last, &encode(&value_ref)); } let ops = self.inserts.len() + self.removes.len(); trace!(target: "rcdb", "new journal for time #{}.{} => {}: inserts={:?}, removes={:?}", now, db_key.index, id, self.inserts, self.removes); self.inserts.clear(); self.removes.clear(); if self.latest_era.map_or(true, |e| now > e) { batch.put(self.column, &LATEST_ERA_KEY, &encode(&now)); self.latest_era = Some(now); } Ok(ops as u32) } fn mark_canonical(&mut self, batch: &mut DBTransaction, end_era: u64, canon_id: &H256) -> io::Result<u32> { // apply old commits' details let mut db_key = DatabaseKey { era: end_era, index: 0usize, }; let mut last; while let Some(rlp_data) = { self.backing.get(self.column, { last = encode(&db_key); &last })? } { let view = DatabaseValueView::from_rlp(&rlp_data); let our_id = view.id().expect("rlp read from db; qed"); let to_remove = if canon_id == &our_id { view.deletes() } else { view.inserts() }.expect("rlp read from db; qed"); trace!(target: "rcdb", "delete journal for time #{}.{}=>{}, (canon was {}): deleting {:?}", end_era, db_key.index, our_id, canon_id, to_remove); for i in &to_remove { self.forward.remove(i, EMPTY_PREFIX); } batch.delete(self.column, &last); db_key.index += 1; } let r = self.forward.commit_to_batch(batch)?; Ok(r) } fn inject(&mut self, batch: &mut DBTransaction) -> io::Result<u32> { self.inserts.clear(); for remove in self.removes.drain(..) 
{ self.forward.remove(&remove, EMPTY_PREFIX); } self.forward.commit_to_batch(batch) } fn consolidate(&mut self, mut with: super::MemoryDB) { for (key, (value, rc)) in with.drain() { for _ in 0..rc { self.emplace(key, EMPTY_PREFIX, value.clone()); } for _ in rc..0 { self.remove(&key, EMPTY_PREFIX); } } } fn keys(&self) -> HashMap<H256, i32> { self.forward.keys() } } #[cfg(test)] mod tests { use keccak_hash::keccak; use hash_db::{HashDB, EMPTY_PREFIX}; use super::*; use kvdb_memorydb; use crate::{JournalDB, inject_batch, commit_batch}; fn new_db() -> RefCountedDB { let backing = Arc::new(kvdb_memorydb::create(0)); RefCountedDB::new(backing, None) } #[test] fn long_history() { // history is 3 let mut jdb = new_db(); let h = jdb.insert(EMPTY_PREFIX, b"foo"); commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.contains(&h, EMPTY_PREFIX)); jdb.remove(&h, EMPTY_PREFIX); commit_batch(&mut jdb, 1, &keccak(b"1"), None).unwrap(); assert!(jdb.contains(&h, EMPTY_PREFIX)); commit_batch(&mut jdb, 2, &keccak(b"2"), None).unwrap(); assert!(jdb.contains(&h, EMPTY_PREFIX)); commit_batch(&mut jdb, 3, &keccak(b"3"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.contains(&h, EMPTY_PREFIX)); commit_batch(&mut jdb, 4, &keccak(b"4"), Some((1, keccak(b"1")))).unwrap(); assert!(!jdb.contains(&h, EMPTY_PREFIX)); } #[test] fn latest_era_should_work() { // history is 3 let mut jdb = new_db(); assert_eq!(jdb.latest_era(), None); let h = jdb.insert(EMPTY_PREFIX, b"foo"); commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert_eq!(jdb.latest_era(), Some(0)); jdb.remove(&h, EMPTY_PREFIX); commit_batch(&mut jdb, 1, &keccak(b"1"), None).unwrap(); assert_eq!(jdb.latest_era(), Some(1)); commit_batch(&mut jdb, 2, &keccak(b"2"), None).unwrap(); assert_eq!(jdb.latest_era(), Some(2)); commit_batch(&mut jdb, 3, &keccak(b"3"), Some((0, keccak(b"0")))).unwrap(); assert_eq!(jdb.latest_era(), Some(3)); commit_batch(&mut jdb, 4, &keccak(b"4"), Some((1, keccak(b"1")))).unwrap(); assert_eq!(jdb.latest_era(), Some(4)); } #[test] fn complex() { // history is 1 let mut jdb = new_db(); let foo = jdb.insert(EMPTY_PREFIX, b"foo"); let bar = jdb.insert(EMPTY_PREFIX, b"bar"); commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(jdb.contains(&bar, EMPTY_PREFIX)); jdb.remove(&foo, EMPTY_PREFIX); jdb.remove(&bar, EMPTY_PREFIX); let baz = jdb.insert(EMPTY_PREFIX, b"baz"); commit_batch(&mut jdb, 1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(jdb.contains(&bar, EMPTY_PREFIX)); assert!(jdb.contains(&baz, EMPTY_PREFIX)); let foo = jdb.insert(EMPTY_PREFIX, b"foo"); jdb.remove(&baz, EMPTY_PREFIX); commit_batch(&mut jdb, 2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(!jdb.contains(&bar, EMPTY_PREFIX)); assert!(jdb.contains(&baz, EMPTY_PREFIX)); jdb.remove(&foo, EMPTY_PREFIX); commit_batch(&mut jdb, 3, &keccak(b"3"), Some((2, keccak(b"2")))).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(!jdb.contains(&bar, EMPTY_PREFIX)); assert!(!jdb.contains(&baz, EMPTY_PREFIX)); commit_batch(&mut jdb, 4, &keccak(b"4"), Some((3, keccak(b"3")))).unwrap(); assert!(!jdb.contains(&foo, EMPTY_PREFIX)); assert!(!jdb.contains(&bar, EMPTY_PREFIX)); assert!(!jdb.contains(&baz, EMPTY_PREFIX)); } #[test] fn fork() { // history is 1 let mut jdb = new_db(); let foo = jdb.insert(EMPTY_PREFIX, b"foo"); let bar = jdb.insert(EMPTY_PREFIX, b"bar"); commit_batch(&mut jdb, 0, 
&keccak(b"0"), None).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(jdb.contains(&bar, EMPTY_PREFIX)); jdb.remove(&foo, EMPTY_PREFIX); let baz = jdb.insert(EMPTY_PREFIX, b"baz"); commit_batch(&mut jdb, 1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); jdb.remove(&bar, EMPTY_PREFIX); commit_batch(&mut jdb, 1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(jdb.contains(&bar, EMPTY_PREFIX)); assert!(jdb.contains(&baz, EMPTY_PREFIX)); commit_batch(&mut jdb, 2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(!jdb.contains(&baz, EMPTY_PREFIX)); assert!(!jdb.contains(&bar, EMPTY_PREFIX)); } #[test] fn inject() { let mut jdb = new_db(); let key = jdb.insert(EMPTY_PREFIX, b"dog"); inject_batch(&mut jdb).unwrap(); assert_eq!(jdb.get(&key, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"dog")); jdb.remove(&key, EMPTY_PREFIX); inject_batch(&mut jdb).unwrap(); assert!(jdb.get(&key, EMPTY_PREFIX).is_none()); } }
}) }
random_line_split
refcounteddb.rs
// Copyright 2015-2019 Parity Technologies (UK) Ltd. // This file is part of Parity Ethereum. // Parity Ethereum is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // Parity Ethereum is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>. //! Disk-backed, ref-counted `JournalDB` implementation. use std::{ io, sync::Arc, collections::HashMap, }; use ethereum_types::H256; use hash_db::{HashDB, Prefix, EMPTY_PREFIX}; use keccak_hasher::KeccakHasher; use kvdb::{KeyValueDB, DBTransaction, DBValue}; use log::trace; use malloc_size_of::{MallocSizeOf, allocators::new_malloc_size_ops}; use parity_bytes::Bytes; use rlp::{encode, decode}; use crate::{ overlaydb::OverlayDB, JournalDB, DB_PREFIX_LEN, LATEST_ERA_KEY, util::{DatabaseKey, DatabaseValueView, DatabaseValueRef}, }; /// Implementation of the `HashDB` trait for a disk-backed database with a memory overlay /// and latent-removal semantics. /// /// Like `OverlayDB`, there is a memory overlay; `commit()` must be called in order to /// write operations out to disk. Unlike `OverlayDB`, `remove()` operations do not take effect /// immediately. Rather some age (based on a linear but arbitrary metric) must pass before /// the removals actually take effect. /// /// journal format: /// ```text /// [era, 0] => [ id, [insert_0,...], [remove_0,...] ] /// [era, 1] => [ id, [insert_0,...], [remove_0,...] ] /// [era, n] => [... ] /// ``` /// /// when we make a new commit, we journal the inserts and removes. /// for each `end_era` that we journaled that we are no passing by, /// we remove all of its removes assuming it is canonical and all /// of its inserts otherwise. // TODO: store last_era, reclaim_period. pub struct RefCountedDB { forward: OverlayDB, backing: Arc<dyn KeyValueDB>, latest_era: Option<u64>, inserts: Vec<H256>, removes: Vec<H256>, column: Option<u32>, } impl RefCountedDB { /// Create a new instance given a `backing` database. 
pub fn new(backing: Arc<dyn KeyValueDB>, column: Option<u32>) -> RefCountedDB { let latest_era = backing.get(column, &LATEST_ERA_KEY) .expect("Low-level database error.") .map(|v| decode::<u64>(&v).expect("decoding db value failed")); RefCountedDB { forward: OverlayDB::new(backing.clone(), column), backing, inserts: vec![], removes: vec![], latest_era, column, } } } impl HashDB<KeccakHasher, DBValue> for RefCountedDB { fn get(&self, key: &H256, prefix: Prefix) -> Option<DBValue> { self.forward.get(key, prefix) } fn contains(&self, key: &H256, prefix: Prefix) -> bool { self.forward.contains(key, prefix) } fn insert(&mut self, prefix: Prefix, value: &[u8]) -> H256 { let r = self.forward.insert(prefix, value); self.inserts.push(r.clone()); r } fn emplace(&mut self, key: H256, prefix: Prefix, value: DBValue) { self.inserts.push(key.clone()); self.forward.emplace(key, prefix, value); } fn remove(&mut self, key: &H256, _prefix: Prefix) { self.removes.push(key.clone()); } } impl JournalDB for RefCountedDB { fn boxed_clone(&self) -> Box<dyn JournalDB> { Box::new(RefCountedDB { forward: self.forward.clone(), backing: self.backing.clone(), latest_era: self.latest_era, inserts: self.inserts.clone(), removes: self.removes.clone(), column: self.column.clone(), }) } fn mem_used(&self) -> usize { let mut ops = new_malloc_size_ops(); self.inserts.size_of(&mut ops) + self.removes.size_of(&mut ops) } fn is_empty(&self) -> bool { self.latest_era.is_none() } fn backing(&self) -> &Arc<dyn KeyValueDB> { &self.backing } fn latest_era(&self) -> Option<u64> { self.latest_era } fn state(&self, id: &H256) -> Option<Bytes> { self.backing.get_by_prefix(self.column, &id[0..DB_PREFIX_LEN]).map(|b| b.into_vec()) } fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> io::Result<u32> { // record new commit's details. let mut db_key = DatabaseKey { era: now, index: 0usize, }; let mut last; while self.backing.get(self.column, { last = encode(&db_key); &last })?.is_some() { db_key.index += 1; } { let value_ref = DatabaseValueRef { id, inserts: &self.inserts, deletes: &self.removes, }; batch.put(self.column, &last, &encode(&value_ref)); } let ops = self.inserts.len() + self.removes.len(); trace!(target: "rcdb", "new journal for time #{}.{} => {}: inserts={:?}, removes={:?}", now, db_key.index, id, self.inserts, self.removes); self.inserts.clear(); self.removes.clear(); if self.latest_era.map_or(true, |e| now > e) { batch.put(self.column, &LATEST_ERA_KEY, &encode(&now)); self.latest_era = Some(now); } Ok(ops as u32) } fn mark_canonical(&mut self, batch: &mut DBTransaction, end_era: u64, canon_id: &H256) -> io::Result<u32> { // apply old commits' details let mut db_key = DatabaseKey { era: end_era, index: 0usize, }; let mut last; while let Some(rlp_data) = { self.backing.get(self.column, { last = encode(&db_key); &last })? } { let view = DatabaseValueView::from_rlp(&rlp_data); let our_id = view.id().expect("rlp read from db; qed"); let to_remove = if canon_id == &our_id { view.deletes() } else { view.inserts() }.expect("rlp read from db; qed"); trace!(target: "rcdb", "delete journal for time #{}.{}=>{}, (canon was {}): deleting {:?}", end_era, db_key.index, our_id, canon_id, to_remove); for i in &to_remove { self.forward.remove(i, EMPTY_PREFIX); } batch.delete(self.column, &last); db_key.index += 1; } let r = self.forward.commit_to_batch(batch)?; Ok(r) } fn inject(&mut self, batch: &mut DBTransaction) -> io::Result<u32> { self.inserts.clear(); for remove in self.removes.drain(..) 
{ self.forward.remove(&remove, EMPTY_PREFIX); } self.forward.commit_to_batch(batch) } fn consolidate(&mut self, mut with: super::MemoryDB) { for (key, (value, rc)) in with.drain() { for _ in 0..rc { self.emplace(key, EMPTY_PREFIX, value.clone()); } for _ in rc..0 { self.remove(&key, EMPTY_PREFIX); } } } fn keys(&self) -> HashMap<H256, i32> { self.forward.keys() } } #[cfg(test)] mod tests { use keccak_hash::keccak; use hash_db::{HashDB, EMPTY_PREFIX}; use super::*; use kvdb_memorydb; use crate::{JournalDB, inject_batch, commit_batch}; fn new_db() -> RefCountedDB
#[test] fn long_history() { // history is 3 let mut jdb = new_db(); let h = jdb.insert(EMPTY_PREFIX, b"foo"); commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.contains(&h, EMPTY_PREFIX)); jdb.remove(&h, EMPTY_PREFIX); commit_batch(&mut jdb, 1, &keccak(b"1"), None).unwrap(); assert!(jdb.contains(&h, EMPTY_PREFIX)); commit_batch(&mut jdb, 2, &keccak(b"2"), None).unwrap(); assert!(jdb.contains(&h, EMPTY_PREFIX)); commit_batch(&mut jdb, 3, &keccak(b"3"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.contains(&h, EMPTY_PREFIX)); commit_batch(&mut jdb, 4, &keccak(b"4"), Some((1, keccak(b"1")))).unwrap(); assert!(!jdb.contains(&h, EMPTY_PREFIX)); } #[test] fn latest_era_should_work() { // history is 3 let mut jdb = new_db(); assert_eq!(jdb.latest_era(), None); let h = jdb.insert(EMPTY_PREFIX, b"foo"); commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert_eq!(jdb.latest_era(), Some(0)); jdb.remove(&h, EMPTY_PREFIX); commit_batch(&mut jdb, 1, &keccak(b"1"), None).unwrap(); assert_eq!(jdb.latest_era(), Some(1)); commit_batch(&mut jdb, 2, &keccak(b"2"), None).unwrap(); assert_eq!(jdb.latest_era(), Some(2)); commit_batch(&mut jdb, 3, &keccak(b"3"), Some((0, keccak(b"0")))).unwrap(); assert_eq!(jdb.latest_era(), Some(3)); commit_batch(&mut jdb, 4, &keccak(b"4"), Some((1, keccak(b"1")))).unwrap(); assert_eq!(jdb.latest_era(), Some(4)); } #[test] fn complex() { // history is 1 let mut jdb = new_db(); let foo = jdb.insert(EMPTY_PREFIX, b"foo"); let bar = jdb.insert(EMPTY_PREFIX, b"bar"); commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(jdb.contains(&bar, EMPTY_PREFIX)); jdb.remove(&foo, EMPTY_PREFIX); jdb.remove(&bar, EMPTY_PREFIX); let baz = jdb.insert(EMPTY_PREFIX, b"baz"); commit_batch(&mut jdb, 1, &keccak(b"1"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(jdb.contains(&bar, EMPTY_PREFIX)); assert!(jdb.contains(&baz, EMPTY_PREFIX)); let foo = jdb.insert(EMPTY_PREFIX, b"foo"); jdb.remove(&baz, EMPTY_PREFIX); commit_batch(&mut jdb, 2, &keccak(b"2"), Some((1, keccak(b"1")))).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(!jdb.contains(&bar, EMPTY_PREFIX)); assert!(jdb.contains(&baz, EMPTY_PREFIX)); jdb.remove(&foo, EMPTY_PREFIX); commit_batch(&mut jdb, 3, &keccak(b"3"), Some((2, keccak(b"2")))).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(!jdb.contains(&bar, EMPTY_PREFIX)); assert!(!jdb.contains(&baz, EMPTY_PREFIX)); commit_batch(&mut jdb, 4, &keccak(b"4"), Some((3, keccak(b"3")))).unwrap(); assert!(!jdb.contains(&foo, EMPTY_PREFIX)); assert!(!jdb.contains(&bar, EMPTY_PREFIX)); assert!(!jdb.contains(&baz, EMPTY_PREFIX)); } #[test] fn fork() { // history is 1 let mut jdb = new_db(); let foo = jdb.insert(EMPTY_PREFIX, b"foo"); let bar = jdb.insert(EMPTY_PREFIX, b"bar"); commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(jdb.contains(&bar, EMPTY_PREFIX)); jdb.remove(&foo, EMPTY_PREFIX); let baz = jdb.insert(EMPTY_PREFIX, b"baz"); commit_batch(&mut jdb, 1, &keccak(b"1a"), Some((0, keccak(b"0")))).unwrap(); jdb.remove(&bar, EMPTY_PREFIX); commit_batch(&mut jdb, 1, &keccak(b"1b"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(jdb.contains(&bar, EMPTY_PREFIX)); assert!(jdb.contains(&baz, EMPTY_PREFIX)); commit_batch(&mut jdb, 2, &keccak(b"2b"), Some((1, keccak(b"1b")))).unwrap(); assert!(jdb.contains(&foo, EMPTY_PREFIX)); assert!(!jdb.contains(&baz, 
EMPTY_PREFIX)); assert!(!jdb.contains(&bar, EMPTY_PREFIX)); } #[test] fn inject() { let mut jdb = new_db(); let key = jdb.insert(EMPTY_PREFIX, b"dog"); inject_batch(&mut jdb).unwrap(); assert_eq!(jdb.get(&key, EMPTY_PREFIX).unwrap(), DBValue::from_slice(b"dog")); jdb.remove(&key, EMPTY_PREFIX); inject_batch(&mut jdb).unwrap(); assert!(jdb.get(&key, EMPTY_PREFIX).is_none()); } }
{ let backing = Arc::new(kvdb_memorydb::create(0)); RefCountedDB::new(backing, None) }
identifier_body
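The refcounteddb.rs entry above documents the journal layout ([era, index] => [id, inserts, removes]) and its latent-removal rule: removes only take effect once their era is marked canonical, while non-canonical sibling records have their inserts reverted. Below is a small, self-contained toy model of that rule for illustration only — ToyJournal and its methods are invented names, not the crate's API, and the sketch deliberately ignores the on-disk KeyValueDB batch, RLP encoding, and (era, index) collision handling that the real implementation performs.

use std::collections::HashMap;

#[derive(Default)]
struct ToyJournal {
    // key -> reference count in the "forward" store
    forward: HashMap<&'static str, i32>,
    // one record per commit: (era, id, inserted keys, removed keys)
    journal: Vec<(u64, &'static str, Vec<&'static str>, Vec<&'static str>)>,
}

impl ToyJournal {
    fn commit(&mut self, era: u64, id: &'static str,
              inserts: Vec<&'static str>, removes: Vec<&'static str>) {
        // inserts take effect immediately; removes are only journalled
        for &k in &inserts {
            *self.forward.entry(k).or_insert(0) += 1;
        }
        self.journal.push((era, id, inserts, removes));
    }

    fn mark_canonical(&mut self, end_era: u64, canon_id: &str) {
        for (era, id, inserts, removes) in &self.journal {
            if *era != end_era {
                continue;
            }
            // canonical record: apply its removes; non-canonical: revert its inserts
            let to_remove = if *id == canon_id { removes } else { inserts };
            for &k in to_remove {
                *self.forward.entry(k).or_insert(0) -= 1;
            }
        }
        self.journal.retain(|(era, ..)| *era != end_era);
    }

    fn contains(&self, key: &str) -> bool {
        self.forward.get(key).map_or(false, |rc| *rc > 0)
    }
}

fn main() {
    let mut j = ToyJournal::default();
    j.commit(0, "block-0", vec!["foo"], vec![]);
    j.commit(1, "block-1", vec![], vec!["foo"]); // removal journalled, not yet applied
    assert!(j.contains("foo"));
    j.mark_canonical(0, "block-0"); // era 0 had no removes, "foo" survives
    assert!(j.contains("foo"));
    j.mark_canonical(1, "block-1"); // now the latent removal takes effect
    assert!(!j.contains("foo"));
    println!("latent removal applied only after its era became canonical");
}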
main.rs
use std::collections::HashMap; fn main() { let test_one_input = "Today is Monday"; let max_chars = one(test_one_input); println!("1) the most of a char(first appearing) in '{}' is '{}', appearing {} times", test_one_input, max_chars.0, max_chars.1); let test_two_input = "supracalafragalisticexpealadocious"; let test_two_output = two(test_two_input); println!("2) all subsequent duplicates removed of '{}' is '{}'", test_two_input, test_two_output); let test_three_input = "supracalafragalisticexpealadocious"; let test_three_output = three(test_three_input); println!("3) duplicates of '{}' is '{}'", test_three_input, test_three_output); let test_four_input_1 = "spain"; let test_four_input_2 = "the rain in spain falls mainly in the plain"; let test_four_output = four(test_four_input_1, test_four_input_2); println!("4) characters of '{}' removed from '{}' yields '{}'", test_four_input_1, test_four_input_2, test_four_output); let test_five_input_1 = "XaXbXcXdXe"; let test_five_input_2 = "XcXdXeXaXb"; let test_five_output = five(test_five_input_1, test_five_input_2); let mut maybe = ""; if!test_five_output { maybe = "not "; } println!("5) '{}' is a {}rotation of '{}'", test_five_input_1, maybe, test_five_input_2); let test_six_input = "abracadabra"; let test_six_output = six(test_six_input); println!("6) '{}' reversed is '{}'", test_six_input, test_six_output); let test_seven_input = "123"; let test_seven_output = seven("", test_seven_input); println!("7) '{}' reversed is '{}'", test_seven_input, test_seven_output); let test_eight_input = "012"; let test_eight_output = eight(test_eight_input); println!("8) '{}' has {} permutations {:?}", test_eight_input, test_eight_output.len(), test_eight_output); let test_nine_input = "uprasupradupra"; let test_nine_output = nine(test_nine_input); println!("9) the first unrepeated char in '{}' is '{}'", test_nine_input, test_nine_output); let test_ten_input = "best is Rust"; let test_ten_output = ten(test_ten_input); println!("10) reversed sentence '{}' is '{}'", test_ten_input, test_ten_output); let test_eleven_input1 = "this is a test string"; let test_eleven_input2 = "tist"; let test_eleven_output = eleven(test_eleven_input1, test_eleven_input2); println!("11) smallest substring '{}' inside of '{}' is '{}'", test_eleven_input2, test_eleven_input1, test_eleven_output); let test_twelve_input1 = "Army"; let test_twelve_input2 = "Mary"; let test_twelve_output = twelve(test_twelve_input1, test_twelve_input2); maybe = ""; if!test_twelve_output { maybe = "not "; } println!("12) '{}' is {}an anagram of '{}'", test_twelve_input1, maybe, test_twelve_input2); let test_thirteen_input1 = "Racecar"; let test_thirteen_output = thirteen(test_thirteen_input1); maybe = ""; if!test_thirteen_output { maybe = "not "; } println!("13) '{}' is {}a palindrome of '{}'", test_thirteen_input1, maybe, test_thirteen_output); let test_fourteen_input = "-123000"; let test_fourteen_output = fourteen(test_fourteen_input); println!("14) string '{}' is the isize {}", test_fourteen_input, test_fourteen_output); let test_fifteen_input = "MDCCLXXVI"; let test_fifteen_output = fifteen(test_fifteen_input); println!("15) roman number '{}' equals arabic number {}", test_fifteen_input, test_fifteen_output); let test_eighteen_input = "\tthe rain in spain falls\n\tmainly in the plain \n "; let test_eighteen_output = eighteen(test_eighteen_input); println!("18) '{}' has {} words", test_eighteen_input, test_eighteen_output); let test_nineteen_input = "([{}][]([]()){})"; let test_nineteen_output = 
nineteen(test_nineteen_input); maybe = "has"; if!test_nineteen_output { maybe = "does not have"; } println!("19) '{}' {} balanced parens", test_nineteen_input, maybe); let test_twenty_input = "the rain in spain red rum sir is murder falls sometimes on the racecar but mainly in the plains"; let test_twenty_output = twenty(test_twenty_input); println!("20) '{}' has '{}' as the longest internal palindrome", test_twenty_input, test_twenty_output); let test_twentyone_input = 1776; //;"MDCCLXXVI"; let test_twentyone_output = twentyone(test_twentyone_input); println!("21) arabic number '{}' equals roman number {}", test_twentyone_input, test_twentyone_output); } fn one(input: &str) -> (char, i32) { // return the char that appears most and it's count // the first occurence of let mut counts = HashMap::new(); let mut max_char = 'z'; //input.chars().next().unwrap(); let mut max_value = 0; input.chars().rev().for_each( | c | { if counts.contains_key(&c) { let next_total = counts.get(&c).unwrap() + 1; if next_total >= max_value { max_char = c; max_value = next_total; } counts.insert( c, next_total); } else { counts.insert( c, 1); } }); (max_char, max_value) } fn two(input: &str) -> String { let mut r = String::new(); let mut s = std::collections::HashSet::new(); input.chars().for_each( |c | { if!s.contains(&c) { s.insert(c); r.push(c); } }); r } fn three(input: &str) -> String { let mut r = String::new(); let mut s = std::collections::HashSet::new(); input.chars().for_each( |c | { if!s.contains(&c) { s.insert(c); } else { if!r.contains(c) { r.push(c); } } }); r } fn four(input1: &str, input2: &str) -> String { let mut r = String::new(); let mut p = 'z'; input2.chars().for_each( |c | { if!input1.contains(c) { if!(c =='' && p =='') { r.push(c); p = c; } } }); r } fn five(input1: &str, input2: &str) -> bool { let doubleup = format!("{}{}", input1, input1); doubleup.contains(input2) } fn six(input: &str) -> String { let mut r = String::new(); input.chars().for_each( | c | { r = format!("{}{}", c, r); }); r } fn seven(i1: &str, i2: &str) -> String { let mut r2 = String::from(i2); if i1.len() == 0 { return r2; } r2.push(i1.chars().last().unwrap()); let size_minus_one = i1.len() - 1; let r1 = &i1[..size_minus_one]; return seven(&r1, &r2); } fn eight(i: &str) -> Vec<String> { let mut r = vec!(); if i.len() == 1 { r.push(String::from(i)); return r; } for idx in 0..i.len() { let front = &i[0..idx]; let char = &i[idx..idx+1]; let end = &i[idx+1..]; let without = format!("{}{}", front, end); let subperms = eight(&without); for sp in subperms { r.push(format!("{}{}", char, sp)); } } r } fn nine(i: &str) -> char { for e in i.chars() { let mut count = 0; for se in i.chars() { if se == e { count = count + 1; } } if count == 1 { return e; } } '\0' } fn ten(i: &str) -> String { let mut r = String::new(); let mut is_first = true; for each in i.split(" ") { if is_first { is_first = false; r = String::from(each); } else { r = format!("{} {}", each, r); } } r } fn eleven(i1: &str, i2: &str) -> String { let mut solutions: Vec<String> = vec!(); // tuples of (char, original index of char in the input string) let mut pairs: Vec<(char, usize)> = i1.chars().enumerate().map( |e | -> (char, usize) { (e.1, e.0) }).filter(|p| i2.contains(p.0)).collect(); // println!("{:?}", pairs); //iterate the input string from left to right for _i in 0..pairs.len() { // p will be the match that we remove characters from // if p becomes empty we know we've matched let mut p = String::from(i2.clone()); // remember the first match from p as the head 
let mut head: Option<(char, usize)> = None; // remember the final match/char from p as the tail let mut tail: Option<(char, usize)> = None; // lets iterate over our pairs of (char, index) for e in &pairs { // if the pair is in p, // remove the character from p and // try and set the head and tail if p.contains(e.0) { p = p.replacen(e.0, "", 1); match head { None => { head = Some(*e) }, Some(_) if p.is_empty() => { tail = Some(*e); break }, Some(_) => {} } } } // if we found all the characters in i2 // we have a match, so head and tail will be populated // chop the string out of i1 and submit it as a solution if head!= None && tail!= None { let h = head.unwrap(); let t = tail.unwrap(); let solution = String::from(&i1[h.1..=t.1]); solutions.push(solution); } // remove the front character, and iterate again pairs.remove(0); } // println!("{:?}", solutions); // find the shortest solution let shortest = solutions.iter().fold(solutions[0].clone(), |acc, item| { if item.len() < acc.len() { item.clone() } else { acc } }); shortest } fn twelve(i1: &str, i2: &str) -> bool{ let i1 = i1.replace(" ", "").to_lowercase(); let mut i2 = i2.replace(" ", "").to_lowercase(); if i1.len()!= i2.len() { return false; } for c in i1.chars() { i2 = i2.replacen(c, "", 1); } let r = i2.len() == 0; r } fn thirteen(i1: &str) -> bool { let i1 = i1.replace(" ", "").to_lowercase(); let i2 = i1.replace(" ", "").to_lowercase().chars().rev().collect::<String>(); let r = i1 == i2; r } fn fourteen(i: &str) -> isize
fn fifteen(i: &str) -> isize { let mut i = i.to_uppercase(); let mut r = 0; let mut to_long = 0; while i.len() > 0 { for (rn, an) in ROMANS.iter().rev() { if i.starts_with(rn) { r = r + an; i = i.replacen(rn,"",1); break; } } to_long = to_long + 1; if to_long > 20 { return -1 } } r } const ROMANS: [(&str, isize); 30] = [ ("C", 100), ("X", 10), ("I", 1), ("CC", 200), ("XX", 20), ("II", 2), ("CCC", 300), ("XXX", 30), ("III", 3), ("CD", 400), ("XL", 40), ("IV", 4), ("D", 500), ("L", 50), ("V", 5), ("DC", 600), ("LX", 60), ("VI", 6), ("DCC", 700), ("LXX", 70), ("VII", 7), ("DCCC", 800), ("LXXX", 80), ("VIII", 8), ("CM", 900), ("XC", 90), ("IX", 9), ("M", 1000), ("MM", 2000), ("MMM", 3000)]; fn eighteen(i: &str) -> isize { let mut i = String::from(i.trim()); i = i.replace("\n", " "); i = i.replace("\t", " "); i = i.replace(" ", " "); i = i.replace(" ", " "); i = i.replace(" ", " "); i = i.replace(" ", " "); let count = i.split(' ').count() as isize; count } fn nineteen(i: &str) -> bool { let mut s:Vec<char> = vec!(); for c in i.chars() { match c { '(' => s.push('('), '[' => s.push('['), '{' => s.push('{'), ')' => if '('!= s.pop().unwrap() { return false }, ']' => if '['!= s.pop().unwrap() { return false }, '}' => if '{'!= s.pop().unwrap() { return false }, _ => {} } } true } fn twenty(i: &str) -> String { let mut solutions = vec!(); for n in 2..(i.len()-1) { let e = palindrome_at(i, n); solutions.push(e); } // find the longest solution let longest = solutions.iter().fold( solutions[0].clone(), |acc, item| { if item.len() > acc.len() { item.clone() } else { acc } } ); longest } fn palindrome_at(input: &str, s: usize) -> String { let i:Vec<(char, usize)> = input.chars().enumerate() .map(|(i,c)| (c, i)) .filter(|p| p.0!='') .collect(); let m = i.len(); let fs = std::cmp::min( i.len() - 2, std::cmp::max(1,s)); let mut l = fs; let mut r = fs; if i[l].0!= i[r].0 { // we are not the same assume a center "pivot" character center r = r + 1; } while l > 0 && r < m && i[l].0 == i[r].0 { l = l - 1; r = r + 1; } l = std::cmp::max(0, l); r = std::cmp::min(i.len() - 1, r); let begin = i[l+1].1; let end = std::cmp::min(input.len() - 1,i[r].1); let result = String::from(&input[begin..end]); result } fn twentyone(i: isize) -> String { let s = format!("{}", i); let mut r = String::from(""); let mut mult = 1; for c in s.chars().rev() { let num = mult * c.to_digit(10).unwrap() as isize; let p = ROMANS.iter().find(|p| p.1 == num); match p { Some((rn, _v)) => { r = format!("{}{}", rn, r); }, _ => {}, } mult = mult * 10; } r }
{ let mut i = String::from(i); let is_negative = i.contains('-'); if is_negative { i = i.replace("-", ""); } let mut r = 0; for c in i.chars() { let d = c.to_digit(10).unwrap(); r = d + (r * 10); } let mut r = r as isize; if is_negative { r = r * -1; } r }
identifier_body
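The identifier_body answer above (the body of fourteen) accumulates decimal digits by hand and handles the sign separately. As a quick sanity check, here is a standalone restatement of the same accumulation, cross-checked against the standard library's str::parse; the helper name parse_isize_manual is not taken from the file above.

fn parse_isize_manual(i: &str) -> isize {
    // same idea as `fourteen`: strip an optional sign, then r = digit + r * 10
    let is_negative = i.starts_with('-');
    let digits = i.trim_start_matches('-');
    let mut r: isize = 0;
    for c in digits.chars() {
        r = c.to_digit(10).expect("expected a decimal digit") as isize + r * 10;
    }
    if is_negative { -r } else { r }
}

fn main() {
    // the manual accumulation agrees with the standard library parser
    assert_eq!(parse_isize_manual("-123000"), "-123000".parse::<isize>().unwrap());
    assert_eq!(parse_isize_manual("42"), 42);
    println!("manual parse matches str::parse");
}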
main.rs
use std::collections::HashMap; fn main() { let test_one_input = "Today is Monday"; let max_chars = one(test_one_input); println!("1) the most of a char(first appearing) in '{}' is '{}', appearing {} times", test_one_input, max_chars.0, max_chars.1); let test_two_input = "supracalafragalisticexpealadocious"; let test_two_output = two(test_two_input); println!("2) all subsequent duplicates removed of '{}' is '{}'", test_two_input, test_two_output); let test_three_input = "supracalafragalisticexpealadocious"; let test_three_output = three(test_three_input); println!("3) duplicates of '{}' is '{}'", test_three_input, test_three_output); let test_four_input_1 = "spain"; let test_four_input_2 = "the rain in spain falls mainly in the plain"; let test_four_output = four(test_four_input_1, test_four_input_2); println!("4) characters of '{}' removed from '{}' yields '{}'", test_four_input_1, test_four_input_2, test_four_output); let test_five_input_1 = "XaXbXcXdXe"; let test_five_input_2 = "XcXdXeXaXb"; let test_five_output = five(test_five_input_1, test_five_input_2); let mut maybe = ""; if!test_five_output { maybe = "not "; } println!("5) '{}' is a {}rotation of '{}'", test_five_input_1, maybe, test_five_input_2); let test_six_input = "abracadabra"; let test_six_output = six(test_six_input); println!("6) '{}' reversed is '{}'", test_six_input, test_six_output); let test_seven_input = "123"; let test_seven_output = seven("", test_seven_input); println!("7) '{}' reversed is '{}'", test_seven_input, test_seven_output); let test_eight_input = "012"; let test_eight_output = eight(test_eight_input);
println!("9) the first unrepeated char in '{}' is '{}'", test_nine_input, test_nine_output); let test_ten_input = "best is Rust"; let test_ten_output = ten(test_ten_input); println!("10) reversed sentence '{}' is '{}'", test_ten_input, test_ten_output); let test_eleven_input1 = "this is a test string"; let test_eleven_input2 = "tist"; let test_eleven_output = eleven(test_eleven_input1, test_eleven_input2); println!("11) smallest substring '{}' inside of '{}' is '{}'", test_eleven_input2, test_eleven_input1, test_eleven_output); let test_twelve_input1 = "Army"; let test_twelve_input2 = "Mary"; let test_twelve_output = twelve(test_twelve_input1, test_twelve_input2); maybe = ""; if!test_twelve_output { maybe = "not "; } println!("12) '{}' is {}an anagram of '{}'", test_twelve_input1, maybe, test_twelve_input2); let test_thirteen_input1 = "Racecar"; let test_thirteen_output = thirteen(test_thirteen_input1); maybe = ""; if!test_thirteen_output { maybe = "not "; } println!("13) '{}' is {}a palindrome of '{}'", test_thirteen_input1, maybe, test_thirteen_output); let test_fourteen_input = "-123000"; let test_fourteen_output = fourteen(test_fourteen_input); println!("14) string '{}' is the isize {}", test_fourteen_input, test_fourteen_output); let test_fifteen_input = "MDCCLXXVI"; let test_fifteen_output = fifteen(test_fifteen_input); println!("15) roman number '{}' equals arabic number {}", test_fifteen_input, test_fifteen_output); let test_eighteen_input = "\tthe rain in spain falls\n\tmainly in the plain \n "; let test_eighteen_output = eighteen(test_eighteen_input); println!("18) '{}' has {} words", test_eighteen_input, test_eighteen_output); let test_nineteen_input = "([{}][]([]()){})"; let test_nineteen_output = nineteen(test_nineteen_input); maybe = "has"; if!test_nineteen_output { maybe = "does not have"; } println!("19) '{}' {} balanced parens", test_nineteen_input, maybe); let test_twenty_input = "the rain in spain red rum sir is murder falls sometimes on the racecar but mainly in the plains"; let test_twenty_output = twenty(test_twenty_input); println!("20) '{}' has '{}' as the longest internal palindrome", test_twenty_input, test_twenty_output); let test_twentyone_input = 1776; //;"MDCCLXXVI"; let test_twentyone_output = twentyone(test_twentyone_input); println!("21) arabic number '{}' equals roman number {}", test_twentyone_input, test_twentyone_output); } fn one(input: &str) -> (char, i32) { // return the char that appears most and it's count // the first occurence of let mut counts = HashMap::new(); let mut max_char = 'z'; //input.chars().next().unwrap(); let mut max_value = 0; input.chars().rev().for_each( | c | { if counts.contains_key(&c) { let next_total = counts.get(&c).unwrap() + 1; if next_total >= max_value { max_char = c; max_value = next_total; } counts.insert( c, next_total); } else { counts.insert( c, 1); } }); (max_char, max_value) } fn two(input: &str) -> String { let mut r = String::new(); let mut s = std::collections::HashSet::new(); input.chars().for_each( |c | { if!s.contains(&c) { s.insert(c); r.push(c); } }); r } fn three(input: &str) -> String { let mut r = String::new(); let mut s = std::collections::HashSet::new(); input.chars().for_each( |c | { if!s.contains(&c) { s.insert(c); } else { if!r.contains(c) { r.push(c); } } }); r } fn four(input1: &str, input2: &str) -> String { let mut r = String::new(); let mut p = 'z'; input2.chars().for_each( |c | { if!input1.contains(c) { if!(c =='' && p =='') { r.push(c); p = c; } } }); r } fn five(input1: &str, input2: &str) 
-> bool { let doubleup = format!("{}{}", input1, input1); doubleup.contains(input2) } fn six(input: &str) -> String { let mut r = String::new(); input.chars().for_each( | c | { r = format!("{}{}", c, r); }); r } fn seven(i1: &str, i2: &str) -> String { let mut r2 = String::from(i2); if i1.len() == 0 { return r2; } r2.push(i1.chars().last().unwrap()); let size_minus_one = i1.len() - 1; let r1 = &i1[..size_minus_one]; return seven(&r1, &r2); } fn eight(i: &str) -> Vec<String> { let mut r = vec!(); if i.len() == 1 { r.push(String::from(i)); return r; } for idx in 0..i.len() { let front = &i[0..idx]; let char = &i[idx..idx+1]; let end = &i[idx+1..]; let without = format!("{}{}", front, end); let subperms = eight(&without); for sp in subperms { r.push(format!("{}{}", char, sp)); } } r } fn nine(i: &str) -> char { for e in i.chars() { let mut count = 0; for se in i.chars() { if se == e { count = count + 1; } } if count == 1 { return e; } } '\0' } fn ten(i: &str) -> String { let mut r = String::new(); let mut is_first = true; for each in i.split(" ") { if is_first { is_first = false; r = String::from(each); } else { r = format!("{} {}", each, r); } } r } fn eleven(i1: &str, i2: &str) -> String { let mut solutions: Vec<String> = vec!(); // tuples of (char, original index of char in the input string) let mut pairs: Vec<(char, usize)> = i1.chars().enumerate().map( |e | -> (char, usize) { (e.1, e.0) }).filter(|p| i2.contains(p.0)).collect(); // println!("{:?}", pairs); //iterate the input string from left to right for _i in 0..pairs.len() { // p will be the match that we remove characters from // if p becomes empty we know we've matched let mut p = String::from(i2.clone()); // remember the first match from p as the head let mut head: Option<(char, usize)> = None; // remember the final match/char from p as the tail let mut tail: Option<(char, usize)> = None; // lets iterate over our pairs of (char, index) for e in &pairs { // if the pair is in p, // remove the character from p and // try and set the head and tail if p.contains(e.0) { p = p.replacen(e.0, "", 1); match head { None => { head = Some(*e) }, Some(_) if p.is_empty() => { tail = Some(*e); break }, Some(_) => {} } } } // if we found all the characters in i2 // we have a match, so head and tail will be populated // chop the string out of i1 and submit it as a solution if head!= None && tail!= None { let h = head.unwrap(); let t = tail.unwrap(); let solution = String::from(&i1[h.1..=t.1]); solutions.push(solution); } // remove the front character, and iterate again pairs.remove(0); } // println!("{:?}", solutions); // find the shortest solution let shortest = solutions.iter().fold(solutions[0].clone(), |acc, item| { if item.len() < acc.len() { item.clone() } else { acc } }); shortest } fn twelve(i1: &str, i2: &str) -> bool{ let i1 = i1.replace(" ", "").to_lowercase(); let mut i2 = i2.replace(" ", "").to_lowercase(); if i1.len()!= i2.len() { return false; } for c in i1.chars() { i2 = i2.replacen(c, "", 1); } let r = i2.len() == 0; r } fn thirteen(i1: &str) -> bool { let i1 = i1.replace(" ", "").to_lowercase(); let i2 = i1.replace(" ", "").to_lowercase().chars().rev().collect::<String>(); let r = i1 == i2; r } fn fourteen(i: &str) -> isize { let mut i = String::from(i); let is_negative = i.contains('-'); if is_negative { i = i.replace("-", ""); } let mut r = 0; for c in i.chars() { let d = c.to_digit(10).unwrap(); r = d + (r * 10); } let mut r = r as isize; if is_negative { r = r * -1; } r } fn fifteen(i: &str) -> isize { let mut i = 
i.to_uppercase(); let mut r = 0; let mut to_long = 0; while i.len() > 0 { for (rn, an) in ROMANS.iter().rev() { if i.starts_with(rn) { r = r + an; i = i.replacen(rn,"",1); break; } } to_long = to_long + 1; if to_long > 20 { return -1 } } r } const ROMANS: [(&str, isize); 30] = [ ("C", 100), ("X", 10), ("I", 1), ("CC", 200), ("XX", 20), ("II", 2), ("CCC", 300), ("XXX", 30), ("III", 3), ("CD", 400), ("XL", 40), ("IV", 4), ("D", 500), ("L", 50), ("V", 5), ("DC", 600), ("LX", 60), ("VI", 6), ("DCC", 700), ("LXX", 70), ("VII", 7), ("DCCC", 800), ("LXXX", 80), ("VIII", 8), ("CM", 900), ("XC", 90), ("IX", 9), ("M", 1000), ("MM", 2000), ("MMM", 3000)]; fn eighteen(i: &str) -> isize { let mut i = String::from(i.trim()); i = i.replace("\n", " "); i = i.replace("\t", " "); i = i.replace(" ", " "); i = i.replace(" ", " "); i = i.replace(" ", " "); i = i.replace(" ", " "); let count = i.split(' ').count() as isize; count } fn nineteen(i: &str) -> bool { let mut s:Vec<char> = vec!(); for c in i.chars() { match c { '(' => s.push('('), '[' => s.push('['), '{' => s.push('{'), ')' => if '('!= s.pop().unwrap() { return false }, ']' => if '['!= s.pop().unwrap() { return false }, '}' => if '{'!= s.pop().unwrap() { return false }, _ => {} } } true } fn twenty(i: &str) -> String { let mut solutions = vec!(); for n in 2..(i.len()-1) { let e = palindrome_at(i, n); solutions.push(e); } // find the longest solution let longest = solutions.iter().fold( solutions[0].clone(), |acc, item| { if item.len() > acc.len() { item.clone() } else { acc } } ); longest } fn palindrome_at(input: &str, s: usize) -> String { let i:Vec<(char, usize)> = input.chars().enumerate() .map(|(i,c)| (c, i)) .filter(|p| p.0!='') .collect(); let m = i.len(); let fs = std::cmp::min( i.len() - 2, std::cmp::max(1,s)); let mut l = fs; let mut r = fs; if i[l].0!= i[r].0 { // we are not the same assume a center "pivot" character center r = r + 1; } while l > 0 && r < m && i[l].0 == i[r].0 { l = l - 1; r = r + 1; } l = std::cmp::max(0, l); r = std::cmp::min(i.len() - 1, r); let begin = i[l+1].1; let end = std::cmp::min(input.len() - 1,i[r].1); let result = String::from(&input[begin..end]); result } fn twentyone(i: isize) -> String { let s = format!("{}", i); let mut r = String::from(""); let mut mult = 1; for c in s.chars().rev() { let num = mult * c.to_digit(10).unwrap() as isize; let p = ROMANS.iter().find(|p| p.1 == num); match p { Some((rn, _v)) => { r = format!("{}{}", rn, r); }, _ => {}, } mult = mult * 10; } r }
println!("8) '{}' has {} permutations {:?}", test_eight_input, test_eight_output.len(), test_eight_output); let test_nine_input = "uprasupradupra"; let test_nine_output = nine(test_nine_input);
random_line_split
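The main.rs entries above check string rotation in `five` by doubling the first string and searching for the second inside it. Here is a standalone sketch of that trick with an explicit length check added (the version above relies on the caller passing equal-length strings); is_rotation is an illustrative name, not taken from the file.

fn is_rotation(s1: &str, s2: &str) -> bool {
    // s2 is a rotation of s1 iff both have the same length and s1+s1 contains s2
    s1.len() == s2.len() && format!("{}{}", s1, s1).contains(s2)
}

fn main() {
    assert!(is_rotation("XaXbXcXdXe", "XcXdXeXaXb")); // same pair as in main() above
    assert!(!is_rotation("XaXbXcXdXe", "XeXdXcXbXa")); // a reversal is not a rotation
    println!("rotation check ok");
}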
main.rs
use std::collections::HashMap; fn main() { let test_one_input = "Today is Monday"; let max_chars = one(test_one_input); println!("1) the most of a char(first appearing) in '{}' is '{}', appearing {} times", test_one_input, max_chars.0, max_chars.1); let test_two_input = "supracalafragalisticexpealadocious"; let test_two_output = two(test_two_input); println!("2) all subsequent duplicates removed of '{}' is '{}'", test_two_input, test_two_output); let test_three_input = "supracalafragalisticexpealadocious"; let test_three_output = three(test_three_input); println!("3) duplicates of '{}' is '{}'", test_three_input, test_three_output); let test_four_input_1 = "spain"; let test_four_input_2 = "the rain in spain falls mainly in the plain"; let test_four_output = four(test_four_input_1, test_four_input_2); println!("4) characters of '{}' removed from '{}' yields '{}'", test_four_input_1, test_four_input_2, test_four_output); let test_five_input_1 = "XaXbXcXdXe"; let test_five_input_2 = "XcXdXeXaXb"; let test_five_output = five(test_five_input_1, test_five_input_2); let mut maybe = ""; if!test_five_output { maybe = "not "; } println!("5) '{}' is a {}rotation of '{}'", test_five_input_1, maybe, test_five_input_2); let test_six_input = "abracadabra"; let test_six_output = six(test_six_input); println!("6) '{}' reversed is '{}'", test_six_input, test_six_output); let test_seven_input = "123"; let test_seven_output = seven("", test_seven_input); println!("7) '{}' reversed is '{}'", test_seven_input, test_seven_output); let test_eight_input = "012"; let test_eight_output = eight(test_eight_input); println!("8) '{}' has {} permutations {:?}", test_eight_input, test_eight_output.len(), test_eight_output); let test_nine_input = "uprasupradupra"; let test_nine_output = nine(test_nine_input); println!("9) the first unrepeated char in '{}' is '{}'", test_nine_input, test_nine_output); let test_ten_input = "best is Rust"; let test_ten_output = ten(test_ten_input); println!("10) reversed sentence '{}' is '{}'", test_ten_input, test_ten_output); let test_eleven_input1 = "this is a test string"; let test_eleven_input2 = "tist"; let test_eleven_output = eleven(test_eleven_input1, test_eleven_input2); println!("11) smallest substring '{}' inside of '{}' is '{}'", test_eleven_input2, test_eleven_input1, test_eleven_output); let test_twelve_input1 = "Army"; let test_twelve_input2 = "Mary"; let test_twelve_output = twelve(test_twelve_input1, test_twelve_input2); maybe = ""; if!test_twelve_output { maybe = "not "; } println!("12) '{}' is {}an anagram of '{}'", test_twelve_input1, maybe, test_twelve_input2); let test_thirteen_input1 = "Racecar"; let test_thirteen_output = thirteen(test_thirteen_input1); maybe = ""; if!test_thirteen_output { maybe = "not "; } println!("13) '{}' is {}a palindrome of '{}'", test_thirteen_input1, maybe, test_thirteen_output); let test_fourteen_input = "-123000"; let test_fourteen_output = fourteen(test_fourteen_input); println!("14) string '{}' is the isize {}", test_fourteen_input, test_fourteen_output); let test_fifteen_input = "MDCCLXXVI"; let test_fifteen_output = fifteen(test_fifteen_input); println!("15) roman number '{}' equals arabic number {}", test_fifteen_input, test_fifteen_output); let test_eighteen_input = "\tthe rain in spain falls\n\tmainly in the plain \n "; let test_eighteen_output = eighteen(test_eighteen_input); println!("18) '{}' has {} words", test_eighteen_input, test_eighteen_output); let test_nineteen_input = "([{}][]([]()){})"; let test_nineteen_output = 
nineteen(test_nineteen_input); maybe = "has"; if!test_nineteen_output { maybe = "does not have"; } println!("19) '{}' {} balanced parens", test_nineteen_input, maybe); let test_twenty_input = "the rain in spain red rum sir is murder falls sometimes on the racecar but mainly in the plains"; let test_twenty_output = twenty(test_twenty_input); println!("20) '{}' has '{}' as the longest internal palindrome", test_twenty_input, test_twenty_output); let test_twentyone_input = 1776; //;"MDCCLXXVI"; let test_twentyone_output = twentyone(test_twentyone_input); println!("21) arabic number '{}' equals roman number {}", test_twentyone_input, test_twentyone_output); } fn one(input: &str) -> (char, i32) { // return the char that appears most and it's count // the first occurence of let mut counts = HashMap::new(); let mut max_char = 'z'; //input.chars().next().unwrap(); let mut max_value = 0; input.chars().rev().for_each( | c | { if counts.contains_key(&c) { let next_total = counts.get(&c).unwrap() + 1; if next_total >= max_value { max_char = c; max_value = next_total; } counts.insert( c, next_total); } else { counts.insert( c, 1); } }); (max_char, max_value) } fn two(input: &str) -> String { let mut r = String::new(); let mut s = std::collections::HashSet::new(); input.chars().for_each( |c | { if!s.contains(&c) { s.insert(c); r.push(c); } }); r } fn three(input: &str) -> String { let mut r = String::new(); let mut s = std::collections::HashSet::new(); input.chars().for_each( |c | { if!s.contains(&c) { s.insert(c); } else { if!r.contains(c) { r.push(c); } } }); r } fn four(input1: &str, input2: &str) -> String { let mut r = String::new(); let mut p = 'z'; input2.chars().for_each( |c | { if!input1.contains(c) { if!(c =='' && p =='') { r.push(c); p = c; } } }); r } fn five(input1: &str, input2: &str) -> bool { let doubleup = format!("{}{}", input1, input1); doubleup.contains(input2) } fn six(input: &str) -> String { let mut r = String::new(); input.chars().for_each( | c | { r = format!("{}{}", c, r); }); r } fn seven(i1: &str, i2: &str) -> String { let mut r2 = String::from(i2); if i1.len() == 0 { return r2; } r2.push(i1.chars().last().unwrap()); let size_minus_one = i1.len() - 1; let r1 = &i1[..size_minus_one]; return seven(&r1, &r2); } fn eight(i: &str) -> Vec<String> { let mut r = vec!(); if i.len() == 1 { r.push(String::from(i)); return r; } for idx in 0..i.len() { let front = &i[0..idx]; let char = &i[idx..idx+1]; let end = &i[idx+1..]; let without = format!("{}{}", front, end); let subperms = eight(&without); for sp in subperms { r.push(format!("{}{}", char, sp)); } } r } fn nine(i: &str) -> char { for e in i.chars() { let mut count = 0; for se in i.chars() { if se == e { count = count + 1; } } if count == 1 { return e; } } '\0' } fn ten(i: &str) -> String { let mut r = String::new(); let mut is_first = true; for each in i.split(" ") { if is_first { is_first = false; r = String::from(each); } else { r = format!("{} {}", each, r); } } r } fn eleven(i1: &str, i2: &str) -> String { let mut solutions: Vec<String> = vec!(); // tuples of (char, original index of char in the input string) let mut pairs: Vec<(char, usize)> = i1.chars().enumerate().map( |e | -> (char, usize) { (e.1, e.0) }).filter(|p| i2.contains(p.0)).collect(); // println!("{:?}", pairs); //iterate the input string from left to right for _i in 0..pairs.len() { // p will be the match that we remove characters from // if p becomes empty we know we've matched let mut p = String::from(i2.clone()); // remember the first match from p as the head 
let mut head: Option<(char, usize)> = None; // remember the final match/char from p as the tail let mut tail: Option<(char, usize)> = None; // lets iterate over our pairs of (char, index) for e in &pairs { // if the pair is in p, // remove the character from p and // try and set the head and tail if p.contains(e.0) { p = p.replacen(e.0, "", 1); match head { None => { head = Some(*e) }, Some(_) if p.is_empty() => { tail = Some(*e); break }, Some(_) => {} } } } // if we found all the characters in i2 // we have a match, so head and tail will be populated // chop the string out of i1 and submit it as a solution if head!= None && tail!= None { let h = head.unwrap(); let t = tail.unwrap(); let solution = String::from(&i1[h.1..=t.1]); solutions.push(solution); } // remove the front character, and iterate again pairs.remove(0); } // println!("{:?}", solutions); // find the shortest solution let shortest = solutions.iter().fold(solutions[0].clone(), |acc, item| { if item.len() < acc.len() { item.clone() } else { acc } }); shortest } fn twelve(i1: &str, i2: &str) -> bool{ let i1 = i1.replace(" ", "").to_lowercase(); let mut i2 = i2.replace(" ", "").to_lowercase(); if i1.len()!= i2.len() { return false; } for c in i1.chars() { i2 = i2.replacen(c, "", 1); } let r = i2.len() == 0; r } fn thirteen(i1: &str) -> bool { let i1 = i1.replace(" ", "").to_lowercase(); let i2 = i1.replace(" ", "").to_lowercase().chars().rev().collect::<String>(); let r = i1 == i2; r } fn fourteen(i: &str) -> isize { let mut i = String::from(i); let is_negative = i.contains('-'); if is_negative { i = i.replace("-", ""); } let mut r = 0; for c in i.chars() { let d = c.to_digit(10).unwrap(); r = d + (r * 10); } let mut r = r as isize; if is_negative { r = r * -1; } r } fn fifteen(i: &str) -> isize { let mut i = i.to_uppercase(); let mut r = 0; let mut to_long = 0; while i.len() > 0 { for (rn, an) in ROMANS.iter().rev() { if i.starts_with(rn) { r = r + an; i = i.replacen(rn,"",1); break; } } to_long = to_long + 1; if to_long > 20 { return -1 } } r } const ROMANS: [(&str, isize); 30] = [ ("C", 100), ("X", 10), ("I", 1), ("CC", 200), ("XX", 20), ("II", 2), ("CCC", 300), ("XXX", 30), ("III", 3), ("CD", 400), ("XL", 40), ("IV", 4), ("D", 500), ("L", 50), ("V", 5), ("DC", 600), ("LX", 60), ("VI", 6), ("DCC", 700), ("LXX", 70), ("VII", 7), ("DCCC", 800), ("LXXX", 80), ("VIII", 8), ("CM", 900), ("XC", 90), ("IX", 9), ("M", 1000), ("MM", 2000), ("MMM", 3000)]; fn eighteen(i: &str) -> isize { let mut i = String::from(i.trim()); i = i.replace("\n", " "); i = i.replace("\t", " "); i = i.replace(" ", " "); i = i.replace(" ", " "); i = i.replace(" ", " "); i = i.replace(" ", " "); let count = i.split(' ').count() as isize; count } fn nineteen(i: &str) -> bool { let mut s:Vec<char> = vec!(); for c in i.chars() { match c { '(' => s.push('('), '[' => s.push('['), '{' => s.push('{'), ')' => if '('!= s.pop().unwrap() { return false }, ']' => if '['!= s.pop().unwrap() { return false }, '}' => if '{'!= s.pop().unwrap() { return false }, _ => {} } } true } fn twenty(i: &str) -> String { let mut solutions = vec!(); for n in 2..(i.len()-1) { let e = palindrome_at(i, n); solutions.push(e); } // find the longest solution let longest = solutions.iter().fold( solutions[0].clone(), |acc, item| { if item.len() > acc.len() { item.clone() } else { acc } } ); longest } fn palindrome_at(input: &str, s: usize) -> String { let i:Vec<(char, usize)> = input.chars().enumerate() .map(|(i,c)| (c, i)) .filter(|p| p.0!='') .collect(); let m = i.len(); let fs = 
std::cmp::min( i.len() - 2, std::cmp::max(1,s)); let mut l = fs; let mut r = fs; if i[l].0 != i[r].0
while l > 0 && r < m && i[l].0 == i[r].0 { l = l - 1; r = r + 1; } l = std::cmp::max(0, l); r = std::cmp::min(i.len() - 1, r); let begin = i[l+1].1; let end = std::cmp::min(input.len() - 1,i[r].1); let result = String::from(&input[begin..end]); result } fn twentyone(i: isize) -> String { let s = format!("{}", i); let mut r = String::from(""); let mut mult = 1; for c in s.chars().rev() { let num = mult * c.to_digit(10).unwrap() as isize; let p = ROMANS.iter().find(|p| p.1 == num); match p { Some((rn, _v)) => { r = format!("{}{}", rn, r); }, _ => {}, } mult = mult * 10; } r }
{ // we are not the same assume a center "pivot" character center r = r + 1; }
conditional_block
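The conditional_block answer above comes from palindrome_at, which grows a candidate palindrome outwards from a starting index. Below is a compact standalone sketch of the same expand-around-center idea, checking both an odd-length centre at i and an even-length centre between i and i+1 (ASCII input assumed, spaces not stripped); longest_palindrome and expand are illustrative names only, not part of the file above.

fn longest_palindrome(s: &str) -> &str {
    let b = s.as_bytes(); // ASCII assumed, so byte == character
    let n = b.len();
    let mut best = (0usize, 0usize); // (start, length)
    let expand = |mut l: isize, mut r: isize| -> (usize, usize) {
        // grow outwards while both ends match and stay in bounds
        while l >= 0 && (r as usize) < n && b[l as usize] == b[r as usize] {
            l -= 1;
            r += 1;
        }
        ((l + 1) as usize, (r - l - 1) as usize)
    };
    for i in 0..n as isize {
        // odd-length centre at i, even-length centre between i and i+1
        for cand in [expand(i, i), expand(i, i + 1)] {
            if cand.1 > best.1 {
                best = cand;
            }
        }
    }
    &s[best.0..best.0 + best.1]
}

fn main() {
    assert_eq!(longest_palindrome("abaxyzzyxf"), "xyzzyx");
    assert_eq!(longest_palindrome("racecar"), "racecar");
    println!("expand-around-center ok");
}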
main.rs
use std::collections::HashMap; fn main() { let test_one_input = "Today is Monday"; let max_chars = one(test_one_input); println!("1) the most of a char(first appearing) in '{}' is '{}', appearing {} times", test_one_input, max_chars.0, max_chars.1); let test_two_input = "supracalafragalisticexpealadocious"; let test_two_output = two(test_two_input); println!("2) all subsequent duplicates removed of '{}' is '{}'", test_two_input, test_two_output); let test_three_input = "supracalafragalisticexpealadocious"; let test_three_output = three(test_three_input); println!("3) duplicates of '{}' is '{}'", test_three_input, test_three_output); let test_four_input_1 = "spain"; let test_four_input_2 = "the rain in spain falls mainly in the plain"; let test_four_output = four(test_four_input_1, test_four_input_2); println!("4) characters of '{}' removed from '{}' yields '{}'", test_four_input_1, test_four_input_2, test_four_output); let test_five_input_1 = "XaXbXcXdXe"; let test_five_input_2 = "XcXdXeXaXb"; let test_five_output = five(test_five_input_1, test_five_input_2); let mut maybe = ""; if!test_five_output { maybe = "not "; } println!("5) '{}' is a {}rotation of '{}'", test_five_input_1, maybe, test_five_input_2); let test_six_input = "abracadabra"; let test_six_output = six(test_six_input); println!("6) '{}' reversed is '{}'", test_six_input, test_six_output); let test_seven_input = "123"; let test_seven_output = seven("", test_seven_input); println!("7) '{}' reversed is '{}'", test_seven_input, test_seven_output); let test_eight_input = "012"; let test_eight_output = eight(test_eight_input); println!("8) '{}' has {} permutations {:?}", test_eight_input, test_eight_output.len(), test_eight_output); let test_nine_input = "uprasupradupra"; let test_nine_output = nine(test_nine_input); println!("9) the first unrepeated char in '{}' is '{}'", test_nine_input, test_nine_output); let test_ten_input = "best is Rust"; let test_ten_output = ten(test_ten_input); println!("10) reversed sentence '{}' is '{}'", test_ten_input, test_ten_output); let test_eleven_input1 = "this is a test string"; let test_eleven_input2 = "tist"; let test_eleven_output = eleven(test_eleven_input1, test_eleven_input2); println!("11) smallest substring '{}' inside of '{}' is '{}'", test_eleven_input2, test_eleven_input1, test_eleven_output); let test_twelve_input1 = "Army"; let test_twelve_input2 = "Mary"; let test_twelve_output = twelve(test_twelve_input1, test_twelve_input2); maybe = ""; if!test_twelve_output { maybe = "not "; } println!("12) '{}' is {}an anagram of '{}'", test_twelve_input1, maybe, test_twelve_input2); let test_thirteen_input1 = "Racecar"; let test_thirteen_output = thirteen(test_thirteen_input1); maybe = ""; if!test_thirteen_output { maybe = "not "; } println!("13) '{}' is {}a palindrome of '{}'", test_thirteen_input1, maybe, test_thirteen_output); let test_fourteen_input = "-123000"; let test_fourteen_output = fourteen(test_fourteen_input); println!("14) string '{}' is the isize {}", test_fourteen_input, test_fourteen_output); let test_fifteen_input = "MDCCLXXVI"; let test_fifteen_output = fifteen(test_fifteen_input); println!("15) roman number '{}' equals arabic number {}", test_fifteen_input, test_fifteen_output); let test_eighteen_input = "\tthe rain in spain falls\n\tmainly in the plain \n "; let test_eighteen_output = eighteen(test_eighteen_input); println!("18) '{}' has {} words", test_eighteen_input, test_eighteen_output); let test_nineteen_input = "([{}][]([]()){})"; let test_nineteen_output = 
nineteen(test_nineteen_input); maybe = "has"; if!test_nineteen_output { maybe = "does not have"; } println!("19) '{}' {} balanced parens", test_nineteen_input, maybe); let test_twenty_input = "the rain in spain red rum sir is murder falls sometimes on the racecar but mainly in the plains"; let test_twenty_output = twenty(test_twenty_input); println!("20) '{}' has '{}' as the longest internal palindrome", test_twenty_input, test_twenty_output); let test_twentyone_input = 1776; //;"MDCCLXXVI"; let test_twentyone_output = twentyone(test_twentyone_input); println!("21) arabic number '{}' equals roman number {}", test_twentyone_input, test_twentyone_output); } fn one(input: &str) -> (char, i32) { // return the char that appears most and it's count // the first occurence of let mut counts = HashMap::new(); let mut max_char = 'z'; //input.chars().next().unwrap(); let mut max_value = 0; input.chars().rev().for_each( | c | { if counts.contains_key(&c) { let next_total = counts.get(&c).unwrap() + 1; if next_total >= max_value { max_char = c; max_value = next_total; } counts.insert( c, next_total); } else { counts.insert( c, 1); } }); (max_char, max_value) } fn two(input: &str) -> String { let mut r = String::new(); let mut s = std::collections::HashSet::new(); input.chars().for_each( |c | { if!s.contains(&c) { s.insert(c); r.push(c); } }); r } fn three(input: &str) -> String { let mut r = String::new(); let mut s = std::collections::HashSet::new(); input.chars().for_each( |c | { if!s.contains(&c) { s.insert(c); } else { if!r.contains(c) { r.push(c); } } }); r } fn four(input1: &str, input2: &str) -> String { let mut r = String::new(); let mut p = 'z'; input2.chars().for_each( |c | { if!input1.contains(c) { if!(c =='' && p =='') { r.push(c); p = c; } } }); r } fn five(input1: &str, input2: &str) -> bool { let doubleup = format!("{}{}", input1, input1); doubleup.contains(input2) } fn
(input: &str) -> String { let mut r = String::new(); input.chars().for_each( | c | { r = format!("{}{}", c, r); }); r } fn seven(i1: &str, i2: &str) -> String { let mut r2 = String::from(i2); if i1.len() == 0 { return r2; } r2.push(i1.chars().last().unwrap()); let size_minus_one = i1.len() - 1; let r1 = &i1[..size_minus_one]; return seven(&r1, &r2); } fn eight(i: &str) -> Vec<String> { let mut r = vec!(); if i.len() == 1 { r.push(String::from(i)); return r; } for idx in 0..i.len() { let front = &i[0..idx]; let char = &i[idx..idx+1]; let end = &i[idx+1..]; let without = format!("{}{}", front, end); let subperms = eight(&without); for sp in subperms { r.push(format!("{}{}", char, sp)); } } r } fn nine(i: &str) -> char { for e in i.chars() { let mut count = 0; for se in i.chars() { if se == e { count = count + 1; } } if count == 1 { return e; } } '\0' } fn ten(i: &str) -> String { let mut r = String::new(); let mut is_first = true; for each in i.split(" ") { if is_first { is_first = false; r = String::from(each); } else { r = format!("{} {}", each, r); } } r } fn eleven(i1: &str, i2: &str) -> String { let mut solutions: Vec<String> = vec!(); // tuples of (char, original index of char in the input string) let mut pairs: Vec<(char, usize)> = i1.chars().enumerate().map( |e | -> (char, usize) { (e.1, e.0) }).filter(|p| i2.contains(p.0)).collect(); // println!("{:?}", pairs); //iterate the input string from left to right for _i in 0..pairs.len() { // p will be the match that we remove characters from // if p becomes empty we know we've matched let mut p = String::from(i2.clone()); // remember the first match from p as the head let mut head: Option<(char, usize)> = None; // remember the final match/char from p as the tail let mut tail: Option<(char, usize)> = None; // lets iterate over our pairs of (char, index) for e in &pairs { // if the pair is in p, // remove the character from p and // try and set the head and tail if p.contains(e.0) { p = p.replacen(e.0, "", 1); match head { None => { head = Some(*e) }, Some(_) if p.is_empty() => { tail = Some(*e); break }, Some(_) => {} } } } // if we found all the characters in i2 // we have a match, so head and tail will be populated // chop the string out of i1 and submit it as a solution if head!= None && tail!= None { let h = head.unwrap(); let t = tail.unwrap(); let solution = String::from(&i1[h.1..=t.1]); solutions.push(solution); } // remove the front character, and iterate again pairs.remove(0); } // println!("{:?}", solutions); // find the shortest solution let shortest = solutions.iter().fold(solutions[0].clone(), |acc, item| { if item.len() < acc.len() { item.clone() } else { acc } }); shortest } fn twelve(i1: &str, i2: &str) -> bool{ let i1 = i1.replace(" ", "").to_lowercase(); let mut i2 = i2.replace(" ", "").to_lowercase(); if i1.len()!= i2.len() { return false; } for c in i1.chars() { i2 = i2.replacen(c, "", 1); } let r = i2.len() == 0; r } fn thirteen(i1: &str) -> bool { let i1 = i1.replace(" ", "").to_lowercase(); let i2 = i1.replace(" ", "").to_lowercase().chars().rev().collect::<String>(); let r = i1 == i2; r } fn fourteen(i: &str) -> isize { let mut i = String::from(i); let is_negative = i.contains('-'); if is_negative { i = i.replace("-", ""); } let mut r = 0; for c in i.chars() { let d = c.to_digit(10).unwrap(); r = d + (r * 10); } let mut r = r as isize; if is_negative { r = r * -1; } r } fn fifteen(i: &str) -> isize { let mut i = i.to_uppercase(); let mut r = 0; let mut to_long = 0; while i.len() > 0 { for (rn, an) in 
ROMANS.iter().rev() { if i.starts_with(rn) { r = r + an; i = i.replacen(rn,"",1); break; } } to_long = to_long + 1; if to_long > 20 { return -1 } } r } const ROMANS: [(&str, isize); 30] = [ ("C", 100), ("X", 10), ("I", 1), ("CC", 200), ("XX", 20), ("II", 2), ("CCC", 300), ("XXX", 30), ("III", 3), ("CD", 400), ("XL", 40), ("IV", 4), ("D", 500), ("L", 50), ("V", 5), ("DC", 600), ("LX", 60), ("VI", 6), ("DCC", 700), ("LXX", 70), ("VII", 7), ("DCCC", 800), ("LXXX", 80), ("VIII", 8), ("CM", 900), ("XC", 90), ("IX", 9), ("M", 1000), ("MM", 2000), ("MMM", 3000)]; fn eighteen(i: &str) -> isize { let mut i = String::from(i.trim()); i = i.replace("\n", " "); i = i.replace("\t", " "); i = i.replace(" ", " "); i = i.replace(" ", " "); i = i.replace(" ", " "); i = i.replace(" ", " "); let count = i.split(' ').count() as isize; count } fn nineteen(i: &str) -> bool { let mut s:Vec<char> = vec!(); for c in i.chars() { match c { '(' => s.push('('), '[' => s.push('['), '{' => s.push('{'), ')' => if '('!= s.pop().unwrap() { return false }, ']' => if '['!= s.pop().unwrap() { return false }, '}' => if '{'!= s.pop().unwrap() { return false }, _ => {} } } true } fn twenty(i: &str) -> String { let mut solutions = vec!(); for n in 2..(i.len()-1) { let e = palindrome_at(i, n); solutions.push(e); } // find the longest solution let longest = solutions.iter().fold( solutions[0].clone(), |acc, item| { if item.len() > acc.len() { item.clone() } else { acc } } ); longest } fn palindrome_at(input: &str, s: usize) -> String { let i:Vec<(char, usize)> = input.chars().enumerate() .map(|(i,c)| (c, i)) .filter(|p| p.0!='') .collect(); let m = i.len(); let fs = std::cmp::min( i.len() - 2, std::cmp::max(1,s)); let mut l = fs; let mut r = fs; if i[l].0!= i[r].0 { // we are not the same assume a center "pivot" character center r = r + 1; } while l > 0 && r < m && i[l].0 == i[r].0 { l = l - 1; r = r + 1; } l = std::cmp::max(0, l); r = std::cmp::min(i.len() - 1, r); let begin = i[l+1].1; let end = std::cmp::min(input.len() - 1,i[r].1); let result = String::from(&input[begin..end]); result } fn twentyone(i: isize) -> String { let s = format!("{}", i); let mut r = String::from(""); let mut mult = 1; for c in s.chars().rev() { let num = mult * c.to_digit(10).unwrap() as isize; let p = ROMANS.iter().find(|p| p.1 == num); match p { Some((rn, _v)) => { r = format!("{}{}", rn, r); }, _ => {}, } mult = mult * 10; } r }
six
identifier_name
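The record above elides the name of the string-reversal helper; its middle shows the identifier is `six`. A minimal sketch, assuming that name, of an idiomatic equivalent plus a couple of hypothetical spot-checks (the original exercises ship without tests):

fn six(input: &str) -> String {
    // Same result as the prepend-in-a-loop version in the record: reverse the input.
    input.chars().rev().collect()
}

fn main() {
    assert_eq!(six("hello"), "olleh");
    assert_eq!(six(""), "");
}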
output.rs
use super::Token; pub use json::object::Object; pub use json::JsonValue; use nom::{ alt, call, do_parse, error_position, is_not, many0, map, named, opt, separated_list, tag, value, }; use log::{error, info}; #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum ResultClass { Done, Running, Connected, Error, Exit, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum BreakPointEvent { Created, Deleted, Modified, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum ThreadEvent { Created, GroupStarted, Exited, GroupExited, Selected, } #[derive(Debug, Clone, PartialEq, Eq)] pub enum AsyncClass { Stopped, CmdParamChanged, LibraryLoaded, Thread(ThreadEvent), BreakPoint(BreakPointEvent), Other(String), //? } #[derive(Debug)] pub enum AsyncKind { Exec, Status, Notify, } #[derive(Debug)] pub enum StreamKind { Console, Target, Log, } #[derive(Debug)] pub struct ResultRecord { pub(crate) token: Option<Token>, pub class: ResultClass, pub results: Object, } #[derive(Debug)] pub enum OutOfBandRecord { AsyncRecord { token: Option<Token>, kind: AsyncKind, class: AsyncClass, results: Object, }, StreamRecord { kind: StreamKind, data: String, }, } #[derive(Debug)] enum Output { Result(ResultRecord), OutOfBand(OutOfBandRecord), GDBLine, SomethingElse(String), /* Debug */ } use crate::OutOfBandRecordSink; use nom::IResult; use std::io::{BufRead, BufReader, Read}; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::mpsc::Sender; use std::sync::Arc; pub fn process_output<T: Read, S: OutOfBandRecordSink>( output: T, result_pipe: Sender<ResultRecord>, out_of_band_pipe: S, is_running: Arc<AtomicBool>, ) { let mut reader = BufReader::new(output); loop { let mut buffer = String::new(); match reader.read_line(&mut buffer) { Ok(0) => { return; } Ok(_) => { info!("{}", buffer.trim_end()); let parse_result = match Output::parse(&buffer) { Ok(r) => r, Err(e) => { error!("PARSING ERROR: {}", e); continue; } }; match parse_result { Output::Result(record) => { match record.class { ResultClass::Running => is_running.store(true, Ordering::SeqCst), //Apparently sometimes gdb first claims to be running, only to then stop again (without notifying the user)... ResultClass::Error => is_running.store(false, Ordering::SeqCst), _ => {} } result_pipe.send(record).expect("send result to pipe"); } Output::OutOfBand(record) => { if let OutOfBandRecord::AsyncRecord { class: AsyncClass::Stopped, .. } = record { is_running.store(false, Ordering::SeqCst); } out_of_band_pipe.send(record); } Output::GDBLine => {} //Output::SomethingElse(_) => { /*println!("SOMETHING ELSE: {}", str);*/ } Output::SomethingElse(text) => { out_of_band_pipe.send(OutOfBandRecord::StreamRecord { kind: StreamKind::Target, data: text, }); } } } Err(e) => { panic!("{}", e); } } } } impl Output { fn parse(line: &str) -> Result<Self, String> { match output(line.as_bytes()) { IResult::Done(_, c) => Ok(c), IResult::Incomplete(e) => Err(format!("parsing line: incomplete {:?}", e)), //Is it okay to read the next bytes then? IResult::Error(e) => Err(format!("parse error: {}", e)), } } } named!( result_class<ResultClass>, alt!( value!(ResultClass::Done, tag!("done")) | value!(ResultClass::Running, tag!("running")) | value!(ResultClass::Connected, tag!("connected")) | value!(ResultClass::Error, tag!("error"))
let byte = input[0]; if byte == b'\"' { IResult::Error(::nom::ErrorKind::Custom(1)) //what are we supposed to return here?? } else { IResult::Done(&input[1..], byte) } } named!( escaped_character<u8>, alt!( value!(b'\n', tag!("\\n")) | value!(b'\r', tag!("\\r")) | value!(b'\t', tag!("\\t")) | value!(b'\"', tag!("\\\"")) | value!(b'\\', tag!("\\\\")) | non_quote_byte ) ); named!( string<String>, do_parse!( tag!("\"") >> s: many0!(escaped_character) >> tag!("\"") >> (String::from_utf8_lossy(s.as_slice()).into_owned()) ) ); fn to_map(v: Vec<(String, JsonValue)>) -> Object { //TODO: fix this and parse the map directly let mut obj = Object::new(); for (name, value) in v { debug_assert!(obj.get(&name).is_none(), "Duplicate object member!"); obj.insert(&name, value); } obj } fn to_list(v: Vec<(String, JsonValue)>) -> Vec<JsonValue> { //The gdbmi-grammar is really weird... //TODO: fix this and parse the map directly v.into_iter().map(|(_, value)| value).collect() } named!( value<JsonValue>, alt!( map!(string, JsonValue::String) | do_parse!( tag!("{") >> results: separated_list!(tag!(","), result) >> tag!("}") >> (JsonValue::Object(to_map(results))) ) | do_parse!( tag!("[") >> values: separated_list!(tag!(","), value) >> tag!("]") >> (JsonValue::Array(values)) ) | do_parse!( tag!("[") >> results: separated_list!(tag!(","), result) >> tag!("]") >> (JsonValue::Array(to_list(results))) ) ) ); // Don't even ask... Against its spec, gdb(mi) sometimes emits multiple values for a single tuple // in a comma separated list. named!( buggy_gdb_list_in_result<JsonValue>, map!(separated_list!(tag!(","), value), |values: Vec< JsonValue, >| { if values.len() == 1 { values .into_iter() .next() .expect("len == 1 => first element is guaranteed") } else { JsonValue::Array(values) } }) ); named!( result<(String, JsonValue)>, do_parse!( var: is_not!("={}" /* Do not allow =, {, nor } */) >> tag!("=") >> val: buggy_gdb_list_in_result >> (String::from_utf8_lossy(var).into_owned(), val) ) ); named!( token<Token>, map!(::nom::digit, |values: &[u8]| values .iter() .fold(0, |acc, &ascii_digit| 10 * acc + (ascii_digit - b'0') as u64)) ); named!( result_record<Output>, do_parse!( t: opt!(token) >> tag!("^") >> c: result_class >> res: many0!(do_parse!(tag!(",") >> r: result >> (r))) >> (Output::Result(ResultRecord { token: t, class: c, results: to_map(res), })) ) ); named!( async_kind<AsyncKind>, alt!( value!(AsyncKind::Exec, tag!("*")) | value!(AsyncKind::Status, tag!("+")) | value!(AsyncKind::Notify, tag!("=")) ) ); named!( async_class<AsyncClass>, alt!( value!(AsyncClass::Stopped, tag!("stopped")) | value!( AsyncClass::Thread(ThreadEvent::Created), tag!("thread-created") ) | value!( AsyncClass::Thread(ThreadEvent::GroupStarted), tag!("thread-group-started") ) | value!( AsyncClass::Thread(ThreadEvent::Exited), tag!("thread-exited") ) | value!( AsyncClass::Thread(ThreadEvent::GroupExited), tag!("thread-group-exited") ) | value!( AsyncClass::Thread(ThreadEvent::Selected), tag!("thread-selected") ) | value!(AsyncClass::CmdParamChanged, tag!("cmd-param-changed")) | value!(AsyncClass::LibraryLoaded, tag!("library-loaded")) | value!( AsyncClass::BreakPoint(BreakPointEvent::Created), tag!("breakpoint-created") ) | value!( AsyncClass::BreakPoint(BreakPointEvent::Deleted), tag!("breakpoint-deleted") ) | value!( AsyncClass::BreakPoint(BreakPointEvent::Modified), tag!("breakpoint-modified") ) | map!(is_not!(","), |msg| AsyncClass::Other( String::from_utf8_lossy(msg).into_owned() )) ) ); named!( async_record<OutOfBandRecord>, do_parse!( 
t: opt!(token) >> kind: async_kind >> class: async_class >> results: many0!(do_parse!(tag!(",") >> r: result >> (r))) >> (OutOfBandRecord::AsyncRecord { token: t, kind, class, results: to_map(results), }) ) ); named!( stream_kind<StreamKind>, alt!( value!(StreamKind::Console, tag!("~")) | value!(StreamKind::Target, tag!("@")) | value!(StreamKind::Log, tag!("&")) ) ); named!( stream_record<OutOfBandRecord>, do_parse!( kind: stream_kind >> msg: string >> (OutOfBandRecord::StreamRecord { kind, data: msg }) ) ); named!( out_of_band_record<Output>, map!(alt!(stream_record | async_record), |record| { Output::OutOfBand(record) }) ); named!( gdb_line<Output>, value!(Output::GDBLine, tag!("(gdb) ")) //TODO proper matching ); fn debug_line(i: &[u8]) -> IResult<&[u8], Output> { IResult::Done( i, Output::SomethingElse(String::from_utf8_lossy(i).into_owned()), ) } // Ends all records, but can probably ignored named!(nl, alt!(tag!("\n") | tag!("\r\n"))); named!( output<Output>, do_parse!( output: alt!(result_record | out_of_band_record | gdb_line | debug_line) >> nl >> (output) ) ); #[cfg(test)] mod test { use super::*; #[test] fn test_output() { let _ = Output::parse("=library-loaded,ranges=[{}]\n"); } }
| value!(ResultClass::Exit, tag!("exit")) ) ); fn non_quote_byte(input: &[u8]) -> IResult<&[u8], u8> {
random_line_split
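In the GDB/MI output grammar above, a command token is a run of ASCII digits that the `token` parser folds into a `u64`. A stand-alone sketch of just that arithmetic, with the nom plumbing omitted (`fold_token` is a hypothetical name, not part of the crate):

fn fold_token(digits: &[u8]) -> u64 {
    // Mirrors the fold inside the record's `token` parser: base-10 accumulation.
    digits
        .iter()
        .fold(0u64, |acc, &ascii_digit| 10 * acc + (ascii_digit - b'0') as u64)
}

fn main() {
    assert_eq!(fold_token(b"12345"), 12345);
    assert_eq!(fold_token(b"0"), 0);
}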
output.rs
use super::Token; pub use json::object::Object; pub use json::JsonValue; use nom::{ alt, call, do_parse, error_position, is_not, many0, map, named, opt, separated_list, tag, value, }; use log::{error, info}; #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum ResultClass { Done, Running, Connected, Error, Exit, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum BreakPointEvent { Created, Deleted, Modified, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum ThreadEvent { Created, GroupStarted, Exited, GroupExited, Selected, } #[derive(Debug, Clone, PartialEq, Eq)] pub enum AsyncClass { Stopped, CmdParamChanged, LibraryLoaded, Thread(ThreadEvent), BreakPoint(BreakPointEvent), Other(String), //? } #[derive(Debug)] pub enum AsyncKind { Exec, Status, Notify, } #[derive(Debug)] pub enum StreamKind { Console, Target, Log, } #[derive(Debug)] pub struct ResultRecord { pub(crate) token: Option<Token>, pub class: ResultClass, pub results: Object, } #[derive(Debug)] pub enum OutOfBandRecord { AsyncRecord { token: Option<Token>, kind: AsyncKind, class: AsyncClass, results: Object, }, StreamRecord { kind: StreamKind, data: String, }, } #[derive(Debug)] enum Output { Result(ResultRecord), OutOfBand(OutOfBandRecord), GDBLine, SomethingElse(String), /* Debug */ } use crate::OutOfBandRecordSink; use nom::IResult; use std::io::{BufRead, BufReader, Read}; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::mpsc::Sender; use std::sync::Arc; pub fn process_output<T: Read, S: OutOfBandRecordSink>( output: T, result_pipe: Sender<ResultRecord>, out_of_band_pipe: S, is_running: Arc<AtomicBool>, ) { let mut reader = BufReader::new(output); loop { let mut buffer = String::new(); match reader.read_line(&mut buffer) { Ok(0) => { return; } Ok(_) => { info!("{}", buffer.trim_end()); let parse_result = match Output::parse(&buffer) { Ok(r) => r, Err(e) => { error!("PARSING ERROR: {}", e); continue; } }; match parse_result { Output::Result(record) => { match record.class { ResultClass::Running => is_running.store(true, Ordering::SeqCst), //Apparently sometimes gdb first claims to be running, only to then stop again (without notifying the user)... ResultClass::Error => is_running.store(false, Ordering::SeqCst), _ =>
} result_pipe.send(record).expect("send result to pipe"); } Output::OutOfBand(record) => { if let OutOfBandRecord::AsyncRecord { class: AsyncClass::Stopped, .. } = record { is_running.store(false, Ordering::SeqCst); } out_of_band_pipe.send(record); } Output::GDBLine => {} //Output::SomethingElse(_) => { /*println!("SOMETHING ELSE: {}", str);*/ } Output::SomethingElse(text) => { out_of_band_pipe.send(OutOfBandRecord::StreamRecord { kind: StreamKind::Target, data: text, }); } } } Err(e) => { panic!("{}", e); } } } } impl Output { fn parse(line: &str) -> Result<Self, String> { match output(line.as_bytes()) { IResult::Done(_, c) => Ok(c), IResult::Incomplete(e) => Err(format!("parsing line: incomplete {:?}", e)), //Is it okay to read the next bytes then? IResult::Error(e) => Err(format!("parse error: {}", e)), } } } named!( result_class<ResultClass>, alt!( value!(ResultClass::Done, tag!("done")) | value!(ResultClass::Running, tag!("running")) | value!(ResultClass::Connected, tag!("connected")) | value!(ResultClass::Error, tag!("error")) | value!(ResultClass::Exit, tag!("exit")) ) ); fn non_quote_byte(input: &[u8]) -> IResult<&[u8], u8> { let byte = input[0]; if byte == b'\"' { IResult::Error(::nom::ErrorKind::Custom(1)) //what are we supposed to return here?? } else { IResult::Done(&input[1..], byte) } } named!( escaped_character<u8>, alt!( value!(b'\n', tag!("\\n")) | value!(b'\r', tag!("\\r")) | value!(b'\t', tag!("\\t")) | value!(b'\"', tag!("\\\"")) | value!(b'\\', tag!("\\\\")) | non_quote_byte ) ); named!( string<String>, do_parse!( tag!("\"") >> s: many0!(escaped_character) >> tag!("\"") >> (String::from_utf8_lossy(s.as_slice()).into_owned()) ) ); fn to_map(v: Vec<(String, JsonValue)>) -> Object { //TODO: fix this and parse the map directly let mut obj = Object::new(); for (name, value) in v { debug_assert!(obj.get(&name).is_none(), "Duplicate object member!"); obj.insert(&name, value); } obj } fn to_list(v: Vec<(String, JsonValue)>) -> Vec<JsonValue> { //The gdbmi-grammar is really weird... //TODO: fix this and parse the map directly v.into_iter().map(|(_, value)| value).collect() } named!( value<JsonValue>, alt!( map!(string, JsonValue::String) | do_parse!( tag!("{") >> results: separated_list!(tag!(","), result) >> tag!("}") >> (JsonValue::Object(to_map(results))) ) | do_parse!( tag!("[") >> values: separated_list!(tag!(","), value) >> tag!("]") >> (JsonValue::Array(values)) ) | do_parse!( tag!("[") >> results: separated_list!(tag!(","), result) >> tag!("]") >> (JsonValue::Array(to_list(results))) ) ) ); // Don't even ask... Against its spec, gdb(mi) sometimes emits multiple values for a single tuple // in a comma separated list. 
named!( buggy_gdb_list_in_result<JsonValue>, map!(separated_list!(tag!(","), value), |values: Vec< JsonValue, >| { if values.len() == 1 { values .into_iter() .next() .expect("len == 1 => first element is guaranteed") } else { JsonValue::Array(values) } }) ); named!( result<(String, JsonValue)>, do_parse!( var: is_not!("={}" /* Do not allow =, {, nor } */) >> tag!("=") >> val: buggy_gdb_list_in_result >> (String::from_utf8_lossy(var).into_owned(), val) ) ); named!( token<Token>, map!(::nom::digit, |values: &[u8]| values .iter() .fold(0, |acc, &ascii_digit| 10 * acc + (ascii_digit - b'0') as u64)) ); named!( result_record<Output>, do_parse!( t: opt!(token) >> tag!("^") >> c: result_class >> res: many0!(do_parse!(tag!(",") >> r: result >> (r))) >> (Output::Result(ResultRecord { token: t, class: c, results: to_map(res), })) ) ); named!( async_kind<AsyncKind>, alt!( value!(AsyncKind::Exec, tag!("*")) | value!(AsyncKind::Status, tag!("+")) | value!(AsyncKind::Notify, tag!("=")) ) ); named!( async_class<AsyncClass>, alt!( value!(AsyncClass::Stopped, tag!("stopped")) | value!( AsyncClass::Thread(ThreadEvent::Created), tag!("thread-created") ) | value!( AsyncClass::Thread(ThreadEvent::GroupStarted), tag!("thread-group-started") ) | value!( AsyncClass::Thread(ThreadEvent::Exited), tag!("thread-exited") ) | value!( AsyncClass::Thread(ThreadEvent::GroupExited), tag!("thread-group-exited") ) | value!( AsyncClass::Thread(ThreadEvent::Selected), tag!("thread-selected") ) | value!(AsyncClass::CmdParamChanged, tag!("cmd-param-changed")) | value!(AsyncClass::LibraryLoaded, tag!("library-loaded")) | value!( AsyncClass::BreakPoint(BreakPointEvent::Created), tag!("breakpoint-created") ) | value!( AsyncClass::BreakPoint(BreakPointEvent::Deleted), tag!("breakpoint-deleted") ) | value!( AsyncClass::BreakPoint(BreakPointEvent::Modified), tag!("breakpoint-modified") ) | map!(is_not!(","), |msg| AsyncClass::Other( String::from_utf8_lossy(msg).into_owned() )) ) ); named!( async_record<OutOfBandRecord>, do_parse!( t: opt!(token) >> kind: async_kind >> class: async_class >> results: many0!(do_parse!(tag!(",") >> r: result >> (r))) >> (OutOfBandRecord::AsyncRecord { token: t, kind, class, results: to_map(results), }) ) ); named!( stream_kind<StreamKind>, alt!( value!(StreamKind::Console, tag!("~")) | value!(StreamKind::Target, tag!("@")) | value!(StreamKind::Log, tag!("&")) ) ); named!( stream_record<OutOfBandRecord>, do_parse!( kind: stream_kind >> msg: string >> (OutOfBandRecord::StreamRecord { kind, data: msg }) ) ); named!( out_of_band_record<Output>, map!(alt!(stream_record | async_record), |record| { Output::OutOfBand(record) }) ); named!( gdb_line<Output>, value!(Output::GDBLine, tag!("(gdb) ")) //TODO proper matching ); fn debug_line(i: &[u8]) -> IResult<&[u8], Output> { IResult::Done( i, Output::SomethingElse(String::from_utf8_lossy(i).into_owned()), ) } // Ends all records, but can probably ignored named!(nl, alt!(tag!("\n") | tag!("\r\n"))); named!( output<Output>, do_parse!( output: alt!(result_record | out_of_band_record | gdb_line | debug_line) >> nl >> (output) ) ); #[cfg(test)] mod test { use super::*; #[test] fn test_output() { let _ = Output::parse("=library-loaded,ranges=[{}]\n"); } }
{}
conditional_block
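The conditional block this record elides is the catch-all `_ => {}` arm of the match that updates `is_running` from the result class. A small sketch of that decision table on its own, using an invented `running_update` helper that is not part of the crate:

#[allow(dead_code)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum ResultClass { Done, Running, Connected, Error, Exit }

// Some(new_state) when the class changes the running flag,
// None for the catch-all arm this record fills in.
fn running_update(class: ResultClass) -> Option<bool> {
    match class {
        ResultClass::Running => Some(true),
        ResultClass::Error => Some(false),
        _ => None,
    }
}

fn main() {
    assert_eq!(running_update(ResultClass::Running), Some(true));
    assert_eq!(running_update(ResultClass::Error), Some(false));
    assert_eq!(running_update(ResultClass::Done), None);
}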
output.rs
use super::Token; pub use json::object::Object; pub use json::JsonValue; use nom::{ alt, call, do_parse, error_position, is_not, many0, map, named, opt, separated_list, tag, value, }; use log::{error, info}; #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum ResultClass { Done, Running, Connected, Error, Exit, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum BreakPointEvent { Created, Deleted, Modified, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum ThreadEvent { Created, GroupStarted, Exited, GroupExited, Selected, } #[derive(Debug, Clone, PartialEq, Eq)] pub enum AsyncClass { Stopped, CmdParamChanged, LibraryLoaded, Thread(ThreadEvent), BreakPoint(BreakPointEvent), Other(String), //? } #[derive(Debug)] pub enum AsyncKind { Exec, Status, Notify, } #[derive(Debug)] pub enum StreamKind { Console, Target, Log, } #[derive(Debug)] pub struct ResultRecord { pub(crate) token: Option<Token>, pub class: ResultClass, pub results: Object, } #[derive(Debug)] pub enum OutOfBandRecord { AsyncRecord { token: Option<Token>, kind: AsyncKind, class: AsyncClass, results: Object, }, StreamRecord { kind: StreamKind, data: String, }, } #[derive(Debug)] enum Output { Result(ResultRecord), OutOfBand(OutOfBandRecord), GDBLine, SomethingElse(String), /* Debug */ } use crate::OutOfBandRecordSink; use nom::IResult; use std::io::{BufRead, BufReader, Read}; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::mpsc::Sender; use std::sync::Arc; pub fn process_output<T: Read, S: OutOfBandRecordSink>( output: T, result_pipe: Sender<ResultRecord>, out_of_band_pipe: S, is_running: Arc<AtomicBool>, ) { let mut reader = BufReader::new(output); loop { let mut buffer = String::new(); match reader.read_line(&mut buffer) { Ok(0) => { return; } Ok(_) => { info!("{}", buffer.trim_end()); let parse_result = match Output::parse(&buffer) { Ok(r) => r, Err(e) => { error!("PARSING ERROR: {}", e); continue; } }; match parse_result { Output::Result(record) => { match record.class { ResultClass::Running => is_running.store(true, Ordering::SeqCst), //Apparently sometimes gdb first claims to be running, only to then stop again (without notifying the user)... ResultClass::Error => is_running.store(false, Ordering::SeqCst), _ => {} } result_pipe.send(record).expect("send result to pipe"); } Output::OutOfBand(record) => { if let OutOfBandRecord::AsyncRecord { class: AsyncClass::Stopped, .. } = record { is_running.store(false, Ordering::SeqCst); } out_of_band_pipe.send(record); } Output::GDBLine => {} //Output::SomethingElse(_) => { /*println!("SOMETHING ELSE: {}", str);*/ } Output::SomethingElse(text) => { out_of_band_pipe.send(OutOfBandRecord::StreamRecord { kind: StreamKind::Target, data: text, }); } } } Err(e) => { panic!("{}", e); } } } } impl Output { fn
(line: &str) -> Result<Self, String> { match output(line.as_bytes()) { IResult::Done(_, c) => Ok(c), IResult::Incomplete(e) => Err(format!("parsing line: incomplete {:?}", e)), //Is it okay to read the next bytes then? IResult::Error(e) => Err(format!("parse error: {}", e)), } } } named!( result_class<ResultClass>, alt!( value!(ResultClass::Done, tag!("done")) | value!(ResultClass::Running, tag!("running")) | value!(ResultClass::Connected, tag!("connected")) | value!(ResultClass::Error, tag!("error")) | value!(ResultClass::Exit, tag!("exit")) ) ); fn non_quote_byte(input: &[u8]) -> IResult<&[u8], u8> { let byte = input[0]; if byte == b'\"' { IResult::Error(::nom::ErrorKind::Custom(1)) //what are we supposed to return here?? } else { IResult::Done(&input[1..], byte) } } named!( escaped_character<u8>, alt!( value!(b'\n', tag!("\\n")) | value!(b'\r', tag!("\\r")) | value!(b'\t', tag!("\\t")) | value!(b'\"', tag!("\\\"")) | value!(b'\\', tag!("\\\\")) | non_quote_byte ) ); named!( string<String>, do_parse!( tag!("\"") >> s: many0!(escaped_character) >> tag!("\"") >> (String::from_utf8_lossy(s.as_slice()).into_owned()) ) ); fn to_map(v: Vec<(String, JsonValue)>) -> Object { //TODO: fix this and parse the map directly let mut obj = Object::new(); for (name, value) in v { debug_assert!(obj.get(&name).is_none(), "Duplicate object member!"); obj.insert(&name, value); } obj } fn to_list(v: Vec<(String, JsonValue)>) -> Vec<JsonValue> { //The gdbmi-grammar is really weird... //TODO: fix this and parse the map directly v.into_iter().map(|(_, value)| value).collect() } named!( value<JsonValue>, alt!( map!(string, JsonValue::String) | do_parse!( tag!("{") >> results: separated_list!(tag!(","), result) >> tag!("}") >> (JsonValue::Object(to_map(results))) ) | do_parse!( tag!("[") >> values: separated_list!(tag!(","), value) >> tag!("]") >> (JsonValue::Array(values)) ) | do_parse!( tag!("[") >> results: separated_list!(tag!(","), result) >> tag!("]") >> (JsonValue::Array(to_list(results))) ) ) ); // Don't even ask... Against its spec, gdb(mi) sometimes emits multiple values for a single tuple // in a comma separated list. 
named!( buggy_gdb_list_in_result<JsonValue>, map!(separated_list!(tag!(","), value), |values: Vec< JsonValue, >| { if values.len() == 1 { values .into_iter() .next() .expect("len == 1 => first element is guaranteed") } else { JsonValue::Array(values) } }) ); named!( result<(String, JsonValue)>, do_parse!( var: is_not!("={}" /* Do not allow =, {, nor } */) >> tag!("=") >> val: buggy_gdb_list_in_result >> (String::from_utf8_lossy(var).into_owned(), val) ) ); named!( token<Token>, map!(::nom::digit, |values: &[u8]| values .iter() .fold(0, |acc, &ascii_digit| 10 * acc + (ascii_digit - b'0') as u64)) ); named!( result_record<Output>, do_parse!( t: opt!(token) >> tag!("^") >> c: result_class >> res: many0!(do_parse!(tag!(",") >> r: result >> (r))) >> (Output::Result(ResultRecord { token: t, class: c, results: to_map(res), })) ) ); named!( async_kind<AsyncKind>, alt!( value!(AsyncKind::Exec, tag!("*")) | value!(AsyncKind::Status, tag!("+")) | value!(AsyncKind::Notify, tag!("=")) ) ); named!( async_class<AsyncClass>, alt!( value!(AsyncClass::Stopped, tag!("stopped")) | value!( AsyncClass::Thread(ThreadEvent::Created), tag!("thread-created") ) | value!( AsyncClass::Thread(ThreadEvent::GroupStarted), tag!("thread-group-started") ) | value!( AsyncClass::Thread(ThreadEvent::Exited), tag!("thread-exited") ) | value!( AsyncClass::Thread(ThreadEvent::GroupExited), tag!("thread-group-exited") ) | value!( AsyncClass::Thread(ThreadEvent::Selected), tag!("thread-selected") ) | value!(AsyncClass::CmdParamChanged, tag!("cmd-param-changed")) | value!(AsyncClass::LibraryLoaded, tag!("library-loaded")) | value!( AsyncClass::BreakPoint(BreakPointEvent::Created), tag!("breakpoint-created") ) | value!( AsyncClass::BreakPoint(BreakPointEvent::Deleted), tag!("breakpoint-deleted") ) | value!( AsyncClass::BreakPoint(BreakPointEvent::Modified), tag!("breakpoint-modified") ) | map!(is_not!(","), |msg| AsyncClass::Other( String::from_utf8_lossy(msg).into_owned() )) ) ); named!( async_record<OutOfBandRecord>, do_parse!( t: opt!(token) >> kind: async_kind >> class: async_class >> results: many0!(do_parse!(tag!(",") >> r: result >> (r))) >> (OutOfBandRecord::AsyncRecord { token: t, kind, class, results: to_map(results), }) ) ); named!( stream_kind<StreamKind>, alt!( value!(StreamKind::Console, tag!("~")) | value!(StreamKind::Target, tag!("@")) | value!(StreamKind::Log, tag!("&")) ) ); named!( stream_record<OutOfBandRecord>, do_parse!( kind: stream_kind >> msg: string >> (OutOfBandRecord::StreamRecord { kind, data: msg }) ) ); named!( out_of_band_record<Output>, map!(alt!(stream_record | async_record), |record| { Output::OutOfBand(record) }) ); named!( gdb_line<Output>, value!(Output::GDBLine, tag!("(gdb) ")) //TODO proper matching ); fn debug_line(i: &[u8]) -> IResult<&[u8], Output> { IResult::Done( i, Output::SomethingElse(String::from_utf8_lossy(i).into_owned()), ) } // Ends all records, but can probably ignored named!(nl, alt!(tag!("\n") | tag!("\r\n"))); named!( output<Output>, do_parse!( output: alt!(result_record | out_of_band_record | gdb_line | debug_line) >> nl >> (output) ) ); #[cfg(test)] mod test { use super::*; #[test] fn test_output() { let _ = Output::parse("=library-loaded,ranges=[{}]\n"); } }
parse
identifier_name
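The `string` parser in the record accepts the escapes \n, \r, \t, \" and \\ and passes any other non-quote byte through unchanged. A plain-Rust mirror of that mapping, assuming a hypothetical `unescape` helper that is not part of the crate:

fn unescape(input: &str) -> String {
    let mut out = String::new();
    let mut chars = input.chars();
    while let Some(c) = chars.next() {
        if c == '\\' {
            match chars.next() {
                Some('n') => out.push('\n'),
                Some('r') => out.push('\r'),
                Some('t') => out.push('\t'),
                Some('"') => out.push('"'),
                Some('\\') => out.push('\\'),
                // Unknown escapes fall through byte-by-byte in the real parser too.
                Some(other) => { out.push('\\'); out.push(other); }
                None => out.push('\\'),
            }
        } else {
            out.push(c);
        }
    }
    out
}

fn main() {
    assert_eq!(unescape(r#"line\none"#), "line\none");
    assert_eq!(unescape(r#"a\tb"#), "a\tb");
}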
miopoll.rs
use crate::mio::event::{Event, Source}; use crate::mio::{Events, Interest, Poll, Token, Waker}; use slab::Slab; use stakker::{fwd_nop, Fwd, Stakker}; use std::cell::RefCell; use std::io::{Error, ErrorKind, Result}; use std::ops::{Deref, DerefMut}; use std::rc::Rc; use std::sync::Arc; use std::time::Duration; const WAKER_TOKEN: Token = Token(0); const MAX_PRI: u32 = 10; /// Wrapper for a mio `Source` instance /// /// This is returned by the [`MioPoll::add`] method. It takes care of /// both unregistering the token and dropping the `Source` instance /// when it is dropped. It derefs to the contained `Source` instance, /// so operations on the contained instance can be used directly. /// /// [`MioPoll::add`]: struct.MioPoll.html#method.add pub struct MioSource<S: Source> { token: Token, ctrl: Rc<RefCell<Control>>, source: S, } impl<S: Source> Drop for MioSource<S> { fn drop(&mut self) { let mut ctrl = self.ctrl.borrow_mut(); if let Err(e) = ctrl.del(self.token, &mut self.source) { // TODO: Report the errors some other way, e.g. logged? ctrl.errors.push(e); } } } impl<S: Source> Deref for MioSource<S> { type Target = S; fn deref(&self) -> &Self::Target { &self.source } } impl<S: Source> DerefMut for MioSource<S> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.source } } /// Handle EINTR failures by retrying #[inline] fn retry<R>(mut f: impl FnMut() -> Result<R>) -> Result<R> { loop { let rv = f(); match rv { Err(ref e) if e.kind() == ErrorKind::Interrupted => (), _ => return rv, } } } /// Ref-counting wrapper around a mio `Poll` instance /// /// After creation, pass cloned copies of this to all interested /// parties. A `MioPoll` reference is also available from the /// associated **Stakker** instance using /// `cx.anymap_get::<MioPoll>()`. pub struct MioPoll { rc: Rc<RefCell<Control>>, } impl MioPoll { /// Create a new MioPoll instance wrapping the given mio `Poll` /// instance and mio `Events` queue (which the caller should size /// according to their requirements). The waker priority should /// also be provided, in the range `0..=10`. Sets up the /// **Stakker** instance to use `MioPoll` as the poll-waker, and /// puts a `MioPoll` clone into the **Stakker** anymap. pub fn new(stakker: &mut Stakker, poll: Poll, events: Events, waker_pri: u32) -> Result<Self> { let mut token_map = Slab::with_capacity(256); let waker_pri = waker_pri.min(MAX_PRI); let waker_token = Token(token_map.insert(Entry { pri: waker_pri, fwd: fwd_nop!(), })); assert_eq!(waker_token, WAKER_TOKEN); let waker = Arc::new(retry(|| Waker::new(poll.registry(), WAKER_TOKEN))?); let waker2 = waker.clone(); let mut ctrl = Control { poll, token_map, queues: Default::default(), max_pri: waker_pri, events, errors: Vec::new(), waker, }; let deferrer = stakker.deferrer(); ctrl.set_wake_fwd(Fwd::new(move |_| deferrer.defer(|s| s.poll_wake()))); let miopoll = Self { rc: Rc::new(RefCell::new(ctrl)), }; stakker.anymap_set(miopoll.clone()); stakker.set_poll_waker(move || { if let Err(e) = retry(|| waker2.wake()) { panic!("Inter-thread poll waker failed: {}", e); } }); Ok(miopoll) } /// Register a mio `Source` object with the poll instance. /// Returns a [`MioSource`] which takes care of cleaning up the /// token and handler when it is dropped. /// /// This uses edge-triggering: whenever one of the Interest flags /// included in `ready` changes state, the given `Fwd` instance /// will be invoked with the new `Ready` value. 
The contract with /// the handler is that there may be spurious calls to it, so it /// must be ready for that. /// /// `pri` gives a priority level: `0..=10`. If handlers are /// registered at different priority levels, then higher priority /// events get handled before lower priority events. Under /// constant very heavy load, lower priority events might be /// delayed indefinitely. /// /// [`MioSource`]: struct.MioSource.html pub fn add<S: Source>( &self, mut source: S, ready: Interest, pri: u32, fwd: Fwd<Ready>, ) -> Result<MioSource<S>> { let token = self.rc.borrow_mut().add(&mut source, ready, pri, fwd)?; Ok(MioSource { token, ctrl: self.rc.clone(), source, }) } /// Poll for new events and queue all the events of the highest /// available priority level. Events of lower priority levels are /// queued internally to be used on a future call to this method. /// /// So the expected pattern is that highest-priority handlers get /// run, and when all the resulting processing has completed in /// **Stakker**, then the main loop polls again, and if more /// high-priority events have occurred, then those too will get /// processed. Lower-priority handlers will only get a chance to /// run when nothing higher-priority needs handling. /// /// On success returns `Ok(true)` if an event was processed, or /// `Ok(false)` if there were no new events. pub fn poll(&self, max_delay: Duration) -> Result<bool> { self.rc.borrow_mut().poll(max_delay) } /// Set the handler for "wake" events. There can only be one /// handler for "wake" events, so setting it here drops the /// previous handler. Don't call this unless you wish to override /// the default wake handling which calls /// [`stakker::Stakker::poll_wake`]. /// /// [`stakker::Stakker::poll_wake`]:../stakker/struct.Stakker.html#method.poll_wake pub fn set_wake_fwd(&mut self, fwd: Fwd<Ready>) { self.rc.borrow_mut().set_wake_fwd(fwd); } /// Get a cloned reference to the waker for this `MioPoll` /// instance. This can be passed to other threads, which can call /// `wake()` on it to cause the wake handler to be run in the main /// polling thread. pub fn waker(&mut self) -> Arc<Waker> { self.rc.borrow_mut().waker.clone() } } impl Clone for MioPoll { fn clone(&self) -> Self { Self { rc: self.rc.clone(), } } } struct QueueEvent { token: usize, ready: Ready, } struct Entry { pri: u32, fwd: Fwd<Ready>, } struct Control { token_map: Slab<Entry>, poll: Poll, // Highest priority in use goes on a fast path so we need queues
waker: Arc<Waker>, } impl Control { #[inline] fn del(&mut self, token: Token, handle: &mut impl Source) -> Result<()> { let rv = retry(|| self.poll.registry().deregister(handle)); if self.token_map.contains(token.into()) { self.token_map.remove(token.into()); return rv; } rv.and(Err(Error::from(ErrorKind::NotFound))) } #[inline] fn add( &mut self, handle: &mut impl Source, ready: Interest, pri: u32, fwd: Fwd<Ready>, ) -> Result<Token> { let pri = pri.min(MAX_PRI); self.max_pri = self.max_pri.max(pri); let token = Token(self.token_map.insert(Entry { pri, fwd })); retry(|| self.poll.registry().register(handle, token, ready))?; Ok(token) } fn poll(&mut self, max_delay: Duration) -> Result<bool> { retry(|| self.poll.poll(&mut self.events, Some(max_delay)))?; let mut done = false; for ev in &self.events { let token = ev.token().into(); if let Some(ref mut entry) = self.token_map.get_mut(token) { // Fast-path for highest priority level present in // registrations, so if user uses only one priority level, // there is no queuing necessary here. let ready = Ready::new(ev); if entry.pri == self.max_pri { done = true; entry.fwd.fwd(ready); } else { self.queues[entry.pri as usize].push(QueueEvent { token, ready }); } } } self.events.clear(); if!done { for qu in self.queues.iter_mut().rev() { if!qu.is_empty() { for qev in qu.drain(..) { if let Some(ref mut entry) = self.token_map.get_mut(qev.token) { done = true; entry.fwd.fwd(qev.ready); } } if done { break; } } } } Ok(done) } fn set_wake_fwd(&mut self, fwd: Fwd<Ready>) { self.token_map[WAKER_TOKEN.0].fwd = fwd; } } /// Readiness information from `mio` /// /// See [`mio::event::Event`] for an explanation of what these flags /// mean. /// /// [`mio::event::Event`]:../mio/event/struct.Event.html pub struct Ready(u16); const READY_RD: u16 = 1; const READY_WR: u16 = 2; const READY_ERROR: u16 = 4; const READY_RD_CLOSED: u16 = 8; const READY_WR_CLOSED: u16 = 16; const READY_PRIORITY: u16 = 32; const READY_AIO: u16 = 64; const READY_LIO: u16 = 128; impl Ready { fn new(ev: &Event) -> Self { macro_rules! test { ($test:expr, $val:expr) => { (if $test { $val } else { 0 }) }; } // TODO: Ask'mio' maintainers to add #[inline] if these // aren't getting inlined. Alternatively if it's very heavy, // add crate features to enable only what's required. let val = test!(ev.is_readable(), READY_RD) + test!(ev.is_writable(), READY_WR) + test!(ev.is_error(), READY_ERROR) + test!(ev.is_read_closed(), READY_RD_CLOSED) + test!(ev.is_write_closed(), READY_WR_CLOSED) + test!(ev.is_priority(), READY_PRIORITY) + test!(ev.is_aio(), READY_AIO) + test!(ev.is_lio(), READY_LIO); Self(val) } #[inline] pub fn is_readable(&self) -> bool { 0!= (READY_RD & self.0) } #[inline] pub fn is_writable(&self) -> bool { 0!= (READY_WR & self.0) } #[inline] pub fn is_error(&self) -> bool { 0!= (READY_ERROR & self.0) } #[inline] pub fn is_read_closed(&self) -> bool { 0!= (READY_RD_CLOSED & self.0) } #[inline] pub fn is_write_closed(&self) -> bool { 0!= (READY_WR_CLOSED & self.0) } #[inline] pub fn is_priority(&self) -> bool { 0!= (READY_PRIORITY & self.0) } #[inline] pub fn is_aio(&self) -> bool { 0!= (READY_AIO & self.0) } #[inline] pub fn is_lio(&self) -> bool { 0!= (READY_LIO & self.0) } }
// only for 0..=9 queues: [Vec<QueueEvent>; MAX_PRI as usize], max_pri: u32, events: Events, errors: Vec<Error>,
random_line_split
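The miopoll record above includes a `retry` helper that re-runs an I/O closure while it fails with `ErrorKind::Interrupted` (EINTR). Reproducing that helper from the record, paired with an invented demo closure that fails once and then succeeds:

use std::io::{Error, ErrorKind, Result};

fn retry<R>(mut f: impl FnMut() -> Result<R>) -> Result<R> {
    loop {
        let rv = f();
        match rv {
            // Swallow EINTR and try again; anything else is returned as-is.
            Err(ref e) if e.kind() == ErrorKind::Interrupted => (),
            _ => return rv,
        }
    }
}

fn main() -> Result<()> {
    let mut attempts = 0;
    let value = retry(|| {
        attempts += 1;
        if attempts < 2 {
            Err(Error::from(ErrorKind::Interrupted))
        } else {
            Ok(42)
        }
    })?;
    assert_eq!(value, 42);
    assert_eq!(attempts, 2);
    Ok(())
}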
miopoll.rs
use crate::mio::event::{Event, Source}; use crate::mio::{Events, Interest, Poll, Token, Waker}; use slab::Slab; use stakker::{fwd_nop, Fwd, Stakker}; use std::cell::RefCell; use std::io::{Error, ErrorKind, Result}; use std::ops::{Deref, DerefMut}; use std::rc::Rc; use std::sync::Arc; use std::time::Duration; const WAKER_TOKEN: Token = Token(0); const MAX_PRI: u32 = 10; /// Wrapper for a mio `Source` instance /// /// This is returned by the [`MioPoll::add`] method. It takes care of /// both unregistering the token and dropping the `Source` instance /// when it is dropped. It derefs to the contained `Source` instance, /// so operations on the contained instance can be used directly. /// /// [`MioPoll::add`]: struct.MioPoll.html#method.add pub struct MioSource<S: Source> { token: Token, ctrl: Rc<RefCell<Control>>, source: S, } impl<S: Source> Drop for MioSource<S> { fn drop(&mut self) { let mut ctrl = self.ctrl.borrow_mut(); if let Err(e) = ctrl.del(self.token, &mut self.source) { // TODO: Report the errors some other way, e.g. logged? ctrl.errors.push(e); } } } impl<S: Source> Deref for MioSource<S> { type Target = S; fn deref(&self) -> &Self::Target { &self.source } } impl<S: Source> DerefMut for MioSource<S> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.source } } /// Handle EINTR failures by retrying #[inline] fn retry<R>(mut f: impl FnMut() -> Result<R>) -> Result<R> { loop { let rv = f(); match rv { Err(ref e) if e.kind() == ErrorKind::Interrupted => (), _ => return rv, } } } /// Ref-counting wrapper around a mio `Poll` instance /// /// After creation, pass cloned copies of this to all interested /// parties. A `MioPoll` reference is also available from the /// associated **Stakker** instance using /// `cx.anymap_get::<MioPoll>()`. pub struct MioPoll { rc: Rc<RefCell<Control>>, } impl MioPoll { /// Create a new MioPoll instance wrapping the given mio `Poll` /// instance and mio `Events` queue (which the caller should size /// according to their requirements). The waker priority should /// also be provided, in the range `0..=10`. Sets up the /// **Stakker** instance to use `MioPoll` as the poll-waker, and /// puts a `MioPoll` clone into the **Stakker** anymap. pub fn new(stakker: &mut Stakker, poll: Poll, events: Events, waker_pri: u32) -> Result<Self> { let mut token_map = Slab::with_capacity(256); let waker_pri = waker_pri.min(MAX_PRI); let waker_token = Token(token_map.insert(Entry { pri: waker_pri, fwd: fwd_nop!(), })); assert_eq!(waker_token, WAKER_TOKEN); let waker = Arc::new(retry(|| Waker::new(poll.registry(), WAKER_TOKEN))?); let waker2 = waker.clone(); let mut ctrl = Control { poll, token_map, queues: Default::default(), max_pri: waker_pri, events, errors: Vec::new(), waker, }; let deferrer = stakker.deferrer(); ctrl.set_wake_fwd(Fwd::new(move |_| deferrer.defer(|s| s.poll_wake()))); let miopoll = Self { rc: Rc::new(RefCell::new(ctrl)), }; stakker.anymap_set(miopoll.clone()); stakker.set_poll_waker(move || { if let Err(e) = retry(|| waker2.wake()) { panic!("Inter-thread poll waker failed: {}", e); } }); Ok(miopoll) } /// Register a mio `Source` object with the poll instance. /// Returns a [`MioSource`] which takes care of cleaning up the /// token and handler when it is dropped. /// /// This uses edge-triggering: whenever one of the Interest flags /// included in `ready` changes state, the given `Fwd` instance /// will be invoked with the new `Ready` value. 
The contract with /// the handler is that there may be spurious calls to it, so it /// must be ready for that. /// /// `pri` gives a priority level: `0..=10`. If handlers are /// registered at different priority levels, then higher priority /// events get handled before lower priority events. Under /// constant very heavy load, lower priority events might be /// delayed indefinitely. /// /// [`MioSource`]: struct.MioSource.html pub fn add<S: Source>( &self, mut source: S, ready: Interest, pri: u32, fwd: Fwd<Ready>, ) -> Result<MioSource<S>> { let token = self.rc.borrow_mut().add(&mut source, ready, pri, fwd)?; Ok(MioSource { token, ctrl: self.rc.clone(), source, }) } /// Poll for new events and queue all the events of the highest /// available priority level. Events of lower priority levels are /// queued internally to be used on a future call to this method. /// /// So the expected pattern is that highest-priority handlers get /// run, and when all the resulting processing has completed in /// **Stakker**, then the main loop polls again, and if more /// high-priority events have occurred, then those too will get /// processed. Lower-priority handlers will only get a chance to /// run when nothing higher-priority needs handling. /// /// On success returns `Ok(true)` if an event was processed, or /// `Ok(false)` if there were no new events. pub fn poll(&self, max_delay: Duration) -> Result<bool> { self.rc.borrow_mut().poll(max_delay) } /// Set the handler for "wake" events. There can only be one /// handler for "wake" events, so setting it here drops the /// previous handler. Don't call this unless you wish to override /// the default wake handling which calls /// [`stakker::Stakker::poll_wake`]. /// /// [`stakker::Stakker::poll_wake`]:../stakker/struct.Stakker.html#method.poll_wake pub fn set_wake_fwd(&mut self, fwd: Fwd<Ready>) { self.rc.borrow_mut().set_wake_fwd(fwd); } /// Get a cloned reference to the waker for this `MioPoll` /// instance. This can be passed to other threads, which can call /// `wake()` on it to cause the wake handler to be run in the main /// polling thread. pub fn waker(&mut self) -> Arc<Waker> { self.rc.borrow_mut().waker.clone() } } impl Clone for MioPoll { fn clone(&self) -> Self { Self { rc: self.rc.clone(), } } } struct QueueEvent { token: usize, ready: Ready, } struct Entry { pri: u32, fwd: Fwd<Ready>, } struct Control { token_map: Slab<Entry>, poll: Poll, // Highest priority in use goes on a fast path so we need queues // only for 0..=9 queues: [Vec<QueueEvent>; MAX_PRI as usize], max_pri: u32, events: Events, errors: Vec<Error>, waker: Arc<Waker>, } impl Control { #[inline] fn del(&mut self, token: Token, handle: &mut impl Source) -> Result<()> { let rv = retry(|| self.poll.registry().deregister(handle)); if self.token_map.contains(token.into()) { self.token_map.remove(token.into()); return rv; } rv.and(Err(Error::from(ErrorKind::NotFound))) } #[inline] fn add( &mut self, handle: &mut impl Source, ready: Interest, pri: u32, fwd: Fwd<Ready>, ) -> Result<Token> { let pri = pri.min(MAX_PRI); self.max_pri = self.max_pri.max(pri); let token = Token(self.token_map.insert(Entry { pri, fwd })); retry(|| self.poll.registry().register(handle, token, ready))?; Ok(token) } fn poll(&mut self, max_delay: Duration) -> Result<bool> { retry(|| self.poll.poll(&mut self.events, Some(max_delay)))?; let mut done = false; for ev in &self.events { let token = ev.token().into(); if let Some(ref mut entry) = self.token_map.get_mut(token)
} self.events.clear(); if!done { for qu in self.queues.iter_mut().rev() { if!qu.is_empty() { for qev in qu.drain(..) { if let Some(ref mut entry) = self.token_map.get_mut(qev.token) { done = true; entry.fwd.fwd(qev.ready); } } if done { break; } } } } Ok(done) } fn set_wake_fwd(&mut self, fwd: Fwd<Ready>) { self.token_map[WAKER_TOKEN.0].fwd = fwd; } } /// Readiness information from `mio` /// /// See [`mio::event::Event`] for an explanation of what these flags /// mean. /// /// [`mio::event::Event`]:../mio/event/struct.Event.html pub struct Ready(u16); const READY_RD: u16 = 1; const READY_WR: u16 = 2; const READY_ERROR: u16 = 4; const READY_RD_CLOSED: u16 = 8; const READY_WR_CLOSED: u16 = 16; const READY_PRIORITY: u16 = 32; const READY_AIO: u16 = 64; const READY_LIO: u16 = 128; impl Ready { fn new(ev: &Event) -> Self { macro_rules! test { ($test:expr, $val:expr) => { (if $test { $val } else { 0 }) }; } // TODO: Ask'mio' maintainers to add #[inline] if these // aren't getting inlined. Alternatively if it's very heavy, // add crate features to enable only what's required. let val = test!(ev.is_readable(), READY_RD) + test!(ev.is_writable(), READY_WR) + test!(ev.is_error(), READY_ERROR) + test!(ev.is_read_closed(), READY_RD_CLOSED) + test!(ev.is_write_closed(), READY_WR_CLOSED) + test!(ev.is_priority(), READY_PRIORITY) + test!(ev.is_aio(), READY_AIO) + test!(ev.is_lio(), READY_LIO); Self(val) } #[inline] pub fn is_readable(&self) -> bool { 0!= (READY_RD & self.0) } #[inline] pub fn is_writable(&self) -> bool { 0!= (READY_WR & self.0) } #[inline] pub fn is_error(&self) -> bool { 0!= (READY_ERROR & self.0) } #[inline] pub fn is_read_closed(&self) -> bool { 0!= (READY_RD_CLOSED & self.0) } #[inline] pub fn is_write_closed(&self) -> bool { 0!= (READY_WR_CLOSED & self.0) } #[inline] pub fn is_priority(&self) -> bool { 0!= (READY_PRIORITY & self.0) } #[inline] pub fn is_aio(&self) -> bool { 0!= (READY_AIO & self.0) } #[inline] pub fn is_lio(&self) -> bool { 0!= (READY_LIO & self.0) } }
{ // Fast-path for highest priority level present in // registrations, so if user uses only one priority level, // there is no queuing necessary here. let ready = Ready::new(ev); if entry.pri == self.max_pri { done = true; entry.fwd.fwd(ready); } else { self.queues[entry.pri as usize].push(QueueEvent { token, ready }); } }
conditional_block
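The block this record elides is the fast-path branch of `Control::poll`: an event registered at the highest priority in use is forwarded immediately, while lower-priority events are parked in per-priority queues for a later pass. A toy model of that split, assuming events are (token, priority) pairs and `dispatch` is an invented name:

const MAX_PRI: usize = 10;

fn dispatch(events: &[(usize, u32)], max_pri: u32) -> (Vec<usize>, Vec<Vec<usize>>) {
    let mut handled = Vec::new();
    let mut queues: Vec<Vec<usize>> = vec![Vec::new(); MAX_PRI];
    for &(token, pri) in events {
        if pri == max_pri {
            // Fast path: highest-priority events are handled in this poll.
            handled.push(token);
        } else {
            // Everything else waits in its priority queue.
            queues[pri as usize].push(token);
        }
    }
    (handled, queues)
}

fn main() {
    let events = [(1, 10), (2, 3), (3, 10)];
    let (handled, queues) = dispatch(&events, 10);
    assert_eq!(handled, vec![1, 3]);
    assert_eq!(queues[3], vec![2]);
}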
miopoll.rs
use crate::mio::event::{Event, Source}; use crate::mio::{Events, Interest, Poll, Token, Waker}; use slab::Slab; use stakker::{fwd_nop, Fwd, Stakker}; use std::cell::RefCell; use std::io::{Error, ErrorKind, Result}; use std::ops::{Deref, DerefMut}; use std::rc::Rc; use std::sync::Arc; use std::time::Duration; const WAKER_TOKEN: Token = Token(0); const MAX_PRI: u32 = 10; /// Wrapper for a mio `Source` instance /// /// This is returned by the [`MioPoll::add`] method. It takes care of /// both unregistering the token and dropping the `Source` instance /// when it is dropped. It derefs to the contained `Source` instance, /// so operations on the contained instance can be used directly. /// /// [`MioPoll::add`]: struct.MioPoll.html#method.add pub struct MioSource<S: Source> { token: Token, ctrl: Rc<RefCell<Control>>, source: S, } impl<S: Source> Drop for MioSource<S> { fn drop(&mut self) { let mut ctrl = self.ctrl.borrow_mut(); if let Err(e) = ctrl.del(self.token, &mut self.source) { // TODO: Report the errors some other way, e.g. logged? ctrl.errors.push(e); } } } impl<S: Source> Deref for MioSource<S> { type Target = S; fn deref(&self) -> &Self::Target { &self.source } } impl<S: Source> DerefMut for MioSource<S> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.source } } /// Handle EINTR failures by retrying #[inline] fn retry<R>(mut f: impl FnMut() -> Result<R>) -> Result<R> { loop { let rv = f(); match rv { Err(ref e) if e.kind() == ErrorKind::Interrupted => (), _ => return rv, } } } /// Ref-counting wrapper around a mio `Poll` instance /// /// After creation, pass cloned copies of this to all interested /// parties. A `MioPoll` reference is also available from the /// associated **Stakker** instance using /// `cx.anymap_get::<MioPoll>()`. pub struct MioPoll { rc: Rc<RefCell<Control>>, } impl MioPoll { /// Create a new MioPoll instance wrapping the given mio `Poll` /// instance and mio `Events` queue (which the caller should size /// according to their requirements). The waker priority should /// also be provided, in the range `0..=10`. Sets up the /// **Stakker** instance to use `MioPoll` as the poll-waker, and /// puts a `MioPoll` clone into the **Stakker** anymap. pub fn new(stakker: &mut Stakker, poll: Poll, events: Events, waker_pri: u32) -> Result<Self> { let mut token_map = Slab::with_capacity(256); let waker_pri = waker_pri.min(MAX_PRI); let waker_token = Token(token_map.insert(Entry { pri: waker_pri, fwd: fwd_nop!(), })); assert_eq!(waker_token, WAKER_TOKEN); let waker = Arc::new(retry(|| Waker::new(poll.registry(), WAKER_TOKEN))?); let waker2 = waker.clone(); let mut ctrl = Control { poll, token_map, queues: Default::default(), max_pri: waker_pri, events, errors: Vec::new(), waker, }; let deferrer = stakker.deferrer(); ctrl.set_wake_fwd(Fwd::new(move |_| deferrer.defer(|s| s.poll_wake()))); let miopoll = Self { rc: Rc::new(RefCell::new(ctrl)), }; stakker.anymap_set(miopoll.clone()); stakker.set_poll_waker(move || { if let Err(e) = retry(|| waker2.wake()) { panic!("Inter-thread poll waker failed: {}", e); } }); Ok(miopoll) } /// Register a mio `Source` object with the poll instance. /// Returns a [`MioSource`] which takes care of cleaning up the /// token and handler when it is dropped. /// /// This uses edge-triggering: whenever one of the Interest flags /// included in `ready` changes state, the given `Fwd` instance /// will be invoked with the new `Ready` value. 
The contract with /// the handler is that there may be spurious calls to it, so it /// must be ready for that. /// /// `pri` gives a priority level: `0..=10`. If handlers are /// registered at different priority levels, then higher priority /// events get handled before lower priority events. Under /// constant very heavy load, lower priority events might be /// delayed indefinitely. /// /// [`MioSource`]: struct.MioSource.html pub fn add<S: Source>( &self, mut source: S, ready: Interest, pri: u32, fwd: Fwd<Ready>, ) -> Result<MioSource<S>> { let token = self.rc.borrow_mut().add(&mut source, ready, pri, fwd)?; Ok(MioSource { token, ctrl: self.rc.clone(), source, }) } /// Poll for new events and queue all the events of the highest /// available priority level. Events of lower priority levels are /// queued internally to be used on a future call to this method. /// /// So the expected pattern is that highest-priority handlers get /// run, and when all the resulting processing has completed in /// **Stakker**, then the main loop polls again, and if more /// high-priority events have occurred, then those too will get /// processed. Lower-priority handlers will only get a chance to /// run when nothing higher-priority needs handling. /// /// On success returns `Ok(true)` if an event was processed, or /// `Ok(false)` if there were no new events. pub fn poll(&self, max_delay: Duration) -> Result<bool> { self.rc.borrow_mut().poll(max_delay) } /// Set the handler for "wake" events. There can only be one /// handler for "wake" events, so setting it here drops the /// previous handler. Don't call this unless you wish to override /// the default wake handling which calls /// [`stakker::Stakker::poll_wake`]. /// /// [`stakker::Stakker::poll_wake`]:../stakker/struct.Stakker.html#method.poll_wake pub fn set_wake_fwd(&mut self, fwd: Fwd<Ready>) { self.rc.borrow_mut().set_wake_fwd(fwd); } /// Get a cloned reference to the waker for this `MioPoll` /// instance. This can be passed to other threads, which can call /// `wake()` on it to cause the wake handler to be run in the main /// polling thread. pub fn waker(&mut self) -> Arc<Waker> { self.rc.borrow_mut().waker.clone() } } impl Clone for MioPoll { fn clone(&self) -> Self { Self { rc: self.rc.clone(), } } } struct QueueEvent { token: usize, ready: Ready, } struct Entry { pri: u32, fwd: Fwd<Ready>, } struct
{ token_map: Slab<Entry>, poll: Poll, // Highest priority in use goes on a fast path so we need queues // only for 0..=9 queues: [Vec<QueueEvent>; MAX_PRI as usize], max_pri: u32, events: Events, errors: Vec<Error>, waker: Arc<Waker>, } impl Control { #[inline] fn del(&mut self, token: Token, handle: &mut impl Source) -> Result<()> { let rv = retry(|| self.poll.registry().deregister(handle)); if self.token_map.contains(token.into()) { self.token_map.remove(token.into()); return rv; } rv.and(Err(Error::from(ErrorKind::NotFound))) } #[inline] fn add( &mut self, handle: &mut impl Source, ready: Interest, pri: u32, fwd: Fwd<Ready>, ) -> Result<Token> { let pri = pri.min(MAX_PRI); self.max_pri = self.max_pri.max(pri); let token = Token(self.token_map.insert(Entry { pri, fwd })); retry(|| self.poll.registry().register(handle, token, ready))?; Ok(token) } fn poll(&mut self, max_delay: Duration) -> Result<bool> { retry(|| self.poll.poll(&mut self.events, Some(max_delay)))?; let mut done = false; for ev in &self.events { let token = ev.token().into(); if let Some(ref mut entry) = self.token_map.get_mut(token) { // Fast-path for highest priority level present in // registrations, so if user uses only one priority level, // there is no queuing necessary here. let ready = Ready::new(ev); if entry.pri == self.max_pri { done = true; entry.fwd.fwd(ready); } else { self.queues[entry.pri as usize].push(QueueEvent { token, ready }); } } } self.events.clear(); if!done { for qu in self.queues.iter_mut().rev() { if!qu.is_empty() { for qev in qu.drain(..) { if let Some(ref mut entry) = self.token_map.get_mut(qev.token) { done = true; entry.fwd.fwd(qev.ready); } } if done { break; } } } } Ok(done) } fn set_wake_fwd(&mut self, fwd: Fwd<Ready>) { self.token_map[WAKER_TOKEN.0].fwd = fwd; } } /// Readiness information from `mio` /// /// See [`mio::event::Event`] for an explanation of what these flags /// mean. /// /// [`mio::event::Event`]:../mio/event/struct.Event.html pub struct Ready(u16); const READY_RD: u16 = 1; const READY_WR: u16 = 2; const READY_ERROR: u16 = 4; const READY_RD_CLOSED: u16 = 8; const READY_WR_CLOSED: u16 = 16; const READY_PRIORITY: u16 = 32; const READY_AIO: u16 = 64; const READY_LIO: u16 = 128; impl Ready { fn new(ev: &Event) -> Self { macro_rules! test { ($test:expr, $val:expr) => { (if $test { $val } else { 0 }) }; } // TODO: Ask'mio' maintainers to add #[inline] if these // aren't getting inlined. Alternatively if it's very heavy, // add crate features to enable only what's required. let val = test!(ev.is_readable(), READY_RD) + test!(ev.is_writable(), READY_WR) + test!(ev.is_error(), READY_ERROR) + test!(ev.is_read_closed(), READY_RD_CLOSED) + test!(ev.is_write_closed(), READY_WR_CLOSED) + test!(ev.is_priority(), READY_PRIORITY) + test!(ev.is_aio(), READY_AIO) + test!(ev.is_lio(), READY_LIO); Self(val) } #[inline] pub fn is_readable(&self) -> bool { 0!= (READY_RD & self.0) } #[inline] pub fn is_writable(&self) -> bool { 0!= (READY_WR & self.0) } #[inline] pub fn is_error(&self) -> bool { 0!= (READY_ERROR & self.0) } #[inline] pub fn is_read_closed(&self) -> bool { 0!= (READY_RD_CLOSED & self.0) } #[inline] pub fn is_write_closed(&self) -> bool { 0!= (READY_WR_CLOSED & self.0) } #[inline] pub fn is_priority(&self) -> bool { 0!= (READY_PRIORITY & self.0) } #[inline] pub fn is_aio(&self) -> bool { 0!= (READY_AIO & self.0) } #[inline] pub fn is_lio(&self) -> bool { 0!= (READY_LIO & self.0) } }
Control
identifier_name
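The same miopoll source encodes its `Ready` readiness flags as bits in a `u16`. A trimmed copy of that pattern, keeping the record's constants and bit-test style (only three of the eight flags are shown):

const READY_RD: u16 = 1;
const READY_WR: u16 = 2;
const READY_ERROR: u16 = 4;

struct Ready(u16);

impl Ready {
    fn is_readable(&self) -> bool { 0 != (READY_RD & self.0) }
    fn is_writable(&self) -> bool { 0 != (READY_WR & self.0) }
    fn is_error(&self) -> bool { 0 != (READY_ERROR & self.0) }
}

fn main() {
    let r = Ready(READY_RD | READY_ERROR);
    assert!(r.is_readable());
    assert!(!r.is_writable());
    assert!(r.is_error());
}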
internals.rs
use rustfft::FftPlanner; use crate::utils::buffer::ComplexComponent; use crate::utils::buffer::{copy_complex_to_real, square_sum}; use crate::utils::buffer::{copy_real_to_complex, BufferPool}; use crate::utils::peak::choose_peak; use crate::utils::peak::correct_peak; use crate::utils::peak::detect_peaks; use crate::utils::peak::PeakCorrection; use crate::{float::Float, utils::buffer::modulus_squared}; /// A pitch's `frequency` as well as `clarity`, which is a measure /// of confidence in the pitch detection. pub struct
<T> where T: Float, { pub frequency: T, pub clarity: T, } /// Data structure to hold any buffers needed for pitch computation. /// For WASM it's best to allocate buffers once rather than allocate and /// free buffers repeatedly, so we use a `BufferPool` object to manage the buffers. pub struct DetectorInternals<T> where T: Float, { pub size: usize, pub padding: usize, pub buffers: BufferPool<T>, } impl<T> DetectorInternals<T> where T: Float, { pub fn new(size: usize, padding: usize) -> Self { let buffers = BufferPool::new(size + padding); DetectorInternals { size, padding, buffers, } } } /// Compute the autocorrelation of `signal` to `result`. All buffers but `signal` /// may be used as scratch. pub fn autocorrelation<T>(signal: &[T], buffers: &mut BufferPool<T>, result: &mut [T]) where T: Float, { let (ref1, ref2) = (buffers.get_complex_buffer(), buffers.get_complex_buffer()); let signal_complex = &mut ref1.borrow_mut()[..]; let scratch = &mut ref2.borrow_mut()[..]; let mut planner = FftPlanner::new(); let fft = planner.plan_fft_forward(signal_complex.len()); let inv_fft = planner.plan_fft_inverse(signal_complex.len()); // Compute the autocorrelation copy_real_to_complex(signal, signal_complex, ComplexComponent::Re); fft.process_with_scratch(signal_complex, scratch); modulus_squared(signal_complex); inv_fft.process_with_scratch(signal_complex, scratch); copy_complex_to_real(signal_complex, result, ComplexComponent::Re); } pub fn pitch_from_peaks<T>( input: &[T], sample_rate: usize, clarity_threshold: T, correction: PeakCorrection, ) -> Option<Pitch<T>> where T: Float, { let sample_rate = T::from_usize(sample_rate).unwrap(); let peaks = detect_peaks(input); choose_peak(peaks, clarity_threshold) .map(|peak| correct_peak(peak, input, correction)) .map(|peak| Pitch { frequency: sample_rate / peak.0, clarity: peak.1 / input[0], }) } fn m_of_tau<T>(signal: &[T], signal_square_sum: Option<T>, result: &mut [T]) where T: Float + std::iter::Sum, { assert!(result.len() >= signal.len()); let signal_square_sum = signal_square_sum.unwrap_or_else(|| square_sum(signal)); let start = T::from_usize(2).unwrap() * signal_square_sum; result[0] = start; let last = result[1..] .iter_mut() .zip(signal) .fold(start, |old, (r, &s)| { *r = old - s * s; *r }); // Pad the end of `result` with the last value result[signal.len()..].iter_mut().for_each(|r| *r = last); } pub fn normalized_square_difference<T>(signal: &[T], buffers: &mut BufferPool<T>, result: &mut [T]) where T: Float + std::iter::Sum, { let two = T::from_usize(2).unwrap(); let scratch_ref = buffers.get_real_buffer(); let scratch = &mut scratch_ref.borrow_mut()[..]; autocorrelation(signal, buffers, result); m_of_tau(signal, Some(result[0]), scratch); result .iter_mut() .zip(scratch) .for_each(|(r, s)| *r = two * *r / *s) } /// Compute the windowed autocorrelation of `signal` and put the result in `result`. /// For a signal _x=(x_0,x_1,...)_, the windowed autocorrelation with window size _w_ is /// the function /// /// > r(t) = sum_{i=0}^{w-1} x_i*x_{i+t} /// /// This function assumes `window_size` is at most half of the length of `signal`. pub fn windowed_autocorrelation<T>( signal: &[T], window_size: usize, buffers: &mut BufferPool<T>, result: &mut [T], ) where T: Float + std::iter::Sum, { assert!( buffers.buffer_size >= signal.len(), "Buffers must have a length at least equal to `signal`." 
); let mut planner = FftPlanner::new(); let fft = planner.plan_fft_forward(signal.len()); let inv_fft = planner.plan_fft_inverse(signal.len()); let (scratch_ref1, scratch_ref2, scratch_ref3) = ( buffers.get_complex_buffer(), buffers.get_complex_buffer(), buffers.get_complex_buffer(), ); let signal_complex = &mut scratch_ref1.borrow_mut()[..signal.len()]; let truncated_signal_complex = &mut scratch_ref2.borrow_mut()[..signal.len()]; let scratch = &mut scratch_ref3.borrow_mut()[..signal.len()]; // To achieve the windowed autocorrelation, we compute the cross correlation between // the original signal and the signal truncated to lie in `0..window_size` copy_real_to_complex(signal, signal_complex, ComplexComponent::Re); copy_real_to_complex( &signal[..window_size], truncated_signal_complex, ComplexComponent::Re, ); fft.process_with_scratch(signal_complex, scratch); fft.process_with_scratch(truncated_signal_complex, scratch); // rustfft doesn't normalize when it computes the fft, so we need to normalize ourselves by // dividing by `sqrt(signal.len())` each time we take an fft or inverse fft. // Since the fft is linear and we are doing fft -> inverse fft, we can just divide by // `signal.len()` once. let normalization_const = T::one() / T::from_usize(signal.len()).unwrap(); signal_complex .iter_mut() .zip(truncated_signal_complex.iter()) .for_each(|(a, b)| { *a = *a * normalization_const * b.conj(); }); inv_fft.process_with_scratch(signal_complex, scratch); // The result is valid only for `0..window_size` copy_complex_to_real(&signal_complex[..window_size], result, ComplexComponent::Re); } /// Compute the windowed square error, _d(t)_, of `signal`. For a window size of _w_ and a signal /// _x=(x_0,x_1,...)_, this is defined by /// /// > d(t) = sum_{i=0}^{w-1} (x_i - x_{i+t})^2 /// /// This function is computed efficiently using an FFT. It is assumed that `window_size` is at most half /// the length of `signal`. pub fn windowed_square_error<T>( signal: &[T], window_size: usize, buffers: &mut BufferPool<T>, result: &mut [T], ) where T: Float + std::iter::Sum, { assert!( 2 * window_size <= signal.len(), "The window size cannot be more than half the signal length" ); let two = T::from_f64(2.).unwrap(); // The windowed square error function, d(t), can be computed // as d(t) = pow_0^w + pow_t^{t+w} - 2*windowed_autocorrelation(t) // where pow_a^b is the sum of the square of `signal` on the window `a..b` // We proceed accordingly. windowed_autocorrelation(signal, window_size, buffers, result); let mut windowed_power = square_sum(&signal[..window_size]); let power = windowed_power; result.iter_mut().enumerate().for_each(|(i, a)| { // use the formula pow_0^w + pow_t^{t+w} - 2*windowed_autocorrelation(t) *a = power + windowed_power - two * *a; // Since we're processing everything in order, we can computed pow_{t+1}^{t+1+w} // directly from pow_t^{t+w} by adding and subtracting the boundary terms. windowed_power = windowed_power - signal[i] * signal[i] + signal[i + window_size] * signal[i + window_size]; }) } /// Calculate the "cumulative mean normalized difference function" as /// specified in the YIN paper. If _d(t)_ is the square error function, /// compute _d'(0) = 1_ and for _t > 0_ /// /// > d'(t) = d(t) / [ (1/t) * sum_{i=0}^t d(i) ] pub fn yin_normalize_square_error<T: Float>(square_error: &mut [T]) { let mut sum = T::zero(); square_error[0] = T::one(); // square_error[0] should always be zero, so we don't need to worry about // adding this to our sum. 
square_error .iter_mut() .enumerate() .skip(1) .for_each(|(i, a)| { sum = sum + *a; *a = *a * T::from_usize(i + 1).unwrap() / sum; }); } #[cfg(test)] mod tests { use super::*; #[test] fn windowed_autocorrelation_test() { let signal: Vec<f64> = vec![0., 1., 2., 0., -1., -2.]; let window_size: usize = 3; let buffers = &mut BufferPool::new(signal.len()); let result: Vec<f64> = (0..window_size) .map(|i| { signal[..window_size] .iter() .zip(signal[i..(i + window_size)].iter()) .map(|(a, b)| *a * *b) .sum() }) .collect(); let mut computed_result = vec![0.; window_size]; windowed_autocorrelation(&signal, window_size, buffers, &mut computed_result); // Using an FFT loses precision; we don't care that much, so round generously. computed_result .iter_mut() .for_each(|x| *x = (*x * 100.).round() / 100.); assert_eq!(result, computed_result); } #[test] fn windowed_square_error_test() { let signal: Vec<f64> = vec![0., 1., 2., 0., -1., -2.]; let window_size: usize = 3; let buffers = &mut BufferPool::new(signal.len()); let result: Vec<f64> = (0..window_size) .map(|i| { signal[..window_size] .iter() .zip(signal[i..(i + window_size)].iter()) .map(|(x_j, x_j_tau)| (*x_j - *x_j_tau) * (*x_j - *x_j_tau)) .sum() }) .collect(); let mut computed_result = vec![0.; window_size]; windowed_square_error(&signal, window_size, buffers, &mut computed_result); // Using an FFT loses precision; we don't care that much, so round generously. computed_result .iter_mut() .for_each(|x| *x = (*x * 100.).round() / 100.); assert_eq!(result, computed_result); } #[test] fn yin_normalized_square_error_test() { let signal: &mut Vec<f64> = &mut vec![0., 6., 14.]; let result = vec![1., 2., 3. * 14. / (6. + 14.)]; yin_normalize_square_error(signal); assert_eq!(result, *signal); } }
Pitch
identifier_name
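The doc comment on `windowed_autocorrelation` above defines r(t) = sum_{i=0}^{w-1} x_i * x_{i+t}, which the crate evaluates with an FFT. As a companion, here is a standalone, std-only sketch of the naive definition (the helper name `naive_windowed_autocorrelation` is hypothetical, not part of the crate); it is only meant to make the formula concrete and to reproduce the same numbers the FFT path is tested against.

// Naive reference for r(t) = sum_{i=0}^{w-1} x_i * x_{i+t}.
// Assumes window_size + lags <= signal.len() + 1 so indexing stays in bounds.
fn naive_windowed_autocorrelation(signal: &[f64], window_size: usize, lags: usize) -> Vec<f64> {
    (0..lags)
        .map(|t| (0..window_size).map(|i| signal[i] * signal[i + t]).sum::<f64>())
        .collect()
}

fn main() {
    let signal = [0., 1., 2., 0., -1., -2.];
    // r(0) = 0*0 + 1*1 + 2*2 = 5, r(1) = 0*1 + 1*2 + 2*0 = 2, r(2) = 0*2 + 1*0 + 2*(-1) = -2
    println!("{:?}", naive_windowed_autocorrelation(&signal, 3, 3));
}

This is the same quantity `windowed_autocorrelation_test` computes directly before comparing against the FFT-based result.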
internals.rs
use rustfft::FftPlanner; use crate::utils::buffer::ComplexComponent; use crate::utils::buffer::{copy_complex_to_real, square_sum}; use crate::utils::buffer::{copy_real_to_complex, BufferPool}; use crate::utils::peak::choose_peak; use crate::utils::peak::correct_peak; use crate::utils::peak::detect_peaks; use crate::utils::peak::PeakCorrection; use crate::{float::Float, utils::buffer::modulus_squared}; /// A pitch's `frequency` as well as `clarity`, which is a measure /// of confidence in the pitch detection. pub struct Pitch<T> where T: Float, { pub frequency: T, pub clarity: T, } /// Data structure to hold any buffers needed for pitch computation. /// For WASM it's best to allocate buffers once rather than allocate and /// free buffers repeatedly, so we use a `BufferPool` object to manage the buffers. pub struct DetectorInternals<T> where T: Float, { pub size: usize, pub padding: usize, pub buffers: BufferPool<T>, } impl<T> DetectorInternals<T> where T: Float, { pub fn new(size: usize, padding: usize) -> Self { let buffers = BufferPool::new(size + padding); DetectorInternals { size, padding, buffers, } } } /// Compute the autocorrelation of `signal` to `result`. All buffers but `signal` /// may be used as scratch. pub fn autocorrelation<T>(signal: &[T], buffers: &mut BufferPool<T>, result: &mut [T]) where T: Float, { let (ref1, ref2) = (buffers.get_complex_buffer(), buffers.get_complex_buffer()); let signal_complex = &mut ref1.borrow_mut()[..]; let scratch = &mut ref2.borrow_mut()[..]; let mut planner = FftPlanner::new(); let fft = planner.plan_fft_forward(signal_complex.len()); let inv_fft = planner.plan_fft_inverse(signal_complex.len()); // Compute the autocorrelation copy_real_to_complex(signal, signal_complex, ComplexComponent::Re); fft.process_with_scratch(signal_complex, scratch); modulus_squared(signal_complex); inv_fft.process_with_scratch(signal_complex, scratch); copy_complex_to_real(signal_complex, result, ComplexComponent::Re); } pub fn pitch_from_peaks<T>( input: &[T], sample_rate: usize, clarity_threshold: T, correction: PeakCorrection, ) -> Option<Pitch<T>> where T: Float, { let sample_rate = T::from_usize(sample_rate).unwrap(); let peaks = detect_peaks(input); choose_peak(peaks, clarity_threshold) .map(|peak| correct_peak(peak, input, correction)) .map(|peak| Pitch { frequency: sample_rate / peak.0, clarity: peak.1 / input[0], }) } fn m_of_tau<T>(signal: &[T], signal_square_sum: Option<T>, result: &mut [T]) where T: Float + std::iter::Sum, { assert!(result.len() >= signal.len()); let signal_square_sum = signal_square_sum.unwrap_or_else(|| square_sum(signal)); let start = T::from_usize(2).unwrap() * signal_square_sum; result[0] = start; let last = result[1..] .iter_mut() .zip(signal) .fold(start, |old, (r, &s)| { *r = old - s * s; *r }); // Pad the end of `result` with the last value result[signal.len()..].iter_mut().for_each(|r| *r = last); } pub fn normalized_square_difference<T>(signal: &[T], buffers: &mut BufferPool<T>, result: &mut [T]) where T: Float + std::iter::Sum, { let two = T::from_usize(2).unwrap(); let scratch_ref = buffers.get_real_buffer(); let scratch = &mut scratch_ref.borrow_mut()[..]; autocorrelation(signal, buffers, result); m_of_tau(signal, Some(result[0]), scratch); result .iter_mut() .zip(scratch) .for_each(|(r, s)| *r = two * *r / *s) } /// Compute the windowed autocorrelation of `signal` and put the result in `result`. 
/// For a signal _x=(x_0,x_1,...)_, the windowed autocorrelation with window size _w_ is /// the function /// /// > r(t) = sum_{i=0}^{w-1} x_i*x_{i+t} /// /// This function assumes `window_size` is at most half of the length of `signal`. pub fn windowed_autocorrelation<T>( signal: &[T], window_size: usize, buffers: &mut BufferPool<T>, result: &mut [T], ) where T: Float + std::iter::Sum, { assert!( buffers.buffer_size >= signal.len(), "Buffers must have a length at least equal to `signal`." ); let mut planner = FftPlanner::new(); let fft = planner.plan_fft_forward(signal.len()); let inv_fft = planner.plan_fft_inverse(signal.len()); let (scratch_ref1, scratch_ref2, scratch_ref3) = ( buffers.get_complex_buffer(), buffers.get_complex_buffer(), buffers.get_complex_buffer(), ); let signal_complex = &mut scratch_ref1.borrow_mut()[..signal.len()]; let truncated_signal_complex = &mut scratch_ref2.borrow_mut()[..signal.len()]; let scratch = &mut scratch_ref3.borrow_mut()[..signal.len()]; // To achieve the windowed autocorrelation, we compute the cross correlation between // the original signal and the signal truncated to lie in `0..window_size` copy_real_to_complex(signal, signal_complex, ComplexComponent::Re); copy_real_to_complex( &signal[..window_size], truncated_signal_complex, ComplexComponent::Re, ); fft.process_with_scratch(signal_complex, scratch); fft.process_with_scratch(truncated_signal_complex, scratch); // rustfft doesn't normalize when it computes the fft, so we need to normalize ourselves by // dividing by `sqrt(signal.len())` each time we take an fft or inverse fft. // Since the fft is linear and we are doing fft -> inverse fft, we can just divide by // `signal.len()` once. let normalization_const = T::one() / T::from_usize(signal.len()).unwrap(); signal_complex .iter_mut() .zip(truncated_signal_complex.iter()) .for_each(|(a, b)| { *a = *a * normalization_const * b.conj(); }); inv_fft.process_with_scratch(signal_complex, scratch); // The result is valid only for `0..window_size` copy_complex_to_real(&signal_complex[..window_size], result, ComplexComponent::Re); } /// Compute the windowed square error, _d(t)_, of `signal`. For a window size of _w_ and a signal /// _x=(x_0,x_1,...)_, this is defined by /// /// > d(t) = sum_{i=0}^{w-1} (x_i - x_{i+t})^2 /// /// This function is computed efficiently using an FFT. It is assumed that `window_size` is at most half /// the length of `signal`. pub fn windowed_square_error<T>( signal: &[T], window_size: usize, buffers: &mut BufferPool<T>, result: &mut [T], ) where T: Float + std::iter::Sum, { assert!( 2 * window_size <= signal.len(), "The window size cannot be more than half the signal length" ); let two = T::from_f64(2.).unwrap(); // The windowed square error function, d(t), can be computed // as d(t) = pow_0^w + pow_t^{t+w} - 2*windowed_autocorrelation(t) // where pow_a^b is the sum of the square of `signal` on the window `a..b` // We proceed accordingly. windowed_autocorrelation(signal, window_size, buffers, result); let mut windowed_power = square_sum(&signal[..window_size]); let power = windowed_power; result.iter_mut().enumerate().for_each(|(i, a)| { // use the formula pow_0^w + pow_t^{t+w} - 2*windowed_autocorrelation(t) *a = power + windowed_power - two * *a; // Since we're processing everything in order, we can computed pow_{t+1}^{t+1+w} // directly from pow_t^{t+w} by adding and subtracting the boundary terms. 
windowed_power = windowed_power - signal[i] * signal[i] + signal[i + window_size] * signal[i + window_size]; }) } /// Calculate the "cumulative mean normalized difference function" as /// specified in the YIN paper. If _d(t)_ is the square error function, /// compute _d'(0) = 1_ and for _t > 0_ /// /// > d'(t) = d(t) / [ (1/t) * sum_{i=0}^t d(i) ] pub fn yin_normalize_square_error<T: Float>(square_error: &mut [T]) { let mut sum = T::zero();
// adding this to our sum. square_error .iter_mut() .enumerate() .skip(1) .for_each(|(i, a)| { sum = sum + *a; *a = *a * T::from_usize(i + 1).unwrap() / sum; }); } #[cfg(test)] mod tests { use super::*; #[test] fn windowed_autocorrelation_test() { let signal: Vec<f64> = vec![0., 1., 2., 0., -1., -2.]; let window_size: usize = 3; let buffers = &mut BufferPool::new(signal.len()); let result: Vec<f64> = (0..window_size) .map(|i| { signal[..window_size] .iter() .zip(signal[i..(i + window_size)].iter()) .map(|(a, b)| *a * *b) .sum() }) .collect(); let mut computed_result = vec![0.; window_size]; windowed_autocorrelation(&signal, window_size, buffers, &mut computed_result); // Using an FFT loses precision; we don't care that much, so round generously. computed_result .iter_mut() .for_each(|x| *x = (*x * 100.).round() / 100.); assert_eq!(result, computed_result); } #[test] fn windowed_square_error_test() { let signal: Vec<f64> = vec![0., 1., 2., 0., -1., -2.]; let window_size: usize = 3; let buffers = &mut BufferPool::new(signal.len()); let result: Vec<f64> = (0..window_size) .map(|i| { signal[..window_size] .iter() .zip(signal[i..(i + window_size)].iter()) .map(|(x_j, x_j_tau)| (*x_j - *x_j_tau) * (*x_j - *x_j_tau)) .sum() }) .collect(); let mut computed_result = vec![0.; window_size]; windowed_square_error(&signal, window_size, buffers, &mut computed_result); // Using an FFT loses precision; we don't care that much, so round generously. computed_result .iter_mut() .for_each(|x| *x = (*x * 100.).round() / 100.); assert_eq!(result, computed_result); } #[test] fn yin_normalized_square_error_test() { let signal: &mut Vec<f64> = &mut vec![0., 6., 14.]; let result = vec![1., 2., 3. * 14. / (6. + 14.)]; yin_normalize_square_error(signal); assert_eq!(result, *signal); } }
square_error[0] = T::one(); // square_error[0] should always be zero, so we don't need to worry about
random_line_split
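The `windowed_square_error` doc comment above defines d(t) = sum_{i=0}^{w-1} (x_i - x_{i+t})^2 and the code recovers it through the identity d(t) = pow_0^w + pow_t^{t+w} - 2*r(t). Below is a standalone, std-only sketch of the direct definition (the name `naive_windowed_square_error` is hypothetical, not crate API), useful for sanity-checking the FFT version on small inputs.

// Naive reference for d(t) = sum_{i=0}^{w-1} (x_i - x_{i+t})^2.
// Assumes window_size + lags <= signal.len() + 1.
fn naive_windowed_square_error(signal: &[f64], window_size: usize, lags: usize) -> Vec<f64> {
    (0..lags)
        .map(|t| {
            (0..window_size)
                .map(|i| (signal[i] - signal[i + t]).powi(2))
                .sum::<f64>()
        })
        .collect()
}

fn main() {
    let signal = [0., 1., 2., 0., -1., -2.];
    // d(0) = 0, d(1) = 1 + 1 + 4 = 6, d(2) = 4 + 1 + 9 = 14
    println!("{:?}", naive_windowed_square_error(&signal, 3, 3));
}

Note that for this test signal the outputs [0, 6, 14] are exactly the values fed into `yin_normalized_square_error_test`.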
internals.rs
use rustfft::FftPlanner; use crate::utils::buffer::ComplexComponent; use crate::utils::buffer::{copy_complex_to_real, square_sum}; use crate::utils::buffer::{copy_real_to_complex, BufferPool}; use crate::utils::peak::choose_peak; use crate::utils::peak::correct_peak; use crate::utils::peak::detect_peaks; use crate::utils::peak::PeakCorrection; use crate::{float::Float, utils::buffer::modulus_squared}; /// A pitch's `frequency` as well as `clarity`, which is a measure /// of confidence in the pitch detection. pub struct Pitch<T> where T: Float, { pub frequency: T, pub clarity: T, } /// Data structure to hold any buffers needed for pitch computation. /// For WASM it's best to allocate buffers once rather than allocate and /// free buffers repeatedly, so we use a `BufferPool` object to manage the buffers. pub struct DetectorInternals<T> where T: Float, { pub size: usize, pub padding: usize, pub buffers: BufferPool<T>, } impl<T> DetectorInternals<T> where T: Float, { pub fn new(size: usize, padding: usize) -> Self { let buffers = BufferPool::new(size + padding); DetectorInternals { size, padding, buffers, } } } /// Compute the autocorrelation of `signal` to `result`. All buffers but `signal` /// may be used as scratch. pub fn autocorrelation<T>(signal: &[T], buffers: &mut BufferPool<T>, result: &mut [T]) where T: Float, { let (ref1, ref2) = (buffers.get_complex_buffer(), buffers.get_complex_buffer()); let signal_complex = &mut ref1.borrow_mut()[..]; let scratch = &mut ref2.borrow_mut()[..]; let mut planner = FftPlanner::new(); let fft = planner.plan_fft_forward(signal_complex.len()); let inv_fft = planner.plan_fft_inverse(signal_complex.len()); // Compute the autocorrelation copy_real_to_complex(signal, signal_complex, ComplexComponent::Re); fft.process_with_scratch(signal_complex, scratch); modulus_squared(signal_complex); inv_fft.process_with_scratch(signal_complex, scratch); copy_complex_to_real(signal_complex, result, ComplexComponent::Re); } pub fn pitch_from_peaks<T>( input: &[T], sample_rate: usize, clarity_threshold: T, correction: PeakCorrection, ) -> Option<Pitch<T>> where T: Float, { let sample_rate = T::from_usize(sample_rate).unwrap(); let peaks = detect_peaks(input); choose_peak(peaks, clarity_threshold) .map(|peak| correct_peak(peak, input, correction)) .map(|peak| Pitch { frequency: sample_rate / peak.0, clarity: peak.1 / input[0], }) } fn m_of_tau<T>(signal: &[T], signal_square_sum: Option<T>, result: &mut [T]) where T: Float + std::iter::Sum, { assert!(result.len() >= signal.len()); let signal_square_sum = signal_square_sum.unwrap_or_else(|| square_sum(signal)); let start = T::from_usize(2).unwrap() * signal_square_sum; result[0] = start; let last = result[1..] .iter_mut() .zip(signal) .fold(start, |old, (r, &s)| { *r = old - s * s; *r }); // Pad the end of `result` with the last value result[signal.len()..].iter_mut().for_each(|r| *r = last); } pub fn normalized_square_difference<T>(signal: &[T], buffers: &mut BufferPool<T>, result: &mut [T]) where T: Float + std::iter::Sum, { let two = T::from_usize(2).unwrap(); let scratch_ref = buffers.get_real_buffer(); let scratch = &mut scratch_ref.borrow_mut()[..]; autocorrelation(signal, buffers, result); m_of_tau(signal, Some(result[0]), scratch); result .iter_mut() .zip(scratch) .for_each(|(r, s)| *r = two * *r / *s) } /// Compute the windowed autocorrelation of `signal` and put the result in `result`. 
/// For a signal _x=(x_0,x_1,...)_, the windowed autocorrelation with window size _w_ is /// the function /// /// > r(t) = sum_{i=0}^{w-1} x_i*x_{i+t} /// /// This function assumes `window_size` is at most half of the length of `signal`. pub fn windowed_autocorrelation<T>( signal: &[T], window_size: usize, buffers: &mut BufferPool<T>, result: &mut [T], ) where T: Float + std::iter::Sum, { assert!( buffers.buffer_size >= signal.len(), "Buffers must have a length at least equal to `signal`." ); let mut planner = FftPlanner::new(); let fft = planner.plan_fft_forward(signal.len()); let inv_fft = planner.plan_fft_inverse(signal.len()); let (scratch_ref1, scratch_ref2, scratch_ref3) = ( buffers.get_complex_buffer(), buffers.get_complex_buffer(), buffers.get_complex_buffer(), ); let signal_complex = &mut scratch_ref1.borrow_mut()[..signal.len()]; let truncated_signal_complex = &mut scratch_ref2.borrow_mut()[..signal.len()]; let scratch = &mut scratch_ref3.borrow_mut()[..signal.len()]; // To achieve the windowed autocorrelation, we compute the cross correlation between // the original signal and the signal truncated to lie in `0..window_size` copy_real_to_complex(signal, signal_complex, ComplexComponent::Re); copy_real_to_complex( &signal[..window_size], truncated_signal_complex, ComplexComponent::Re, ); fft.process_with_scratch(signal_complex, scratch); fft.process_with_scratch(truncated_signal_complex, scratch); // rustfft doesn't normalize when it computes the fft, so we need to normalize ourselves by // dividing by `sqrt(signal.len())` each time we take an fft or inverse fft. // Since the fft is linear and we are doing fft -> inverse fft, we can just divide by // `signal.len()` once. let normalization_const = T::one() / T::from_usize(signal.len()).unwrap(); signal_complex .iter_mut() .zip(truncated_signal_complex.iter()) .for_each(|(a, b)| { *a = *a * normalization_const * b.conj(); }); inv_fft.process_with_scratch(signal_complex, scratch); // The result is valid only for `0..window_size` copy_complex_to_real(&signal_complex[..window_size], result, ComplexComponent::Re); } /// Compute the windowed square error, _d(t)_, of `signal`. For a window size of _w_ and a signal /// _x=(x_0,x_1,...)_, this is defined by /// /// > d(t) = sum_{i=0}^{w-1} (x_i - x_{i+t})^2 /// /// This function is computed efficiently using an FFT. It is assumed that `window_size` is at most half /// the length of `signal`. pub fn windowed_square_error<T>( signal: &[T], window_size: usize, buffers: &mut BufferPool<T>, result: &mut [T], ) where T: Float + std::iter::Sum, { assert!( 2 * window_size <= signal.len(), "The window size cannot be more than half the signal length" ); let two = T::from_f64(2.).unwrap(); // The windowed square error function, d(t), can be computed // as d(t) = pow_0^w + pow_t^{t+w} - 2*windowed_autocorrelation(t) // where pow_a^b is the sum of the square of `signal` on the window `a..b` // We proceed accordingly. windowed_autocorrelation(signal, window_size, buffers, result); let mut windowed_power = square_sum(&signal[..window_size]); let power = windowed_power; result.iter_mut().enumerate().for_each(|(i, a)| { // use the formula pow_0^w + pow_t^{t+w} - 2*windowed_autocorrelation(t) *a = power + windowed_power - two * *a; // Since we're processing everything in order, we can computed pow_{t+1}^{t+1+w} // directly from pow_t^{t+w} by adding and subtracting the boundary terms. 
windowed_power = windowed_power - signal[i] * signal[i] + signal[i + window_size] * signal[i + window_size]; }) } /// Calculate the "cumulative mean normalized difference function" as /// specified in the YIN paper. If _d(t)_ is the square error function, /// compute _d'(0) = 1_ and for _t > 0_ /// /// > d'(t) = d(t) / [ (1/t) * sum_{i=0}^t d(i) ] pub fn yin_normalize_square_error<T: Float>(square_error: &mut [T]) { let mut sum = T::zero(); square_error[0] = T::one(); // square_error[0] should always be zero, so we don't need to worry about // adding this to our sum. square_error .iter_mut() .enumerate() .skip(1) .for_each(|(i, a)| { sum = sum + *a; *a = *a * T::from_usize(i + 1).unwrap() / sum; }); } #[cfg(test)] mod tests { use super::*; #[test] fn windowed_autocorrelation_test()
.iter_mut() .for_each(|x| *x = (*x * 100.).round() / 100.); assert_eq!(result, computed_result); } #[test] fn windowed_square_error_test() { let signal: Vec<f64> = vec![0., 1., 2., 0., -1., -2.]; let window_size: usize = 3; let buffers = &mut BufferPool::new(signal.len()); let result: Vec<f64> = (0..window_size) .map(|i| { signal[..window_size] .iter() .zip(signal[i..(i + window_size)].iter()) .map(|(x_j, x_j_tau)| (*x_j - *x_j_tau) * (*x_j - *x_j_tau)) .sum() }) .collect(); let mut computed_result = vec![0.; window_size]; windowed_square_error(&signal, window_size, buffers, &mut computed_result); // Using an FFT loses precision; we don't care that much, so round generously. computed_result .iter_mut() .for_each(|x| *x = (*x * 100.).round() / 100.); assert_eq!(result, computed_result); } #[test] fn yin_normalized_square_error_test() { let signal: &mut Vec<f64> = &mut vec![0., 6., 14.]; let result = vec![1., 2., 3. * 14. / (6. + 14.)]; yin_normalize_square_error(signal); assert_eq!(result, *signal); } }
{ let signal: Vec<f64> = vec![0., 1., 2., 0., -1., -2.]; let window_size: usize = 3; let buffers = &mut BufferPool::new(signal.len()); let result: Vec<f64> = (0..window_size) .map(|i| { signal[..window_size] .iter() .zip(signal[i..(i + window_size)].iter()) .map(|(a, b)| *a * *b) .sum() }) .collect(); let mut computed_result = vec![0.; window_size]; windowed_autocorrelation(&signal, window_size, buffers, &mut computed_result); // Using an FFT loses precision; we don't care that much, so round generously. computed_result
identifier_body
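To make the YIN "cumulative mean normalized difference" step above concrete, here is a standalone sketch that mirrors what `yin_normalize_square_error` does on f64 slices (the function name `yin_normalize` is made up for this sketch). It follows the code rather than the prose formula: d'(0) = 1 and, for t >= 1, d(t) is scaled by (t + 1) divided by the running sum d(1) + ... + d(t), matching the `T::from_usize(i + 1)` factor and the crate's own test.

// Standalone sketch of the normalization applied by `yin_normalize_square_error`.
fn yin_normalize(d: &mut [f64]) {
    let mut sum = 0.0;
    d[0] = 1.0; // d(0) is defined to be 1
    for t in 1..d.len() {
        sum += d[t]; // accumulate the un-normalized d(t) before overwriting it
        d[t] = d[t] * (t as f64 + 1.0) / sum;
    }
}

fn main() {
    // Same input as `yin_normalized_square_error_test` above.
    let mut d = [0.0, 6.0, 14.0];
    yin_normalize(&mut d);
    println!("{:?}", d); // [1.0, 2.0, 2.1] since 3 * 14 / (6 + 14) = 2.1
}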
main.rs
#![cfg_attr(feature = "with-bench", feature(test))] extern crate actix_net; extern crate actix_web; extern crate bech32; extern crate bincode; extern crate bytes; extern crate cardano; extern crate cardano_storage; extern crate cbor_event; extern crate chain_addr; extern crate chain_core; extern crate chain_crypto; extern crate chain_impl_mockchain; extern crate chain_storage; extern crate chain_storage_sqlite; extern crate clap; extern crate cryptoxide; extern crate exe_common; extern crate futures; extern crate generic_array; extern crate http; extern crate sha2; #[macro_use] extern crate lazy_static; extern crate native_tls; extern crate network_core; extern crate network_grpc; extern crate poldercast; extern crate protocol_tokio as protocol; extern crate rand_chacha; extern crate tower_service; extern crate tokio; extern crate tokio_bus; #[cfg(test)] extern crate quickcheck; extern crate rand; extern crate regex; extern crate serde; #[macro_use] extern crate serde_derive; #[macro_use] extern crate serde_json; extern crate serde_yaml; #[macro_use(o)] extern crate slog; extern crate slog_async; extern crate slog_json; extern crate slog_term; extern crate structopt; #[cfg(test)] #[cfg(feature = "with-bench")] extern crate test; use std::io::{self, BufRead}; use std::sync::{mpsc::Receiver, Arc, Mutex, RwLock}; use chain_impl_mockchain::block::{message::MessageId, Message}; use futures::Future; use bech32::{u5, Bech32, FromBase32, ToBase32}; use blockcfg::{ genesis_data::ConfigGenesisData, genesis_data::GenesisData, mock::Mockchain as Cardano, }; use blockchain::{Blockchain, BlockchainR}; use chain_crypto::{ AsymmetricKey, Curve25519_2HashDH, Ed25519, Ed25519Bip32, Ed25519Extended, FakeMMM, }; use intercom::BlockMsg; use leadership::leadership_task; use rand::rngs::EntropyRng; use rand::SeedableRng; use rand_chacha::ChaChaRng; use rest::v0::node::stats::StatsCounter; use settings::{Command, GenPrivKeyType}; use transaction::{transaction_task, TPool}; use utils::task::{TaskBroadcastBox, Tasks}; #[macro_use] pub mod log_wrapper; pub mod blockcfg; pub mod blockchain; pub mod client; pub mod clock; // pub mod consensus; pub mod intercom; pub mod leadership; pub mod network; pub mod rest; pub mod secure; pub mod settings; pub mod state; pub mod transaction; pub mod utils; // TODO: consider an appropriate size for the broadcast buffer. // For the block task, there should hardly be a need to buffer more // than one block as the network task should be able to broadcast the // block notifications in time. const BLOCK_BUS_CAPACITY: usize = 2; pub type TODO = u32; fn block_task( blockchain: BlockchainR<Cardano>, _clock: clock::Clock, // FIXME: use it or lose it r: Receiver<BlockMsg<Cardano>>, stats_counter: StatsCounter, ) { let mut network_broadcast = TaskBroadcastBox::new(BLOCK_BUS_CAPACITY); loop { let bquery = r.recv().unwrap(); blockchain::process(&blockchain, bquery, &mut network_broadcast, &stats_counter); } } fn startup_info( gd: &GenesisData, blockchain: &Blockchain<Cardano>, _settings: &settings::start::Settings, ) { println!( "k={} tip={}", gd.epoch_stability_depth, blockchain.get_tip() ); } // Expand the type with more variants // when it becomes necessary to represent different error cases. 
type Error = settings::Error; fn start(settings: settings::start::Settings) -> Result<(), Error> { settings.log_settings.apply(); let genesis_data = settings.read_genesis_data().unwrap(); let clock = { let initial_epoch = clock::ClockEpochConfiguration { slot_duration: genesis_data.slot_duration, slots_per_epoch: genesis_data.epoch_stability_depth * 10, }; clock::Clock::new(genesis_data.start_time, initial_epoch) }; let leader_secret = if let Some(secret_path) = &settings.leadership { Some(secure::NodeSecret::load_from_file(secret_path.as_path())) } else { None }; //let mut state = State::new(); let blockchain_data = Blockchain::new(genesis_data.clone(), &settings.storage); startup_info(&genesis_data, &blockchain_data, &settings); let blockchain = Arc::new(RwLock::new(blockchain_data)); let mut tasks = Tasks::new(); // # Bootstrap phase // // done at every startup: we need to bootstrap from whatever local state (including nothing) // to the latest network state (or close to latest). until this happen, we don't participate in the network // (no block creation) and our network connection(s) is only use to download data. // // Various aspects to do, similar to hermes: // * download all the existing blocks // * verify all the downloaded blocks // * network / peer discoveries (?) // * gclock sync? // Read block state // init storage // create blockchain storage network::bootstrap(&settings.network, blockchain.clone()); // # Active phase // // now that we have caught up (or almost caught up) we download blocks from neighbor nodes, // listen to announcements and actively listen to synchronous queries // // There's two simultaenous roles to this: // * Leader: decided after global or local evaluation. Need to create and propagate a block // * Non-Leader: always. receive (pushed-) blocks from other peers, investigate the correct blockchain updates // // Also receive synchronous connection queries: // * new nodes subscribing to updates (blocks, transactions) // * client GetBlocks/Headers... let tpool_data: TPool<MessageId, Message> = TPool::new(); let tpool = Arc::new(RwLock::new(tpool_data)); // Validation of consensus settings should make sure that we always have // non-empty selection data. 
let stats_counter = StatsCounter::default(); let transaction_task = { let tpool = tpool.clone(); let blockchain = blockchain.clone(); let stats_counter = stats_counter.clone(); tasks.task_create_with_inputs("transaction", move |r| { transaction_task(blockchain, tpool, r, stats_counter) }) }; let block_task = { let blockchain = blockchain.clone(); let clock = clock.clone(); let stats_counter = stats_counter.clone(); tasks.task_create_with_inputs("block", move |r| { block_task(blockchain, clock, r, stats_counter) }) }; let client_task = { let blockchain = blockchain.clone(); tasks.task_create_with_inputs("client-query", move |r| client::client_task(blockchain, r)) }; // ** TODO ** // setup_network // connection-events: // poll: // recv_transaction: // check_transaction_valid // add transaction to pool // recv_block: // check block valid // try to extend blockchain with block // update utxo state // flush transaction pool if any txid made it // get block(s): // try to answer // { let client_msgbox = client_task.clone(); let transaction_msgbox = transaction_task.clone(); let block_msgbox = block_task.clone(); let config = settings.network.clone(); let channels = network::Channels { client_box: client_msgbox, transaction_box: transaction_msgbox, block_box: block_msgbox, }; tasks.task_create("network", move || { network::run(config, channels); }); }; if let Some(secret) = leader_secret // == settings::start::Leadership::Yes // && leadership::selection::can_lead(&selection) == leadership::IsLeading::Yes { let tpool = tpool.clone(); let clock = clock.clone(); let block_task = block_task.clone(); let blockchain = blockchain.clone(); let leader_id = chain_impl_mockchain::leadership::LeaderId::Bft(secret.public().block_publickey.into()); let pk = chain_impl_mockchain::leadership::Leader::BftLeader(secret.block_privatekey); tasks.task_create("leadership", move || { leadership_task(leader_id, pk, tpool, blockchain, clock, block_task) }); }; let rest_server = match settings.rest { Some(ref rest) => { let context = rest::Context { stats_counter, blockchain, transaction_task: Arc::new(Mutex::new(transaction_task)), }; Some(rest::start_rest_server(rest, context)?) } None => None, }; // periodically cleanup (custom): // storage cleanup/packing // tpool.gc() // FIXME some sort of join so that the main thread does something... tasks.join(); if let Some(server) = rest_server { server.stop().wait().unwrap() } Ok(()) } fn main()
GenPrivKeyType::Ed25519Extended => gen_priv_key_bech32::<Ed25519Extended>(), GenPrivKeyType::FakeMMM => gen_priv_key_bech32::<FakeMMM>(), GenPrivKeyType::Curve25519_2HashDH => gen_priv_key_bech32::<Curve25519_2HashDH>(), }; println!("{}", priv_key_bech32); } Command::GeneratePubKey(args) => { let stdin = io::stdin(); let bech32: Bech32 = if let Some(private_key_str) = args.private_key { private_key_str.parse().unwrap() } else { stdin .lock() .lines() .next() .unwrap() .unwrap() .parse() .unwrap() }; let pub_key_bech32 = match bech32.hrp() { Ed25519::SECRET_BECH32_HRP => gen_pub_key_bech32::<Ed25519>(bech32.data()), Ed25519Bip32::SECRET_BECH32_HRP => { gen_pub_key_bech32::<Ed25519Bip32>(bech32.data()) } Ed25519Extended::SECRET_BECH32_HRP => { gen_pub_key_bech32::<Ed25519Extended>(bech32.data()) } FakeMMM::SECRET_BECH32_HRP => gen_pub_key_bech32::<FakeMMM>(bech32.data()), Curve25519_2HashDH::SECRET_BECH32_HRP => { gen_pub_key_bech32::<Curve25519_2HashDH>(bech32.data()) } other => panic!("Unrecognized private key bech32 HRP: {}", other), }; println!("{}", pub_key_bech32); } Command::Init(init_settings) => { let genesis = ConfigGenesisData::from_genesis(GenesisData { address_discrimination: init_settings.address_discrimination, start_time: init_settings.blockchain_start, slot_duration: init_settings.slot_duration, epoch_stability_depth: init_settings.epoch_stability_depth, initial_utxos: init_settings.initial_utxos, bft_leaders: init_settings.bft_leaders, allow_account_creation: init_settings.allow_account_creation, linear_fees: init_settings.linear_fee, }); serde_yaml::to_writer(std::io::stdout(), &genesis).unwrap(); } } } fn gen_priv_key_bech32<K: AsymmetricKey>() -> Bech32 { let rng = ChaChaRng::from_rng(EntropyRng::new()).unwrap(); let secret = K::generate(rng); let hrp = K::SECRET_BECH32_HRP.to_string(); Bech32::new(hrp, secret.to_base32()).unwrap() } fn gen_pub_key_bech32<K: AsymmetricKey>(priv_key_bech32: &[u5]) -> Bech32 { let priv_key_bytes = Vec::<u8>::from_base32(priv_key_bech32).unwrap(); let priv_key = K::secret_from_binary(&priv_key_bytes).unwrap(); let pub_key = K::compute_public(&priv_key); let hrp = K::PUBLIC_BECH32_HRP.to_string(); Bech32::new(hrp, pub_key.to_base32()).unwrap() }
{ let command = match Command::load() { Err(err) => { eprintln!("{}", err); std::process::exit(1); } Ok(v) => v, }; match command { Command::Start(start_settings) => { if let Err(error) = start(start_settings) { eprintln!("jormungandr error: {}", error); std::process::exit(1); } } Command::GeneratePrivKey(args) => { let priv_key_bech32 = match args.key_type { GenPrivKeyType::Ed25519 => gen_priv_key_bech32::<Ed25519>(), GenPrivKeyType::Ed25519Bip32 => gen_priv_key_bech32::<Ed25519Bip32>(),
identifier_body
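The main.rs above wires the node together as a set of tasks (`block_task`, `transaction_task`, client queries) that exchange messages over channels, with `block_task` blocking on `r.recv()` in a loop. The following is a deliberately simplified, std-only sketch of that pattern; it does not use the node's actual `Tasks`/`TaskBroadcastBox` helpers or `BlockMsg` type, all names here are illustrative.

// Simplified sketch of the "task per thread, fed by an mpsc channel" pattern.
use std::sync::mpsc;
use std::thread;

enum BlockMsg {
    NewBlock(u64),
    Shutdown,
}

fn main() {
    let (tx, rx) = mpsc::channel();
    // Analogous to `block_task`: drain the receiver in a loop and process each message.
    let block_worker = thread::spawn(move || loop {
        match rx.recv().unwrap() {
            BlockMsg::NewBlock(height) => println!("processing block at height {}", height),
            BlockMsg::Shutdown => break,
        }
    });

    tx.send(BlockMsg::NewBlock(1)).unwrap();
    tx.send(BlockMsg::NewBlock(2)).unwrap();
    tx.send(BlockMsg::Shutdown).unwrap();
    block_worker.join().unwrap();
}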
main.rs
#![cfg_attr(feature = "with-bench", feature(test))] extern crate actix_net; extern crate actix_web; extern crate bech32; extern crate bincode; extern crate bytes; extern crate cardano; extern crate cardano_storage; extern crate cbor_event; extern crate chain_addr; extern crate chain_core; extern crate chain_crypto; extern crate chain_impl_mockchain; extern crate chain_storage; extern crate chain_storage_sqlite; extern crate clap; extern crate cryptoxide; extern crate exe_common; extern crate futures; extern crate generic_array; extern crate http; extern crate sha2; #[macro_use] extern crate lazy_static; extern crate native_tls; extern crate network_core; extern crate network_grpc; extern crate poldercast; extern crate protocol_tokio as protocol; extern crate rand_chacha; extern crate tower_service; extern crate tokio; extern crate tokio_bus; #[cfg(test)] extern crate quickcheck; extern crate rand; extern crate regex; extern crate serde; #[macro_use] extern crate serde_derive; #[macro_use] extern crate serde_json; extern crate serde_yaml; #[macro_use(o)] extern crate slog; extern crate slog_async; extern crate slog_json; extern crate slog_term; extern crate structopt; #[cfg(test)] #[cfg(feature = "with-bench")] extern crate test; use std::io::{self, BufRead}; use std::sync::{mpsc::Receiver, Arc, Mutex, RwLock}; use chain_impl_mockchain::block::{message::MessageId, Message}; use futures::Future; use bech32::{u5, Bech32, FromBase32, ToBase32}; use blockcfg::{ genesis_data::ConfigGenesisData, genesis_data::GenesisData, mock::Mockchain as Cardano, }; use blockchain::{Blockchain, BlockchainR}; use chain_crypto::{ AsymmetricKey, Curve25519_2HashDH, Ed25519, Ed25519Bip32, Ed25519Extended, FakeMMM, }; use intercom::BlockMsg; use leadership::leadership_task; use rand::rngs::EntropyRng; use rand::SeedableRng; use rand_chacha::ChaChaRng; use rest::v0::node::stats::StatsCounter; use settings::{Command, GenPrivKeyType}; use transaction::{transaction_task, TPool}; use utils::task::{TaskBroadcastBox, Tasks}; #[macro_use] pub mod log_wrapper; pub mod blockcfg; pub mod blockchain; pub mod client; pub mod clock; // pub mod consensus; pub mod intercom; pub mod leadership; pub mod network; pub mod rest; pub mod secure; pub mod settings; pub mod state; pub mod transaction; pub mod utils; // TODO: consider an appropriate size for the broadcast buffer. // For the block task, there should hardly be a need to buffer more // than one block as the network task should be able to broadcast the // block notifications in time. const BLOCK_BUS_CAPACITY: usize = 2; pub type TODO = u32; fn block_task( blockchain: BlockchainR<Cardano>, _clock: clock::Clock, // FIXME: use it or lose it r: Receiver<BlockMsg<Cardano>>, stats_counter: StatsCounter, ) { let mut network_broadcast = TaskBroadcastBox::new(BLOCK_BUS_CAPACITY); loop { let bquery = r.recv().unwrap(); blockchain::process(&blockchain, bquery, &mut network_broadcast, &stats_counter); } } fn startup_info( gd: &GenesisData, blockchain: &Blockchain<Cardano>, _settings: &settings::start::Settings, ) { println!( "k={} tip={}", gd.epoch_stability_depth, blockchain.get_tip() ); } // Expand the type with more variants // when it becomes necessary to represent different error cases. 
type Error = settings::Error; fn start(settings: settings::start::Settings) -> Result<(), Error> { settings.log_settings.apply(); let genesis_data = settings.read_genesis_data().unwrap(); let clock = { let initial_epoch = clock::ClockEpochConfiguration { slot_duration: genesis_data.slot_duration, slots_per_epoch: genesis_data.epoch_stability_depth * 10, }; clock::Clock::new(genesis_data.start_time, initial_epoch) }; let leader_secret = if let Some(secret_path) = &settings.leadership { Some(secure::NodeSecret::load_from_file(secret_path.as_path())) } else { None }; //let mut state = State::new(); let blockchain_data = Blockchain::new(genesis_data.clone(), &settings.storage); startup_info(&genesis_data, &blockchain_data, &settings); let blockchain = Arc::new(RwLock::new(blockchain_data)); let mut tasks = Tasks::new(); // # Bootstrap phase // // done at every startup: we need to bootstrap from whatever local state (including nothing) // to the latest network state (or close to latest). until this happen, we don't participate in the network // (no block creation) and our network connection(s) is only use to download data. // // Various aspects to do, similar to hermes: // * download all the existing blocks // * verify all the downloaded blocks // * network / peer discoveries (?) // * gclock sync? // Read block state // init storage // create blockchain storage network::bootstrap(&settings.network, blockchain.clone()); // # Active phase // // now that we have caught up (or almost caught up) we download blocks from neighbor nodes, // listen to announcements and actively listen to synchronous queries // // There's two simultaenous roles to this: // * Leader: decided after global or local evaluation. Need to create and propagate a block // * Non-Leader: always. receive (pushed-) blocks from other peers, investigate the correct blockchain updates // // Also receive synchronous connection queries: // * new nodes subscribing to updates (blocks, transactions) // * client GetBlocks/Headers... let tpool_data: TPool<MessageId, Message> = TPool::new(); let tpool = Arc::new(RwLock::new(tpool_data)); // Validation of consensus settings should make sure that we always have // non-empty selection data. 
let stats_counter = StatsCounter::default(); let transaction_task = { let tpool = tpool.clone(); let blockchain = blockchain.clone(); let stats_counter = stats_counter.clone(); tasks.task_create_with_inputs("transaction", move |r| { transaction_task(blockchain, tpool, r, stats_counter) }) }; let block_task = { let blockchain = blockchain.clone(); let clock = clock.clone(); let stats_counter = stats_counter.clone(); tasks.task_create_with_inputs("block", move |r| { block_task(blockchain, clock, r, stats_counter) }) }; let client_task = { let blockchain = blockchain.clone(); tasks.task_create_with_inputs("client-query", move |r| client::client_task(blockchain, r)) }; // ** TODO ** // setup_network // connection-events: // poll: // recv_transaction: // check_transaction_valid // add transaction to pool // recv_block: // check block valid // try to extend blockchain with block // update utxo state // flush transaction pool if any txid made it // get block(s): // try to answer // { let client_msgbox = client_task.clone(); let transaction_msgbox = transaction_task.clone(); let block_msgbox = block_task.clone(); let config = settings.network.clone(); let channels = network::Channels { client_box: client_msgbox, transaction_box: transaction_msgbox, block_box: block_msgbox, }; tasks.task_create("network", move || { network::run(config, channels); }); }; if let Some(secret) = leader_secret // == settings::start::Leadership::Yes // && leadership::selection::can_lead(&selection) == leadership::IsLeading::Yes { let tpool = tpool.clone(); let clock = clock.clone(); let block_task = block_task.clone(); let blockchain = blockchain.clone(); let leader_id = chain_impl_mockchain::leadership::LeaderId::Bft(secret.public().block_publickey.into()); let pk = chain_impl_mockchain::leadership::Leader::BftLeader(secret.block_privatekey); tasks.task_create("leadership", move || { leadership_task(leader_id, pk, tpool, blockchain, clock, block_task) }); }; let rest_server = match settings.rest { Some(ref rest) => { let context = rest::Context { stats_counter, blockchain, transaction_task: Arc::new(Mutex::new(transaction_task)), }; Some(rest::start_rest_server(rest, context)?) } None => None, }; // periodically cleanup (custom): // storage cleanup/packing // tpool.gc() // FIXME some sort of join so that the main thread does something... tasks.join(); if let Some(server) = rest_server { server.stop().wait().unwrap() } Ok(()) } fn main() { let command = match Command::load() { Err(err) => { eprintln!("{}", err); std::process::exit(1); } Ok(v) => v, };
match command { Command::Start(start_settings) => { if let Err(error) = start(start_settings) { eprintln!("jormungandr error: {}", error); std::process::exit(1); } } Command::GeneratePrivKey(args) => { let priv_key_bech32 = match args.key_type { GenPrivKeyType::Ed25519 => gen_priv_key_bech32::<Ed25519>(), GenPrivKeyType::Ed25519Bip32 => gen_priv_key_bech32::<Ed25519Bip32>(), GenPrivKeyType::Ed25519Extended => gen_priv_key_bech32::<Ed25519Extended>(), GenPrivKeyType::FakeMMM => gen_priv_key_bech32::<FakeMMM>(), GenPrivKeyType::Curve25519_2HashDH => gen_priv_key_bech32::<Curve25519_2HashDH>(), }; println!("{}", priv_key_bech32); } Command::GeneratePubKey(args) => { let stdin = io::stdin(); let bech32: Bech32 = if let Some(private_key_str) = args.private_key { private_key_str.parse().unwrap() } else { stdin .lock() .lines() .next() .unwrap() .unwrap() .parse() .unwrap() }; let pub_key_bech32 = match bech32.hrp() { Ed25519::SECRET_BECH32_HRP => gen_pub_key_bech32::<Ed25519>(bech32.data()), Ed25519Bip32::SECRET_BECH32_HRP => { gen_pub_key_bech32::<Ed25519Bip32>(bech32.data()) } Ed25519Extended::SECRET_BECH32_HRP => { gen_pub_key_bech32::<Ed25519Extended>(bech32.data()) } FakeMMM::SECRET_BECH32_HRP => gen_pub_key_bech32::<FakeMMM>(bech32.data()), Curve25519_2HashDH::SECRET_BECH32_HRP => { gen_pub_key_bech32::<Curve25519_2HashDH>(bech32.data()) } other => panic!("Unrecognized private key bech32 HRP: {}", other), }; println!("{}", pub_key_bech32); } Command::Init(init_settings) => { let genesis = ConfigGenesisData::from_genesis(GenesisData { address_discrimination: init_settings.address_discrimination, start_time: init_settings.blockchain_start, slot_duration: init_settings.slot_duration, epoch_stability_depth: init_settings.epoch_stability_depth, initial_utxos: init_settings.initial_utxos, bft_leaders: init_settings.bft_leaders, allow_account_creation: init_settings.allow_account_creation, linear_fees: init_settings.linear_fee, }); serde_yaml::to_writer(std::io::stdout(), &genesis).unwrap(); } } } fn gen_priv_key_bech32<K: AsymmetricKey>() -> Bech32 { let rng = ChaChaRng::from_rng(EntropyRng::new()).unwrap(); let secret = K::generate(rng); let hrp = K::SECRET_BECH32_HRP.to_string(); Bech32::new(hrp, secret.to_base32()).unwrap() } fn gen_pub_key_bech32<K: AsymmetricKey>(priv_key_bech32: &[u5]) -> Bech32 { let priv_key_bytes = Vec::<u8>::from_base32(priv_key_bech32).unwrap(); let priv_key = K::secret_from_binary(&priv_key_bytes).unwrap(); let pub_key = K::compute_public(&priv_key); let hrp = K::PUBLIC_BECH32_HRP.to_string(); Bech32::new(hrp, pub_key.to_base32()).unwrap() }
random_line_split
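The startup code above builds a clock from `slot_duration` and `slots_per_epoch = epoch_stability_depth * 10`. As an illustration only (this is not the node's `clock::Clock`, and the arithmetic is an assumption about how such a slot clock is typically laid out), a minimal mapping from elapsed time to an (epoch, slot-in-epoch) pair could look like this:

// Illustrative sketch: map elapsed seconds to (epoch, slot within epoch)
// given a fixed slot duration and a fixed number of slots per epoch.
fn epoch_and_slot(elapsed_secs: u64, slot_duration_secs: u64, slots_per_epoch: u64) -> (u64, u64) {
    let absolute_slot = elapsed_secs / slot_duration_secs;
    (absolute_slot / slots_per_epoch, absolute_slot % slots_per_epoch)
}

fn main() {
    // e.g. 20-second slots and epoch_stability_depth = 10 => 100 slots per epoch
    assert_eq!(epoch_and_slot(0, 20, 100), (0, 0));
    assert_eq!(epoch_and_slot(2_050, 20, 100), (1, 2));
    println!("ok");
}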
main.rs
#![cfg_attr(feature = "with-bench", feature(test))] extern crate actix_net; extern crate actix_web; extern crate bech32; extern crate bincode; extern crate bytes; extern crate cardano; extern crate cardano_storage; extern crate cbor_event; extern crate chain_addr; extern crate chain_core; extern crate chain_crypto; extern crate chain_impl_mockchain; extern crate chain_storage; extern crate chain_storage_sqlite; extern crate clap; extern crate cryptoxide; extern crate exe_common; extern crate futures; extern crate generic_array; extern crate http; extern crate sha2; #[macro_use] extern crate lazy_static; extern crate native_tls; extern crate network_core; extern crate network_grpc; extern crate poldercast; extern crate protocol_tokio as protocol; extern crate rand_chacha; extern crate tower_service; extern crate tokio; extern crate tokio_bus; #[cfg(test)] extern crate quickcheck; extern crate rand; extern crate regex; extern crate serde; #[macro_use] extern crate serde_derive; #[macro_use] extern crate serde_json; extern crate serde_yaml; #[macro_use(o)] extern crate slog; extern crate slog_async; extern crate slog_json; extern crate slog_term; extern crate structopt; #[cfg(test)] #[cfg(feature = "with-bench")] extern crate test; use std::io::{self, BufRead}; use std::sync::{mpsc::Receiver, Arc, Mutex, RwLock}; use chain_impl_mockchain::block::{message::MessageId, Message}; use futures::Future; use bech32::{u5, Bech32, FromBase32, ToBase32}; use blockcfg::{ genesis_data::ConfigGenesisData, genesis_data::GenesisData, mock::Mockchain as Cardano, }; use blockchain::{Blockchain, BlockchainR}; use chain_crypto::{ AsymmetricKey, Curve25519_2HashDH, Ed25519, Ed25519Bip32, Ed25519Extended, FakeMMM, }; use intercom::BlockMsg; use leadership::leadership_task; use rand::rngs::EntropyRng; use rand::SeedableRng; use rand_chacha::ChaChaRng; use rest::v0::node::stats::StatsCounter; use settings::{Command, GenPrivKeyType}; use transaction::{transaction_task, TPool}; use utils::task::{TaskBroadcastBox, Tasks}; #[macro_use] pub mod log_wrapper; pub mod blockcfg; pub mod blockchain; pub mod client; pub mod clock; // pub mod consensus; pub mod intercom; pub mod leadership; pub mod network; pub mod rest; pub mod secure; pub mod settings; pub mod state; pub mod transaction; pub mod utils; // TODO: consider an appropriate size for the broadcast buffer. // For the block task, there should hardly be a need to buffer more // than one block as the network task should be able to broadcast the // block notifications in time. const BLOCK_BUS_CAPACITY: usize = 2; pub type TODO = u32; fn block_task( blockchain: BlockchainR<Cardano>, _clock: clock::Clock, // FIXME: use it or lose it r: Receiver<BlockMsg<Cardano>>, stats_counter: StatsCounter, ) { let mut network_broadcast = TaskBroadcastBox::new(BLOCK_BUS_CAPACITY); loop { let bquery = r.recv().unwrap(); blockchain::process(&blockchain, bquery, &mut network_broadcast, &stats_counter); } } fn startup_info( gd: &GenesisData, blockchain: &Blockchain<Cardano>, _settings: &settings::start::Settings, ) { println!( "k={} tip={}", gd.epoch_stability_depth, blockchain.get_tip() ); } // Expand the type with more variants // when it becomes necessary to represent different error cases. 
type Error = settings::Error; fn start(settings: settings::start::Settings) -> Result<(), Error> { settings.log_settings.apply(); let genesis_data = settings.read_genesis_data().unwrap(); let clock = { let initial_epoch = clock::ClockEpochConfiguration { slot_duration: genesis_data.slot_duration, slots_per_epoch: genesis_data.epoch_stability_depth * 10, }; clock::Clock::new(genesis_data.start_time, initial_epoch) }; let leader_secret = if let Some(secret_path) = &settings.leadership { Some(secure::NodeSecret::load_from_file(secret_path.as_path())) } else { None }; //let mut state = State::new(); let blockchain_data = Blockchain::new(genesis_data.clone(), &settings.storage); startup_info(&genesis_data, &blockchain_data, &settings); let blockchain = Arc::new(RwLock::new(blockchain_data)); let mut tasks = Tasks::new(); // # Bootstrap phase // // done at every startup: we need to bootstrap from whatever local state (including nothing) // to the latest network state (or close to latest). until this happen, we don't participate in the network // (no block creation) and our network connection(s) is only use to download data. // // Various aspects to do, similar to hermes: // * download all the existing blocks // * verify all the downloaded blocks // * network / peer discoveries (?) // * gclock sync? // Read block state // init storage // create blockchain storage network::bootstrap(&settings.network, blockchain.clone()); // # Active phase // // now that we have caught up (or almost caught up) we download blocks from neighbor nodes, // listen to announcements and actively listen to synchronous queries // // There's two simultaenous roles to this: // * Leader: decided after global or local evaluation. Need to create and propagate a block // * Non-Leader: always. receive (pushed-) blocks from other peers, investigate the correct blockchain updates // // Also receive synchronous connection queries: // * new nodes subscribing to updates (blocks, transactions) // * client GetBlocks/Headers... let tpool_data: TPool<MessageId, Message> = TPool::new(); let tpool = Arc::new(RwLock::new(tpool_data)); // Validation of consensus settings should make sure that we always have // non-empty selection data. 
let stats_counter = StatsCounter::default(); let transaction_task = { let tpool = tpool.clone(); let blockchain = blockchain.clone(); let stats_counter = stats_counter.clone(); tasks.task_create_with_inputs("transaction", move |r| { transaction_task(blockchain, tpool, r, stats_counter) }) }; let block_task = { let blockchain = blockchain.clone(); let clock = clock.clone(); let stats_counter = stats_counter.clone(); tasks.task_create_with_inputs("block", move |r| { block_task(blockchain, clock, r, stats_counter) }) }; let client_task = { let blockchain = blockchain.clone(); tasks.task_create_with_inputs("client-query", move |r| client::client_task(blockchain, r)) }; // ** TODO ** // setup_network // connection-events: // poll: // recv_transaction: // check_transaction_valid // add transaction to pool // recv_block: // check block valid // try to extend blockchain with block // update utxo state // flush transaction pool if any txid made it // get block(s): // try to answer // { let client_msgbox = client_task.clone(); let transaction_msgbox = transaction_task.clone(); let block_msgbox = block_task.clone(); let config = settings.network.clone(); let channels = network::Channels { client_box: client_msgbox, transaction_box: transaction_msgbox, block_box: block_msgbox, }; tasks.task_create("network", move || { network::run(config, channels); }); }; if let Some(secret) = leader_secret // == settings::start::Leadership::Yes // && leadership::selection::can_lead(&selection) == leadership::IsLeading::Yes { let tpool = tpool.clone(); let clock = clock.clone(); let block_task = block_task.clone(); let blockchain = blockchain.clone(); let leader_id = chain_impl_mockchain::leadership::LeaderId::Bft(secret.public().block_publickey.into()); let pk = chain_impl_mockchain::leadership::Leader::BftLeader(secret.block_privatekey); tasks.task_create("leadership", move || { leadership_task(leader_id, pk, tpool, blockchain, clock, block_task) }); }; let rest_server = match settings.rest { Some(ref rest) => { let context = rest::Context { stats_counter, blockchain, transaction_task: Arc::new(Mutex::new(transaction_task)), }; Some(rest::start_rest_server(rest, context)?) } None => None, }; // periodically cleanup (custom): // storage cleanup/packing // tpool.gc() // FIXME some sort of join so that the main thread does something... tasks.join(); if let Some(server) = rest_server { server.stop().wait().unwrap() } Ok(()) } fn main() { let command = match Command::load() { Err(err) =>
Ok(v) => v, }; match command { Command::Start(start_settings) => { if let Err(error) = start(start_settings) { eprintln!("jormungandr error: {}", error); std::process::exit(1); } } Command::GeneratePrivKey(args) => { let priv_key_bech32 = match args.key_type { GenPrivKeyType::Ed25519 => gen_priv_key_bech32::<Ed25519>(), GenPrivKeyType::Ed25519Bip32 => gen_priv_key_bech32::<Ed25519Bip32>(), GenPrivKeyType::Ed25519Extended => gen_priv_key_bech32::<Ed25519Extended>(), GenPrivKeyType::FakeMMM => gen_priv_key_bech32::<FakeMMM>(), GenPrivKeyType::Curve25519_2HashDH => gen_priv_key_bech32::<Curve25519_2HashDH>(), }; println!("{}", priv_key_bech32); } Command::GeneratePubKey(args) => { let stdin = io::stdin(); let bech32: Bech32 = if let Some(private_key_str) = args.private_key { private_key_str.parse().unwrap() } else { stdin .lock() .lines() .next() .unwrap() .unwrap() .parse() .unwrap() }; let pub_key_bech32 = match bech32.hrp() { Ed25519::SECRET_BECH32_HRP => gen_pub_key_bech32::<Ed25519>(bech32.data()), Ed25519Bip32::SECRET_BECH32_HRP => { gen_pub_key_bech32::<Ed25519Bip32>(bech32.data()) } Ed25519Extended::SECRET_BECH32_HRP => { gen_pub_key_bech32::<Ed25519Extended>(bech32.data()) } FakeMMM::SECRET_BECH32_HRP => gen_pub_key_bech32::<FakeMMM>(bech32.data()), Curve25519_2HashDH::SECRET_BECH32_HRP => { gen_pub_key_bech32::<Curve25519_2HashDH>(bech32.data()) } other => panic!("Unrecognized private key bech32 HRP: {}", other), }; println!("{}", pub_key_bech32); } Command::Init(init_settings) => { let genesis = ConfigGenesisData::from_genesis(GenesisData { address_discrimination: init_settings.address_discrimination, start_time: init_settings.blockchain_start, slot_duration: init_settings.slot_duration, epoch_stability_depth: init_settings.epoch_stability_depth, initial_utxos: init_settings.initial_utxos, bft_leaders: init_settings.bft_leaders, allow_account_creation: init_settings.allow_account_creation, linear_fees: init_settings.linear_fee, }); serde_yaml::to_writer(std::io::stdout(), &genesis).unwrap(); } } } fn gen_priv_key_bech32<K: AsymmetricKey>() -> Bech32 { let rng = ChaChaRng::from_rng(EntropyRng::new()).unwrap(); let secret = K::generate(rng); let hrp = K::SECRET_BECH32_HRP.to_string(); Bech32::new(hrp, secret.to_base32()).unwrap() } fn gen_pub_key_bech32<K: AsymmetricKey>(priv_key_bech32: &[u5]) -> Bech32 { let priv_key_bytes = Vec::<u8>::from_base32(priv_key_bech32).unwrap(); let priv_key = K::secret_from_binary(&priv_key_bytes).unwrap(); let pub_key = K::compute_public(&priv_key); let hrp = K::PUBLIC_BECH32_HRP.to_string(); Bech32::new(hrp, pub_key.to_base32()).unwrap() }
{ eprintln!("{}", err); std::process::exit(1); }
conditional_block
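`Command::GeneratePubKey` above takes the private key either from the CLI argument or, failing that, from the first line on standard input. Here is a minimal, std-only sketch of that "argument or first stdin line" pattern in isolation (the helper name `read_key_input` is invented for this sketch and the error handling is simplified to `expect`):

// Take the value from the optional CLI argument, otherwise read one line from stdin.
use std::io::{self, BufRead};

fn read_key_input(arg: Option<String>) -> String {
    match arg {
        Some(s) => s,
        None => {
            let stdin = io::stdin();
            stdin
                .lock()
                .lines()
                .next()
                .expect("no input on stdin")
                .expect("failed to read stdin")
        }
    }
}

fn main() {
    let arg = std::env::args().nth(1);
    println!("got: {}", read_key_input(arg).trim());
}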
main.rs
#![cfg_attr(feature = "with-bench", feature(test))] extern crate actix_net; extern crate actix_web; extern crate bech32; extern crate bincode; extern crate bytes; extern crate cardano; extern crate cardano_storage; extern crate cbor_event; extern crate chain_addr; extern crate chain_core; extern crate chain_crypto; extern crate chain_impl_mockchain; extern crate chain_storage; extern crate chain_storage_sqlite; extern crate clap; extern crate cryptoxide; extern crate exe_common; extern crate futures; extern crate generic_array; extern crate http; extern crate sha2; #[macro_use] extern crate lazy_static; extern crate native_tls; extern crate network_core; extern crate network_grpc; extern crate poldercast; extern crate protocol_tokio as protocol; extern crate rand_chacha; extern crate tower_service; extern crate tokio; extern crate tokio_bus; #[cfg(test)] extern crate quickcheck; extern crate rand; extern crate regex; extern crate serde; #[macro_use] extern crate serde_derive; #[macro_use] extern crate serde_json; extern crate serde_yaml; #[macro_use(o)] extern crate slog; extern crate slog_async; extern crate slog_json; extern crate slog_term; extern crate structopt; #[cfg(test)] #[cfg(feature = "with-bench")] extern crate test; use std::io::{self, BufRead}; use std::sync::{mpsc::Receiver, Arc, Mutex, RwLock}; use chain_impl_mockchain::block::{message::MessageId, Message}; use futures::Future; use bech32::{u5, Bech32, FromBase32, ToBase32}; use blockcfg::{ genesis_data::ConfigGenesisData, genesis_data::GenesisData, mock::Mockchain as Cardano, }; use blockchain::{Blockchain, BlockchainR}; use chain_crypto::{ AsymmetricKey, Curve25519_2HashDH, Ed25519, Ed25519Bip32, Ed25519Extended, FakeMMM, }; use intercom::BlockMsg; use leadership::leadership_task; use rand::rngs::EntropyRng; use rand::SeedableRng; use rand_chacha::ChaChaRng; use rest::v0::node::stats::StatsCounter; use settings::{Command, GenPrivKeyType}; use transaction::{transaction_task, TPool}; use utils::task::{TaskBroadcastBox, Tasks}; #[macro_use] pub mod log_wrapper; pub mod blockcfg; pub mod blockchain; pub mod client; pub mod clock; // pub mod consensus; pub mod intercom; pub mod leadership; pub mod network; pub mod rest; pub mod secure; pub mod settings; pub mod state; pub mod transaction; pub mod utils; // TODO: consider an appropriate size for the broadcast buffer. // For the block task, there should hardly be a need to buffer more // than one block as the network task should be able to broadcast the // block notifications in time. const BLOCK_BUS_CAPACITY: usize = 2; pub type TODO = u32; fn block_task( blockchain: BlockchainR<Cardano>, _clock: clock::Clock, // FIXME: use it or lose it r: Receiver<BlockMsg<Cardano>>, stats_counter: StatsCounter, ) { let mut network_broadcast = TaskBroadcastBox::new(BLOCK_BUS_CAPACITY); loop { let bquery = r.recv().unwrap(); blockchain::process(&blockchain, bquery, &mut network_broadcast, &stats_counter); } } fn
( gd: &GenesisData, blockchain: &Blockchain<Cardano>, _settings: &settings::start::Settings, ) { println!( "k={} tip={}", gd.epoch_stability_depth, blockchain.get_tip() ); } // Expand the type with more variants // when it becomes necessary to represent different error cases. type Error = settings::Error; fn start(settings: settings::start::Settings) -> Result<(), Error> { settings.log_settings.apply(); let genesis_data = settings.read_genesis_data().unwrap(); let clock = { let initial_epoch = clock::ClockEpochConfiguration { slot_duration: genesis_data.slot_duration, slots_per_epoch: genesis_data.epoch_stability_depth * 10, }; clock::Clock::new(genesis_data.start_time, initial_epoch) }; let leader_secret = if let Some(secret_path) = &settings.leadership { Some(secure::NodeSecret::load_from_file(secret_path.as_path())) } else { None }; //let mut state = State::new(); let blockchain_data = Blockchain::new(genesis_data.clone(), &settings.storage); startup_info(&genesis_data, &blockchain_data, &settings); let blockchain = Arc::new(RwLock::new(blockchain_data)); let mut tasks = Tasks::new(); // # Bootstrap phase // // done at every startup: we need to bootstrap from whatever local state (including nothing) // to the latest network state (or close to latest). until this happen, we don't participate in the network // (no block creation) and our network connection(s) is only use to download data. // // Various aspects to do, similar to hermes: // * download all the existing blocks // * verify all the downloaded blocks // * network / peer discoveries (?) // * gclock sync? // Read block state // init storage // create blockchain storage network::bootstrap(&settings.network, blockchain.clone()); // # Active phase // // now that we have caught up (or almost caught up) we download blocks from neighbor nodes, // listen to announcements and actively listen to synchronous queries // // There's two simultaenous roles to this: // * Leader: decided after global or local evaluation. Need to create and propagate a block // * Non-Leader: always. receive (pushed-) blocks from other peers, investigate the correct blockchain updates // // Also receive synchronous connection queries: // * new nodes subscribing to updates (blocks, transactions) // * client GetBlocks/Headers... let tpool_data: TPool<MessageId, Message> = TPool::new(); let tpool = Arc::new(RwLock::new(tpool_data)); // Validation of consensus settings should make sure that we always have // non-empty selection data. 
let stats_counter = StatsCounter::default(); let transaction_task = { let tpool = tpool.clone(); let blockchain = blockchain.clone(); let stats_counter = stats_counter.clone(); tasks.task_create_with_inputs("transaction", move |r| { transaction_task(blockchain, tpool, r, stats_counter) }) }; let block_task = { let blockchain = blockchain.clone(); let clock = clock.clone(); let stats_counter = stats_counter.clone(); tasks.task_create_with_inputs("block", move |r| { block_task(blockchain, clock, r, stats_counter) }) }; let client_task = { let blockchain = blockchain.clone(); tasks.task_create_with_inputs("client-query", move |r| client::client_task(blockchain, r)) }; // ** TODO ** // setup_network // connection-events: // poll: // recv_transaction: // check_transaction_valid // add transaction to pool // recv_block: // check block valid // try to extend blockchain with block // update utxo state // flush transaction pool if any txid made it // get block(s): // try to answer // { let client_msgbox = client_task.clone(); let transaction_msgbox = transaction_task.clone(); let block_msgbox = block_task.clone(); let config = settings.network.clone(); let channels = network::Channels { client_box: client_msgbox, transaction_box: transaction_msgbox, block_box: block_msgbox, }; tasks.task_create("network", move || { network::run(config, channels); }); }; if let Some(secret) = leader_secret // == settings::start::Leadership::Yes // && leadership::selection::can_lead(&selection) == leadership::IsLeading::Yes { let tpool = tpool.clone(); let clock = clock.clone(); let block_task = block_task.clone(); let blockchain = blockchain.clone(); let leader_id = chain_impl_mockchain::leadership::LeaderId::Bft(secret.public().block_publickey.into()); let pk = chain_impl_mockchain::leadership::Leader::BftLeader(secret.block_privatekey); tasks.task_create("leadership", move || { leadership_task(leader_id, pk, tpool, blockchain, clock, block_task) }); }; let rest_server = match settings.rest { Some(ref rest) => { let context = rest::Context { stats_counter, blockchain, transaction_task: Arc::new(Mutex::new(transaction_task)), }; Some(rest::start_rest_server(rest, context)?) } None => None, }; // periodically cleanup (custom): // storage cleanup/packing // tpool.gc() // FIXME some sort of join so that the main thread does something... 
tasks.join(); if let Some(server) = rest_server { server.stop().wait().unwrap() } Ok(()) } fn main() { let command = match Command::load() { Err(err) => { eprintln!("{}", err); std::process::exit(1); } Ok(v) => v, }; match command { Command::Start(start_settings) => { if let Err(error) = start(start_settings) { eprintln!("jormungandr error: {}", error); std::process::exit(1); } } Command::GeneratePrivKey(args) => { let priv_key_bech32 = match args.key_type { GenPrivKeyType::Ed25519 => gen_priv_key_bech32::<Ed25519>(), GenPrivKeyType::Ed25519Bip32 => gen_priv_key_bech32::<Ed25519Bip32>(), GenPrivKeyType::Ed25519Extended => gen_priv_key_bech32::<Ed25519Extended>(), GenPrivKeyType::FakeMMM => gen_priv_key_bech32::<FakeMMM>(), GenPrivKeyType::Curve25519_2HashDH => gen_priv_key_bech32::<Curve25519_2HashDH>(), }; println!("{}", priv_key_bech32); } Command::GeneratePubKey(args) => { let stdin = io::stdin(); let bech32: Bech32 = if let Some(private_key_str) = args.private_key { private_key_str.parse().unwrap() } else { stdin .lock() .lines() .next() .unwrap() .unwrap() .parse() .unwrap() }; let pub_key_bech32 = match bech32.hrp() { Ed25519::SECRET_BECH32_HRP => gen_pub_key_bech32::<Ed25519>(bech32.data()), Ed25519Bip32::SECRET_BECH32_HRP => { gen_pub_key_bech32::<Ed25519Bip32>(bech32.data()) } Ed25519Extended::SECRET_BECH32_HRP => { gen_pub_key_bech32::<Ed25519Extended>(bech32.data()) } FakeMMM::SECRET_BECH32_HRP => gen_pub_key_bech32::<FakeMMM>(bech32.data()), Curve25519_2HashDH::SECRET_BECH32_HRP => { gen_pub_key_bech32::<Curve25519_2HashDH>(bech32.data()) } other => panic!("Unrecognized private key bech32 HRP: {}", other), }; println!("{}", pub_key_bech32); } Command::Init(init_settings) => { let genesis = ConfigGenesisData::from_genesis(GenesisData { address_discrimination: init_settings.address_discrimination, start_time: init_settings.blockchain_start, slot_duration: init_settings.slot_duration, epoch_stability_depth: init_settings.epoch_stability_depth, initial_utxos: init_settings.initial_utxos, bft_leaders: init_settings.bft_leaders, allow_account_creation: init_settings.allow_account_creation, linear_fees: init_settings.linear_fee, }); serde_yaml::to_writer(std::io::stdout(), &genesis).unwrap(); } } } fn gen_priv_key_bech32<K: AsymmetricKey>() -> Bech32 { let rng = ChaChaRng::from_rng(EntropyRng::new()).unwrap(); let secret = K::generate(rng); let hrp = K::SECRET_BECH32_HRP.to_string(); Bech32::new(hrp, secret.to_base32()).unwrap() } fn gen_pub_key_bech32<K: AsymmetricKey>(priv_key_bech32: &[u5]) -> Bech32 { let priv_key_bytes = Vec::<u8>::from_base32(priv_key_bech32).unwrap(); let priv_key = K::secret_from_binary(&priv_key_bytes).unwrap(); let pub_key = K::compute_public(&priv_key); let hrp = K::PUBLIC_BECH32_HRP.to_string(); Bech32::new(hrp, pub_key.to_base32()).unwrap() }
startup_info
identifier_name
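The masked identifier_name span in the record above is the function name startup_info: the prefix ends with fn, the middle supplies the name, and the suffix opens the parameter list. A minimal sketch of how the pieces recombine is given below; the stripped-down GenesisData and Blockchain types are illustrative assumptions, not the node's real definitions.

// Minimal sketch of how this identifier_name sample recombines.
// The types here are simplified stand-ins, not jormungandr's own.
struct GenesisData {
    epoch_stability_depth: u32,
}

struct Blockchain {
    tip: String,
}

impl Blockchain {
    fn get_tip(&self) -> &str {
        &self.tip
    }
}

// prefix: "fn" + middle: "startup_info" + suffix: "( gd: &GenesisData, ... )"
fn startup_info(gd: &GenesisData, blockchain: &Blockchain) {
    println!("k={} tip={}", gd.epoch_stability_depth, blockchain.get_tip());
}

fn main() {
    let gd = GenesisData { epoch_stability_depth: 10 };
    let chain = Blockchain { tip: "genesis".to_string() };
    startup_info(&gd, &chain);
}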
table.rs
use std::convert::TryFrom; use std::io::{self, Write}; #[derive(Debug)] #[derive(Clone)] pub struct Table { // A vector of columns (vectors) of rows. Each column represents an implicant set. entries: Vec<Vec<Row>>, // the SOP min-term list all_implicants: Vec<u32>, // bit size of the data bit_size: usize, } #[derive(Debug)] #[derive(Clone)] pub struct Row { // vecor of the binary bin: Vec<u32>, // the number of ones in the row ones: u32, // the implicant(s) within the row implicants: Vec<u32> } pub fn table_print(table: Vec<Row>) { let stdout = io::stdout(); let stdout_lock = stdout.lock(); let mut handle = io::BufWriter::new(stdout_lock); write!(handle, "\n").expect("write error"); for column in 0..table.len() { write!(handle, "|").expect("write error"); for row in 0..table[0].bin.len() { if table[column].bin[row] == 2 { write!(handle, " -").expect("write error"); } else { write!(handle, " {}", table[column].bin[row]).expect("write error"); } } write!(handle, " |\n").expect("write error"); } write!(handle, "\nNumber of prime essential implicants: {}\n", table.len()).expect("write error"); } // Reduce the table to prime, essential implicants pub fn reduce_to_prime_implicants (table: Table) -> Vec<Row> { // imps contains a vector of the found implicants; primed with the last row, last column let mut imps: Vec<u32> = Vec::new(); // Get the last column let mut end_column: usize = table.entries.len() -1; // Get the last column, minus the already primed imps. let mut end_row: usize = table.entries.last().unwrap().len() -1; // Vector of the Rows that are prime implicants, primed with the first one let mut prime_imps: Vec<Row> = Vec::new(); // Loop until all of the imps have been found. loop { // Check each implicant entry to see if it is already included for i in 0..table.entries[end_column][end_row].implicants.len() { // If not, then add all of the implicants in the entry and push the Row if! imps.contains(& table.entries[end_column][end_row].implicants[i]) { imps.extend(table.entries[end_column][end_row].implicants.clone()); prime_imps.push(table.entries[end_column][end_row].clone()); } } // Check to see if we are done if vec_in( & imps, & table.all_implicants) { break; } // Decriment the counters if end_row == 0 { end_column -= 1; end_row = table.entries[end_column].len() -1; } else { end_row -= 1; } } // Return prime implicants prime_imps } // Check to see if vec_b is contained within vec_a fn vec_in (vec_a: & Vec<u32>, vec_b: & Vec<u32>) -> bool { for i in 0..vec_b.len() { if! vec_a.contains(& vec_b[i]) { return false } } true } // If there is a dublicate, return true. 
Else, return false fn implicant_duplicate (imps_a: & Vec<Row>, imps_b: & Vec<u32>) -> bool { // Test to see if the implicant has already been found for b in 0..imps_a.len() { if vec_in(& imps_a[b].implicants, & imps_b) { return true; } } return false; } // Compare the implicants pub fn comparison (mut table: Table) -> Option<Table> { let mut bin: Vec<u32> = Vec::new(); let mut diffs: u32 = 0; let mut imps: Vec<Row> = Vec::new(); let mut temp_implicants: Vec<u32>; let mut dashes1: Vec<usize> = Vec::new(); let mut dashes2: Vec<usize> = Vec::new(); // For lazyness clone the set of data needed to increase readability...maybe should // be switched to refernces and dereferences let work_set: Vec<Row> = table.entries.last().unwrap().clone(); // For each Row in the last vector in table.entries for i in 0..(work_set.len()) { // Find the indexes of the dashes for n in 0..(work_set[i].bin.len()) { if work_set[i].bin[n] == 2 { dashes1.push(n); } } // For each Row that has one more "one" than the above Row for a in i..(work_set.len()) { dashes2.clear(); // This could be put in a function if work_set[a].ones == work_set[i].ones + 1 { // Get the indexes of the dashes for n in 0..(work_set[a].bin.len()) { if work_set[a].bin[n] == 2 { dashes2.push(n); } } // Compare the indexes of the dashes. If they are not the same, pass along if dashes1!= dashes2 { continue; } // Compare the differences for n in 0..(work_set[i].bin.len()) { if work_set[i].bin[n] == work_set[a].bin[n] { bin.push(work_set[i].bin[n]); } else { bin.push(2); diffs += 1; } // Check to see if the difference is greater than one if diffs > 1 { break; } } // Check to see if the differences is greater than one if diffs > 1
// Put together the base implicants of the candidate new implicant temp_implicants = [work_set[i].implicants.clone(), work_set[a].implicants.clone()].concat(); // LOgic not right!!!!!! // Test to see if the implicant has already been found // if Yes, Move on! if implicant_duplicate(& imps, & temp_implicants) { temp_implicants.clear(); bin.clear(); diffs = 0; continue; } // Push the row to the imps imps.push(Row { bin: bin.clone(), ones: work_set[i].ones, implicants: temp_implicants.clone() }); // clear out the variables temp_implicants.clear(); bin.clear(); diffs = 0; } // If the number of ones is greater than one differnt, break the loop else if work_set[a].ones >= work_set[i].ones + 1 { break; } } // Reset bin, diffs, dashes dashes1.clear(); } // return the result wrapped in an option. if imps.len() == 0 { None } else { table.entries.push(imps); Some(table) } } // Do the inital comparison throwing in the first set of dashes (2's...because u32 doesn't // include dashes) pub fn initial_comparison (mut table: Table) -> Table { // imps is a vector of rows that houses the new column of implicants let mut imps: Vec<Row> = Vec::new(); // num_dashes is a u32 that contains the number of dashes (don't cares) in a row. If // there is more or less than one then the rows cannot be combined. let mut num_dashes: u32 = 0; // temp is a vector of binary implicants. let mut temp: Vec<u32> = Vec::new(); // iterate over each entry in the array for i in 0..(table.entries[0].len()) { // For each array entry, compare it to all the entries following it. for n in i..table.entries[0].len() { // Only compare the entries that have one more "one" in it. if table.entries[0][n].ones == table.entries[0][i].ones + 1 { // Compare each entry for x in 0..(table.entries[0][i].bin.len()) { // if the entries match, push the entry to the temp vector if table.entries[0][i].bin[x] == table.entries[0][n].bin[x] { temp.push(table.entries[0][i].bin[x]); // if they don't match, increment the number of dashes and push 2 } else { num_dashes += 1; temp.push(2); } // Check to see if there is more than one dash and break if so if num_dashes > 1 { break; } } // if all of the bits have been compared, and there is only one dash, push // the new implicant into imps if num_dashes == 1 { imps.push(Row { bin: temp.clone(), ones: table.entries[0][n].ones, implicants: [table.entries[0][n].implicants.clone(), table.entries[0][i].implicants.clone()].concat() }) } // Rest for the next iteration num_dashes = 0; temp.clear(); } // check to see if the loop ieterations have passed the one different "one" else if table.entries[0][n].ones > table.entries[0][i].ones + 1 { break; } } } // Push the new implications into another column of the entries table. if! imps.is_empty() { table.entries.push(imps); } // return it! 
table } // Quickly sort the truth table entries by the number of ones they have in them pub fn quick_sort (table: Vec<Row>) -> Vec<Row> { // If the array has a length less than or equal to one then it is already sorted if & table.len() <= & 1 { return table } // delare the three vectors let mut smaller: Vec<Row> = Vec::new(); let mut equal: Vec<Row> = Vec::new(); let mut larger: Vec<Row> = Vec::new(); // Get the pivot in the middle of the array // The ends are bad choices because often the list is already almost sorted let pivot = & table[(& table.len()/2)].ones; // Iterate and devide the values into the respective vectors for x in & table { if x.ones < * pivot { smaller.push(x.clone()); } else if x.ones == * pivot { equal.push(x.clone()); } else { larger.push(x.clone()); } } // return recursivly. [quick_sort(smaller), equal, quick_sort(larger)].concat() } pub fn initialize_table (sop: & Vec<u32>) -> Table { // Get the bit size needed to hold all of the SOP implicants let bit_size = max_n(&sop); // initialze a temporary row let mut the_row = Row { bin: vec![0,0,0,0], ones: 0, implicants: vec![0], }; // initialize a vector of row let mut vec_of_rows: Vec<Row> = Vec::new(); // Throw a row into the vector of rows for i in sop { the_row.bin = dec_2_bin_vec(i, &bit_size); the_row.ones = sum_bin_vec(& the_row.bin); the_row.implicants = vec![*i]; vec_of_rows.push(the_row.clone()); } // Quick sort the rows by the number of ones vec_of_rows = quick_sort(vec_of_rows); // Create the table let the_table = Table { entries: vec![vec_of_rows], all_implicants: sop.clone(), bit_size: bit_size, }; // Return it!! the_table } fn sum_bin_vec (bin: & Vec<u32>) -> u32 { bin.iter().sum::<u32>() } fn dec_2_bin_vec (dec: & u32, bit_size: & usize) -> Vec<u32> { let mut temp: Vec<u32> = Vec::new(); let mut q = dec.clone(); // Iterate through each value and push a 1 or 0 respectivly while q > 0 { if q%2 == 1 { // if there is a remainder, push 1 temp.push(1) } else { // if there is no remainder, push 0 temp.push(0) } q = q/2; } // Fill in extra zeros as needed while temp.len() < * bit_size{ temp.push(0); } // reverse the values to put them in the correct order temp.reverse(); // return temp temp } // Find the needed number of bits fn max_n ( sop: & Vec<u32> ) -> usize { // Find the max value in the SOP value let mut max = & sop[0]; for i in sop.iter() { if i > max { max = & i; } } // Find the number of binary digits needed let mut int_value = 2; // the non remaining value let mut n2 = 1; // the number of digits while int_value <= *max { int_value = int_value*2; n2 += 1; } // Retrn a usize usize::try_from(n2).unwrap() }
{ continue; }
conditional_block
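The conditional_block middle above ({ continue; }) fills the guard that abandons a candidate pair once its rows differ in more than one bit. The self-contained sketch below shows that Quine-McCluskey merge step on plain bit vectors; the function and variable names are simplifications assumed for illustration, not the crate's own.

// Simplified sketch of the merge step whose guard the conditional_block
// middle fills in: rows differing in more than one bit position cannot be
// combined, so the caller moves on with `continue`.
fn combine(a: &[u32], b: &[u32]) -> Option<Vec<u32>> {
    let mut diffs = 0;
    let mut merged = Vec::with_capacity(a.len());
    for (&x, &y) in a.iter().zip(b.iter()) {
        if x == y {
            merged.push(x);
        } else {
            merged.push(2); // 2 marks a dash ("don't care")
            diffs += 1;
        }
    }
    if diffs > 1 {
        None
    } else {
        Some(merged)
    }
}

fn main() {
    let pairs = [
        (vec![0u32, 1, 0, 1], vec![0u32, 1, 1, 1]), // one bit apart -> merged
        (vec![0u32, 0, 0, 0], vec![1u32, 1, 0, 0]), // two bits apart -> skipped
    ];
    for (a, b) in &pairs {
        match combine(a, b) {
            Some(m) => println!("merged: {:?}", m),
            None => continue, // the role of the "{ continue; }" middle above
        }
    }
}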
table.rs
use std::convert::TryFrom; use std::io::{self, Write}; #[derive(Debug)] #[derive(Clone)] pub struct Table { // A vector of columns (vectors) of rows. Each column represents an implicant set. entries: Vec<Vec<Row>>, // the SOP min-term list all_implicants: Vec<u32>, // bit size of the data bit_size: usize, } #[derive(Debug)] #[derive(Clone)] pub struct Row { // vecor of the binary bin: Vec<u32>, // the number of ones in the row ones: u32, // the implicant(s) within the row implicants: Vec<u32> } pub fn table_print(table: Vec<Row>) { let stdout = io::stdout(); let stdout_lock = stdout.lock(); let mut handle = io::BufWriter::new(stdout_lock); write!(handle, "\n").expect("write error"); for column in 0..table.len() { write!(handle, "|").expect("write error"); for row in 0..table[0].bin.len() { if table[column].bin[row] == 2 { write!(handle, " -").expect("write error"); } else { write!(handle, " {}", table[column].bin[row]).expect("write error"); } } write!(handle, " |\n").expect("write error"); } write!(handle, "\nNumber of prime essential implicants: {}\n", table.len()).expect("write error"); } // Reduce the table to prime, essential implicants pub fn reduce_to_prime_implicants (table: Table) -> Vec<Row> { // imps contains a vector of the found implicants; primed with the last row, last column let mut imps: Vec<u32> = Vec::new(); // Get the last column let mut end_column: usize = table.entries.len() -1; // Get the last column, minus the already primed imps. let mut end_row: usize = table.entries.last().unwrap().len() -1; // Vector of the Rows that are prime implicants, primed with the first one let mut prime_imps: Vec<Row> = Vec::new(); // Loop until all of the imps have been found. loop { // Check each implicant entry to see if it is already included for i in 0..table.entries[end_column][end_row].implicants.len() { // If not, then add all of the implicants in the entry and push the Row if! imps.contains(& table.entries[end_column][end_row].implicants[i]) { imps.extend(table.entries[end_column][end_row].implicants.clone()); prime_imps.push(table.entries[end_column][end_row].clone()); } } // Check to see if we are done if vec_in( & imps, & table.all_implicants) { break; } // Decriment the counters if end_row == 0 { end_column -= 1; end_row = table.entries[end_column].len() -1; } else { end_row -= 1; } } // Return prime implicants prime_imps } // Check to see if vec_b is contained within vec_a fn vec_in (vec_a: & Vec<u32>, vec_b: & Vec<u32>) -> bool { for i in 0..vec_b.len() { if! vec_a.contains(& vec_b[i]) { return false } } true } // If there is a dublicate, return true. 
Else, return false fn implicant_duplicate (imps_a: & Vec<Row>, imps_b: & Vec<u32>) -> bool { // Test to see if the implicant has already been found for b in 0..imps_a.len() { if vec_in(& imps_a[b].implicants, & imps_b) { return true; } } return false; } // Compare the implicants pub fn comparison (mut table: Table) -> Option<Table> { let mut bin: Vec<u32> = Vec::new(); let mut diffs: u32 = 0; let mut imps: Vec<Row> = Vec::new(); let mut temp_implicants: Vec<u32>; let mut dashes1: Vec<usize> = Vec::new(); let mut dashes2: Vec<usize> = Vec::new(); // For lazyness clone the set of data needed to increase readability...maybe should // be switched to refernces and dereferences let work_set: Vec<Row> = table.entries.last().unwrap().clone(); // For each Row in the last vector in table.entries for i in 0..(work_set.len()) { // Find the indexes of the dashes for n in 0..(work_set[i].bin.len()) { if work_set[i].bin[n] == 2 { dashes1.push(n); } } // For each Row that has one more "one" than the above Row for a in i..(work_set.len()) { dashes2.clear(); // This could be put in a function if work_set[a].ones == work_set[i].ones + 1 { // Get the indexes of the dashes for n in 0..(work_set[a].bin.len()) { if work_set[a].bin[n] == 2 { dashes2.push(n); } } // Compare the indexes of the dashes. If they are not the same, pass along if dashes1!= dashes2 { continue; } // Compare the differences for n in 0..(work_set[i].bin.len()) { if work_set[i].bin[n] == work_set[a].bin[n] { bin.push(work_set[i].bin[n]); } else { bin.push(2); diffs += 1; } // Check to see if the difference is greater than one if diffs > 1 { break; } } // Check to see if the differences is greater than one if diffs > 1 { continue; } // Put together the base implicants of the candidate new implicant temp_implicants = [work_set[i].implicants.clone(), work_set[a].implicants.clone()].concat(); // LOgic not right!!!!!! // Test to see if the implicant has already been found // if Yes, Move on! if implicant_duplicate(& imps, & temp_implicants) { temp_implicants.clear(); bin.clear(); diffs = 0; continue; } // Push the row to the imps imps.push(Row { bin: bin.clone(), ones: work_set[i].ones, implicants: temp_implicants.clone() }); // clear out the variables temp_implicants.clear(); bin.clear(); diffs = 0; } // If the number of ones is greater than one differnt, break the loop else if work_set[a].ones >= work_set[i].ones + 1 { break; } } // Reset bin, diffs, dashes dashes1.clear(); } // return the result wrapped in an option. if imps.len() == 0 { None } else { table.entries.push(imps); Some(table) } } // Do the inital comparison throwing in the first set of dashes (2's...because u32 doesn't // include dashes) pub fn initial_comparison (mut table: Table) -> Table { // imps is a vector of rows that houses the new column of implicants let mut imps: Vec<Row> = Vec::new(); // num_dashes is a u32 that contains the number of dashes (don't cares) in a row. If // there is more or less than one then the rows cannot be combined. let mut num_dashes: u32 = 0; // temp is a vector of binary implicants. let mut temp: Vec<u32> = Vec::new(); // iterate over each entry in the array for i in 0..(table.entries[0].len()) { // For each array entry, compare it to all the entries following it. for n in i..table.entries[0].len() { // Only compare the entries that have one more "one" in it. 
if table.entries[0][n].ones == table.entries[0][i].ones + 1 { // Compare each entry for x in 0..(table.entries[0][i].bin.len()) { // if the entries match, push the entry to the temp vector if table.entries[0][i].bin[x] == table.entries[0][n].bin[x] { temp.push(table.entries[0][i].bin[x]); // if they don't match, increment the number of dashes and push 2 } else { num_dashes += 1; temp.push(2); } // Check to see if there is more than one dash and break if so if num_dashes > 1 { break; } } // if all of the bits have been compared, and there is only one dash, push // the new implicant into imps if num_dashes == 1 { imps.push(Row { bin: temp.clone(), ones: table.entries[0][n].ones, implicants: [table.entries[0][n].implicants.clone(), table.entries[0][i].implicants.clone()].concat() }) } // Rest for the next iteration num_dashes = 0; temp.clear(); } // check to see if the loop ieterations have passed the one different "one" else if table.entries[0][n].ones > table.entries[0][i].ones + 1 { break; } } } // Push the new implications into another column of the entries table. if! imps.is_empty() { table.entries.push(imps); } // return it! table } // Quickly sort the truth table entries by the number of ones they have in them pub fn quick_sort (table: Vec<Row>) -> Vec<Row>
} else if x.ones == * pivot { equal.push(x.clone()); } else { larger.push(x.clone()); } } // return recursivly. [quick_sort(smaller), equal, quick_sort(larger)].concat() } pub fn initialize_table (sop: & Vec<u32>) -> Table { // Get the bit size needed to hold all of the SOP implicants let bit_size = max_n(&sop); // initialze a temporary row let mut the_row = Row { bin: vec![0,0,0,0], ones: 0, implicants: vec![0], }; // initialize a vector of row let mut vec_of_rows: Vec<Row> = Vec::new(); // Throw a row into the vector of rows for i in sop { the_row.bin = dec_2_bin_vec(i, &bit_size); the_row.ones = sum_bin_vec(& the_row.bin); the_row.implicants = vec![*i]; vec_of_rows.push(the_row.clone()); } // Quick sort the rows by the number of ones vec_of_rows = quick_sort(vec_of_rows); // Create the table let the_table = Table { entries: vec![vec_of_rows], all_implicants: sop.clone(), bit_size: bit_size, }; // Return it!! the_table } fn sum_bin_vec (bin: & Vec<u32>) -> u32 { bin.iter().sum::<u32>() } fn dec_2_bin_vec (dec: & u32, bit_size: & usize) -> Vec<u32> { let mut temp: Vec<u32> = Vec::new(); let mut q = dec.clone(); // Iterate through each value and push a 1 or 0 respectivly while q > 0 { if q%2 == 1 { // if there is a remainder, push 1 temp.push(1) } else { // if there is no remainder, push 0 temp.push(0) } q = q/2; } // Fill in extra zeros as needed while temp.len() < * bit_size{ temp.push(0); } // reverse the values to put them in the correct order temp.reverse(); // return temp temp } // Find the needed number of bits fn max_n ( sop: & Vec<u32> ) -> usize { // Find the max value in the SOP value let mut max = & sop[0]; for i in sop.iter() { if i > max { max = & i; } } // Find the number of binary digits needed let mut int_value = 2; // the non remaining value let mut n2 = 1; // the number of digits while int_value <= *max { int_value = int_value*2; n2 += 1; } // Retrn a usize usize::try_from(n2).unwrap() }
{ // If the array has a length less than or equal to one then it is already sorted if & table.len() <= & 1 { return table } // declare the three vectors let mut smaller: Vec<Row> = Vec::new(); let mut equal: Vec<Row> = Vec::new(); let mut larger: Vec<Row> = Vec::new(); // Get the pivot in the middle of the array // The ends are bad choices because often the list is already almost sorted let pivot = & table[(& table.len()/2)].ones; // Iterate and divide the values into the respective vectors for x in & table { if x.ones < * pivot { smaller.push(x.clone());
identifier_body
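The identifier_body middle above is the opening of the quick_sort body. Below is a minimal, runnable sketch of the same three-way quicksort keyed on the ones count, pivoting on the middle element and recursing on the smaller and larger partitions; the reduced Row struct is an assumption made for the example.

// Self-contained sketch of the recovered function body: a three-way
// quicksort on the number of ones in each row.
#[derive(Clone, Debug)]
struct Row {
    ones: u32,
}

fn quick_sort(table: Vec<Row>) -> Vec<Row> {
    if table.len() <= 1 {
        return table;
    }
    // Pivot on the middle element; the ends are poor choices for nearly sorted input.
    let pivot = table[table.len() / 2].ones;
    let (mut smaller, mut equal, mut larger) = (Vec::new(), Vec::new(), Vec::new());
    for x in &table {
        if x.ones < pivot {
            smaller.push(x.clone());
        } else if x.ones == pivot {
            equal.push(x.clone());
        } else {
            larger.push(x.clone());
        }
    }
    // Recurse on the outer partitions and stitch the result back together.
    [quick_sort(smaller), equal, quick_sort(larger)].concat()
}

fn main() {
    let rows: Vec<Row> = [3u32, 1, 2, 0, 2].iter().map(|&ones| Row { ones }).collect();
    println!("{:?}", quick_sort(rows));
}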
table.rs
use std::convert::TryFrom; use std::io::{self, Write}; #[derive(Debug)] #[derive(Clone)] pub struct Table { // A vector of columns (vectors) of rows. Each column represents an implicant set. entries: Vec<Vec<Row>>, // the SOP min-term list all_implicants: Vec<u32>, // bit size of the data bit_size: usize, } #[derive(Debug)] #[derive(Clone)] pub struct Row { // vecor of the binary bin: Vec<u32>, // the number of ones in the row ones: u32, // the implicant(s) within the row implicants: Vec<u32> } pub fn table_print(table: Vec<Row>) { let stdout = io::stdout(); let stdout_lock = stdout.lock(); let mut handle = io::BufWriter::new(stdout_lock); write!(handle, "\n").expect("write error"); for column in 0..table.len() { write!(handle, "|").expect("write error"); for row in 0..table[0].bin.len() { if table[column].bin[row] == 2 { write!(handle, " -").expect("write error"); } else { write!(handle, " {}", table[column].bin[row]).expect("write error"); } } write!(handle, " |\n").expect("write error"); } write!(handle, "\nNumber of prime essential implicants: {}\n", table.len()).expect("write error"); } // Reduce the table to prime, essential implicants pub fn reduce_to_prime_implicants (table: Table) -> Vec<Row> { // imps contains a vector of the found implicants; primed with the last row, last column let mut imps: Vec<u32> = Vec::new(); // Get the last column let mut end_column: usize = table.entries.len() -1; // Get the last column, minus the already primed imps. let mut end_row: usize = table.entries.last().unwrap().len() -1; // Vector of the Rows that are prime implicants, primed with the first one let mut prime_imps: Vec<Row> = Vec::new(); // Loop until all of the imps have been found. loop { // Check each implicant entry to see if it is already included for i in 0..table.entries[end_column][end_row].implicants.len() { // If not, then add all of the implicants in the entry and push the Row if! imps.contains(& table.entries[end_column][end_row].implicants[i]) { imps.extend(table.entries[end_column][end_row].implicants.clone()); prime_imps.push(table.entries[end_column][end_row].clone()); } } // Check to see if we are done if vec_in( & imps, & table.all_implicants) { break; } // Decriment the counters if end_row == 0 { end_column -= 1; end_row = table.entries[end_column].len() -1; } else { end_row -= 1; } } // Return prime implicants prime_imps } // Check to see if vec_b is contained within vec_a fn vec_in (vec_a: & Vec<u32>, vec_b: & Vec<u32>) -> bool { for i in 0..vec_b.len() { if! vec_a.contains(& vec_b[i]) { return false } } true } // If there is a dublicate, return true. 
Else, return false fn implicant_duplicate (imps_a: & Vec<Row>, imps_b: & Vec<u32>) -> bool { // Test to see if the implicant has already been found for b in 0..imps_a.len() { if vec_in(& imps_a[b].implicants, & imps_b) { return true; } } return false; } // Compare the implicants pub fn comparison (mut table: Table) -> Option<Table> { let mut bin: Vec<u32> = Vec::new(); let mut diffs: u32 = 0; let mut imps: Vec<Row> = Vec::new(); let mut temp_implicants: Vec<u32>; let mut dashes1: Vec<usize> = Vec::new(); let mut dashes2: Vec<usize> = Vec::new(); // For lazyness clone the set of data needed to increase readability...maybe should // be switched to refernces and dereferences let work_set: Vec<Row> = table.entries.last().unwrap().clone(); // For each Row in the last vector in table.entries for i in 0..(work_set.len()) { // Find the indexes of the dashes for n in 0..(work_set[i].bin.len()) { if work_set[i].bin[n] == 2 { dashes1.push(n); } } // For each Row that has one more "one" than the above Row for a in i..(work_set.len()) { dashes2.clear(); // This could be put in a function if work_set[a].ones == work_set[i].ones + 1 { // Get the indexes of the dashes for n in 0..(work_set[a].bin.len()) { if work_set[a].bin[n] == 2 { dashes2.push(n); } } // Compare the indexes of the dashes. If they are not the same, pass along if dashes1!= dashes2 { continue; } // Compare the differences for n in 0..(work_set[i].bin.len()) { if work_set[i].bin[n] == work_set[a].bin[n] { bin.push(work_set[i].bin[n]); } else { bin.push(2); diffs += 1; } // Check to see if the difference is greater than one if diffs > 1 { break; } } // Check to see if the differences is greater than one if diffs > 1 { continue; } // Put together the base implicants of the candidate new implicant temp_implicants = [work_set[i].implicants.clone(), work_set[a].implicants.clone()].concat(); // LOgic not right!!!!!! // Test to see if the implicant has already been found // if Yes, Move on! if implicant_duplicate(& imps, & temp_implicants) { temp_implicants.clear(); bin.clear(); diffs = 0; continue; } // Push the row to the imps imps.push(Row { bin: bin.clone(), ones: work_set[i].ones, implicants: temp_implicants.clone() }); // clear out the variables temp_implicants.clear(); bin.clear(); diffs = 0; } // If the number of ones is greater than one differnt, break the loop else if work_set[a].ones >= work_set[i].ones + 1 { break; } } // Reset bin, diffs, dashes dashes1.clear(); } // return the result wrapped in an option. if imps.len() == 0 { None } else { table.entries.push(imps); Some(table) } } // Do the inital comparison throwing in the first set of dashes (2's...because u32 doesn't // include dashes) pub fn initial_comparison (mut table: Table) -> Table { // imps is a vector of rows that houses the new column of implicants let mut imps: Vec<Row> = Vec::new(); // num_dashes is a u32 that contains the number of dashes (don't cares) in a row. If // there is more or less than one then the rows cannot be combined. let mut num_dashes: u32 = 0; // temp is a vector of binary implicants. let mut temp: Vec<u32> = Vec::new(); // iterate over each entry in the array for i in 0..(table.entries[0].len()) { // For each array entry, compare it to all the entries following it. for n in i..table.entries[0].len() { // Only compare the entries that have one more "one" in it. 
if table.entries[0][n].ones == table.entries[0][i].ones + 1 { // Compare each entry for x in 0..(table.entries[0][i].bin.len()) { // if the entries match, push the entry to the temp vector if table.entries[0][i].bin[x] == table.entries[0][n].bin[x] { temp.push(table.entries[0][i].bin[x]); // if they don't match, increment the number of dashes and push 2 } else { num_dashes += 1; temp.push(2); } // Check to see if there is more than one dash and break if so if num_dashes > 1 { break; } } // if all of the bits have been compared, and there is only one dash, push // the new implicant into imps if num_dashes == 1 { imps.push(Row { bin: temp.clone(), ones: table.entries[0][n].ones, implicants: [table.entries[0][n].implicants.clone(), table.entries[0][i].implicants.clone()].concat() }) } // Rest for the next iteration num_dashes = 0; temp.clear(); } // check to see if the loop ieterations have passed the one different "one" else if table.entries[0][n].ones > table.entries[0][i].ones + 1 { break; } } } // Push the new implications into another column of the entries table. if! imps.is_empty() { table.entries.push(imps); } // return it! table } // Quickly sort the truth table entries by the number of ones they have in them pub fn quick_sort (table: Vec<Row>) -> Vec<Row> { // If the array has a length less than or equal to one then it is already sorted if & table.len() <= & 1 { return table } // delare the three vectors let mut smaller: Vec<Row> = Vec::new(); let mut equal: Vec<Row> = Vec::new(); let mut larger: Vec<Row> = Vec::new(); // Get the pivot in the middle of the array // The ends are bad choices because often the list is already almost sorted let pivot = & table[(& table.len()/2)].ones; // Iterate and devide the values into the respective vectors for x in & table { if x.ones < * pivot { smaller.push(x.clone()); } else if x.ones == * pivot { equal.push(x.clone()); } else { larger.push(x.clone()); } } // return recursivly. [quick_sort(smaller), equal, quick_sort(larger)].concat() } pub fn initialize_table (sop: & Vec<u32>) -> Table { // Get the bit size needed to hold all of the SOP implicants let bit_size = max_n(&sop); // initialze a temporary row let mut the_row = Row {
}; // initialize a vector of row let mut vec_of_rows: Vec<Row> = Vec::new(); // Throw a row into the vector of rows for i in sop { the_row.bin = dec_2_bin_vec(i, &bit_size); the_row.ones = sum_bin_vec(& the_row.bin); the_row.implicants = vec![*i]; vec_of_rows.push(the_row.clone()); } // Quick sort the rows by the number of ones vec_of_rows = quick_sort(vec_of_rows); // Create the table let the_table = Table { entries: vec![vec_of_rows], all_implicants: sop.clone(), bit_size: bit_size, }; // Return it!! the_table } fn sum_bin_vec (bin: & Vec<u32>) -> u32 { bin.iter().sum::<u32>() } fn dec_2_bin_vec (dec: & u32, bit_size: & usize) -> Vec<u32> { let mut temp: Vec<u32> = Vec::new(); let mut q = dec.clone(); // Iterate through each value and push a 1 or 0 respectively while q > 0 { if q%2 == 1 { // if there is a remainder, push 1 temp.push(1) } else { // if there is no remainder, push 0 temp.push(0) } q = q/2; } // Fill in extra zeros as needed while temp.len() < * bit_size{ temp.push(0); } // reverse the values to put them in the correct order temp.reverse(); // return temp temp } // Find the needed number of bits fn max_n ( sop: & Vec<u32> ) -> usize { // Find the max value in the SOP value let mut max = & sop[0]; for i in sop.iter() { if i > max { max = i; } } // Find the number of binary digits needed let mut int_value = 2; // the non remaining value let mut n2 = 1; // the number of digits while int_value <= *max { int_value = int_value*2; n2 += 1; } // Return a usize usize::try_from(n2).unwrap() }
bin: vec![0,0,0,0], ones: 0, implicants: vec![0],
random_line_split
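The random_line_split middle above carries the field initializers of a Row literal. The sketch that follows, assuming a simplified Row and a hypothetical dec_to_bin_vec helper, shows how such rows are typically seeded from a sum-of-products minterm list: binary expansion, popcount, and the originating minterm.

// Hedged sketch (simplified Row, hypothetical helper) of the row seeding
// the split line belongs to.
#[derive(Clone, Debug)]
struct Row {
    bin: Vec<u32>,
    ones: u32,
    implicants: Vec<u32>,
}

fn dec_to_bin_vec(dec: u32, bit_size: usize) -> Vec<u32> {
    // Collect bits least-significant first, then reverse for MSB-first order.
    let mut bits: Vec<u32> = (0..bit_size).map(|i| (dec >> i) & 1).collect();
    bits.reverse();
    bits
}

fn main() {
    let sop = [0u32, 1, 2, 5, 6, 7];
    let rows: Vec<Row> = sop
        .iter()
        .map(|&m| {
            let bin = dec_to_bin_vec(m, 3);
            Row { ones: bin.iter().sum(), bin, implicants: vec![m] }
        })
        .collect();
    for r in &rows {
        println!("{:?}", r);
    }
}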
table.rs
use std::convert::TryFrom; use std::io::{self, Write}; #[derive(Debug)] #[derive(Clone)] pub struct Table { // A vector of columns (vectors) of rows. Each column represents an implicant set. entries: Vec<Vec<Row>>, // the SOP min-term list all_implicants: Vec<u32>, // bit size of the data bit_size: usize, } #[derive(Debug)] #[derive(Clone)] pub struct Row { // vecor of the binary bin: Vec<u32>, // the number of ones in the row ones: u32, // the implicant(s) within the row implicants: Vec<u32> } pub fn table_print(table: Vec<Row>) { let stdout = io::stdout(); let stdout_lock = stdout.lock(); let mut handle = io::BufWriter::new(stdout_lock); write!(handle, "\n").expect("write error"); for column in 0..table.len() { write!(handle, "|").expect("write error"); for row in 0..table[0].bin.len() { if table[column].bin[row] == 2 { write!(handle, " -").expect("write error"); } else { write!(handle, " {}", table[column].bin[row]).expect("write error"); } } write!(handle, " |\n").expect("write error"); } write!(handle, "\nNumber of prime essential implicants: {}\n", table.len()).expect("write error"); } // Reduce the table to prime, essential implicants pub fn reduce_to_prime_implicants (table: Table) -> Vec<Row> { // imps contains a vector of the found implicants; primed with the last row, last column let mut imps: Vec<u32> = Vec::new(); // Get the last column let mut end_column: usize = table.entries.len() -1; // Get the last column, minus the already primed imps. let mut end_row: usize = table.entries.last().unwrap().len() -1; // Vector of the Rows that are prime implicants, primed with the first one let mut prime_imps: Vec<Row> = Vec::new(); // Loop until all of the imps have been found. loop { // Check each implicant entry to see if it is already included for i in 0..table.entries[end_column][end_row].implicants.len() { // If not, then add all of the implicants in the entry and push the Row if! imps.contains(& table.entries[end_column][end_row].implicants[i]) { imps.extend(table.entries[end_column][end_row].implicants.clone()); prime_imps.push(table.entries[end_column][end_row].clone()); } } // Check to see if we are done if vec_in( & imps, & table.all_implicants) { break; } // Decriment the counters if end_row == 0 { end_column -= 1; end_row = table.entries[end_column].len() -1; } else { end_row -= 1; } } // Return prime implicants prime_imps } // Check to see if vec_b is contained within vec_a fn vec_in (vec_a: & Vec<u32>, vec_b: & Vec<u32>) -> bool { for i in 0..vec_b.len() { if! vec_a.contains(& vec_b[i]) { return false } } true } // If there is a dublicate, return true. 
Else, return false fn implicant_duplicate (imps_a: & Vec<Row>, imps_b: & Vec<u32>) -> bool { // Test to see if the implicant has already been found for b in 0..imps_a.len() { if vec_in(& imps_a[b].implicants, & imps_b) { return true; } } return false; } // Compare the implicants pub fn comparison (mut table: Table) -> Option<Table> { let mut bin: Vec<u32> = Vec::new(); let mut diffs: u32 = 0; let mut imps: Vec<Row> = Vec::new(); let mut temp_implicants: Vec<u32>; let mut dashes1: Vec<usize> = Vec::new(); let mut dashes2: Vec<usize> = Vec::new(); // For lazyness clone the set of data needed to increase readability...maybe should // be switched to refernces and dereferences let work_set: Vec<Row> = table.entries.last().unwrap().clone(); // For each Row in the last vector in table.entries for i in 0..(work_set.len()) { // Find the indexes of the dashes for n in 0..(work_set[i].bin.len()) { if work_set[i].bin[n] == 2 { dashes1.push(n); } } // For each Row that has one more "one" than the above Row for a in i..(work_set.len()) { dashes2.clear(); // This could be put in a function if work_set[a].ones == work_set[i].ones + 1 { // Get the indexes of the dashes for n in 0..(work_set[a].bin.len()) { if work_set[a].bin[n] == 2 { dashes2.push(n); } } // Compare the indexes of the dashes. If they are not the same, pass along if dashes1!= dashes2 { continue; } // Compare the differences for n in 0..(work_set[i].bin.len()) { if work_set[i].bin[n] == work_set[a].bin[n] { bin.push(work_set[i].bin[n]); } else { bin.push(2); diffs += 1; } // Check to see if the difference is greater than one if diffs > 1 { break; } } // Check to see if the differences is greater than one if diffs > 1 { continue; } // Put together the base implicants of the candidate new implicant temp_implicants = [work_set[i].implicants.clone(), work_set[a].implicants.clone()].concat(); // LOgic not right!!!!!! // Test to see if the implicant has already been found // if Yes, Move on! if implicant_duplicate(& imps, & temp_implicants) { temp_implicants.clear(); bin.clear(); diffs = 0; continue; } // Push the row to the imps imps.push(Row { bin: bin.clone(), ones: work_set[i].ones, implicants: temp_implicants.clone() }); // clear out the variables temp_implicants.clear(); bin.clear(); diffs = 0; } // If the number of ones is greater than one differnt, break the loop else if work_set[a].ones >= work_set[i].ones + 1 { break; } } // Reset bin, diffs, dashes dashes1.clear(); } // return the result wrapped in an option. if imps.len() == 0 { None } else { table.entries.push(imps); Some(table) } } // Do the inital comparison throwing in the first set of dashes (2's...because u32 doesn't // include dashes) pub fn
(mut table: Table) -> Table { // imps is a vector of rows that houses the new column of implicants let mut imps: Vec<Row> = Vec::new(); // num_dashes is a u32 that contains the number of dashes (don't cares) in a row. If // there is more or less than one then the rows cannot be combined. let mut num_dashes: u32 = 0; // temp is a vector of binary implicants. let mut temp: Vec<u32> = Vec::new(); // iterate over each entry in the array for i in 0..(table.entries[0].len()) { // For each array entry, compare it to all the entries following it. for n in i..table.entries[0].len() { // Only compare the entries that have one more "one" in it. if table.entries[0][n].ones == table.entries[0][i].ones + 1 { // Compare each entry for x in 0..(table.entries[0][i].bin.len()) { // if the entries match, push the entry to the temp vector if table.entries[0][i].bin[x] == table.entries[0][n].bin[x] { temp.push(table.entries[0][i].bin[x]); // if they don't match, increment the number of dashes and push 2 } else { num_dashes += 1; temp.push(2); } // Check to see if there is more than one dash and break if so if num_dashes > 1 { break; } } // if all of the bits have been compared, and there is only one dash, push // the new implicant into imps if num_dashes == 1 { imps.push(Row { bin: temp.clone(), ones: table.entries[0][n].ones, implicants: [table.entries[0][n].implicants.clone(), table.entries[0][i].implicants.clone()].concat() }) } // Rest for the next iteration num_dashes = 0; temp.clear(); } // check to see if the loop ieterations have passed the one different "one" else if table.entries[0][n].ones > table.entries[0][i].ones + 1 { break; } } } // Push the new implications into another column of the entries table. if! imps.is_empty() { table.entries.push(imps); } // return it! table } // Quickly sort the truth table entries by the number of ones they have in them pub fn quick_sort (table: Vec<Row>) -> Vec<Row> { // If the array has a length less than or equal to one then it is already sorted if & table.len() <= & 1 { return table } // delare the three vectors let mut smaller: Vec<Row> = Vec::new(); let mut equal: Vec<Row> = Vec::new(); let mut larger: Vec<Row> = Vec::new(); // Get the pivot in the middle of the array // The ends are bad choices because often the list is already almost sorted let pivot = & table[(& table.len()/2)].ones; // Iterate and devide the values into the respective vectors for x in & table { if x.ones < * pivot { smaller.push(x.clone()); } else if x.ones == * pivot { equal.push(x.clone()); } else { larger.push(x.clone()); } } // return recursivly. [quick_sort(smaller), equal, quick_sort(larger)].concat() } pub fn initialize_table (sop: & Vec<u32>) -> Table { // Get the bit size needed to hold all of the SOP implicants let bit_size = max_n(&sop); // initialze a temporary row let mut the_row = Row { bin: vec![0,0,0,0], ones: 0, implicants: vec![0], }; // initialize a vector of row let mut vec_of_rows: Vec<Row> = Vec::new(); // Throw a row into the vector of rows for i in sop { the_row.bin = dec_2_bin_vec(i, &bit_size); the_row.ones = sum_bin_vec(& the_row.bin); the_row.implicants = vec![*i]; vec_of_rows.push(the_row.clone()); } // Quick sort the rows by the number of ones vec_of_rows = quick_sort(vec_of_rows); // Create the table let the_table = Table { entries: vec![vec_of_rows], all_implicants: sop.clone(), bit_size: bit_size, }; // Return it!! 
the_table } fn sum_bin_vec (bin: & Vec<u32>) -> u32 { bin.iter().sum::<u32>() } fn dec_2_bin_vec (dec: & u32, bit_size: & usize) -> Vec<u32> { let mut temp: Vec<u32> = Vec::new(); let mut q = dec.clone(); // Iterate through each value and push a 1 or 0 respectivly while q > 0 { if q%2 == 1 { // if there is a remainder, push 1 temp.push(1) } else { // if there is no remainder, push 0 temp.push(0) } q = q/2; } // Fill in extra zeros as needed while temp.len() < * bit_size{ temp.push(0); } // reverse the values to put them in the correct order temp.reverse(); // return temp temp } // Find the needed number of bits fn max_n ( sop: & Vec<u32> ) -> usize { // Find the max value in the SOP value let mut max = & sop[0]; for i in sop.iter() { if i > max { max = & i; } } // Find the number of binary digits needed let mut int_value = 2; // the non remaining value let mut n2 = 1; // the number of digits while int_value <= *max { int_value = int_value*2; n2 += 1; } // Retrn a usize usize::try_from(n2).unwrap() }
initial_comparison
identifier_name
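The recovered identifier above is initial_comparison, the first Quine-McCluskey pass over the truth table. A hedged, self-contained sketch of that pass on plain bit vectors follows; it pairs rows whose ones counts differ by exactly one and merges those that differ in exactly one bit, and its names are illustrative rather than the crate's.

// Illustrative first pass: compare each row against later rows with one more
// set bit, and emit a merged row with a dash (2) where they differ.
fn initial_pairs(rows: &[Vec<u32>]) -> Vec<Vec<u32>> {
    let ones = |r: &Vec<u32>| r.iter().filter(|&&b| b == 1).count();
    let mut merged = Vec::new();
    for (i, a) in rows.iter().enumerate() {
        for b in rows.iter().skip(i + 1) {
            if ones(b) != ones(a) + 1 {
                continue; // only adjacent ones-count groups can combine
            }
            let diff: Vec<usize> = (0..a.len()).filter(|&k| a[k] != b[k]).collect();
            if diff.len() == 1 {
                let mut row = a.clone();
                row[diff[0]] = 2; // dash / don't care
                merged.push(row);
            }
        }
    }
    merged
}

fn main() {
    let rows = vec![vec![0, 0, 0], vec![0, 0, 1], vec![0, 1, 1]];
    println!("{:?}", initial_pairs(&rows));
}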
utils.rs
use crate::config::{config, CheckLook, CritterRates, MovingRates, SenseRates}; use tnf_common::{ dll::param_getters, engine_types::{critter::Critter, map::Map}, primitives::{Hex, MaybeInvalid}, utils::map::{ get_distance_hex, server::{get_hex_in_path, get_hex_in_path_wall}, HexExt, }, }; #[no_mangle] pub extern "C" fn get_hex_coord_wall( map: Option<&MaybeInvalid<Map>>, hex_x: u16, hex_y: u16, end_x: &mut u16, end_y: &mut u16, angle: f32, dist: u32, ) { if let Some(map) = map.and_then(MaybeInvalid::validate) { let end_hex = get_hex_in_path_wall( map, Hex { x: hex_x, y: hex_y }, Hex { x: *end_x, y: *end_y, }, angle, dist, ); *end_x = end_hex.x; *end_y = end_hex.y; } } #[no_mangle] pub extern "C" fn get_hex_coord( map: Option<&MaybeInvalid<Map>>, hex_x: u16, hex_y: u16, end_x: &mut u16, end_y: &mut u16, angle: f32, dist: u32, ) { if let Some(map) = map.and_then(MaybeInvalid::validate) { let end_hex = get_hex_in_path( map, Hex { x: hex_x, y: hex_y }, Hex { x: *end_x, y: *end_y, }, angle, dist, ); *end_x = end_hex.x; *end_y = end_hex.y; } } /* #[no_mangle] pub extern "C" fn test_hex_flags( map: Option<&MaybeInvalid<Map>>, hex_x: u16, hex_y: u16, raked: bool, passed: bool, ) { if let Some(map) = map.and_then(MaybeInvalid::validate) { let hex = Hex { x: hex_x, y: hex_y }; let flags = map.get_hex_flags_with_proto(hex); let mut wrong = false; if raked!= map.is_hex_raked(hex) { wrong = true; print!("Raked - should be {}, but {}; ", raked,!raked); } if passed!= map.is_hex_passed(hex) { wrong = true; print!("Passed - should be {}, but {}; ", passed,!passed); } if wrong { println!("Hex: {:?}, flags: {:016b}", hex, flags); } } } */ macro_rules! validate { ($this:expr, $default:expr) => { match $this.and_then(MaybeInvalid::validate) { Some(this) => this, None => return $default, } }; } /* #[no_mangle] pub extern "C" fn is_gM(Critter& player)
{ if(!player.IsPlayer() ) return false; if(!isLoadedGMs ) LoadGMs( player, 0, 0, 0 ); if( player.StatBase[ ST_ACCESS_LEVEL ] < ACCESS_MODER && ( player.GetAccess() >= ACCESS_MODER || isPocketGM( player.Id ) ) ) player.StatBase[ ST_ACCESS_LEVEL ] = ACCESS_MODER; return player.StatBase[ ST_ACCESS_LEVEL ] >= ACCESS_MODER && ( checkVision? player.ParamBase[ QST_VISION ] > 0 : true ); }*/ #[no_mangle] pub extern "C" fn check_look( map: Option<&MaybeInvalid<Map>>, cr: Option<&MaybeInvalid<Critter>>, opponent: Option<&MaybeInvalid<Critter>>, ) -> bool { // Consider remove this let map = validate!(map, false); let cr = validate!(cr, false); let opponent = validate!(opponent, false); let config = &config().check_look; let smart = check_look_smart(config, map, cr, opponent); /*let old = check_look_old(config, map, cr, opponent); if old!= smart { println!("old!= smart: {:?}!= {:?}", old, smart); }*/ /*let mut config_default = CheckLook::default(); config_default.npc_fast.enable = config.npc_fast.enable; let smart_default = check_look_smart(&config_default, map, cr, opponent); if smart!= smart_default { println!("smart!= smart_default: {:?}!= {:?}", smart, smart_default); }*/ smart } fn check_look_smart(config: &CheckLook, map: &Map, cr: &Critter, opponent: &Critter) -> bool { if map.proto_id() == config.map_utility_start && opponent.is_player() && cr.is_player() &&!cr.have_gm_vision() { return false; } let cr_hex = cr.hex(); let opp_hex = opponent.hex(); let dist = cr_hex.get_distance(opp_hex); use tnf_common::{defines::CritterParam, defines_fo4rp::param::Param}; let cr_vision = cr.uparam(Param::QST_VISION); let opp_invis = opponent.uparam(Param::QST_INVIS); if cr_vision >= dist && opp_invis <= dist { return true; } if opp_invis!= 0 && (opp_invis - 1) < dist { // && (!( cr.IsPlayer() ) || cr.IsPlayer() &&!isGM( cr ) ) ) return false; } if opp_invis > dist || cr_vision >= dist { return true; } let cr_perception = param_getters::getParam_Perception(cr, 0) as u32; assert!(cr_perception >= 1 && cr_perception <= 10); fn basic_dist(rates: &CritterRates, perception: u32) -> u32 { rates.basic_bonus + perception * rates.basic_perception_rate } let self_is_npc = cr.is_npc(); if self_is_npc { if cr.is_dead() { return false; } let npc_fast = &config.npc_fast; if npc_fast.enable && cr.ProtoId >= npc_fast.fast_from && cr.ProtoId <= npc_fast.fast_to { return basic_dist(&config.senses[npc_fast.sense_index].npc, cr_perception) >= dist; } } let start_dir = cr_hex.get_direction(opp_hex); let mut look_dir = i8::abs(start_dir as i8 - cr.Dir as i8); //Направление if look_dir > 3 { look_dir = 6 - look_dir } assert!(look_dir >= 0 && look_dir <= 3); fn moving_rate(cr: &Critter, rates: &MovingRates) -> f32 { if cr.IsRuning { rates.running //} else if cr.is_walking() { // rates.walking } else { rates.still } } fn sense_mul(rates: &SenseRates, cr: &Critter, opponent: &Critter, look_dir: i8) -> f32 { rates.dir_rate[look_dir as usize] * moving_rate(cr, &rates.self_moving) * moving_rate(opponent, &rates.target_moving) } let senses: Vec<(f32, f32)> = config .senses .iter() .map(|sense| { let critter_rates = if self_is_npc { &sense.npc } else { &sense.player }; let basic_dist = basic_dist(critter_rates, cr_perception); let sense_mul = sense_mul(sense, cr, opponent, look_dir); let wall_mul = sense.wall_rate[cr_perception as usize - 1]; let clear_dist = basic_dist as f32 * sense_mul; //dbg!(clear_dist, wall_mul); (clear_dist, wall_mul) }) .collect(); let max_dist = senses .iter() .map(|(dist, _wall_mul)| *dist as u32) .max() 
.expect("At least one sense"); //dbg!(dist, max_dist); if dist > max_dist { return false; } let end_hex = get_hex_in_path(map, cr_hex, opp_hex, 0.0, dist); if dist > cr_hex.get_distance(end_hex) { for (basic_dist, wall_mull) in senses { //dbg!(basic_dist * wall_mull, dist); if (basic_dist * wall_mull) as u32 >= dist { return true; } } false } else { true } } fn _check_look_old(config: &CheckLook, map: &Map, cr: &Critter, opponent: &Critter) -> bool { if map.proto_id() == config.map_utility_start && opponent.is_player() && cr.is_player() &&!cr.have_gm_vision() { return false; } let cr_hex = cr.hex(); let opp_hex = opponent.hex(); let dist = cr_hex.get_distance(opp_hex); use tnf_common::{defines::CritterParam, defines_fo4rp::param::Param}; let cr_vision = cr.uparam(Param::QST_VISION); let cr_perception = param_getters::getParam_Perception(cr, 0) as u32; //cr.uparam(Param::ST_PERCEPTION); let opp_invis = opponent.uparam(Param::QST_INVIS); if cr_vision >= dist && opp_invis <= dist { return true; } if opp_invis!= 0 && (opp_invis - 1) < dist { // && (!( cr.IsPlayer() ) || cr.IsPlayer() &&!isGM( cr ) ) ) return false; } if opp_invis > dist || cr_vision >= dist { return true; } if cr.is_npc() { // упрощенный расчет для нпц, учитывает только дистанцию if cr.is_dead() { return false; } let cfg_npc = &config.npc_fast; if cfg_npc.enable && cr.ProtoId >= cfg_npc.fast_from && cr.ProtoId <= cfg_npc.fast_to { return (10 + cr_perception * 5) >= dist; } } let max_view = 10 + cr_perception * 5; let mut max_hear = 5 + cr_perception * 2; if cr.is_npc() { max_hear += 20; } let mut is_view = true; let mut is_hear = true; let start_dir = cr_hex.get_direction(opp_hex); let mut look_dir = i8::abs(start_dir as i8 - cr.Dir as i8); //Направление if look_dir > 3 { look_dir = 6 - look_dir } let (view_mul, mut hear_mul) = match look_dir { 0 => (1.0, 0.8), 1 => (0.8, 1.0), 2 => (0.5, 0.8), 3 => (0.4, 0.8), _ => unreachable!(), }; if opponent.IsRuning { hear_mul *= 3.0; } if cr.IsRuning { hear_mul *= 0.8; } let max_view = (max_view as f32 * view_mul) as u32; let tmp_max_hear = (max_hear as f32 * hear_mul) as u32; //dbg!(dist, max_view, tmp_max_hear); // new optimization: return early if distance larger than max_view and max_hear if dist > max_view && dist > tmp_max_hear { return false; } let end_hex = get_hex_in_path(map, cr_hex, opp_hex, 0.0, dist); if dist > cr_hex.get_distance(end_hex) { is_view = false; hear_mul *= match cr_perception { 1..=4 => 0.1, 5..=8 => 0.3, 9..=10 => 0.4, _ => 1.0, }; } if dist > max_view { is_view = false; } let max_hear = (max_hear as f32 * hear_mul) as u32; //dbg!(max_hear); if dist > max_hear { is_hear = false; } return is_view || is_hear; }
random_line_split
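This record's split falls inside the sense-distance logic of check_look_smart. The sketch below, with hypothetical names and simplified inputs, illustrates the same decision: take the maximum clear-air distance over all senses, reject targets beyond it, and otherwise require at least one sense to survive its wall multiplier when the line of sight is blocked.

// Hedged sketch of the visibility decision; (clear, wall) pairs stand in for
// the per-sense distances and wall attenuation rates computed above.
fn can_perceive(dist: u32, senses: &[(f32, f32)], line_blocked: bool) -> bool {
    let max_dist = senses
        .iter()
        .map(|(clear, _wall)| *clear as u32)
        .max()
        .expect("at least one sense");
    if dist > max_dist {
        return false; // farther than every sense can reach in clear air
    }
    if line_blocked {
        // A wall attenuates each sense by its own factor.
        senses.iter().any(|(clear, wall)| (clear * wall) as u32 >= dist)
    } else {
        true
    }
}

fn main() {
    let senses = [(20.0, 0.3), (12.0, 0.8)]; // (clear distance, wall rate)
    println!("{}", can_perceive(8, &senses, true));  // 12 * 0.8 >= 8 -> true
    println!("{}", can_perceive(15, &senses, true)); // 6 and 9 are both < 15 -> false
}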
utils.rs
use crate::config::{config, CheckLook, CritterRates, MovingRates, SenseRates}; use tnf_common::{ dll::param_getters, engine_types::{critter::Critter, map::Map}, primitives::{Hex, MaybeInvalid}, utils::map::{ get_distance_hex, server::{get_hex_in_path, get_hex_in_path_wall}, HexExt, }, }; #[no_mangle] pub extern "C" fn get_hex_coord_wall( map: Option<&MaybeInvalid<Map>>, hex_x: u16, hex_y: u16, end_x: &mut u16, end_y: &mut u16, angle: f32, dist: u32, ) { if let Some(map) = map.and_then(MaybeInvalid::validate) { let end_hex = get_hex_in_path_wall( map, Hex { x: hex_x, y: hex_y }, Hex { x: *end_x, y: *end_y, }, angle, dist, ); *end_x = end_hex.x; *end_y = end_hex.y; } } #[no_mangle] pub extern "C" fn get_hex_coord( map: Option<&MaybeInvalid<Map>>, hex_x: u16, hex_y: u16, end_x: &mut u16, end_y: &mut u16, angle: f32, dist: u32, ) { if let Some(map) = map.and_then(MaybeInvalid::validate) { let end_hex = get_hex_in_path( map, Hex { x: hex_x, y: hex_y }, Hex { x: *end_x, y: *end_y, }, angle, dist, ); *end_x = end_hex.x; *end_y = end_hex.y; } } /* #[no_mangle] pub extern "C" fn test_hex_flags( map: Option<&MaybeInvalid<Map>>, hex_x: u16, hex_y: u16, raked: bool, passed: bool, ) { if let Some(map) = map.and_then(MaybeInvalid::validate) { let hex = Hex { x: hex_x, y: hex_y }; let flags = map.get_hex_flags_with_proto(hex); let mut wrong = false; if raked!= map.is_hex_raked(hex) { wrong = true; print!("Raked - should be {}, but {}; ", raked,!raked); } if passed!= map.is_hex_passed(hex) { wrong = true; print!("Passed - should be {}, but {}; ", passed,!passed); } if wrong { println!("Hex: {:?}, flags: {:016b}", hex, flags); } } } */ macro_rules! validate { ($this:expr, $default:expr) => { match $this.and_then(MaybeInvalid::validate) { Some(this) => this, None => return $default, } }; } /* #[no_mangle] pub extern "C" fn is_gM(Critter& player) { if(!player.IsPlayer() ) return false; if(!isLoadedGMs ) LoadGMs( player, 0, 0, 0 ); if( player.StatBase[ ST_ACCESS_LEVEL ] < ACCESS_MODER && ( player.GetAccess() >= ACCESS_MODER || isPocketGM( player.Id ) ) ) player.StatBase[ ST_ACCESS_LEVEL ] = ACCESS_MODER; return player.StatBase[ ST_ACCESS_LEVEL ] >= ACCESS_MODER && ( checkVision? 
player.ParamBase[ QST_VISION ] > 0 : true ); }*/ #[no_mangle] pub extern "C" fn check_look( map: Option<&MaybeInvalid<Map>>, cr: Option<&MaybeInvalid<Critter>>, opponent: Option<&MaybeInvalid<Critter>>, ) -> bool { // Consider remove this let map = validate!(map, false); let cr = validate!(cr, false); let opponent = validate!(opponent, false); let config = &config().check_look; let smart = check_look_smart(config, map, cr, opponent); /*let old = check_look_old(config, map, cr, opponent); if old!= smart { println!("old!= smart: {:?}!= {:?}", old, smart); }*/ /*let mut config_default = CheckLook::default(); config_default.npc_fast.enable = config.npc_fast.enable; let smart_default = check_look_smart(&config_default, map, cr, opponent); if smart!= smart_default { println!("smart!= smart_default: {:?}!= {:?}", smart, smart_default); }*/ smart } fn check_look_smart(config: &CheckLook, map: &Map, cr: &Critter, opponent: &Critter) -> bool { if map.proto_id() == config.map_utility_start && opponent.is_player() && cr.is_player() &&!cr.have_gm_vision() { return false; } let cr_hex = cr.hex(); let opp_hex = opponent.hex(); let dist = cr_hex.get_distance(opp_hex); use tnf_common::{defines::CritterParam, defines_fo4rp::param::Param}; let cr_vision = cr.uparam(Param::QST_VISION); let opp_invis = opponent.uparam(Param::QST_INVIS); if cr_vision >= dist && opp_invis <= dist { return true; } if opp_invis!= 0 && (opp_invis - 1) < dist { // && (!( cr.IsPlayer() ) || cr.IsPlayer() &&!isGM( cr ) ) ) return false; } if opp_invis > dist || cr_vision >= dist { return true; } let cr_perception = param_getters::getParam_Perception(cr, 0) as u32; assert!(cr_perception >= 1 && cr_perception <= 10); fn basic_dist(rates: &CritterRates, perception: u32) -> u32 { rates.basic_bonus + perception * rates.basic_perception_rate } let self_is_npc = cr.is_npc(); if self_is_npc { if cr.is_dead() { return false; } let npc_fast = &config.npc_fast; if npc_fast.enable && cr.ProtoId >= npc_fast.fast_from && cr.ProtoId <= npc_fast.fast_to { return basic_dist(&config.senses[npc_fast.sense_index].npc, cr_perception) >= dist; } } let start_dir = cr_hex.get_direction(opp_hex); let mut look_dir = i8::abs(start_dir as i8 - cr.Dir as i8); //Направление if look_dir > 3 { look_dir = 6 - look_dir } assert!(look_dir >= 0 && look_dir <= 3); fn moving_rate
er, rates: &MovingRates) -> f32 { if cr.IsRuning { rates.running //} else if cr.is_walking() { // rates.walking } else { rates.still } } fn sense_mul(rates: &SenseRates, cr: &Critter, opponent: &Critter, look_dir: i8) -> f32 { rates.dir_rate[look_dir as usize] * moving_rate(cr, &rates.self_moving) * moving_rate(opponent, &rates.target_moving) } let senses: Vec<(f32, f32)> = config .senses .iter() .map(|sense| { let critter_rates = if self_is_npc { &sense.npc } else { &sense.player }; let basic_dist = basic_dist(critter_rates, cr_perception); let sense_mul = sense_mul(sense, cr, opponent, look_dir); let wall_mul = sense.wall_rate[cr_perception as usize - 1]; let clear_dist = basic_dist as f32 * sense_mul; //dbg!(clear_dist, wall_mul); (clear_dist, wall_mul) }) .collect(); let max_dist = senses .iter() .map(|(dist, _wall_mul)| *dist as u32) .max() .expect("At least one sense"); //dbg!(dist, max_dist); if dist > max_dist { return false; } let end_hex = get_hex_in_path(map, cr_hex, opp_hex, 0.0, dist); if dist > cr_hex.get_distance(end_hex) { for (basic_dist, wall_mull) in senses { //dbg!(basic_dist * wall_mull, dist); if (basic_dist * wall_mull) as u32 >= dist { return true; } } false } else { true } } fn _check_look_old(config: &CheckLook, map: &Map, cr: &Critter, opponent: &Critter) -> bool { if map.proto_id() == config.map_utility_start && opponent.is_player() && cr.is_player() &&!cr.have_gm_vision() { return false; } let cr_hex = cr.hex(); let opp_hex = opponent.hex(); let dist = cr_hex.get_distance(opp_hex); use tnf_common::{defines::CritterParam, defines_fo4rp::param::Param}; let cr_vision = cr.uparam(Param::QST_VISION); let cr_perception = param_getters::getParam_Perception(cr, 0) as u32; //cr.uparam(Param::ST_PERCEPTION); let opp_invis = opponent.uparam(Param::QST_INVIS); if cr_vision >= dist && opp_invis <= dist { return true; } if opp_invis!= 0 && (opp_invis - 1) < dist { // && (!( cr.IsPlayer() ) || cr.IsPlayer() &&!isGM( cr ) ) ) return false; } if opp_invis > dist || cr_vision >= dist { return true; } if cr.is_npc() { // упрощенный расчет для нпц, учитывает только дистанцию if cr.is_dead() { return false; } let cfg_npc = &config.npc_fast; if cfg_npc.enable && cr.ProtoId >= cfg_npc.fast_from && cr.ProtoId <= cfg_npc.fast_to { return (10 + cr_perception * 5) >= dist; } } let max_view = 10 + cr_perception * 5; let mut max_hear = 5 + cr_perception * 2; if cr.is_npc() { max_hear += 20; } let mut is_view = true; let mut is_hear = true; let start_dir = cr_hex.get_direction(opp_hex); let mut look_dir = i8::abs(start_dir as i8 - cr.Dir as i8); //Направление if look_dir > 3 { look_dir = 6 - look_dir } let (view_mul, mut hear_mul) = match look_dir { 0 => (1.0, 0.8), 1 => (0.8, 1.0), 2 => (0.5, 0.8), 3 => (0.4, 0.8), _ => unreachable!(), }; if opponent.IsRuning { hear_mul *= 3.0; } if cr.IsRuning { hear_mul *= 0.8; } let max_view = (max_view as f32 * view_mul) as u32; let tmp_max_hear = (max_hear as f32 * hear_mul) as u32; //dbg!(dist, max_view, tmp_max_hear); // new optimization: return early if distance larger than max_view and max_hear if dist > max_view && dist > tmp_max_hear { return false; } let end_hex = get_hex_in_path(map, cr_hex, opp_hex, 0.0, dist); if dist > cr_hex.get_distance(end_hex) { is_view = false; hear_mul *= match cr_perception { 1..=4 => 0.1, 5..=8 => 0.3, 9..=10 => 0.4, _ => 1.0, }; } if dist > max_view { is_view = false; } let max_hear = (max_hear as f32 * hear_mul) as u32; //dbg!(max_hear); if dist > max_hear { is_hear = false; } return is_view || is_hear; }
(cr: &Critt
identifier_name
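Each row of this dump splits one source file into a prefix, a middle span, and a suffix, plus a fim_type label such as identifier_name above. Below is a minimal sketch of how such a row reassembles into the original line of utils.rs; the FimSample struct and its field names are hypothetical, chosen only for illustration and not part of the dataset schema.

// Hypothetical in-memory form of one row of this dump; the real dataset
// stores the same three text spans plus the fim_type label.
struct FimSample {
    prefix: String,
    middle: String,
    suffix: String,
    fim_type: String,
}

impl FimSample {
    // Reassembling the original source is plain concatenation:
    // prefix + middle + suffix.
    fn reconstruct(&self) -> String {
        format!("{}{}{}", self.prefix, self.middle, self.suffix)
    }
}

fn main() {
    // The tail of the prefix, the middle, and the head of the suffix from
    // the identifier_name sample above.
    let sample = FimSample {
        prefix: "fn moving_rate".to_string(),
        middle: "(cr: &Critt".to_string(),
        suffix: "er, rates: &MovingRates) -> f32 {".to_string(),
        fim_type: "identifier_name".to_string(),
    };
    assert_eq!(
        sample.reconstruct(),
        "fn moving_rate(cr: &Critter, rates: &MovingRates) -> f32 {"
    );
    println!("fim_type = {}", sample.fim_type);
}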
utils.rs
use crate::config::{config, CheckLook, CritterRates, MovingRates, SenseRates}; use tnf_common::{ dll::param_getters, engine_types::{critter::Critter, map::Map}, primitives::{Hex, MaybeInvalid}, utils::map::{ get_distance_hex, server::{get_hex_in_path, get_hex_in_path_wall}, HexExt, }, }; #[no_mangle] pub extern "C" fn get_hex_coord_wall( map: Option<&MaybeInvalid<Map>>, hex_x: u16, hex_y: u16, end_x: &mut u16, end_y: &mut u16, angle: f32, dist: u32, ) { if let Some(map) = map.and_then(MaybeInvalid::validate) { let end_hex = get_hex_in_path_wall( map, Hex { x: hex_x, y: hex_y }, Hex { x: *end_x, y: *end_y, }, angle, dist, ); *end_x = end_hex.x; *end_y = end_hex.y; } } #[no_mangle] pub extern "C" fn get_hex_coord( map: Option<&MaybeInvalid<Map>>, hex_x: u16, hex_y: u16, end_x: &mut u16, end_y: &mut u16, angle: f32, dist: u32, ) { if let Some(map) = map.and_then(MaybeInvalid::validate) { let end_hex = get_hex_in_path( map, Hex { x: hex_x, y: hex_y }, Hex { x: *end_x, y: *end_y, }, angle, dist, ); *end_x = end_hex.x; *end_y = end_hex.y; } } /* #[no_mangle] pub extern "C" fn test_hex_flags( map: Option<&MaybeInvalid<Map>>, hex_x: u16, hex_y: u16, raked: bool, passed: bool, ) { if let Some(map) = map.and_then(MaybeInvalid::validate) { let hex = Hex { x: hex_x, y: hex_y }; let flags = map.get_hex_flags_with_proto(hex); let mut wrong = false; if raked!= map.is_hex_raked(hex) { wrong = true; print!("Raked - should be {}, but {}; ", raked,!raked); } if passed!= map.is_hex_passed(hex) { wrong = true; print!("Passed - should be {}, but {}; ", passed,!passed); } if wrong { println!("Hex: {:?}, flags: {:016b}", hex, flags); } } } */ macro_rules! validate { ($this:expr, $default:expr) => { match $this.and_then(MaybeInvalid::validate) { Some(this) => this, None => return $default, } }; } /* #[no_mangle] pub extern "C" fn is_gM(Critter& player) { if(!player.IsPlayer() ) return false; if(!isLoadedGMs ) LoadGMs( player, 0, 0, 0 ); if( player.StatBase[ ST_ACCESS_LEVEL ] < ACCESS_MODER && ( player.GetAccess() >= ACCESS_MODER || isPocketGM( player.Id ) ) ) player.StatBase[ ST_ACCESS_LEVEL ] = ACCESS_MODER; return player.StatBase[ ST_ACCESS_LEVEL ] >= ACCESS_MODER && ( checkVision? 
player.ParamBase[ QST_VISION ] > 0 : true ); }*/ #[no_mangle] pub extern "C" fn check_look( map: Option<&MaybeInvalid<Map>>, cr: Option<&MaybeInvalid<Critter>>, opponent: Option<&MaybeInvalid<Critter>>, ) -> bool { // Consider remove this let map = validate!(map, false); let cr = validate!(cr, false); let opponent = validate!(opponent, false); let config = &config().check_look; let smart = check_look_smart(config, map, cr, opponent); /*let old = check_look_old(config, map, cr, opponent); if old!= smart { println!("old!= smart: {:?}!= {:?}", old, smart); }*/ /*let mut config_default = CheckLook::default(); config_default.npc_fast.enable = config.npc_fast.enable; let smart_default = check_look_smart(&config_default, map, cr, opponent); if smart!= smart_default { println!("smart!= smart_default: {:?}!= {:?}", smart, smart_default); }*/ smart } fn check_look_smart(config: &CheckLook, map: &Map, cr: &Critter, opponent: &Critter) -> bool { if map.proto_id() == config.map_utility_start && opponent.is_player() && cr.is_player() &&!cr.have_gm_vision() { return false; } let cr_hex = cr.hex(); let opp_hex = opponent.hex(); let dist = cr_hex.get_distance(opp_hex); use tnf_common::{defines::CritterParam, defines_fo4rp::param::Param}; let cr_vision = cr.uparam(Param::QST_VISION); let opp_invis = opponent.uparam(Param::QST_INVIS); if cr_vision >= dist && opp_invis <= dist { return true; } if opp_invis!= 0 && (opp_invis - 1) < dist { // && (!( cr.IsPlayer() ) || cr.IsPlayer() &&!isGM( cr ) ) ) return false; } if opp_invis > dist || cr_vision >= dist { return true; } let cr_perception = param_getters::getParam_Perception(cr, 0) as u32; assert!(cr_perception >= 1 && cr_perception <= 10); fn basic_dist(rates: &CritterRates, perception: u32) -> u32 { rates.basic_bonus + perception * rates.basic_perception_rate } let self_is_npc = cr.is_npc(); if self_is_npc { if cr.is_dead() { return false; } let npc_fast = &config.npc_fast; if npc_fast.enable && cr.ProtoId >= npc_fast.fast_from && cr.ProtoId <= npc_fast.fast_to { return basic_dist(&config.senses[npc_fast.sense_index].npc, cr_perception) >= dist; } } let start_dir = cr_hex.get_direction(opp_hex); let mut look_dir = i8::abs(start_dir as i8 - cr.Dir as i8); //Направление if look_dir > 3 { look_dir = 6 - look_dir } assert!(look_dir >= 0 && look_dir <= 3); fn moving_rate(cr: &Critter, rates: &MovingRates) -> f32 { if cr.IsRuning { rates.running //} else if cr.is_walking() { // rates.walking } else { rates.still } } fn sense_mul(rates: &SenseRates, cr: &Critter, opponent: &Critter, look_dir: i8) -> f32 { r
enses: Vec<(f32, f32)> = config .senses .iter() .map(|sense| { let critter_rates = if self_is_npc { &sense.npc } else { &sense.player }; let basic_dist = basic_dist(critter_rates, cr_perception); let sense_mul = sense_mul(sense, cr, opponent, look_dir); let wall_mul = sense.wall_rate[cr_perception as usize - 1]; let clear_dist = basic_dist as f32 * sense_mul; //dbg!(clear_dist, wall_mul); (clear_dist, wall_mul) }) .collect(); let max_dist = senses .iter() .map(|(dist, _wall_mul)| *dist as u32) .max() .expect("At least one sense"); //dbg!(dist, max_dist); if dist > max_dist { return false; } let end_hex = get_hex_in_path(map, cr_hex, opp_hex, 0.0, dist); if dist > cr_hex.get_distance(end_hex) { for (basic_dist, wall_mull) in senses { //dbg!(basic_dist * wall_mull, dist); if (basic_dist * wall_mull) as u32 >= dist { return true; } } false } else { true } } fn _check_look_old(config: &CheckLook, map: &Map, cr: &Critter, opponent: &Critter) -> bool { if map.proto_id() == config.map_utility_start && opponent.is_player() && cr.is_player() &&!cr.have_gm_vision() { return false; } let cr_hex = cr.hex(); let opp_hex = opponent.hex(); let dist = cr_hex.get_distance(opp_hex); use tnf_common::{defines::CritterParam, defines_fo4rp::param::Param}; let cr_vision = cr.uparam(Param::QST_VISION); let cr_perception = param_getters::getParam_Perception(cr, 0) as u32; //cr.uparam(Param::ST_PERCEPTION); let opp_invis = opponent.uparam(Param::QST_INVIS); if cr_vision >= dist && opp_invis <= dist { return true; } if opp_invis!= 0 && (opp_invis - 1) < dist { // && (!( cr.IsPlayer() ) || cr.IsPlayer() &&!isGM( cr ) ) ) return false; } if opp_invis > dist || cr_vision >= dist { return true; } if cr.is_npc() { // упрощенный расчет для нпц, учитывает только дистанцию if cr.is_dead() { return false; } let cfg_npc = &config.npc_fast; if cfg_npc.enable && cr.ProtoId >= cfg_npc.fast_from && cr.ProtoId <= cfg_npc.fast_to { return (10 + cr_perception * 5) >= dist; } } let max_view = 10 + cr_perception * 5; let mut max_hear = 5 + cr_perception * 2; if cr.is_npc() { max_hear += 20; } let mut is_view = true; let mut is_hear = true; let start_dir = cr_hex.get_direction(opp_hex); let mut look_dir = i8::abs(start_dir as i8 - cr.Dir as i8); //Направление if look_dir > 3 { look_dir = 6 - look_dir } let (view_mul, mut hear_mul) = match look_dir { 0 => (1.0, 0.8), 1 => (0.8, 1.0), 2 => (0.5, 0.8), 3 => (0.4, 0.8), _ => unreachable!(), }; if opponent.IsRuning { hear_mul *= 3.0; } if cr.IsRuning { hear_mul *= 0.8; } let max_view = (max_view as f32 * view_mul) as u32; let tmp_max_hear = (max_hear as f32 * hear_mul) as u32; //dbg!(dist, max_view, tmp_max_hear); // new optimization: return early if distance larger than max_view and max_hear if dist > max_view && dist > tmp_max_hear { return false; } let end_hex = get_hex_in_path(map, cr_hex, opp_hex, 0.0, dist); if dist > cr_hex.get_distance(end_hex) { is_view = false; hear_mul *= match cr_perception { 1..=4 => 0.1, 5..=8 => 0.3, 9..=10 => 0.4, _ => 1.0, }; } if dist > max_view { is_view = false; } let max_hear = (max_hear as f32 * hear_mul) as u32; //dbg!(max_hear); if dist > max_hear { is_hear = false; } return is_view || is_hear; }
ates.dir_rate[look_dir as usize] * moving_rate(cr, &rates.self_moving) * moving_rate(opponent, &rates.target_moving) } let s
identifier_body
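The check_look_smart body completed in the sample above derives a base detection range from perception via basic_dist and only then applies direction, movement, and wall multipliers. Here is a self-contained sketch of just that base-range arithmetic; the numeric rates are placeholders lifted from the legacy formula in _check_look_old (10 + perception * 5 for sight, 5 + perception * 2 for hearing), not the live configuration.

// Minimal stand-in for the config CritterRates fields used by basic_dist.
struct CritterRates {
    basic_bonus: u32,
    basic_perception_rate: u32,
}

// Base detection distance before any direction, movement, or wall
// multipliers are applied.
fn basic_dist(rates: &CritterRates, perception: u32) -> u32 {
    rates.basic_bonus + perception * rates.basic_perception_rate
}

fn main() {
    let sight = CritterRates { basic_bonus: 10, basic_perception_rate: 5 };
    let hearing = CritterRates { basic_bonus: 5, basic_perception_rate: 2 };
    // With perception 6: sight reaches 10 + 6 * 5 = 40 hexes,
    // hearing reaches 5 + 6 * 2 = 17 hexes.
    assert_eq!(basic_dist(&sight, 6), 40);
    assert_eq!(basic_dist(&hearing, 6), 17);
}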
utils.rs
use crate::config::{config, CheckLook, CritterRates, MovingRates, SenseRates}; use tnf_common::{ dll::param_getters, engine_types::{critter::Critter, map::Map}, primitives::{Hex, MaybeInvalid}, utils::map::{ get_distance_hex, server::{get_hex_in_path, get_hex_in_path_wall}, HexExt, }, }; #[no_mangle] pub extern "C" fn get_hex_coord_wall( map: Option<&MaybeInvalid<Map>>, hex_x: u16, hex_y: u16, end_x: &mut u16, end_y: &mut u16, angle: f32, dist: u32, ) { if let Some(map) = map.and_then(MaybeInvalid::validate) { let end_hex = get_hex_in_path_wall( map, Hex { x: hex_x, y: hex_y }, Hex { x: *end_x, y: *end_y, }, angle, dist, ); *end_x = end_hex.x; *end_y = end_hex.y; } } #[no_mangle] pub extern "C" fn get_hex_coord( map: Option<&MaybeInvalid<Map>>, hex_x: u16, hex_y: u16, end_x: &mut u16, end_y: &mut u16, angle: f32, dist: u32, ) { if let Some(map) = map.and_then(MaybeInvalid::validate) { let end_hex = get_hex_in_path( map, Hex { x: hex_x, y: hex_y }, Hex { x: *end_x, y: *end_y, }, angle, dist, ); *end_x = end_hex.x; *end_y = end_hex.y; } } /* #[no_mangle] pub extern "C" fn test_hex_flags( map: Option<&MaybeInvalid<Map>>, hex_x: u16, hex_y: u16, raked: bool, passed: bool, ) { if let Some(map) = map.and_then(MaybeInvalid::validate) { let hex = Hex { x: hex_x, y: hex_y }; let flags = map.get_hex_flags_with_proto(hex); let mut wrong = false; if raked!= map.is_hex_raked(hex) { wrong = true; print!("Raked - should be {}, but {}; ", raked,!raked); } if passed!= map.is_hex_passed(hex) { wrong = true; print!("Passed - should be {}, but {}; ", passed,!passed); } if wrong { println!("Hex: {:?}, flags: {:016b}", hex, flags); } } } */ macro_rules! validate { ($this:expr, $default:expr) => { match $this.and_then(MaybeInvalid::validate) { Some(this) => this, None => return $default, } }; } /* #[no_mangle] pub extern "C" fn is_gM(Critter& player) { if(!player.IsPlayer() ) return false; if(!isLoadedGMs ) LoadGMs( player, 0, 0, 0 ); if( player.StatBase[ ST_ACCESS_LEVEL ] < ACCESS_MODER && ( player.GetAccess() >= ACCESS_MODER || isPocketGM( player.Id ) ) ) player.StatBase[ ST_ACCESS_LEVEL ] = ACCESS_MODER; return player.StatBase[ ST_ACCESS_LEVEL ] >= ACCESS_MODER && ( checkVision? 
player.ParamBase[ QST_VISION ] > 0 : true ); }*/ #[no_mangle] pub extern "C" fn check_look( map: Option<&MaybeInvalid<Map>>, cr: Option<&MaybeInvalid<Critter>>, opponent: Option<&MaybeInvalid<Critter>>, ) -> bool { // Consider remove this let map = validate!(map, false); let cr = validate!(cr, false); let opponent = validate!(opponent, false); let config = &config().check_look; let smart = check_look_smart(config, map, cr, opponent); /*let old = check_look_old(config, map, cr, opponent); if old!= smart { println!("old!= smart: {:?}!= {:?}", old, smart); }*/ /*let mut config_default = CheckLook::default(); config_default.npc_fast.enable = config.npc_fast.enable; let smart_default = check_look_smart(&config_default, map, cr, opponent); if smart!= smart_default { println!("smart!= smart_default: {:?}!= {:?}", smart, smart_default); }*/ smart } fn check_look_smart(config: &CheckLook, map: &Map, cr: &Critter, opponent: &Critter) -> bool { if map.proto_id() == config.map_utility_start && opponent.is_player() && cr.is_player() &&!cr.have_gm_vision() { return false; } let cr_hex = cr.hex(); let opp_hex = opponent.hex(); let dist = cr_hex.get_distance(opp_hex); use tnf_common::{defines::CritterParam, defines_fo4rp::param::Param}; let cr_vision = cr.uparam(Param::QST_VISION); let opp_invis = opponent.uparam(Param::QST_INVIS); if cr_vision >= dist && opp_invis <= dist { return true; } if opp_invis!= 0 && (opp_invis - 1) < dist { // && (!( cr.IsPlayer() ) || cr.IsPlayer() &&!isGM( cr ) ) ) return false; } if opp_invis > dist || cr_vision >= dist { return true; } let cr_perception = param_getters::getParam_Perception(cr, 0) as u32; assert!(cr_perception >= 1 && cr_perception <= 10); fn basic_dist(rates: &CritterRates, perception: u32) -> u32 { rates.basic_bonus + perception * rates.basic_perception_rate } let self_is_npc = cr.is_npc(); if self_is_npc { if cr.is_dead() { return false; } let npc_fast = &config.npc_fast; if npc_fast.enable && cr.ProtoId >= npc_fast.fast_from && cr.ProtoId <= npc_fast.fast_to { return basic_dist(&config.senses[npc_fast.sense_index].npc, cr_perception) >= dist; } } let start_dir = cr_hex.get_direction(opp_hex); let mut look_dir = i8::abs(start_dir as i8 - cr.Dir as i8); //Направление if look_dir > 3 { look_dir = 6 - look_dir } assert!(look_dir >= 0 && look_dir <= 3); fn moving_rate(cr: &Critter, rates: &MovingRates) -> f32 { if cr.IsRuning { rates.running //} else if cr.is_walking() { // rates.walking } else { rates.still } } fn sense_mul(rates: &SenseRates, cr: &Critter, opponent: &Critter, look_dir: i8) -> f32 { rates.dir_rate[look_dir as usize] * moving_rate(cr, &rates.self_moving) * moving_rate(opponent, &rates.target_moving) } let senses: Vec<(f32, f32)> = config .senses .iter() .map(|sense| { let critter_rates = if self_is_npc { &sense.npc } else { &sense.player }; let basic_dist = basic_dist(critter_rates, cr_perception); let sense_mul = sense_mul(sense, cr, opponent, look_dir); let wall_mul = sense.wall_rate[cr_perception as usize - 1]; let clear_dist = basic_dist as f32 * sense_mul; //dbg!(clear_dist, wall_mul); (clear_dist, wall_mul) }) .collect(); let max_dist = senses .iter() .map(|(dist, _wall_mul)| *dist as u32) .max() .expect("At least one sense"); //dbg!(dist, max_dist); if dist > max_dist { return false; } let end_hex = get_hex_in_path(map, cr_hex, opp_hex, 0.0, dist); if dist > cr_hex.get_distance(end_hex) { for (basic_dist, wall_mull) in senses { //dbg!(basic_dist * wall_mull, dist); if (basic_dist * wall_mull) as u32 >= dist { return true; } } false } 
else { true } } fn _check_look_old(config: &CheckLook, map: &Map, cr: &Critter, opponent: &Critter) -> bool { if map.proto_id() == config.map_utility_start && opponent.is_player() && cr.is_player() &&!cr.have_gm_vision() { return false; } let cr_hex = cr.hex(); let opp_hex = opponent.hex(); let dist = cr_hex.get_distance(opp_hex); use tnf_common::{defines::CritterParam, defines_fo4rp::param::Param}; let cr_vision = cr.uparam(Param::QST_VISION); let cr_perception = param_getters::getParam_Perception(cr, 0) as u32; //cr.uparam(Param::ST_PERCEPTION); let opp_invis = opponent.uparam(Param::QST_INVIS); if cr_vision >= dist && opp_invis <= dist { return true; } if opp_invis!= 0 && (opp_invis - 1) < dist { // && (!( cr.IsPlayer() ) || cr.IsPlayer() &&!isGM( cr ) ) ) return false; } if opp_invis > dist || cr_vision >= dist { return true; } if cr.is_npc() { // упрощенный расчет для нпц, учитывает только дистанцию if cr.is_dead() { return false; } let cfg_npc = &config.npc_fast; if cfg_npc.enable && cr.ProtoId >= cfg_npc.fast_from && cr.ProtoId <= cfg_npc.fast_to { return (10 + cr_perception * 5) >= dist; } } let max_view = 10 + cr_perception * 5; let mut max_hear = 5 + cr_perception * 2; if cr.is_npc() { max_hear += 20; } let mut is_view = true; let mut is_hear = true; let start_dir = cr_hex.get_direction(opp_hex); let mut look_dir = i8::abs(start_dir as i8 - cr.Dir as i8); //Направление if look_dir > 3 { look_dir = 6 - look_dir } let (view_mul, mut hear_mul) = match look_dir { 0 => (1.0, 0.8), 1 => (0.8, 1.0), 2 => (0.5, 0.8), 3 => (0.4, 0.8), _ => unreachable!(), }; if opponent.IsRuning { hear_mul *= 3.0; } if cr.IsRuning { hear_mul *= 0.8; } let max_view = (max_view as f32 * view_mul) as u32; let tmp_max_hear = (max_hear as f32 * hear_mul) as u32; //dbg!(dist, max_view, tmp_max_hear); // new optimization: return early if distance larger than max_view and max_hear if dist > max_view && dist > tmp_max_hear { return false; } let end_hex = get_hex_in_path(map
; if dist > cr_hex.get_distance(end_hex) { is_view = false; hear_mul *= match cr_perception { 1..=4 => 0.1, 5..=8 => 0.3, 9..=10 => 0.4, _ => 1.0, }; } if dist > max_view { is_view = false; } let max_hear = (max_hear as f32 * hear_mul) as u32; //dbg!(max_hear); if dist > max_hear { is_hear = false; } return is_view || is_hear; }
, cr_hex, opp_hex, 0.0, dist)
conditional_block
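Both versions of check_look fold the angle between the critter's facing and the direction to its target into an index in 0..=3 before looking up direction rates. The following standalone sketch shows that hex-direction math, assuming the engine's six facing directions 0..=5.

// Folds the difference between two hex directions (0..=5) into 0..=3:
// 0 means facing the target, 3 means facing directly away.
fn look_dir(facing: u8, dir_to_target: u8) -> i8 {
    let mut diff = (dir_to_target as i8 - facing as i8).abs();
    if diff > 3 {
        diff = 6 - diff;
    }
    diff
}

fn main() {
    assert_eq!(look_dir(2, 2), 0); // looking straight at the target
    assert_eq!(look_dir(0, 5), 1); // directions 0 and 5 are adjacent on the ring
    assert_eq!(look_dir(1, 4), 3); // target is directly behind
}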
fifo.rs
// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // //http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. //! This test demonstrates the DataFusion FIFO capabilities. //! #[cfg(not(target_os = "windows"))] #[cfg(test)] mod unix_test { use arrow::array::Array; use arrow::csv::ReaderBuilder; use arrow::datatypes::{DataType, Field, Schema}; use datafusion::test_util::register_unbounded_file_with_ordering; use datafusion::{ prelude::{CsvReadOptions, SessionConfig, SessionContext}, test_util::{aggr_test_schema, arrow_test_data}, }; use datafusion_common::{DataFusionError, Result}; use futures::StreamExt; use itertools::enumerate; use nix::sys::stat; use nix::unistd; use rstest::*; use std::fs::{File, OpenOptions}; use std::io::Write; use std::path::PathBuf; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use std::thread; use std::thread::JoinHandle; use std::time::{Duration, Instant}; use tempfile::TempDir; //! For the sake of the test, do not alter the numbers.! // Session batch size const TEST_BATCH_SIZE: usize = 20; // Number of lines written to FIFO const TEST_DATA_SIZE: usize = 20_000; // Number of lines what can be joined. Each joinable key produced 20 lines with // aggregate_test_100 dataset. We will use these joinable keys for understanding // incremental execution. const TEST_JOIN_RATIO: f64 = 0.01; fn create_fifo_file(tmp_dir: &TempDir, file_name: &str) -> Result<PathBuf> { let file_path = tmp_dir.path().join(file_name); // Simulate an infinite environment via a FIFO file if let Err(e) = unistd::mkfifo(&file_path, stat::Mode::S_IRWXU) { Err(DataFusionError::Execution(e.to_string())) } else { Ok(file_path) } } fn write_to_fifo( mut file: &File, line: &str, ref_time: Instant, broken_pipe_timeout: Duration, ) -> Result<()> { // We need to handle broken pipe error until the reader is ready. This // is why we use a timeout to limit the wait duration for the reader. // If the error is different than broken pipe, we fail immediately. while let Err(e) = file.write_all(line.as_bytes()) { if e.raw_os_error().unwrap() == 32 { let interval = Instant::now().duration_since(ref_time); if interval < broken_pipe_timeout { thread::sleep(Duration::from_millis(100)); continue; } } return Err(DataFusionError::Execution(e.to_string())); } Ok(()) } // This test provides a relatively realistic end-to-end scenario where // we swap join sides to accommodate a FIFO source. #[rstest] #[timeout(std::time::Duration::from_secs(30))] #[tokio::test(flavor = "multi_thread", worker_threads = 8)] async fn
( #[values(true, false)] unbounded_file: bool, ) -> Result<()> { // Create session context let config = SessionConfig::new() .with_batch_size(TEST_BATCH_SIZE) .with_collect_statistics(false) .with_target_partitions(1); let ctx = SessionContext::with_config(config); // To make unbounded deterministic let waiting = Arc::new(AtomicBool::new(unbounded_file)); // Create a new temporary FIFO file let tmp_dir = TempDir::new()?; let fifo_path = create_fifo_file(&tmp_dir, &format!("fifo_{unbounded_file:?}.csv"))?; // Execution can calculated at least one RecordBatch after the number of // "joinable_lines_length" lines are read. let joinable_lines_length = (TEST_DATA_SIZE as f64 * TEST_JOIN_RATIO).round() as usize; // The row including "a" is joinable with aggregate_test_100.c1 let joinable_iterator = (0..joinable_lines_length).map(|_| "a".to_string()); let second_joinable_iterator = (0..joinable_lines_length).map(|_| "a".to_string()); // The row including "zzz" is not joinable with aggregate_test_100.c1 let non_joinable_iterator = (0..(TEST_DATA_SIZE - joinable_lines_length)).map(|_| "zzz".to_string()); let lines = joinable_iterator .chain(non_joinable_iterator) .chain(second_joinable_iterator) .zip(0..TEST_DATA_SIZE) .map(|(a1, a2)| format!("{a1},{a2}\n")) .collect::<Vec<_>>(); // Create writing threads for the left and right FIFO files let task = create_writing_thread( fifo_path.clone(), "a1,a2\n".to_owned(), lines, waiting.clone(), joinable_lines_length, ); // Data Schema let schema = Arc::new(Schema::new(vec![ Field::new("a1", DataType::Utf8, false), Field::new("a2", DataType::UInt32, false), ])); // Create a file with bounded or unbounded flag. ctx.register_csv( "left", fifo_path.as_os_str().to_str().unwrap(), CsvReadOptions::new() .schema(schema.as_ref()) .mark_infinite(unbounded_file), ) .await?; // Register right table let schema = aggr_test_schema(); let test_data = arrow_test_data(); ctx.register_csv( "right", &format!("{test_data}/csv/aggregate_test_100.csv"), CsvReadOptions::new().schema(schema.as_ref()), ) .await?; // Execute the query let df = ctx.sql("SELECT t1.a2, t2.c1, t2.c4, t2.c5 FROM left as t1 JOIN right as t2 ON t1.a1 = t2.c1").await?; let mut stream = df.execute_stream().await?; while (stream.next().await).is_some() { waiting.store(false, Ordering::SeqCst); } task.join().unwrap(); Ok(()) } #[derive(Debug, PartialEq)] enum JoinOperation { LeftUnmatched, RightUnmatched, Equal, } fn create_writing_thread( file_path: PathBuf, header: String, lines: Vec<String>, waiting_lock: Arc<AtomicBool>, wait_until: usize, ) -> JoinHandle<()> { // Timeout for a long period of BrokenPipe error let broken_pipe_timeout = Duration::from_secs(10); // Spawn a new thread to write to the FIFO file thread::spawn(move || { let file = OpenOptions::new().write(true).open(file_path).unwrap(); // Reference time to use when deciding to fail the test let execution_start = Instant::now(); write_to_fifo(&file, &header, execution_start, broken_pipe_timeout).unwrap(); for (cnt, line) in enumerate(lines) { while waiting_lock.load(Ordering::SeqCst) && cnt > wait_until { thread::sleep(Duration::from_millis(50)); } write_to_fifo(&file, &line, execution_start, broken_pipe_timeout) .unwrap(); } drop(file); }) } // This test provides a relatively realistic end-to-end scenario where // we change the join into a [SymmetricHashJoin] to accommodate two // unbounded (FIFO) sources. 
#[rstest] #[timeout(std::time::Duration::from_secs(30))] #[tokio::test(flavor = "multi_thread")] #[ignore] async fn unbounded_file_with_symmetric_join() -> Result<()> { // Create session context let config = SessionConfig::new() .with_batch_size(TEST_BATCH_SIZE) .set_bool("datafusion.execution.coalesce_batches", false) .with_target_partitions(1); let ctx = SessionContext::with_config(config); // Tasks let mut tasks: Vec<JoinHandle<()>> = vec![]; // Join filter let a1_iter = 0..TEST_DATA_SIZE; // Join key let a2_iter = (0..TEST_DATA_SIZE).map(|x| x % 10); let lines = a1_iter .zip(a2_iter) .map(|(a1, a2)| format!("{a1},{a2}\n")) .collect::<Vec<_>>(); // Create a new temporary FIFO file let tmp_dir = TempDir::new()?; // Create a FIFO file for the left input source. let left_fifo = create_fifo_file(&tmp_dir, "left.csv")?; // Create a FIFO file for the right input source. let right_fifo = create_fifo_file(&tmp_dir, "right.csv")?; // Create a mutex for tracking if the right input source is waiting for data. let waiting = Arc::new(AtomicBool::new(true)); // Create writing threads for the left and right FIFO files tasks.push(create_writing_thread( left_fifo.clone(), "a1,a2\n".to_owned(), lines.clone(), waiting.clone(), TEST_BATCH_SIZE, )); tasks.push(create_writing_thread( right_fifo.clone(), "a1,a2\n".to_owned(), lines.clone(), waiting.clone(), TEST_BATCH_SIZE, )); // Create schema let schema = Arc::new(Schema::new(vec![ Field::new("a1", DataType::UInt32, false), Field::new("a2", DataType::UInt32, false), ])); // Specify the ordering: let file_sort_order = vec![[datafusion_expr::col("a1")] .into_iter() .map(|e| { let ascending = true; let nulls_first = false; e.sort(ascending, nulls_first) }) .collect::<Vec<_>>()]; // Set unbounded sorted files read configuration register_unbounded_file_with_ordering( &ctx, schema.clone(), &left_fifo, "left", file_sort_order.clone(), true, ) .await?; register_unbounded_file_with_ordering( &ctx, schema, &right_fifo, "right", file_sort_order, true, ) .await?; // Execute the query, with no matching rows. (since key is modulus 10) let df = ctx .sql( "SELECT t1.a1, t1.a2, t2.a1, t2.a2 FROM left as t1 FULL JOIN right as t2 ON t1.a2 = t2.a2 AND t1.a1 > t2.a1 + 4 AND t1.a1 < t2.a1 + 9", ) .await?; let mut stream = df.execute_stream().await?; let mut operations = vec![]; // Partial. while let Some(Ok(batch)) = stream.next().await { waiting.store(false, Ordering::SeqCst); let left_unmatched = batch.column(2).null_count(); let right_unmatched = batch.column(0).null_count(); let op = if left_unmatched == 0 && right_unmatched == 0 { JoinOperation::Equal } else if right_unmatched > left_unmatched { JoinOperation::RightUnmatched } else { JoinOperation::LeftUnmatched }; operations.push(op); } tasks.into_iter().for_each(|jh| jh.join().unwrap()); // The SymmetricHashJoin executor produces FULL join results at every // pruning, which happens before it reaches the end of input and more // than once. In this test, we feed partially joinable data to both // sides in order to ensure that left or right unmatched results are // generated more than once during the test. assert!( operations .iter() .filter(|&n| JoinOperation::RightUnmatched.eq(n)) .count() > 1 && operations .iter() .filter(|&n| JoinOperation::LeftUnmatched.eq(n)) .count() > 1 ); Ok(()) } /// It tests the INSERT INTO functionality. 
#[tokio::test] async fn test_sql_insert_into_fifo() -> Result<()> { // To make unbounded deterministic let waiting = Arc::new(AtomicBool::new(true)); let waiting_thread = waiting.clone(); // create local execution context let config = SessionConfig::new().with_batch_size(TEST_BATCH_SIZE); let ctx = SessionContext::with_config(config); // Create a new temporary FIFO file let tmp_dir = TempDir::new()?; let source_fifo_path = create_fifo_file(&tmp_dir, "source.csv")?; // Prevent move let (source_fifo_path_thread, source_display_fifo_path) = (source_fifo_path.clone(), source_fifo_path.display()); // Tasks let mut tasks: Vec<JoinHandle<()>> = vec![]; // TEST_BATCH_SIZE + 1 rows will be provided. However, after processing precisely // TEST_BATCH_SIZE rows, the program will pause and wait for a batch to be read in another // thread. This approach ensures that the pipeline remains unbroken. tasks.push(create_writing_thread( source_fifo_path_thread, "a1,a2\n".to_owned(), (0..TEST_DATA_SIZE) .map(|_| "a,1\n".to_string()) .collect::<Vec<_>>(), waiting, TEST_BATCH_SIZE, )); // Create a new temporary FIFO file let sink_fifo_path = create_fifo_file(&tmp_dir, "sink.csv")?; // Prevent move let (sink_fifo_path_thread, sink_display_fifo_path) = (sink_fifo_path.clone(), sink_fifo_path.display()); // Spawn a new thread to read sink EXTERNAL TABLE. tasks.push(thread::spawn(move || { let file = File::open(sink_fifo_path_thread).unwrap(); let schema = Arc::new(Schema::new(vec![ Field::new("a1", DataType::Utf8, false), Field::new("a2", DataType::UInt32, false), ])); let mut reader = ReaderBuilder::new(schema) .has_header(true) .with_batch_size(TEST_BATCH_SIZE) .build(file) .map_err(|e| DataFusionError::Internal(e.to_string())) .unwrap(); while let Some(Ok(_)) = reader.next() { waiting_thread.store(false, Ordering::SeqCst); } })); // register second csv file with the SQL (create an empty file if not found) ctx.sql(&format!( "CREATE EXTERNAL TABLE source_table ( a1 VARCHAR NOT NULL, a2 INT NOT NULL ) STORED AS CSV WITH HEADER ROW OPTIONS ('UNBOUNDED' 'TRUE') LOCATION '{source_display_fifo_path}'" )) .await?; // register csv file with the SQL ctx.sql(&format!( "CREATE EXTERNAL TABLE sink_table ( a1 VARCHAR NOT NULL, a2 INT NOT NULL ) STORED AS CSV WITH HEADER ROW OPTIONS ('UNBOUNDED' 'TRUE') LOCATION '{sink_display_fifo_path}'" )) .await?; let df = ctx .sql( "INSERT INTO sink_table SELECT a1, a2 FROM source_table", ) .await?; df.collect().await?; tasks.into_iter().for_each(|jh| jh.join().unwrap()); Ok(()) } }
unbounded_file_with_swapped_join
identifier_name
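The write_to_fifo helper in the test above retries on broken-pipe errors (raw OS error 32) until a reader opens the FIFO or a timeout expires. Below is a simplified, standalone sketch of that retry loop that matches on std's ErrorKind::BrokenPipe instead of the raw errno; it illustrates the pattern and is not the DataFusion helper itself. A writer thread would call it once per CSV line, the way create_writing_thread calls write_to_fifo.

use std::fs::File;
use std::io::{self, Write};
use std::thread;
use std::time::{Duration, Instant};

// Keeps retrying a write while the read end of the pipe is not yet open,
// giving up once `timeout` has elapsed; any other I/O error fails at once.
fn write_with_retry(mut file: &File, line: &str, timeout: Duration) -> io::Result<()> {
    let start = Instant::now();
    loop {
        match file.write_all(line.as_bytes()) {
            Ok(()) => return Ok(()),
            Err(e) if e.kind() == io::ErrorKind::BrokenPipe && start.elapsed() < timeout => {
                // Reader not attached yet: back off briefly and try again.
                thread::sleep(Duration::from_millis(100));
            }
            Err(e) => return Err(e),
        }
    }
}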
fifo.rs
// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // //http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. //! This test demonstrates the DataFusion FIFO capabilities. //! #[cfg(not(target_os = "windows"))] #[cfg(test)] mod unix_test { use arrow::array::Array; use arrow::csv::ReaderBuilder; use arrow::datatypes::{DataType, Field, Schema}; use datafusion::test_util::register_unbounded_file_with_ordering; use datafusion::{ prelude::{CsvReadOptions, SessionConfig, SessionContext}, test_util::{aggr_test_schema, arrow_test_data}, }; use datafusion_common::{DataFusionError, Result}; use futures::StreamExt; use itertools::enumerate; use nix::sys::stat; use nix::unistd; use rstest::*; use std::fs::{File, OpenOptions}; use std::io::Write; use std::path::PathBuf; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use std::thread; use std::thread::JoinHandle; use std::time::{Duration, Instant}; use tempfile::TempDir; //! For the sake of the test, do not alter the numbers.! // Session batch size const TEST_BATCH_SIZE: usize = 20; // Number of lines written to FIFO const TEST_DATA_SIZE: usize = 20_000; // Number of lines what can be joined. Each joinable key produced 20 lines with // aggregate_test_100 dataset. We will use these joinable keys for understanding // incremental execution. const TEST_JOIN_RATIO: f64 = 0.01; fn create_fifo_file(tmp_dir: &TempDir, file_name: &str) -> Result<PathBuf> { let file_path = tmp_dir.path().join(file_name); // Simulate an infinite environment via a FIFO file if let Err(e) = unistd::mkfifo(&file_path, stat::Mode::S_IRWXU) { Err(DataFusionError::Execution(e.to_string())) } else { Ok(file_path) } } fn write_to_fifo( mut file: &File, line: &str, ref_time: Instant, broken_pipe_timeout: Duration, ) -> Result<()> { // We need to handle broken pipe error until the reader is ready. This // is why we use a timeout to limit the wait duration for the reader. // If the error is different than broken pipe, we fail immediately. while let Err(e) = file.write_all(line.as_bytes()) { if e.raw_os_error().unwrap() == 32 { let interval = Instant::now().duration_since(ref_time); if interval < broken_pipe_timeout { thread::sleep(Duration::from_millis(100)); continue; } } return Err(DataFusionError::Execution(e.to_string())); } Ok(()) } // This test provides a relatively realistic end-to-end scenario where // we swap join sides to accommodate a FIFO source. 
#[rstest] #[timeout(std::time::Duration::from_secs(30))] #[tokio::test(flavor = "multi_thread", worker_threads = 8)] async fn unbounded_file_with_swapped_join( #[values(true, false)] unbounded_file: bool, ) -> Result<()> { // Create session context let config = SessionConfig::new() .with_batch_size(TEST_BATCH_SIZE) .with_collect_statistics(false) .with_target_partitions(1); let ctx = SessionContext::with_config(config); // To make unbounded deterministic let waiting = Arc::new(AtomicBool::new(unbounded_file)); // Create a new temporary FIFO file let tmp_dir = TempDir::new()?; let fifo_path = create_fifo_file(&tmp_dir, &format!("fifo_{unbounded_file:?}.csv"))?; // Execution can calculated at least one RecordBatch after the number of // "joinable_lines_length" lines are read. let joinable_lines_length = (TEST_DATA_SIZE as f64 * TEST_JOIN_RATIO).round() as usize; // The row including "a" is joinable with aggregate_test_100.c1 let joinable_iterator = (0..joinable_lines_length).map(|_| "a".to_string()); let second_joinable_iterator = (0..joinable_lines_length).map(|_| "a".to_string()); // The row including "zzz" is not joinable with aggregate_test_100.c1 let non_joinable_iterator = (0..(TEST_DATA_SIZE - joinable_lines_length)).map(|_| "zzz".to_string()); let lines = joinable_iterator .chain(non_joinable_iterator) .chain(second_joinable_iterator) .zip(0..TEST_DATA_SIZE) .map(|(a1, a2)| format!("{a1},{a2}\n")) .collect::<Vec<_>>(); // Create writing threads for the left and right FIFO files let task = create_writing_thread( fifo_path.clone(), "a1,a2\n".to_owned(), lines, waiting.clone(), joinable_lines_length, ); // Data Schema let schema = Arc::new(Schema::new(vec![ Field::new("a1", DataType::Utf8, false), Field::new("a2", DataType::UInt32, false), ])); // Create a file with bounded or unbounded flag. 
ctx.register_csv( "left", fifo_path.as_os_str().to_str().unwrap(), CsvReadOptions::new() .schema(schema.as_ref()) .mark_infinite(unbounded_file), ) .await?; // Register right table let schema = aggr_test_schema(); let test_data = arrow_test_data(); ctx.register_csv( "right", &format!("{test_data}/csv/aggregate_test_100.csv"), CsvReadOptions::new().schema(schema.as_ref()), ) .await?; // Execute the query let df = ctx.sql("SELECT t1.a2, t2.c1, t2.c4, t2.c5 FROM left as t1 JOIN right as t2 ON t1.a1 = t2.c1").await?; let mut stream = df.execute_stream().await?; while (stream.next().await).is_some() { waiting.store(false, Ordering::SeqCst); } task.join().unwrap(); Ok(()) } #[derive(Debug, PartialEq)] enum JoinOperation { LeftUnmatched, RightUnmatched, Equal, } fn create_writing_thread( file_path: PathBuf, header: String, lines: Vec<String>, waiting_lock: Arc<AtomicBool>, wait_until: usize, ) -> JoinHandle<()> { // Timeout for a long period of BrokenPipe error let broken_pipe_timeout = Duration::from_secs(10); // Spawn a new thread to write to the FIFO file thread::spawn(move || { let file = OpenOptions::new().write(true).open(file_path).unwrap(); // Reference time to use when deciding to fail the test let execution_start = Instant::now(); write_to_fifo(&file, &header, execution_start, broken_pipe_timeout).unwrap(); for (cnt, line) in enumerate(lines) { while waiting_lock.load(Ordering::SeqCst) && cnt > wait_until { thread::sleep(Duration::from_millis(50)); } write_to_fifo(&file, &line, execution_start, broken_pipe_timeout) .unwrap(); } drop(file); }) } // This test provides a relatively realistic end-to-end scenario where // we change the join into a [SymmetricHashJoin] to accommodate two // unbounded (FIFO) sources. #[rstest] #[timeout(std::time::Duration::from_secs(30))] #[tokio::test(flavor = "multi_thread")] #[ignore] async fn unbounded_file_with_symmetric_join() -> Result<()> { // Create session context let config = SessionConfig::new() .with_batch_size(TEST_BATCH_SIZE) .set_bool("datafusion.execution.coalesce_batches", false) .with_target_partitions(1); let ctx = SessionContext::with_config(config); // Tasks let mut tasks: Vec<JoinHandle<()>> = vec![]; // Join filter let a1_iter = 0..TEST_DATA_SIZE; // Join key let a2_iter = (0..TEST_DATA_SIZE).map(|x| x % 10); let lines = a1_iter .zip(a2_iter) .map(|(a1, a2)| format!("{a1},{a2}\n")) .collect::<Vec<_>>(); // Create a new temporary FIFO file let tmp_dir = TempDir::new()?; // Create a FIFO file for the left input source. let left_fifo = create_fifo_file(&tmp_dir, "left.csv")?; // Create a FIFO file for the right input source. let right_fifo = create_fifo_file(&tmp_dir, "right.csv")?; // Create a mutex for tracking if the right input source is waiting for data. 
let waiting = Arc::new(AtomicBool::new(true)); // Create writing threads for the left and right FIFO files tasks.push(create_writing_thread( left_fifo.clone(), "a1,a2\n".to_owned(), lines.clone(), waiting.clone(), TEST_BATCH_SIZE, )); tasks.push(create_writing_thread( right_fifo.clone(), "a1,a2\n".to_owned(), lines.clone(), waiting.clone(), TEST_BATCH_SIZE, )); // Create schema let schema = Arc::new(Schema::new(vec![ Field::new("a1", DataType::UInt32, false), Field::new("a2", DataType::UInt32, false), ])); // Specify the ordering: let file_sort_order = vec![[datafusion_expr::col("a1")] .into_iter() .map(|e| { let ascending = true; let nulls_first = false; e.sort(ascending, nulls_first) }) .collect::<Vec<_>>()]; // Set unbounded sorted files read configuration register_unbounded_file_with_ordering( &ctx, schema.clone(), &left_fifo, "left", file_sort_order.clone(), true, ) .await?; register_unbounded_file_with_ordering( &ctx, schema, &right_fifo, "right", file_sort_order, true, ) .await?; // Execute the query, with no matching rows. (since key is modulus 10) let df = ctx .sql( "SELECT t1.a1, t1.a2, t2.a1, t2.a2 FROM left as t1 FULL JOIN right as t2 ON t1.a2 = t2.a2 AND t1.a1 > t2.a1 + 4 AND t1.a1 < t2.a1 + 9", ) .await?; let mut stream = df.execute_stream().await?; let mut operations = vec![]; // Partial. while let Some(Ok(batch)) = stream.next().await { waiting.store(false, Ordering::SeqCst); let left_unmatched = batch.column(2).null_count(); let right_unmatched = batch.column(0).null_count(); let op = if left_unmatched == 0 && right_unmatched == 0 { JoinOperation::Equal } else if right_unmatched > left_unmatched
else { JoinOperation::LeftUnmatched }; operations.push(op); } tasks.into_iter().for_each(|jh| jh.join().unwrap()); // The SymmetricHashJoin executor produces FULL join results at every // pruning, which happens before it reaches the end of input and more // than once. In this test, we feed partially joinable data to both // sides in order to ensure that left or right unmatched results are // generated more than once during the test. assert!( operations .iter() .filter(|&n| JoinOperation::RightUnmatched.eq(n)) .count() > 1 && operations .iter() .filter(|&n| JoinOperation::LeftUnmatched.eq(n)) .count() > 1 ); Ok(()) } /// It tests the INSERT INTO functionality. #[tokio::test] async fn test_sql_insert_into_fifo() -> Result<()> { // To make unbounded deterministic let waiting = Arc::new(AtomicBool::new(true)); let waiting_thread = waiting.clone(); // create local execution context let config = SessionConfig::new().with_batch_size(TEST_BATCH_SIZE); let ctx = SessionContext::with_config(config); // Create a new temporary FIFO file let tmp_dir = TempDir::new()?; let source_fifo_path = create_fifo_file(&tmp_dir, "source.csv")?; // Prevent move let (source_fifo_path_thread, source_display_fifo_path) = (source_fifo_path.clone(), source_fifo_path.display()); // Tasks let mut tasks: Vec<JoinHandle<()>> = vec![]; // TEST_BATCH_SIZE + 1 rows will be provided. However, after processing precisely // TEST_BATCH_SIZE rows, the program will pause and wait for a batch to be read in another // thread. This approach ensures that the pipeline remains unbroken. tasks.push(create_writing_thread( source_fifo_path_thread, "a1,a2\n".to_owned(), (0..TEST_DATA_SIZE) .map(|_| "a,1\n".to_string()) .collect::<Vec<_>>(), waiting, TEST_BATCH_SIZE, )); // Create a new temporary FIFO file let sink_fifo_path = create_fifo_file(&tmp_dir, "sink.csv")?; // Prevent move let (sink_fifo_path_thread, sink_display_fifo_path) = (sink_fifo_path.clone(), sink_fifo_path.display()); // Spawn a new thread to read sink EXTERNAL TABLE. tasks.push(thread::spawn(move || { let file = File::open(sink_fifo_path_thread).unwrap(); let schema = Arc::new(Schema::new(vec![ Field::new("a1", DataType::Utf8, false), Field::new("a2", DataType::UInt32, false), ])); let mut reader = ReaderBuilder::new(schema) .has_header(true) .with_batch_size(TEST_BATCH_SIZE) .build(file) .map_err(|e| DataFusionError::Internal(e.to_string())) .unwrap(); while let Some(Ok(_)) = reader.next() { waiting_thread.store(false, Ordering::SeqCst); } })); // register second csv file with the SQL (create an empty file if not found) ctx.sql(&format!( "CREATE EXTERNAL TABLE source_table ( a1 VARCHAR NOT NULL, a2 INT NOT NULL ) STORED AS CSV WITH HEADER ROW OPTIONS ('UNBOUNDED' 'TRUE') LOCATION '{source_display_fifo_path}'" )) .await?; // register csv file with the SQL ctx.sql(&format!( "CREATE EXTERNAL TABLE sink_table ( a1 VARCHAR NOT NULL, a2 INT NOT NULL ) STORED AS CSV WITH HEADER ROW OPTIONS ('UNBOUNDED' 'TRUE') LOCATION '{sink_display_fifo_path}'" )) .await?; let df = ctx .sql( "INSERT INTO sink_table SELECT a1, a2 FROM source_table", ) .await?; df.collect().await?; tasks.into_iter().for_each(|jh| jh.join().unwrap()); Ok(()) } }
{ JoinOperation::RightUnmatched }
conditional_block
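The symmetric-join test classifies each FULL-join output batch by comparing null counts on the left- and right-side key columns. The same decision logic, detached from Arrow RecordBatches and reduced to plain counts so the three cases are easy to see:

#[derive(Debug, PartialEq)]
enum JoinOperation {
    LeftUnmatched,
    RightUnmatched,
    Equal,
}

// Classifies one FULL-join output batch from the null counts observed on
// the two sides, mirroring the branch in the test above.
fn classify(left_unmatched: usize, right_unmatched: usize) -> JoinOperation {
    if left_unmatched == 0 && right_unmatched == 0 {
        JoinOperation::Equal
    } else if right_unmatched > left_unmatched {
        JoinOperation::RightUnmatched
    } else {
        JoinOperation::LeftUnmatched
    }
}

fn main() {
    assert_eq!(classify(0, 0), JoinOperation::Equal);
    assert_eq!(classify(0, 3), JoinOperation::RightUnmatched);
    assert_eq!(classify(5, 1), JoinOperation::LeftUnmatched);
}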
fifo.rs
// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // //http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. //! This test demonstrates the DataFusion FIFO capabilities. //! #[cfg(not(target_os = "windows"))] #[cfg(test)] mod unix_test { use arrow::array::Array; use arrow::csv::ReaderBuilder; use arrow::datatypes::{DataType, Field, Schema}; use datafusion::test_util::register_unbounded_file_with_ordering; use datafusion::{ prelude::{CsvReadOptions, SessionConfig, SessionContext}, test_util::{aggr_test_schema, arrow_test_data}, }; use datafusion_common::{DataFusionError, Result}; use futures::StreamExt; use itertools::enumerate; use nix::sys::stat; use nix::unistd; use rstest::*; use std::fs::{File, OpenOptions}; use std::io::Write; use std::path::PathBuf; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use std::thread; use std::thread::JoinHandle; use std::time::{Duration, Instant}; use tempfile::TempDir; //! For the sake of the test, do not alter the numbers.! // Session batch size const TEST_BATCH_SIZE: usize = 20; // Number of lines written to FIFO const TEST_DATA_SIZE: usize = 20_000; // Number of lines what can be joined. Each joinable key produced 20 lines with // aggregate_test_100 dataset. We will use these joinable keys for understanding // incremental execution. const TEST_JOIN_RATIO: f64 = 0.01; fn create_fifo_file(tmp_dir: &TempDir, file_name: &str) -> Result<PathBuf> { let file_path = tmp_dir.path().join(file_name); // Simulate an infinite environment via a FIFO file if let Err(e) = unistd::mkfifo(&file_path, stat::Mode::S_IRWXU) { Err(DataFusionError::Execution(e.to_string())) } else { Ok(file_path) } } fn write_to_fifo( mut file: &File, line: &str, ref_time: Instant, broken_pipe_timeout: Duration, ) -> Result<()> { // We need to handle broken pipe error until the reader is ready. This // is why we use a timeout to limit the wait duration for the reader. // If the error is different than broken pipe, we fail immediately. while let Err(e) = file.write_all(line.as_bytes()) { if e.raw_os_error().unwrap() == 32 { let interval = Instant::now().duration_since(ref_time); if interval < broken_pipe_timeout { thread::sleep(Duration::from_millis(100)); continue;
} // This test provides a relatively realistic end-to-end scenario where // we swap join sides to accommodate a FIFO source. #[rstest] #[timeout(std::time::Duration::from_secs(30))] #[tokio::test(flavor = "multi_thread", worker_threads = 8)] async fn unbounded_file_with_swapped_join( #[values(true, false)] unbounded_file: bool, ) -> Result<()> { // Create session context let config = SessionConfig::new() .with_batch_size(TEST_BATCH_SIZE) .with_collect_statistics(false) .with_target_partitions(1); let ctx = SessionContext::with_config(config); // To make unbounded deterministic let waiting = Arc::new(AtomicBool::new(unbounded_file)); // Create a new temporary FIFO file let tmp_dir = TempDir::new()?; let fifo_path = create_fifo_file(&tmp_dir, &format!("fifo_{unbounded_file:?}.csv"))?; // Execution can calculated at least one RecordBatch after the number of // "joinable_lines_length" lines are read. let joinable_lines_length = (TEST_DATA_SIZE as f64 * TEST_JOIN_RATIO).round() as usize; // The row including "a" is joinable with aggregate_test_100.c1 let joinable_iterator = (0..joinable_lines_length).map(|_| "a".to_string()); let second_joinable_iterator = (0..joinable_lines_length).map(|_| "a".to_string()); // The row including "zzz" is not joinable with aggregate_test_100.c1 let non_joinable_iterator = (0..(TEST_DATA_SIZE - joinable_lines_length)).map(|_| "zzz".to_string()); let lines = joinable_iterator .chain(non_joinable_iterator) .chain(second_joinable_iterator) .zip(0..TEST_DATA_SIZE) .map(|(a1, a2)| format!("{a1},{a2}\n")) .collect::<Vec<_>>(); // Create writing threads for the left and right FIFO files let task = create_writing_thread( fifo_path.clone(), "a1,a2\n".to_owned(), lines, waiting.clone(), joinable_lines_length, ); // Data Schema let schema = Arc::new(Schema::new(vec![ Field::new("a1", DataType::Utf8, false), Field::new("a2", DataType::UInt32, false), ])); // Create a file with bounded or unbounded flag. 
ctx.register_csv( "left", fifo_path.as_os_str().to_str().unwrap(), CsvReadOptions::new() .schema(schema.as_ref()) .mark_infinite(unbounded_file), ) .await?; // Register right table let schema = aggr_test_schema(); let test_data = arrow_test_data(); ctx.register_csv( "right", &format!("{test_data}/csv/aggregate_test_100.csv"), CsvReadOptions::new().schema(schema.as_ref()), ) .await?; // Execute the query let df = ctx.sql("SELECT t1.a2, t2.c1, t2.c4, t2.c5 FROM left as t1 JOIN right as t2 ON t1.a1 = t2.c1").await?; let mut stream = df.execute_stream().await?; while (stream.next().await).is_some() { waiting.store(false, Ordering::SeqCst); } task.join().unwrap(); Ok(()) } #[derive(Debug, PartialEq)] enum JoinOperation { LeftUnmatched, RightUnmatched, Equal, } fn create_writing_thread( file_path: PathBuf, header: String, lines: Vec<String>, waiting_lock: Arc<AtomicBool>, wait_until: usize, ) -> JoinHandle<()> { // Timeout for a long period of BrokenPipe error let broken_pipe_timeout = Duration::from_secs(10); // Spawn a new thread to write to the FIFO file thread::spawn(move || { let file = OpenOptions::new().write(true).open(file_path).unwrap(); // Reference time to use when deciding to fail the test let execution_start = Instant::now(); write_to_fifo(&file, &header, execution_start, broken_pipe_timeout).unwrap(); for (cnt, line) in enumerate(lines) { while waiting_lock.load(Ordering::SeqCst) && cnt > wait_until { thread::sleep(Duration::from_millis(50)); } write_to_fifo(&file, &line, execution_start, broken_pipe_timeout) .unwrap(); } drop(file); }) } // This test provides a relatively realistic end-to-end scenario where // we change the join into a [SymmetricHashJoin] to accommodate two // unbounded (FIFO) sources. #[rstest] #[timeout(std::time::Duration::from_secs(30))] #[tokio::test(flavor = "multi_thread")] #[ignore] async fn unbounded_file_with_symmetric_join() -> Result<()> { // Create session context let config = SessionConfig::new() .with_batch_size(TEST_BATCH_SIZE) .set_bool("datafusion.execution.coalesce_batches", false) .with_target_partitions(1); let ctx = SessionContext::with_config(config); // Tasks let mut tasks: Vec<JoinHandle<()>> = vec![]; // Join filter let a1_iter = 0..TEST_DATA_SIZE; // Join key let a2_iter = (0..TEST_DATA_SIZE).map(|x| x % 10); let lines = a1_iter .zip(a2_iter) .map(|(a1, a2)| format!("{a1},{a2}\n")) .collect::<Vec<_>>(); // Create a new temporary FIFO file let tmp_dir = TempDir::new()?; // Create a FIFO file for the left input source. let left_fifo = create_fifo_file(&tmp_dir, "left.csv")?; // Create a FIFO file for the right input source. let right_fifo = create_fifo_file(&tmp_dir, "right.csv")?; // Create a mutex for tracking if the right input source is waiting for data. 
let waiting = Arc::new(AtomicBool::new(true)); // Create writing threads for the left and right FIFO files tasks.push(create_writing_thread( left_fifo.clone(), "a1,a2\n".to_owned(), lines.clone(), waiting.clone(), TEST_BATCH_SIZE, )); tasks.push(create_writing_thread( right_fifo.clone(), "a1,a2\n".to_owned(), lines.clone(), waiting.clone(), TEST_BATCH_SIZE, )); // Create schema let schema = Arc::new(Schema::new(vec![ Field::new("a1", DataType::UInt32, false), Field::new("a2", DataType::UInt32, false), ])); // Specify the ordering: let file_sort_order = vec![[datafusion_expr::col("a1")] .into_iter() .map(|e| { let ascending = true; let nulls_first = false; e.sort(ascending, nulls_first) }) .collect::<Vec<_>>()]; // Set unbounded sorted files read configuration register_unbounded_file_with_ordering( &ctx, schema.clone(), &left_fifo, "left", file_sort_order.clone(), true, ) .await?; register_unbounded_file_with_ordering( &ctx, schema, &right_fifo, "right", file_sort_order, true, ) .await?; // Execute the query, with no matching rows. (since key is modulus 10) let df = ctx .sql( "SELECT t1.a1, t1.a2, t2.a1, t2.a2 FROM left as t1 FULL JOIN right as t2 ON t1.a2 = t2.a2 AND t1.a1 > t2.a1 + 4 AND t1.a1 < t2.a1 + 9", ) .await?; let mut stream = df.execute_stream().await?; let mut operations = vec![]; // Partial. while let Some(Ok(batch)) = stream.next().await { waiting.store(false, Ordering::SeqCst); let left_unmatched = batch.column(2).null_count(); let right_unmatched = batch.column(0).null_count(); let op = if left_unmatched == 0 && right_unmatched == 0 { JoinOperation::Equal } else if right_unmatched > left_unmatched { JoinOperation::RightUnmatched } else { JoinOperation::LeftUnmatched }; operations.push(op); } tasks.into_iter().for_each(|jh| jh.join().unwrap()); // The SymmetricHashJoin executor produces FULL join results at every // pruning, which happens before it reaches the end of input and more // than once. In this test, we feed partially joinable data to both // sides in order to ensure that left or right unmatched results are // generated more than once during the test. assert!( operations .iter() .filter(|&n| JoinOperation::RightUnmatched.eq(n)) .count() > 1 && operations .iter() .filter(|&n| JoinOperation::LeftUnmatched.eq(n)) .count() > 1 ); Ok(()) } /// It tests the INSERT INTO functionality. #[tokio::test] async fn test_sql_insert_into_fifo() -> Result<()> { // To make unbounded deterministic let waiting = Arc::new(AtomicBool::new(true)); let waiting_thread = waiting.clone(); // create local execution context let config = SessionConfig::new().with_batch_size(TEST_BATCH_SIZE); let ctx = SessionContext::with_config(config); // Create a new temporary FIFO file let tmp_dir = TempDir::new()?; let source_fifo_path = create_fifo_file(&tmp_dir, "source.csv")?; // Prevent move let (source_fifo_path_thread, source_display_fifo_path) = (source_fifo_path.clone(), source_fifo_path.display()); // Tasks let mut tasks: Vec<JoinHandle<()>> = vec![]; // TEST_BATCH_SIZE + 1 rows will be provided. However, after processing precisely // TEST_BATCH_SIZE rows, the program will pause and wait for a batch to be read in another // thread. This approach ensures that the pipeline remains unbroken. 
tasks.push(create_writing_thread( source_fifo_path_thread, "a1,a2\n".to_owned(), (0..TEST_DATA_SIZE) .map(|_| "a,1\n".to_string()) .collect::<Vec<_>>(), waiting, TEST_BATCH_SIZE, )); // Create a new temporary FIFO file let sink_fifo_path = create_fifo_file(&tmp_dir, "sink.csv")?; // Prevent move let (sink_fifo_path_thread, sink_display_fifo_path) = (sink_fifo_path.clone(), sink_fifo_path.display()); // Spawn a new thread to read sink EXTERNAL TABLE. tasks.push(thread::spawn(move || { let file = File::open(sink_fifo_path_thread).unwrap(); let schema = Arc::new(Schema::new(vec![ Field::new("a1", DataType::Utf8, false), Field::new("a2", DataType::UInt32, false), ])); let mut reader = ReaderBuilder::new(schema) .has_header(true) .with_batch_size(TEST_BATCH_SIZE) .build(file) .map_err(|e| DataFusionError::Internal(e.to_string())) .unwrap(); while let Some(Ok(_)) = reader.next() { waiting_thread.store(false, Ordering::SeqCst); } })); // register second csv file with the SQL (create an empty file if not found) ctx.sql(&format!( "CREATE EXTERNAL TABLE source_table ( a1 VARCHAR NOT NULL, a2 INT NOT NULL ) STORED AS CSV WITH HEADER ROW OPTIONS ('UNBOUNDED' 'TRUE') LOCATION '{source_display_fifo_path}'" )) .await?; // register csv file with the SQL ctx.sql(&format!( "CREATE EXTERNAL TABLE sink_table ( a1 VARCHAR NOT NULL, a2 INT NOT NULL ) STORED AS CSV WITH HEADER ROW OPTIONS ('UNBOUNDED' 'TRUE') LOCATION '{sink_display_fifo_path}'" )) .await?; let df = ctx .sql( "INSERT INTO sink_table SELECT a1, a2 FROM source_table", ) .await?; df.collect().await?; tasks.into_iter().for_each(|jh| jh.join().unwrap()); Ok(()) } }
} } return Err(DataFusionError::Execution(e.to_string())); } Ok(())
random_line_split
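The create_fifo_file helper called by both tests is not shown in this record; a plausible sketch, assuming the nix and tempfile crates, follows. The detached fragment above suggests the original maps the failure into DataFusionError::Execution; this sketch returns std::io::Result instead so it stays self-contained.

use std::path::PathBuf;

use nix::sys::stat::Mode;
use nix::unistd::mkfifo;
use tempfile::TempDir;

fn create_fifo_file(tmp_dir: &TempDir, file_name: &str) -> std::io::Result<PathBuf> {
    let file_path = tmp_dir.path().join(file_name);
    // Create a named pipe readable and writable by the current user only.
    mkfifo(&file_path, Mode::S_IRWXU)
        .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?;
    Ok(file_path)
}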
main.rs
use clap::{App, AppSettings, Arg, SubCommand}; use default_boxed::DefaultBoxed; #[derive(DefaultBoxed)] struct Outer<'a, 'b> { inner: HeapApp<'a, 'b>, } struct HeapApp<'a, 'b> { app: App<'a, 'b>, } impl<'a, 'b> Default for HeapApp<'a, 'b> { fn
() -> Self { let mut app = App::new("servicemanagement1") .setting(clap::AppSettings::ColoredHelp) .author("Sebastian Thiel <[email protected]>") .version("0.1.0-20200619") .about("Google Service Management allows service producers to publish their services on Google Cloud Platform so that they can be discovered and used by service consumers.") .after_help("All documentation details can be found at <TODO figure out URL>") .arg(Arg::with_name("scope") .long("scope") .help("Specify the authentication method should be executed in. Each scope requires the user to grant this application permission to use it. If unset, it defaults to the shortest scope url for a particular method.") .multiple(true) .takes_value(true)) .arg(Arg::with_name("folder") .long("config-dir") .help("A directory into which we will store our persistent data. Defaults to a user-writable directory that we will create during the first invocation." ) .multiple(false) .takes_value(true)) .arg(Arg::with_name("debug") .long("debug") .help("Provide more output to aid with debugging") .multiple(false) .takes_value(false)); let mut operations0 = SubCommand::with_name("operations") .setting(AppSettings::ColoredHelp) .about("methods: get and list"); { let mcmd = SubCommand::with_name("get").about("Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice."); operations0 = operations0.subcommand(mcmd); } { let mcmd = SubCommand::with_name("list") .about("Lists service operations that match the specified filter in the request."); operations0 = operations0.subcommand(mcmd); } let mut services0 = SubCommand::with_name("services") .setting(AppSettings::ColoredHelp) .about("methods: create, delete, disable, enable, generate_config_report, get, get_config, get_iam_policy, list, set_iam_policy, test_iam_permissions and undelete"); { let mcmd = SubCommand::with_name("create").about("Creates a new managed service.\n\nA managed service is immutable, and is subject to mandatory 30-day\ndata retention. You cannot move a service or recreate it within 30 days\nafter deletion.\n\nOne producer project can own no more than 500 services. For security and\nreliability purposes, a production service should be hosted in a\ndedicated producer project.\n\nOperation<response: ManagedService>"); services0 = services0.subcommand(mcmd); } { let mcmd = SubCommand::with_name("delete").about("Deletes a managed service. This method will change the service to the\n`Soft-Delete` state for 30 days. Within this period, service producers may\ncall UndeleteService to restore the service.\nAfter 30 days, the service will be permanently deleted.\n\nOperation<response: google.protobuf.Empty>"); services0 = services0.subcommand(mcmd); } { let mcmd = SubCommand::with_name("disable").about("Disables a service for a project, so it can no longer be\nbe used for the project. It prevents accidental usage that may cause\nunexpected billing charges or security leaks.\n\nOperation<response: DisableServiceResponse>"); services0 = services0.subcommand(mcmd); } { let mcmd = SubCommand::with_name("enable").about("Enables a service for a project, so it can be used\nfor the project. 
See\n[Cloud Auth Guide](https://cloud.google.com/docs/authentication) for\nmore information.\n\nOperation<response: EnableServiceResponse>"); services0 = services0.subcommand(mcmd); } { let mcmd = SubCommand::with_name("generate_config_report").about("Generates and returns a report (errors, warnings and changes from\nexisting configurations) associated with\nGenerateConfigReportRequest.new_value\n\nIf GenerateConfigReportRequest.old_value is specified,\nGenerateConfigReportRequest will contain a single ChangeReport based on the\ncomparison between GenerateConfigReportRequest.new_value and\nGenerateConfigReportRequest.old_value.\nIf GenerateConfigReportRequest.old_value is not specified, this method\nwill compare GenerateConfigReportRequest.new_value with the last pushed\nservice configuration."); services0 = services0.subcommand(mcmd); } { let mcmd = SubCommand::with_name("get").about( "Gets a managed service. Authentication is required unless the service is\npublic.", ); services0 = services0.subcommand(mcmd); } { let mcmd = SubCommand::with_name("get_config") .about("Gets a service configuration (version) for a managed service."); services0 = services0.subcommand(mcmd); } { let mcmd = SubCommand::with_name("get_iam_policy").about("Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset."); services0 = services0.subcommand(mcmd); } { let mcmd = SubCommand::with_name("list").about("Lists managed services.\n\nReturns all public services. For authenticated users, also returns all\nservices the calling user has \"servicemanagement.services.get\" permission\nfor.\n\n**BETA:** If the caller specifies the `consumer_id`, it returns only the\nservices enabled on the consumer. The `consumer_id` must have the format\nof \"project:{PROJECT-ID}\"."); services0 = services0.subcommand(mcmd); } { let mcmd = SubCommand::with_name("set_iam_policy").about("Sets the access control policy on the specified resource. Replaces any\nexisting policy.\n\nCan return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors."); services0 = services0.subcommand(mcmd); } { let mcmd = SubCommand::with_name("test_iam_permissions").about("Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning."); services0 = services0.subcommand(mcmd); } { let mcmd = SubCommand::with_name("undelete").about("Revives a previously deleted managed service. The method restores the\nservice using the configuration at the time the service was deleted.\nThe target service must exist and must have been deleted within the\nlast 30 days.\n\nOperation<response: UndeleteServiceResponse>"); services0 = services0.subcommand(mcmd); } let mut configs1 = SubCommand::with_name("configs") .setting(AppSettings::ColoredHelp) .about("methods: create, get, list and submit"); { let mcmd = SubCommand::with_name("create").about("Creates a new service configuration (version) for a managed service.\nThis method only stores the service configuration. To roll out the service\nconfiguration to backend systems please call\nCreateServiceRollout.\n\nOnly the 100 most recent service configurations and ones referenced by\nexisting rollouts are kept for each service. 
The rest will be deleted\neventually."); configs1 = configs1.subcommand(mcmd); } { let mcmd = SubCommand::with_name("get") .about("Gets a service configuration (version) for a managed service."); configs1 = configs1.subcommand(mcmd); } { let mcmd = SubCommand::with_name("list").about("Lists the history of the service configuration for a managed service,\nfrom the newest to the oldest."); configs1 = configs1.subcommand(mcmd); } { let mcmd = SubCommand::with_name("submit").about("Creates a new service configuration (version) for a managed service based\non\nuser-supplied configuration source files (for example: OpenAPI\nSpecification). This method stores the source configurations as well as the\ngenerated service configuration. To rollout the service configuration to\nother services,\nplease call CreateServiceRollout.\n\nOnly the 100 most recent configuration sources and ones referenced by\nexisting service configurtions are kept for each service. The rest will be\ndeleted eventually.\n\nOperation<response: SubmitConfigSourceResponse>"); configs1 = configs1.subcommand(mcmd); } let mut consumers1 = SubCommand::with_name("consumers") .setting(AppSettings::ColoredHelp) .about("methods: get_iam_policy, set_iam_policy and test_iam_permissions"); { let mcmd = SubCommand::with_name("get_iam_policy").about("Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset."); consumers1 = consumers1.subcommand(mcmd); } { let mcmd = SubCommand::with_name("set_iam_policy").about("Sets the access control policy on the specified resource. Replaces any\nexisting policy.\n\nCan return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors."); consumers1 = consumers1.subcommand(mcmd); } { let mcmd = SubCommand::with_name("test_iam_permissions").about("Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning."); consumers1 = consumers1.subcommand(mcmd); } let mut rollouts1 = SubCommand::with_name("rollouts") .setting(AppSettings::ColoredHelp) .about("methods: create, get and list"); { let mcmd = SubCommand::with_name("create").about("Creates a new service configuration rollout. Based on rollout, the\nGoogle Service Management will roll out the service configurations to\ndifferent backend services. For example, the logging configuration will be\npushed to Google Cloud Logging.\n\nPlease note that any previous pending and running Rollouts and associated\nOperations will be automatically cancelled so that the latest Rollout will\nnot be blocked by previous Rollouts.\n\nOnly the 100 most recent (in any state) and the last 10 successful (if not\nalready part of the set of 100 most recent) rollouts are kept for each\nservice. 
The rest will be deleted eventually.\n\nOperation<response: Rollout>"); rollouts1 = rollouts1.subcommand(mcmd); } { let mcmd = SubCommand::with_name("get").about("Gets a service configuration rollout."); rollouts1 = rollouts1.subcommand(mcmd); } { let mcmd = SubCommand::with_name("list").about("Lists the history of the service configuration rollouts for a managed\nservice, from the newest to the oldest."); rollouts1 = rollouts1.subcommand(mcmd); } services0 = services0.subcommand(rollouts1); services0 = services0.subcommand(consumers1); services0 = services0.subcommand(configs1); app = app.subcommand(services0); app = app.subcommand(operations0); Self { app } } } use google_servicemanagement1 as api; fn main() { // TODO: set homedir afterwards, once the address is unmovable, or use Pin for the very first time // to allow a self-referential structure :D! let _home_dir = dirs::config_dir() .expect("configuration directory can be obtained") .join("google-service-cli"); let outer = Outer::default_boxed(); let app = outer.inner.app; let _matches = app.get_matches(); }
default
identifier_name
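The Outer/HeapApp indirection above exists so that the very large generated clap::App ends up behind a Box built with default_boxed rather than as a big stack temporary. A small illustration of the same pattern is sketched below, with a stand-in payload instead of the real App; the names and payload are invented for the example.

use default_boxed::DefaultBoxed;

#[derive(DefaultBoxed)]
struct Outer {
    inner: BigConfig,
}

struct BigConfig {
    // Stand-in for the generated clap::App: anything large or expensive to build.
    table: Vec<u64>,
}

impl Default for BigConfig {
    fn default() -> Self {
        BigConfig {
            table: (0..1_000_000).collect(),
        }
    }
}

fn main() {
    // default_boxed() yields Box<Outer> directly, mirroring how the CLI above
    // obtains its boxed App via Outer::default_boxed().
    let outer: Box<Outer> = Outer::default_boxed();
    assert_eq!(outer.inner.table.len(), 1_000_000);
}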
main.rs
use clap::{App, AppSettings, Arg, SubCommand}; use default_boxed::DefaultBoxed; #[derive(DefaultBoxed)] struct Outer<'a, 'b> { inner: HeapApp<'a, 'b>, } struct HeapApp<'a, 'b> { app: App<'a, 'b>, } impl<'a, 'b> Default for HeapApp<'a, 'b> { fn default() -> Self { let mut app = App::new("servicemanagement1") .setting(clap::AppSettings::ColoredHelp) .author("Sebastian Thiel <[email protected]>") .version("0.1.0-20200619") .about("Google Service Management allows service producers to publish their services on Google Cloud Platform so that they can be discovered and used by service consumers.") .after_help("All documentation details can be found at <TODO figure out URL>") .arg(Arg::with_name("scope") .long("scope") .help("Specify the authentication method should be executed in. Each scope requires the user to grant this application permission to use it. If unset, it defaults to the shortest scope url for a particular method.") .multiple(true) .takes_value(true)) .arg(Arg::with_name("folder") .long("config-dir") .help("A directory into which we will store our persistent data. Defaults to a user-writable directory that we will create during the first invocation." ) .multiple(false) .takes_value(true)) .arg(Arg::with_name("debug") .long("debug") .help("Provide more output to aid with debugging") .multiple(false) .takes_value(false)); let mut operations0 = SubCommand::with_name("operations") .setting(AppSettings::ColoredHelp) .about("methods: get and list"); { let mcmd = SubCommand::with_name("get").about("Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice."); operations0 = operations0.subcommand(mcmd); } { let mcmd = SubCommand::with_name("list") .about("Lists service operations that match the specified filter in the request."); operations0 = operations0.subcommand(mcmd); } let mut services0 = SubCommand::with_name("services") .setting(AppSettings::ColoredHelp) .about("methods: create, delete, disable, enable, generate_config_report, get, get_config, get_iam_policy, list, set_iam_policy, test_iam_permissions and undelete"); { let mcmd = SubCommand::with_name("create").about("Creates a new managed service.\n\nA managed service is immutable, and is subject to mandatory 30-day\ndata retention. You cannot move a service or recreate it within 30 days\nafter deletion.\n\nOne producer project can own no more than 500 services. For security and\nreliability purposes, a production service should be hosted in a\ndedicated producer project.\n\nOperation<response: ManagedService>"); services0 = services0.subcommand(mcmd); } { let mcmd = SubCommand::with_name("delete").about("Deletes a managed service. This method will change the service to the\n`Soft-Delete` state for 30 days. Within this period, service producers may\ncall UndeleteService to restore the service.\nAfter 30 days, the service will be permanently deleted.\n\nOperation<response: google.protobuf.Empty>"); services0 = services0.subcommand(mcmd); } { let mcmd = SubCommand::with_name("disable").about("Disables a service for a project, so it can no longer be\nbe used for the project. It prevents accidental usage that may cause\nunexpected billing charges or security leaks.\n\nOperation<response: DisableServiceResponse>"); services0 = services0.subcommand(mcmd); } { let mcmd = SubCommand::with_name("enable").about("Enables a service for a project, so it can be used\nfor the project. 
See\n[Cloud Auth Guide](https://cloud.google.com/docs/authentication) for\nmore information.\n\nOperation<response: EnableServiceResponse>"); services0 = services0.subcommand(mcmd); } { let mcmd = SubCommand::with_name("generate_config_report").about("Generates and returns a report (errors, warnings and changes from\nexisting configurations) associated with\nGenerateConfigReportRequest.new_value\n\nIf GenerateConfigReportRequest.old_value is specified,\nGenerateConfigReportRequest will contain a single ChangeReport based on the\ncomparison between GenerateConfigReportRequest.new_value and\nGenerateConfigReportRequest.old_value.\nIf GenerateConfigReportRequest.old_value is not specified, this method\nwill compare GenerateConfigReportRequest.new_value with the last pushed\nservice configuration."); services0 = services0.subcommand(mcmd); } { let mcmd = SubCommand::with_name("get").about( "Gets a managed service. Authentication is required unless the service is\npublic.", ); services0 = services0.subcommand(mcmd); } { let mcmd = SubCommand::with_name("get_config") .about("Gets a service configuration (version) for a managed service."); services0 = services0.subcommand(mcmd); } { let mcmd = SubCommand::with_name("get_iam_policy").about("Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset."); services0 = services0.subcommand(mcmd); } { let mcmd = SubCommand::with_name("list").about("Lists managed services.\n\nReturns all public services. For authenticated users, also returns all\nservices the calling user has \"servicemanagement.services.get\" permission\nfor.\n\n**BETA:** If the caller specifies the `consumer_id`, it returns only the\nservices enabled on the consumer. The `consumer_id` must have the format\nof \"project:{PROJECT-ID}\"."); services0 = services0.subcommand(mcmd); } { let mcmd = SubCommand::with_name("set_iam_policy").about("Sets the access control policy on the specified resource. Replaces any\nexisting policy.\n\nCan return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors."); services0 = services0.subcommand(mcmd); } { let mcmd = SubCommand::with_name("test_iam_permissions").about("Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning."); services0 = services0.subcommand(mcmd); } { let mcmd = SubCommand::with_name("undelete").about("Revives a previously deleted managed service. The method restores the\nservice using the configuration at the time the service was deleted.\nThe target service must exist and must have been deleted within the\nlast 30 days.\n\nOperation<response: UndeleteServiceResponse>"); services0 = services0.subcommand(mcmd); } let mut configs1 = SubCommand::with_name("configs") .setting(AppSettings::ColoredHelp) .about("methods: create, get, list and submit"); { let mcmd = SubCommand::with_name("create").about("Creates a new service configuration (version) for a managed service.\nThis method only stores the service configuration. To roll out the service\nconfiguration to backend systems please call\nCreateServiceRollout.\n\nOnly the 100 most recent service configurations and ones referenced by\nexisting rollouts are kept for each service. 
The rest will be deleted\neventually."); configs1 = configs1.subcommand(mcmd); } { let mcmd = SubCommand::with_name("get") .about("Gets a service configuration (version) for a managed service."); configs1 = configs1.subcommand(mcmd); } { let mcmd = SubCommand::with_name("list").about("Lists the history of the service configuration for a managed service,\nfrom the newest to the oldest."); configs1 = configs1.subcommand(mcmd); } { let mcmd = SubCommand::with_name("submit").about("Creates a new service configuration (version) for a managed service based\non\nuser-supplied configuration source files (for example: OpenAPI\nSpecification). This method stores the source configurations as well as the\ngenerated service configuration. To rollout the service configuration to\nother services,\nplease call CreateServiceRollout.\n\nOnly the 100 most recent configuration sources and ones referenced by\nexisting service configurtions are kept for each service. The rest will be\ndeleted eventually.\n\nOperation<response: SubmitConfigSourceResponse>"); configs1 = configs1.subcommand(mcmd); } let mut consumers1 = SubCommand::with_name("consumers") .setting(AppSettings::ColoredHelp) .about("methods: get_iam_policy, set_iam_policy and test_iam_permissions"); { let mcmd = SubCommand::with_name("get_iam_policy").about("Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset."); consumers1 = consumers1.subcommand(mcmd); } { let mcmd = SubCommand::with_name("set_iam_policy").about("Sets the access control policy on the specified resource. Replaces any\nexisting policy.\n\nCan return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors."); consumers1 = consumers1.subcommand(mcmd); } { let mcmd = SubCommand::with_name("test_iam_permissions").about("Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning."); consumers1 = consumers1.subcommand(mcmd); } let mut rollouts1 = SubCommand::with_name("rollouts") .setting(AppSettings::ColoredHelp) .about("methods: create, get and list"); { let mcmd = SubCommand::with_name("create").about("Creates a new service configuration rollout. Based on rollout, the\nGoogle Service Management will roll out the service configurations to\ndifferent backend services. For example, the logging configuration will be\npushed to Google Cloud Logging.\n\nPlease note that any previous pending and running Rollouts and associated\nOperations will be automatically cancelled so that the latest Rollout will\nnot be blocked by previous Rollouts.\n\nOnly the 100 most recent (in any state) and the last 10 successful (if not\nalready part of the set of 100 most recent) rollouts are kept for each\nservice. 
The rest will be deleted eventually.\n\nOperation<response: Rollout>"); rollouts1 = rollouts1.subcommand(mcmd); } { let mcmd = SubCommand::with_name("get").about("Gets a service configuration rollout."); rollouts1 = rollouts1.subcommand(mcmd); } { let mcmd = SubCommand::with_name("list").about("Lists the history of the service configuration rollouts for a managed\nservice, from the newest to the oldest."); rollouts1 = rollouts1.subcommand(mcmd); } services0 = services0.subcommand(rollouts1); services0 = services0.subcommand(consumers1); services0 = services0.subcommand(configs1); app = app.subcommand(services0); app = app.subcommand(operations0); Self { app } } }
use google_servicemanagement1 as api; fn main() { // TODO: set homedir afterwards, once the address is unmovable, or use Pin for the very first time // to allow a self-referential structure :D! let _home_dir = dirs::config_dir() .expect("configuration directory can be obtained") .join("google-service-cli"); let outer = Outer::default_boxed(); let app = outer.inner.app; let _matches = app.get_matches(); }
random_line_split
main.rs
use clap::{App, AppSettings, Arg, SubCommand}; use default_boxed::DefaultBoxed; #[derive(DefaultBoxed)] struct Outer<'a, 'b> { inner: HeapApp<'a, 'b>, } struct HeapApp<'a, 'b> { app: App<'a, 'b>, } impl<'a, 'b> Default for HeapApp<'a, 'b> { fn default() -> Self { let mut app = App::new("servicemanagement1") .setting(clap::AppSettings::ColoredHelp) .author("Sebastian Thiel <[email protected]>") .version("0.1.0-20200619") .about("Google Service Management allows service producers to publish their services on Google Cloud Platform so that they can be discovered and used by service consumers.") .after_help("All documentation details can be found at <TODO figure out URL>") .arg(Arg::with_name("scope") .long("scope") .help("Specify the authentication method should be executed in. Each scope requires the user to grant this application permission to use it. If unset, it defaults to the shortest scope url for a particular method.") .multiple(true) .takes_value(true)) .arg(Arg::with_name("folder") .long("config-dir") .help("A directory into which we will store our persistent data. Defaults to a user-writable directory that we will create during the first invocation." ) .multiple(false) .takes_value(true)) .arg(Arg::with_name("debug") .long("debug") .help("Provide more output to aid with debugging") .multiple(false) .takes_value(false)); let mut operations0 = SubCommand::with_name("operations") .setting(AppSettings::ColoredHelp) .about("methods: get and list"); { let mcmd = SubCommand::with_name("get").about("Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice."); operations0 = operations0.subcommand(mcmd); } { let mcmd = SubCommand::with_name("list") .about("Lists service operations that match the specified filter in the request."); operations0 = operations0.subcommand(mcmd); } let mut services0 = SubCommand::with_name("services") .setting(AppSettings::ColoredHelp) .about("methods: create, delete, disable, enable, generate_config_report, get, get_config, get_iam_policy, list, set_iam_policy, test_iam_permissions and undelete"); { let mcmd = SubCommand::with_name("create").about("Creates a new managed service.\n\nA managed service is immutable, and is subject to mandatory 30-day\ndata retention. You cannot move a service or recreate it within 30 days\nafter deletion.\n\nOne producer project can own no more than 500 services. For security and\nreliability purposes, a production service should be hosted in a\ndedicated producer project.\n\nOperation<response: ManagedService>"); services0 = services0.subcommand(mcmd); } { let mcmd = SubCommand::with_name("delete").about("Deletes a managed service. This method will change the service to the\n`Soft-Delete` state for 30 days. Within this period, service producers may\ncall UndeleteService to restore the service.\nAfter 30 days, the service will be permanently deleted.\n\nOperation<response: google.protobuf.Empty>"); services0 = services0.subcommand(mcmd); } { let mcmd = SubCommand::with_name("disable").about("Disables a service for a project, so it can no longer be\nbe used for the project. It prevents accidental usage that may cause\nunexpected billing charges or security leaks.\n\nOperation<response: DisableServiceResponse>"); services0 = services0.subcommand(mcmd); } { let mcmd = SubCommand::with_name("enable").about("Enables a service for a project, so it can be used\nfor the project. 
See\n[Cloud Auth Guide](https://cloud.google.com/docs/authentication) for\nmore information.\n\nOperation<response: EnableServiceResponse>"); services0 = services0.subcommand(mcmd); } { let mcmd = SubCommand::with_name("generate_config_report").about("Generates and returns a report (errors, warnings and changes from\nexisting configurations) associated with\nGenerateConfigReportRequest.new_value\n\nIf GenerateConfigReportRequest.old_value is specified,\nGenerateConfigReportRequest will contain a single ChangeReport based on the\ncomparison between GenerateConfigReportRequest.new_value and\nGenerateConfigReportRequest.old_value.\nIf GenerateConfigReportRequest.old_value is not specified, this method\nwill compare GenerateConfigReportRequest.new_value with the last pushed\nservice configuration."); services0 = services0.subcommand(mcmd); } { let mcmd = SubCommand::with_name("get").about( "Gets a managed service. Authentication is required unless the service is\npublic.", ); services0 = services0.subcommand(mcmd); } { let mcmd = SubCommand::with_name("get_config") .about("Gets a service configuration (version) for a managed service."); services0 = services0.subcommand(mcmd); } { let mcmd = SubCommand::with_name("get_iam_policy").about("Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset."); services0 = services0.subcommand(mcmd); } { let mcmd = SubCommand::with_name("list").about("Lists managed services.\n\nReturns all public services. For authenticated users, also returns all\nservices the calling user has \"servicemanagement.services.get\" permission\nfor.\n\n**BETA:** If the caller specifies the `consumer_id`, it returns only the\nservices enabled on the consumer. The `consumer_id` must have the format\nof \"project:{PROJECT-ID}\"."); services0 = services0.subcommand(mcmd); } { let mcmd = SubCommand::with_name("set_iam_policy").about("Sets the access control policy on the specified resource. Replaces any\nexisting policy.\n\nCan return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors."); services0 = services0.subcommand(mcmd); } { let mcmd = SubCommand::with_name("test_iam_permissions").about("Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning."); services0 = services0.subcommand(mcmd); } { let mcmd = SubCommand::with_name("undelete").about("Revives a previously deleted managed service. The method restores the\nservice using the configuration at the time the service was deleted.\nThe target service must exist and must have been deleted within the\nlast 30 days.\n\nOperation<response: UndeleteServiceResponse>"); services0 = services0.subcommand(mcmd); } let mut configs1 = SubCommand::with_name("configs") .setting(AppSettings::ColoredHelp) .about("methods: create, get, list and submit"); { let mcmd = SubCommand::with_name("create").about("Creates a new service configuration (version) for a managed service.\nThis method only stores the service configuration. To roll out the service\nconfiguration to backend systems please call\nCreateServiceRollout.\n\nOnly the 100 most recent service configurations and ones referenced by\nexisting rollouts are kept for each service. 
The rest will be deleted\neventually."); configs1 = configs1.subcommand(mcmd); } { let mcmd = SubCommand::with_name("get") .about("Gets a service configuration (version) for a managed service."); configs1 = configs1.subcommand(mcmd); } { let mcmd = SubCommand::with_name("list").about("Lists the history of the service configuration for a managed service,\nfrom the newest to the oldest."); configs1 = configs1.subcommand(mcmd); } { let mcmd = SubCommand::with_name("submit").about("Creates a new service configuration (version) for a managed service based\non\nuser-supplied configuration source files (for example: OpenAPI\nSpecification). This method stores the source configurations as well as the\ngenerated service configuration. To rollout the service configuration to\nother services,\nplease call CreateServiceRollout.\n\nOnly the 100 most recent configuration sources and ones referenced by\nexisting service configurtions are kept for each service. The rest will be\ndeleted eventually.\n\nOperation<response: SubmitConfigSourceResponse>"); configs1 = configs1.subcommand(mcmd); } let mut consumers1 = SubCommand::with_name("consumers") .setting(AppSettings::ColoredHelp) .about("methods: get_iam_policy, set_iam_policy and test_iam_permissions"); { let mcmd = SubCommand::with_name("get_iam_policy").about("Gets the access control policy for a resource.\nReturns an empty policy if the resource exists and does not have a policy\nset."); consumers1 = consumers1.subcommand(mcmd); } { let mcmd = SubCommand::with_name("set_iam_policy").about("Sets the access control policy on the specified resource. Replaces any\nexisting policy.\n\nCan return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors."); consumers1 = consumers1.subcommand(mcmd); } { let mcmd = SubCommand::with_name("test_iam_permissions").about("Returns permissions that a caller has on the specified resource.\nIf the resource does not exist, this will return an empty set of\npermissions, not a `NOT_FOUND` error.\n\nNote: This operation is designed to be used for building permission-aware\nUIs and command-line tools, not for authorization checking. This operation\nmay \"fail open\" without warning."); consumers1 = consumers1.subcommand(mcmd); } let mut rollouts1 = SubCommand::with_name("rollouts") .setting(AppSettings::ColoredHelp) .about("methods: create, get and list"); { let mcmd = SubCommand::with_name("create").about("Creates a new service configuration rollout. Based on rollout, the\nGoogle Service Management will roll out the service configurations to\ndifferent backend services. For example, the logging configuration will be\npushed to Google Cloud Logging.\n\nPlease note that any previous pending and running Rollouts and associated\nOperations will be automatically cancelled so that the latest Rollout will\nnot be blocked by previous Rollouts.\n\nOnly the 100 most recent (in any state) and the last 10 successful (if not\nalready part of the set of 100 most recent) rollouts are kept for each\nservice. 
The rest will be deleted eventually.\n\nOperation<response: Rollout>"); rollouts1 = rollouts1.subcommand(mcmd); } { let mcmd = SubCommand::with_name("get").about("Gets a service configuration rollout."); rollouts1 = rollouts1.subcommand(mcmd); } { let mcmd = SubCommand::with_name("list").about("Lists the history of the service configuration rollouts for a managed\nservice, from the newest to the oldest."); rollouts1 = rollouts1.subcommand(mcmd); } services0 = services0.subcommand(rollouts1); services0 = services0.subcommand(consumers1); services0 = services0.subcommand(configs1); app = app.subcommand(services0); app = app.subcommand(operations0); Self { app } } } use google_servicemanagement1 as api; fn main()
{ // TODO: set homedir afterwards, once the address is unmovable, or use Pin for the very first time // to allow a self-referential structure :D! let _home_dir = dirs::config_dir() .expect("configuration directory can be obtained") .join("google-service-cli"); let outer = Outer::default_boxed(); let app = outer.inner.app; let _matches = app.get_matches(); }
identifier_body
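The generated tree above nests method subcommands under resource subcommands (for example, services configs create). The sketch below shows, assuming the same clap 2.x builder API, how such nested matches are typically dispatched after get_matches(); the real binary's dispatch logic is not part of this file, so the handler bodies here are placeholders.

use clap::{App, AppSettings, SubCommand};

fn main() {
    // A tiny two-level hierarchy in the same style as the generated CLI above.
    let app = App::new("demo")
        .setting(AppSettings::SubcommandRequiredElseHelp)
        .subcommand(
            SubCommand::with_name("services")
                .subcommand(SubCommand::with_name("list"))
                .subcommand(SubCommand::with_name("get")),
        );

    let matches = app.get_matches();

    // Dispatch by walking the nested subcommand matches.
    if let ("services", Some(services)) = matches.subcommand() {
        match services.subcommand_name() {
            Some("list") => println!("would call services.list"),
            Some("get") => println!("would call services.get"),
            _ => println!("unknown services method"),
        }
    }
}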
ycsb.rs
the data stored under a bytestring key of `self.key_len` bytes. // - set: A function that stores the data stored under a bytestring key of `self.key_len` bytes // with a bytestring value of `self.value_len` bytes. // # Return // A three tuple consisting of the duration that this thread ran the benchmark, the // number of gets it performed, and the number of puts it performed. pub fn abc<G, P, R>(&mut self, mut get: G, mut put: P) -> R where G: FnMut(u32, &[u8]) -> R, P: FnMut(u32, &[u8], &[u8]) -> R, { let is_get = (self.rng.gen::<u32>() % 100) >= self.put_pct as u32; // Sample a tenant. let t = self.tenant_rng.sample(&mut self.rng) as u32; // Sample a key, and convert into a little endian byte array. let k = self.key_rng.sample(&mut self.rng) as u32; let k: [u8; 4] = unsafe { transmute(k.to_le()) }; self.key_buf[0..mem::size_of::<u32>()].copy_from_slice(&k); if is_get { get(t, self.key_buf.as_slice()) } else { put(t, self.key_buf.as_slice(), self.value_buf.as_slice()) } } } /// Sends out YCSB based RPC requests to a Sandstorm server. struct YcsbSend { // The actual YCSB workload. Required to generate keys and values for get() and put() requests. workload: RefCell<Ycsb>, // Network stack required to actually send RPC requests out the network. sender: dispatch::Sender, // Total number of requests to be sent out. requests: u64, // Number of requests that have been sent out so far. sent: u64, // The inverse of the rate at which requests are to be generated. Basically, the time interval // between two request generations in cycles. rate_inv: u64, // The time stamp at which the workload started generating requests in cycles. start: u64, // The time stamp at which the next request must be issued in cycles. next: u64, // If true, RPC requests corresponding to native get() and put() operations are sent out. If // false, invoke() based RPC requests are sent out. native: bool, // Payload for an invoke() based get operation. Required in order to avoid making intermediate // copies of the extension name, table id, and key. payload_get: RefCell<Vec<u8>>, // Payload for an invoke() based put operation. Required in order to avoid making intermediate // copies of the extension name, table id, key length, key, and value. payload_put: RefCell<Vec<u8>>, } // Implementation of methods on YcsbSend. impl YcsbSend { /// Constructs a YcsbSend. /// /// # Arguments /// /// * `config`: Client configuration with YCSB related (key and value length etc.) as well as /// Network related (Server and Client MAC address etc.) parameters. /// * `port`: Network port over which requests will be sent out. /// * `reqs`: The number of requests to be issued to the server. /// * `dst_ports`: The total number of UDP ports the server is listening on. /// /// # Return /// /// A YCSB request generator. fn new( config: &config::ClientConfig, port: CacheAligned<PortQueue>, reqs: u64, dst_ports: u16, ) -> YcsbSend { // The payload on an invoke() based get request consists of the extensions name ("get"), // the table id to perform the lookup on, and the key to lookup. 
let payload_len = "get".as_bytes().len() + mem::size_of::<u64>() + config.key_len; let mut payload_get = Vec::with_capacity(payload_len); payload_get.extend_from_slice("get".as_bytes()); payload_get.extend_from_slice(&unsafe { transmute::<u64, [u8; 8]>(1u64.to_le()) }); payload_get.resize(payload_len, 0); // The payload on an invoke() based put request consists of the extensions name ("put"), // the table id to perform the lookup on, the length of the key to lookup, the key, and the // value to be inserted into the database. let payload_len = "put".as_bytes().len() + mem::size_of::<u64>() + mem::size_of::<u16>() + config.key_len + config.value_len; let mut payload_put = Vec::with_capacity(payload_len); payload_put.extend_from_slice("put".as_bytes()); payload_put.extend_from_slice(&unsafe { transmute::<u64, [u8; 8]>(1u64.to_le()) }); payload_put.extend_from_slice(&unsafe { transmute::<u16, [u8; 2]>((config.key_len as u16).to_le()) }); payload_put.resize(payload_len, 0); YcsbSend { workload: RefCell::new(Ycsb::new( config.key_len, config.value_len, config.n_keys, config.put_pct, config.skew, config.num_tenants, config.tenant_skew, )), sender: dispatch::Sender::new(config, port, dst_ports), requests: reqs, sent: 0, rate_inv: cycles::cycles_per_second() / config.req_rate as u64, start: cycles::rdtsc(), next: 0, native:!config.use_invoke, payload_get: RefCell::new(payload_get), payload_put: RefCell::new(payload_put), } } } // The Executable trait allowing YcsbSend to be scheduled by Netbricks. impl Executable for YcsbSend { // Called internally by Netbricks. fn execute(&mut self) { // Return if there are no more requests to generate. if self.requests <= self.sent { return; } // Get the current time stamp so that we can determine if it is time to issue the next RPC. let curr = cycles::rdtsc(); // If it is either time to send out a request, or if a request has never been sent out, // then, do so. if curr >= self.next || self.next == 0 { if self.native == true { // Configured to issue native RPCs, issue a regular get()/put() operation. self.workload.borrow_mut().abc( |tenant, key| self.sender.send_get(tenant, 1, key, curr), |tenant, key, val| self.sender.send_put(tenant, 1, key, val, curr), ); } else { // Configured to issue invoke() RPCs. let mut p_get = self.payload_get.borrow_mut(); let mut p_put = self.payload_put.borrow_mut(); // XXX Heavily dependent on how `Ycsb` creates a key. Only the first four // bytes of the key matter, the rest are zero. The value is always zero. self.workload.borrow_mut().abc( |tenant, key| { // First 11 bytes on the payload were already pre-populated with the // extension name (3 bytes), and the table id (8 bytes). Just write in the // first 4 bytes of the key. p_get[11..15].copy_from_slice(&key[0..4]); self.sender.send_invoke(tenant, 3, &p_get, curr) }, |tenant, key, _val| { // First 13 bytes on the payload were already pre-populated with the // extension name (3 bytes), the table id (8 bytes), and the key length (2 // bytes). Just write in the first 4 bytes of the key. The value is anyway // always zero. p_put[13..17].copy_from_slice(&key[0..4]); self.sender.send_invoke(tenant, 3, &p_put, curr) }, ); } // Update the time stamp at which the next request should be generated, assuming that // the first request was sent out at self.start. self.sent += 1; self.next = self.start + self.sent * self.rate_inv; } } fn dependencies(&mut self) -> Vec<usize> { vec![] } } /// Receives responses to YCSB requests sent out by YcsbSend. 
struct YcsbRecv<T> where T: PacketTx + PacketRx + Display + Clone +'static, { // The network stack required to receives RPC response packets from a network port. receiver: dispatch::Receiver<T>, // The number of response packets to wait for before printing out statistics. responses: u64, // Time stamp in cycles at which measurement started. Required to calculate observed // throughput of the Sandstorm server. start: u64, // The total number of responses received so far. recvd: u64, // Vector of sampled request latencies. Required to calculate distributions once all responses // have been received. latencies: Vec<u64>, // If true, this receiver will make latency measurements. master: bool, // If true, then responses will be considered to correspond to native gets and puts. native: bool, // Time stamp in cycles at which measurement stopped. stop: u64, } // Implementation of methods on YcsbRecv. impl<T> YcsbRecv<T> where T: PacketTx + PacketRx + Display + Clone +'static, { /// Constructs a YcsbRecv. /// /// # Arguments /// /// * `port` : Network port on which responses will be polled for. /// * `resps`: The number of responses to wait for before calculating statistics. /// * `master`: Boolean indicating if the receiver should make latency measurements. /// * `native`: If true, responses will be considered to correspond to native gets and puts. /// /// # Return /// /// A YCSB response receiver that measures the median latency and throughput of a Sandstorm /// server. fn new(port: T, resps: u64, master: bool, native: bool) -> YcsbRecv<T> { YcsbRecv { receiver: dispatch::Receiver::new(port), responses: resps, start: cycles::rdtsc(), recvd: 0, latencies: Vec::with_capacity(resps as usize), master: master, native: native, stop: 0, } } } // Implementation of the `Drop` trait on YcsbRecv. impl<T> Drop for YcsbRecv<T> where T: PacketTx + PacketRx + Display + Clone +'static, { fn drop(&mut self) { // Calculate & print the throughput for all client threads. println!( "YCSB Throughput {}", self.recvd as f64 / cycles::to_seconds(self.stop - self.start) ); // Calculate & print median & tail latency only on the master thread. if self.master { self.latencies.sort(); let m; let t = self.latencies[(self.latencies.len() * 99) / 100]; match self.latencies.len() % 2 { 0 => { let n = self.latencies.len(); m = (self.latencies[n / 2] + self.latencies[(n / 2) + 1]) / 2; } _ => m = self.latencies[self.latencies.len() / 2], } println!( ">>> {} {}", cycles::to_seconds(m) * 1e9, cycles::to_seconds(t) * 1e9 ); } } } // Executable trait allowing YcsbRecv to be scheduled by Netbricks. impl<T> Executable for YcsbRecv<T> where T: PacketTx + PacketRx + Display + Clone +'static, { // Called internally by Netbricks. fn execute(&mut self) { // Don't do anything after all responses have been received. if self.responses <= self.recvd { return; } // Try to receive packets from the network port. // If there are packets, sample the latency of the server. if let Some(mut packets) = self.receiver.recv_res() { while let Some(packet) = packets.pop() { self.recvd += 1; // Measure latency on the master client after the first 2 million requests. // The start timestamp is present on the RPC response header. if self.recvd > 2 * 1000 * 1000 && self.master { let curr = cycles::rdtsc(); match self.native { // The response corresponds to an invoke() RPC. 
false => { let p = packet.parse_header::<InvokeResponse>(); self.latencies .push(curr - p.get_header().common_header.stamp); p.free_packet(); } // The response corresponds to a get() or put() RPC. // The opcode on the response identifies the RPC type. true => match parse_rpc_opcode(&packet) { OpCode::SandstormGetRpc => { let p = packet.parse_header::<GetResponse>(); self.latencies .push(curr - p.get_header().common_header.stamp); p.free_packet(); } OpCode::SandstormPutRpc => { let p = packet.parse_header::<PutResponse>(); self.latencies .push(curr - p.get_header().common_header.stamp); p.free_packet(); } _ => packet.free_packet(), }, } } else { packet.free_packet(); } } } // The moment all response packets have been received, set the value of the // stop timestamp so that throughput can be estimated later. if self.responses <= self.recvd { self.stop = cycles::rdtsc(); } } fn dependencies(&mut self) -> Vec<usize> { vec![] } } /// Sets up YcsbSend by adding it to a Netbricks scheduler. /// /// # Arguments /// /// * `config`: Network related configuration such as the MAC and IP address. /// * `ports`: Network port on which packets will be sent. /// * `scheduler`: Netbricks scheduler to which YcsbSend will be added. fn setup_send<S>( config: &config::ClientConfig, ports: Vec<CacheAligned<PortQueue>>, scheduler: &mut S, _core: i32, ) where S: Scheduler + Sized, { if ports.len()!= 1 { error!("Client should be configured with exactly 1 port!"); std::process::exit(1); } // Add the sender to a netbricks pipeline. match scheduler.add_task(YcsbSend::new( config, ports[0].clone(), config.num_reqs as u64, config.server_udp_ports as u16, )) { Ok(_) => { info!( "Successfully added YcsbSend with tx queue {}.", ports[0].txq() ); } Err(ref err) => { error!("Error while adding to Netbricks pipeline {}", err); std::process::exit(1); } } } /// Sets up YcsbRecv by adding it to a Netbricks scheduler. /// /// # Arguments /// /// * `ports`: Network port on which packets will be sent. /// * `scheduler`: Netbricks scheduler to which YcsbRecv will be added. /// * `master`: If true, the added YcsbRecv will make latency measurements. /// * `native`: If true, the added YcsbRecv will assume that responses correspond to gets /// and puts. fn setup_recv<S>( ports: Vec<CacheAligned<PortQueue>>, scheduler: &mut S, _core: i32, master: bool, native: bool, ) where S: Scheduler + Sized, { if ports.len()!= 1
// Add the receiver to a netbricks pipeline. match scheduler.add_task(YcsbRecv::new( ports[0].clone(), 34 * 1000 * 1000 as u64, master, native, )) { Ok(_) => { info!( "Successfully added YcsbRecv with rx queue {}.", ports[0].rxq() ); } Err(ref err) => { error!("Error while adding to Netbricks pipeline {}", err); std::process::exit(1); } } } fn main() { db::env_logger::init().expect("ERROR: failed to initialize logger!"); let config = config::ClientConfig::load(); info!("Starting up Sandstorm client with config {:?}", config); // Based on the supplied client configuration, compute the amount of time it will take to send // out `num_reqs` requests at a rate of `req_rate` requests per second. let exec = config.num_reqs / config.req_rate; // Setup Netbricks. let mut net_context = setup::config_and_init_netbricks(&config); // Setup the client pipeline. net_context.start_schedulers(); // The core id's which will run the sender and receiver threads. // XXX The following two arrays heavily depend on the set of cores // configured in setup.rs let senders = [0, 2, 4, 6]; let receive = [1, 3, 5, 7]; assert!((senders.len() == 4) && (receive.len() == 4)); // Setup 4 senders, and 4 receivers. for i in 0..4 { // First, retrieve a tx-rx queue pair from Netbricks let port = net_context .rx_queues .get(&senders[i]) .expect("Failed to retrieve network port!") .clone(); let mut master = false; if i == 0 { master = true; } let native =!config.use_invoke; // Setup the receive side. net_context .add_pipeline_to_core( receive[i], Arc::new( move |_ports, sched: &mut StandaloneScheduler, core: i32, _sibling| { setup_recv(port.clone(), sched, core, master, native) }, ), ) .expect("Failed to initialize receive side."); // Setup the send side. net_context .add_pipeline_to_core( senders[i], Arc::new(
{ error!("Client should be configured with exactly 1 port!"); std::process::exit(1); }
conditional_block
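The invoke() payload layout described in the comments above (extension name, table id, optional key length, then key and value) is what fixes the hard-coded offsets 11..15 and 13..17 that execute() writes the key into. The self-contained check below makes those offsets explicit; the key and value lengths of 30 and 100 bytes are assumed values for illustration only.

fn main() {
    let key_len = 30usize;
    let value_len = 100usize;

    // get payload: "get" (3 B) | table id (8 B, LE) | key; key starts at offset 11.
    let mut get = Vec::with_capacity(3 + 8 + key_len);
    get.extend_from_slice(b"get");
    get.extend_from_slice(&1u64.to_le_bytes());
    get.resize(3 + 8 + key_len, 0);
    assert_eq!(get.len(), 11 + key_len);

    // put payload: "put" (3 B) | table id (8 B, LE) | key length (2 B, LE) | key | value;
    // key starts at offset 13.
    let mut put = Vec::with_capacity(3 + 8 + 2 + key_len + value_len);
    put.extend_from_slice(b"put");
    put.extend_from_slice(&1u64.to_le_bytes());
    put.extend_from_slice(&(key_len as u16).to_le_bytes());
    put.resize(3 + 8 + 2 + key_len + value_len, 0);
    assert_eq!(put.len(), 13 + key_len + value_len);

    // Writing the first four key bytes, as execute() does.
    let key = [7u8, 0, 0, 0];
    get[11..15].copy_from_slice(&key);
    put[13..17].copy_from_slice(&key);
}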
ycsb.rs
the data stored under a bytestring key of `self.key_len` bytes. // - set: A function that stores the data stored under a bytestring key of `self.key_len` bytes // with a bytestring value of `self.value_len` bytes. // # Return // A three tuple consisting of the duration that this thread ran the benchmark, the // number of gets it performed, and the number of puts it performed. pub fn abc<G, P, R>(&mut self, mut get: G, mut put: P) -> R where G: FnMut(u32, &[u8]) -> R, P: FnMut(u32, &[u8], &[u8]) -> R, { let is_get = (self.rng.gen::<u32>() % 100) >= self.put_pct as u32; // Sample a tenant. let t = self.tenant_rng.sample(&mut self.rng) as u32; // Sample a key, and convert into a little endian byte array. let k = self.key_rng.sample(&mut self.rng) as u32; let k: [u8; 4] = unsafe { transmute(k.to_le()) }; self.key_buf[0..mem::size_of::<u32>()].copy_from_slice(&k); if is_get { get(t, self.key_buf.as_slice()) } else { put(t, self.key_buf.as_slice(), self.value_buf.as_slice()) } } } /// Sends out YCSB based RPC requests to a Sandstorm server. struct YcsbSend { // The actual YCSB workload. Required to generate keys and values for get() and put() requests. workload: RefCell<Ycsb>, // Network stack required to actually send RPC requests out the network. sender: dispatch::Sender, // Total number of requests to be sent out. requests: u64, // Number of requests that have been sent out so far. sent: u64, // The inverse of the rate at which requests are to be generated. Basically, the time interval // between two request generations in cycles. rate_inv: u64, // The time stamp at which the workload started generating requests in cycles. start: u64, // The time stamp at which the next request must be issued in cycles. next: u64, // If true, RPC requests corresponding to native get() and put() operations are sent out. If // false, invoke() based RPC requests are sent out. native: bool, // Payload for an invoke() based get operation. Required in order to avoid making intermediate // copies of the extension name, table id, and key. payload_get: RefCell<Vec<u8>>, // Payload for an invoke() based put operation. Required in order to avoid making intermediate // copies of the extension name, table id, key length, key, and value. payload_put: RefCell<Vec<u8>>, } // Implementation of methods on YcsbSend. impl YcsbSend { /// Constructs a YcsbSend. /// /// # Arguments /// /// * `config`: Client configuration with YCSB related (key and value length etc.) as well as /// Network related (Server and Client MAC address etc.) parameters. /// * `port`: Network port over which requests will be sent out. /// * `reqs`: The number of requests to be issued to the server. /// * `dst_ports`: The total number of UDP ports the server is listening on. /// /// # Return /// /// A YCSB request generator. fn new( config: &config::ClientConfig, port: CacheAligned<PortQueue>, reqs: u64, dst_ports: u16, ) -> YcsbSend { // The payload on an invoke() based get request consists of the extensions name ("get"), // the table id to perform the lookup on, and the key to lookup. 
let payload_len = "get".as_bytes().len() + mem::size_of::<u64>() + config.key_len; let mut payload_get = Vec::with_capacity(payload_len); payload_get.extend_from_slice("get".as_bytes()); payload_get.extend_from_slice(&unsafe { transmute::<u64, [u8; 8]>(1u64.to_le()) }); payload_get.resize(payload_len, 0); // The payload on an invoke() based put request consists of the extensions name ("put"), // the table id to perform the lookup on, the length of the key to lookup, the key, and the // value to be inserted into the database. let payload_len = "put".as_bytes().len() + mem::size_of::<u64>() + mem::size_of::<u16>() + config.key_len + config.value_len; let mut payload_put = Vec::with_capacity(payload_len); payload_put.extend_from_slice("put".as_bytes()); payload_put.extend_from_slice(&unsafe { transmute::<u64, [u8; 8]>(1u64.to_le()) }); payload_put.extend_from_slice(&unsafe { transmute::<u16, [u8; 2]>((config.key_len as u16).to_le()) }); payload_put.resize(payload_len, 0); YcsbSend { workload: RefCell::new(Ycsb::new( config.key_len, config.value_len, config.n_keys, config.put_pct, config.skew, config.num_tenants, config.tenant_skew, )), sender: dispatch::Sender::new(config, port, dst_ports), requests: reqs, sent: 0, rate_inv: cycles::cycles_per_second() / config.req_rate as u64, start: cycles::rdtsc(), next: 0, native:!config.use_invoke, payload_get: RefCell::new(payload_get), payload_put: RefCell::new(payload_put), } } } // The Executable trait allowing YcsbSend to be scheduled by Netbricks. impl Executable for YcsbSend { // Called internally by Netbricks. fn execute(&mut self)
let mut p_get = self.payload_get.borrow_mut(); let mut p_put = self.payload_put.borrow_mut(); // XXX Heavily dependent on how `Ycsb` creates a key. Only the first four // bytes of the key matter, the rest are zero. The value is always zero. self.workload.borrow_mut().abc( |tenant, key| { // First 11 bytes on the payload were already pre-populated with the // extension name (3 bytes), and the table id (8 bytes). Just write in the // first 4 bytes of the key. p_get[11..15].copy_from_slice(&key[0..4]); self.sender.send_invoke(tenant, 3, &p_get, curr) }, |tenant, key, _val| { // First 13 bytes on the payload were already pre-populated with the // extension name (3 bytes), the table id (8 bytes), and the key length (2 // bytes). Just write in the first 4 bytes of the key. The value is anyway // always zero. p_put[13..17].copy_from_slice(&key[0..4]); self.sender.send_invoke(tenant, 3, &p_put, curr) }, ); } // Update the time stamp at which the next request should be generated, assuming that // the first request was sent out at self.start. self.sent += 1; self.next = self.start + self.sent * self.rate_inv; } } fn dependencies(&mut self) -> Vec<usize> { vec![] } } /// Receives responses to YCSB requests sent out by YcsbSend. struct YcsbRecv<T> where T: PacketTx + PacketRx + Display + Clone +'static, { // The network stack required to receives RPC response packets from a network port. receiver: dispatch::Receiver<T>, // The number of response packets to wait for before printing out statistics. responses: u64, // Time stamp in cycles at which measurement started. Required to calculate observed // throughput of the Sandstorm server. start: u64, // The total number of responses received so far. recvd: u64, // Vector of sampled request latencies. Required to calculate distributions once all responses // have been received. latencies: Vec<u64>, // If true, this receiver will make latency measurements. master: bool, // If true, then responses will be considered to correspond to native gets and puts. native: bool, // Time stamp in cycles at which measurement stopped. stop: u64, } // Implementation of methods on YcsbRecv. impl<T> YcsbRecv<T> where T: PacketTx + PacketRx + Display + Clone +'static, { /// Constructs a YcsbRecv. /// /// # Arguments /// /// * `port` : Network port on which responses will be polled for. /// * `resps`: The number of responses to wait for before calculating statistics. /// * `master`: Boolean indicating if the receiver should make latency measurements. /// * `native`: If true, responses will be considered to correspond to native gets and puts. /// /// # Return /// /// A YCSB response receiver that measures the median latency and throughput of a Sandstorm /// server. fn new(port: T, resps: u64, master: bool, native: bool) -> YcsbRecv<T> { YcsbRecv { receiver: dispatch::Receiver::new(port), responses: resps, start: cycles::rdtsc(), recvd: 0, latencies: Vec::with_capacity(resps as usize), master: master, native: native, stop: 0, } } } // Implementation of the `Drop` trait on YcsbRecv. impl<T> Drop for YcsbRecv<T> where T: PacketTx + PacketRx + Display + Clone +'static, { fn drop(&mut self) { // Calculate & print the throughput for all client threads. println!( "YCSB Throughput {}", self.recvd as f64 / cycles::to_seconds(self.stop - self.start) ); // Calculate & print median & tail latency only on the master thread. 
if self.master { self.latencies.sort(); let m; let t = self.latencies[(self.latencies.len() * 99) / 100]; match self.latencies.len() % 2 { 0 => { let n = self.latencies.len(); m = (self.latencies[n / 2] + self.latencies[(n / 2) + 1]) / 2; } _ => m = self.latencies[self.latencies.len() / 2], } println!( ">>> {} {}", cycles::to_seconds(m) * 1e9, cycles::to_seconds(t) * 1e9 ); } } } // Executable trait allowing YcsbRecv to be scheduled by Netbricks. impl<T> Executable for YcsbRecv<T> where T: PacketTx + PacketRx + Display + Clone +'static, { // Called internally by Netbricks. fn execute(&mut self) { // Don't do anything after all responses have been received. if self.responses <= self.recvd { return; } // Try to receive packets from the network port. // If there are packets, sample the latency of the server. if let Some(mut packets) = self.receiver.recv_res() { while let Some(packet) = packets.pop() { self.recvd += 1; // Measure latency on the master client after the first 2 million requests. // The start timestamp is present on the RPC response header. if self.recvd > 2 * 1000 * 1000 && self.master { let curr = cycles::rdtsc(); match self.native { // The response corresponds to an invoke() RPC. false => { let p = packet.parse_header::<InvokeResponse>(); self.latencies .push(curr - p.get_header().common_header.stamp); p.free_packet(); } // The response corresponds to a get() or put() RPC. // The opcode on the response identifies the RPC type. true => match parse_rpc_opcode(&packet) { OpCode::SandstormGetRpc => { let p = packet.parse_header::<GetResponse>(); self.latencies .push(curr - p.get_header().common_header.stamp); p.free_packet(); } OpCode::SandstormPutRpc => { let p = packet.parse_header::<PutResponse>(); self.latencies .push(curr - p.get_header().common_header.stamp); p.free_packet(); } _ => packet.free_packet(), }, } } else { packet.free_packet(); } } } // The moment all response packets have been received, set the value of the // stop timestamp so that throughput can be estimated later. if self.responses <= self.recvd { self.stop = cycles::rdtsc(); } } fn dependencies(&mut self) -> Vec<usize> { vec![] } } /// Sets up YcsbSend by adding it to a Netbricks scheduler. /// /// # Arguments /// /// * `config`: Network related configuration such as the MAC and IP address. /// * `ports`: Network port on which packets will be sent. /// * `scheduler`: Netbricks scheduler to which YcsbSend will be added. fn setup_send<S>( config: &config::ClientConfig, ports: Vec<CacheAligned<PortQueue>>, scheduler: &mut S, _core: i32, ) where S: Scheduler + Sized, { if ports.len()!= 1 { error!("Client should be configured with exactly 1 port!"); std::process::exit(1); } // Add the sender to a netbricks pipeline. match scheduler.add_task(YcsbSend::new( config, ports[0].clone(), config.num_reqs as u64, config.server_udp_ports as u16, )) { Ok(_) => { info!( "Successfully added YcsbSend with tx queue {}.", ports[0].txq() ); } Err(ref err) => { error!("Error while adding to Netbricks pipeline {}", err); std::process::exit(1); } } } /// Sets up YcsbRecv by adding it to a Netbricks scheduler. /// /// # Arguments /// /// * `ports`: Network port on which packets will be sent. /// * `scheduler`: Netbricks scheduler to which YcsbRecv will be added. /// * `master`: If true, the added YcsbRecv will make latency measurements. /// * `native`: If true, the added YcsbRecv will assume that responses correspond to gets /// and puts. 
fn setup_recv<S>( ports: Vec<CacheAligned<PortQueue>>, scheduler: &mut S, _core: i32, master: bool, native: bool, ) where S: Scheduler + Sized, { if ports.len()!= 1 { error!("Client should be configured with exactly 1 port!"); std::process::exit(1); } // Add the receiver to a netbricks pipeline. match scheduler.add_task(YcsbRecv::new( ports[0].clone(), 34 * 1000 * 1000 as u64, master, native, )) { Ok(_) => { info!( "Successfully added YcsbRecv with rx queue {}.", ports[0].rxq() ); } Err(ref err) => { error!("Error while adding to Netbricks pipeline {}", err); std::process::exit(1); } } } fn main() { db::env_logger::init().expect("ERROR: failed to initialize logger!"); let config = config::ClientConfig::load(); info!("Starting up Sandstorm client with config {:?}", config); // Based on the supplied client configuration, compute the amount of time it will take to send // out `num_reqs` requests at a rate of `req_rate` requests per second. let exec = config.num_reqs / config.req_rate; // Setup Netbricks. let mut net_context = setup::config_and_init_netbricks(&config); // Setup the client pipeline. net_context.start_schedulers(); // The core id's which will run the sender and receiver threads. // XXX The following two arrays heavily depend on the set of cores // configured in setup.rs let senders = [0, 2, 4, 6]; let receive = [1, 3, 5, 7]; assert!((senders.len() == 4) && (receive.len() == 4)); // Setup 4 senders, and 4 receivers. for i in 0..4 { // First, retrieve a tx-rx queue pair from Netbricks let port = net_context .rx_queues .get(&senders[i]) .expect("Failed to retrieve network port!") .clone(); let mut master = false; if i == 0 { master = true; } let native =!config.use_invoke; // Setup the receive side. net_context .add_pipeline_to_core( receive[i], Arc::new( move |_ports, sched: &mut StandaloneScheduler, core: i32, _sibling| { setup_recv(port.clone(), sched, core, master, native) }, ), ) .expect("Failed to initialize receive side."); // Setup the send side. net_context .add_pipeline_to_core( senders[i], Arc::new(
{ // Return if there are no more requests to generate. if self.requests <= self.sent { return; } // Get the current time stamp so that we can determine if it is time to issue the next RPC. let curr = cycles::rdtsc(); // If it is either time to send out a request, or if a request has never been sent out, // then, do so. if curr >= self.next || self.next == 0 { if self.native == true { // Configured to issue native RPCs, issue a regular get()/put() operation. self.workload.borrow_mut().abc( |tenant, key| self.sender.send_get(tenant, 1, key, curr), |tenant, key, val| self.sender.send_put(tenant, 1, key, val, curr), ); } else { // Configured to issue invoke() RPCs.
identifier_body
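// A minimal, std-only sketch of the open-loop pacing used by YcsbSend::execute() above:
// the i-th request is due at start + i * rate_inv, so a caller that falls behind catches
// up immediately instead of drifting. The original measures time in rdtsc cycles via its
// `cycles` module; this sketch substitutes std::time::Instant, and the names `Pacer` and
// `try_send` are illustrative only, not part of the Sandstorm client.
use std::time::{Duration, Instant};

struct Pacer {
    start: Instant,
    rate_inv: Duration, // interval between two request generations
    sent: u64,
    requests: u64,
}

impl Pacer {
    fn new(req_rate: u64, requests: u64) -> Pacer {
        Pacer {
            start: Instant::now(),
            rate_inv: Duration::from_nanos(1_000_000_000 / req_rate),
            sent: 0,
            requests,
        }
    }

    /// Sends one request if it is due, mirroring the `curr >= self.next` check above.
    fn try_send<F: FnMut(u64)>(&mut self, mut send: F) {
        if self.sent >= self.requests {
            return;
        }
        let next = self.start + self.rate_inv * self.sent as u32;
        if Instant::now() >= next {
            send(self.sent);
            self.sent += 1;
        }
    }
}

fn main() {
    let mut pacer = Pacer::new(1000, 10); // 1000 req/s, 10 requests total
    while pacer.sent < pacer.requests {
        pacer.try_send(|i| println!("request {} sent", i));
    }
}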
ycsb.rs
fetches the data stored under a bytestring key of `self.key_len` bytes. // - set: A function that stores the data stored under a bytestring key of `self.key_len` bytes // with a bytestring value of `self.value_len` bytes. // # Return // A three tuple consisting of the duration that this thread ran the benchmark, the // number of gets it performed, and the number of puts it performed. pub fn abc<G, P, R>(&mut self, mut get: G, mut put: P) -> R where G: FnMut(u32, &[u8]) -> R, P: FnMut(u32, &[u8], &[u8]) -> R, { let is_get = (self.rng.gen::<u32>() % 100) >= self.put_pct as u32; // Sample a tenant. let t = self.tenant_rng.sample(&mut self.rng) as u32; // Sample a key, and convert into a little endian byte array. let k = self.key_rng.sample(&mut self.rng) as u32; let k: [u8; 4] = unsafe { transmute(k.to_le()) }; self.key_buf[0..mem::size_of::<u32>()].copy_from_slice(&k); if is_get { get(t, self.key_buf.as_slice()) } else { put(t, self.key_buf.as_slice(), self.value_buf.as_slice()) } } } /// Sends out YCSB based RPC requests to a Sandstorm server. struct YcsbSend { // The actual YCSB workload. Required to generate keys and values for get() and put() requests. workload: RefCell<Ycsb>, // Network stack required to actually send RPC requests out the network. sender: dispatch::Sender, // Total number of requests to be sent out. requests: u64, // Number of requests that have been sent out so far. sent: u64, // The inverse of the rate at which requests are to be generated. Basically, the time interval // between two request generations in cycles. rate_inv: u64, // The time stamp at which the workload started generating requests in cycles. start: u64, // The time stamp at which the next request must be issued in cycles. next: u64, // If true, RPC requests corresponding to native get() and put() operations are sent out. If // false, invoke() based RPC requests are sent out. native: bool, // Payload for an invoke() based get operation. Required in order to avoid making intermediate // copies of the extension name, table id, and key. payload_get: RefCell<Vec<u8>>, // Payload for an invoke() based put operation. Required in order to avoid making intermediate // copies of the extension name, table id, key length, key, and value. payload_put: RefCell<Vec<u8>>, } // Implementation of methods on YcsbSend. impl YcsbSend { /// Constructs a YcsbSend. /// /// # Arguments /// /// * `config`: Client configuration with YCSB related (key and value length etc.) as well as /// Network related (Server and Client MAC address etc.) parameters. /// * `port`: Network port over which requests will be sent out. /// * `reqs`: The number of requests to be issued to the server. /// * `dst_ports`: The total number of UDP ports the server is listening on. /// /// # Return /// /// A YCSB request generator. fn new( config: &config::ClientConfig, port: CacheAligned<PortQueue>, reqs: u64, dst_ports: u16, ) -> YcsbSend { // The payload on an invoke() based get request consists of the extensions name ("get"), // the table id to perform the lookup on, and the key to lookup. 
let payload_len = "get".as_bytes().len() + mem::size_of::<u64>() + config.key_len; let mut payload_get = Vec::with_capacity(payload_len); payload_get.extend_from_slice("get".as_bytes()); payload_get.extend_from_slice(&unsafe { transmute::<u64, [u8; 8]>(1u64.to_le()) }); payload_get.resize(payload_len, 0); // The payload on an invoke() based put request consists of the extensions name ("put"), // the table id to perform the lookup on, the length of the key to lookup, the key, and the // value to be inserted into the database. let payload_len = "put".as_bytes().len() + mem::size_of::<u64>() + mem::size_of::<u16>() + config.key_len + config.value_len; let mut payload_put = Vec::with_capacity(payload_len); payload_put.extend_from_slice("put".as_bytes()); payload_put.extend_from_slice(&unsafe { transmute::<u64, [u8; 8]>(1u64.to_le()) }); payload_put.extend_from_slice(&unsafe { transmute::<u16, [u8; 2]>((config.key_len as u16).to_le()) }); payload_put.resize(payload_len, 0); YcsbSend { workload: RefCell::new(Ycsb::new( config.key_len, config.value_len, config.n_keys, config.put_pct, config.skew, config.num_tenants, config.tenant_skew, )), sender: dispatch::Sender::new(config, port, dst_ports), requests: reqs, sent: 0, rate_inv: cycles::cycles_per_second() / config.req_rate as u64, start: cycles::rdtsc(), next: 0, native:!config.use_invoke, payload_get: RefCell::new(payload_get), payload_put: RefCell::new(payload_put), } } } // The Executable trait allowing YcsbSend to be scheduled by Netbricks. impl Executable for YcsbSend { // Called internally by Netbricks. fn execute(&mut self) { // Return if there are no more requests to generate. if self.requests <= self.sent { return; } // Get the current time stamp so that we can determine if it is time to issue the next RPC. let curr = cycles::rdtsc(); // If it is either time to send out a request, or if a request has never been sent out, // then, do so. if curr >= self.next || self.next == 0 { if self.native == true { // Configured to issue native RPCs, issue a regular get()/put() operation. self.workload.borrow_mut().abc( |tenant, key| self.sender.send_get(tenant, 1, key, curr), |tenant, key, val| self.sender.send_put(tenant, 1, key, val, curr), ); } else { // Configured to issue invoke() RPCs. let mut p_get = self.payload_get.borrow_mut(); let mut p_put = self.payload_put.borrow_mut(); // XXX Heavily dependent on how `Ycsb` creates a key. Only the first four // bytes of the key matter, the rest are zero. The value is always zero. self.workload.borrow_mut().abc( |tenant, key| { // First 11 bytes on the payload were already pre-populated with the // extension name (3 bytes), and the table id (8 bytes). Just write in the // first 4 bytes of the key. p_get[11..15].copy_from_slice(&key[0..4]); self.sender.send_invoke(tenant, 3, &p_get, curr) }, |tenant, key, _val| { // First 13 bytes on the payload were already pre-populated with the // extension name (3 bytes), the table id (8 bytes), and the key length (2 // bytes). Just write in the first 4 bytes of the key. The value is anyway // always zero. p_put[13..17].copy_from_slice(&key[0..4]); self.sender.send_invoke(tenant, 3, &p_put, curr) }, ); } // Update the time stamp at which the next request should be generated, assuming that // the first request was sent out at self.start. self.sent += 1; self.next = self.start + self.sent * self.rate_inv; } } fn dependencies(&mut self) -> Vec<usize> { vec![] } } /// Receives responses to YCSB requests sent out by YcsbSend. 
struct YcsbRecv<T> where T: PacketTx + PacketRx + Display + Clone +'static, { // The network stack required to receives RPC response packets from a network port. receiver: dispatch::Receiver<T>, // The number of response packets to wait for before printing out statistics. responses: u64, // Time stamp in cycles at which measurement started. Required to calculate observed // throughput of the Sandstorm server. start: u64, // The total number of responses received so far. recvd: u64, // Vector of sampled request latencies. Required to calculate distributions once all responses // have been received. latencies: Vec<u64>, // If true, this receiver will make latency measurements. master: bool, // If true, then responses will be considered to correspond to native gets and puts. native: bool, // Time stamp in cycles at which measurement stopped. stop: u64, } // Implementation of methods on YcsbRecv. impl<T> YcsbRecv<T> where T: PacketTx + PacketRx + Display + Clone +'static, { /// Constructs a YcsbRecv. /// /// # Arguments /// /// * `port` : Network port on which responses will be polled for. /// * `resps`: The number of responses to wait for before calculating statistics. /// * `master`: Boolean indicating if the receiver should make latency measurements. /// * `native`: If true, responses will be considered to correspond to native gets and puts. /// /// # Return /// /// A YCSB response receiver that measures the median latency and throughput of a Sandstorm /// server. fn new(port: T, resps: u64, master: bool, native: bool) -> YcsbRecv<T> { YcsbRecv { receiver: dispatch::Receiver::new(port), responses: resps, start: cycles::rdtsc(), recvd: 0, latencies: Vec::with_capacity(resps as usize), master: master, native: native, stop: 0, } } } // Implementation of the `Drop` trait on YcsbRecv. impl<T> Drop for YcsbRecv<T> where T: PacketTx + PacketRx + Display + Clone +'static, { fn drop(&mut self) { // Calculate & print the throughput for all client threads. println!( "YCSB Throughput {}", self.recvd as f64 / cycles::to_seconds(self.stop - self.start) ); // Calculate & print median & tail latency only on the master thread. if self.master { self.latencies.sort(); let m; let t = self.latencies[(self.latencies.len() * 99) / 100]; match self.latencies.len() % 2 { 0 => { let n = self.latencies.len(); m = (self.latencies[n / 2] + self.latencies[(n / 2) + 1]) / 2; } _ => m = self.latencies[self.latencies.len() / 2], } println!( ">>> {} {}", cycles::to_seconds(m) * 1e9, cycles::to_seconds(t) * 1e9 ); } } } // Executable trait allowing YcsbRecv to be scheduled by Netbricks. impl<T> Executable for YcsbRecv<T> where T: PacketTx + PacketRx + Display + Clone +'static, { // Called internally by Netbricks. fn execute(&mut self) { // Don't do anything after all responses have been received. if self.responses <= self.recvd { return; } // Try to receive packets from the network port. // If there are packets, sample the latency of the server. if let Some(mut packets) = self.receiver.recv_res() { while let Some(packet) = packets.pop() { self.recvd += 1; // Measure latency on the master client after the first 2 million requests. // The start timestamp is present on the RPC response header. if self.recvd > 2 * 1000 * 1000 && self.master { let curr = cycles::rdtsc(); match self.native { // The response corresponds to an invoke() RPC. 
false => { let p = packet.parse_header::<InvokeResponse>(); self.latencies .push(curr - p.get_header().common_header.stamp); p.free_packet(); } // The response corresponds to a get() or put() RPC. // The opcode on the response identifies the RPC type. true => match parse_rpc_opcode(&packet) { OpCode::SandstormGetRpc => { let p = packet.parse_header::<GetResponse>(); self.latencies .push(curr - p.get_header().common_header.stamp); p.free_packet(); } OpCode::SandstormPutRpc => { let p = packet.parse_header::<PutResponse>(); self.latencies .push(curr - p.get_header().common_header.stamp); p.free_packet(); } _ => packet.free_packet(), }, } } else { packet.free_packet(); } } } // The moment all response packets have been received, set the value of the // stop timestamp so that throughput can be estimated later. if self.responses <= self.recvd { self.stop = cycles::rdtsc(); } } fn dependencies(&mut self) -> Vec<usize> { vec![] } } /// Sets up YcsbSend by adding it to a Netbricks scheduler. /// /// # Arguments /// /// * `config`: Network related configuration such as the MAC and IP address. /// * `ports`: Network port on which packets will be sent. /// * `scheduler`: Netbricks scheduler to which YcsbSend will be added. fn setup_send<S>( config: &config::ClientConfig, ports: Vec<CacheAligned<PortQueue>>, scheduler: &mut S, _core: i32, ) where S: Scheduler + Sized, { if ports.len()!= 1 { error!("Client should be configured with exactly 1 port!"); std::process::exit(1); } // Add the sender to a netbricks pipeline. match scheduler.add_task(YcsbSend::new( config, ports[0].clone(), config.num_reqs as u64, config.server_udp_ports as u16, )) { Ok(_) => { info!( "Successfully added YcsbSend with tx queue {}.", ports[0].txq() ); } Err(ref err) => { error!("Error while adding to Netbricks pipeline {}", err); std::process::exit(1); } } } /// Sets up YcsbRecv by adding it to a Netbricks scheduler. /// /// # Arguments /// /// * `ports`: Network port on which packets will be sent. /// * `scheduler`: Netbricks scheduler to which YcsbRecv will be added. /// * `master`: If true, the added YcsbRecv will make latency measurements. /// * `native`: If true, the added YcsbRecv will assume that responses correspond to gets /// and puts. fn setup_recv<S>( ports: Vec<CacheAligned<PortQueue>>, scheduler: &mut S, _core: i32, master: bool, native: bool, ) where S: Scheduler + Sized, { if ports.len()!= 1 { error!("Client should be configured with exactly 1 port!"); std::process::exit(1); } // Add the receiver to a netbricks pipeline. match scheduler.add_task(YcsbRecv::new( ports[0].clone(), 34 * 1000 * 1000 as u64, master, native, )) { Ok(_) => { info!( "Successfully added YcsbRecv with rx queue {}.", ports[0].rxq() ); } Err(ref err) => { error!("Error while adding to Netbricks pipeline {}", err); std::process::exit(1); } } } fn main() { db::env_logger::init().expect("ERROR: failed to initialize logger!"); let config = config::ClientConfig::load(); info!("Starting up Sandstorm client with config {:?}", config); // Based on the supplied client configuration, compute the amount of time it will take to send // out `num_reqs` requests at a rate of `req_rate` requests per second. let exec = config.num_reqs / config.req_rate;
// Setup Netbricks. let mut net_context = setup::config_and_init_netbricks(&config); // Setup the client pipeline. net_context.start_schedulers(); // The core id's which will run the sender and receiver threads. // XXX The following two arrays heavily depend on the set of cores // configured in setup.rs let senders = [0, 2, 4, 6]; let receive = [1, 3, 5, 7]; assert!((senders.len() == 4) && (receive.len() == 4)); // Setup 4 senders, and 4 receivers. for i in 0..4 { // First, retrieve a tx-rx queue pair from Netbricks let port = net_context .rx_queues .get(&senders[i]) .expect("Failed to retrieve network port!") .clone(); let mut master = false; if i == 0 { master = true; } let native =!config.use_invoke; // Setup the receive side. net_context .add_pipeline_to_core( receive[i], Arc::new( move |_ports, sched: &mut StandaloneScheduler, core: i32, _sibling| { setup_recv(port.clone(), sched, core, master, native) }, ), ) .expect("Failed to initialize receive side."); // Setup the send side. net_context .add_pipeline_to_core( senders[i], Arc::new(
random_line_split
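// A std-only sketch of the invoke() payload layout that YcsbSend::new() pre-populates above.
// The "get" payload is: extension name (3 bytes) | table id (8 bytes, LE) | key.
// The "put" payload is: extension name (3 bytes) | table id (8 bytes, LE) | key length
// (2 bytes, LE) | key | value. Only the first 4 key bytes are rewritten per request, which
// is why the hot path patches bytes 11..15 ("get") and 13..17 ("put"). The key and value
// lengths used here are arbitrary illustrative values, and to_le_bytes() stands in for the
// unsafe transmute used by the original code.
fn build_get_payload(table_id: u64, key_len: usize) -> Vec<u8> {
    let mut p = Vec::with_capacity(3 + 8 + key_len);
    p.extend_from_slice(b"get");
    p.extend_from_slice(&table_id.to_le_bytes());
    p.resize(3 + 8 + key_len, 0); // key bytes start at offset 11
    p
}

fn build_put_payload(table_id: u64, key_len: usize, value_len: usize) -> Vec<u8> {
    let mut p = Vec::with_capacity(3 + 8 + 2 + key_len + value_len);
    p.extend_from_slice(b"put");
    p.extend_from_slice(&table_id.to_le_bytes());
    p.extend_from_slice(&(key_len as u16).to_le_bytes());
    p.resize(3 + 8 + 2 + key_len + value_len, 0); // key bytes start at offset 13
    p
}

fn main() {
    let key: u32 = 0xDEAD_BEEF;

    let mut get = build_get_payload(1, 30);
    // Patch in the sampled key exactly like the send closure does.
    get[11..15].copy_from_slice(&key.to_le_bytes());
    assert_eq!(&get[0..3], b"get");
    assert_eq!(get.len(), 41);

    let mut put = build_put_payload(1, 30, 100);
    put[13..17].copy_from_slice(&key.to_le_bytes());
    assert_eq!(put.len(), 143);
}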
ycsb.rs
the data stored under a bytestring key of `self.key_len` bytes. // - set: A function that stores the data stored under a bytestring key of `self.key_len` bytes // with a bytestring value of `self.value_len` bytes. // # Return // A three tuple consisting of the duration that this thread ran the benchmark, the // number of gets it performed, and the number of puts it performed. pub fn abc<G, P, R>(&mut self, mut get: G, mut put: P) -> R where G: FnMut(u32, &[u8]) -> R, P: FnMut(u32, &[u8], &[u8]) -> R, { let is_get = (self.rng.gen::<u32>() % 100) >= self.put_pct as u32; // Sample a tenant. let t = self.tenant_rng.sample(&mut self.rng) as u32; // Sample a key, and convert into a little endian byte array. let k = self.key_rng.sample(&mut self.rng) as u32; let k: [u8; 4] = unsafe { transmute(k.to_le()) }; self.key_buf[0..mem::size_of::<u32>()].copy_from_slice(&k); if is_get { get(t, self.key_buf.as_slice()) } else { put(t, self.key_buf.as_slice(), self.value_buf.as_slice()) } } } /// Sends out YCSB based RPC requests to a Sandstorm server. struct YcsbSend { // The actual YCSB workload. Required to generate keys and values for get() and put() requests. workload: RefCell<Ycsb>, // Network stack required to actually send RPC requests out the network. sender: dispatch::Sender, // Total number of requests to be sent out. requests: u64, // Number of requests that have been sent out so far. sent: u64, // The inverse of the rate at which requests are to be generated. Basically, the time interval // between two request generations in cycles. rate_inv: u64, // The time stamp at which the workload started generating requests in cycles. start: u64, // The time stamp at which the next request must be issued in cycles. next: u64, // If true, RPC requests corresponding to native get() and put() operations are sent out. If // false, invoke() based RPC requests are sent out. native: bool, // Payload for an invoke() based get operation. Required in order to avoid making intermediate // copies of the extension name, table id, and key. payload_get: RefCell<Vec<u8>>, // Payload for an invoke() based put operation. Required in order to avoid making intermediate // copies of the extension name, table id, key length, key, and value. payload_put: RefCell<Vec<u8>>, } // Implementation of methods on YcsbSend. impl YcsbSend { /// Constructs a YcsbSend. /// /// # Arguments /// /// * `config`: Client configuration with YCSB related (key and value length etc.) as well as /// Network related (Server and Client MAC address etc.) parameters. /// * `port`: Network port over which requests will be sent out. /// * `reqs`: The number of requests to be issued to the server. /// * `dst_ports`: The total number of UDP ports the server is listening on. /// /// # Return /// /// A YCSB request generator. fn new( config: &config::ClientConfig, port: CacheAligned<PortQueue>, reqs: u64, dst_ports: u16, ) -> YcsbSend { // The payload on an invoke() based get request consists of the extensions name ("get"), // the table id to perform the lookup on, and the key to lookup. 
let payload_len = "get".as_bytes().len() + mem::size_of::<u64>() + config.key_len; let mut payload_get = Vec::with_capacity(payload_len); payload_get.extend_from_slice("get".as_bytes()); payload_get.extend_from_slice(&unsafe { transmute::<u64, [u8; 8]>(1u64.to_le()) }); payload_get.resize(payload_len, 0); // The payload on an invoke() based put request consists of the extensions name ("put"), // the table id to perform the lookup on, the length of the key to lookup, the key, and the // value to be inserted into the database. let payload_len = "put".as_bytes().len() + mem::size_of::<u64>() + mem::size_of::<u16>() + config.key_len + config.value_len; let mut payload_put = Vec::with_capacity(payload_len); payload_put.extend_from_slice("put".as_bytes()); payload_put.extend_from_slice(&unsafe { transmute::<u64, [u8; 8]>(1u64.to_le()) }); payload_put.extend_from_slice(&unsafe { transmute::<u16, [u8; 2]>((config.key_len as u16).to_le()) }); payload_put.resize(payload_len, 0); YcsbSend { workload: RefCell::new(Ycsb::new( config.key_len, config.value_len, config.n_keys, config.put_pct, config.skew, config.num_tenants, config.tenant_skew, )), sender: dispatch::Sender::new(config, port, dst_ports), requests: reqs, sent: 0, rate_inv: cycles::cycles_per_second() / config.req_rate as u64, start: cycles::rdtsc(), next: 0, native:!config.use_invoke, payload_get: RefCell::new(payload_get), payload_put: RefCell::new(payload_put), } } } // The Executable trait allowing YcsbSend to be scheduled by Netbricks. impl Executable for YcsbSend { // Called internally by Netbricks. fn execute(&mut self) { // Return if there are no more requests to generate. if self.requests <= self.sent { return; } // Get the current time stamp so that we can determine if it is time to issue the next RPC. let curr = cycles::rdtsc(); // If it is either time to send out a request, or if a request has never been sent out, // then, do so. if curr >= self.next || self.next == 0 { if self.native == true { // Configured to issue native RPCs, issue a regular get()/put() operation. self.workload.borrow_mut().abc( |tenant, key| self.sender.send_get(tenant, 1, key, curr), |tenant, key, val| self.sender.send_put(tenant, 1, key, val, curr), ); } else { // Configured to issue invoke() RPCs. let mut p_get = self.payload_get.borrow_mut(); let mut p_put = self.payload_put.borrow_mut(); // XXX Heavily dependent on how `Ycsb` creates a key. Only the first four // bytes of the key matter, the rest are zero. The value is always zero. self.workload.borrow_mut().abc( |tenant, key| { // First 11 bytes on the payload were already pre-populated with the // extension name (3 bytes), and the table id (8 bytes). Just write in the // first 4 bytes of the key. p_get[11..15].copy_from_slice(&key[0..4]); self.sender.send_invoke(tenant, 3, &p_get, curr) }, |tenant, key, _val| { // First 13 bytes on the payload were already pre-populated with the // extension name (3 bytes), the table id (8 bytes), and the key length (2 // bytes). Just write in the first 4 bytes of the key. The value is anyway // always zero. p_put[13..17].copy_from_slice(&key[0..4]); self.sender.send_invoke(tenant, 3, &p_put, curr) }, ); } // Update the time stamp at which the next request should be generated, assuming that // the first request was sent out at self.start. self.sent += 1; self.next = self.start + self.sent * self.rate_inv; } } fn dependencies(&mut self) -> Vec<usize> { vec![] } } /// Receives responses to YCSB requests sent out by YcsbSend. 
struct YcsbRecv<T> where T: PacketTx + PacketRx + Display + Clone +'static, { // The network stack required to receives RPC response packets from a network port. receiver: dispatch::Receiver<T>, // The number of response packets to wait for before printing out statistics. responses: u64, // Time stamp in cycles at which measurement started. Required to calculate observed // throughput of the Sandstorm server. start: u64, // The total number of responses received so far. recvd: u64, // Vector of sampled request latencies. Required to calculate distributions once all responses // have been received. latencies: Vec<u64>, // If true, this receiver will make latency measurements. master: bool, // If true, then responses will be considered to correspond to native gets and puts. native: bool, // Time stamp in cycles at which measurement stopped. stop: u64, } // Implementation of methods on YcsbRecv. impl<T> YcsbRecv<T> where T: PacketTx + PacketRx + Display + Clone +'static, { /// Constructs a YcsbRecv. /// /// # Arguments /// /// * `port` : Network port on which responses will be polled for. /// * `resps`: The number of responses to wait for before calculating statistics. /// * `master`: Boolean indicating if the receiver should make latency measurements. /// * `native`: If true, responses will be considered to correspond to native gets and puts. /// /// # Return /// /// A YCSB response receiver that measures the median latency and throughput of a Sandstorm /// server. fn
(port: T, resps: u64, master: bool, native: bool) -> YcsbRecv<T> { YcsbRecv { receiver: dispatch::Receiver::new(port), responses: resps, start: cycles::rdtsc(), recvd: 0, latencies: Vec::with_capacity(resps as usize), master: master, native: native, stop: 0, } } } // Implementation of the `Drop` trait on YcsbRecv. impl<T> Drop for YcsbRecv<T> where T: PacketTx + PacketRx + Display + Clone +'static, { fn drop(&mut self) { // Calculate & print the throughput for all client threads. println!( "YCSB Throughput {}", self.recvd as f64 / cycles::to_seconds(self.stop - self.start) ); // Calculate & print median & tail latency only on the master thread. if self.master { self.latencies.sort(); let m; let t = self.latencies[(self.latencies.len() * 99) / 100]; match self.latencies.len() % 2 { 0 => { let n = self.latencies.len(); m = (self.latencies[n / 2] + self.latencies[(n / 2) + 1]) / 2; } _ => m = self.latencies[self.latencies.len() / 2], } println!( ">>> {} {}", cycles::to_seconds(m) * 1e9, cycles::to_seconds(t) * 1e9 ); } } } // Executable trait allowing YcsbRecv to be scheduled by Netbricks. impl<T> Executable for YcsbRecv<T> where T: PacketTx + PacketRx + Display + Clone +'static, { // Called internally by Netbricks. fn execute(&mut self) { // Don't do anything after all responses have been received. if self.responses <= self.recvd { return; } // Try to receive packets from the network port. // If there are packets, sample the latency of the server. if let Some(mut packets) = self.receiver.recv_res() { while let Some(packet) = packets.pop() { self.recvd += 1; // Measure latency on the master client after the first 2 million requests. // The start timestamp is present on the RPC response header. if self.recvd > 2 * 1000 * 1000 && self.master { let curr = cycles::rdtsc(); match self.native { // The response corresponds to an invoke() RPC. false => { let p = packet.parse_header::<InvokeResponse>(); self.latencies .push(curr - p.get_header().common_header.stamp); p.free_packet(); } // The response corresponds to a get() or put() RPC. // The opcode on the response identifies the RPC type. true => match parse_rpc_opcode(&packet) { OpCode::SandstormGetRpc => { let p = packet.parse_header::<GetResponse>(); self.latencies .push(curr - p.get_header().common_header.stamp); p.free_packet(); } OpCode::SandstormPutRpc => { let p = packet.parse_header::<PutResponse>(); self.latencies .push(curr - p.get_header().common_header.stamp); p.free_packet(); } _ => packet.free_packet(), }, } } else { packet.free_packet(); } } } // The moment all response packets have been received, set the value of the // stop timestamp so that throughput can be estimated later. if self.responses <= self.recvd { self.stop = cycles::rdtsc(); } } fn dependencies(&mut self) -> Vec<usize> { vec![] } } /// Sets up YcsbSend by adding it to a Netbricks scheduler. /// /// # Arguments /// /// * `config`: Network related configuration such as the MAC and IP address. /// * `ports`: Network port on which packets will be sent. /// * `scheduler`: Netbricks scheduler to which YcsbSend will be added. fn setup_send<S>( config: &config::ClientConfig, ports: Vec<CacheAligned<PortQueue>>, scheduler: &mut S, _core: i32, ) where S: Scheduler + Sized, { if ports.len()!= 1 { error!("Client should be configured with exactly 1 port!"); std::process::exit(1); } // Add the sender to a netbricks pipeline. 
match scheduler.add_task(YcsbSend::new( config, ports[0].clone(), config.num_reqs as u64, config.server_udp_ports as u16, )) { Ok(_) => { info!( "Successfully added YcsbSend with tx queue {}.", ports[0].txq() ); } Err(ref err) => { error!("Error while adding to Netbricks pipeline {}", err); std::process::exit(1); } } } /// Sets up YcsbRecv by adding it to a Netbricks scheduler. /// /// # Arguments /// /// * `ports`: Network port on which packets will be sent. /// * `scheduler`: Netbricks scheduler to which YcsbRecv will be added. /// * `master`: If true, the added YcsbRecv will make latency measurements. /// * `native`: If true, the added YcsbRecv will assume that responses correspond to gets /// and puts. fn setup_recv<S>( ports: Vec<CacheAligned<PortQueue>>, scheduler: &mut S, _core: i32, master: bool, native: bool, ) where S: Scheduler + Sized, { if ports.len()!= 1 { error!("Client should be configured with exactly 1 port!"); std::process::exit(1); } // Add the receiver to a netbricks pipeline. match scheduler.add_task(YcsbRecv::new( ports[0].clone(), 34 * 1000 * 1000 as u64, master, native, )) { Ok(_) => { info!( "Successfully added YcsbRecv with rx queue {}.", ports[0].rxq() ); } Err(ref err) => { error!("Error while adding to Netbricks pipeline {}", err); std::process::exit(1); } } } fn main() { db::env_logger::init().expect("ERROR: failed to initialize logger!"); let config = config::ClientConfig::load(); info!("Starting up Sandstorm client with config {:?}", config); // Based on the supplied client configuration, compute the amount of time it will take to send // out `num_reqs` requests at a rate of `req_rate` requests per second. let exec = config.num_reqs / config.req_rate; // Setup Netbricks. let mut net_context = setup::config_and_init_netbricks(&config); // Setup the client pipeline. net_context.start_schedulers(); // The core id's which will run the sender and receiver threads. // XXX The following two arrays heavily depend on the set of cores // configured in setup.rs let senders = [0, 2, 4, 6]; let receive = [1, 3, 5, 7]; assert!((senders.len() == 4) && (receive.len() == 4)); // Setup 4 senders, and 4 receivers. for i in 0..4 { // First, retrieve a tx-rx queue pair from Netbricks let port = net_context .rx_queues .get(&senders[i]) .expect("Failed to retrieve network port!") .clone(); let mut master = false; if i == 0 { master = true; } let native =!config.use_invoke; // Setup the receive side. net_context .add_pipeline_to_core( receive[i], Arc::new( move |_ports, sched: &mut StandaloneScheduler, core: i32, _sibling| { setup_recv(port.clone(), sched, core, master, native) }, ), ) .expect("Failed to initialize receive side."); // Setup the send side. net_context .add_pipeline_to_core( senders[i], Arc::new(
new
identifier_name
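// A small sketch of the summary statistics printed by YcsbRecv::drop() above: the median
// and the 99th-percentile tail of the sorted latency vector. Note that for an even-length
// vector the textbook median averages the elements at indices n/2 - 1 and n/2; the Drop
// impl above averages indices n/2 and n/2 + 1, which is off by one (and panics on a
// two-element vector). The helper below uses the conventional definition; converting
// cycle counts to nanoseconds is left to the caller.
fn median_and_p99(latencies: &mut Vec<u64>) -> (u64, u64) {
    assert!(!latencies.is_empty());
    latencies.sort();
    let n = latencies.len();
    let p99 = latencies[(n * 99) / 100];
    let median = if n % 2 == 0 {
        (latencies[n / 2 - 1] + latencies[n / 2]) / 2
    } else {
        latencies[n / 2]
    };
    (median, p99)
}

fn main() {
    let mut samples = vec![120, 80, 100, 400, 90, 110];
    let (m, t) = median_and_p99(&mut samples);
    println!(">>> median {} p99 {}", m, t);
}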
workspace.rs
Error>>>; pub type ReportFuture<T> = future::Shared<BoxFuture<(T, ReportTree), CancelError<ReportTree>>>; struct WorkspaceFileInner { workspace: Arc<RwLock<WorkspaceShared>>, pool: Arc<CpuPool>, cancel_token: CancelToken, source: Arc<RwLock<Source>>, message_locale: Locale, path: PathBuf, unit: Unit, // if Some, the file is managed by the client and the text is synchronized document: Option<OpenDocument>, // each parts are calculated on demand; in either case diagnostics are produced span: Option<IoFuture<Span>>, tokens: Option<ReportFuture<Arc<Vec<NestedToken>>>>, chunk: Option<ReportFuture<Arc<Chunk>>>, last_chunk: Option<Arc<Chunk>>, } type Inner = Arc<RwLock<WorkspaceFileInner>>; type InnerWrite<'a> = RwLockWriteGuard<'a, WorkspaceFileInner>; #[derive(Clone)] pub struct WorkspaceFile { inner: Inner, } impl fmt::Debug for WorkspaceFile { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let inner = self.inner.read(); f.debug_struct("WorkspaceFile") .field("workspace", &Ellipsis) // avoid excess output .field("pool", &Ellipsis) .field("cancel_token", &inner.cancel_token) .field("source", &Ellipsis) .field("message_locale", &inner.message_locale) .field("path", &inner.path) .field("unit", &inner.unit) .field("document", &inner.document) .field("span", &inner.span.as_ref().map(|_| Ellipsis)) .field("tokens", &inner.tokens.as_ref().map(|_| Ellipsis)) .field("chunk", &inner.chunk.as_ref().map(|_| Ellipsis)) .field("last_chunk", &inner.last_chunk.as_ref().map(|_| Ellipsis)) .finish() } } impl WorkspaceFile { fn new(shared: &Arc<RwLock<WorkspaceShared>>, pool: &Arc<CpuPool>, source: &Arc<RwLock<Source>>, message_locale: Locale, path: PathBuf) -> WorkspaceFile { WorkspaceFile { inner: Arc::new(RwLock::new(WorkspaceFileInner { workspace: shared.clone(), pool: pool.clone(), cancel_token: CancelToken::new(), source: source.clone(), message_locale: message_locale, path: path, unit: Unit::dummy(), document: None, span: None, tokens: None, chunk: None, last_chunk: None, })), } } fn cancel(&self) { let mut inner = self.inner.write(); inner.cancel_token.cancel(); inner.cancel_token = CancelToken::new(); inner.span = None; inner.tokens = None; inner.chunk = None; // also signal the workspace to cancel jobs inner.workspace.write().cancel(); } #[allow(dead_code)] pub fn path(&self) -> PathBuf { self.inner.read().path.clone() } fn update_document<F, E>(&self, f: F) -> Result<(), E> where F: FnOnce(Option<OpenDocument>) -> Result<Option<OpenDocument>, E> { self.cancel(); let mut inner = self.inner.write(); inner.document = f(inner.document.take())?; Ok(()) } fn ensure_span_with_inner(spare_inner: Inner, inner: &mut InnerWrite) -> IoFuture<Span> { if inner.span.is_none() { let fut = future::lazy(move || -> Result<Span, CancelError<io::Error>> { let mut inner = spare_inner.write(); inner.cancel_token.keep_going()?; let file = if let Some(ref doc) = inner.document { SourceFile::from_u8(inner.path.display().to_string(), doc.last_text.as_bytes().to_owned()) } else { SourceFile::from_file(&inner.path)? 
}; let span = if inner.unit.is_dummy() { let span = inner.source.write().add(file); inner.unit = span.unit(); span } else { inner.source.write().replace(inner.unit, file).unwrap() }; Ok(span) }); inner.span = Some(inner.pool.spawn(fut).boxed().shared()); } inner.span.as_ref().unwrap().clone() } pub fn ensure_span(&self) -> IoFuture<Span> { let cloned = self.inner.clone(); Self::ensure_span_with_inner(cloned, &mut self.inner.write()) } fn ensure_tokens_with_inner(spare_inner: Inner, inner: &mut InnerWrite) -> ReportFuture<Arc<Vec<NestedToken>>> { if inner.tokens.is_none() { let span_fut = Self::ensure_span_with_inner(spare_inner.clone(), inner); // important: the task has to be spawned outside of the future. // this is because, otherwise for the thread pool of n workers // the future chain of n+1 or more tasks will block as the i-th task // will spawn the (i+1)-th task without removing itself from the pool queue! // chaining the already-spawned future will ensure that // the task body will be only spawned after the last future has been finished. let fut = span_fut.then(move |span_ret| { let inner = spare_inner.read(); match span_ret { Ok(span) => { inner.cancel_token.keep_going()?; let source = inner.source.read(); let path = source.file(span.unit()).map(|f| f.path()); let diags = ReportTree::new(inner.message_locale, path); let report = diags.report(|span| diags::translate_span(span, &source)); let tokens = collect_tokens(&source, *span, &report); Ok((Arc::new(tokens), diags)) }, Err(e) => { Err(e.as_ref().map(|e| { // translate an I/O error into a report let dummy_diag = |msg: &Localize| { protocol::Diagnostic { range: protocol::Range { start: protocol::Position { line: 0, character: 0 }, end: protocol::Position { line: 0, character: 0 }, }, severity: Some(protocol::DiagnosticSeverity::Error), code: None, source: None, message: Localized::new(msg, inner.message_locale).to_string(), } }; let path = inner.path.display().to_string(); let config_path = inner.workspace.read().base.config_path_or_default(); let config_path = config_path.display().to_string(); let diags = ReportTree::new(inner.message_locale, Some(&path)); diags.add_diag(path, dummy_diag(&m::CannotOpenStartPath { error: e })); diags.add_diag(config_path, dummy_diag(&m::RestartRequired {})); diags })) }, } }); inner.tokens = Some(inner.pool.spawn(fut).boxed().shared()); } inner.tokens.as_ref().unwrap().clone() } pub fn ensure_tokens(&self) -> ReportFuture<Arc<Vec<NestedToken>>> { let cloned = self.inner.clone(); Self::ensure_tokens_with_inner(cloned, &mut self.inner.write()) } fn ensure_chunk_with_inner(spare_inner: Inner, inner: &mut InnerWrite) -> ReportFuture<Arc<Chunk>> { if inner.chunk.is_none() { let tokens_fut = Self::ensure_tokens_with_inner(spare_inner.clone(), inner); let fut = tokens_fut.map_err(|e| (*e).clone()).and_then(move |tokens_ret| { let tokens = (*tokens_ret.0).clone(); let parent_diags = tokens_ret.1.clone(); let mut inner = spare_inner.write(); inner.cancel_token.keep_going()?; let diags = ReportTree::new(inner.message_locale, None); diags.add_parent(parent_diags); // in this future source access is only needed for reporting let chunk = { let report = diags.report(|span| { diags::translate_span(span, &inner.source.read()) }); parse_to_chunk(tokens, &report) }; match chunk { Ok(chunk) => { let chunk = Arc::new(chunk); inner.last_chunk = Some(chunk.clone()); Ok((chunk, diags)) }, Err(_) => Err(From::from(diags)), } }); inner.chunk = Some(inner.pool.spawn(fut).boxed().shared()); } 
inner.chunk.as_ref().unwrap().clone() } pub fn ensure_chunk(&self) -> ReportFuture<Arc<Chunk>> { let cloned = self.inner.clone(); Self::ensure_chunk_with_inner(cloned, &mut self.inner.write()) } pub fn last_chunk(&self) -> Option<Arc<Chunk>> { self.inner.read().last_chunk.clone() } pub fn translate_position(&self, pos: &protocol::Position) -> BoxFuture<Pos, CancelError<()>> { let pos = pos.clone(); let source = self.inner.read().source.clone(); self.ensure_span().then(move |res| { match res { Ok(span) => { let source = source.read(); if let Some(file) = source.file(span.unit()) { Ok(position_to_pos(file, &pos)) } else { Ok(Pos::dummy()) } }, Err(e) => Err(e.as_ref().map(|_| ())) } }).boxed() } pub fn
(&mut self, version: u64, event: protocol::TextDocumentContentChangeEvent) -> WorkspaceResult<()> { // TODO, there are several ambiguities with offsets? if event.range.is_some() || event.rangeLength.is_some() { return Err(WorkspaceError("incremental edits not yet supported")); } self.update_document(move |doc| { if let Some(mut doc) = doc { if doc.last_version >= version { return Err(WorkspaceError("non-increasing version")); } doc.last_version = version; doc.last_text = event.text; Ok(Some(doc)) } else { Err(WorkspaceError("change notification with non-existent or non-open file")) } }) } } #[derive(Clone, Debug)] enum WorkspaceBase { Config(kailua_workspace::Config), Workspace(kailua_workspace::Workspace), } impl WorkspaceBase { fn config_path(&self) -> Option<&Path> { match *self { WorkspaceBase::Config(ref config) => config.config_path(), WorkspaceBase::Workspace(ref ws) => ws.config_path(), } } fn config_path_or_default(&self) -> PathBuf { if let Some(config_path) = self.config_path() { config_path.to_owned() } else { // we allow both `kailua.json` or `.vscode/kailua.json`, // for now we will issue an error at the latter self.base_dir().join(".vscode").join("kailua.json") } } fn base_dir(&self) -> &Path { match *self { WorkspaceBase::Config(ref config) => config.base_dir(), WorkspaceBase::Workspace(ref ws) => ws.base_dir(), } } } // a portion of Workspace that should be shared across WorkspaceFile. // this should not be modified in the normal cases (otherwise it can be easily deadlocked), // with an exception of cascading cancellation. struct WorkspaceShared { cancel_token: CancelToken, // used for stopping ongoing checks base: WorkspaceBase, check_outputs: Vec<Option<ReportFuture<Arc<Output>>>>, last_check_outputs: Vec<Option<Arc<Output>>>, } type Shared = Arc<RwLock<WorkspaceShared>>; type SharedWrite<'a> = RwLockWriteGuard<'a, WorkspaceShared>; impl fmt::Debug for WorkspaceShared { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { struct DummyOptionList<'a, T: 'a>(&'a [Option<T>]); impl<'a, T: 'a> fmt::Debug for DummyOptionList<'a, T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_list().entries(self.0.iter().map(|e| e.as_ref().map(|_| Ellipsis))).finish() } } f.debug_struct("WorkspaceShared") .field("base", &self.base) .field("cancel_token", &self.cancel_token) .field("check_outputs", &DummyOptionList(&self.check_outputs)) .field("last_check_outputs", &DummyOptionList(&self.last_check_outputs)) .finish() } } impl WorkspaceShared { fn cancel(&mut self) { self.cancel_token.cancel(); self.cancel_token = CancelToken::new(); for output in &mut self.check_outputs { *output = None; } } } struct WorkspaceFsSourceInner { cancel_token: CancelToken, // will be used independently of WorkspaceShared files: Arc<RwLock<HashMap<PathBuf, WorkspaceFile>>>, source: Arc<RwLock<Source>>, temp_units: Vec<Unit>, // will be gone after checking temp_files: HashMap<PathBuf, Chunk>, message_locale: Locale, root_report: ReportTree, } #[derive(Clone)] struct WorkspaceFsSource { inner: Rc<RefCell<WorkspaceFsSourceInner>>, } impl FsSource for WorkspaceFsSource { fn chunk_from_path(&self, path: Spanned<&Path>, _report: &Report) -> Result<Option<Chunk>, Option<Stop>> { let mut fssource = self.inner.borrow_mut(); fssource.cancel_token.keep_going::<()>().map_err(|_| Stop)?; // try to use the client-maintained text as a source code let files = fssource.files.clone(); let files = files.read(); if let Some(file) = files.get(path.base) { let (chunk, diags) = match file.ensure_chunk().wait() { 
Ok(res) => { let (ref chunk, ref diags) = *res; (Some((**chunk).clone()), diags.clone()) }, Err(res) => match *res { CancelError::Canceled => return Err(Some(Stop)), CancelError::Error(ref diags) => (None, diags.clone()) }, }; // this can be called multiple times, which ReportTree handles correctly fssource.root_report.add_parent(diags); return Ok(chunk); } drop(files); // avoid prolonged lock // try to use the already-read temporary chunk if let Some(chunk) = fssource.temp_files.get(path.base) { return Ok(Some(chunk.clone())); } // try to read the file (and finally raise an error if it can't be read) let sourcefile = match SourceFile::from_file(path.base) { Ok(f) => f, Err(ref e) if e.kind() == io::ErrorKind::NotFound => return Ok(None), Err(_) => return Err(None), }; let span = fssource.source.write().add(sourcefile); fssource.temp_units.push(span.unit()); let diags = ReportTree::new(fssource.message_locale, path.to_str()); fssource.root_report.add_parent(diags.clone()); let chunk = { let source = fssource.source.read(); let report = diags.report(|span| diags::translate_span(span, &source)); let tokens = collect_tokens(&source, span, &report); parse_to_chunk(tokens, &report) }; match chunk { Ok(chunk) => { fssource.temp_files.insert(path.base.to_owned(), chunk.clone()); Ok(Some(chunk)) }, Err(Stop) => Err(Some(Stop)), // we have already reported parsing errors } } } pub struct Workspace { message_locale: Locale, pool: Arc<CpuPool>, files: Arc<RwLock<HashMap<PathBuf, WorkspaceFile>>>, // conceptually this belongs to shared, but it is frequently updated by futures // unlike all other fields in shared, so getting this out avoids deadlock source: Arc<RwLock<Source>>, shared: Arc<RwLock<WorkspaceShared>>, } impl fmt::Debug for Workspace { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("Workspace") .field("message_locale", &self.message_locale) .field("pool", &Ellipsis) .field("files", &self.files) .field("source", &Ellipsis) .field("shared", &self.shared) .finish() } } impl Workspace { pub fn new(base_dir: PathBuf, pool
apply_change
identifier_name
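// A std-only sketch of the document-synchronization rule enforced by apply_change() above:
// only full-text replacements are accepted (incremental range edits are rejected), and the
// version number reported by the client must be strictly increasing, otherwise the change
// is refused. `OpenDoc` and `Change` are simplified stand-ins for the OpenDocument state
// and protocol::TextDocumentContentChangeEvent used by the real workspace.
struct OpenDoc {
    last_version: u64,
    last_text: String,
}

struct Change {
    range: Option<(usize, usize)>, // a real LSP change carries a Range + rangeLength
    text: String,
}

fn apply_change(doc: &mut OpenDoc, version: u64, change: Change) -> Result<(), &'static str> {
    if change.range.is_some() {
        return Err("incremental edits not yet supported");
    }
    if doc.last_version >= version {
        return Err("non-increasing version");
    }
    doc.last_version = version;
    doc.last_text = change.text;
    Ok(())
}

fn main() {
    let mut doc = OpenDoc { last_version: 1, last_text: "local x = 1".to_string() };
    apply_change(&mut doc, 2, Change { range: None, text: "local x = 2".to_string() }).unwrap();
    // A stale or duplicate version is rejected.
    assert!(apply_change(&mut doc, 2, Change { range: None, text: String::new() }).is_err());
}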
workspace.rs
::Error>>>; pub type ReportFuture<T> = future::Shared<BoxFuture<(T, ReportTree), CancelError<ReportTree>>>; struct WorkspaceFileInner { workspace: Arc<RwLock<WorkspaceShared>>, pool: Arc<CpuPool>, cancel_token: CancelToken, source: Arc<RwLock<Source>>, message_locale: Locale, path: PathBuf, unit: Unit, // if Some, the file is managed by the client and the text is synchronized document: Option<OpenDocument>, // each parts are calculated on demand; in either case diagnostics are produced span: Option<IoFuture<Span>>, tokens: Option<ReportFuture<Arc<Vec<NestedToken>>>>, chunk: Option<ReportFuture<Arc<Chunk>>>, last_chunk: Option<Arc<Chunk>>, } type Inner = Arc<RwLock<WorkspaceFileInner>>; type InnerWrite<'a> = RwLockWriteGuard<'a, WorkspaceFileInner>; #[derive(Clone)] pub struct WorkspaceFile { inner: Inner, } impl fmt::Debug for WorkspaceFile { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let inner = self.inner.read(); f.debug_struct("WorkspaceFile") .field("workspace", &Ellipsis) // avoid excess output .field("pool", &Ellipsis) .field("cancel_token", &inner.cancel_token) .field("source", &Ellipsis) .field("message_locale", &inner.message_locale) .field("path", &inner.path) .field("unit", &inner.unit) .field("document", &inner.document) .field("span", &inner.span.as_ref().map(|_| Ellipsis)) .field("tokens", &inner.tokens.as_ref().map(|_| Ellipsis)) .field("chunk", &inner.chunk.as_ref().map(|_| Ellipsis)) .field("last_chunk", &inner.last_chunk.as_ref().map(|_| Ellipsis)) .finish() } } impl WorkspaceFile { fn new(shared: &Arc<RwLock<WorkspaceShared>>, pool: &Arc<CpuPool>, source: &Arc<RwLock<Source>>, message_locale: Locale, path: PathBuf) -> WorkspaceFile { WorkspaceFile { inner: Arc::new(RwLock::new(WorkspaceFileInner { workspace: shared.clone(), pool: pool.clone(), cancel_token: CancelToken::new(), source: source.clone(), message_locale: message_locale, path: path, unit: Unit::dummy(), document: None, span: None, tokens: None, chunk: None, last_chunk: None, })), } } fn cancel(&self) { let mut inner = self.inner.write(); inner.cancel_token.cancel(); inner.cancel_token = CancelToken::new(); inner.span = None; inner.tokens = None; inner.chunk = None; // also signal the workspace to cancel jobs inner.workspace.write().cancel(); } #[allow(dead_code)] pub fn path(&self) -> PathBuf { self.inner.read().path.clone() } fn update_document<F, E>(&self, f: F) -> Result<(), E> where F: FnOnce(Option<OpenDocument>) -> Result<Option<OpenDocument>, E> { self.cancel(); let mut inner = self.inner.write(); inner.document = f(inner.document.take())?; Ok(()) } fn ensure_span_with_inner(spare_inner: Inner, inner: &mut InnerWrite) -> IoFuture<Span> { if inner.span.is_none() { let fut = future::lazy(move || -> Result<Span, CancelError<io::Error>> { let mut inner = spare_inner.write(); inner.cancel_token.keep_going()?; let file = if let Some(ref doc) = inner.document { SourceFile::from_u8(inner.path.display().to_string(), doc.last_text.as_bytes().to_owned()) } else { SourceFile::from_file(&inner.path)? 
}; let span = if inner.unit.is_dummy() { let span = inner.source.write().add(file); inner.unit = span.unit(); span } else { inner.source.write().replace(inner.unit, file).unwrap() }; Ok(span) }); inner.span = Some(inner.pool.spawn(fut).boxed().shared()); } inner.span.as_ref().unwrap().clone() } pub fn ensure_span(&self) -> IoFuture<Span> { let cloned = self.inner.clone(); Self::ensure_span_with_inner(cloned, &mut self.inner.write()) } fn ensure_tokens_with_inner(spare_inner: Inner, inner: &mut InnerWrite) -> ReportFuture<Arc<Vec<NestedToken>>> { if inner.tokens.is_none() { let span_fut = Self::ensure_span_with_inner(spare_inner.clone(), inner); // important: the task has to be spawned outside of the future. // this is because, otherwise for the thread pool of n workers // the future chain of n+1 or more tasks will block as the i-th task // will spawn the (i+1)-th task without removing itself from the pool queue! // chaining the already-spawned future will ensure that // the task body will be only spawned after the last future has been finished. let fut = span_fut.then(move |span_ret| { let inner = spare_inner.read(); match span_ret { Ok(span) => { inner.cancel_token.keep_going()?; let source = inner.source.read(); let path = source.file(span.unit()).map(|f| f.path()); let diags = ReportTree::new(inner.message_locale, path); let report = diags.report(|span| diags::translate_span(span, &source)); let tokens = collect_tokens(&source, *span, &report); Ok((Arc::new(tokens), diags)) }, Err(e) => { Err(e.as_ref().map(|e| { // translate an I/O error into a report let dummy_diag = |msg: &Localize| { protocol::Diagnostic { range: protocol::Range { start: protocol::Position { line: 0, character: 0 }, end: protocol::Position { line: 0, character: 0 }, }, severity: Some(protocol::DiagnosticSeverity::Error), code: None, source: None, message: Localized::new(msg, inner.message_locale).to_string(), } }; let path = inner.path.display().to_string(); let config_path = inner.workspace.read().base.config_path_or_default(); let config_path = config_path.display().to_string(); let diags = ReportTree::new(inner.message_locale, Some(&path)); diags.add_diag(path, dummy_diag(&m::CannotOpenStartPath { error: e })); diags.add_diag(config_path, dummy_diag(&m::RestartRequired {})); diags })) }, } }); inner.tokens = Some(inner.pool.spawn(fut).boxed().shared()); } inner.tokens.as_ref().unwrap().clone() } pub fn ensure_tokens(&self) -> ReportFuture<Arc<Vec<NestedToken>>> { let cloned = self.inner.clone(); Self::ensure_tokens_with_inner(cloned, &mut self.inner.write()) } fn ensure_chunk_with_inner(spare_inner: Inner, inner: &mut InnerWrite) -> ReportFuture<Arc<Chunk>> { if inner.chunk.is_none() { let tokens_fut = Self::ensure_tokens_with_inner(spare_inner.clone(), inner); let fut = tokens_fut.map_err(|e| (*e).clone()).and_then(move |tokens_ret| { let tokens = (*tokens_ret.0).clone(); let parent_diags = tokens_ret.1.clone(); let mut inner = spare_inner.write(); inner.cancel_token.keep_going()?; let diags = ReportTree::new(inner.message_locale, None); diags.add_parent(parent_diags); // in this future source access is only needed for reporting let chunk = { let report = diags.report(|span| { diags::translate_span(span, &inner.source.read()) }); parse_to_chunk(tokens, &report) }; match chunk { Ok(chunk) => { let chunk = Arc::new(chunk); inner.last_chunk = Some(chunk.clone()); Ok((chunk, diags)) }, Err(_) => Err(From::from(diags)), } }); inner.chunk = Some(inner.pool.spawn(fut).boxed().shared()); } 
inner.chunk.as_ref().unwrap().clone() } pub fn ensure_chunk(&self) -> ReportFuture<Arc<Chunk>> { let cloned = self.inner.clone(); Self::ensure_chunk_with_inner(cloned, &mut self.inner.write()) } pub fn last_chunk(&self) -> Option<Arc<Chunk>> { self.inner.read().last_chunk.clone() } pub fn translate_position(&self, pos: &protocol::Position) -> BoxFuture<Pos, CancelError<()>> { let pos = pos.clone(); let source = self.inner.read().source.clone(); self.ensure_span().then(move |res| { match res { Ok(span) => { let source = source.read(); if let Some(file) = source.file(span.unit()) { Ok(position_to_pos(file, &pos)) } else { Ok(Pos::dummy()) } }, Err(e) => Err(e.as_ref().map(|_| ())) } }).boxed() } pub fn apply_change(&mut self, version: u64, event: protocol::TextDocumentContentChangeEvent) -> WorkspaceResult<()> { // TODO, there are several ambiguities with offsets? if event.range.is_some() || event.rangeLength.is_some() { return Err(WorkspaceError("incremental edits not yet supported")); } self.update_document(move |doc| { if let Some(mut doc) = doc { if doc.last_version >= version { return Err(WorkspaceError("non-increasing version")); } doc.last_version = version; doc.last_text = event.text; Ok(Some(doc)) } else { Err(WorkspaceError("change notification with non-existent or non-open file")) } }) } } #[derive(Clone, Debug)] enum WorkspaceBase { Config(kailua_workspace::Config), Workspace(kailua_workspace::Workspace), } impl WorkspaceBase { fn config_path(&self) -> Option<&Path> { match *self { WorkspaceBase::Config(ref config) => config.config_path(), WorkspaceBase::Workspace(ref ws) => ws.config_path(), } } fn config_path_or_default(&self) -> PathBuf { if let Some(config_path) = self.config_path() { config_path.to_owned() } else { // we allow both `kailua.json` or `.vscode/kailua.json`, // for now we will issue an error at the latter self.base_dir().join(".vscode").join("kailua.json") } } fn base_dir(&self) -> &Path { match *self { WorkspaceBase::Config(ref config) => config.base_dir(), WorkspaceBase::Workspace(ref ws) => ws.base_dir(), } } } // a portion of Workspace that should be shared across WorkspaceFile. // this should not be modified in the normal cases (otherwise it can be easily deadlocked), // with an exception of cascading cancellation. 
struct WorkspaceShared { cancel_token: CancelToken, // used for stopping ongoing checks base: WorkspaceBase, check_outputs: Vec<Option<ReportFuture<Arc<Output>>>>, last_check_outputs: Vec<Option<Arc<Output>>>, } type Shared = Arc<RwLock<WorkspaceShared>>; type SharedWrite<'a> = RwLockWriteGuard<'a, WorkspaceShared>; impl fmt::Debug for WorkspaceShared { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { struct DummyOptionList<'a, T: 'a>(&'a [Option<T>]); impl<'a, T: 'a> fmt::Debug for DummyOptionList<'a, T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_list().entries(self.0.iter().map(|e| e.as_ref().map(|_| Ellipsis))).finish() } } f.debug_struct("WorkspaceShared") .field("base", &self.base) .field("cancel_token", &self.cancel_token) .field("check_outputs", &DummyOptionList(&self.check_outputs)) .field("last_check_outputs", &DummyOptionList(&self.last_check_outputs)) .finish() } } impl WorkspaceShared { fn cancel(&mut self) { self.cancel_token.cancel(); self.cancel_token = CancelToken::new(); for output in &mut self.check_outputs { *output = None; } } } struct WorkspaceFsSourceInner { cancel_token: CancelToken, // will be used independently of WorkspaceShared files: Arc<RwLock<HashMap<PathBuf, WorkspaceFile>>>, source: Arc<RwLock<Source>>, temp_units: Vec<Unit>, // will be gone after checking temp_files: HashMap<PathBuf, Chunk>, message_locale: Locale, root_report: ReportTree, } #[derive(Clone)] struct WorkspaceFsSource { inner: Rc<RefCell<WorkspaceFsSourceInner>>, } impl FsSource for WorkspaceFsSource { fn chunk_from_path(&self, path: Spanned<&Path>, _report: &Report) -> Result<Option<Chunk>, Option<Stop>> { let mut fssource = self.inner.borrow_mut(); fssource.cancel_token.keep_going::<()>().map_err(|_| Stop)?; // try to use the client-maintained text as a source code let files = fssource.files.clone(); let files = files.read(); if let Some(file) = files.get(path.base) { let (chunk, diags) = match file.ensure_chunk().wait() { Ok(res) => { let (ref chunk, ref diags) = *res; (Some((**chunk).clone()), diags.clone()) }, Err(res) => match *res { CancelError::Canceled => return Err(Some(Stop)), CancelError::Error(ref diags) => (None, diags.clone()) }, }; // this can be called multiple times, which ReportTree handles correctly fssource.root_report.add_parent(diags); return Ok(chunk); } drop(files); // avoid prolonged lock // try to use the already-read temporary chunk if let Some(chunk) = fssource.temp_files.get(path.base) { return Ok(Some(chunk.clone())); } // try to read the file (and finally raise an error if it can't be read) let sourcefile = match SourceFile::from_file(path.base) { Ok(f) => f, Err(ref e) if e.kind() == io::ErrorKind::NotFound => return Ok(None), Err(_) => return Err(None), }; let span = fssource.source.write().add(sourcefile); fssource.temp_units.push(span.unit()); let diags = ReportTree::new(fssource.message_locale, path.to_str()); fssource.root_report.add_parent(diags.clone()); let chunk = { let source = fssource.source.read(); let report = diags.report(|span| diags::translate_span(span, &source)); let tokens = collect_tokens(&source, span, &report); parse_to_chunk(tokens, &report) }; match chunk { Ok(chunk) => { fssource.temp_files.insert(path.base.to_owned(), chunk.clone()); Ok(Some(chunk)) }, Err(Stop) => Err(Some(Stop)), // we have already reported parsing errors } } } pub struct Workspace { message_locale: Locale, pool: Arc<CpuPool>, files: Arc<RwLock<HashMap<PathBuf, WorkspaceFile>>>, // conceptually this belongs to shared, but it is 
frequently updated by futures
    // unlike all other fields in shared, so getting this out avoids deadlock
    source: Arc<RwLock<Source>>,
    shared: Arc<RwLock<WorkspaceShared>>,
}

impl fmt::Debug for Workspace {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("Workspace")
            .field("message_locale", &self.message_locale)
            .field("pool", &Ellipsis)
            .field("files", &self.files)
            .field("source", &Ellipsis)
            .field("shared", &self.shared)
            .finish()
    }
}

impl Workspace {
    pub fn new(base_dir: PathBuf, pool:
random_line_split
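WorkspaceShared::cancel above trips the old cancel token, installs a fresh one, and drops any check outputs produced under the old token. Below is a minimal standalone sketch of that cancel-and-replace pattern; the AtomicBool-backed CancelToken and the Shared type are illustrative stand-ins, not kailua's actual API.

use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};

#[derive(Clone)]
struct CancelToken(Arc<AtomicBool>);

impl CancelToken {
    fn new() -> Self {
        CancelToken(Arc::new(AtomicBool::new(false)))
    }
    fn cancel(&self) {
        self.0.store(true, Ordering::SeqCst);
    }
    fn keep_going(&self) -> Result<(), ()> {
        if self.0.load(Ordering::SeqCst) { Err(()) } else { Ok(()) }
    }
}

struct Shared {
    cancel_token: CancelToken,
    check_outputs: Vec<Option<String>>, // stands in for the cached check futures
}

impl Shared {
    fn cancel(&mut self) {
        // trip the old token so in-flight checks holding a clone stop early
        self.cancel_token.cancel();
        // install a fresh token for the next round of checks
        self.cancel_token = CancelToken::new();
        // drop results produced under the old token
        for output in &mut self.check_outputs {
            *output = None;
        }
    }
}

fn main() {
    let mut shared = Shared {
        cancel_token: CancelToken::new(),
        check_outputs: vec![Some("old output".to_string())],
    };
    let worker_token = shared.cancel_token.clone();
    shared.cancel();
    assert!(worker_token.keep_going().is_err());        // old clone observes the cancellation
    assert!(shared.cancel_token.keep_going().is_ok());  // new token is live again
    assert!(shared.check_outputs.iter().all(|o| o.is_none()));
}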
workspace.rs
let source = inner.source.read(); let path = source.file(span.unit()).map(|f| f.path()); let diags = ReportTree::new(inner.message_locale, path); let report = diags.report(|span| diags::translate_span(span, &source)); let tokens = collect_tokens(&source, *span, &report); Ok((Arc::new(tokens), diags)) }, Err(e) => { Err(e.as_ref().map(|e| { // translate an I/O error into a report let dummy_diag = |msg: &Localize| { protocol::Diagnostic { range: protocol::Range { start: protocol::Position { line: 0, character: 0 }, end: protocol::Position { line: 0, character: 0 }, }, severity: Some(protocol::DiagnosticSeverity::Error), code: None, source: None, message: Localized::new(msg, inner.message_locale).to_string(), } }; let path = inner.path.display().to_string(); let config_path = inner.workspace.read().base.config_path_or_default(); let config_path = config_path.display().to_string(); let diags = ReportTree::new(inner.message_locale, Some(&path)); diags.add_diag(path, dummy_diag(&m::CannotOpenStartPath { error: e })); diags.add_diag(config_path, dummy_diag(&m::RestartRequired {})); diags })) }, } }); inner.tokens = Some(inner.pool.spawn(fut).boxed().shared()); } inner.tokens.as_ref().unwrap().clone() } pub fn ensure_tokens(&self) -> ReportFuture<Arc<Vec<NestedToken>>> { let cloned = self.inner.clone(); Self::ensure_tokens_with_inner(cloned, &mut self.inner.write()) } fn ensure_chunk_with_inner(spare_inner: Inner, inner: &mut InnerWrite) -> ReportFuture<Arc<Chunk>> { if inner.chunk.is_none() { let tokens_fut = Self::ensure_tokens_with_inner(spare_inner.clone(), inner); let fut = tokens_fut.map_err(|e| (*e).clone()).and_then(move |tokens_ret| { let tokens = (*tokens_ret.0).clone(); let parent_diags = tokens_ret.1.clone(); let mut inner = spare_inner.write(); inner.cancel_token.keep_going()?; let diags = ReportTree::new(inner.message_locale, None); diags.add_parent(parent_diags); // in this future source access is only needed for reporting let chunk = { let report = diags.report(|span| { diags::translate_span(span, &inner.source.read()) }); parse_to_chunk(tokens, &report) }; match chunk { Ok(chunk) => { let chunk = Arc::new(chunk); inner.last_chunk = Some(chunk.clone()); Ok((chunk, diags)) }, Err(_) => Err(From::from(diags)), } }); inner.chunk = Some(inner.pool.spawn(fut).boxed().shared()); } inner.chunk.as_ref().unwrap().clone() } pub fn ensure_chunk(&self) -> ReportFuture<Arc<Chunk>> { let cloned = self.inner.clone(); Self::ensure_chunk_with_inner(cloned, &mut self.inner.write()) } pub fn last_chunk(&self) -> Option<Arc<Chunk>> { self.inner.read().last_chunk.clone() } pub fn translate_position(&self, pos: &protocol::Position) -> BoxFuture<Pos, CancelError<()>> { let pos = pos.clone(); let source = self.inner.read().source.clone(); self.ensure_span().then(move |res| { match res { Ok(span) => { let source = source.read(); if let Some(file) = source.file(span.unit()) { Ok(position_to_pos(file, &pos)) } else { Ok(Pos::dummy()) } }, Err(e) => Err(e.as_ref().map(|_| ())) } }).boxed() } pub fn apply_change(&mut self, version: u64, event: protocol::TextDocumentContentChangeEvent) -> WorkspaceResult<()> { // TODO, there are several ambiguities with offsets? 
if event.range.is_some() || event.rangeLength.is_some() { return Err(WorkspaceError("incremental edits not yet supported")); } self.update_document(move |doc| { if let Some(mut doc) = doc { if doc.last_version >= version { return Err(WorkspaceError("non-increasing version")); } doc.last_version = version; doc.last_text = event.text; Ok(Some(doc)) } else { Err(WorkspaceError("change notification with non-existent or non-open file")) } }) } } #[derive(Clone, Debug)] enum WorkspaceBase { Config(kailua_workspace::Config), Workspace(kailua_workspace::Workspace), } impl WorkspaceBase { fn config_path(&self) -> Option<&Path> { match *self { WorkspaceBase::Config(ref config) => config.config_path(), WorkspaceBase::Workspace(ref ws) => ws.config_path(), } } fn config_path_or_default(&self) -> PathBuf { if let Some(config_path) = self.config_path() { config_path.to_owned() } else { // we allow both `kailua.json` or `.vscode/kailua.json`, // for now we will issue an error at the latter self.base_dir().join(".vscode").join("kailua.json") } } fn base_dir(&self) -> &Path { match *self { WorkspaceBase::Config(ref config) => config.base_dir(), WorkspaceBase::Workspace(ref ws) => ws.base_dir(), } } } // a portion of Workspace that should be shared across WorkspaceFile. // this should not be modified in the normal cases (otherwise it can be easily deadlocked), // with an exception of cascading cancellation. struct WorkspaceShared { cancel_token: CancelToken, // used for stopping ongoing checks base: WorkspaceBase, check_outputs: Vec<Option<ReportFuture<Arc<Output>>>>, last_check_outputs: Vec<Option<Arc<Output>>>, } type Shared = Arc<RwLock<WorkspaceShared>>; type SharedWrite<'a> = RwLockWriteGuard<'a, WorkspaceShared>; impl fmt::Debug for WorkspaceShared { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { struct DummyOptionList<'a, T: 'a>(&'a [Option<T>]); impl<'a, T: 'a> fmt::Debug for DummyOptionList<'a, T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_list().entries(self.0.iter().map(|e| e.as_ref().map(|_| Ellipsis))).finish() } } f.debug_struct("WorkspaceShared") .field("base", &self.base) .field("cancel_token", &self.cancel_token) .field("check_outputs", &DummyOptionList(&self.check_outputs)) .field("last_check_outputs", &DummyOptionList(&self.last_check_outputs)) .finish() } } impl WorkspaceShared { fn cancel(&mut self) { self.cancel_token.cancel(); self.cancel_token = CancelToken::new(); for output in &mut self.check_outputs { *output = None; } } } struct WorkspaceFsSourceInner { cancel_token: CancelToken, // will be used independently of WorkspaceShared files: Arc<RwLock<HashMap<PathBuf, WorkspaceFile>>>, source: Arc<RwLock<Source>>, temp_units: Vec<Unit>, // will be gone after checking temp_files: HashMap<PathBuf, Chunk>, message_locale: Locale, root_report: ReportTree, } #[derive(Clone)] struct WorkspaceFsSource { inner: Rc<RefCell<WorkspaceFsSourceInner>>, } impl FsSource for WorkspaceFsSource { fn chunk_from_path(&self, path: Spanned<&Path>, _report: &Report) -> Result<Option<Chunk>, Option<Stop>> { let mut fssource = self.inner.borrow_mut(); fssource.cancel_token.keep_going::<()>().map_err(|_| Stop)?; // try to use the client-maintained text as a source code let files = fssource.files.clone(); let files = files.read(); if let Some(file) = files.get(path.base) { let (chunk, diags) = match file.ensure_chunk().wait() { Ok(res) => { let (ref chunk, ref diags) = *res; (Some((**chunk).clone()), diags.clone()) }, Err(res) => match *res { CancelError::Canceled => return 
Err(Some(Stop)), CancelError::Error(ref diags) => (None, diags.clone()) }, }; // this can be called multiple times, which ReportTree handles correctly fssource.root_report.add_parent(diags); return Ok(chunk); } drop(files); // avoid prolonged lock // try to use the already-read temporary chunk if let Some(chunk) = fssource.temp_files.get(path.base) { return Ok(Some(chunk.clone())); } // try to read the file (and finally raise an error if it can't be read) let sourcefile = match SourceFile::from_file(path.base) { Ok(f) => f, Err(ref e) if e.kind() == io::ErrorKind::NotFound => return Ok(None), Err(_) => return Err(None), }; let span = fssource.source.write().add(sourcefile); fssource.temp_units.push(span.unit()); let diags = ReportTree::new(fssource.message_locale, path.to_str()); fssource.root_report.add_parent(diags.clone()); let chunk = { let source = fssource.source.read(); let report = diags.report(|span| diags::translate_span(span, &source)); let tokens = collect_tokens(&source, span, &report); parse_to_chunk(tokens, &report) }; match chunk { Ok(chunk) => { fssource.temp_files.insert(path.base.to_owned(), chunk.clone()); Ok(Some(chunk)) }, Err(Stop) => Err(Some(Stop)), // we have already reported parsing errors } } } pub struct Workspace { message_locale: Locale, pool: Arc<CpuPool>, files: Arc<RwLock<HashMap<PathBuf, WorkspaceFile>>>, // conceptually this belongs to shared, but it is frequently updated by futures // unlike all other fields in shared, so getting this out avoids deadlock source: Arc<RwLock<Source>>, shared: Arc<RwLock<WorkspaceShared>>, } impl fmt::Debug for Workspace { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("Workspace") .field("message_locale", &self.message_locale) .field("pool", &Ellipsis) .field("files", &self.files) .field("source", &Ellipsis) .field("shared", &self.shared) .finish() } } impl Workspace { pub fn new(base_dir: PathBuf, pool: Arc<CpuPool>, default_locale: Locale) -> Workspace { Workspace { message_locale: default_locale, pool: pool, files: Arc::new(RwLock::new(HashMap::new())), source: Arc::new(RwLock::new(Source::new())), shared: Arc::new(RwLock::new(WorkspaceShared { cancel_token: CancelToken::new(), base: WorkspaceBase::Config(kailua_workspace::Config::from_base_dir(base_dir)), check_outputs: Vec::new(), last_check_outputs: Vec::new(), })), } } pub fn pool(&self) -> &Arc<CpuPool> { &self.pool } pub fn source<'a>(&'a self) -> RwLockReadGuard<'a, Source> { self.source.read() } pub fn has_read_config(&self) -> bool { if let WorkspaceBase::Workspace(_) = self.shared.read().base { true } else { false } } #[allow(dead_code)] pub fn config_path(&self) -> Option<PathBuf> { self.shared.read().base.config_path().map(|p| p.to_owned()) } pub fn config_path_or_default(&self) -> PathBuf { self.shared.read().base.config_path_or_default() } pub fn read_config(&mut self) -> bool { let mut shared = self.shared.write(); let ws = if let WorkspaceBase::Config(ref mut config) = shared.base { config.use_default_config_paths(); if let Some(ws) = kailua_workspace::Workspace::new(config, self.message_locale) { Some(ws) } else { return false; } } else { None }; if let Some(ws) = ws { let noutputs = ws.start_paths().len(); shared.base = WorkspaceBase::Workspace(ws); shared.check_outputs.resize(noutputs, None); shared.last_check_outputs.resize(noutputs, None); } true } pub fn populate_watchlist(&mut self) { let walker = WalkDir::new(self.shared.read().base.base_dir()); for e in walker.follow_links(true) { // we don't care about I/O errors and (in 
Unix) symlink loops let e = if let Ok(e) = e { e } else { continue }; let ext = e.path().extension(); if ext == Some(OsStr::new("lua")) || ext == Some(OsStr::new("kailua")) { // TODO probably this should be of the lower priority let _ = self.ensure_file(e.path()).ensure_chunk(); } } } pub fn localize<'a, T: Localize +?Sized + 'a>(&self, msg: &'a T) -> Localized<'a, T> { Localized::new(msg, self.message_locale) } pub fn files<'a>(&'a self) -> RwLockReadGuard<'a, HashMap<PathBuf, WorkspaceFile>> { self.files.read() } pub fn file<'a>(&'a self, uri: &str) -> Option<WorkspaceFile> { match uri_to_path(uri) { Ok(path) => self.files.read().get(&path).cloned(), Err(_) => None, } } fn make_file(&self, path: PathBuf) -> WorkspaceFile { WorkspaceFile::new(&self.shared, &self.pool, &self.source, self.message_locale, path) } fn destroy_file(&self, file: WorkspaceFile) -> bool { file.cancel(); let file = file.inner.read(); let sourcefile = self.source.write().remove(file.unit); file.document.is_some() && sourcefile.is_some() } pub fn open_file(&self, item: protocol::TextDocumentItem) -> WorkspaceResult<()> { let path = uri_to_path(&item.uri)?; let mut files = self.files.write(); let file = files.entry(path.clone()).or_insert_with(|| self.make_file(path)); file.update_document(|doc| { if doc.is_some() { Err(WorkspaceError("open notification with duplicate file")) } else { Ok(Some(OpenDocument::new(item))) } }) } fn ensure_file(&self, path: &Path) -> WorkspaceFile { let mut files = self.files.write(); files.entry(path.to_owned()).or_insert_with(|| self.make_file(path.to_owned())).clone() } pub fn close_file(&self, uri: &str) -> WorkspaceResult<()> { let path = uri_to_path(uri)?; // closing file breaks the synchronization so the file should be re-read from fs let mut files = self.files.write(); let ok = if let hash_map::Entry::Occupied(mut e) = files.entry(path.clone()) { // replace the previous WorkspaceFile by a fresh WorkspaceFile let file = mem::replace(e.get_mut(), self.make_file(path)); self.destroy_file(file) } else { false }; if ok { Ok(()) } else { Err(WorkspaceError("close notification with non-existent or non-open file")) } } pub fn on_file_created(&self, uri: &str) -> Option<WorkspaceFile> { if let Ok(path) = uri_to_path(uri) { let file = self.ensure_file(&path); let _ = file.ensure_chunk(); Some(file) } else { None } } pub fn on_file_changed(&self, uri: &str) -> Option<WorkspaceFile>
{ if let Ok(path) = uri_to_path(uri) { let file = self.ensure_file(&path); file.cancel(); let _ = file.ensure_chunk(); Some(file) } else { None } }
identifier_body
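apply_change above only accepts full-document edits whose version strictly increases and rejects everything else. A small self-contained sketch of that monotonic-version check follows; the simplified Doc type and the error strings are illustrative, standing in for the workspace's OpenDocument and WorkspaceError.

struct Doc {
    last_version: u64,
    last_text: String,
}

fn apply_full_change(doc: &mut Doc, version: u64, text: String) -> Result<(), &'static str> {
    // only strictly increasing versions are accepted, mirroring the check above
    if doc.last_version >= version {
        return Err("non-increasing version");
    }
    doc.last_version = version;
    doc.last_text = text;
    Ok(())
}

fn main() {
    let mut doc = Doc { last_version: 3, last_text: "local x = 1".to_string() };
    assert!(apply_full_change(&mut doc, 4, "local x = 2".to_string()).is_ok());
    assert!(apply_full_change(&mut doc, 4, "stale edit".to_string()).is_err()); // same version is rejected
    assert_eq!(doc.last_text, "local x = 2");
}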
menu.rs
//! Menu abstrction module use std::collections::HashMap; use std::rc::Rc; use std::cell::RefCell; use dbusmenu::ComCanonicalDbusmenu; use dbus::arg; use dbus; #[derive(Default)] pub struct Menu { /// - `revision: i32`: The revision number of the layout. /// For matching with layoutUpdated signals. revision: Rc<RefCell<i32>>, /// The window ID that the menu was created on pub window_id: Option<u32>, /// The actual Menu structure, indexed by their action name / identifier pub menu: HashMap<&'static str, SubMenu>, /// The current language. /// **NOTE** : The default is "en", so make sure to have at least one /// entry in the menu items labels that is indexed by "en" pub cur_language: &'static str, } /// Top-level submenu. Not to be confused with MenuData::SubMenuItem pub struct SubMenu { /// The label of the menu pub label: HashMap<String, String>, /// The menu items, indexed by their action name pub menu: HashMap<String, MenuItem>, } impl Menu { /// Creates a new window, but doesn't add it to any window yet /// Starts a new thread for maintaining the rendering loop pub fn new() -> Self { Self { revision: Rc::new(RefCell::new(0)), window_id: None, menu: HashMap::new(), cur_language: "en", } } /// Adds the menu to the window - takes XID of window as parameter pub fn add_to_window(&mut self, window_id: u32) { self.window_id = Some(window_id); // todo: notify app menu registrar here println!("registered window!"); } /// Removes the menu pub fn remove_from_window(&mut self) { self.window_id = None; // appmenu unregister window // should also be called on drop println!("unregistered window!"); } /// Removes an item from the menu list. /// Does not error out, but rather returns if the removal was successful pub fn remove_item<S: Into<String>>(item: S) -> bool { let item_id = item.into(); println!("remove_item: {:?}", item_id); false } /// Adds an item to the menu list. /// Does not error out, but rather returns if the add was successful pub fn add_item<S: Into<String>>(item: S) -> bool { let item_id = item.into(); println!("add item: {:?}", item_id); false } /// Actually constructs the window so that it shows the menu now /// Sends the menu over DBus pub fn show() { } } pub enum MenuItem { /// Text menu item, regular. Gets called if clicked TextMenuItem(MenuData<Box<Fn() -> ()>>), /// Checkbox menu item, CheckboxMenuItem(MenuData<Box<Fn(bool) -> ()>>), /// Radio menu item, consisting of multiple menu items. /// Callback gets a string of the currently selected value RadioMenuItem(Vec<MenuData<Box<Fn(String) -> ()>>>), /// Seperator Seperator(), /// Submenu SubMenuItem(String, Box<SubMenu>), } #[derive(Debug)] pub struct MenuData<F> { /// The action to execute, depends on the type of menu item pub action: F, /// Optional image as PNG bytes pub image: Option<Vec<u8>>, /// The label(s) of the menu item, indexed by language identifier /// /// For example: /// /// de - Datei öffnen /// en - Open file pub label: HashMap<String, String>, /// Should the menu entry be activated on hovering pub activate_on_hover: bool, /// Optional shortcuts in the format of a string /// `[["Control", "S"]]` /// `[["Control", "Q"], ["Alt", "X"]]` /// This is only a visual cue (todo: really?) 
pub shortcut: Option<Vec<ShortcutData>>, } #[derive(Debug, Clone)] pub enum ShortcutData { /// The "Control" in CTRL + S ControlChar(CtrlChar), /// The "S" in CTRL + S Char(String), } /// The four controls registered by dbus #[derive(Debug, Copy, Clone)] pub enum CtrlChar { Ctrl, Alt, Shift, Super, } /* 0 => [ "type" => "standard" | "seperator", "label" => "Hello", "enabled" => true, "visible" => true, "icon-name" => "hello.png", "icon-data" => Vec<u8>, "shortcut" => [["Control", "S"]], "toggle-type" => "checkmark" | "radio", "", "toggle-state" => MenuItemToggleState, "children-display" => "" | "submenu", ], defaults: type = "standard", label = "", enabled = "", visible = "", icon-name = "", icon-data = None, shortcut = None, toggle-type = "", toggle-state = -1 children-display = "", */ #[derive(Debug)] pub enum MenuItemToggleState { On, Off, Invalid, } impl Into<i32> for MenuItemToggleState { fn into(self) -> i32 { match self { MenuItemToggleState::On => 1, MenuItemToggleState::Off => 0, MenuItemToggleState::Invalid => -1, } } } /// Implement the ComCanonicalMenu so we can push it to the server impl ComCanonicalDbusmenu for Menu { type Err = dbus::tree::MethodErr; /// - `parent_id`: The ID of the parent node for the layout. For grabbing the layout from the root node use zero. /// - `recursion_depth`: The amount of levels of recursion to use. This affects the content of the second variant array. /// - -1: deliver all the items under the @a parentId. /// - 0: no recursion, the array will be empty. /// - n: array will contains items up to 'n' level depth. /// - `property_names`: The list of item properties we are interested in. If there are no entries in the list all of the properties will be sent. /// /// ### Outputs /// /// - `revision: i32`: The revision number of the layout. For matching with layoutUpdated signals. /// - `layout: HashMap`: The layout, as a recursive structure. /// fn get_layout(&self, parent_id: i32, recursion_depth: i32, property_names: Vec<&str>) -> Result<(u32, (i32, ::std::collections::HashMap<String, arg::Variant<Box<arg::RefArg>>>, Vec<arg::Variant<Box<arg::RefArg>>>)), Self::Err> { // I have no idea if this will actually work in any way possible // (u, (ia{sv}av)) // Nautilus: 0, 2, [] // Answer: 14 /* try!(m.as_result()); let mut i = m.iter_init(); let revision: u32 = try!(i.read()); let layout: (i32, ::std::collections::HashMap<String, arg::Variant<Box<arg::RefArg>>>, Vec<arg::Variant<Box<arg::RefArg>>>) = try!(i.read()); Ok((revision, layout)) */ use dbus::Message; use dbus::Member; println!("getlayout called!"); let mut m = Message::new_method_call("com.canonical.dbusmenu", "com/canonical/dbusmenu", "com.canonical.dbusmenu", Member::new("com.canonical.dbusmenu".as_bytes()).unwrap()).unwrap(); try!(m.as_result()); let mut i = m.iter_init(); let mut map = HashMap::<String, arg::Variant<Box<arg::RefArg>>>::new(); map.insert("data-hello".into(), arg::Variant::new_refarg(&mut i).unwrap()); *self.revision.borrow_mut() += 1; Ok((1, (*self.revision.borrow(), map, Vec::new()))) } fn get_group_properties(&self, ids: Vec<i32>, property_names: Vec<&str>) -> Result<Vec<(i32, ::std::collections::HashMap<String, arg::Variant<Box<arg::RefArg>>>)>, Self::Err> { // I AM NOT SURE IF THS WORKS! 
println!("get_group_properties called: {:?}, {:?}", ids, property_names); /* method call time=1510750424.121891 sender=:1.318 -> destination=org.freedesktop.DBus serial=1 path=/org/freedesktop/DBus; interface=org.freedesktop.DBus; member=Hello */ // warning: other method is also called "hello" // If Nautilus is called with [0], returns [(0, {'children-display':'submenu'})] let mut properties_hashmap = HashMap::<String, arg::Variant<Box<arg::RefArg>>>::new(); properties_hashmap.insert("label".into(), arg::Variant(Box::new("Hello".to_string()))); Ok(vec![(0, properties_hashmap)]) } fn get_property(&self, id: i32, name: &str) -> Result<arg::Variant<Box<arg::RefArg>>, Self::Err> { println!("get property called!"); // Nautilus get_propery(0, 'children-display') ->'submenu' Ok(arg::Variant(Box::new("everything is OK".to_string()))) } fn event(&self, id: i32, event_id: &str, data: arg::Variant<Box<arg::RefArg>>, timestamp: u32) -> Result<(), Self::Err> { println!("event called!"); if event_id == "clicked" { println!("received clicked event for menu item {:?}", id); } else if event_id == "hovered" { println!("received hovered event for menu item {:?}", id); } Ok(()) } fn about_to_show(&self, id: i32) -> Result<bool, Self::Err> { //??? "Whether this AboutToShow event should result in the menu being updated." // not sure what this means println!("about_to_show called, id: {:?}", id); Ok(true) } fn get_version(&self) -> Result<u32, Self::Err> {
fn get_status(&self) -> Result<String, Self::Err> { println!("get_status called!"); // Menus will always be in "normal" state, may change later on Ok("normal".into()) } } #[derive(Default, Clone)] pub struct MData; impl<'a> dbus::tree::DataType for MData { type Tree = (); type ObjectPath = Menu; // Every objectpath in the tree now owns a menu object. type Property = (); type Interface = (); type Method = (); type Signal = (); } /// Since parts of the menu are not printable, implement Debug trait manually /// Needed because of a bug in rust: https://github.com/rust-lang/rust/issues/31518 impl ::std::fmt::Debug for Menu { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { write!(f, "Menu {{ /* non-printable fields omitted */ }}") } }
// ???? println!("about_to_show called!"); Ok(3) }
identifier_body
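The Menu type above documents that cur_language defaults to "en" and that every item label map should carry at least an "en" entry. A possible helper illustrating that locale lookup with fallback is sketched below; label_for is hypothetical and not part of the Menu API shown here.

use std::collections::HashMap;

// returns the label for the current language, or the documented "en" fallback
fn label_for<'a>(labels: &'a HashMap<String, String>, cur_language: &str) -> Option<&'a str> {
    labels
        .get(cur_language)
        .or_else(|| labels.get("en"))
        .map(|s| s.as_str())
}

fn main() {
    let mut labels = HashMap::new();
    labels.insert("en".to_string(), "Open file".to_string());
    labels.insert("de".to_string(), "Datei öffnen".to_string());

    assert_eq!(label_for(&labels, "de"), Some("Datei öffnen"));
    assert_eq!(label_for(&labels, "fr"), Some("Open file")); // falls back to "en"
}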
menu.rs
//! Menu abstrction module use std::collections::HashMap; use std::rc::Rc; use std::cell::RefCell; use dbusmenu::ComCanonicalDbusmenu; use dbus::arg; use dbus; #[derive(Default)] pub struct Menu { /// - `revision: i32`: The revision number of the layout. /// For matching with layoutUpdated signals. revision: Rc<RefCell<i32>>, /// The window ID that the menu was created on pub window_id: Option<u32>, /// The actual Menu structure, indexed by their action name / identifier pub menu: HashMap<&'static str, SubMenu>, /// The current language. /// **NOTE** : The default is "en", so make sure to have at least one /// entry in the menu items labels that is indexed by "en" pub cur_language: &'static str, } /// Top-level submenu. Not to be confused with MenuData::SubMenuItem pub struct SubMenu { /// The label of the menu pub label: HashMap<String, String>, /// The menu items, indexed by their action name pub menu: HashMap<String, MenuItem>, } impl Menu { /// Creates a new window, but doesn't add it to any window yet /// Starts a new thread for maintaining the rendering loop pub fn new() -> Self { Self { revision: Rc::new(RefCell::new(0)), window_id: None, menu: HashMap::new(), cur_language: "en", } } /// Adds the menu to the window - takes XID of window as parameter pub fn add_to_window(&mut self, window_id: u32) { self.window_id = Some(window_id); // todo: notify app menu registrar here println!("registered window!"); } /// Removes the menu pub fn remove_from_window(&mut self) { self.window_id = None; // appmenu unregister window // should also be called on drop println!("unregistered window!"); } /// Removes an item from the menu list. /// Does not error out, but rather returns if the removal was successful pub fn remove_item<S: Into<String>>(item: S) -> bool { let item_id = item.into(); println!("remove_item: {:?}", item_id); false } /// Adds an item to the menu list. /// Does not error out, but rather returns if the add was successful pub fn add_item<S: Into<String>>(item: S) -> bool { let item_id = item.into(); println!("add item: {:?}", item_id); false } /// Actually constructs the window so that it shows the menu now /// Sends the menu over DBus pub fn show() { } } pub enum MenuItem { /// Text menu item, regular. Gets called if clicked TextMenuItem(MenuData<Box<Fn() -> ()>>), /// Checkbox menu item, CheckboxMenuItem(MenuData<Box<Fn(bool) -> ()>>), /// Radio menu item, consisting of multiple menu items. /// Callback gets a string of the currently selected value RadioMenuItem(Vec<MenuData<Box<Fn(String) -> ()>>>), /// Seperator Seperator(), /// Submenu SubMenuItem(String, Box<SubMenu>), } #[derive(Debug)] pub struct MenuData<F> { /// The action to execute, depends on the type of menu item pub action: F, /// Optional image as PNG bytes pub image: Option<Vec<u8>>, /// The label(s) of the menu item, indexed by language identifier /// /// For example: /// /// de - Datei öffnen /// en - Open file pub label: HashMap<String, String>, /// Should the menu entry be activated on hovering pub activate_on_hover: bool, /// Optional shortcuts in the format of a string /// `[["Control", "S"]]` /// `[["Control", "Q"], ["Alt", "X"]]` /// This is only a visual cue (todo: really?) 
pub shortcut: Option<Vec<ShortcutData>>, } #[derive(Debug, Clone)] pub enum ShortcutData { /// The "Control" in CTRL + S ControlChar(CtrlChar), /// The "S" in CTRL + S Char(String), } /// The four controls registered by dbus #[derive(Debug, Copy, Clone)] pub enum CtrlChar { Ctrl, Alt, Shift, Super, } /* 0 => [ "type" => "standard" | "seperator", "label" => "Hello", "enabled" => true, "visible" => true, "icon-name" => "hello.png", "icon-data" => Vec<u8>, "shortcut" => [["Control", "S"]], "toggle-type" => "checkmark" | "radio", "", "toggle-state" => MenuItemToggleState, "children-display" => "" | "submenu", ], defaults: type = "standard", label = "", enabled = "", visible = "", icon-name = "", icon-data = None, shortcut = None, toggle-type = "", toggle-state = -1 children-display = "", */ #[derive(Debug)] pub enum MenuItemToggleState { On, Off, Invalid, } impl Into<i32> for MenuItemToggleState { fn into(self) -> i32 { match self { MenuItemToggleState::On => 1, MenuItemToggleState::Off => 0, MenuItemToggleState::Invalid => -1, } } } /// Implement the ComCanonicalMenu so we can push it to the server impl ComCanonicalDbusmenu for Menu { type Err = dbus::tree::MethodErr; /// - `parent_id`: The ID of the parent node for the layout. For grabbing the layout from the root node use zero. /// - `recursion_depth`: The amount of levels of recursion to use. This affects the content of the second variant array. /// - -1: deliver all the items under the @a parentId. /// - 0: no recursion, the array will be empty. /// - n: array will contains items up to 'n' level depth. /// - `property_names`: The list of item properties we are interested in. If there are no entries in the list all of the properties will be sent. /// /// ### Outputs /// /// - `revision: i32`: The revision number of the layout. For matching with layoutUpdated signals. /// - `layout: HashMap`: The layout, as a recursive structure. /// fn get_layout(&self, parent_id: i32, recursion_depth: i32, property_names: Vec<&str>) -> Result<(u32, (i32, ::std::collections::HashMap<String, arg::Variant<Box<arg::RefArg>>>, Vec<arg::Variant<Box<arg::RefArg>>>)), Self::Err> { // I have no idea if this will actually work in any way possible // (u, (ia{sv}av)) // Nautilus: 0, 2, [] // Answer: 14 /* try!(m.as_result()); let mut i = m.iter_init(); let revision: u32 = try!(i.read()); let layout: (i32, ::std::collections::HashMap<String, arg::Variant<Box<arg::RefArg>>>, Vec<arg::Variant<Box<arg::RefArg>>>) = try!(i.read()); Ok((revision, layout)) */ use dbus::Message; use dbus::Member; println!("getlayout called!"); let mut m = Message::new_method_call("com.canonical.dbusmenu", "com/canonical/dbusmenu", "com.canonical.dbusmenu", Member::new("com.canonical.dbusmenu".as_bytes()).unwrap()).unwrap(); try!(m.as_result()); let mut i = m.iter_init(); let mut map = HashMap::<String, arg::Variant<Box<arg::RefArg>>>::new(); map.insert("data-hello".into(), arg::Variant::new_refarg(&mut i).unwrap()); *self.revision.borrow_mut() += 1; Ok((1, (*self.revision.borrow(), map, Vec::new()))) } fn get_group_properties(&self, ids: Vec<i32>, property_names: Vec<&str>) -> Result<Vec<(i32, ::std::collections::HashMap<String, arg::Variant<Box<arg::RefArg>>>)>, Self::Err> { // I AM NOT SURE IF THS WORKS! 
println!("get_group_properties called: {:?}, {:?}", ids, property_names); /* method call time=1510750424.121891 sender=:1.318 -> destination=org.freedesktop.DBus serial=1 path=/org/freedesktop/DBus; interface=org.freedesktop.DBus; member=Hello */ // warning: other method is also called "hello" // If Nautilus is called with [0], returns [(0, {'children-display':'submenu'})] let mut properties_hashmap = HashMap::<String, arg::Variant<Box<arg::RefArg>>>::new(); properties_hashmap.insert("label".into(), arg::Variant(Box::new("Hello".to_string()))); Ok(vec![(0, properties_hashmap)]) } fn get_property(&self, id: i32, name: &str) -> Result<arg::Variant<Box<arg::RefArg>>, Self::Err> { println!("get property called!"); // Nautilus get_propery(0, 'children-display') ->'submenu' Ok(arg::Variant(Box::new("everything is OK".to_string()))) } fn event(&self, id: i32, event_id: &str, data: arg::Variant<Box<arg::RefArg>>, timestamp: u32) -> Result<(), Self::Err> { println!("event called!"); if event_id == "clicked" { println!("received clicked event for menu item {:?}", id); } else if event_id == "hovered" { println!("received hovered event for menu item {:?}", id); } Ok(()) } fn about_to_show(&self, id: i32) -> Result<bool, Self::Err> { //??? "Whether this AboutToShow event should result in the menu being updated." // not sure what this means println!("about_to_show called, id: {:?}", id); Ok(true) } fn g
&self) -> Result<u32, Self::Err> { //???? println!("about_to_show called!"); Ok(3) } fn get_status(&self) -> Result<String, Self::Err> { println!("get_status called!"); // Menus will always be in "normal" state, may change later on Ok("normal".into()) } } #[derive(Default, Clone)] pub struct MData; impl<'a> dbus::tree::DataType for MData { type Tree = (); type ObjectPath = Menu; // Every objectpath in the tree now owns a menu object. type Property = (); type Interface = (); type Method = (); type Signal = (); } /// Since parts of the menu are not printable, implement Debug trait manually /// Needed because of a bug in rust: https://github.com/rust-lang/rust/issues/31518 impl ::std::fmt::Debug for Menu { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { write!(f, "Menu {{ /* non-printable fields omitted */ }}") } }
et_version(
identifier_name
menu.rs
//! Menu abstrction module use std::collections::HashMap; use std::rc::Rc; use std::cell::RefCell; use dbusmenu::ComCanonicalDbusmenu; use dbus::arg; use dbus; #[derive(Default)] pub struct Menu { /// - `revision: i32`: The revision number of the layout. /// For matching with layoutUpdated signals. revision: Rc<RefCell<i32>>, /// The window ID that the menu was created on pub window_id: Option<u32>, /// The actual Menu structure, indexed by their action name / identifier pub menu: HashMap<&'static str, SubMenu>, /// The current language. /// **NOTE** : The default is "en", so make sure to have at least one /// entry in the menu items labels that is indexed by "en" pub cur_language: &'static str, } /// Top-level submenu. Not to be confused with MenuData::SubMenuItem pub struct SubMenu { /// The label of the menu pub label: HashMap<String, String>, /// The menu items, indexed by their action name pub menu: HashMap<String, MenuItem>, } impl Menu { /// Creates a new window, but doesn't add it to any window yet /// Starts a new thread for maintaining the rendering loop pub fn new() -> Self { Self { revision: Rc::new(RefCell::new(0)), window_id: None, menu: HashMap::new(), cur_language: "en", } } /// Adds the menu to the window - takes XID of window as parameter pub fn add_to_window(&mut self, window_id: u32) { self.window_id = Some(window_id); // todo: notify app menu registrar here println!("registered window!"); } /// Removes the menu pub fn remove_from_window(&mut self) { self.window_id = None; // appmenu unregister window // should also be called on drop println!("unregistered window!"); } /// Removes an item from the menu list. /// Does not error out, but rather returns if the removal was successful pub fn remove_item<S: Into<String>>(item: S) -> bool { let item_id = item.into(); println!("remove_item: {:?}", item_id); false } /// Adds an item to the menu list. /// Does not error out, but rather returns if the add was successful pub fn add_item<S: Into<String>>(item: S) -> bool { let item_id = item.into(); println!("add item: {:?}", item_id); false } /// Actually constructs the window so that it shows the menu now /// Sends the menu over DBus pub fn show() { } } pub enum MenuItem { /// Text menu item, regular. Gets called if clicked TextMenuItem(MenuData<Box<Fn() -> ()>>), /// Checkbox menu item, CheckboxMenuItem(MenuData<Box<Fn(bool) -> ()>>), /// Radio menu item, consisting of multiple menu items. /// Callback gets a string of the currently selected value RadioMenuItem(Vec<MenuData<Box<Fn(String) -> ()>>>), /// Seperator Seperator(), /// Submenu SubMenuItem(String, Box<SubMenu>), } #[derive(Debug)] pub struct MenuData<F> { /// The action to execute, depends on the type of menu item pub action: F, /// Optional image as PNG bytes pub image: Option<Vec<u8>>, /// The label(s) of the menu item, indexed by language identifier /// /// For example: /// /// de - Datei öffnen /// en - Open file pub label: HashMap<String, String>, /// Should the menu entry be activated on hovering pub activate_on_hover: bool, /// Optional shortcuts in the format of a string /// `[["Control", "S"]]` /// `[["Control", "Q"], ["Alt", "X"]]` /// This is only a visual cue (todo: really?) 
pub shortcut: Option<Vec<ShortcutData>>, } #[derive(Debug, Clone)] pub enum ShortcutData { /// The "Control" in CTRL + S ControlChar(CtrlChar), /// The "S" in CTRL + S Char(String), } /// The four controls registered by dbus #[derive(Debug, Copy, Clone)] pub enum CtrlChar { Ctrl, Alt, Shift, Super, } /* 0 => [ "type" => "standard" | "seperator", "label" => "Hello", "enabled" => true, "visible" => true, "icon-name" => "hello.png", "icon-data" => Vec<u8>, "shortcut" => [["Control", "S"]], "toggle-type" => "checkmark" | "radio", "", "toggle-state" => MenuItemToggleState, "children-display" => "" | "submenu", ], defaults: type = "standard", label = "", enabled = "", visible = "", icon-name = "", icon-data = None, shortcut = None, toggle-type = "", toggle-state = -1 children-display = "", */ #[derive(Debug)] pub enum MenuItemToggleState { On, Off, Invalid, } impl Into<i32> for MenuItemToggleState { fn into(self) -> i32 { match self { MenuItemToggleState::On => 1, MenuItemToggleState::Off => 0, MenuItemToggleState::Invalid => -1, } } } /// Implement the ComCanonicalMenu so we can push it to the server impl ComCanonicalDbusmenu for Menu { type Err = dbus::tree::MethodErr; /// - `parent_id`: The ID of the parent node for the layout. For grabbing the layout from the root node use zero. /// - `recursion_depth`: The amount of levels of recursion to use. This affects the content of the second variant array. /// - -1: deliver all the items under the @a parentId. /// - 0: no recursion, the array will be empty. /// - n: array will contains items up to 'n' level depth. /// - `property_names`: The list of item properties we are interested in. If there are no entries in the list all of the properties will be sent. /// /// ### Outputs /// /// - `revision: i32`: The revision number of the layout. For matching with layoutUpdated signals. /// - `layout: HashMap`: The layout, as a recursive structure. /// fn get_layout(&self, parent_id: i32, recursion_depth: i32, property_names: Vec<&str>) -> Result<(u32, (i32, ::std::collections::HashMap<String, arg::Variant<Box<arg::RefArg>>>, Vec<arg::Variant<Box<arg::RefArg>>>)), Self::Err> { // I have no idea if this will actually work in any way possible // (u, (ia{sv}av)) // Nautilus: 0, 2, [] // Answer: 14 /* try!(m.as_result()); let mut i = m.iter_init(); let revision: u32 = try!(i.read()); let layout: (i32, ::std::collections::HashMap<String, arg::Variant<Box<arg::RefArg>>>, Vec<arg::Variant<Box<arg::RefArg>>>) = try!(i.read()); Ok((revision, layout)) */ use dbus::Message; use dbus::Member; println!("getlayout called!"); let mut m = Message::new_method_call("com.canonical.dbusmenu", "com/canonical/dbusmenu", "com.canonical.dbusmenu", Member::new("com.canonical.dbusmenu".as_bytes()).unwrap()).unwrap(); try!(m.as_result()); let mut i = m.iter_init(); let mut map = HashMap::<String, arg::Variant<Box<arg::RefArg>>>::new(); map.insert("data-hello".into(), arg::Variant::new_refarg(&mut i).unwrap()); *self.revision.borrow_mut() += 1; Ok((1, (*self.revision.borrow(), map, Vec::new()))) } fn get_group_properties(&self, ids: Vec<i32>, property_names: Vec<&str>) -> Result<Vec<(i32, ::std::collections::HashMap<String, arg::Variant<Box<arg::RefArg>>>)>, Self::Err> { // I AM NOT SURE IF THS WORKS! 
println!("get_group_properties called: {:?}, {:?}", ids, property_names); /* method call time=1510750424.121891 sender=:1.318 -> destination=org.freedesktop.DBus serial=1 path=/org/freedesktop/DBus; interface=org.freedesktop.DBus; member=Hello */ // warning: other method is also called "hello" // If Nautilus is called with [0], returns [(0, {'children-display':'submenu'})] let mut properties_hashmap = HashMap::<String, arg::Variant<Box<arg::RefArg>>>::new(); properties_hashmap.insert("label".into(), arg::Variant(Box::new("Hello".to_string()))); Ok(vec![(0, properties_hashmap)]) } fn get_property(&self, id: i32, name: &str) -> Result<arg::Variant<Box<arg::RefArg>>, Self::Err> { println!("get property called!"); // Nautilus get_propery(0, 'children-display') ->'submenu' Ok(arg::Variant(Box::new("everything is OK".to_string()))) } fn event(&self, id: i32, event_id: &str, data: arg::Variant<Box<arg::RefArg>>, timestamp: u32) -> Result<(), Self::Err> { println!("event called!"); if event_id == "clicked" { println!("received clicked event for menu item {:?}", id); } else if event_id == "hovered" { println!("received hovered event for menu item {:?}", id); } Ok(()) } fn about_to_show(&self, id: i32) -> Result<bool, Self::Err> { //??? "Whether this AboutToShow event should result in the menu being updated." // not sure what this means println!("about_to_show called, id: {:?}", id); Ok(true) } fn get_version(&self) -> Result<u32, Self::Err> { //???? println!("about_to_show called!"); Ok(3) } fn get_status(&self) -> Result<String, Self::Err> { println!("get_status called!"); // Menus will always be in "normal" state, may change later on Ok("normal".into()) }
} #[derive(Default, Clone)] pub struct MData; impl<'a> dbus::tree::DataType for MData { type Tree = (); type ObjectPath = Menu; // Every objectpath in the tree now owns a menu object. type Property = (); type Interface = (); type Method = (); type Signal = (); } /// Since parts of the menu are not printable, implement Debug trait manually /// Needed because of a bug in rust: https://github.com/rust-lang/rust/issues/31518 impl ::std::fmt::Debug for Menu { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { write!(f, "Menu {{ /* non-printable fields omitted */ }}") } }
random_line_split
poll_evented.rs
use crate::io::driver::{Direction, Handle, ReadyEvent}; use crate::io::registration::Registration; use crate::io::{AsyncRead, AsyncWrite, ReadBuf}; use mio::event::Evented; use std::fmt; use std::io::{self, Read, Write}; use std::marker::Unpin; use std::pin::Pin; use std::task::{Context, Poll}; cfg_io_driver! { /// Associates an I/O resource that implements the [`std::io::Read`] and/or /// [`std::io::Write`] traits with the reactor that drives it. /// /// `PollEvented` uses [`Registration`] internally to take a type that /// implements [`mio::Evented`] as well as [`std::io::Read`] and or /// [`std::io::Write`] and associate it with a reactor that will drive it. /// /// Once the [`mio::Evented`] type is wrapped by `PollEvented`, it can be /// used from within the future's execution model. As such, the /// `PollEvented` type provides [`AsyncRead`] and [`AsyncWrite`] /// implementations using the underlying I/O resource as well as readiness /// events provided by the reactor. /// /// **Note**: While `PollEvented` is `Sync` (if the underlying I/O type is /// `Sync`), the caller must ensure that there are at most two tasks that /// use a `PollEvented` instance concurrently. One for reading and one for /// writing. While violating this requirement is "safe" from a Rust memory /// model point of view, it will result in unexpected behavior in the form /// of lost notifications and tasks hanging. /// /// ## Readiness events /// /// Besides just providing [`AsyncRead`] and [`AsyncWrite`] implementations, /// this type also supports access to the underlying readiness event stream. /// While similar in function to what [`Registration`] provides, the /// semantics are a bit different. /// /// Two functions are provided to access the readiness events: /// [`poll_read_ready`] and [`poll_write_ready`]. These functions return the /// current readiness state of the `PollEvented` instance. If /// [`poll_read_ready`] indicates read readiness, immediately calling /// [`poll_read_ready`] again will also indicate read readiness. /// /// When the operation is attempted and is unable to succeed due to the I/O /// resource not being ready, the caller must call [`clear_read_ready`] or /// [`clear_write_ready`]. This clears the readiness state until a new /// readiness event is received. /// /// This allows the caller to implement additional functions. For example, /// [`TcpListener`] implements poll_accept by using [`poll_read_ready`] and /// [`clear_read_ready`]. /// /// ## Platform-specific events /// /// `PollEvented` also allows receiving platform-specific `mio::Ready` events. /// These events are included as part of the read readiness event stream. The /// write readiness event stream is only for `Ready::writable()` events. /// /// [`std::io::Read`]: trait@std::io::Read /// [`std::io::Write`]: trait@std::io::Write /// [`AsyncRead`]: trait@AsyncRead /// [`AsyncWrite`]: trait@AsyncWrite /// [`mio::Evented`]: trait@mio::Evented /// [`Registration`]: struct@Registration /// [`TcpListener`]: struct@crate::net::TcpListener /// [`clear_read_ready`]: method@Self::clear_read_ready /// [`clear_write_ready`]: method@Self::clear_write_ready /// [`poll_read_ready`]: method@Self::poll_read_ready /// [`poll_write_ready`]: method@Self::poll_write_ready pub(crate) struct PollEvented<E: Evented> { io: Option<E>, registration: Registration, } } // ===== impl PollEvented ===== impl<E> PollEvented<E> where E: Evented, { /// Creates a new `PollEvented` associated with the default reactor. 
/// /// # Panics /// /// This function panics if thread-local runtime is not set. /// /// The runtime is usually set implicitly when this function is called /// from a future driven by a tokio runtime, otherwise runtime can be set /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. #[cfg_attr(feature = "signal", allow(unused))] pub(crate) fn new(io: E) -> io::Result<Self> { PollEvented::new_with_ready(io, mio::Ready::all()) } /// Creates a new `PollEvented` associated with the default reactor, for specific `mio::Ready` /// state. `new_with_ready` should be used over `new` when you need control over the readiness /// state, such as when a file descriptor only allows reads. This does not add `hup` or `error` /// so if you are interested in those states, you will need to add them to the readiness state /// passed to this function. /// /// An example to listen to read only /// /// ```rust /// ##[cfg(unix)] /// mio::Ready::from_usize( /// mio::Ready::readable().as_usize() /// | mio::unix::UnixReady::error().as_usize() /// | mio::unix::UnixReady::hup().as_usize() /// ); /// ``` /// /// # Panics /// /// This function panics if thread-local runtime is not set. /// /// The runtime is usually set implicitly when this function is called /// from a future driven by a tokio runtime, otherwise runtime can be set /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. #[cfg_attr(feature = "signal", allow(unused))] pub(crate) fn new_with_ready(io: E, ready: mio::Ready) -> io::Result<Self> { Self::new_with_ready_and_handle(io, ready, Handle::current()) } pub(crate) fn new_with_ready_and_handle( io: E, ready: mio::Ready, handle: Handle, ) -> io::Result<Self> { let registration = Registration::new_with_ready_and_handle(&io, ready, handle)?; Ok(Self { io: Some(io), registration, }) } /// Returns a shared reference to the underlying I/O object this readiness /// stream is wrapping. #[cfg(any( feature = "process", feature = "tcp", feature = "udp", feature = "uds", feature = "signal" ))] pub(crate) fn get_ref(&self) -> &E { self.io.as_ref().unwrap() } /// Returns a mutable reference to the underlying I/O object this readiness /// stream is wrapping. pub(crate) fn get_mut(&mut self) -> &mut E { self.io.as_mut().unwrap() } /// Consumes self, returning the inner I/O object /// /// This function will deregister the I/O resource from the reactor before /// returning. If the deregistration operation fails, an error is returned. /// /// Note that deregistering does not guarantee that the I/O resource can be /// registered with a different reactor. Some I/O resource types can only be /// associated with a single reactor instance for their lifetime. #[cfg(any(feature = "tcp", feature = "udp", feature = "uds"))] pub(crate) fn into_inner(mut self) -> io::Result<E> { let io = self.io.take().unwrap(); self.registration.deregister(&io)?; Ok(io) } pub(crate) fn clear_readiness(&self, event: ReadyEvent) { self.registration.clear_readiness(event); } /// Checks the I/O resource's read readiness state. /// /// The mask argument allows specifying what readiness to notify on. This /// can be any value, including platform specific readiness, **except** /// `writable`. HUP is always implicitly included on platforms that support /// it. /// /// If the resource is not ready for a read then `Poll::Pending` is returned /// and the current task is notified once a new event is received. 
/// /// The I/O resource will remain in a read-ready state until readiness is /// cleared by calling [`clear_read_ready`]. /// /// [`clear_read_ready`]: method@Self::clear_read_ready /// /// # Panics /// /// This function panics if: /// /// * `ready` includes writable. /// * called from outside of a task context. /// /// # Warning /// /// This method may not be called concurrently. It takes `&self` to allow /// calling it concurrently with `poll_write_ready`. pub(crate) fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<ReadyEvent>> { self.registration.poll_readiness(cx, Direction::Read) } /// Checks the I/O resource's write readiness state. /// /// This always checks for writable readiness and also checks for HUP /// readiness on platforms that support it. /// /// If the resource is not ready for a write then `Poll::Pending` is /// returned and the current task is notified once a new event is received. /// /// The I/O resource will remain in a write-ready state until readiness is /// cleared by calling [`clear_write_ready`]. /// /// [`clear_write_ready`]: method@Self::clear_write_ready /// /// # Panics /// /// This function panics if: /// /// * `ready` contains bits besides `writable` and `hup`. /// * called from outside of a task context. /// /// # Warning /// /// This method may not be called concurrently. It takes `&self` to allow /// calling it concurrently with `poll_read_ready`. pub(crate) fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<ReadyEvent>> { self.registration.poll_readiness(cx, Direction::Write) } } cfg_io_readiness! { impl<E> PollEvented<E> where E: Evented, { pub(crate) async fn readiness(&self, interest: mio::Ready) -> io::Result<ReadyEvent> { self.registration.readiness(interest).await } pub(crate) async fn async_io<F, R>(&self, interest: mio::Ready, mut op: F) -> io::Result<R> where F: FnMut(&E) -> io::Result<R>, { loop { let event = self.readiness(interest).await?; match op(self.get_ref()) { Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { self.clear_readiness(event); } x => return x, } } } } } // ===== Read / Write impls ===== impl<E> AsyncRead for PollEvented<E> where E: Evented + Read + Unpin, { fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll<io::Result<()>> { loop { let ev = ready!(self.poll_read_ready(cx))?; // We can't assume the `Read` won't look at the read buffer, // so we have to force initialization here. let r = (*self).get_mut().read(buf.initialize_unfilled()); if is_wouldblock(&r) { self.clear_readiness(ev); continue; } return Poll::Ready(r.map(|n| { buf.add_filled(n); })); } } } impl<E> AsyncWrite for PollEvented<E> where E: Evented + Write + Unpin, { fn poll_write( mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll<io::Result<usize>> { loop { let ev = ready!(self.poll_write_ready(cx))?; let r = (*self).get_mut().write(buf); if is_wouldblock(&r) { self.clear_readiness(ev); continue; } return Poll::Ready(r); } } fn
(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> { loop { let ev = ready!(self.poll_write_ready(cx))?; let r = (*self).get_mut().flush(); if is_wouldblock(&r) { self.clear_readiness(ev); continue; } return Poll::Ready(r); } } fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> { Poll::Ready(Ok(())) } } fn is_wouldblock<T>(r: &io::Result<T>) -> bool { match *r { Ok(_) => false, Err(ref e) => e.kind() == io::ErrorKind::WouldBlock, } } impl<E: Evented + fmt::Debug> fmt::Debug for PollEvented<E> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("PollEvented").field("io", &self.io).finish() } } impl<E: Evented> Drop for PollEvented<E> { fn drop(&mut self) { if let Some(io) = self.io.take() { // Ignore errors let _ = self.registration.deregister(&io); } } }
poll_flush
identifier_name
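The PollEvented documentation above describes one readiness protocol: attempt the operation, and if it fails with WouldBlock, clear the cached readiness and wait for a new event before retrying; async_io and the AsyncRead/AsyncWrite impls follow exactly this loop. Below is a synchronous toy sketch of the same loop under stated assumptions: wait_for_readiness and clear_readiness are placeholders for what Registration provides, not real tokio APIs with these signatures.

use std::io;

// attempt the op; on WouldBlock, clear the readiness that turned out to be stale
// and wait for a fresh event before retrying
fn retry_on_wouldblock<T>(
    mut op: impl FnMut() -> io::Result<T>,
    mut wait_for_readiness: impl FnMut() -> io::Result<u64>,
    mut clear_readiness: impl FnMut(u64),
) -> io::Result<T> {
    loop {
        let event = wait_for_readiness()?;
        match op() {
            Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => clear_readiness(event),
            result => return result,
        }
    }
}

fn main() -> io::Result<()> {
    // fake resource that reports "not ready" twice before yielding data
    let mut attempts = 0;
    let value = retry_on_wouldblock(
        || {
            attempts += 1;
            if attempts < 3 {
                Err(io::Error::new(io::ErrorKind::WouldBlock, "not ready"))
            } else {
                Ok(42u8)
            }
        },
        || Ok(0),    // pretend a readiness event is always available
        |_event| {}, // nothing to clear in this toy setup
    )?;
    assert_eq!(value, 42);
    Ok(())
}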
poll_evented.rs
use crate::io::driver::{Direction, Handle, ReadyEvent}; use crate::io::registration::Registration; use crate::io::{AsyncRead, AsyncWrite, ReadBuf}; use mio::event::Evented; use std::fmt; use std::io::{self, Read, Write}; use std::marker::Unpin; use std::pin::Pin; use std::task::{Context, Poll}; cfg_io_driver! { /// Associates an I/O resource that implements the [`std::io::Read`] and/or /// [`std::io::Write`] traits with the reactor that drives it. /// /// `PollEvented` uses [`Registration`] internally to take a type that /// implements [`mio::Evented`] as well as [`std::io::Read`] and or /// [`std::io::Write`] and associate it with a reactor that will drive it. /// /// Once the [`mio::Evented`] type is wrapped by `PollEvented`, it can be /// used from within the future's execution model. As such, the /// `PollEvented` type provides [`AsyncRead`] and [`AsyncWrite`] /// implementations using the underlying I/O resource as well as readiness /// events provided by the reactor. /// /// **Note**: While `PollEvented` is `Sync` (if the underlying I/O type is /// `Sync`), the caller must ensure that there are at most two tasks that /// use a `PollEvented` instance concurrently. One for reading and one for /// writing. While violating this requirement is "safe" from a Rust memory /// model point of view, it will result in unexpected behavior in the form /// of lost notifications and tasks hanging. /// /// ## Readiness events /// /// Besides just providing [`AsyncRead`] and [`AsyncWrite`] implementations, /// this type also supports access to the underlying readiness event stream. /// While similar in function to what [`Registration`] provides, the /// semantics are a bit different. /// /// Two functions are provided to access the readiness events: /// [`poll_read_ready`] and [`poll_write_ready`]. These functions return the /// current readiness state of the `PollEvented` instance. If /// [`poll_read_ready`] indicates read readiness, immediately calling /// [`poll_read_ready`] again will also indicate read readiness. /// /// When the operation is attempted and is unable to succeed due to the I/O /// resource not being ready, the caller must call [`clear_read_ready`] or /// [`clear_write_ready`]. This clears the readiness state until a new /// readiness event is received. /// /// This allows the caller to implement additional functions. For example, /// [`TcpListener`] implements poll_accept by using [`poll_read_ready`] and /// [`clear_read_ready`]. /// /// ## Platform-specific events /// /// `PollEvented` also allows receiving platform-specific `mio::Ready` events. /// These events are included as part of the read readiness event stream. The /// write readiness event stream is only for `Ready::writable()` events. /// /// [`std::io::Read`]: trait@std::io::Read /// [`std::io::Write`]: trait@std::io::Write /// [`AsyncRead`]: trait@AsyncRead /// [`AsyncWrite`]: trait@AsyncWrite /// [`mio::Evented`]: trait@mio::Evented /// [`Registration`]: struct@Registration /// [`TcpListener`]: struct@crate::net::TcpListener /// [`clear_read_ready`]: method@Self::clear_read_ready /// [`clear_write_ready`]: method@Self::clear_write_ready /// [`poll_read_ready`]: method@Self::poll_read_ready /// [`poll_write_ready`]: method@Self::poll_write_ready pub(crate) struct PollEvented<E: Evented> { io: Option<E>, registration: Registration, } } // ===== impl PollEvented ===== impl<E> PollEvented<E> where E: Evented, { /// Creates a new `PollEvented` associated with the default reactor. 
/// /// # Panics /// /// This function panics if thread-local runtime is not set. /// /// The runtime is usually set implicitly when this function is called /// from a future driven by a tokio runtime, otherwise runtime can be set /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. #[cfg_attr(feature = "signal", allow(unused))] pub(crate) fn new(io: E) -> io::Result<Self> { PollEvented::new_with_ready(io, mio::Ready::all()) } /// Creates a new `PollEvented` associated with the default reactor, for specific `mio::Ready` /// state. `new_with_ready` should be used over `new` when you need control over the readiness /// state, such as when a file descriptor only allows reads. This does not add `hup` or `error` /// so if you are interested in those states, you will need to add them to the readiness state /// passed to this function. /// /// An example to listen to read only /// /// ```rust /// ##[cfg(unix)] /// mio::Ready::from_usize( /// mio::Ready::readable().as_usize() /// | mio::unix::UnixReady::error().as_usize() /// | mio::unix::UnixReady::hup().as_usize() /// ); /// ``` /// /// # Panics /// /// This function panics if thread-local runtime is not set. /// /// The runtime is usually set implicitly when this function is called /// from a future driven by a tokio runtime, otherwise runtime can be set /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. #[cfg_attr(feature = "signal", allow(unused))] pub(crate) fn new_with_ready(io: E, ready: mio::Ready) -> io::Result<Self> { Self::new_with_ready_and_handle(io, ready, Handle::current()) } pub(crate) fn new_with_ready_and_handle( io: E, ready: mio::Ready, handle: Handle, ) -> io::Result<Self>
/// Returns a shared reference to the underlying I/O object this readiness /// stream is wrapping. #[cfg(any( feature = "process", feature = "tcp", feature = "udp", feature = "uds", feature = "signal" ))] pub(crate) fn get_ref(&self) -> &E { self.io.as_ref().unwrap() } /// Returns a mutable reference to the underlying I/O object this readiness /// stream is wrapping. pub(crate) fn get_mut(&mut self) -> &mut E { self.io.as_mut().unwrap() } /// Consumes self, returning the inner I/O object /// /// This function will deregister the I/O resource from the reactor before /// returning. If the deregistration operation fails, an error is returned. /// /// Note that deregistering does not guarantee that the I/O resource can be /// registered with a different reactor. Some I/O resource types can only be /// associated with a single reactor instance for their lifetime. #[cfg(any(feature = "tcp", feature = "udp", feature = "uds"))] pub(crate) fn into_inner(mut self) -> io::Result<E> { let io = self.io.take().unwrap(); self.registration.deregister(&io)?; Ok(io) } pub(crate) fn clear_readiness(&self, event: ReadyEvent) { self.registration.clear_readiness(event); } /// Checks the I/O resource's read readiness state. /// /// The mask argument allows specifying what readiness to notify on. This /// can be any value, including platform specific readiness, **except** /// `writable`. HUP is always implicitly included on platforms that support /// it. /// /// If the resource is not ready for a read then `Poll::Pending` is returned /// and the current task is notified once a new event is received. /// /// The I/O resource will remain in a read-ready state until readiness is /// cleared by calling [`clear_read_ready`]. /// /// [`clear_read_ready`]: method@Self::clear_read_ready /// /// # Panics /// /// This function panics if: /// /// * `ready` includes writable. /// * called from outside of a task context. /// /// # Warning /// /// This method may not be called concurrently. It takes `&self` to allow /// calling it concurrently with `poll_write_ready`. pub(crate) fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<ReadyEvent>> { self.registration.poll_readiness(cx, Direction::Read) } /// Checks the I/O resource's write readiness state. /// /// This always checks for writable readiness and also checks for HUP /// readiness on platforms that support it. /// /// If the resource is not ready for a write then `Poll::Pending` is /// returned and the current task is notified once a new event is received. /// /// The I/O resource will remain in a write-ready state until readiness is /// cleared by calling [`clear_write_ready`]. /// /// [`clear_write_ready`]: method@Self::clear_write_ready /// /// # Panics /// /// This function panics if: /// /// * `ready` contains bits besides `writable` and `hup`. /// * called from outside of a task context. /// /// # Warning /// /// This method may not be called concurrently. It takes `&self` to allow /// calling it concurrently with `poll_read_ready`. pub(crate) fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<ReadyEvent>> { self.registration.poll_readiness(cx, Direction::Write) } } cfg_io_readiness! 
{ impl<E> PollEvented<E> where E: Evented, { pub(crate) async fn readiness(&self, interest: mio::Ready) -> io::Result<ReadyEvent> { self.registration.readiness(interest).await } pub(crate) async fn async_io<F, R>(&self, interest: mio::Ready, mut op: F) -> io::Result<R> where F: FnMut(&E) -> io::Result<R>, { loop { let event = self.readiness(interest).await?; match op(self.get_ref()) { Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { self.clear_readiness(event); } x => return x, } } } } } // ===== Read / Write impls ===== impl<E> AsyncRead for PollEvented<E> where E: Evented + Read + Unpin, { fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll<io::Result<()>> { loop { let ev = ready!(self.poll_read_ready(cx))?; // We can't assume the `Read` won't look at the read buffer, // so we have to force initialization here. let r = (*self).get_mut().read(buf.initialize_unfilled()); if is_wouldblock(&r) { self.clear_readiness(ev); continue; } return Poll::Ready(r.map(|n| { buf.add_filled(n); })); } } } impl<E> AsyncWrite for PollEvented<E> where E: Evented + Write + Unpin, { fn poll_write( mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll<io::Result<usize>> { loop { let ev = ready!(self.poll_write_ready(cx))?; let r = (*self).get_mut().write(buf); if is_wouldblock(&r) { self.clear_readiness(ev); continue; } return Poll::Ready(r); } } fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> { loop { let ev = ready!(self.poll_write_ready(cx))?; let r = (*self).get_mut().flush(); if is_wouldblock(&r) { self.clear_readiness(ev); continue; } return Poll::Ready(r); } } fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> { Poll::Ready(Ok(())) } } fn is_wouldblock<T>(r: &io::Result<T>) -> bool { match *r { Ok(_) => false, Err(ref e) => e.kind() == io::ErrorKind::WouldBlock, } } impl<E: Evented + fmt::Debug> fmt::Debug for PollEvented<E> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("PollEvented").field("io", &self.io).finish() } } impl<E: Evented> Drop for PollEvented<E> { fn drop(&mut self) { if let Some(io) = self.io.take() { // Ignore errors let _ = self.registration.deregister(&io); } } }
{ let registration = Registration::new_with_ready_and_handle(&io, ready, handle)?; Ok(Self { io: Some(io), registration, }) }
identifier_body
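The poll_evented.rs record above centers on one pattern: poll readiness, attempt the non-blocking operation, and clear the cached readiness when it reports WouldBlock. A minimal, self-contained sketch of that single retry step follows (std only; this is illustrative code, not tokio's, and the function and parameter names are invented for the example):

use std::io;

// One iteration of the readiness/retry loop used by the poll_read/poll_write
// impls above: run the non-blocking op; on WouldBlock the cached readiness is
// stale, so clear it and report "not ready" to the caller; otherwise return
// the result (success or a hard error).
fn try_io_once<R>(
    mut op: impl FnMut() -> io::Result<R>,
    mut clear_readiness: impl FnMut(),
) -> Option<io::Result<R>> {
    match op() {
        Err(e) if e.kind() == io::ErrorKind::WouldBlock => {
            clear_readiness();
            None // the caller would return Poll::Pending and wait for the next reactor event
        }
        other => Some(other),
    }
}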
poll_evented.rs
use crate::io::driver::{Direction, Handle, ReadyEvent}; use crate::io::registration::Registration; use crate::io::{AsyncRead, AsyncWrite, ReadBuf}; use mio::event::Evented; use std::fmt; use std::io::{self, Read, Write}; use std::marker::Unpin; use std::pin::Pin; use std::task::{Context, Poll}; cfg_io_driver! { /// Associates an I/O resource that implements the [`std::io::Read`] and/or /// [`std::io::Write`] traits with the reactor that drives it. /// /// `PollEvented` uses [`Registration`] internally to take a type that /// implements [`mio::Evented`] as well as [`std::io::Read`] and or /// [`std::io::Write`] and associate it with a reactor that will drive it. /// /// Once the [`mio::Evented`] type is wrapped by `PollEvented`, it can be /// used from within the future's execution model. As such, the /// `PollEvented` type provides [`AsyncRead`] and [`AsyncWrite`] /// implementations using the underlying I/O resource as well as readiness /// events provided by the reactor. /// /// **Note**: While `PollEvented` is `Sync` (if the underlying I/O type is /// `Sync`), the caller must ensure that there are at most two tasks that /// use a `PollEvented` instance concurrently. One for reading and one for /// writing. While violating this requirement is "safe" from a Rust memory /// model point of view, it will result in unexpected behavior in the form /// of lost notifications and tasks hanging. /// /// ## Readiness events /// /// Besides just providing [`AsyncRead`] and [`AsyncWrite`] implementations, /// this type also supports access to the underlying readiness event stream. /// While similar in function to what [`Registration`] provides, the /// semantics are a bit different. /// /// Two functions are provided to access the readiness events: /// [`poll_read_ready`] and [`poll_write_ready`]. These functions return the /// current readiness state of the `PollEvented` instance. If /// [`poll_read_ready`] indicates read readiness, immediately calling /// [`poll_read_ready`] again will also indicate read readiness. /// /// When the operation is attempted and is unable to succeed due to the I/O /// resource not being ready, the caller must call [`clear_read_ready`] or /// [`clear_write_ready`]. This clears the readiness state until a new /// readiness event is received. /// /// This allows the caller to implement additional functions. For example, /// [`TcpListener`] implements poll_accept by using [`poll_read_ready`] and /// [`clear_read_ready`]. /// /// ## Platform-specific events /// /// `PollEvented` also allows receiving platform-specific `mio::Ready` events. /// These events are included as part of the read readiness event stream. The /// write readiness event stream is only for `Ready::writable()` events. /// /// [`std::io::Read`]: trait@std::io::Read /// [`std::io::Write`]: trait@std::io::Write /// [`AsyncRead`]: trait@AsyncRead /// [`AsyncWrite`]: trait@AsyncWrite /// [`mio::Evented`]: trait@mio::Evented /// [`Registration`]: struct@Registration /// [`TcpListener`]: struct@crate::net::TcpListener /// [`clear_read_ready`]: method@Self::clear_read_ready /// [`clear_write_ready`]: method@Self::clear_write_ready /// [`poll_read_ready`]: method@Self::poll_read_ready /// [`poll_write_ready`]: method@Self::poll_write_ready pub(crate) struct PollEvented<E: Evented> { io: Option<E>, registration: Registration, } } // ===== impl PollEvented ===== impl<E> PollEvented<E> where E: Evented, { /// Creates a new `PollEvented` associated with the default reactor. 
/// /// # Panics /// /// This function panics if thread-local runtime is not set. /// /// The runtime is usually set implicitly when this function is called /// from a future driven by a tokio runtime, otherwise runtime can be set /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. #[cfg_attr(feature = "signal", allow(unused))] pub(crate) fn new(io: E) -> io::Result<Self> { PollEvented::new_with_ready(io, mio::Ready::all()) } /// Creates a new `PollEvented` associated with the default reactor, for specific `mio::Ready` /// state. `new_with_ready` should be used over `new` when you need control over the readiness /// state, such as when a file descriptor only allows reads. This does not add `hup` or `error` /// so if you are interested in those states, you will need to add them to the readiness state /// passed to this function. /// /// An example to listen to read only /// /// ```rust /// ##[cfg(unix)] /// mio::Ready::from_usize( /// mio::Ready::readable().as_usize() /// | mio::unix::UnixReady::error().as_usize() /// | mio::unix::UnixReady::hup().as_usize() /// ); /// ``` /// /// # Panics /// /// This function panics if thread-local runtime is not set. /// /// The runtime is usually set implicitly when this function is called /// from a future driven by a tokio runtime, otherwise runtime can be set /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. #[cfg_attr(feature = "signal", allow(unused))] pub(crate) fn new_with_ready(io: E, ready: mio::Ready) -> io::Result<Self> { Self::new_with_ready_and_handle(io, ready, Handle::current()) } pub(crate) fn new_with_ready_and_handle( io: E, ready: mio::Ready, handle: Handle, ) -> io::Result<Self> { let registration = Registration::new_with_ready_and_handle(&io, ready, handle)?; Ok(Self { io: Some(io), registration, }) } /// Returns a shared reference to the underlying I/O object this readiness /// stream is wrapping. #[cfg(any( feature = "process", feature = "tcp", feature = "udp", feature = "uds", feature = "signal" ))] pub(crate) fn get_ref(&self) -> &E { self.io.as_ref().unwrap() } /// Returns a mutable reference to the underlying I/O object this readiness /// stream is wrapping. pub(crate) fn get_mut(&mut self) -> &mut E { self.io.as_mut().unwrap() } /// Consumes self, returning the inner I/O object /// /// This function will deregister the I/O resource from the reactor before /// returning. If the deregistration operation fails, an error is returned. /// /// Note that deregistering does not guarantee that the I/O resource can be /// registered with a different reactor. Some I/O resource types can only be /// associated with a single reactor instance for their lifetime. #[cfg(any(feature = "tcp", feature = "udp", feature = "uds"))] pub(crate) fn into_inner(mut self) -> io::Result<E> { let io = self.io.take().unwrap(); self.registration.deregister(&io)?; Ok(io) } pub(crate) fn clear_readiness(&self, event: ReadyEvent) { self.registration.clear_readiness(event); } /// Checks the I/O resource's read readiness state. /// /// The mask argument allows specifying what readiness to notify on. This /// can be any value, including platform specific readiness, **except** /// `writable`. HUP is always implicitly included on platforms that support /// it. /// /// If the resource is not ready for a read then `Poll::Pending` is returned /// and the current task is notified once a new event is received. 
/// /// The I/O resource will remain in a read-ready state until readiness is /// cleared by calling [`clear_read_ready`]. /// /// [`clear_read_ready`]: method@Self::clear_read_ready /// /// # Panics /// /// This function panics if: /// /// * `ready` includes writable. /// * called from outside of a task context. /// /// # Warning /// /// This method may not be called concurrently. It takes `&self` to allow /// calling it concurrently with `poll_write_ready`. pub(crate) fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<ReadyEvent>> { self.registration.poll_readiness(cx, Direction::Read) } /// Checks the I/O resource's write readiness state. /// /// This always checks for writable readiness and also checks for HUP /// readiness on platforms that support it. /// /// If the resource is not ready for a write then `Poll::Pending` is /// returned and the current task is notified once a new event is received. /// /// The I/O resource will remain in a write-ready state until readiness is /// cleared by calling [`clear_write_ready`]. /// /// [`clear_write_ready`]: method@Self::clear_write_ready /// /// # Panics /// /// This function panics if: /// /// * `ready` contains bits besides `writable` and `hup`. /// * called from outside of a task context. /// /// # Warning /// /// This method may not be called concurrently. It takes `&self` to allow /// calling it concurrently with `poll_read_ready`. pub(crate) fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<ReadyEvent>> { self.registration.poll_readiness(cx, Direction::Write) } } cfg_io_readiness! { impl<E> PollEvented<E> where E: Evented, { pub(crate) async fn readiness(&self, interest: mio::Ready) -> io::Result<ReadyEvent> { self.registration.readiness(interest).await } pub(crate) async fn async_io<F, R>(&self, interest: mio::Ready, mut op: F) -> io::Result<R> where F: FnMut(&E) -> io::Result<R>, { loop { let event = self.readiness(interest).await?; match op(self.get_ref()) { Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { self.clear_readiness(event); } x => return x, } } } } } // ===== Read / Write impls ===== impl<E> AsyncRead for PollEvented<E> where E: Evented + Read + Unpin, { fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll<io::Result<()>> { loop { let ev = ready!(self.poll_read_ready(cx))?; // We can't assume the `Read` won't look at the read buffer, // so we have to force initialization here. let r = (*self).get_mut().read(buf.initialize_unfilled()); if is_wouldblock(&r)
return Poll::Ready(r.map(|n| { buf.add_filled(n); })); } } } impl<E> AsyncWrite for PollEvented<E> where E: Evented + Write + Unpin, { fn poll_write( mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll<io::Result<usize>> { loop { let ev = ready!(self.poll_write_ready(cx))?; let r = (*self).get_mut().write(buf); if is_wouldblock(&r) { self.clear_readiness(ev); continue; } return Poll::Ready(r); } } fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> { loop { let ev = ready!(self.poll_write_ready(cx))?; let r = (*self).get_mut().flush(); if is_wouldblock(&r) { self.clear_readiness(ev); continue; } return Poll::Ready(r); } } fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> { Poll::Ready(Ok(())) } } fn is_wouldblock<T>(r: &io::Result<T>) -> bool { match *r { Ok(_) => false, Err(ref e) => e.kind() == io::ErrorKind::WouldBlock, } } impl<E: Evented + fmt::Debug> fmt::Debug for PollEvented<E> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("PollEvented").field("io", &self.io).finish() } } impl<E: Evented> Drop for PollEvented<E> { fn drop(&mut self) { if let Some(io) = self.io.take() { // Ignore errors let _ = self.registration.deregister(&io); } } }
{ self.clear_readiness(ev); continue; }
conditional_block
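The poll_* loops in these records rely on the `ready!` macro to bail out early on Poll::Pending. For reference, a sketch of the conventional definition that macro corresponds to (the exact crate-local macro may differ cosmetically, but the expansion is the same idea):

// `ready!(expr)` unwraps a Poll::Ready value or early-returns Poll::Pending
// from the enclosing poll function.
macro_rules! ready {
    ($e:expr $(,)?) => {
        match $e {
            core::task::Poll::Ready(t) => t,
            core::task::Poll::Pending => return core::task::Poll::Pending,
        }
    };
}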
poll_evented.rs
use crate::io::driver::{Direction, Handle, ReadyEvent}; use crate::io::registration::Registration; use crate::io::{AsyncRead, AsyncWrite, ReadBuf}; use mio::event::Evented; use std::fmt; use std::io::{self, Read, Write}; use std::marker::Unpin; use std::pin::Pin; use std::task::{Context, Poll}; cfg_io_driver! { /// Associates an I/O resource that implements the [`std::io::Read`] and/or /// [`std::io::Write`] traits with the reactor that drives it. /// /// `PollEvented` uses [`Registration`] internally to take a type that /// implements [`mio::Evented`] as well as [`std::io::Read`] and or /// [`std::io::Write`] and associate it with a reactor that will drive it. /// /// Once the [`mio::Evented`] type is wrapped by `PollEvented`, it can be /// used from within the future's execution model. As such, the /// `PollEvented` type provides [`AsyncRead`] and [`AsyncWrite`] /// implementations using the underlying I/O resource as well as readiness /// events provided by the reactor. /// /// **Note**: While `PollEvented` is `Sync` (if the underlying I/O type is /// `Sync`), the caller must ensure that there are at most two tasks that /// use a `PollEvented` instance concurrently. One for reading and one for /// writing. While violating this requirement is "safe" from a Rust memory /// model point of view, it will result in unexpected behavior in the form /// of lost notifications and tasks hanging. /// /// ## Readiness events /// /// Besides just providing [`AsyncRead`] and [`AsyncWrite`] implementations, /// this type also supports access to the underlying readiness event stream. /// While similar in function to what [`Registration`] provides, the /// semantics are a bit different. /// /// Two functions are provided to access the readiness events: /// [`poll_read_ready`] and [`poll_write_ready`]. These functions return the /// current readiness state of the `PollEvented` instance. If /// [`poll_read_ready`] indicates read readiness, immediately calling /// [`poll_read_ready`] again will also indicate read readiness. /// /// When the operation is attempted and is unable to succeed due to the I/O /// resource not being ready, the caller must call [`clear_read_ready`] or /// [`clear_write_ready`]. This clears the readiness state until a new /// readiness event is received. /// /// This allows the caller to implement additional functions. For example, /// [`TcpListener`] implements poll_accept by using [`poll_read_ready`] and /// [`clear_read_ready`]. /// /// ## Platform-specific events /// /// `PollEvented` also allows receiving platform-specific `mio::Ready` events. /// These events are included as part of the read readiness event stream. The /// write readiness event stream is only for `Ready::writable()` events. /// /// [`std::io::Read`]: trait@std::io::Read /// [`std::io::Write`]: trait@std::io::Write /// [`AsyncRead`]: trait@AsyncRead /// [`AsyncWrite`]: trait@AsyncWrite /// [`mio::Evented`]: trait@mio::Evented /// [`Registration`]: struct@Registration /// [`TcpListener`]: struct@crate::net::TcpListener /// [`clear_read_ready`]: method@Self::clear_read_ready /// [`clear_write_ready`]: method@Self::clear_write_ready /// [`poll_read_ready`]: method@Self::poll_read_ready /// [`poll_write_ready`]: method@Self::poll_write_ready pub(crate) struct PollEvented<E: Evented> { io: Option<E>, registration: Registration, } } // ===== impl PollEvented ===== impl<E> PollEvented<E> where E: Evented, { /// Creates a new `PollEvented` associated with the default reactor. 
/// /// # Panics /// /// This function panics if thread-local runtime is not set. /// /// The runtime is usually set implicitly when this function is called /// from a future driven by a tokio runtime, otherwise runtime can be set /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. #[cfg_attr(feature = "signal", allow(unused))] pub(crate) fn new(io: E) -> io::Result<Self> { PollEvented::new_with_ready(io, mio::Ready::all()) } /// Creates a new `PollEvented` associated with the default reactor, for specific `mio::Ready` /// state. `new_with_ready` should be used over `new` when you need control over the readiness /// state, such as when a file descriptor only allows reads. This does not add `hup` or `error` /// so if you are interested in those states, you will need to add them to the readiness state /// passed to this function. /// /// An example to listen to read only /// /// ```rust /// ##[cfg(unix)] /// mio::Ready::from_usize( /// mio::Ready::readable().as_usize() /// | mio::unix::UnixReady::error().as_usize() /// | mio::unix::UnixReady::hup().as_usize() /// ); /// ``` /// /// # Panics /// /// This function panics if thread-local runtime is not set. /// /// The runtime is usually set implicitly when this function is called /// from a future driven by a tokio runtime, otherwise runtime can be set /// explicitly with [`Runtime::enter`](crate::runtime::Runtime::enter) function. #[cfg_attr(feature = "signal", allow(unused))] pub(crate) fn new_with_ready(io: E, ready: mio::Ready) -> io::Result<Self> { Self::new_with_ready_and_handle(io, ready, Handle::current()) } pub(crate) fn new_with_ready_and_handle( io: E, ready: mio::Ready, handle: Handle, ) -> io::Result<Self> { let registration = Registration::new_with_ready_and_handle(&io, ready, handle)?; Ok(Self { io: Some(io), registration, }) } /// Returns a shared reference to the underlying I/O object this readiness /// stream is wrapping. #[cfg(any( feature = "process", feature = "tcp", feature = "udp", feature = "uds", feature = "signal" ))] pub(crate) fn get_ref(&self) -> &E { self.io.as_ref().unwrap() } /// Returns a mutable reference to the underlying I/O object this readiness /// stream is wrapping. pub(crate) fn get_mut(&mut self) -> &mut E { self.io.as_mut().unwrap() } /// Consumes self, returning the inner I/O object /// /// This function will deregister the I/O resource from the reactor before /// returning. If the deregistration operation fails, an error is returned. /// /// Note that deregistering does not guarantee that the I/O resource can be /// registered with a different reactor. Some I/O resource types can only be /// associated with a single reactor instance for their lifetime. #[cfg(any(feature = "tcp", feature = "udp", feature = "uds"))] pub(crate) fn into_inner(mut self) -> io::Result<E> { let io = self.io.take().unwrap(); self.registration.deregister(&io)?; Ok(io) } pub(crate) fn clear_readiness(&self, event: ReadyEvent) { self.registration.clear_readiness(event); } /// Checks the I/O resource's read readiness state. /// /// The mask argument allows specifying what readiness to notify on. This /// can be any value, including platform specific readiness, **except** /// `writable`. HUP is always implicitly included on platforms that support /// it. /// /// If the resource is not ready for a read then `Poll::Pending` is returned /// and the current task is notified once a new event is received. 
/// /// The I/O resource will remain in a read-ready state until readiness is /// cleared by calling [`clear_read_ready`]. /// /// [`clear_read_ready`]: method@Self::clear_read_ready /// /// # Panics ///
/// /// # Warning /// /// This method may not be called concurrently. It takes `&self` to allow /// calling it concurrently with `poll_write_ready`. pub(crate) fn poll_read_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<ReadyEvent>> { self.registration.poll_readiness(cx, Direction::Read) } /// Checks the I/O resource's write readiness state. /// /// This always checks for writable readiness and also checks for HUP /// readiness on platforms that support it. /// /// If the resource is not ready for a write then `Poll::Pending` is /// returned and the current task is notified once a new event is received. /// /// The I/O resource will remain in a write-ready state until readiness is /// cleared by calling [`clear_write_ready`]. /// /// [`clear_write_ready`]: method@Self::clear_write_ready /// /// # Panics /// /// This function panics if: /// /// * `ready` contains bits besides `writable` and `hup`. /// * called from outside of a task context. /// /// # Warning /// /// This method may not be called concurrently. It takes `&self` to allow /// calling it concurrently with `poll_read_ready`. pub(crate) fn poll_write_ready(&self, cx: &mut Context<'_>) -> Poll<io::Result<ReadyEvent>> { self.registration.poll_readiness(cx, Direction::Write) } } cfg_io_readiness! { impl<E> PollEvented<E> where E: Evented, { pub(crate) async fn readiness(&self, interest: mio::Ready) -> io::Result<ReadyEvent> { self.registration.readiness(interest).await } pub(crate) async fn async_io<F, R>(&self, interest: mio::Ready, mut op: F) -> io::Result<R> where F: FnMut(&E) -> io::Result<R>, { loop { let event = self.readiness(interest).await?; match op(self.get_ref()) { Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { self.clear_readiness(event); } x => return x, } } } } } // ===== Read / Write impls ===== impl<E> AsyncRead for PollEvented<E> where E: Evented + Read + Unpin, { fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll<io::Result<()>> { loop { let ev = ready!(self.poll_read_ready(cx))?; // We can't assume the `Read` won't look at the read buffer, // so we have to force initialization here. let r = (*self).get_mut().read(buf.initialize_unfilled()); if is_wouldblock(&r) { self.clear_readiness(ev); continue; } return Poll::Ready(r.map(|n| { buf.add_filled(n); })); } } } impl<E> AsyncWrite for PollEvented<E> where E: Evented + Write + Unpin, { fn poll_write( mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll<io::Result<usize>> { loop { let ev = ready!(self.poll_write_ready(cx))?; let r = (*self).get_mut().write(buf); if is_wouldblock(&r) { self.clear_readiness(ev); continue; } return Poll::Ready(r); } } fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> { loop { let ev = ready!(self.poll_write_ready(cx))?; let r = (*self).get_mut().flush(); if is_wouldblock(&r) { self.clear_readiness(ev); continue; } return Poll::Ready(r); } } fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> { Poll::Ready(Ok(())) } } fn is_wouldblock<T>(r: &io::Result<T>) -> bool { match *r { Ok(_) => false, Err(ref e) => e.kind() == io::ErrorKind::WouldBlock, } } impl<E: Evented + fmt::Debug> fmt::Debug for PollEvented<E> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("PollEvented").field("io", &self.io).finish() } } impl<E: Evented> Drop for PollEvented<E> { fn drop(&mut self) { if let Some(io) = self.io.take() { // Ignore errors let _ = self.registration.deregister(&io); } } }
/// This function panics if: /// /// * `ready` includes writable. /// * called from outside of a task context.
random_line_split
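poll_read in the record above fills a ReadBuf by initializing spare capacity, reading into it, and advancing the filled cursor with add_filled. A small sketch of that bookkeeping against the public tokio::io::ReadBuf API (the code in the record uses a crate-internal ReadBuf; the helper name here is illustrative):

use tokio::io::ReadBuf;

// Copy as much of `src` as fits into the unfilled part of `buf`, mirroring
// the `read` + `add_filled(n)` sequence in poll_read above.
fn fill_from_slice(src: &[u8], buf: &mut ReadBuf<'_>) -> usize {
    let dst = buf.initialize_unfilled(); // zero-initializes the spare capacity
    let n = src.len().min(dst.len());
    dst[..n].copy_from_slice(&src[..n]);
    buf.add_filled(n); // advance the filled region past the bytes written
    n
}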
util.rs
// Copyright 2019-2023 ChainSafe Systems // SPDX-License-Identifier: Apache-2.0, MIT use std::{ collections::VecDeque, future::Future, sync::{ atomic::{self, AtomicU64}, Arc, }, }; use crate::ipld::{CidHashSet, Ipld}; use crate::shim::clock::ChainEpoch; use crate::utils::db::car_stream::Block; use crate::utils::io::progress_log::WithProgressRaw; use crate::{ blocks::{BlockHeader, Tipset}, utils::encoding::from_slice_with_fallback, }; use cid::Cid; use futures::Stream; use fvm_ipld_blockstore::Blockstore; use lazy_static::lazy_static; use pin_project_lite::pin_project; use std::pin::Pin; use std::task::{Context, Poll}; /// Traverses all Cid links, hashing and loading all unique values and using the /// callback function to interact with the data. #[async_recursion::async_recursion] async fn traverse_ipld_links_hash<F, T>( walked: &mut CidHashSet, load_block: &mut F, ipld: &Ipld, on_inserted: &(impl Fn(usize) + Send + Sync), ) -> Result<(), anyhow::Error> where F: FnMut(Cid) -> T + Send, T: Future<Output = Result<Vec<u8>, anyhow::Error>> + Send, { match ipld { Ipld::Map(m) => { for (_, v) in m.iter() { traverse_ipld_links_hash(walked, load_block, v, on_inserted).await?; } } Ipld::List(list) => { for v in list.iter() { traverse_ipld_links_hash(walked, load_block, v, on_inserted).await?; } } &Ipld::Link(cid) => { // WASM blocks are stored as IPLD_RAW. They should be loaded but not traversed. if cid.codec() == crate::shim::crypto::IPLD_RAW { if!walked.insert(cid) { return Ok(()); } on_inserted(walked.len()); let _ = load_block(cid).await?; } if cid.codec() == fvm_ipld_encoding::DAG_CBOR { if!walked.insert(cid) { return Ok(()); } on_inserted(walked.len()); let bytes = load_block(cid).await?; let ipld = from_slice_with_fallback(&bytes)?; traverse_ipld_links_hash(walked, load_block, &ipld, on_inserted).await?; } } _ => (), } Ok(()) } /// Load and hash CIDs and resolve recursively. pub async fn recurse_links_hash<F, T>( walked: &mut CidHashSet, root: Cid, load_block: &mut F, on_inserted: &(impl Fn(usize) + Send + Sync), ) -> Result<(), anyhow::Error> where F: FnMut(Cid) -> T + Send, T: Future<Output = Result<Vec<u8>, anyhow::Error>> + Send, { if!walked.insert(root) { // Cid has already been traversed return Ok(()); } on_inserted(walked.len()); if root.codec()!= fvm_ipld_encoding::DAG_CBOR { return Ok(()); } let bytes = load_block(root).await?; let ipld = from_slice_with_fallback(&bytes)?; traverse_ipld_links_hash(walked, load_block, &ipld, on_inserted).await?; Ok(()) } pub type ProgressBarCurrentTotalPair = Arc<(AtomicU64, AtomicU64)>; lazy_static! { pub static ref WALK_SNAPSHOT_PROGRESS_DB_GC: ProgressBarCurrentTotalPair = Default::default(); } /// Walks over tipset and state data and loads all blocks not yet seen. /// This is tracked based on the callback function loading blocks. 
pub async fn walk_snapshot<F, T>( tipset: &Tipset, recent_roots: i64, mut load_block: F, progress_bar_message: Option<&str>, progress_tracker: Option<ProgressBarCurrentTotalPair>, estimated_total_records: Option<u64>, ) -> anyhow::Result<usize> where F: FnMut(Cid) -> T + Send, T: Future<Output = anyhow::Result<Vec<u8>>> + Send, { let estimated_total_records = estimated_total_records.unwrap_or_default(); let message = progress_bar_message.unwrap_or("Walking snapshot"); #[allow(deprecated)] // Tracking issue: https://github.com/ChainSafe/forest/issues/3157 let wp = WithProgressRaw::new(message, estimated_total_records); let mut seen = CidHashSet::default(); let mut blocks_to_walk: VecDeque<Cid> = tipset.cids().to_vec().into(); let mut current_min_height = tipset.epoch(); let incl_roots_epoch = tipset.epoch() - recent_roots; let on_inserted = { let wp = wp.clone(); let progress_tracker = progress_tracker.clone(); move |len: usize| { let progress = len as u64; let total = progress.max(estimated_total_records); wp.set(progress); wp.set_total(total); if let Some(progress_tracker) = &progress_tracker { progress_tracker .0 .store(progress, atomic::Ordering::Relaxed); progress_tracker.1.store(total, atomic::Ordering::Relaxed); } } }; while let Some(next) = blocks_to_walk.pop_front() { if!seen.insert(next) { continue; }; on_inserted(seen.len()); if!should_save_block_to_snapshot(next) { continue; } let data = load_block(next).await?; let h = from_slice_with_fallback::<BlockHeader>(&data)?; if current_min_height > h.epoch() { current_min_height = h.epoch(); } if h.epoch() > incl_roots_epoch { recurse_links_hash(&mut seen, *h.messages(), &mut load_block, &on_inserted).await?; } if h.epoch() > 0 { for p in h.parents().cids() { blocks_to_walk.push_back(*p); } } else { for p in h.parents().cids() { load_block(*p).await?; } } if h.epoch() == 0 || h.epoch() > incl_roots_epoch { recurse_links_hash(&mut seen, *h.state_root(), &mut load_block, &on_inserted).await?; } } Ok(seen.len()) } fn should_save_block_to_snapshot(cid: Cid) -> bool { // Don't include identity CIDs. // We only include raw and dagcbor, for now. // Raw for "code" CIDs. if cid.hash().code() == u64::from(cid::multihash::Code::Identity) { false } else { matches!( cid.codec(), crate::shim::crypto::IPLD_RAW | fvm_ipld_encoding::DAG_CBOR ) } } /// Depth-first-search iterator for `ipld` leaf nodes. /// /// This iterator consumes the given `ipld` structure and returns leaf nodes (i.e., /// no list or map) in depth-first order. The iterator can be extended at any /// point by the caller. /// /// Consider walking this `ipld` graph: /// ```text /// List /// ├ Integer(5) /// ├ Link(Y) /// └ String("string") /// /// Link(Y): /// Map /// ├ "key1" => Bool(true) /// └ "key2" => Float(3.14) /// ``` /// /// If we walk the above `ipld` graph (replacing `Link(Y)` when it is encountered), the leaf nodes will be seen in this order: /// 1. `Integer(5)` /// 2. `Bool(true)` /// 3. `Float(3.14)` /// 4. `String("string")` pub struct DfsIter { dfs: VecDeque<Ipld>, } impl DfsIter { pub fn new(root: Ipld) -> Self { DfsIter { dfs: VecDeque::from([root]), } } pub fn walk_next(&mut self, ipld: Ipld) { self.dfs.push_front(ipld) } } impl From<Cid> for DfsIter { fn from(cid: Cid) -> Self { DfsIter::new(Ipld::Link(cid)) } } impl Iterator for DfsIter { type Item = Ipld; fn next(&mut self) -> Option<Self::Item> { while let Some(ipld) = self.dfs.pop_front() { match ipld { Ipld::List(list) => list.into_iter().rev().for_each(|elt| self.walk_next(elt)),
} } enum Task { // Yield the block, don't visit it. Emit(Cid), // Visit all the elements, recursively. Iterate(DfsIter), } pin_project! { pub struct ChainStream<DB, T> { #[pin] tipset_iter: T, db: DB, dfs: VecDeque<Task>, // Depth-first work queue. seen: CidHashSet, stateroot_limit: ChainEpoch, fail_on_dead_links: bool, } } impl<DB, T> ChainStream<DB, T> { pub fn with_seen(self, seen: CidHashSet) -> Self { ChainStream { seen,..self } } pub fn into_seen(self) -> CidHashSet { self.seen } } /// Stream all blocks that are reachable before the `stateroot_limit` epoch. After this limit, only /// block headers are streamed. Any dead links are reported as errors. /// /// # Arguments /// /// * `db` - A database that implements [`Blockstore`] interface. /// * `tipset_iter` - An iterator of [`Tipset`], descending order `$child -> $parent`. /// * `stateroot_limit` - An epoch that signifies how far back we need to inspect tipsets. /// in-depth. This has to be pre-calculated using this formula: `$cur_epoch - $depth`, where /// `$depth` is the number of `[`Tipset`]` that needs inspection. pub fn stream_chain<DB: Blockstore, T: Iterator<Item = Tipset> + Unpin>( db: DB, tipset_iter: T, stateroot_limit: ChainEpoch, ) -> ChainStream<DB, T> { ChainStream { tipset_iter, db, dfs: VecDeque::new(), seen: CidHashSet::default(), stateroot_limit, fail_on_dead_links: true, } } // Stream available graph in a depth-first search. All reachable nodes are touched and dead-links // are ignored. pub fn stream_graph<DB: Blockstore, T: Iterator<Item = Tipset> + Unpin>( db: DB, tipset_iter: T, ) -> ChainStream<DB, T> { ChainStream { tipset_iter, db, dfs: VecDeque::new(), seen: CidHashSet::default(), stateroot_limit: 0, fail_on_dead_links: false, } } impl<DB: Blockstore, T: Iterator<Item = Tipset> + Unpin> Stream for ChainStream<DB, T> { type Item = anyhow::Result<Block>; fn poll_next(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Option<Self::Item>> { use Task::*; let mut this = self.project(); let stateroot_limit = *this.stateroot_limit; loop { while let Some(task) = this.dfs.front_mut() { match task { Emit(cid) => { let cid = *cid; this.dfs.pop_front(); if let Some(data) = this.db.get(&cid)? { return Poll::Ready(Some(Ok(Block { cid, data }))); } else if *this.fail_on_dead_links { return Poll::Ready(Some(Err(anyhow::anyhow!("missing key: {}", cid)))); } } Iterate(dfs_iter) => { while let Some(ipld) = dfs_iter.next() { if let Ipld::Link(cid) = ipld { // The link traversal implementation assumes there are three types of encoding: // 1. DAG_CBOR: needs to be reachable, so we add it to the queue and load. // 2. IPLD_RAW: WASM blocks, for example. Need to be loaded, but not traversed. // 3. _: ignore all other links // Don't revisit what's already been visited. if should_save_block_to_snapshot(cid) && this.seen.insert(cid) { if let Some(data) = this.db.get(&cid)? { if cid.codec() == fvm_ipld_encoding::DAG_CBOR { let ipld: Ipld = from_slice_with_fallback(&data)?; dfs_iter.walk_next(ipld); } return Poll::Ready(Some(Ok(Block { cid, data }))); } else if *this.fail_on_dead_links { return Poll::Ready(Some(Err(anyhow::anyhow!( "missing key: {}", cid )))); } } } } this.dfs.pop_front(); } } } // This consumes a [`Tipset`] from the iterator one at a time. The next iteration of the // enclosing loop is processing the queue. Once the desired depth has been reached - // yield the block without walking the graph it represents. 
if let Some(tipset) = this.tipset_iter.as_mut().next() { for block in tipset.into_blocks().into_iter() { if this.seen.insert(*block.cid()) { // Make sure we always yield a block otherwise. this.dfs.push_back(Emit(*block.cid())); if block.epoch() == 0 { // The genesis block has some kind of dummy parent that needs to be emitted. for p in block.parents().cids() { this.dfs.push_back(Emit(*p)); } } // Process block messages. if block.epoch() > stateroot_limit { this.dfs .push_back(Iterate(DfsIter::from(*block.messages()))); } // Visit the block if it's within required depth. And a special case for `0` // epoch to match Lotus' implementation. if block.epoch() == 0 || block.epoch() > stateroot_limit { // NOTE: In the original `walk_snapshot` implementation we walk the dag // immediately. Which is what we do here as well, but using a queue. this.dfs .push_back(Iterate(DfsIter::from(*block.state_root()))); } } } } else { // That's it, nothing else to do. End of stream. return Poll::Ready(None); } } } }
Ipld::Map(map) => map.into_values().rev().for_each(|elt| self.walk_next(elt)), other => return Some(other), } } None
random_line_split
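The DfsIter doc comment in this util.rs record describes the leaf order for its example graph. A self-contained sketch of the same depth-first traversal, using a tiny stand-in enum instead of the crate's Ipld type so the ordering can be checked in isolation (all names here are illustrative):

use std::collections::VecDeque;

#[derive(Debug, PartialEq)]
enum Node {
    Int(i64),
    Bool(bool),
    Float(f64),
    Str(&'static str),
    List(Vec<Node>),
    Map(Vec<(&'static str, Node)>),
}

struct Dfs {
    queue: VecDeque<Node>,
}

impl Iterator for Dfs {
    type Item = Node;
    fn next(&mut self) -> Option<Node> {
        while let Some(node) = self.queue.pop_front() {
            match node {
                // Children are pushed to the front in reverse so the
                // left-most child is visited first, as in DfsIter above.
                Node::List(items) => items
                    .into_iter()
                    .rev()
                    .for_each(|n| self.queue.push_front(n)),
                Node::Map(entries) => entries
                    .into_iter()
                    .rev()
                    .for_each(|(_, n)| self.queue.push_front(n)),
                leaf => return Some(leaf),
            }
        }
        None
    }
}

fn main() {
    let graph = Node::List(vec![
        Node::Int(5),
        Node::Map(vec![("key1", Node::Bool(true)), ("key2", Node::Float(3.14))]),
        Node::Str("string"),
    ]);
    let leaves: Vec<Node> = Dfs { queue: VecDeque::from([graph]) }.collect();
    // Yields: Int(5), Bool(true), Float(3.14), Str("string")
    println!("{:?}", leaves);
}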
util.rs
// Copyright 2019-2023 ChainSafe Systems // SPDX-License-Identifier: Apache-2.0, MIT use std::{ collections::VecDeque, future::Future, sync::{ atomic::{self, AtomicU64}, Arc, }, }; use crate::ipld::{CidHashSet, Ipld}; use crate::shim::clock::ChainEpoch; use crate::utils::db::car_stream::Block; use crate::utils::io::progress_log::WithProgressRaw; use crate::{ blocks::{BlockHeader, Tipset}, utils::encoding::from_slice_with_fallback, }; use cid::Cid; use futures::Stream; use fvm_ipld_blockstore::Blockstore; use lazy_static::lazy_static; use pin_project_lite::pin_project; use std::pin::Pin; use std::task::{Context, Poll}; /// Traverses all Cid links, hashing and loading all unique values and using the /// callback function to interact with the data. #[async_recursion::async_recursion] async fn traverse_ipld_links_hash<F, T>( walked: &mut CidHashSet, load_block: &mut F, ipld: &Ipld, on_inserted: &(impl Fn(usize) + Send + Sync), ) -> Result<(), anyhow::Error> where F: FnMut(Cid) -> T + Send, T: Future<Output = Result<Vec<u8>, anyhow::Error>> + Send, { match ipld { Ipld::Map(m) => { for (_, v) in m.iter() { traverse_ipld_links_hash(walked, load_block, v, on_inserted).await?; } } Ipld::List(list) => { for v in list.iter() { traverse_ipld_links_hash(walked, load_block, v, on_inserted).await?; } } &Ipld::Link(cid) => { // WASM blocks are stored as IPLD_RAW. They should be loaded but not traversed. if cid.codec() == crate::shim::crypto::IPLD_RAW { if!walked.insert(cid) { return Ok(()); } on_inserted(walked.len()); let _ = load_block(cid).await?; } if cid.codec() == fvm_ipld_encoding::DAG_CBOR { if!walked.insert(cid) { return Ok(()); } on_inserted(walked.len()); let bytes = load_block(cid).await?; let ipld = from_slice_with_fallback(&bytes)?; traverse_ipld_links_hash(walked, load_block, &ipld, on_inserted).await?; } } _ => (), } Ok(()) } /// Load and hash CIDs and resolve recursively. pub async fn recurse_links_hash<F, T>( walked: &mut CidHashSet, root: Cid, load_block: &mut F, on_inserted: &(impl Fn(usize) + Send + Sync), ) -> Result<(), anyhow::Error> where F: FnMut(Cid) -> T + Send, T: Future<Output = Result<Vec<u8>, anyhow::Error>> + Send, { if!walked.insert(root) { // Cid has already been traversed return Ok(()); } on_inserted(walked.len()); if root.codec()!= fvm_ipld_encoding::DAG_CBOR { return Ok(()); } let bytes = load_block(root).await?; let ipld = from_slice_with_fallback(&bytes)?; traverse_ipld_links_hash(walked, load_block, &ipld, on_inserted).await?; Ok(()) } pub type ProgressBarCurrentTotalPair = Arc<(AtomicU64, AtomicU64)>; lazy_static! { pub static ref WALK_SNAPSHOT_PROGRESS_DB_GC: ProgressBarCurrentTotalPair = Default::default(); } /// Walks over tipset and state data and loads all blocks not yet seen. /// This is tracked based on the callback function loading blocks. 
pub async fn walk_snapshot<F, T>( tipset: &Tipset, recent_roots: i64, mut load_block: F, progress_bar_message: Option<&str>, progress_tracker: Option<ProgressBarCurrentTotalPair>, estimated_total_records: Option<u64>, ) -> anyhow::Result<usize> where F: FnMut(Cid) -> T + Send, T: Future<Output = anyhow::Result<Vec<u8>>> + Send, { let estimated_total_records = estimated_total_records.unwrap_or_default(); let message = progress_bar_message.unwrap_or("Walking snapshot"); #[allow(deprecated)] // Tracking issue: https://github.com/ChainSafe/forest/issues/3157 let wp = WithProgressRaw::new(message, estimated_total_records); let mut seen = CidHashSet::default(); let mut blocks_to_walk: VecDeque<Cid> = tipset.cids().to_vec().into(); let mut current_min_height = tipset.epoch(); let incl_roots_epoch = tipset.epoch() - recent_roots; let on_inserted = { let wp = wp.clone(); let progress_tracker = progress_tracker.clone(); move |len: usize| { let progress = len as u64; let total = progress.max(estimated_total_records); wp.set(progress); wp.set_total(total); if let Some(progress_tracker) = &progress_tracker { progress_tracker .0 .store(progress, atomic::Ordering::Relaxed); progress_tracker.1.store(total, atomic::Ordering::Relaxed); } } }; while let Some(next) = blocks_to_walk.pop_front() { if!seen.insert(next) { continue; }; on_inserted(seen.len()); if!should_save_block_to_snapshot(next) { continue; } let data = load_block(next).await?; let h = from_slice_with_fallback::<BlockHeader>(&data)?; if current_min_height > h.epoch() { current_min_height = h.epoch(); } if h.epoch() > incl_roots_epoch { recurse_links_hash(&mut seen, *h.messages(), &mut load_block, &on_inserted).await?; } if h.epoch() > 0 { for p in h.parents().cids() { blocks_to_walk.push_back(*p); } } else { for p in h.parents().cids() { load_block(*p).await?; } } if h.epoch() == 0 || h.epoch() > incl_roots_epoch { recurse_links_hash(&mut seen, *h.state_root(), &mut load_block, &on_inserted).await?; } } Ok(seen.len()) } fn should_save_block_to_snapshot(cid: Cid) -> bool
/// Depth-first-search iterator for `ipld` leaf nodes. /// /// This iterator consumes the given `ipld` structure and returns leaf nodes (i.e., /// no list or map) in depth-first order. The iterator can be extended at any /// point by the caller. /// /// Consider walking this `ipld` graph: /// ```text /// List /// ├ Integer(5) /// ├ Link(Y) /// └ String("string") /// /// Link(Y): /// Map /// ├ "key1" => Bool(true) /// └ "key2" => Float(3.14) /// ``` /// /// If we walk the above `ipld` graph (replacing `Link(Y)` when it is encountered), the leaf nodes will be seen in this order: /// 1. `Integer(5)` /// 2. `Bool(true)` /// 3. `Float(3.14)` /// 4. `String("string")` pub struct DfsIter { dfs: VecDeque<Ipld>, } impl DfsIter { pub fn new(root: Ipld) -> Self { DfsIter { dfs: VecDeque::from([root]), } } pub fn walk_next(&mut self, ipld: Ipld) { self.dfs.push_front(ipld) } } impl From<Cid> for DfsIter { fn from(cid: Cid) -> Self { DfsIter::new(Ipld::Link(cid)) } } impl Iterator for DfsIter { type Item = Ipld; fn next(&mut self) -> Option<Self::Item> { while let Some(ipld) = self.dfs.pop_front() { match ipld { Ipld::List(list) => list.into_iter().rev().for_each(|elt| self.walk_next(elt)), Ipld::Map(map) => map.into_values().rev().for_each(|elt| self.walk_next(elt)), other => return Some(other), } } None } } enum Task { // Yield the block, don't visit it. Emit(Cid), // Visit all the elements, recursively. Iterate(DfsIter), } pin_project! { pub struct ChainStream<DB, T> { #[pin] tipset_iter: T, db: DB, dfs: VecDeque<Task>, // Depth-first work queue. seen: CidHashSet, stateroot_limit: ChainEpoch, fail_on_dead_links: bool, } } impl<DB, T> ChainStream<DB, T> { pub fn with_seen(self, seen: CidHashSet) -> Self { ChainStream { seen,..self } } pub fn into_seen(self) -> CidHashSet { self.seen } } /// Stream all blocks that are reachable before the `stateroot_limit` epoch. After this limit, only /// block headers are streamed. Any dead links are reported as errors. /// /// # Arguments /// /// * `db` - A database that implements [`Blockstore`] interface. /// * `tipset_iter` - An iterator of [`Tipset`], descending order `$child -> $parent`. /// * `stateroot_limit` - An epoch that signifies how far back we need to inspect tipsets. /// in-depth. This has to be pre-calculated using this formula: `$cur_epoch - $depth`, where /// `$depth` is the number of `[`Tipset`]` that needs inspection. pub fn stream_chain<DB: Blockstore, T: Iterator<Item = Tipset> + Unpin>( db: DB, tipset_iter: T, stateroot_limit: ChainEpoch, ) -> ChainStream<DB, T> { ChainStream { tipset_iter, db, dfs: VecDeque::new(), seen: CidHashSet::default(), stateroot_limit, fail_on_dead_links: true, } } // Stream available graph in a depth-first search. All reachable nodes are touched and dead-links // are ignored. pub fn stream_graph<DB: Blockstore, T: Iterator<Item = Tipset> + Unpin>( db: DB, tipset_iter: T, ) -> ChainStream<DB, T> { ChainStream { tipset_iter, db, dfs: VecDeque::new(), seen: CidHashSet::default(), stateroot_limit: 0, fail_on_dead_links: false, } } impl<DB: Blockstore, T: Iterator<Item = Tipset> + Unpin> Stream for ChainStream<DB, T> { type Item = anyhow::Result<Block>; fn poll_next(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Option<Self::Item>> { use Task::*; let mut this = self.project(); let stateroot_limit = *this.stateroot_limit; loop { while let Some(task) = this.dfs.front_mut() { match task { Emit(cid) => { let cid = *cid; this.dfs.pop_front(); if let Some(data) = this.db.get(&cid)? 
{ return Poll::Ready(Some(Ok(Block { cid, data }))); } else if *this.fail_on_dead_links { return Poll::Ready(Some(Err(anyhow::anyhow!("missing key: {}", cid)))); } } Iterate(dfs_iter) => { while let Some(ipld) = dfs_iter.next() { if let Ipld::Link(cid) = ipld { // The link traversal implementation assumes there are three types of encoding: // 1. DAG_CBOR: needs to be reachable, so we add it to the queue and load. // 2. IPLD_RAW: WASM blocks, for example. Need to be loaded, but not traversed. // 3. _: ignore all other links // Don't revisit what's already been visited. if should_save_block_to_snapshot(cid) && this.seen.insert(cid) { if let Some(data) = this.db.get(&cid)? { if cid.codec() == fvm_ipld_encoding::DAG_CBOR { let ipld: Ipld = from_slice_with_fallback(&data)?; dfs_iter.walk_next(ipld); } return Poll::Ready(Some(Ok(Block { cid, data }))); } else if *this.fail_on_dead_links { return Poll::Ready(Some(Err(anyhow::anyhow!( "missing key: {}", cid )))); } } } } this.dfs.pop_front(); } } } // This consumes a [`Tipset`] from the iterator one at a time. The next iteration of the // enclosing loop is processing the queue. Once the desired depth has been reached - // yield the block without walking the graph it represents. if let Some(tipset) = this.tipset_iter.as_mut().next() { for block in tipset.into_blocks().into_iter() { if this.seen.insert(*block.cid()) { // Make sure we always yield a block otherwise. this.dfs.push_back(Emit(*block.cid())); if block.epoch() == 0 { // The genesis block has some kind of dummy parent that needs to be emitted. for p in block.parents().cids() { this.dfs.push_back(Emit(*p)); } } // Process block messages. if block.epoch() > stateroot_limit { this.dfs .push_back(Iterate(DfsIter::from(*block.messages()))); } // Visit the block if it's within required depth. And a special case for `0` // epoch to match Lotus' implementation. if block.epoch() == 0 || block.epoch() > stateroot_limit { // NOTE: In the original `walk_snapshot` implementation we walk the dag // immediately. Which is what we do here as well, but using a queue. this.dfs .push_back(Iterate(DfsIter::from(*block.state_root()))); } } } } else { // That's it, nothing else to do. End of stream. return Poll::Ready(None); } } } }
{ // Don't include identity CIDs. // We only include raw and dagcbor, for now. // Raw for "code" CIDs. if cid.hash().code() == u64::from(cid::multihash::Code::Identity) { false } else { matches!( cid.codec(), crate::shim::crypto::IPLD_RAW | fvm_ipld_encoding::DAG_CBOR ) } }
identifier_body
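The middle of this record is the body of should_save_block_to_snapshot, which filters CIDs by multihash and codec. A standalone sketch of the same check against the public cid crate; the numeric constants are the standard multicodec values (identity multihash 0x00, raw 0x55, dag-cbor 0x71) that the crate-local IPLD_RAW and DAG_CBOR constants are assumed to resolve to:

use cid::Cid;

// Standard multicodec/multihash codes, assumed to match the crate constants
// used in should_save_block_to_snapshot above.
const IDENTITY_HASH: u64 = 0x00;
const IPLD_RAW: u64 = 0x55;
const DAG_CBOR: u64 = 0x71;

// Keep a block in the snapshot only if it is content-addressed (not an
// identity CID) and encoded as raw bytes or dag-cbor.
fn keep_in_snapshot(cid: &Cid) -> bool {
    cid.hash().code() != IDENTITY_HASH && matches!(cid.codec(), IPLD_RAW | DAG_CBOR)
}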
util.rs
// Copyright 2019-2023 ChainSafe Systems // SPDX-License-Identifier: Apache-2.0, MIT use std::{ collections::VecDeque, future::Future, sync::{ atomic::{self, AtomicU64}, Arc, }, }; use crate::ipld::{CidHashSet, Ipld}; use crate::shim::clock::ChainEpoch; use crate::utils::db::car_stream::Block; use crate::utils::io::progress_log::WithProgressRaw; use crate::{ blocks::{BlockHeader, Tipset}, utils::encoding::from_slice_with_fallback, }; use cid::Cid; use futures::Stream; use fvm_ipld_blockstore::Blockstore; use lazy_static::lazy_static; use pin_project_lite::pin_project; use std::pin::Pin; use std::task::{Context, Poll}; /// Traverses all Cid links, hashing and loading all unique values and using the /// callback function to interact with the data. #[async_recursion::async_recursion] async fn traverse_ipld_links_hash<F, T>( walked: &mut CidHashSet, load_block: &mut F, ipld: &Ipld, on_inserted: &(impl Fn(usize) + Send + Sync), ) -> Result<(), anyhow::Error> where F: FnMut(Cid) -> T + Send, T: Future<Output = Result<Vec<u8>, anyhow::Error>> + Send, { match ipld { Ipld::Map(m) => { for (_, v) in m.iter() { traverse_ipld_links_hash(walked, load_block, v, on_inserted).await?; } } Ipld::List(list) => { for v in list.iter() { traverse_ipld_links_hash(walked, load_block, v, on_inserted).await?; } } &Ipld::Link(cid) => { // WASM blocks are stored as IPLD_RAW. They should be loaded but not traversed. if cid.codec() == crate::shim::crypto::IPLD_RAW { if!walked.insert(cid) { return Ok(()); } on_inserted(walked.len()); let _ = load_block(cid).await?; } if cid.codec() == fvm_ipld_encoding::DAG_CBOR { if!walked.insert(cid) { return Ok(()); } on_inserted(walked.len()); let bytes = load_block(cid).await?; let ipld = from_slice_with_fallback(&bytes)?; traverse_ipld_links_hash(walked, load_block, &ipld, on_inserted).await?; } } _ => (), } Ok(()) } /// Load and hash CIDs and resolve recursively. pub async fn recurse_links_hash<F, T>( walked: &mut CidHashSet, root: Cid, load_block: &mut F, on_inserted: &(impl Fn(usize) + Send + Sync), ) -> Result<(), anyhow::Error> where F: FnMut(Cid) -> T + Send, T: Future<Output = Result<Vec<u8>, anyhow::Error>> + Send, { if!walked.insert(root) { // Cid has already been traversed return Ok(()); } on_inserted(walked.len()); if root.codec()!= fvm_ipld_encoding::DAG_CBOR { return Ok(()); } let bytes = load_block(root).await?; let ipld = from_slice_with_fallback(&bytes)?; traverse_ipld_links_hash(walked, load_block, &ipld, on_inserted).await?; Ok(()) } pub type ProgressBarCurrentTotalPair = Arc<(AtomicU64, AtomicU64)>; lazy_static! { pub static ref WALK_SNAPSHOT_PROGRESS_DB_GC: ProgressBarCurrentTotalPair = Default::default(); } /// Walks over tipset and state data and loads all blocks not yet seen. /// This is tracked based on the callback function loading blocks. 
pub async fn walk_snapshot<F, T>( tipset: &Tipset, recent_roots: i64, mut load_block: F, progress_bar_message: Option<&str>, progress_tracker: Option<ProgressBarCurrentTotalPair>, estimated_total_records: Option<u64>, ) -> anyhow::Result<usize> where F: FnMut(Cid) -> T + Send, T: Future<Output = anyhow::Result<Vec<u8>>> + Send, { let estimated_total_records = estimated_total_records.unwrap_or_default(); let message = progress_bar_message.unwrap_or("Walking snapshot"); #[allow(deprecated)] // Tracking issue: https://github.com/ChainSafe/forest/issues/3157 let wp = WithProgressRaw::new(message, estimated_total_records); let mut seen = CidHashSet::default(); let mut blocks_to_walk: VecDeque<Cid> = tipset.cids().to_vec().into(); let mut current_min_height = tipset.epoch(); let incl_roots_epoch = tipset.epoch() - recent_roots; let on_inserted = { let wp = wp.clone(); let progress_tracker = progress_tracker.clone(); move |len: usize| { let progress = len as u64; let total = progress.max(estimated_total_records); wp.set(progress); wp.set_total(total); if let Some(progress_tracker) = &progress_tracker { progress_tracker .0 .store(progress, atomic::Ordering::Relaxed); progress_tracker.1.store(total, atomic::Ordering::Relaxed); } } }; while let Some(next) = blocks_to_walk.pop_front() { if!seen.insert(next) { continue; }; on_inserted(seen.len()); if!should_save_block_to_snapshot(next) { continue; } let data = load_block(next).await?; let h = from_slice_with_fallback::<BlockHeader>(&data)?; if current_min_height > h.epoch() { current_min_height = h.epoch(); } if h.epoch() > incl_roots_epoch { recurse_links_hash(&mut seen, *h.messages(), &mut load_block, &on_inserted).await?; } if h.epoch() > 0 { for p in h.parents().cids() { blocks_to_walk.push_back(*p); } } else { for p in h.parents().cids() { load_block(*p).await?; } } if h.epoch() == 0 || h.epoch() > incl_roots_epoch { recurse_links_hash(&mut seen, *h.state_root(), &mut load_block, &on_inserted).await?; } } Ok(seen.len()) } fn should_save_block_to_snapshot(cid: Cid) -> bool { // Don't include identity CIDs. // We only include raw and dagcbor, for now. // Raw for "code" CIDs. if cid.hash().code() == u64::from(cid::multihash::Code::Identity) { false } else { matches!( cid.codec(), crate::shim::crypto::IPLD_RAW | fvm_ipld_encoding::DAG_CBOR ) } } /// Depth-first-search iterator for `ipld` leaf nodes. /// /// This iterator consumes the given `ipld` structure and returns leaf nodes (i.e., /// no list or map) in depth-first order. The iterator can be extended at any /// point by the caller. /// /// Consider walking this `ipld` graph: /// ```text /// List /// ├ Integer(5) /// ├ Link(Y) /// └ String("string") /// /// Link(Y): /// Map /// ├ "key1" => Bool(true) /// └ "key2" => Float(3.14) /// ``` /// /// If we walk the above `ipld` graph (replacing `Link(Y)` when it is encountered), the leaf nodes will be seen in this order: /// 1. `Integer(5)` /// 2. `Bool(true)` /// 3. `Float(3.14)` /// 4. 
`String("string")` pub struct DfsIter { dfs: VecDeque<Ipld>, } impl DfsIter { pub fn new(root: Ipld) -> Self { DfsIter { dfs: VecDeque::from([root]), } } pub fn walk_next(&mut self, ipld: Ipld) { self.dfs.push_front(ipld) } } impl From<Cid> for DfsIter { fn from(cid: Cid) -> Self { DfsIter::new(Ipld::Link(cid)) } } impl Iterator for DfsIter { type Item = Ipld; fn next(&mut self) -> Option<Self::Item> { while let Some(ipld) = self.dfs.pop_front() { match ipld { Ipld::List(list) => list.into_iter().rev().for_each(|elt| self.walk_next(elt)), Ipld::Map(map) => map.into_values().rev().for_each(|elt| self.walk_next(elt)), other => return Some(other), } } None } } enum Task {
Yield the block, don't visit it. Emit(Cid), // Visit all the elements, recursively. Iterate(DfsIter), } pin_project! { pub struct ChainStream<DB, T> { #[pin] tipset_iter: T, db: DB, dfs: VecDeque<Task>, // Depth-first work queue. seen: CidHashSet, stateroot_limit: ChainEpoch, fail_on_dead_links: bool, } } impl<DB, T> ChainStream<DB, T> { pub fn with_seen(self, seen: CidHashSet) -> Self { ChainStream { seen,..self } } pub fn into_seen(self) -> CidHashSet { self.seen } } /// Stream all blocks that are reachable before the `stateroot_limit` epoch. After this limit, only /// block headers are streamed. Any dead links are reported as errors. /// /// # Arguments /// /// * `db` - A database that implements [`Blockstore`] interface. /// * `tipset_iter` - An iterator of [`Tipset`], descending order `$child -> $parent`. /// * `stateroot_limit` - An epoch that signifies how far back we need to inspect tipsets. /// in-depth. This has to be pre-calculated using this formula: `$cur_epoch - $depth`, where /// `$depth` is the number of `[`Tipset`]` that needs inspection. pub fn stream_chain<DB: Blockstore, T: Iterator<Item = Tipset> + Unpin>( db: DB, tipset_iter: T, stateroot_limit: ChainEpoch, ) -> ChainStream<DB, T> { ChainStream { tipset_iter, db, dfs: VecDeque::new(), seen: CidHashSet::default(), stateroot_limit, fail_on_dead_links: true, } } // Stream available graph in a depth-first search. All reachable nodes are touched and dead-links // are ignored. pub fn stream_graph<DB: Blockstore, T: Iterator<Item = Tipset> + Unpin>( db: DB, tipset_iter: T, ) -> ChainStream<DB, T> { ChainStream { tipset_iter, db, dfs: VecDeque::new(), seen: CidHashSet::default(), stateroot_limit: 0, fail_on_dead_links: false, } } impl<DB: Blockstore, T: Iterator<Item = Tipset> + Unpin> Stream for ChainStream<DB, T> { type Item = anyhow::Result<Block>; fn poll_next(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Option<Self::Item>> { use Task::*; let mut this = self.project(); let stateroot_limit = *this.stateroot_limit; loop { while let Some(task) = this.dfs.front_mut() { match task { Emit(cid) => { let cid = *cid; this.dfs.pop_front(); if let Some(data) = this.db.get(&cid)? { return Poll::Ready(Some(Ok(Block { cid, data }))); } else if *this.fail_on_dead_links { return Poll::Ready(Some(Err(anyhow::anyhow!("missing key: {}", cid)))); } } Iterate(dfs_iter) => { while let Some(ipld) = dfs_iter.next() { if let Ipld::Link(cid) = ipld { // The link traversal implementation assumes there are three types of encoding: // 1. DAG_CBOR: needs to be reachable, so we add it to the queue and load. // 2. IPLD_RAW: WASM blocks, for example. Need to be loaded, but not traversed. // 3. _: ignore all other links // Don't revisit what's already been visited. if should_save_block_to_snapshot(cid) && this.seen.insert(cid) { if let Some(data) = this.db.get(&cid)? { if cid.codec() == fvm_ipld_encoding::DAG_CBOR { let ipld: Ipld = from_slice_with_fallback(&data)?; dfs_iter.walk_next(ipld); } return Poll::Ready(Some(Ok(Block { cid, data }))); } else if *this.fail_on_dead_links { return Poll::Ready(Some(Err(anyhow::anyhow!( "missing key: {}", cid )))); } } } } this.dfs.pop_front(); } } } // This consumes a [`Tipset`] from the iterator one at a time. The next iteration of the // enclosing loop is processing the queue. Once the desired depth has been reached - // yield the block without walking the graph it represents. 
if let Some(tipset) = this.tipset_iter.as_mut().next() { for block in tipset.into_blocks().into_iter() { if this.seen.insert(*block.cid()) { // Make sure we always yield a block otherwise. this.dfs.push_back(Emit(*block.cid())); if block.epoch() == 0 { // The genesis block has some kind of dummy parent that needs to be emitted. for p in block.parents().cids() { this.dfs.push_back(Emit(*p)); } } // Process block messages. if block.epoch() > stateroot_limit { this.dfs .push_back(Iterate(DfsIter::from(*block.messages()))); } // Visit the block if it's within required depth. And a special case for `0` // epoch to match Lotus' implementation. if block.epoch() == 0 || block.epoch() > stateroot_limit { // NOTE: In the original `walk_snapshot` implementation we walk the dag // immediately. Which is what we do here as well, but using a queue. this.dfs .push_back(Iterate(DfsIter::from(*block.state_root()))); } } } } else { // That's it, nothing else to do. End of stream. return Poll::Ready(None); } } } }
//
identifier_name
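// The ChainStream above drives its traversal with a VecDeque work queue (Emit/Iterate
// tasks) plus a seen-set so no CID is visited twice. Below is a minimal, std-only sketch
// of that queue-plus-seen-set pattern over a toy DAG; u64 ids stand in for CIDs and a
// HashMap stands in for the Blockstore, so none of these names are the Forest/fvm types.
use std::collections::{HashMap, HashSet, VecDeque};

/// Toy "blockstore": node id -> child ids.
type Store = HashMap<u64, Vec<u64>>;

/// Depth-first walk mirroring the Emit/Iterate split: emit a node once, then queue its links.
fn walk(store: &Store, roots: &[u64]) -> Vec<u64> {
    let mut out = Vec::new();
    let mut seen = HashSet::new();
    let mut queue: VecDeque<u64> = roots.iter().copied().collect();
    while let Some(id) = queue.pop_front() {
        // Don't revisit what's already been visited.
        if !seen.insert(id) {
            continue;
        }
        out.push(id); // "Emit" the node itself.
        if let Some(children) = store.get(&id) {
            // "Iterate": push links to the front so the walk stays depth-first.
            for &child in children.iter().rev() {
                queue.push_front(child);
            }
        }
        // A dead link (id missing from the store) is silently skipped here; the real
        // stream reports it as an error when fail_on_dead_links is set.
    }
    out
}

fn main() {
    let mut store = Store::new();
    store.insert(1, vec![2, 3]);
    store.insert(2, vec![4]);
    store.insert(3, vec![4]); // shared child: emitted only once
    store.insert(4, vec![]);
    assert_eq!(walk(&store, &[1]), vec![1, 2, 4, 3]);
}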
octree_gui.rs
use std::collections::VecDeque; use std::ops::RangeInclusive; use std::sync::{Arc, Mutex}; use std::time::Duration; use cgmath::{Rotation, Vector3}; use glium::{Display, Surface}; use glium::glutin; use glium::glutin::event::WindowEvent; use glium::glutin::window::WindowBuilder; use imgui::*; use imgui::{Context, FontConfig, FontGlyphRanges, FontSource}; use imgui_glium_renderer::Renderer; use imgui_winit_support::{HiDpiMode, WinitPlatform}; use num_traits::{FromPrimitive, ToPrimitive}; use winit::event::{ElementState, Event, VirtualKeyCode}; use crate::core::{Filter, Message, Payload, System}; use crate::NSE; use crate::rendering::{Camera, Mesh, Octree, OctreeConfig, OctreeInfo, OctreeOptimizations, RenderSystem, Transformation}; use crate::rendering::fractal_generators::FractalSelection; pub struct OctreeGuiSystem { imgui: Arc<Mutex<Context>>, platform: WinitPlatform, renderer: Renderer, display: Arc<Mutex<glium::Display>>, // octree data octree_config: OctreeConfig, octree_optimizations: OctreeOptimizations, // profiling data profiling_data: ProfilingData, frame_times: VecDeque<f32>, // message passing messages: Vec<Message>, } impl OctreeGuiSystem { pub fn new(nse: &NSE, _render_system: Arc<Mutex<RenderSystem>>) -> Arc<Mutex<Self>> { let mut imgui = Context::create(); // configure imgui-rs Context if necessary imgui.set_ini_filename(None); let context = glutin::ContextBuilder::new().with_vsync(false); let builder = WindowBuilder::new() .with_title("Octree - Config") .with_decorations(true) .with_inner_size(glutin::dpi::LogicalSize::new(420f64, 768f64)); let display = Display::new(builder, context, &nse.event_loop).expect("Failed to initialize display"); let mut platform = WinitPlatform::init(&mut imgui); // step 1 platform.attach_window( imgui.io_mut(), &display.gl_window().window(), HiDpiMode::Default, ); // step 2 let hidpi_factor = platform.hidpi_factor(); let font_size = (13.0 * hidpi_factor) as f32; imgui.fonts().add_font(&[ FontSource::DefaultFontData { config: Some(FontConfig { size_pixels: font_size, ..FontConfig::default() }), }, FontSource::TtfData { data: include_bytes!("resources/mplus-1p-regular.ttf"), size_pixels: font_size, config: Some(FontConfig { rasterizer_multiply: 1.75, glyph_ranges: FontGlyphRanges::japanese(), ..FontConfig::default() }), }, ]); imgui.io_mut().font_global_scale = (1.0 / hidpi_factor) as f32; let renderer = Renderer::init(&mut imgui, &display).expect("Failed to initialize renderer"); let mut frame_times = VecDeque::new(); frame_times.resize(500, 0.0); Arc::new(Mutex::new(OctreeGuiSystem { imgui: Arc::new(Mutex::new(imgui)), platform, renderer, display: Arc::new(Mutex::new(display)), octree_config: OctreeConfig::default(), octree_optimizations: OctreeOptimizations::default(), profiling_data: ProfilingData::default(), frame_times, messages: vec![], })) } fn display_octree_ui(&mut self, ui: &Ui, _config: &OctreeConfig, _info: &OctreeInfo) { if CollapsingHeader::new(im_str!("Settings")) .default_open(true) .build(&ui) { // reset the reset flag self.octree_config.reset = None; let mut modified = false; ui.text(format!("Fractal Selection")); let mut selected_fractal = self.octree_config.fractal.as_mut().unwrap().to_usize().unwrap_or(0); let mut fractal_names = vec![]; for x in 0.. 
{ match FromPrimitive::from_i32(x) { Some(FractalSelection::MandelBulb) => fractal_names.push(im_str!("Mandel Bulb")), Some(FractalSelection::MandelBrot) => fractal_names.push(im_str!("Mandel Brot")), Some(FractalSelection::SierpinskiPyramid) => fractal_names.push(im_str!("Sierpinski Pyramid")), Some(FractalSelection::SierpinskiTetrahedron) => fractal_names.push(im_str!("Sierpinski Tetrahedron")), Some(FractalSelection::MengerSponge) => fractal_names.push(im_str!("Menger Sponge")), Some(FractalSelection::MidpointDisplacement) => fractal_names.push(im_str!("Midpoint Displacement")), _ => break, // leave loop } } if ComboBox::new(im_str!("Select Fractal")) .build_simple_string( &ui, &mut selected_fractal, &fractal_names) { self.octree_config = OctreeConfig::default(); self.octree_config.fractal = FromPrimitive::from_usize(selected_fractal); self.octree_config.reset = Some(true); modified = true; } ui.separator(); ui.text(format!("Performance Settings")); if ui.button(im_str!("Update Now"), [0.0, 0.0]) { self.messages.push(Message::new(self.octree_config.clone())); }; ui.same_line(0.0); if ui.checkbox(im_str!("Continuous Update"), &mut self.octree_config.continuous_update.as_mut().unwrap()) { modified = true; } if Slider::new(im_str!("Subdivision Threshold (px)")) .range(RangeInclusive::new(1.0, 50.0)) .flags(SliderFlags::LOGARITHMIC) .build(&ui, &mut self.octree_config.subdiv_threshold.as_mut().unwrap()) { modified = true; } if Slider::new(im_str!("Distance Scale")) .range(RangeInclusive::new(0.05 as f64, 1.0 as f64)) //.flags(SliderFlags::LOGARITHMIC) .build(&ui, self.octree_config.distance_scale.as_mut().unwrap()) { modified = true; } if Slider::new(im_str!("Max. Octree Nodes")) .range(RangeInclusive::new(1e4 as u64, 2e7 as u64)) .build(&ui, self.octree_config.max_rendered_nodes.as_mut().unwrap()) { modified = true; } if modified { self.messages.push(Message::new(self.octree_config.clone())); } if ui.button(im_str!("Reset Octree"), [0.0, 0.0]) { let prev_selection = self.octree_config.fractal; self.octree_config = OctreeConfig::default(); self.octree_config.reset = Some(true); self.octree_config.fractal = prev_selection; self.messages.push(Message::new(self.octree_config.clone())); }; } } fn display_profiling_ui(&mut self, delta_time: Duration, ui: &Ui) { let rendered_nodes = &mut self.profiling_data.rendered_nodes.unwrap_or(0); let render_time = self.profiling_data.render_time.unwrap_or(0) as f64 / 1e6 as f64; let frame_times = &mut self.frame_times; frame_times.pop_front(); frame_times.push_back(delta_time.as_secs_f32()); let f_times: Vec<f32> = frame_times.iter().cloned().collect(); if CollapsingHeader::new(im_str!("Profiling")) .default_open(true) .build(&ui) { // Plot Frame Times ui.plot_lines(im_str!("Frame Times"), &f_times[..]) .graph_size([0.0, 50.0]) .overlay_text(&im_str!("{} ms", delta_time.as_millis())) .build(); // print times of seperate systems if self.profiling_data.system_times.is_some() { for (system_name, system_time) in self.profiling_data.system_times.as_ref().unwrap() { ui.text(im_str!("{}: {}", system_name, system_time.as_millis())); } } ui.separator(); ui.text(im_str!("Rendered Nodes: {}", rendered_nodes)); ui.text(im_str!("Render Time: {:.2} ms", render_time)); } } fn display_camera_ui(&mut self, ui: &Ui, _camera: &Camera, camera_transform: &mut Transformation) { if CollapsingHeader::new(im_str!("Camera")) .default_open(true) .build(&ui) { let mut view_dir = camera_transform.rotation.rotate_vector(-Vector3::unit_z()); InputFloat3::new( &ui, im_str!("View Direction 
(read only)"), view_dir.as_mut(), ).read_only(true).build(); InputFloat3::new( &ui, im_str!("Camera Position"), camera_transform.position.as_mut(), ).build(); camera_transform.update(); } } } impl System for OctreeGuiSystem { fn
(&mut self) -> Vec<Filter> { vec![ crate::filter!(Octree, Mesh, Transformation), crate::filter!(Camera, Transformation), ] } fn handle_input(&mut self, _event: &Event<()>) { let platform = &mut self.platform; let display = self.display.lock().unwrap(); let gl_window = display.gl_window(); let mut imgui = self.imgui.lock().unwrap(); match _event { Event::MainEventsCleared => { platform .prepare_frame(imgui.io_mut(), &gl_window.window()) // step 4 .expect("Failed to prepare frame"); } Event::WindowEvent { event: WindowEvent::CloseRequested, window_id, } => { if *window_id == gl_window.window().id() { println!("Close Octree Config Window"); gl_window.window().set_visible(false); return; } } Event::WindowEvent { event,.. } => match event { WindowEvent::KeyboardInput { input,.. } => match input { winit::event::KeyboardInput { virtual_keycode, state, .. } => match (virtual_keycode, state) { (Some(VirtualKeyCode::F12), ElementState::Pressed) => { println!("Open Octree Config Window"); gl_window.window().set_visible(true); } _ => (), }, }, _ => (), }, _ => (), } platform.handle_event(imgui.io_mut(), &gl_window.window(), &_event); // step 3 } fn consume_messages(&mut self, messages: &Vec<Message>) { for m in messages { if m.is_type::<ProfilingData>() { let data = m.get_payload::<ProfilingData>().unwrap(); self.profiling_data.replace(data); } if m.is_type::<OctreeOptimizations>() { let data = m.get_payload::<OctreeOptimizations>().unwrap(); self.octree_optimizations = data.clone(); } if m.is_type::<OctreeConfig>() { let data = m.get_payload::<OctreeConfig>().unwrap(); self.octree_config.merge(data); } } } fn execute(&mut self, filter: &Vec<Arc<Mutex<Filter>>>, delta_time: Duration) { let ctx = self.imgui.clone(); let display = self.display.clone(); let mut ctx_lock = ctx.lock().unwrap(); let display_lock = display.lock().unwrap(); let ui = ctx_lock.frame(); let gl_window = display_lock.gl_window(); let octree_entities = &filter[0].lock().unwrap().entities; let camera_entities = &filter[1].lock().unwrap().entities; let window = Window::new(im_str!("Octree")) .collapsible(false) .movable(false) .position([10.0, 10.0], Condition::FirstUseEver) .size([400.0, 740.0], Condition::FirstUseEver); let window_token = window.begin(&ui).unwrap(); for entity in octree_entities { let entitiy_mutex = entity.lock().unwrap(); let _octree_transform = entitiy_mutex .get_component::<Transformation>() .ok() .unwrap(); let octree = entitiy_mutex.get_component::<Octree>().ok().unwrap(); self.display_profiling_ui(delta_time, &ui); ui.new_line(); self.display_octree_ui(&ui, &octree.config, &octree.info); ui.new_line(); } for entity in camera_entities { let mut entitiy_mutex = entity.lock().unwrap(); let mut camera_transform = entitiy_mutex .get_component::<Transformation>() .ok() .unwrap().clone(); let camera = entitiy_mutex.get_component::<Camera>().ok().unwrap(); self.display_camera_ui(&ui, camera, &mut camera_transform); entitiy_mutex.add_component(camera_transform); } window_token.end(&ui); // construct the UI self.platform.prepare_render(&ui, &gl_window.window()); // step 5 // render the UI with a renderer let draw_data = ui.render(); let mut target = display_lock.draw(); target.clear_color_srgb(0.1, 0.1, 0.11, 1.0); self.renderer .render(&mut target, draw_data) .expect("Rendering failed"); target.finish().expect("Failed to swap buffers"); } fn get_messages(&mut self) -> Vec<Message> { let ret = self.messages.clone(); self.messages.clear(); ret } } #[derive(Debug, Clone, Default)] pub struct ProfilingData { pub 
rendered_nodes: Option<u32>, pub instance_data_generation: Option<u64>, pub render_time: Option<u64>, // in nano seconds pub system_times: Option<Vec<(String, Duration)>>, } impl ProfilingData { pub fn replace(&mut self, other: &Self) { Self::replace_option(&mut self.rendered_nodes, &other.rendered_nodes); Self::replace_option( &mut self.instance_data_generation, &other.instance_data_generation, ); Self::replace_option(&mut self.render_time, &other.render_time); Self::replace_option(&mut self.system_times, &other.system_times); } fn replace_option<T>(target: &mut Option<T>, source: &Option<T>) where T: Clone, { match source { Some(val) => target.replace(val.clone()), None => None, }; } } impl Payload for ProfilingData {}
get_filter
identifier_name
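// display_profiling_ui above keeps a 500-entry frame-time history by popping the oldest
// sample and pushing the newest one each frame. A std-only sketch of that fixed-window
// pattern; the window length, wrapper type and average helper are illustrative, not part
// of the crate.
use std::collections::VecDeque;

struct FrameTimes {
    window: VecDeque<f32>,
}

impl FrameTimes {
    fn new(len: usize) -> Self {
        let mut window = VecDeque::new();
        window.resize(len, 0.0); // pre-fill so the plot always has `len` points
        FrameTimes { window }
    }

    fn push(&mut self, dt_seconds: f32) {
        self.window.pop_front();
        self.window.push_back(dt_seconds);
    }

    /// Mean frame time over the window, e.g. for an overlay label next to the plot.
    fn average(&self) -> f32 {
        self.window.iter().sum::<f32>() / self.window.len() as f32
    }
}

fn main() {
    let mut times = FrameTimes::new(4);
    for dt in [0.016_f32, 0.017, 0.016, 0.033] {
        times.push(dt);
    }
    println!("avg frame time: {:.4} s", times.average());
}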
octree_gui.rs
use std::collections::VecDeque; use std::ops::RangeInclusive; use std::sync::{Arc, Mutex}; use std::time::Duration; use cgmath::{Rotation, Vector3}; use glium::{Display, Surface}; use glium::glutin; use glium::glutin::event::WindowEvent; use glium::glutin::window::WindowBuilder; use imgui::*; use imgui::{Context, FontConfig, FontGlyphRanges, FontSource}; use imgui_glium_renderer::Renderer; use imgui_winit_support::{HiDpiMode, WinitPlatform}; use num_traits::{FromPrimitive, ToPrimitive}; use winit::event::{ElementState, Event, VirtualKeyCode}; use crate::core::{Filter, Message, Payload, System}; use crate::NSE; use crate::rendering::{Camera, Mesh, Octree, OctreeConfig, OctreeInfo, OctreeOptimizations, RenderSystem, Transformation}; use crate::rendering::fractal_generators::FractalSelection; pub struct OctreeGuiSystem { imgui: Arc<Mutex<Context>>, platform: WinitPlatform, renderer: Renderer, display: Arc<Mutex<glium::Display>>, // octree data octree_config: OctreeConfig, octree_optimizations: OctreeOptimizations, // profiling data profiling_data: ProfilingData, frame_times: VecDeque<f32>, // message passing messages: Vec<Message>, } impl OctreeGuiSystem { pub fn new(nse: &NSE, _render_system: Arc<Mutex<RenderSystem>>) -> Arc<Mutex<Self>> { let mut imgui = Context::create(); // configure imgui-rs Context if necessary imgui.set_ini_filename(None); let context = glutin::ContextBuilder::new().with_vsync(false); let builder = WindowBuilder::new() .with_title("Octree - Config") .with_decorations(true) .with_inner_size(glutin::dpi::LogicalSize::new(420f64, 768f64)); let display = Display::new(builder, context, &nse.event_loop).expect("Failed to initialize display"); let mut platform = WinitPlatform::init(&mut imgui); // step 1 platform.attach_window( imgui.io_mut(), &display.gl_window().window(), HiDpiMode::Default, ); // step 2 let hidpi_factor = platform.hidpi_factor(); let font_size = (13.0 * hidpi_factor) as f32; imgui.fonts().add_font(&[ FontSource::DefaultFontData { config: Some(FontConfig { size_pixels: font_size, ..FontConfig::default() }), }, FontSource::TtfData { data: include_bytes!("resources/mplus-1p-regular.ttf"), size_pixels: font_size, config: Some(FontConfig { rasterizer_multiply: 1.75, glyph_ranges: FontGlyphRanges::japanese(), ..FontConfig::default() }), }, ]); imgui.io_mut().font_global_scale = (1.0 / hidpi_factor) as f32; let renderer = Renderer::init(&mut imgui, &display).expect("Failed to initialize renderer"); let mut frame_times = VecDeque::new(); frame_times.resize(500, 0.0); Arc::new(Mutex::new(OctreeGuiSystem { imgui: Arc::new(Mutex::new(imgui)), platform, renderer, display: Arc::new(Mutex::new(display)), octree_config: OctreeConfig::default(), octree_optimizations: OctreeOptimizations::default(), profiling_data: ProfilingData::default(), frame_times, messages: vec![], })) } fn display_octree_ui(&mut self, ui: &Ui, _config: &OctreeConfig, _info: &OctreeInfo) { if CollapsingHeader::new(im_str!("Settings")) .default_open(true) .build(&ui) { // reset the reset flag self.octree_config.reset = None; let mut modified = false; ui.text(format!("Fractal Selection")); let mut selected_fractal = self.octree_config.fractal.as_mut().unwrap().to_usize().unwrap_or(0); let mut fractal_names = vec![]; for x in 0.. 
{ match FromPrimitive::from_i32(x) { Some(FractalSelection::MandelBulb) => fractal_names.push(im_str!("Mandel Bulb")), Some(FractalSelection::MandelBrot) => fractal_names.push(im_str!("Mandel Brot")), Some(FractalSelection::SierpinskiPyramid) => fractal_names.push(im_str!("Sierpinski Pyramid")), Some(FractalSelection::SierpinskiTetrahedron) => fractal_names.push(im_str!("Sierpinski Tetrahedron")), Some(FractalSelection::MengerSponge) => fractal_names.push(im_str!("Menger Sponge")), Some(FractalSelection::MidpointDisplacement) => fractal_names.push(im_str!("Midpoint Displacement")), _ => break, // leave loop } } if ComboBox::new(im_str!("Select Fractal")) .build_simple_string( &ui, &mut selected_fractal, &fractal_names) { self.octree_config = OctreeConfig::default(); self.octree_config.fractal = FromPrimitive::from_usize(selected_fractal); self.octree_config.reset = Some(true); modified = true; } ui.separator(); ui.text(format!("Performance Settings")); if ui.button(im_str!("Update Now"), [0.0, 0.0]) { self.messages.push(Message::new(self.octree_config.clone())); }; ui.same_line(0.0); if ui.checkbox(im_str!("Continuous Update"), &mut self.octree_config.continuous_update.as_mut().unwrap()) { modified = true; } if Slider::new(im_str!("Subdivision Threshold (px)")) .range(RangeInclusive::new(1.0, 50.0)) .flags(SliderFlags::LOGARITHMIC) .build(&ui, &mut self.octree_config.subdiv_threshold.as_mut().unwrap()) { modified = true; } if Slider::new(im_str!("Distance Scale")) .range(RangeInclusive::new(0.05 as f64, 1.0 as f64)) //.flags(SliderFlags::LOGARITHMIC) .build(&ui, self.octree_config.distance_scale.as_mut().unwrap()) { modified = true; } if Slider::new(im_str!("Max. Octree Nodes")) .range(RangeInclusive::new(1e4 as u64, 2e7 as u64)) .build(&ui, self.octree_config.max_rendered_nodes.as_mut().unwrap()) { modified = true; } if modified { self.messages.push(Message::new(self.octree_config.clone())); } if ui.button(im_str!("Reset Octree"), [0.0, 0.0]) { let prev_selection = self.octree_config.fractal; self.octree_config = OctreeConfig::default(); self.octree_config.reset = Some(true); self.octree_config.fractal = prev_selection; self.messages.push(Message::new(self.octree_config.clone())); }; } } fn display_profiling_ui(&mut self, delta_time: Duration, ui: &Ui) { let rendered_nodes = &mut self.profiling_data.rendered_nodes.unwrap_or(0); let render_time = self.profiling_data.render_time.unwrap_or(0) as f64 / 1e6 as f64; let frame_times = &mut self.frame_times; frame_times.pop_front(); frame_times.push_back(delta_time.as_secs_f32()); let f_times: Vec<f32> = frame_times.iter().cloned().collect(); if CollapsingHeader::new(im_str!("Profiling")) .default_open(true) .build(&ui) { // Plot Frame Times ui.plot_lines(im_str!("Frame Times"), &f_times[..]) .graph_size([0.0, 50.0]) .overlay_text(&im_str!("{} ms", delta_time.as_millis())) .build(); // print times of seperate systems if self.profiling_data.system_times.is_some() { for (system_name, system_time) in self.profiling_data.system_times.as_ref().unwrap() { ui.text(im_str!("{}: {}", system_name, system_time.as_millis())); } } ui.separator(); ui.text(im_str!("Rendered Nodes: {}", rendered_nodes)); ui.text(im_str!("Render Time: {:.2} ms", render_time)); } } fn display_camera_ui(&mut self, ui: &Ui, _camera: &Camera, camera_transform: &mut Transformation) { if CollapsingHeader::new(im_str!("Camera")) .default_open(true) .build(&ui) { let mut view_dir = camera_transform.rotation.rotate_vector(-Vector3::unit_z()); InputFloat3::new( &ui, im_str!("View Direction 
(read only)"), view_dir.as_mut(), ).read_only(true).build(); InputFloat3::new( &ui, im_str!("Camera Position"), camera_transform.position.as_mut(), ).build(); camera_transform.update(); } } } impl System for OctreeGuiSystem { fn get_filter(&mut self) -> Vec<Filter>
fn handle_input(&mut self, _event: &Event<()>) { let platform = &mut self.platform; let display = self.display.lock().unwrap(); let gl_window = display.gl_window(); let mut imgui = self.imgui.lock().unwrap(); match _event { Event::MainEventsCleared => { platform .prepare_frame(imgui.io_mut(), &gl_window.window()) // step 4 .expect("Failed to prepare frame"); } Event::WindowEvent { event: WindowEvent::CloseRequested, window_id, } => { if *window_id == gl_window.window().id() { println!("Close Octree Config Window"); gl_window.window().set_visible(false); return; } } Event::WindowEvent { event,.. } => match event { WindowEvent::KeyboardInput { input,.. } => match input { winit::event::KeyboardInput { virtual_keycode, state, .. } => match (virtual_keycode, state) { (Some(VirtualKeyCode::F12), ElementState::Pressed) => { println!("Open Octree Config Window"); gl_window.window().set_visible(true); } _ => (), }, }, _ => (), }, _ => (), } platform.handle_event(imgui.io_mut(), &gl_window.window(), &_event); // step 3 } fn consume_messages(&mut self, messages: &Vec<Message>) { for m in messages { if m.is_type::<ProfilingData>() { let data = m.get_payload::<ProfilingData>().unwrap(); self.profiling_data.replace(data); } if m.is_type::<OctreeOptimizations>() { let data = m.get_payload::<OctreeOptimizations>().unwrap(); self.octree_optimizations = data.clone(); } if m.is_type::<OctreeConfig>() { let data = m.get_payload::<OctreeConfig>().unwrap(); self.octree_config.merge(data); } } } fn execute(&mut self, filter: &Vec<Arc<Mutex<Filter>>>, delta_time: Duration) { let ctx = self.imgui.clone(); let display = self.display.clone(); let mut ctx_lock = ctx.lock().unwrap(); let display_lock = display.lock().unwrap(); let ui = ctx_lock.frame(); let gl_window = display_lock.gl_window(); let octree_entities = &filter[0].lock().unwrap().entities; let camera_entities = &filter[1].lock().unwrap().entities; let window = Window::new(im_str!("Octree")) .collapsible(false) .movable(false) .position([10.0, 10.0], Condition::FirstUseEver) .size([400.0, 740.0], Condition::FirstUseEver); let window_token = window.begin(&ui).unwrap(); for entity in octree_entities { let entitiy_mutex = entity.lock().unwrap(); let _octree_transform = entitiy_mutex .get_component::<Transformation>() .ok() .unwrap(); let octree = entitiy_mutex.get_component::<Octree>().ok().unwrap(); self.display_profiling_ui(delta_time, &ui); ui.new_line(); self.display_octree_ui(&ui, &octree.config, &octree.info); ui.new_line(); } for entity in camera_entities { let mut entitiy_mutex = entity.lock().unwrap(); let mut camera_transform = entitiy_mutex .get_component::<Transformation>() .ok() .unwrap().clone(); let camera = entitiy_mutex.get_component::<Camera>().ok().unwrap(); self.display_camera_ui(&ui, camera, &mut camera_transform); entitiy_mutex.add_component(camera_transform); } window_token.end(&ui); // construct the UI self.platform.prepare_render(&ui, &gl_window.window()); // step 5 // render the UI with a renderer let draw_data = ui.render(); let mut target = display_lock.draw(); target.clear_color_srgb(0.1, 0.1, 0.11, 1.0); self.renderer .render(&mut target, draw_data) .expect("Rendering failed"); target.finish().expect("Failed to swap buffers"); } fn get_messages(&mut self) -> Vec<Message> { let ret = self.messages.clone(); self.messages.clear(); ret } } #[derive(Debug, Clone, Default)] pub struct ProfilingData { pub rendered_nodes: Option<u32>, pub instance_data_generation: Option<u64>, pub render_time: Option<u64>, // in nano seconds pub 
system_times: Option<Vec<(String, Duration)>>, } impl ProfilingData { pub fn replace(&mut self, other: &Self) { Self::replace_option(&mut self.rendered_nodes, &other.rendered_nodes); Self::replace_option( &mut self.instance_data_generation, &other.instance_data_generation, ); Self::replace_option(&mut self.render_time, &other.render_time); Self::replace_option(&mut self.system_times, &other.system_times); } fn replace_option<T>(target: &mut Option<T>, source: &Option<T>) where T: Clone, { match source { Some(val) => target.replace(val.clone()), None => None, }; } } impl Payload for ProfilingData {}
{ vec![ crate::filter!(Octree, Mesh, Transformation), crate::filter!(Camera, Transformation), ] }
identifier_body
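// consume_messages above dispatches on the payload type via is_type/get_payload, whose
// implementations are not part of this excerpt. The sketch below shows one plausible,
// std-only way such a Message/Payload mechanism can work, using std::any::Any
// downcasting; the real types in this crate may well be implemented differently.
use std::any::Any;

struct Message {
    payload: Box<dyn Any>,
}

impl Message {
    fn new<T: Any>(payload: T) -> Self {
        Message { payload: Box::new(payload) }
    }
    fn is_type<T: Any>(&self) -> bool {
        self.payload.is::<T>()
    }
    fn get_payload<T: Any>(&self) -> Option<&T> {
        self.payload.downcast_ref::<T>()
    }
}

/// Simplified stand-in for the ProfilingData payload defined above.
#[derive(Debug)]
struct ProfilingData {
    rendered_nodes: Option<u32>,
}

fn main() {
    let messages = vec![
        Message::new(ProfilingData { rendered_nodes: Some(42) }),
        Message::new(String::from("unrelated payload")),
    ];
    for m in &messages {
        // Receivers only act on the payload types they understand.
        if m.is_type::<ProfilingData>() {
            println!("profiling update: {:?}", m.get_payload::<ProfilingData>().unwrap());
        }
    }
}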
octree_gui.rs
use std::collections::VecDeque; use std::ops::RangeInclusive; use std::sync::{Arc, Mutex}; use std::time::Duration; use cgmath::{Rotation, Vector3}; use glium::{Display, Surface}; use glium::glutin; use glium::glutin::event::WindowEvent; use glium::glutin::window::WindowBuilder; use imgui::*; use imgui::{Context, FontConfig, FontGlyphRanges, FontSource}; use imgui_glium_renderer::Renderer; use imgui_winit_support::{HiDpiMode, WinitPlatform}; use num_traits::{FromPrimitive, ToPrimitive}; use winit::event::{ElementState, Event, VirtualKeyCode}; use crate::core::{Filter, Message, Payload, System}; use crate::NSE; use crate::rendering::{Camera, Mesh, Octree, OctreeConfig, OctreeInfo, OctreeOptimizations, RenderSystem, Transformation}; use crate::rendering::fractal_generators::FractalSelection; pub struct OctreeGuiSystem { imgui: Arc<Mutex<Context>>, platform: WinitPlatform, renderer: Renderer, display: Arc<Mutex<glium::Display>>, // octree data octree_config: OctreeConfig, octree_optimizations: OctreeOptimizations, // profiling data profiling_data: ProfilingData, frame_times: VecDeque<f32>, // message passing messages: Vec<Message>, } impl OctreeGuiSystem { pub fn new(nse: &NSE, _render_system: Arc<Mutex<RenderSystem>>) -> Arc<Mutex<Self>> { let mut imgui = Context::create(); // configure imgui-rs Context if necessary imgui.set_ini_filename(None); let context = glutin::ContextBuilder::new().with_vsync(false); let builder = WindowBuilder::new() .with_title("Octree - Config") .with_decorations(true) .with_inner_size(glutin::dpi::LogicalSize::new(420f64, 768f64)); let display = Display::new(builder, context, &nse.event_loop).expect("Failed to initialize display"); let mut platform = WinitPlatform::init(&mut imgui); // step 1 platform.attach_window( imgui.io_mut(), &display.gl_window().window(), HiDpiMode::Default, ); // step 2 let hidpi_factor = platform.hidpi_factor(); let font_size = (13.0 * hidpi_factor) as f32; imgui.fonts().add_font(&[ FontSource::DefaultFontData { config: Some(FontConfig { size_pixels: font_size, ..FontConfig::default() }), }, FontSource::TtfData { data: include_bytes!("resources/mplus-1p-regular.ttf"), size_pixels: font_size, config: Some(FontConfig { rasterizer_multiply: 1.75, glyph_ranges: FontGlyphRanges::japanese(), ..FontConfig::default() }), }, ]); imgui.io_mut().font_global_scale = (1.0 / hidpi_factor) as f32; let renderer = Renderer::init(&mut imgui, &display).expect("Failed to initialize renderer"); let mut frame_times = VecDeque::new(); frame_times.resize(500, 0.0); Arc::new(Mutex::new(OctreeGuiSystem { imgui: Arc::new(Mutex::new(imgui)), platform, renderer, display: Arc::new(Mutex::new(display)), octree_config: OctreeConfig::default(), octree_optimizations: OctreeOptimizations::default(), profiling_data: ProfilingData::default(), frame_times, messages: vec![], })) } fn display_octree_ui(&mut self, ui: &Ui, _config: &OctreeConfig, _info: &OctreeInfo) { if CollapsingHeader::new(im_str!("Settings")) .default_open(true) .build(&ui) { // reset the reset flag self.octree_config.reset = None; let mut modified = false; ui.text(format!("Fractal Selection")); let mut selected_fractal = self.octree_config.fractal.as_mut().unwrap().to_usize().unwrap_or(0); let mut fractal_names = vec![]; for x in 0.. 
{ match FromPrimitive::from_i32(x) { Some(FractalSelection::MandelBulb) => fractal_names.push(im_str!("Mandel Bulb")), Some(FractalSelection::MandelBrot) => fractal_names.push(im_str!("Mandel Brot")), Some(FractalSelection::SierpinskiPyramid) => fractal_names.push(im_str!("Sierpinski Pyramid")), Some(FractalSelection::SierpinskiTetrahedron) => fractal_names.push(im_str!("Sierpinski Tetrahedron")), Some(FractalSelection::MengerSponge) => fractal_names.push(im_str!("Menger Sponge")), Some(FractalSelection::MidpointDisplacement) => fractal_names.push(im_str!("Midpoint Displacement")), _ => break, // leave loop } } if ComboBox::new(im_str!("Select Fractal")) .build_simple_string( &ui, &mut selected_fractal, &fractal_names) { self.octree_config = OctreeConfig::default(); self.octree_config.fractal = FromPrimitive::from_usize(selected_fractal); self.octree_config.reset = Some(true); modified = true; } ui.separator(); ui.text(format!("Performance Settings")); if ui.button(im_str!("Update Now"), [0.0, 0.0]) { self.messages.push(Message::new(self.octree_config.clone())); }; ui.same_line(0.0); if ui.checkbox(im_str!("Continuous Update"), &mut self.octree_config.continuous_update.as_mut().unwrap()) { modified = true; } if Slider::new(im_str!("Subdivision Threshold (px)")) .range(RangeInclusive::new(1.0, 50.0)) .flags(SliderFlags::LOGARITHMIC) .build(&ui, &mut self.octree_config.subdiv_threshold.as_mut().unwrap()) { modified = true; } if Slider::new(im_str!("Distance Scale")) .range(RangeInclusive::new(0.05 as f64, 1.0 as f64)) //.flags(SliderFlags::LOGARITHMIC) .build(&ui, self.octree_config.distance_scale.as_mut().unwrap()) { modified = true; } if Slider::new(im_str!("Max. Octree Nodes")) .range(RangeInclusive::new(1e4 as u64, 2e7 as u64)) .build(&ui, self.octree_config.max_rendered_nodes.as_mut().unwrap()) { modified = true; } if modified { self.messages.push(Message::new(self.octree_config.clone())); } if ui.button(im_str!("Reset Octree"), [0.0, 0.0]) { let prev_selection = self.octree_config.fractal; self.octree_config = OctreeConfig::default(); self.octree_config.reset = Some(true); self.octree_config.fractal = prev_selection; self.messages.push(Message::new(self.octree_config.clone())); }; } } fn display_profiling_ui(&mut self, delta_time: Duration, ui: &Ui) { let rendered_nodes = &mut self.profiling_data.rendered_nodes.unwrap_or(0); let render_time = self.profiling_data.render_time.unwrap_or(0) as f64 / 1e6 as f64; let frame_times = &mut self.frame_times; frame_times.pop_front(); frame_times.push_back(delta_time.as_secs_f32()); let f_times: Vec<f32> = frame_times.iter().cloned().collect(); if CollapsingHeader::new(im_str!("Profiling")) .default_open(true) .build(&ui) { // Plot Frame Times ui.plot_lines(im_str!("Frame Times"), &f_times[..]) .graph_size([0.0, 50.0]) .overlay_text(&im_str!("{} ms", delta_time.as_millis())) .build(); // print times of seperate systems if self.profiling_data.system_times.is_some() { for (system_name, system_time) in self.profiling_data.system_times.as_ref().unwrap() { ui.text(im_str!("{}: {}", system_name, system_time.as_millis())); } } ui.separator(); ui.text(im_str!("Rendered Nodes: {}", rendered_nodes)); ui.text(im_str!("Render Time: {:.2} ms", render_time)); } } fn display_camera_ui(&mut self, ui: &Ui, _camera: &Camera, camera_transform: &mut Transformation) { if CollapsingHeader::new(im_str!("Camera")) .default_open(true) .build(&ui) { let mut view_dir = camera_transform.rotation.rotate_vector(-Vector3::unit_z()); InputFloat3::new( &ui, im_str!("View Direction 
(read only)"),
view_dir.as_mut(), ).read_only(true).build(); InputFloat3::new( &ui, im_str!("Camera Position"), camera_transform.position.as_mut(), ).build(); camera_transform.update(); } } } impl System for OctreeGuiSystem { fn get_filter(&mut self) -> Vec<Filter> { vec![ crate::filter!(Octree, Mesh, Transformation), crate::filter!(Camera, Transformation), ] } fn handle_input(&mut self, _event: &Event<()>) { let platform = &mut self.platform; let display = self.display.lock().unwrap(); let gl_window = display.gl_window(); let mut imgui = self.imgui.lock().unwrap(); match _event { Event::MainEventsCleared => { platform .prepare_frame(imgui.io_mut(), &gl_window.window()) // step 4 .expect("Failed to prepare frame"); } Event::WindowEvent { event: WindowEvent::CloseRequested, window_id, } => { if *window_id == gl_window.window().id() { println!("Close Octree Config Window"); gl_window.window().set_visible(false); return; } } Event::WindowEvent { event,.. } => match event { WindowEvent::KeyboardInput { input,.. } => match input { winit::event::KeyboardInput { virtual_keycode, state, .. } => match (virtual_keycode, state) { (Some(VirtualKeyCode::F12), ElementState::Pressed) => { println!("Open Octree Config Window"); gl_window.window().set_visible(true); } _ => (), }, }, _ => (), }, _ => (), } platform.handle_event(imgui.io_mut(), &gl_window.window(), &_event); // step 3 } fn consume_messages(&mut self, messages: &Vec<Message>) { for m in messages { if m.is_type::<ProfilingData>() { let data = m.get_payload::<ProfilingData>().unwrap(); self.profiling_data.replace(data); } if m.is_type::<OctreeOptimizations>() { let data = m.get_payload::<OctreeOptimizations>().unwrap(); self.octree_optimizations = data.clone(); } if m.is_type::<OctreeConfig>() { let data = m.get_payload::<OctreeConfig>().unwrap(); self.octree_config.merge(data); } } } fn execute(&mut self, filter: &Vec<Arc<Mutex<Filter>>>, delta_time: Duration) { let ctx = self.imgui.clone(); let display = self.display.clone(); let mut ctx_lock = ctx.lock().unwrap(); let display_lock = display.lock().unwrap(); let ui = ctx_lock.frame(); let gl_window = display_lock.gl_window(); let octree_entities = &filter[0].lock().unwrap().entities; let camera_entities = &filter[1].lock().unwrap().entities; let window = Window::new(im_str!("Octree")) .collapsible(false) .movable(false) .position([10.0, 10.0], Condition::FirstUseEver) .size([400.0, 740.0], Condition::FirstUseEver); let window_token = window.begin(&ui).unwrap(); for entity in octree_entities { let entitiy_mutex = entity.lock().unwrap(); let _octree_transform = entitiy_mutex .get_component::<Transformation>() .ok() .unwrap(); let octree = entitiy_mutex.get_component::<Octree>().ok().unwrap(); self.display_profiling_ui(delta_time, &ui); ui.new_line(); self.display_octree_ui(&ui, &octree.config, &octree.info); ui.new_line(); } for entity in camera_entities { let mut entitiy_mutex = entity.lock().unwrap(); let mut camera_transform = entitiy_mutex .get_component::<Transformation>() .ok() .unwrap().clone(); let camera = entitiy_mutex.get_component::<Camera>().ok().unwrap(); self.display_camera_ui(&ui, camera, &mut camera_transform); entitiy_mutex.add_component(camera_transform); } window_token.end(&ui); // construct the UI self.platform.prepare_render(&ui, &gl_window.window()); // step 5 // render the UI with a renderer let draw_data = ui.render(); let mut target = display_lock.draw(); target.clear_color_srgb(0.1, 0.1, 0.11, 1.0); self.renderer .render(&mut target, draw_data) .expect("Rendering failed"); 
target.finish().expect("Failed to swap buffers"); } fn get_messages(&mut self) -> Vec<Message> { let ret = self.messages.clone(); self.messages.clear(); ret } } #[derive(Debug, Clone, Default)] pub struct ProfilingData { pub rendered_nodes: Option<u32>, pub instance_data_generation: Option<u64>, pub render_time: Option<u64>, // in nano seconds pub system_times: Option<Vec<(String, Duration)>>, } impl ProfilingData { pub fn replace(&mut self, other: &Self) { Self::replace_option(&mut self.rendered_nodes, &other.rendered_nodes); Self::replace_option( &mut self.instance_data_generation, &other.instance_data_generation, ); Self::replace_option(&mut self.render_time, &other.render_time); Self::replace_option(&mut self.system_times, &other.system_times); } fn replace_option<T>(target: &mut Option<T>, source: &Option<T>) where T: Clone, { match source { Some(val) => target.replace(val.clone()), None => None, }; } } impl Payload for ProfilingData {}
random_line_split
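// get_messages above drains the outgoing queue by cloning the buffer and clearing it.
// A std-only sketch of the same drain written with std::mem::take, which moves the
// buffer out instead of cloning; whether the clone is actually needed depends on the
// engine's ownership rules, so treat this as an alternative, not a correction.
#[derive(Debug, Clone)]
struct Message(&'static str); // stand-in payload type

struct GuiSystem {
    messages: Vec<Message>,
}

impl GuiSystem {
    // Same observable effect as `let ret = self.messages.clone(); self.messages.clear(); ret`.
    fn get_messages(&mut self) -> Vec<Message> {
        std::mem::take(&mut self.messages)
    }
}

fn main() {
    let mut gui = GuiSystem { messages: vec![Message("octree config changed")] };
    let drained = gui.get_messages();
    assert_eq!(drained.len(), 1);
    assert!(gui.messages.is_empty());
    println!("drained: {:?}", drained);
}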
lib.rs
//! This is a platform agnostic Rust driver for the MAX3010x high-sensitivity //! pulse oximeter and heart-rate sensor for wearable health, based on the //! [`embedded-hal`] traits. //! //! [`embedded-hal`]: https://github.com/rust-embedded/embedded-hal //! //! This driver allows you to: //! - Get the number of samples available on the FIFO. See [`get_available_sample_count()`]. //! - Get the number of samples lost from the FIFO. See [`get_overflow_sample_count()`]. //! - Read samples from the FIFO. See [`read_fifo()`]. //! - Perform a temperature measurement. See [`read_temperature()`]. //! - Change into heart-rate, oximeter or multi-LED modes. See [`into_multi_led()`]. //! - Set the sample averaging. See [`set_sample_averaging()`]. //! - Set the LED pulse amplitude. See [`set_pulse_amplitude()`]. //! - Set the LED pulse width. See [`set_pulse_width()`]. //! - Set the sampling rate. See [`set_sampling_rate()`]. //! - Set the ADC range. See [`set_adc_range()`]. //! - Set the LED time slots in multi-LED mode. [`set_led_time_slots()`]. //! - Enable/disable the FIFO rollover. See [`enable_fifo_rollover()`]. //! - Clear the FIFO. See [`clear_fifo()`]. //! - Wake-up and shutdown the device. See [`shutdown()`]. //! - Perform a software reset. See [`reset()`]. //! - Get the device part and revision id. See [`get_part_id()`]. //! - Interrupts: //! - Read the status of all interrupts. See [`read_interrupt_status()`]. //! - Set FIFO-almost-full level interrupt. See [`set_fifo_almost_full_level_interrupt()`]. //! - Enable/disable the FIFO-almost-full interrupt. See [`enable_fifo_almost_full_interrupt()`]. //! - Enable/disable the ambient-light-cancellation overflow interrupt. See [`enable_alc_overflow_interrupt()`]. //! - Enable/disable the temperature-ready interrupt. See [`enable_temperature_ready_interrupt()`]. //! - Enable/disable the new-FIFO-data-ready interrupt. See [`enable_new_fifo_data_ready_interrupt()`]. //! //! [`get_available_sample_count()`]: struct.Max3010x.html#method.get_available_sample_count //! [`get_overflow_sample_count()`]: struct.Max3010x.html#method.get_overflow_sample_count //! [`read_fifo()`]: struct.Max3010x.html#method.read_fifo //! [`read_temperature()`]: struct.Max3010x.html#method.read_temperature //! [`into_multi_led()`]: struct.Max3010x.html#method.into_multi_led //! [`set_sample_averaging()`]: struct.Max3010x.html#method.set_sample_averaging //! [`set_pulse_width()`]: struct.Max3010x.html#method.set_pulse_width //! [`set_pulse_amplitude()`]: struct.Max3010x.html#method.set_pulse_amplitude //! [`set_sampling_rate()`]: struct.Max3010x.html#method.set_sampling_rate //! [`set_adc_range()`]: struct.Max3010x.html#method.set_adc_range //! [`set_led_time_slots()`]: struct.Max3010x.html#method.set_led_time_slots //! [`shutdown()`]: struct.Max3010x.html#method.shutdown //! [`reset()`]: struct.Max3010x.html#method.reset //! [`set_fifo_almost_full_level_interrupt()`]: struct.Max3010x.html#method.set_fifo_almost_full_level_interrupt //! [`enable_fifo_rollover()`]: struct.Max3010x.html#method.enable_fifo_rollover //! [`clear_fifo()`]: struct.Max3010x.html#method.clear_fifo //! [`read_interrupt_status()`]: struct.Max3010x.html#method.read_interrupt_status //! [`enable_fifo_almost_full_interrupt()`]: struct.Max3010x.html#method.enable_fifo_almost_full_interrupt //! [`enable_alc_overflow_interrupt()`]: struct.Max3010x.html#method.enable_alc_overflow_interrupt //! [`enable_temperature_ready_interrupt()`]: struct.Max3010x.html#method.enable_temperature_ready_interrupt //! 
[`enable_new_fifo_data_ready_interrupt()`]: struct.Max3010x.html#method.enable_new_fifo_data_ready_interrupt //! [`get_part_id()`]: struct.Max3010x.html#method.get_part_id //! //! ## The device //! The `MAX30102` is an integrated pulse oximetry and heart-rate monitor module. //! It includes internal LEDs, photodetectors, optical elements, and low-noise //! electronics with ambient light rejection. The `MAX30102` provides a complete //! system solution to ease the design-in process for mobile and //! wearable devices. //! //! The `MAX30102` operates on a single 1.8V power supply and a separate 3.3V //! power supply for the internal LEDs. Communication is through a standard //! I2C-compatible interface. The module can be shut down through software //! with zero standby current, allowing the power rails to remain //! powered at all times. //! //! Datasheet: //! - [`MAX30102`](https://datasheets.maximintegrated.com/en/ds/MAX30102.pdf) //! //! ## Usage examples (see also examples folder) //! //! To use this driver, import this crate and an `embedded_hal` implementation, //! then instantiate the device. //! //! Please find additional examples using hardware in this repository: [driver-examples] //! //! [driver-examples]: https://github.com/eldruin/driver-examples //! //! ### Read samples in heart-rate mode //! //! ```no_run //! extern crate linux_embedded_hal as hal; //! extern crate max3010x; //! use max3010x::{Max3010x, Led, SampleAveraging}; //! //! # fn main() { //! let dev = hal::I2cdev::new("/dev/i2c-1").unwrap(); //! let mut sensor = Max3010x::new_max30102(dev); //! let mut sensor = sensor.into_heart_rate().unwrap(); //! sensor.set_sample_averaging(SampleAveraging::Sa4).unwrap(); //! sensor.set_pulse_amplitude(Led::All, 15).unwrap(); //! sensor.enable_fifo_rollover().unwrap(); //! let mut data = [0; 3]; //! let samples_read = sensor.read_fifo(&mut data).unwrap(); //! //! // get the I2C device back //! let dev = sensor.destroy(); //! # } //! ``` //! //! ### Set led slots in multi-led mode //! //! ```no_run //! extern crate linux_embedded_hal as hal; //! extern crate max3010x; //! use max3010x::{ Max3010x, Led, TimeSlot }; //! //! # fn main() { //! let dev = hal::I2cdev::new("/dev/i2c-1").unwrap(); //! let mut max30102 = Max3010x::new_max30102(dev); //! let mut max30102 = max30102.into_multi_led().unwrap(); //! max30102.set_pulse_amplitude(Led::All, 15).unwrap(); //! max30102.set_led_time_slots([ //! TimeSlot::Led1, //! TimeSlot::Led2, //! TimeSlot::Led1, //! TimeSlot::Disabled //! ]).unwrap(); //! max30102.enable_fifo_rollover().unwrap(); //! let mut data = [0; 2]; //! let samples_read = max30102.read_fifo(&mut data).unwrap(); //! //! // get the I2C device back //! let dev = max30102.destroy(); //! # } //! ``` //! 
#![deny(missing_docs, unsafe_code)] #![no_std] extern crate embedded_hal as hal; use hal::blocking::i2c; extern crate nb; use core::marker::PhantomData; /// All possible errors in this crate #[derive(Debug)] pub enum Error<E> { /// I²C bus error I2C(E), /// Invalid arguments provided InvalidArguments, } /// LEDs #[derive(Debug, Clone, Copy, PartialEq)] pub enum Led { /// LED1 corresponds to Red in MAX30102 Led1, /// LED1 corresponds to IR in MAX30102 Led2, /// Select all available LEDs in the device All, } /// Multi-LED mode sample time slot configuration #[derive(Debug, Clone, Copy, PartialEq)] pub enum TimeSlot { /// Time slot is disabled Disabled, /// LED 1 active during time slot (corresponds to Red in MAX30102) Led1, /// LED 2 active during time slot (corresponds to IR in MAX30102) Led2, } /// Sample averaging #[derive(Debug, Clone, Copy, PartialEq)] pub enum SampleAveraging { /// 1 (no averaging) (default) Sa1, /// 2 Sa2, /// 4 Sa4, /// 8 Sa8, /// 16 Sa16, /// 32 Sa32, } /// Number of empty data samples when the FIFO almost full interrupt is issued. #[derive(Debug, Clone, Copy, PartialEq)] pub enum FifoAlmostFullLevelInterrupt { /// Interrupt issue when 0 spaces are left in FIFO. (default) L0, /// Interrupt issue when 1 space is left in FIFO. L1, /// Interrupt issue when 2 spaces are left in FIFO. L2, /// Interrupt issue when 3 spaces are left in FIFO. L3, /// Interrupt issue when 4 spaces are left in FIFO. L4, /// Interrupt issue when 5 spaces are left in FIFO. L5, /// Interrupt issue when 6 spaces are left in FIFO. L6, /// Interrupt issue when 7 spaces are left in FIFO. L7, /// Interrupt issue when 8 spaces are left in FIFO. L8, /// Interrupt issue when 9 spaces are left in FIFO. L9, /// Interrupt issue when 10 spaces are left in FIFO. L10, /// Interrupt issue when 11 spaces are left in FIFO. L11, /// Interrupt issue when 12 spaces are left in FIFO. L12, /// Interrupt issue when 13 spaces are left in FIFO. L13, /// Interrupt issue when 14 spaces are left in FIFO. L14, /// Interrupt issue when 15 spaces are left in FIFO. L15, } /// LED pulse width (determines ADC resolution) /// /// This is limited by the current mode and the selected sample rate. #[derive(Debug, Clone, Copy, PartialEq)] pub enum LedPulseWidth { /// 69 μs pulse width (15-bit ADC resolution) Pw69, /// 118 μs pulse width (16-bit ADC resolution) Pw118, /// 215 μs pulse width (17-bit ADC resolution) Pw215, /// 411 μs pulse width (18-bit ADC resolution) Pw411, } /// Sampling rate /// /// This is limited by the current mode and the selected LED pulse width. 
#[derive(Debug, Clone, Copy, PartialEq)] pub enum SamplingRate { /// 50 samples per second Sps50, /// 100 samples per second Sps100, /// 200 samples per second Sps200, /// 400 samples per second Sps400, /// 800 samples per second Sps800, /// 1000 samples per second Sps1000, /// 1600 samples per second Sps1600, /// 3200 samples per second Sps3200, } /// ADC range #[derive(Debug, Clone, Copy, PartialEq)] pub enum AdcRange { /// Full scale 2048 nA Fs2k, /// Full scale 4094 nA Fs4k, /// Full scale 8192 nA Fs8k, /// Full scale 16394 nA Fs16k, } /// Interrupt status flags #[derive(Debug, Clone, Copy)] pub struct InterruptStatus { /// Power ready interrupt pub power_ready: bool, /// FIFO almost full interrupt pub fifo_almost_full: bool, /// New FIFO data ready interrupt pub new_fifo_data_ready: bool, /// Ambient light cancellation overflow interrupt pub alc_overflow: bool, /// Internal die temperature conversion ready interrupt pub temperature_ready: bool, } const DEVICE_ADDRESS: u8 = 0b101_0111; struct Register; impl Register { const INT_STATUS: u8 = 0x0; const INT_EN1: u8 = 0x02; const INT_EN2: u8 = 0x03; const FIFO_WR_PTR: u8 = 0x04; const OVF_COUNTER: u8 = 0x05; const FIFO_DATA: u8 = 0x07; const FIFO_CONFIG: u8 = 0x08; const MODE: u8 = 0x09; const SPO2_CONFIG: u8 = 0x0A; const LED1_PA: u8 = 0x0C; const LED2_PA: u8 = 0x0D; const SLOT_CONFIG0: u8 = 0x11; const TEMP_INT: u8 = 0x1F; const TEMP_CONFIG: u8 = 0x21; const REV_ID: u8 = 0xFE; const PART_ID: u8 = 0xFF; } struct BitFlags; impl BitFlags { const FIFO_A_FULL_INT: u8 = 0b1000_0000; const ALC_OVF_INT: u8 = 0b0010_0000; const DIE_TEMP_RDY_INT: u8 = 0b0000_0010; const PPG_RDY_INT: u8 = 0b0100_0000; const PWR_RDY_INT: u8 = 0b0000_0001; const TEMP_EN: u8 = 0b0000_0001; const SHUTDOWN: u8 = 0b1000_0000; const RESET: u8 = 0b0100_0000; const FIFO_ROLLOVER_EN: u8 = 0b0001_0000; const ADC_RGE0: u8 = 0b0010_0000; const ADC_RGE1: u8 = 0b0100_0000; const LED_PW0: u8 = 0b0000_0001; const LED_PW1: u8 = 0b0000_0010; const SPO2_SR0: u8 = 0b0000_0100; const SPO2_SR1: u8 = 0b0000_1000; const SPO2_SR2: u8 = 0b0001_0000; } #[derive(Debug, Default, Clone, PartialEq)] struct Config { bits: u8, } impl Config { fn with_high(&self, mask: u8) -> Self { Config { bits: self.bits | mask, } } fn with_low(&self, mask: u8) -> Self { Config { bits: self.bits &!mask, } } } #[doc(hidden)] pub mod marker { pub mod mode { pub struct None(()); pub struct HeartRate(()); pub struct Oxime
pub struct MultiLed(()); } pub mod ic { pub struct Max30102(()); } } /// MAX3010x device driver. #[derive(Debug, Default)] pub struct Max3010x<I2C, IC, MODE> { /// The concrete I²C device implementation. i2c: I2C, temperature_measurement_started: bool, mode: Config, fifo_config: Config, spo2_config: Config, int_en1: Config, int_en2: Config, _ic: PhantomData<IC>, _mode: PhantomData<MODE>, } impl<I2C, E> Max3010x<I2C, marker::ic::Max30102, marker::mode::None> where I2C: i2c::WriteRead<Error = E> + i2c::Write<Error = E>, { /// Create new instance of the MAX3010x device. pub fn new_max30102(i2c: I2C) -> Self { Max3010x { i2c, temperature_measurement_started: false, mode: Config { bits: 0 }, fifo_config: Config { bits: 0 }, spo2_config: Config { bits: 0 }, int_en1: Config { bits: 0 }, int_en2: Config { bits: 0 }, _ic: PhantomData, _mode: PhantomData, } } } impl<I2C, E, IC, MODE> Max3010x<I2C, IC, MODE> where I2C: i2c::Write<Error = E>, { /// Destroy driver instance, return I²C bus instance. pub fn destroy(self) -> I2C { self.i2c } fn write_data(&mut self, data: &[u8]) -> Result<(), Error<E>> { self.i2c.write(DEVICE_ADDRESS, data).map_err(Error::I2C) } } mod config; mod reading; mod private { use super::*; pub trait Sealed {} impl Sealed for marker::mode::HeartRate {} impl Sealed for marker::mode::Oximeter {} impl Sealed for marker::mode::MultiLed {} impl Sealed for marker::ic::Max30102 {} }
ter(());
identifier_name
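// The driver encodes the active measurement mode in the type itself (the marker::mode
// structs plus PhantomData), so mode-specific methods only exist after an into_* call.
// A minimal sketch of that typestate pattern with hypothetical names; the real
// into_heart_rate also writes the MODE register over I2C, which is omitted here.
use std::marker::PhantomData;

struct NotConfigured; // stands in for marker::mode::None
struct HeartRate;     // stands in for marker::mode::HeartRate

struct Sensor<MODE> {
    // The real driver also holds the I2C bus and cached register configs here.
    _mode: PhantomData<MODE>,
}

impl Sensor<NotConfigured> {
    fn new() -> Self {
        Sensor { _mode: PhantomData }
    }
    /// Consume the unconfigured sensor and return one typed for heart-rate mode.
    fn into_heart_rate(self) -> Sensor<HeartRate> {
        Sensor { _mode: PhantomData }
    }
}

impl Sensor<HeartRate> {
    /// Only callable once the type says the device is in heart-rate mode.
    fn read_sample(&mut self) -> u32 {
        0 // placeholder; the driver would read this from the FIFO
    }
}

fn main() {
    let sensor = Sensor::<NotConfigured>::new();
    // sensor.read_sample(); // would not compile: wrong typestate
    let mut hr = sensor.into_heart_rate();
    println!("sample: {}", hr.read_sample());
}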
lib.rs
//! This is a platform agnostic Rust driver for the MAX3010x high-sensitivity //! pulse oximeter and heart-rate sensor for wearable health, based on the //! [`embedded-hal`] traits. //! //! [`embedded-hal`]: https://github.com/rust-embedded/embedded-hal //! //! This driver allows you to: //! - Get the number of samples available on the FIFO. See [`get_available_sample_count()`]. //! - Get the number of samples lost from the FIFO. See [`get_overflow_sample_count()`]. //! - Read samples from the FIFO. See [`read_fifo()`]. //! - Perform a temperature measurement. See [`read_temperature()`]. //! - Change into heart-rate, oximeter or multi-LED modes. See [`into_multi_led()`]. //! - Set the sample averaging. See [`set_sample_averaging()`]. //! - Set the LED pulse amplitude. See [`set_pulse_amplitude()`]. //! - Set the LED pulse width. See [`set_pulse_width()`]. //! - Set the sampling rate. See [`set_sampling_rate()`]. //! - Set the ADC range. See [`set_adc_range()`]. //! - Set the LED time slots in multi-LED mode. [`set_led_time_slots()`]. //! - Enable/disable the FIFO rollover. See [`enable_fifo_rollover()`]. //! - Clear the FIFO. See [`clear_fifo()`]. //! - Wake-up and shutdown the device. See [`shutdown()`]. //! - Perform a software reset. See [`reset()`]. //! - Get the device part and revision id. See [`get_part_id()`]. //! - Interrupts: //! - Read the status of all interrupts. See [`read_interrupt_status()`]. //! - Set FIFO-almost-full level interrupt. See [`set_fifo_almost_full_level_interrupt()`]. //! - Enable/disable the FIFO-almost-full interrupt. See [`enable_fifo_almost_full_interrupt()`]. //! - Enable/disable the ambient-light-cancellation overflow interrupt. See [`enable_alc_overflow_interrupt()`]. //! - Enable/disable the temperature-ready interrupt. See [`enable_temperature_ready_interrupt()`]. //! - Enable/disable the new-FIFO-data-ready interrupt. See [`enable_new_fifo_data_ready_interrupt()`]. //! //! [`get_available_sample_count()`]: struct.Max3010x.html#method.get_available_sample_count //! [`get_overflow_sample_count()`]: struct.Max3010x.html#method.get_overflow_sample_count //! [`read_fifo()`]: struct.Max3010x.html#method.read_fifo //! [`read_temperature()`]: struct.Max3010x.html#method.read_temperature //! [`into_multi_led()`]: struct.Max3010x.html#method.into_multi_led //! [`set_sample_averaging()`]: struct.Max3010x.html#method.set_sample_averaging //! [`set_pulse_width()`]: struct.Max3010x.html#method.set_pulse_width //! [`set_pulse_amplitude()`]: struct.Max3010x.html#method.set_pulse_amplitude //! [`set_sampling_rate()`]: struct.Max3010x.html#method.set_sampling_rate //! [`set_adc_range()`]: struct.Max3010x.html#method.set_adc_range //! [`set_led_time_slots()`]: struct.Max3010x.html#method.set_led_time_slots //! [`shutdown()`]: struct.Max3010x.html#method.shutdown //! [`reset()`]: struct.Max3010x.html#method.reset //! [`set_fifo_almost_full_level_interrupt()`]: struct.Max3010x.html#method.set_fifo_almost_full_level_interrupt //! [`enable_fifo_rollover()`]: struct.Max3010x.html#method.enable_fifo_rollover //! [`clear_fifo()`]: struct.Max3010x.html#method.clear_fifo //! [`read_interrupt_status()`]: struct.Max3010x.html#method.read_interrupt_status //! [`enable_fifo_almost_full_interrupt()`]: struct.Max3010x.html#method.enable_fifo_almost_full_interrupt //! [`enable_alc_overflow_interrupt()`]: struct.Max3010x.html#method.enable_alc_overflow_interrupt //! [`enable_temperature_ready_interrupt()`]: struct.Max3010x.html#method.enable_temperature_ready_interrupt //! 
[`enable_new_fifo_data_ready_interrupt()`]: struct.Max3010x.html#method.enable_new_fifo_data_ready_interrupt //! [`get_part_id()`]: struct.Max3010x.html#method.get_part_id //! //! ## The device //! The `MAX30102` is an integrated pulse oximetry and heart-rate monitor module. //! It includes internal LEDs, photodetectors, optical elements, and low-noise //! electronics with ambient light rejection. The `MAX30102` provides a complete //! system solution to ease the design-in process for mobile and //! wearable devices. //! //! The `MAX30102` operates on a single 1.8V power supply and a separate 3.3V //! power supply for the internal LEDs. Communication is through a standard //! I2C-compatible interface. The module can be shut down through software //! with zero standby current, allowing the power rails to remain //! powered at all times. //! //! Datasheet: //! - [`MAX30102`](https://datasheets.maximintegrated.com/en/ds/MAX30102.pdf) //! //! ## Usage examples (see also examples folder) //! //! To use this driver, import this crate and an `embedded_hal` implementation, //! then instantiate the device. //! //! Please find additional examples using hardware in this repository: [driver-examples] //! //! [driver-examples]: https://github.com/eldruin/driver-examples //! //! ### Read samples in heart-rate mode //! //! ```no_run //! extern crate linux_embedded_hal as hal; //! extern crate max3010x; //! use max3010x::{Max3010x, Led, SampleAveraging}; //! //! # fn main() { //! let dev = hal::I2cdev::new("/dev/i2c-1").unwrap(); //! let mut sensor = Max3010x::new_max30102(dev); //! let mut sensor = sensor.into_heart_rate().unwrap(); //! sensor.set_sample_averaging(SampleAveraging::Sa4).unwrap(); //! sensor.set_pulse_amplitude(Led::All, 15).unwrap(); //! sensor.enable_fifo_rollover().unwrap(); //! let mut data = [0; 3]; //! let samples_read = sensor.read_fifo(&mut data).unwrap(); //! //! // get the I2C device back //! let dev = sensor.destroy(); //! # } //! ``` //! //! ### Set led slots in multi-led mode //! //! ```no_run //! extern crate linux_embedded_hal as hal; //! extern crate max3010x; //! use max3010x::{ Max3010x, Led, TimeSlot }; //! //! # fn main() { //! let dev = hal::I2cdev::new("/dev/i2c-1").unwrap(); //! let mut max30102 = Max3010x::new_max30102(dev); //! let mut max30102 = max30102.into_multi_led().unwrap(); //! max30102.set_pulse_amplitude(Led::All, 15).unwrap(); //! max30102.set_led_time_slots([ //! TimeSlot::Led1, //! TimeSlot::Led2, //! TimeSlot::Led1, //! TimeSlot::Disabled //! ]).unwrap(); //! max30102.enable_fifo_rollover().unwrap(); //! let mut data = [0; 2]; //! let samples_read = max30102.read_fifo(&mut data).unwrap(); //! //! // get the I2C device back //! let dev = max30102.destroy(); //! # } //! ``` //! 
#![deny(missing_docs, unsafe_code)] #![no_std] extern crate embedded_hal as hal; use hal::blocking::i2c; extern crate nb; use core::marker::PhantomData; /// All possible errors in this crate #[derive(Debug)] pub enum Error<E> { /// I²C bus error I2C(E), /// Invalid arguments provided InvalidArguments, } /// LEDs #[derive(Debug, Clone, Copy, PartialEq)] pub enum Led { /// LED1 corresponds to Red in MAX30102 Led1, /// LED1 corresponds to IR in MAX30102 Led2, /// Select all available LEDs in the device All, } /// Multi-LED mode sample time slot configuration #[derive(Debug, Clone, Copy, PartialEq)] pub enum TimeSlot { /// Time slot is disabled Disabled, /// LED 1 active during time slot (corresponds to Red in MAX30102) Led1, /// LED 2 active during time slot (corresponds to IR in MAX30102) Led2, } /// Sample averaging #[derive(Debug, Clone, Copy, PartialEq)] pub enum SampleAveraging { /// 1 (no averaging) (default) Sa1, /// 2 Sa2, /// 4 Sa4, /// 8 Sa8, /// 16 Sa16, /// 32 Sa32, } /// Number of empty data samples when the FIFO almost full interrupt is issued. #[derive(Debug, Clone, Copy, PartialEq)] pub enum FifoAlmostFullLevelInterrupt { /// Interrupt issue when 0 spaces are left in FIFO. (default) L0, /// Interrupt issue when 1 space is left in FIFO. L1, /// Interrupt issue when 2 spaces are left in FIFO. L2, /// Interrupt issue when 3 spaces are left in FIFO. L3, /// Interrupt issue when 4 spaces are left in FIFO. L4, /// Interrupt issue when 5 spaces are left in FIFO. L5, /// Interrupt issue when 6 spaces are left in FIFO. L6, /// Interrupt issue when 7 spaces are left in FIFO. L7, /// Interrupt issue when 8 spaces are left in FIFO. L8, /// Interrupt issue when 9 spaces are left in FIFO. L9, /// Interrupt issue when 10 spaces are left in FIFO. L10, /// Interrupt issue when 11 spaces are left in FIFO. L11, /// Interrupt issue when 12 spaces are left in FIFO. L12, /// Interrupt issue when 13 spaces are left in FIFO. L13, /// Interrupt issue when 14 spaces are left in FIFO. L14, /// Interrupt issue when 15 spaces are left in FIFO. L15, } /// LED pulse width (determines ADC resolution) /// /// This is limited by the current mode and the selected sample rate. #[derive(Debug, Clone, Copy, PartialEq)] pub enum LedPulseWidth { /// 69 μs pulse width (15-bit ADC resolution) Pw69, /// 118 μs pulse width (16-bit ADC resolution) Pw118, /// 215 μs pulse width (17-bit ADC resolution) Pw215, /// 411 μs pulse width (18-bit ADC resolution) Pw411, } /// Sampling rate /// /// This is limited by the current mode and the selected LED pulse width. #[derive(Debug, Clone, Copy, PartialEq)] pub enum SamplingRate { /// 50 samples per second Sps50, /// 100 samples per second Sps100, /// 200 samples per second Sps200, /// 400 samples per second Sps400, /// 800 samples per second Sps800, /// 1000 samples per second Sps1000, /// 1600 samples per second Sps1600, /// 3200 samples per second Sps3200, } /// ADC range #[derive(Debug, Clone, Copy, PartialEq)] pub enum AdcRange { /// Full scale 2048 nA Fs2k, /// Full scale 4094 nA Fs4k, /// Full scale 8192 nA Fs8k, /// Full scale 16394 nA Fs16k, } /// Interrupt status flags #[derive(Debug, Clone, Copy)] pub struct InterruptStatus { /// Power ready interrupt pub power_ready: bool, /// FIFO almost full interrupt pub fifo_almost_full: bool, /// New FIFO data ready interrupt pub new_fifo_data_ready: bool, /// Ambient light cancellation overflow interrupt
const DEVICE_ADDRESS: u8 = 0b101_0111; struct Register; impl Register { const INT_STATUS: u8 = 0x0; const INT_EN1: u8 = 0x02; const INT_EN2: u8 = 0x03; const FIFO_WR_PTR: u8 = 0x04; const OVF_COUNTER: u8 = 0x05; const FIFO_DATA: u8 = 0x07; const FIFO_CONFIG: u8 = 0x08; const MODE: u8 = 0x09; const SPO2_CONFIG: u8 = 0x0A; const LED1_PA: u8 = 0x0C; const LED2_PA: u8 = 0x0D; const SLOT_CONFIG0: u8 = 0x11; const TEMP_INT: u8 = 0x1F; const TEMP_CONFIG: u8 = 0x21; const REV_ID: u8 = 0xFE; const PART_ID: u8 = 0xFF; } struct BitFlags; impl BitFlags { const FIFO_A_FULL_INT: u8 = 0b1000_0000; const ALC_OVF_INT: u8 = 0b0010_0000; const DIE_TEMP_RDY_INT: u8 = 0b0000_0010; const PPG_RDY_INT: u8 = 0b0100_0000; const PWR_RDY_INT: u8 = 0b0000_0001; const TEMP_EN: u8 = 0b0000_0001; const SHUTDOWN: u8 = 0b1000_0000; const RESET: u8 = 0b0100_0000; const FIFO_ROLLOVER_EN: u8 = 0b0001_0000; const ADC_RGE0: u8 = 0b0010_0000; const ADC_RGE1: u8 = 0b0100_0000; const LED_PW0: u8 = 0b0000_0001; const LED_PW1: u8 = 0b0000_0010; const SPO2_SR0: u8 = 0b0000_0100; const SPO2_SR1: u8 = 0b0000_1000; const SPO2_SR2: u8 = 0b0001_0000; } #[derive(Debug, Default, Clone, PartialEq)] struct Config { bits: u8, } impl Config { fn with_high(&self, mask: u8) -> Self { Config { bits: self.bits | mask, } } fn with_low(&self, mask: u8) -> Self { Config { bits: self.bits &!mask, } } } #[doc(hidden)] pub mod marker { pub mod mode { pub struct None(()); pub struct HeartRate(()); pub struct Oximeter(()); pub struct MultiLed(()); } pub mod ic { pub struct Max30102(()); } } /// MAX3010x device driver. #[derive(Debug, Default)] pub struct Max3010x<I2C, IC, MODE> { /// The concrete I²C device implementation. i2c: I2C, temperature_measurement_started: bool, mode: Config, fifo_config: Config, spo2_config: Config, int_en1: Config, int_en2: Config, _ic: PhantomData<IC>, _mode: PhantomData<MODE>, } impl<I2C, E> Max3010x<I2C, marker::ic::Max30102, marker::mode::None> where I2C: i2c::WriteRead<Error = E> + i2c::Write<Error = E>, { /// Create new instance of the MAX3010x device. pub fn new_max30102(i2c: I2C) -> Self { Max3010x { i2c, temperature_measurement_started: false, mode: Config { bits: 0 }, fifo_config: Config { bits: 0 }, spo2_config: Config { bits: 0 }, int_en1: Config { bits: 0 }, int_en2: Config { bits: 0 }, _ic: PhantomData, _mode: PhantomData, } } } impl<I2C, E, IC, MODE> Max3010x<I2C, IC, MODE> where I2C: i2c::Write<Error = E>, { /// Destroy driver instance, return I²C bus instance. pub fn destroy(self) -> I2C { self.i2c } fn write_data(&mut self, data: &[u8]) -> Result<(), Error<E>> { self.i2c.write(DEVICE_ADDRESS, data).map_err(Error::I2C) } } mod config; mod reading; mod private { use super::*; pub trait Sealed {} impl Sealed for marker::mode::HeartRate {} impl Sealed for marker::mode::Oximeter {} impl Sealed for marker::mode::MultiLed {} impl Sealed for marker::ic::Max30102 {} }
pub alc_overflow: bool, /// Internal die temperature conversion ready interrupt pub temperature_ready: bool, }
random_line_split
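The MAX3010x entry above keeps a shadow copy of each configuration register in a small `Config` struct and edits it with `with_high` / `with_low` before writing it over I²C. The following standalone sketch is illustrative only (the consuming-`self` signatures, the `main` function, and the derive list are assumptions, not the crate's API); it shows how those bitmask helpers compose with masks like the ones in the driver's `BitFlags` table:

```rust
#[derive(Debug, Default, Clone, Copy, PartialEq)]
struct Config {
    bits: u8,
}

impl Config {
    /// Return a copy with every bit in `mask` set.
    fn with_high(self, mask: u8) -> Self {
        Config { bits: self.bits | mask }
    }
    /// Return a copy with every bit in `mask` cleared.
    fn with_low(self, mask: u8) -> Self {
        Config { bits: self.bits & !mask }
    }
}

fn main() {
    // Mask values copied from the driver's BitFlags table above.
    const FIFO_ROLLOVER_EN: u8 = 0b0001_0000;
    const SHUTDOWN: u8 = 0b1000_0000;

    let cfg = Config::default()
        .with_high(FIFO_ROLLOVER_EN) // enable FIFO rollover
        .with_high(SHUTDOWN)         // request shutdown...
        .with_low(SHUTDOWN);         // ...then clear it again

    assert_eq!(cfg.bits, FIFO_ROLLOVER_EN);
    println!("shadow register value: {:08b}", cfg.bits);
}
```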
asn1.rs
//! Support for ECDSA signatures encoded as ASN.1 DER. // Adapted from BearSSL. Copyright (c) 2016 Thomas Pornin <[email protected]>. // Relicensed under Apache 2.0 + MIT (from original MIT) with permission. // // <https://www.bearssl.org/gitweb/?p=BearSSL;a=blob;f=src/ec/ecdsa_atr.c> // <https://www.bearssl.org/gitweb/?p=BearSSL;a=blob;f=src/ec/ecdsa_rta.c> use crate::{ generic_array::{typenum::Unsigned, ArrayLength, GenericArray}, Error, }; use core::{ convert::{TryFrom, TryInto}, fmt, ops::{Add, Range}, }; use elliptic_curve::{consts::U9, weierstrass::Curve}; #[cfg(feature = "alloc")] use alloc::boxed::Box; /// Maximum overhead of an ASN.1 DER-encoded ECDSA signature for a given curve: /// 9-bytes. /// /// Includes 3-byte ASN.1 DER header: /// /// - 1-byte: ASN.1 `SEQUENCE` tag (0x30) /// - 2-byte: length /// ///...followed by two ASN.1 `INTEGER` values, which each have a header whose /// maximum length is the following: /// /// - 1-byte: ASN.1 `INTEGER` tag (0x02) /// - 1-byte: length /// - 1-byte: zero to indicate value is positive (`INTEGER` is signed) pub type MaxOverhead = U9; /// Maximum size of an ASN.1 DER encoded signature for the given elliptic curve. pub type MaxSize<C> = <<<C as elliptic_curve::Curve>::FieldSize as Add>::Output as Add<MaxOverhead>>::Output; /// Byte array containing a serialized ASN.1 signature type DocumentBytes<C> = GenericArray<u8, MaxSize<C>>; /// ASN.1 `INTEGER` tag const INTEGER_TAG: u8 = 0x02; /// ASN.1 `SEQUENCE` tag const SEQUENCE_TAG: u8 = 0x30; /// ASN.1 DER-encoded signature. /// /// Generic over the scalar size of the elliptic curve. pub struct Signature<C> where C: Curve, C::FieldSize: Add + ArrayLength<u8>, MaxSize<C>: ArrayLength<u8>, <C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>, { /// ASN.1 DER-encoded signature data bytes: DocumentBytes<C>, /// Range of the `r` value within the signature r_range: Range<usize>, /// Range of the `s` value within the signature s_range: Range<usize>, } impl<C> signature::Signature for Signature<C> where C: Curve, C::FieldSize: Add + ArrayLength<u8>, MaxSize<C>: ArrayLength<u8>, <C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>, { /// Parse an ASN.1 DER-encoded ECDSA signature from a byte slice fn from_bytes(bytes: &[u8]) -> Result<Self, Error> { bytes.try_into() } } #[allow(clippy::len_without_is_empty)] impl<C> Signature<C> where C: Curve, C::FieldSize: Add + ArrayLength<u8>, MaxSize<C>: ArrayLength<u8>, <C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>, { /// Get the length of the signature in bytes pub fn len(&self) -> usize { self.s_range.end } /// Borrow this signature as a byte slice pub fn
(&self) -> &[u8] { &self.bytes.as_slice()[..self.len()] } /// Serialize this signature as a boxed byte slice #[cfg(feature = "alloc")] pub fn to_bytes(&self) -> Box<[u8]> { self.as_bytes().to_vec().into_boxed_slice() } /// Create an ASN.1 DER encoded signature from big endian `r` and `s` scalars pub(crate) fn from_scalar_bytes(r: &[u8], s: &[u8]) -> Self { let r_len = int_length(r); let s_len = int_length(s); let scalar_size = C::FieldSize::to_usize(); let mut bytes = DocumentBytes::<C>::default(); // SEQUENCE header bytes[0] = SEQUENCE_TAG as u8; let zlen = r_len.checked_add(s_len).unwrap().checked_add(4).unwrap(); let offset = if zlen >= 0x80 { bytes[1] = 0x81; bytes[2] = zlen as u8; 3 } else { bytes[1] = zlen as u8; 2 }; // First INTEGER (r) serialize_int(r, &mut bytes[offset..], r_len, scalar_size); let r_end = offset.checked_add(2).unwrap().checked_add(r_len).unwrap(); // Second INTEGER (s) serialize_int(s, &mut bytes[r_end..], s_len, scalar_size); let s_end = r_end.checked_add(2).unwrap().checked_add(s_len).unwrap(); bytes[..s_end] .try_into() .expect("generated invalid ASN.1 DER") } /// Get the `r` component of the signature (leading zeros removed) pub(crate) fn r(&self) -> &[u8] { &self.bytes[self.r_range.clone()] } /// Get the `s` component of the signature (leading zeros removed) pub(crate) fn s(&self) -> &[u8] { &self.bytes[self.s_range.clone()] } } impl<C> AsRef<[u8]> for Signature<C> where C: Curve, C::FieldSize: Add + ArrayLength<u8>, MaxSize<C>: ArrayLength<u8>, <C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>, { fn as_ref(&self) -> &[u8] { self.as_bytes() } } impl<C> fmt::Debug for Signature<C> where C: Curve, C::FieldSize: Add + ArrayLength<u8>, MaxSize<C>: ArrayLength<u8>, <C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("asn1::Signature") .field("r", &self.r()) .field("s", &self.s()) .finish() } } impl<C> TryFrom<&[u8]> for Signature<C> where C: Curve, C::FieldSize: Add + ArrayLength<u8>, MaxSize<C>: ArrayLength<u8>, <C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>, { type Error = Error; fn try_from(bytes: &[u8]) -> Result<Self, Error> { // Signature format is a SEQUENCE of two INTEGER values. We // support only integers of less than 127 bytes each (signed // encoding) so the resulting raw signature will have length // at most 254 bytes. // // First byte is SEQUENCE tag. if bytes[0]!= SEQUENCE_TAG as u8 { return Err(Error::new()); } // The SEQUENCE length will be encoded over one or two bytes. We // limit the total SEQUENCE contents to 255 bytes, because it // makes things simpler; this is enough for subgroup orders up // to 999 bits. 
let mut zlen = bytes[1] as usize; let offset = if zlen > 0x80 { if zlen!= 0x81 { return Err(Error::new()); } zlen = bytes[2] as usize; 3 } else { 2 }; if zlen!= bytes.len().checked_sub(offset).unwrap() { return Err(Error::new()); } // First INTEGER (r) let r_range = parse_int(&bytes[offset..], C::FieldSize::to_usize())?; let r_start = offset.checked_add(r_range.start).unwrap(); let r_end = offset.checked_add(r_range.end).unwrap(); // Second INTEGER (s) let s_range = parse_int(&bytes[r_end..], C::FieldSize::to_usize())?; let s_start = r_end.checked_add(s_range.start).unwrap(); let s_end = r_end.checked_add(s_range.end).unwrap(); if s_end!= bytes.as_ref().len() { return Err(Error::new()); } let mut byte_arr = DocumentBytes::<C>::default(); byte_arr[..s_end].copy_from_slice(bytes.as_ref()); Ok(Signature { bytes: byte_arr, r_range: Range { start: r_start, end: r_end, }, s_range: Range { start: s_start, end: s_end, }, }) } } #[cfg(all(feature = "digest", feature = "hazmat"))] impl<C> signature::PrehashSignature for Signature<C> where C: Curve + crate::hazmat::DigestPrimitive, C::FieldSize: Add + ArrayLength<u8>, MaxSize<C>: ArrayLength<u8>, <C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>, { type Digest = C::Digest; } /// Parse an integer from its ASN.1 DER serialization fn parse_int(bytes: &[u8], scalar_size: usize) -> Result<Range<usize>, Error> { if bytes.len() < 3 { return Err(Error::new()); } if bytes[0]!= INTEGER_TAG as u8 { return Err(Error::new()); } let len = bytes[1] as usize; if len >= 0x80 || len.checked_add(2).unwrap() > bytes.len() { return Err(Error::new()); } let mut start = 2usize; let end = start.checked_add(len).unwrap(); start = start .checked_add(trim_zeroes(&bytes[start..end], scalar_size)?) .unwrap(); Ok(Range { start, end }) } /// Serialize scalar as ASN.1 DER fn serialize_int(scalar: &[u8], out: &mut [u8], len: usize, scalar_size: usize) { out[0] = INTEGER_TAG as u8; out[1] = len as u8; if len > scalar_size { out[2] = 0x00; out[3..scalar_size.checked_add(3).unwrap()].copy_from_slice(scalar); } else { out[2..len.checked_add(2).unwrap()] .copy_from_slice(&scalar[scalar_size.checked_sub(len).unwrap()..]); } } /// Compute ASN.1 DER encoded length for the provided scalar. The ASN.1 /// encoding is signed, so its leading bit must have value 0; it must also be /// of minimal length (so leading bytes of value 0 must be removed, except if /// that would contradict the rule about the sign bit). 
fn int_length(mut x: &[u8]) -> usize { while!x.is_empty() && x[0] == 0 { x = &x[1..]; } if x.is_empty() || x[0] >= 0x80 { x.len().checked_add(1).unwrap() } else { x.len() } } /// Compute an offset within an ASN.1 INTEGER after skipping leading zeroes fn trim_zeroes(mut bytes: &[u8], scalar_size: usize) -> Result<usize, Error> { let mut offset = 0; if bytes.len() > scalar_size { if bytes.len()!= scalar_size.checked_add(1).unwrap() { return Err(Error::new()); } if bytes[0]!= 0 { return Err(Error::new()); } bytes = &bytes[1..]; offset += 1; } while!bytes.is_empty() && bytes[0] == 0 { bytes = &bytes[1..]; offset += 1; } Ok(offset) } #[cfg(all(feature = "dev", test))] mod tests { use crate::dev::curve::Signature; use signature::Signature as _; const EXAMPLE_SIGNATURE: [u8; 64] = [ 0xf3, 0xac, 0x80, 0x61, 0xb5, 0x14, 0x79, 0x5b, 0x88, 0x43, 0xe3, 0xd6, 0x62, 0x95, 0x27, 0xed, 0x2a, 0xfd, 0x6b, 0x1f, 0x6a, 0x55, 0x5a, 0x7a, 0xca, 0xbb, 0x5e, 0x6f, 0x79, 0xc8, 0xc2, 0xac, 0x8b, 0xf7, 0x78, 0x19, 0xca, 0x5, 0xa6, 0xb2, 0x78, 0x6c, 0x76, 0x26, 0x2b, 0xf7, 0x37, 0x1c, 0xef, 0x97, 0xb2, 0x18, 0xe9, 0x6f, 0x17, 0x5a, 0x3c, 0xcd, 0xda, 0x2a, 0xcc, 0x5, 0x89, 0x3, ]; #[test] fn test_fixed_to_asn1_signature_roundtrip() { let signature1 = Signature::from_bytes(&EXAMPLE_SIGNATURE).unwrap(); // Convert to ASN.1 DER and back let asn1_signature = signature1.to_asn1(); let signature2 = Signature::from_asn1(asn1_signature.as_ref()).unwrap(); assert_eq!(signature1, signature2); } }
as_bytes
identifier_name
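The `int_length` helper documented in the asn1.rs entry above encodes the DER rule for signed INTEGERs: strip leading zero bytes for minimal length, then add one padding byte back if the most significant remaining bit is set, so the value stays non-negative. A self-contained sketch of that rule with a few worked cases (the function body mirrors the file; the `main` function and its test vectors are added for illustration):

```rust
fn int_length(mut x: &[u8]) -> usize {
    while !x.is_empty() && x[0] == 0 {
        x = &x[1..]; // minimal encoding: drop leading zero bytes
    }
    if x.is_empty() || x[0] >= 0x80 {
        x.len() + 1 // sign bit would be set, so a 0x00 pad byte is required
    } else {
        x.len()
    }
}

fn main() {
    assert_eq!(int_length(&[0x00, 0x00, 0x7f]), 1); // zeros stripped, 0x7f fits as-is
    assert_eq!(int_length(&[0x80, 0x01]), 3);       // high bit set: 0x00 pad added
    assert_eq!(int_length(&[0x00, 0x00]), 1);       // zero still takes one 0x00 byte
}
```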
asn1.rs
//! Support for ECDSA signatures encoded as ASN.1 DER. // Adapted from BearSSL. Copyright (c) 2016 Thomas Pornin <[email protected]>. // Relicensed under Apache 2.0 + MIT (from original MIT) with permission. // // <https://www.bearssl.org/gitweb/?p=BearSSL;a=blob;f=src/ec/ecdsa_atr.c> // <https://www.bearssl.org/gitweb/?p=BearSSL;a=blob;f=src/ec/ecdsa_rta.c> use crate::{ generic_array::{typenum::Unsigned, ArrayLength, GenericArray}, Error, }; use core::{ convert::{TryFrom, TryInto}, fmt, ops::{Add, Range}, }; use elliptic_curve::{consts::U9, weierstrass::Curve}; #[cfg(feature = "alloc")] use alloc::boxed::Box; /// Maximum overhead of an ASN.1 DER-encoded ECDSA signature for a given curve: /// 9-bytes. /// /// Includes 3-byte ASN.1 DER header: /// /// - 1-byte: ASN.1 `SEQUENCE` tag (0x30) /// - 2-byte: length /// ///...followed by two ASN.1 `INTEGER` values, which each have a header whose /// maximum length is the following: /// /// - 1-byte: ASN.1 `INTEGER` tag (0x02) /// - 1-byte: length /// - 1-byte: zero to indicate value is positive (`INTEGER` is signed) pub type MaxOverhead = U9; /// Maximum size of an ASN.1 DER encoded signature for the given elliptic curve. pub type MaxSize<C> = <<<C as elliptic_curve::Curve>::FieldSize as Add>::Output as Add<MaxOverhead>>::Output; /// Byte array containing a serialized ASN.1 signature type DocumentBytes<C> = GenericArray<u8, MaxSize<C>>; /// ASN.1 `INTEGER` tag const INTEGER_TAG: u8 = 0x02; /// ASN.1 `SEQUENCE` tag const SEQUENCE_TAG: u8 = 0x30; /// ASN.1 DER-encoded signature. /// /// Generic over the scalar size of the elliptic curve. pub struct Signature<C> where C: Curve, C::FieldSize: Add + ArrayLength<u8>, MaxSize<C>: ArrayLength<u8>, <C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>, { /// ASN.1 DER-encoded signature data bytes: DocumentBytes<C>, /// Range of the `r` value within the signature r_range: Range<usize>, /// Range of the `s` value within the signature s_range: Range<usize>, } impl<C> signature::Signature for Signature<C> where C: Curve, C::FieldSize: Add + ArrayLength<u8>, MaxSize<C>: ArrayLength<u8>, <C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>, { /// Parse an ASN.1 DER-encoded ECDSA signature from a byte slice fn from_bytes(bytes: &[u8]) -> Result<Self, Error> { bytes.try_into() } } #[allow(clippy::len_without_is_empty)] impl<C> Signature<C> where C: Curve, C::FieldSize: Add + ArrayLength<u8>, MaxSize<C>: ArrayLength<u8>, <C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>, { /// Get the length of the signature in bytes pub fn len(&self) -> usize { self.s_range.end } /// Borrow this signature as a byte slice pub fn as_bytes(&self) -> &[u8] { &self.bytes.as_slice()[..self.len()] } /// Serialize this signature as a boxed byte slice #[cfg(feature = "alloc")] pub fn to_bytes(&self) -> Box<[u8]> { self.as_bytes().to_vec().into_boxed_slice() } /// Create an ASN.1 DER encoded signature from big endian `r` and `s` scalars pub(crate) fn from_scalar_bytes(r: &[u8], s: &[u8]) -> Self
serialize_int(r, &mut bytes[offset..], r_len, scalar_size); let r_end = offset.checked_add(2).unwrap().checked_add(r_len).unwrap(); // Second INTEGER (s) serialize_int(s, &mut bytes[r_end..], s_len, scalar_size); let s_end = r_end.checked_add(2).unwrap().checked_add(s_len).unwrap(); bytes[..s_end] .try_into() .expect("generated invalid ASN.1 DER") } /// Get the `r` component of the signature (leading zeros removed) pub(crate) fn r(&self) -> &[u8] { &self.bytes[self.r_range.clone()] } /// Get the `s` component of the signature (leading zeros removed) pub(crate) fn s(&self) -> &[u8] { &self.bytes[self.s_range.clone()] } } impl<C> AsRef<[u8]> for Signature<C> where C: Curve, C::FieldSize: Add + ArrayLength<u8>, MaxSize<C>: ArrayLength<u8>, <C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>, { fn as_ref(&self) -> &[u8] { self.as_bytes() } } impl<C> fmt::Debug for Signature<C> where C: Curve, C::FieldSize: Add + ArrayLength<u8>, MaxSize<C>: ArrayLength<u8>, <C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("asn1::Signature") .field("r", &self.r()) .field("s", &self.s()) .finish() } } impl<C> TryFrom<&[u8]> for Signature<C> where C: Curve, C::FieldSize: Add + ArrayLength<u8>, MaxSize<C>: ArrayLength<u8>, <C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>, { type Error = Error; fn try_from(bytes: &[u8]) -> Result<Self, Error> { // Signature format is a SEQUENCE of two INTEGER values. We // support only integers of less than 127 bytes each (signed // encoding) so the resulting raw signature will have length // at most 254 bytes. // // First byte is SEQUENCE tag. if bytes[0]!= SEQUENCE_TAG as u8 { return Err(Error::new()); } // The SEQUENCE length will be encoded over one or two bytes. We // limit the total SEQUENCE contents to 255 bytes, because it // makes things simpler; this is enough for subgroup orders up // to 999 bits. 
let mut zlen = bytes[1] as usize; let offset = if zlen > 0x80 { if zlen!= 0x81 { return Err(Error::new()); } zlen = bytes[2] as usize; 3 } else { 2 }; if zlen!= bytes.len().checked_sub(offset).unwrap() { return Err(Error::new()); } // First INTEGER (r) let r_range = parse_int(&bytes[offset..], C::FieldSize::to_usize())?; let r_start = offset.checked_add(r_range.start).unwrap(); let r_end = offset.checked_add(r_range.end).unwrap(); // Second INTEGER (s) let s_range = parse_int(&bytes[r_end..], C::FieldSize::to_usize())?; let s_start = r_end.checked_add(s_range.start).unwrap(); let s_end = r_end.checked_add(s_range.end).unwrap(); if s_end!= bytes.as_ref().len() { return Err(Error::new()); } let mut byte_arr = DocumentBytes::<C>::default(); byte_arr[..s_end].copy_from_slice(bytes.as_ref()); Ok(Signature { bytes: byte_arr, r_range: Range { start: r_start, end: r_end, }, s_range: Range { start: s_start, end: s_end, }, }) } } #[cfg(all(feature = "digest", feature = "hazmat"))] impl<C> signature::PrehashSignature for Signature<C> where C: Curve + crate::hazmat::DigestPrimitive, C::FieldSize: Add + ArrayLength<u8>, MaxSize<C>: ArrayLength<u8>, <C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>, { type Digest = C::Digest; } /// Parse an integer from its ASN.1 DER serialization fn parse_int(bytes: &[u8], scalar_size: usize) -> Result<Range<usize>, Error> { if bytes.len() < 3 { return Err(Error::new()); } if bytes[0]!= INTEGER_TAG as u8 { return Err(Error::new()); } let len = bytes[1] as usize; if len >= 0x80 || len.checked_add(2).unwrap() > bytes.len() { return Err(Error::new()); } let mut start = 2usize; let end = start.checked_add(len).unwrap(); start = start .checked_add(trim_zeroes(&bytes[start..end], scalar_size)?) .unwrap(); Ok(Range { start, end }) } /// Serialize scalar as ASN.1 DER fn serialize_int(scalar: &[u8], out: &mut [u8], len: usize, scalar_size: usize) { out[0] = INTEGER_TAG as u8; out[1] = len as u8; if len > scalar_size { out[2] = 0x00; out[3..scalar_size.checked_add(3).unwrap()].copy_from_slice(scalar); } else { out[2..len.checked_add(2).unwrap()] .copy_from_slice(&scalar[scalar_size.checked_sub(len).unwrap()..]); } } /// Compute ASN.1 DER encoded length for the provided scalar. The ASN.1 /// encoding is signed, so its leading bit must have value 0; it must also be /// of minimal length (so leading bytes of value 0 must be removed, except if /// that would contradict the rule about the sign bit). 
fn int_length(mut x: &[u8]) -> usize { while!x.is_empty() && x[0] == 0 { x = &x[1..]; } if x.is_empty() || x[0] >= 0x80 { x.len().checked_add(1).unwrap() } else { x.len() } } /// Compute an offset within an ASN.1 INTEGER after skipping leading zeroes fn trim_zeroes(mut bytes: &[u8], scalar_size: usize) -> Result<usize, Error> { let mut offset = 0; if bytes.len() > scalar_size { if bytes.len()!= scalar_size.checked_add(1).unwrap() { return Err(Error::new()); } if bytes[0]!= 0 { return Err(Error::new()); } bytes = &bytes[1..]; offset += 1; } while!bytes.is_empty() && bytes[0] == 0 { bytes = &bytes[1..]; offset += 1; } Ok(offset) } #[cfg(all(feature = "dev", test))] mod tests { use crate::dev::curve::Signature; use signature::Signature as _; const EXAMPLE_SIGNATURE: [u8; 64] = [ 0xf3, 0xac, 0x80, 0x61, 0xb5, 0x14, 0x79, 0x5b, 0x88, 0x43, 0xe3, 0xd6, 0x62, 0x95, 0x27, 0xed, 0x2a, 0xfd, 0x6b, 0x1f, 0x6a, 0x55, 0x5a, 0x7a, 0xca, 0xbb, 0x5e, 0x6f, 0x79, 0xc8, 0xc2, 0xac, 0x8b, 0xf7, 0x78, 0x19, 0xca, 0x5, 0xa6, 0xb2, 0x78, 0x6c, 0x76, 0x26, 0x2b, 0xf7, 0x37, 0x1c, 0xef, 0x97, 0xb2, 0x18, 0xe9, 0x6f, 0x17, 0x5a, 0x3c, 0xcd, 0xda, 0x2a, 0xcc, 0x5, 0x89, 0x3, ]; #[test] fn test_fixed_to_asn1_signature_roundtrip() { let signature1 = Signature::from_bytes(&EXAMPLE_SIGNATURE).unwrap(); // Convert to ASN.1 DER and back let asn1_signature = signature1.to_asn1(); let signature2 = Signature::from_asn1(asn1_signature.as_ref()).unwrap(); assert_eq!(signature1, signature2); } }
{ let r_len = int_length(r); let s_len = int_length(s); let scalar_size = C::FieldSize::to_usize(); let mut bytes = DocumentBytes::<C>::default(); // SEQUENCE header bytes[0] = SEQUENCE_TAG as u8; let zlen = r_len.checked_add(s_len).unwrap().checked_add(4).unwrap(); let offset = if zlen >= 0x80 { bytes[1] = 0x81; bytes[2] = zlen as u8; 3 } else { bytes[1] = zlen as u8; 2 }; // First INTEGER (r)
identifier_body
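The `MaxOverhead = U9` documentation in these asn1.rs entries fixes the worst-case ASN.1 framing at nine bytes: three for the outer SEQUENCE header plus three per INTEGER (tag, length, optional sign pad). A small sketch of the resulting size bound; the helper name and the example curve sizes are illustrative, not part of the crate:

```rust
/// Worst-case DER-encoded ECDSA signature length for a curve whose field
/// elements are `field_size` bytes wide.
fn max_der_len(field_size: usize) -> usize {
    let sequence_header = 3;    // SEQUENCE tag + up to two length bytes
    let per_integer_header = 3; // INTEGER tag + length + possible 0x00 sign pad
    sequence_header + 2 * (per_integer_header + field_size)
}

fn main() {
    assert_eq!(max_der_len(32), 73);  // e.g. a 256-bit curve
    assert_eq!(max_der_len(48), 105); // e.g. a 384-bit curve
}
```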
asn1.rs
//! Support for ECDSA signatures encoded as ASN.1 DER. // Adapted from BearSSL. Copyright (c) 2016 Thomas Pornin <[email protected]>. // Relicensed under Apache 2.0 + MIT (from original MIT) with permission. // // <https://www.bearssl.org/gitweb/?p=BearSSL;a=blob;f=src/ec/ecdsa_atr.c> // <https://www.bearssl.org/gitweb/?p=BearSSL;a=blob;f=src/ec/ecdsa_rta.c> use crate::{ generic_array::{typenum::Unsigned, ArrayLength, GenericArray}, Error, }; use core::{ convert::{TryFrom, TryInto}, fmt, ops::{Add, Range}, }; use elliptic_curve::{consts::U9, weierstrass::Curve}; #[cfg(feature = "alloc")] use alloc::boxed::Box; /// Maximum overhead of an ASN.1 DER-encoded ECDSA signature for a given curve: /// 9-bytes. /// /// Includes 3-byte ASN.1 DER header: /// /// - 1-byte: ASN.1 `SEQUENCE` tag (0x30) /// - 2-byte: length /// ///...followed by two ASN.1 `INTEGER` values, which each have a header whose /// maximum length is the following: /// /// - 1-byte: ASN.1 `INTEGER` tag (0x02) /// - 1-byte: length /// - 1-byte: zero to indicate value is positive (`INTEGER` is signed) pub type MaxOverhead = U9; /// Maximum size of an ASN.1 DER encoded signature for the given elliptic curve. pub type MaxSize<C> = <<<C as elliptic_curve::Curve>::FieldSize as Add>::Output as Add<MaxOverhead>>::Output; /// Byte array containing a serialized ASN.1 signature type DocumentBytes<C> = GenericArray<u8, MaxSize<C>>; /// ASN.1 `INTEGER` tag const INTEGER_TAG: u8 = 0x02; /// ASN.1 `SEQUENCE` tag const SEQUENCE_TAG: u8 = 0x30; /// ASN.1 DER-encoded signature. /// /// Generic over the scalar size of the elliptic curve. pub struct Signature<C> where C: Curve, C::FieldSize: Add + ArrayLength<u8>, MaxSize<C>: ArrayLength<u8>, <C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>, { /// ASN.1 DER-encoded signature data bytes: DocumentBytes<C>, /// Range of the `r` value within the signature r_range: Range<usize>, /// Range of the `s` value within the signature s_range: Range<usize>, } impl<C> signature::Signature for Signature<C> where C: Curve, C::FieldSize: Add + ArrayLength<u8>, MaxSize<C>: ArrayLength<u8>, <C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>, { /// Parse an ASN.1 DER-encoded ECDSA signature from a byte slice fn from_bytes(bytes: &[u8]) -> Result<Self, Error> { bytes.try_into() } } #[allow(clippy::len_without_is_empty)] impl<C> Signature<C> where C: Curve, C::FieldSize: Add + ArrayLength<u8>, MaxSize<C>: ArrayLength<u8>, <C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>, { /// Get the length of the signature in bytes pub fn len(&self) -> usize { self.s_range.end } /// Borrow this signature as a byte slice pub fn as_bytes(&self) -> &[u8] { &self.bytes.as_slice()[..self.len()] } /// Serialize this signature as a boxed byte slice #[cfg(feature = "alloc")] pub fn to_bytes(&self) -> Box<[u8]> { self.as_bytes().to_vec().into_boxed_slice() } /// Create an ASN.1 DER encoded signature from big endian `r` and `s` scalars pub(crate) fn from_scalar_bytes(r: &[u8], s: &[u8]) -> Self { let r_len = int_length(r); let s_len = int_length(s); let scalar_size = C::FieldSize::to_usize(); let mut bytes = DocumentBytes::<C>::default(); // SEQUENCE header bytes[0] = SEQUENCE_TAG as u8; let zlen = r_len.checked_add(s_len).unwrap().checked_add(4).unwrap(); let offset = if zlen >= 0x80 { bytes[1] = 0x81; bytes[2] = zlen as u8; 3 } else { bytes[1] = zlen as u8; 2 }; // First INTEGER (r) serialize_int(r, &mut bytes[offset..], r_len, scalar_size); let r_end = 
offset.checked_add(2).unwrap().checked_add(r_len).unwrap(); // Second INTEGER (s) serialize_int(s, &mut bytes[r_end..], s_len, scalar_size); let s_end = r_end.checked_add(2).unwrap().checked_add(s_len).unwrap(); bytes[..s_end] .try_into() .expect("generated invalid ASN.1 DER") } /// Get the `r` component of the signature (leading zeros removed) pub(crate) fn r(&self) -> &[u8] { &self.bytes[self.r_range.clone()] } /// Get the `s` component of the signature (leading zeros removed) pub(crate) fn s(&self) -> &[u8] { &self.bytes[self.s_range.clone()] } } impl<C> AsRef<[u8]> for Signature<C> where C: Curve, C::FieldSize: Add + ArrayLength<u8>, MaxSize<C>: ArrayLength<u8>, <C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>, { fn as_ref(&self) -> &[u8] { self.as_bytes() } } impl<C> fmt::Debug for Signature<C> where C: Curve, C::FieldSize: Add + ArrayLength<u8>, MaxSize<C>: ArrayLength<u8>, <C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("asn1::Signature") .field("r", &self.r()) .field("s", &self.s()) .finish() } } impl<C> TryFrom<&[u8]> for Signature<C> where C: Curve, C::FieldSize: Add + ArrayLength<u8>, MaxSize<C>: ArrayLength<u8>, <C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>, { type Error = Error; fn try_from(bytes: &[u8]) -> Result<Self, Error> { // Signature format is a SEQUENCE of two INTEGER values. We // support only integers of less than 127 bytes each (signed // encoding) so the resulting raw signature will have length // at most 254 bytes. // // First byte is SEQUENCE tag. if bytes[0]!= SEQUENCE_TAG as u8 { return Err(Error::new()); } // The SEQUENCE length will be encoded over one or two bytes. We // limit the total SEQUENCE contents to 255 bytes, because it // makes things simpler; this is enough for subgroup orders up // to 999 bits. 
let mut zlen = bytes[1] as usize; let offset = if zlen > 0x80 { if zlen!= 0x81 { return Err(Error::new()); } zlen = bytes[2] as usize; 3 } else { 2 }; if zlen!= bytes.len().checked_sub(offset).unwrap() { return Err(Error::new()); } // First INTEGER (r) let r_range = parse_int(&bytes[offset..], C::FieldSize::to_usize())?; let r_start = offset.checked_add(r_range.start).unwrap(); let r_end = offset.checked_add(r_range.end).unwrap(); // Second INTEGER (s) let s_range = parse_int(&bytes[r_end..], C::FieldSize::to_usize())?; let s_start = r_end.checked_add(s_range.start).unwrap(); let s_end = r_end.checked_add(s_range.end).unwrap(); if s_end!= bytes.as_ref().len() { return Err(Error::new()); } let mut byte_arr = DocumentBytes::<C>::default(); byte_arr[..s_end].copy_from_slice(bytes.as_ref()); Ok(Signature { bytes: byte_arr, r_range: Range { start: r_start, end: r_end, }, s_range: Range { start: s_start, end: s_end, }, }) } } #[cfg(all(feature = "digest", feature = "hazmat"))] impl<C> signature::PrehashSignature for Signature<C> where C: Curve + crate::hazmat::DigestPrimitive, C::FieldSize: Add + ArrayLength<u8>, MaxSize<C>: ArrayLength<u8>, <C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>, { type Digest = C::Digest; } /// Parse an integer from its ASN.1 DER serialization fn parse_int(bytes: &[u8], scalar_size: usize) -> Result<Range<usize>, Error> { if bytes.len() < 3 { return Err(Error::new()); } if bytes[0]!= INTEGER_TAG as u8 { return Err(Error::new()); } let len = bytes[1] as usize; if len >= 0x80 || len.checked_add(2).unwrap() > bytes.len() { return Err(Error::new()); } let mut start = 2usize; let end = start.checked_add(len).unwrap(); start = start .checked_add(trim_zeroes(&bytes[start..end], scalar_size)?) .unwrap(); Ok(Range { start, end }) } /// Serialize scalar as ASN.1 DER fn serialize_int(scalar: &[u8], out: &mut [u8], len: usize, scalar_size: usize) { out[0] = INTEGER_TAG as u8; out[1] = len as u8; if len > scalar_size { out[2] = 0x00; out[3..scalar_size.checked_add(3).unwrap()].copy_from_slice(scalar); } else { out[2..len.checked_add(2).unwrap()] .copy_from_slice(&scalar[scalar_size.checked_sub(len).unwrap()..]); } } /// Compute ASN.1 DER encoded length for the provided scalar. The ASN.1 /// encoding is signed, so its leading bit must have value 0; it must also be /// of minimal length (so leading bytes of value 0 must be removed, except if /// that would contradict the rule about the sign bit). fn int_length(mut x: &[u8]) -> usize { while!x.is_empty() && x[0] == 0 { x = &x[1..]; } if x.is_empty() || x[0] >= 0x80 { x.len().checked_add(1).unwrap() } else { x.len() } } /// Compute an offset within an ASN.1 INTEGER after skipping leading zeroes fn trim_zeroes(mut bytes: &[u8], scalar_size: usize) -> Result<usize, Error> { let mut offset = 0; if bytes.len() > scalar_size { if bytes.len()!= scalar_size.checked_add(1).unwrap()
if bytes[0]!= 0 { return Err(Error::new()); } bytes = &bytes[1..]; offset += 1; } while!bytes.is_empty() && bytes[0] == 0 { bytes = &bytes[1..]; offset += 1; } Ok(offset) } #[cfg(all(feature = "dev", test))] mod tests { use crate::dev::curve::Signature; use signature::Signature as _; const EXAMPLE_SIGNATURE: [u8; 64] = [ 0xf3, 0xac, 0x80, 0x61, 0xb5, 0x14, 0x79, 0x5b, 0x88, 0x43, 0xe3, 0xd6, 0x62, 0x95, 0x27, 0xed, 0x2a, 0xfd, 0x6b, 0x1f, 0x6a, 0x55, 0x5a, 0x7a, 0xca, 0xbb, 0x5e, 0x6f, 0x79, 0xc8, 0xc2, 0xac, 0x8b, 0xf7, 0x78, 0x19, 0xca, 0x5, 0xa6, 0xb2, 0x78, 0x6c, 0x76, 0x26, 0x2b, 0xf7, 0x37, 0x1c, 0xef, 0x97, 0xb2, 0x18, 0xe9, 0x6f, 0x17, 0x5a, 0x3c, 0xcd, 0xda, 0x2a, 0xcc, 0x5, 0x89, 0x3, ]; #[test] fn test_fixed_to_asn1_signature_roundtrip() { let signature1 = Signature::from_bytes(&EXAMPLE_SIGNATURE).unwrap(); // Convert to ASN.1 DER and back let asn1_signature = signature1.to_asn1(); let signature2 = Signature::from_asn1(asn1_signature.as_ref()).unwrap(); assert_eq!(signature1, signature2); } }
{ return Err(Error::new()); }
conditional_block
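The `TryFrom<&[u8]>` parser in these entries accepts a SEQUENCE length encoded either in a single byte or as `0x81` followed by one length byte, which is what caps the contents at 255 bytes. A simplified standalone sketch of that short/long length-form decision (this is an assumption-laden illustration, not the crate's exact code, which folds the check into the signature parser and treats the boundary value slightly differently):

```rust
/// Decode the DER length following a tag byte, returning
/// (content_len, header_len), or None for forms the parser rejects.
fn parse_seq_len(bytes: &[u8]) -> Option<(usize, usize)> {
    match *bytes.get(1)? {
        n if n < 0x80 => Some((n as usize, 2)),     // short form: length in one byte
        0x81 => Some((*bytes.get(2)? as usize, 3)), // long form: one extra length byte
        _ => None,                                  // longer forms are not supported
    }
}

fn main() {
    assert_eq!(parse_seq_len(&[0x30, 0x45]), Some((0x45, 2)));
    assert_eq!(parse_seq_len(&[0x30, 0x81, 0x90]), Some((0x90, 3)));
    assert_eq!(parse_seq_len(&[0x30, 0x82, 0x01, 0x00]), None);
}
```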
asn1.rs
//! Support for ECDSA signatures encoded as ASN.1 DER. // Adapted from BearSSL. Copyright (c) 2016 Thomas Pornin <[email protected]>. // Relicensed under Apache 2.0 + MIT (from original MIT) with permission. // // <https://www.bearssl.org/gitweb/?p=BearSSL;a=blob;f=src/ec/ecdsa_atr.c> // <https://www.bearssl.org/gitweb/?p=BearSSL;a=blob;f=src/ec/ecdsa_rta.c> use crate::{ generic_array::{typenum::Unsigned, ArrayLength, GenericArray}, Error, }; use core::{ convert::{TryFrom, TryInto}, fmt, ops::{Add, Range}, }; use elliptic_curve::{consts::U9, weierstrass::Curve}; #[cfg(feature = "alloc")] use alloc::boxed::Box; /// Maximum overhead of an ASN.1 DER-encoded ECDSA signature for a given curve: /// 9-bytes. /// /// Includes 3-byte ASN.1 DER header: /// /// - 1-byte: ASN.1 `SEQUENCE` tag (0x30) /// - 2-byte: length /// ///...followed by two ASN.1 `INTEGER` values, which each have a header whose /// maximum length is the following: /// /// - 1-byte: ASN.1 `INTEGER` tag (0x02) /// - 1-byte: length /// - 1-byte: zero to indicate value is positive (`INTEGER` is signed) pub type MaxOverhead = U9; /// Maximum size of an ASN.1 DER encoded signature for the given elliptic curve. pub type MaxSize<C> = <<<C as elliptic_curve::Curve>::FieldSize as Add>::Output as Add<MaxOverhead>>::Output; /// Byte array containing a serialized ASN.1 signature type DocumentBytes<C> = GenericArray<u8, MaxSize<C>>; /// ASN.1 `INTEGER` tag const INTEGER_TAG: u8 = 0x02; /// ASN.1 `SEQUENCE` tag const SEQUENCE_TAG: u8 = 0x30; /// ASN.1 DER-encoded signature. /// /// Generic over the scalar size of the elliptic curve. pub struct Signature<C> where C: Curve, C::FieldSize: Add + ArrayLength<u8>, MaxSize<C>: ArrayLength<u8>, <C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>, { /// ASN.1 DER-encoded signature data bytes: DocumentBytes<C>, /// Range of the `r` value within the signature r_range: Range<usize>, /// Range of the `s` value within the signature s_range: Range<usize>, } impl<C> signature::Signature for Signature<C> where C: Curve, C::FieldSize: Add + ArrayLength<u8>, MaxSize<C>: ArrayLength<u8>, <C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>, { /// Parse an ASN.1 DER-encoded ECDSA signature from a byte slice fn from_bytes(bytes: &[u8]) -> Result<Self, Error> { bytes.try_into() } } #[allow(clippy::len_without_is_empty)] impl<C> Signature<C> where C: Curve, C::FieldSize: Add + ArrayLength<u8>, MaxSize<C>: ArrayLength<u8>, <C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>, { /// Get the length of the signature in bytes pub fn len(&self) -> usize { self.s_range.end } /// Borrow this signature as a byte slice pub fn as_bytes(&self) -> &[u8] { &self.bytes.as_slice()[..self.len()] } /// Serialize this signature as a boxed byte slice #[cfg(feature = "alloc")] pub fn to_bytes(&self) -> Box<[u8]> { self.as_bytes().to_vec().into_boxed_slice() } /// Create an ASN.1 DER encoded signature from big endian `r` and `s` scalars pub(crate) fn from_scalar_bytes(r: &[u8], s: &[u8]) -> Self { let r_len = int_length(r); let s_len = int_length(s); let scalar_size = C::FieldSize::to_usize(); let mut bytes = DocumentBytes::<C>::default(); // SEQUENCE header bytes[0] = SEQUENCE_TAG as u8; let zlen = r_len.checked_add(s_len).unwrap().checked_add(4).unwrap(); let offset = if zlen >= 0x80 { bytes[1] = 0x81; bytes[2] = zlen as u8; 3 } else { bytes[1] = zlen as u8; 2 }; // First INTEGER (r) serialize_int(r, &mut bytes[offset..], r_len, scalar_size); let r_end = 
offset.checked_add(2).unwrap().checked_add(r_len).unwrap(); // Second INTEGER (s) serialize_int(s, &mut bytes[r_end..], s_len, scalar_size); let s_end = r_end.checked_add(2).unwrap().checked_add(s_len).unwrap(); bytes[..s_end] .try_into() .expect("generated invalid ASN.1 DER") } /// Get the `r` component of the signature (leading zeros removed) pub(crate) fn r(&self) -> &[u8] { &self.bytes[self.r_range.clone()] } /// Get the `s` component of the signature (leading zeros removed) pub(crate) fn s(&self) -> &[u8] { &self.bytes[self.s_range.clone()] } } impl<C> AsRef<[u8]> for Signature<C> where C: Curve, C::FieldSize: Add + ArrayLength<u8>, MaxSize<C>: ArrayLength<u8>, <C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>, { fn as_ref(&self) -> &[u8] { self.as_bytes() } } impl<C> fmt::Debug for Signature<C> where C: Curve, C::FieldSize: Add + ArrayLength<u8>, MaxSize<C>: ArrayLength<u8>, <C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("asn1::Signature") .field("r", &self.r()) .field("s", &self.s()) .finish() } } impl<C> TryFrom<&[u8]> for Signature<C> where C: Curve, C::FieldSize: Add + ArrayLength<u8>, MaxSize<C>: ArrayLength<u8>, <C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>, { type Error = Error; fn try_from(bytes: &[u8]) -> Result<Self, Error> { // Signature format is a SEQUENCE of two INTEGER values. We // support only integers of less than 127 bytes each (signed // encoding) so the resulting raw signature will have length // at most 254 bytes. // // First byte is SEQUENCE tag. if bytes[0]!= SEQUENCE_TAG as u8 { return Err(Error::new()); } // The SEQUENCE length will be encoded over one or two bytes. We // limit the total SEQUENCE contents to 255 bytes, because it // makes things simpler; this is enough for subgroup orders up // to 999 bits. let mut zlen = bytes[1] as usize; let offset = if zlen > 0x80 { if zlen!= 0x81 { return Err(Error::new()); } zlen = bytes[2] as usize; 3 } else { 2 }; if zlen!= bytes.len().checked_sub(offset).unwrap() { return Err(Error::new()); } // First INTEGER (r) let r_range = parse_int(&bytes[offset..], C::FieldSize::to_usize())?; let r_start = offset.checked_add(r_range.start).unwrap(); let r_end = offset.checked_add(r_range.end).unwrap(); // Second INTEGER (s)
if s_end!= bytes.as_ref().len() { return Err(Error::new()); } let mut byte_arr = DocumentBytes::<C>::default(); byte_arr[..s_end].copy_from_slice(bytes.as_ref()); Ok(Signature { bytes: byte_arr, r_range: Range { start: r_start, end: r_end, }, s_range: Range { start: s_start, end: s_end, }, }) } } #[cfg(all(feature = "digest", feature = "hazmat"))] impl<C> signature::PrehashSignature for Signature<C> where C: Curve + crate::hazmat::DigestPrimitive, C::FieldSize: Add + ArrayLength<u8>, MaxSize<C>: ArrayLength<u8>, <C::FieldSize as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>, { type Digest = C::Digest; } /// Parse an integer from its ASN.1 DER serialization fn parse_int(bytes: &[u8], scalar_size: usize) -> Result<Range<usize>, Error> { if bytes.len() < 3 { return Err(Error::new()); } if bytes[0]!= INTEGER_TAG as u8 { return Err(Error::new()); } let len = bytes[1] as usize; if len >= 0x80 || len.checked_add(2).unwrap() > bytes.len() { return Err(Error::new()); } let mut start = 2usize; let end = start.checked_add(len).unwrap(); start = start .checked_add(trim_zeroes(&bytes[start..end], scalar_size)?) .unwrap(); Ok(Range { start, end }) } /// Serialize scalar as ASN.1 DER fn serialize_int(scalar: &[u8], out: &mut [u8], len: usize, scalar_size: usize) { out[0] = INTEGER_TAG as u8; out[1] = len as u8; if len > scalar_size { out[2] = 0x00; out[3..scalar_size.checked_add(3).unwrap()].copy_from_slice(scalar); } else { out[2..len.checked_add(2).unwrap()] .copy_from_slice(&scalar[scalar_size.checked_sub(len).unwrap()..]); } } /// Compute ASN.1 DER encoded length for the provided scalar. The ASN.1 /// encoding is signed, so its leading bit must have value 0; it must also be /// of minimal length (so leading bytes of value 0 must be removed, except if /// that would contradict the rule about the sign bit). fn int_length(mut x: &[u8]) -> usize { while!x.is_empty() && x[0] == 0 { x = &x[1..]; } if x.is_empty() || x[0] >= 0x80 { x.len().checked_add(1).unwrap() } else { x.len() } } /// Compute an offset within an ASN.1 INTEGER after skipping leading zeroes fn trim_zeroes(mut bytes: &[u8], scalar_size: usize) -> Result<usize, Error> { let mut offset = 0; if bytes.len() > scalar_size { if bytes.len()!= scalar_size.checked_add(1).unwrap() { return Err(Error::new()); } if bytes[0]!= 0 { return Err(Error::new()); } bytes = &bytes[1..]; offset += 1; } while!bytes.is_empty() && bytes[0] == 0 { bytes = &bytes[1..]; offset += 1; } Ok(offset) } #[cfg(all(feature = "dev", test))] mod tests { use crate::dev::curve::Signature; use signature::Signature as _; const EXAMPLE_SIGNATURE: [u8; 64] = [ 0xf3, 0xac, 0x80, 0x61, 0xb5, 0x14, 0x79, 0x5b, 0x88, 0x43, 0xe3, 0xd6, 0x62, 0x95, 0x27, 0xed, 0x2a, 0xfd, 0x6b, 0x1f, 0x6a, 0x55, 0x5a, 0x7a, 0xca, 0xbb, 0x5e, 0x6f, 0x79, 0xc8, 0xc2, 0xac, 0x8b, 0xf7, 0x78, 0x19, 0xca, 0x5, 0xa6, 0xb2, 0x78, 0x6c, 0x76, 0x26, 0x2b, 0xf7, 0x37, 0x1c, 0xef, 0x97, 0xb2, 0x18, 0xe9, 0x6f, 0x17, 0x5a, 0x3c, 0xcd, 0xda, 0x2a, 0xcc, 0x5, 0x89, 0x3, ]; #[test] fn test_fixed_to_asn1_signature_roundtrip() { let signature1 = Signature::from_bytes(&EXAMPLE_SIGNATURE).unwrap(); // Convert to ASN.1 DER and back let asn1_signature = signature1.to_asn1(); let signature2 = Signature::from_asn1(asn1_signature.as_ref()).unwrap(); assert_eq!(signature1, signature2); } }
let s_range = parse_int(&bytes[r_end..], C::FieldSize::to_usize())?; let s_start = r_end.checked_add(s_range.start).unwrap(); let s_end = r_end.checked_add(s_range.end).unwrap();
random_line_split
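On the parsing side, `trim_zeroes` (visible in the suffix above) accepts at most one 0x00 sign-pad byte beyond the scalar size and then skips any remaining leading zeros, so that `r()` and `s()` hand back minimal big-endian scalars. A standalone sketch of that skip logic with worked cases; the `Result<usize, ()>` error type and the `main` function are stand-ins for illustration:

```rust
fn trim_zeroes(mut bytes: &[u8], scalar_size: usize) -> Result<usize, ()> {
    let mut offset = 0;
    if bytes.len() > scalar_size {
        // Only a single 0x00 sign-pad byte may push the length past the scalar size.
        if bytes.len() != scalar_size + 1 || bytes[0] != 0 {
            return Err(());
        }
        bytes = &bytes[1..];
        offset += 1;
    }
    while !bytes.is_empty() && bytes[0] == 0 {
        bytes = &bytes[1..];
        offset += 1;
    }
    Ok(offset)
}

fn main() {
    assert_eq!(trim_zeroes(&[0x00, 0x80, 0x01], 2), Ok(1)); // sign pad skipped
    assert_eq!(trim_zeroes(&[0x00, 0x00, 0x01], 3), Ok(2)); // leading zeros skipped
    assert!(trim_zeroes(&[0x01, 0x02, 0x03], 2).is_err());  // too long without a sign pad
}
```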
lsp_plugin.rs
// Copyright 2018 The xi-editor Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Implementation of Language Server Plugin use std::collections::HashMap; use std::path::Path; use std::sync::{Arc, Mutex}; use url::Url; use xi_plugin_lib::{ChunkCache, CoreProxy, Plugin, View}; use xi_rope::rope::RopeDelta; use crate::conversion_utils::*; use crate::language_server_client::LanguageServerClient; use crate::lsp_types::*; use crate::result_queue::ResultQueue; use crate::types::{Config, LanguageResponseError, LspResponse}; use crate::utils::*; use crate::xi_core::{ConfigTable, ViewId}; pub struct ViewInfo { version: u64, ls_identifier: String, } /// Represents the state of the Language Server Plugin pub struct LspPlugin { pub config: Config, view_info: HashMap<ViewId, ViewInfo>, core: Option<CoreProxy>, result_queue: ResultQueue, language_server_clients: HashMap<String, Arc<Mutex<LanguageServerClient>>>, } impl LspPlugin { pub fn new(config: Config) -> Self { LspPlugin { config, core: None, result_queue: ResultQueue::new(), view_info: HashMap::new(), language_server_clients: HashMap::new(), } } } impl Plugin for LspPlugin { type Cache = ChunkCache; fn initialize(&mut self, core: CoreProxy) { self.core = Some(core) } fn update( &mut self, view: &mut View<Self::Cache>, delta: Option<&RopeDelta>, _edit_type: String, _author: String, ) { let view_info = self.view_info.get_mut(&view.get_id()); if let Some(view_info) = view_info { // This won't fail since we definitely have a client for the given // client identifier let ls_client = &self.language_server_clients[&view_info.ls_identifier]; let mut ls_client = ls_client.lock().unwrap(); let sync_kind = ls_client.get_sync_kind(); view_info.version += 1; if let Some(changes) = get_change_for_sync_kind(sync_kind, view, delta) { ls_client.send_did_change(view.get_id(), changes, view_info.version); } } } fn did_save(&mut self, view: &mut View<Self::Cache>, _old: Option<&Path>) { trace!("saved view {}", view.get_id()); let document_text = view.get_document().unwrap(); self.with_language_server_for_view(view, |ls_client| { ls_client.send_did_save(view.get_id(), &document_text); }); } fn did_close(&mut self, view: &View<Self::Cache>) { trace!("close view {}", view.get_id()); self.with_language_server_for_view(view, |ls_client| { ls_client.send_did_close(view.get_id()); }); } fn new_view(&mut self, view: &mut View<Self::Cache>) { trace!("new view {}", view.get_id()); let document_text = view.get_document().unwrap(); let path = view.get_path(); let view_id = view.get_id(); // TODO: Use Language Idenitifier assigned by core when the // implementation is settled if let Some(language_id) = self.get_language_for_view(view) { let path = path.unwrap(); let workspace_root_uri = { let config = &self.config.language_config.get_mut(&language_id).unwrap(); config.workspace_identifier.clone().and_then(|identifier| { let path = view.get_path().unwrap(); let q = get_workspace_root_uri(&identifier, path); q.ok() }) }; let result = 
self.get_lsclient_from_workspace_root(&language_id, &workspace_root_uri); if let Some((identifier, ls_client)) = result { self.view_info .insert(view.get_id(), ViewInfo { version: 0, ls_identifier: identifier }); let mut ls_client = ls_client.lock().unwrap(); let document_uri = Url::from_file_path(path).unwrap(); if!ls_client.is_initialized { ls_client.send_initialize(workspace_root_uri, move |ls_client, result| { if let Ok(result) = result { let init_result: InitializeResult = serde_json::from_value(result).unwrap(); debug!("Init Result: {:?}", init_result); ls_client.server_capabilities = Some(init_result.capabilities); ls_client.is_initialized = true; ls_client.send_did_open(view_id, document_uri, document_text); } }); } else { ls_client.send_did_open(view_id, document_uri, document_text); } } } } fn config_changed(&mut self, _view: &mut View<Self::Cache>, _changes: &ConfigTable) {} fn get_hover(&mut self, view: &mut View<Self::Cache>, request_id: usize, position: usize) { let view_id = view.get_id(); let position_ls = get_position_of_offset(view, position); self.with_language_server_for_view(view, |ls_client| match position_ls { Ok(position) => ls_client.request_hover(view_id, position, move |ls_client, result| { let res = result .map_err(|e| LanguageResponseError::LanguageServerError(format!("{:?}", e))) .and_then(|h| { let hover: Option<Hover> = serde_json::from_value(h).unwrap(); hover.ok_or(LanguageResponseError::NullResponse) }); ls_client.result_queue.push_result(request_id, LspResponse::Hover(res)); ls_client.core.schedule_idle(view_id); }), Err(err) => { ls_client.result_queue.push_result(request_id, LspResponse::Hover(Err(err.into()))); ls_client.core.schedule_idle(view_id); } }); } fn idle(&mut self, view: &mut View<Self::Cache>) { let result = self.result_queue.pop_result(); if let Some((request_id, reponse)) = result { match reponse { LspResponse::Hover(res) => { let res = res.and_then(|h| core_hover_from_hover(view, h)).map_err(|e| e.into()); self.with_language_server_for_view(view, |ls_client| { ls_client.core.display_hover(view.get_id(), request_id, &res) }); } } } } } /// Util Methods impl LspPlugin { /// Get the Language Server Client given the Workspace root /// This method checks if a language server is running at the specified root /// and returns it else it tries to spawn a new language server and returns a /// Arc reference to it fn
( &mut self, language_id: &str, workspace_root: &Option<Url>, ) -> Option<(String, Arc<Mutex<LanguageServerClient>>)> { workspace_root .clone() .map(|r| r.into_string()) .or_else(|| { let config = &self.config.language_config[language_id]; if config.supports_single_file { // A generic client is the one that supports single files i.e. // Non-Workspace projects as well Some(String::from("generic")) } else { None } }) .and_then(|language_server_identifier| { let contains = self.language_server_clients.contains_key(&language_server_identifier); if contains { let client = self.language_server_clients[&language_server_identifier].clone(); Some((language_server_identifier, client)) } else { let config = &self.config.language_config[language_id]; let client = start_new_server( config.start_command.clone(), config.start_arguments.clone(), config.extensions.clone(), language_id, // Unwrap is safe self.core.clone().unwrap(), self.result_queue.clone(), ); match client { Ok(client) => { let client_clone = client.clone(); self.language_server_clients .insert(language_server_identifier.clone(), client); Some((language_server_identifier, client_clone)) } Err(err) => { error!( "Error occured while starting server for Language: {}: {:?}", language_id, err ); None } } } }) } /// Tries to get language for the View using the extension of the document. /// Only searches for the languages supported by the Language Plugin as /// defined in the config fn get_language_for_view(&mut self, view: &View<ChunkCache>) -> Option<String> { view.get_path() .and_then(|path| path.extension()) .and_then(|extension| extension.to_str()) .and_then(|extension_str| { for (lang, config) in &self.config.language_config { if config.extensions.iter().any(|x| x == extension_str) { return Some(lang.clone()); } } None }) } fn with_language_server_for_view<F, R>(&mut self, view: &View<ChunkCache>, f: F) -> Option<R> where F: FnOnce(&mut LanguageServerClient) -> R, { let view_info = self.view_info.get_mut(&view.get_id())?; let ls_client_arc = &self.language_server_clients[&view_info.ls_identifier]; let mut ls_client = ls_client_arc.lock().unwrap(); Some(f(&mut ls_client)) } }
get_lsclient_from_workspace_root
identifier_name
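`get_language_for_view` in the lsp_plugin.rs entry above resolves a view's language purely from its file extension against the configured language table. A minimal sketch of that lookup with placeholder types (`LanguageConfig` and `language_for_extension` here are stand-ins for the plugin's real config struct and method, added only to illustrate the pattern):

```rust
use std::collections::HashMap;

struct LanguageConfig {
    extensions: Vec<String>,
}

/// Return the first configured language whose extension list contains `extension`.
fn language_for_extension(
    configs: &HashMap<String, LanguageConfig>,
    extension: &str,
) -> Option<String> {
    configs
        .iter()
        .find(|(_, cfg)| cfg.extensions.iter().any(|e| e == extension))
        .map(|(lang, _)| lang.clone())
}

fn main() {
    let mut configs = HashMap::new();
    configs.insert(
        "rust".to_string(),
        LanguageConfig { extensions: vec!["rs".to_string()] },
    );
    assert_eq!(language_for_extension(&configs, "rs"), Some("rust".to_string()));
    assert_eq!(language_for_extension(&configs, "py"), None);
}
```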
lsp_plugin.rs
// Copyright 2018 The xi-editor Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Implementation of Language Server Plugin use std::collections::HashMap; use std::path::Path; use std::sync::{Arc, Mutex}; use url::Url; use xi_plugin_lib::{ChunkCache, CoreProxy, Plugin, View}; use xi_rope::rope::RopeDelta; use crate::conversion_utils::*; use crate::language_server_client::LanguageServerClient; use crate::lsp_types::*; use crate::result_queue::ResultQueue; use crate::types::{Config, LanguageResponseError, LspResponse}; use crate::utils::*; use crate::xi_core::{ConfigTable, ViewId}; pub struct ViewInfo { version: u64, ls_identifier: String, } /// Represents the state of the Language Server Plugin pub struct LspPlugin { pub config: Config, view_info: HashMap<ViewId, ViewInfo>, core: Option<CoreProxy>, result_queue: ResultQueue, language_server_clients: HashMap<String, Arc<Mutex<LanguageServerClient>>>, } impl LspPlugin { pub fn new(config: Config) -> Self { LspPlugin { config, core: None, result_queue: ResultQueue::new(), view_info: HashMap::new(), language_server_clients: HashMap::new(), } } } impl Plugin for LspPlugin { type Cache = ChunkCache; fn initialize(&mut self, core: CoreProxy) { self.core = Some(core) } fn update( &mut self, view: &mut View<Self::Cache>, delta: Option<&RopeDelta>, _edit_type: String, _author: String, ) { let view_info = self.view_info.get_mut(&view.get_id()); if let Some(view_info) = view_info { // This won't fail since we definitely have a client for the given // client identifier let ls_client = &self.language_server_clients[&view_info.ls_identifier]; let mut ls_client = ls_client.lock().unwrap(); let sync_kind = ls_client.get_sync_kind(); view_info.version += 1; if let Some(changes) = get_change_for_sync_kind(sync_kind, view, delta) { ls_client.send_did_change(view.get_id(), changes, view_info.version); } } } fn did_save(&mut self, view: &mut View<Self::Cache>, _old: Option<&Path>) { trace!("saved view {}", view.get_id()); let document_text = view.get_document().unwrap(); self.with_language_server_for_view(view, |ls_client| { ls_client.send_did_save(view.get_id(), &document_text); }); } fn did_close(&mut self, view: &View<Self::Cache>) { trace!("close view {}", view.get_id()); self.with_language_server_for_view(view, |ls_client| { ls_client.send_did_close(view.get_id()); }); } fn new_view(&mut self, view: &mut View<Self::Cache>) { trace!("new view {}", view.get_id()); let document_text = view.get_document().unwrap(); let path = view.get_path(); let view_id = view.get_id(); // TODO: Use Language Idenitifier assigned by core when the // implementation is settled if let Some(language_id) = self.get_language_for_view(view) { let path = path.unwrap(); let workspace_root_uri = { let config = &self.config.language_config.get_mut(&language_id).unwrap(); config.workspace_identifier.clone().and_then(|identifier| { let path = view.get_path().unwrap(); let q = get_workspace_root_uri(&identifier, path); q.ok() }) }; let result = 
self.get_lsclient_from_workspace_root(&language_id, &workspace_root_uri); if let Some((identifier, ls_client)) = result { self.view_info .insert(view.get_id(), ViewInfo { version: 0, ls_identifier: identifier }); let mut ls_client = ls_client.lock().unwrap(); let document_uri = Url::from_file_path(path).unwrap(); if!ls_client.is_initialized { ls_client.send_initialize(workspace_root_uri, move |ls_client, result| { if let Ok(result) = result { let init_result: InitializeResult = serde_json::from_value(result).unwrap(); debug!("Init Result: {:?}", init_result); ls_client.server_capabilities = Some(init_result.capabilities); ls_client.is_initialized = true; ls_client.send_did_open(view_id, document_uri, document_text); } }); } else { ls_client.send_did_open(view_id, document_uri, document_text); } } } } fn config_changed(&mut self, _view: &mut View<Self::Cache>, _changes: &ConfigTable) {} fn get_hover(&mut self, view: &mut View<Self::Cache>, request_id: usize, position: usize) { let view_id = view.get_id(); let position_ls = get_position_of_offset(view, position); self.with_language_server_for_view(view, |ls_client| match position_ls { Ok(position) => ls_client.request_hover(view_id, position, move |ls_client, result| { let res = result .map_err(|e| LanguageResponseError::LanguageServerError(format!("{:?}", e))) .and_then(|h| { let hover: Option<Hover> = serde_json::from_value(h).unwrap(); hover.ok_or(LanguageResponseError::NullResponse) }); ls_client.result_queue.push_result(request_id, LspResponse::Hover(res)); ls_client.core.schedule_idle(view_id); }), Err(err) => { ls_client.result_queue.push_result(request_id, LspResponse::Hover(Err(err.into()))); ls_client.core.schedule_idle(view_id); } }); } fn idle(&mut self, view: &mut View<Self::Cache>) { let result = self.result_queue.pop_result(); if let Some((request_id, reponse)) = result { match reponse { LspResponse::Hover(res) => { let res = res.and_then(|h| core_hover_from_hover(view, h)).map_err(|e| e.into()); self.with_language_server_for_view(view, |ls_client| { ls_client.core.display_hover(view.get_id(), request_id, &res) }); } } } } } /// Util Methods impl LspPlugin { /// Get the Language Server Client given the Workspace root /// This method checks if a language server is running at the specified root /// and returns it else it tries to spawn a new language server and returns a /// Arc reference to it fn get_lsclient_from_workspace_root( &mut self, language_id: &str, workspace_root: &Option<Url>, ) -> Option<(String, Arc<Mutex<LanguageServerClient>>)> { workspace_root .clone() .map(|r| r.into_string()) .or_else(|| { let config = &self.config.language_config[language_id]; if config.supports_single_file { // A generic client is the one that supports single files i.e. // Non-Workspace projects as well Some(String::from("generic")) } else { None } }) .and_then(|language_server_identifier| { let contains = self.language_server_clients.contains_key(&language_server_identifier); if contains { let client = self.language_server_clients[&language_server_identifier].clone(); Some((language_server_identifier, client)) } else
Err(err) => { error!( "Error occured while starting server for Language: {}: {:?}", language_id, err ); None } } } }) } /// Tries to get language for the View using the extension of the document. /// Only searches for the languages supported by the Language Plugin as /// defined in the config fn get_language_for_view(&mut self, view: &View<ChunkCache>) -> Option<String> { view.get_path() .and_then(|path| path.extension()) .and_then(|extension| extension.to_str()) .and_then(|extension_str| { for (lang, config) in &self.config.language_config { if config.extensions.iter().any(|x| x == extension_str) { return Some(lang.clone()); } } None }) } fn with_language_server_for_view<F, R>(&mut self, view: &View<ChunkCache>, f: F) -> Option<R> where F: FnOnce(&mut LanguageServerClient) -> R, { let view_info = self.view_info.get_mut(&view.get_id())?; let ls_client_arc = &self.language_server_clients[&view_info.ls_identifier]; let mut ls_client = ls_client_arc.lock().unwrap(); Some(f(&mut ls_client)) } }
{ let config = &self.config.language_config[language_id]; let client = start_new_server( config.start_command.clone(), config.start_arguments.clone(), config.extensions.clone(), language_id, // Unwrap is safe self.core.clone().unwrap(), self.result_queue.clone(), ); match client { Ok(client) => { let client_clone = client.clone(); self.language_server_clients .insert(language_server_identifier.clone(), client); Some((language_server_identifier, client_clone)) }
conditional_block
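The conditional block above implements the plugin's spawn-on-miss policy: reuse a language server client already registered for the workspace identifier, otherwise start one and cache it under that identifier. A hedged sketch of that pattern with placeholder `Client` and `start_new_server` types (the real plugin passes the start command, arguments, extensions, core proxy, and result queue):

```rust
use std::collections::HashMap;
use std::sync::{Arc, Mutex};

struct Client;

fn start_new_server() -> Result<Arc<Mutex<Client>>, String> {
    // Placeholder: the plugin spawns the configured language server process here.
    Ok(Arc::new(Mutex::new(Client)))
}

fn client_for(
    clients: &mut HashMap<String, Arc<Mutex<Client>>>,
    identifier: &str,
) -> Option<Arc<Mutex<Client>>> {
    if let Some(existing) = clients.get(identifier) {
        return Some(existing.clone()); // reuse the running client
    }
    match start_new_server() {
        Ok(client) => {
            clients.insert(identifier.to_string(), client.clone());
            Some(client)
        }
        Err(err) => {
            eprintln!("failed to start language server: {}", err);
            None
        }
    }
}

fn main() {
    let mut clients = HashMap::new();
    assert!(client_for(&mut clients, "generic").is_some());
    assert_eq!(clients.len(), 1);
    client_for(&mut clients, "generic");
    assert_eq!(clients.len(), 1); // second call reuses the cached client
}
```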
lsp_plugin.rs
// Copyright 2018 The xi-editor Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Implementation of Language Server Plugin use std::collections::HashMap; use std::path::Path; use std::sync::{Arc, Mutex}; use url::Url; use xi_plugin_lib::{ChunkCache, CoreProxy, Plugin, View}; use xi_rope::rope::RopeDelta; use crate::conversion_utils::*; use crate::language_server_client::LanguageServerClient; use crate::lsp_types::*; use crate::result_queue::ResultQueue; use crate::types::{Config, LanguageResponseError, LspResponse}; use crate::utils::*; use crate::xi_core::{ConfigTable, ViewId}; pub struct ViewInfo { version: u64, ls_identifier: String, } /// Represents the state of the Language Server Plugin pub struct LspPlugin { pub config: Config, view_info: HashMap<ViewId, ViewInfo>, core: Option<CoreProxy>, result_queue: ResultQueue, language_server_clients: HashMap<String, Arc<Mutex<LanguageServerClient>>>, } impl LspPlugin { pub fn new(config: Config) -> Self { LspPlugin { config, core: None, result_queue: ResultQueue::new(), view_info: HashMap::new(), language_server_clients: HashMap::new(), } } } impl Plugin for LspPlugin { type Cache = ChunkCache; fn initialize(&mut self, core: CoreProxy) { self.core = Some(core) } fn update( &mut self, view: &mut View<Self::Cache>, delta: Option<&RopeDelta>, _edit_type: String, _author: String, ) { let view_info = self.view_info.get_mut(&view.get_id()); if let Some(view_info) = view_info { // This won't fail since we definitely have a client for the given // client identifier let ls_client = &self.language_server_clients[&view_info.ls_identifier]; let mut ls_client = ls_client.lock().unwrap(); let sync_kind = ls_client.get_sync_kind(); view_info.version += 1; if let Some(changes) = get_change_for_sync_kind(sync_kind, view, delta) { ls_client.send_did_change(view.get_id(), changes, view_info.version); } } } fn did_save(&mut self, view: &mut View<Self::Cache>, _old: Option<&Path>) { trace!("saved view {}", view.get_id()); let document_text = view.get_document().unwrap(); self.with_language_server_for_view(view, |ls_client| { ls_client.send_did_save(view.get_id(), &document_text); }); } fn did_close(&mut self, view: &View<Self::Cache>) { trace!("close view {}", view.get_id()); self.with_language_server_for_view(view, |ls_client| { ls_client.send_did_close(view.get_id()); }); } fn new_view(&mut self, view: &mut View<Self::Cache>) { trace!("new view {}", view.get_id()); let document_text = view.get_document().unwrap(); let path = view.get_path(); let view_id = view.get_id(); // TODO: Use Language Idenitifier assigned by core when the // implementation is settled if let Some(language_id) = self.get_language_for_view(view) { let path = path.unwrap(); let workspace_root_uri = { let config = &self.config.language_config.get_mut(&language_id).unwrap(); config.workspace_identifier.clone().and_then(|identifier| { let path = view.get_path().unwrap(); let q = get_workspace_root_uri(&identifier, path); q.ok() }) }; let result = 
self.get_lsclient_from_workspace_root(&language_id, &workspace_root_uri); if let Some((identifier, ls_client)) = result { self.view_info .insert(view.get_id(), ViewInfo { version: 0, ls_identifier: identifier }); let mut ls_client = ls_client.lock().unwrap(); let document_uri = Url::from_file_path(path).unwrap(); if!ls_client.is_initialized { ls_client.send_initialize(workspace_root_uri, move |ls_client, result| { if let Ok(result) = result { let init_result: InitializeResult = serde_json::from_value(result).unwrap(); debug!("Init Result: {:?}", init_result); ls_client.server_capabilities = Some(init_result.capabilities); ls_client.is_initialized = true; ls_client.send_did_open(view_id, document_uri, document_text); } }); } else { ls_client.send_did_open(view_id, document_uri, document_text); } } } } fn config_changed(&mut self, _view: &mut View<Self::Cache>, _changes: &ConfigTable) {} fn get_hover(&mut self, view: &mut View<Self::Cache>, request_id: usize, position: usize) { let view_id = view.get_id(); let position_ls = get_position_of_offset(view, position); self.with_language_server_for_view(view, |ls_client| match position_ls { Ok(position) => ls_client.request_hover(view_id, position, move |ls_client, result| { let res = result .map_err(|e| LanguageResponseError::LanguageServerError(format!("{:?}", e))) .and_then(|h| { let hover: Option<Hover> = serde_json::from_value(h).unwrap(); hover.ok_or(LanguageResponseError::NullResponse) }); ls_client.result_queue.push_result(request_id, LspResponse::Hover(res)); ls_client.core.schedule_idle(view_id); }), Err(err) => { ls_client.result_queue.push_result(request_id, LspResponse::Hover(Err(err.into()))); ls_client.core.schedule_idle(view_id); } }); } fn idle(&mut self, view: &mut View<Self::Cache>) { let result = self.result_queue.pop_result(); if let Some((request_id, reponse)) = result { match reponse { LspResponse::Hover(res) => { let res = res.and_then(|h| core_hover_from_hover(view, h)).map_err(|e| e.into()); self.with_language_server_for_view(view, |ls_client| { ls_client.core.display_hover(view.get_id(), request_id, &res) }); } } } } } /// Util Methods impl LspPlugin { /// Get the Language Server Client given the Workspace root /// This method checks if a language server is running at the specified root /// and returns it else it tries to spawn a new language server and returns a /// Arc reference to it fn get_lsclient_from_workspace_root( &mut self, language_id: &str, workspace_root: &Option<Url>, ) -> Option<(String, Arc<Mutex<LanguageServerClient>>)> { workspace_root .clone() .map(|r| r.into_string()) .or_else(|| { let config = &self.config.language_config[language_id]; if config.supports_single_file { // A generic client is the one that supports single files i.e. // Non-Workspace projects as well Some(String::from("generic")) } else { None } }) .and_then(|language_server_identifier| { let contains = self.language_server_clients.contains_key(&language_server_identifier); if contains { let client = self.language_server_clients[&language_server_identifier].clone(); Some((language_server_identifier, client)) } else { let config = &self.config.language_config[language_id]; let client = start_new_server( config.start_command.clone(), config.start_arguments.clone(), config.extensions.clone(), language_id, // Unwrap is safe self.core.clone().unwrap(), self.result_queue.clone(), ); match client { Ok(client) => { let client_clone = client.clone();
Err(err) => {
    error!(
        "Error occurred while starting server for Language: {}: {:?}",
        language_id, err
    );
    None
}
            }
        }
    })
}

/// Tries to get language for the View using the extension of the document.
/// Only searches for the languages supported by the Language Plugin as
/// defined in the config
fn get_language_for_view(&mut self, view: &View<ChunkCache>) -> Option<String> {
    view.get_path()
        .and_then(|path| path.extension())
        .and_then(|extension| extension.to_str())
        .and_then(|extension_str| {
            for (lang, config) in &self.config.language_config {
                if config.extensions.iter().any(|x| x == extension_str) {
                    return Some(lang.clone());
                }
            }
            None
        })
}

fn with_language_server_for_view<F, R>(&mut self, view: &View<ChunkCache>, f: F) -> Option<R>
where
    F: FnOnce(&mut LanguageServerClient) -> R,
{
    let view_info = self.view_info.get_mut(&view.get_id())?;
    let ls_client_arc = &self.language_server_clients[&view_info.ls_identifier];
    let mut ls_client = ls_client_arc.lock().unwrap();
    Some(f(&mut ls_client))
}
}
self.language_server_clients
    .insert(language_server_identifier.clone(), client);
Some((language_server_identifier, client_clone))
}
random_line_split
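Each record in this dump pairs a file name with a prefix, a suffix, the held-out middle, and a fim_type label such as conditional_block, random_line_split, identifier_body, or identifier_name. Under the assumption that the original source is simply the concatenation prefix + middle + suffix, a minimal sketch of reassembling a record and scoring a proposed infill by exact match follows; the FimExample struct and its method names are invented for illustration only.

// Hypothetical record shape mirroring the fields shown in this dump.
struct FimExample {
    file_name: String,
    prefix: String,
    suffix: String,
    middle: String,
    fim_type: String,
}

impl FimExample {
    // The original file is the prefix, the held-out middle, then the suffix.
    fn reassemble(&self) -> String {
        format!("{}{}{}", self.prefix, self.middle, self.suffix)
    }

    // Exact-match scoring of a model's proposed infill against the gold middle.
    fn exact_match(&self, completion: &str) -> bool {
        completion.trim_end() == self.middle.trim_end()
    }
}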
lsp_plugin.rs
// Copyright 2018 The xi-editor Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Implementation of Language Server Plugin use std::collections::HashMap; use std::path::Path; use std::sync::{Arc, Mutex}; use url::Url; use xi_plugin_lib::{ChunkCache, CoreProxy, Plugin, View}; use xi_rope::rope::RopeDelta; use crate::conversion_utils::*; use crate::language_server_client::LanguageServerClient; use crate::lsp_types::*; use crate::result_queue::ResultQueue; use crate::types::{Config, LanguageResponseError, LspResponse}; use crate::utils::*; use crate::xi_core::{ConfigTable, ViewId}; pub struct ViewInfo { version: u64, ls_identifier: String, } /// Represents the state of the Language Server Plugin pub struct LspPlugin { pub config: Config, view_info: HashMap<ViewId, ViewInfo>, core: Option<CoreProxy>, result_queue: ResultQueue, language_server_clients: HashMap<String, Arc<Mutex<LanguageServerClient>>>, } impl LspPlugin { pub fn new(config: Config) -> Self { LspPlugin { config, core: None, result_queue: ResultQueue::new(), view_info: HashMap::new(), language_server_clients: HashMap::new(), } } } impl Plugin for LspPlugin { type Cache = ChunkCache; fn initialize(&mut self, core: CoreProxy) { self.core = Some(core) } fn update( &mut self, view: &mut View<Self::Cache>, delta: Option<&RopeDelta>, _edit_type: String, _author: String, ) { let view_info = self.view_info.get_mut(&view.get_id()); if let Some(view_info) = view_info { // This won't fail since we definitely have a client for the given // client identifier let ls_client = &self.language_server_clients[&view_info.ls_identifier]; let mut ls_client = ls_client.lock().unwrap(); let sync_kind = ls_client.get_sync_kind(); view_info.version += 1; if let Some(changes) = get_change_for_sync_kind(sync_kind, view, delta) { ls_client.send_did_change(view.get_id(), changes, view_info.version); } } } fn did_save(&mut self, view: &mut View<Self::Cache>, _old: Option<&Path>) { trace!("saved view {}", view.get_id()); let document_text = view.get_document().unwrap(); self.with_language_server_for_view(view, |ls_client| { ls_client.send_did_save(view.get_id(), &document_text); }); } fn did_close(&mut self, view: &View<Self::Cache>)
fn new_view(&mut self, view: &mut View<Self::Cache>) { trace!("new view {}", view.get_id()); let document_text = view.get_document().unwrap(); let path = view.get_path(); let view_id = view.get_id(); // TODO: Use Language Idenitifier assigned by core when the // implementation is settled if let Some(language_id) = self.get_language_for_view(view) { let path = path.unwrap(); let workspace_root_uri = { let config = &self.config.language_config.get_mut(&language_id).unwrap(); config.workspace_identifier.clone().and_then(|identifier| { let path = view.get_path().unwrap(); let q = get_workspace_root_uri(&identifier, path); q.ok() }) }; let result = self.get_lsclient_from_workspace_root(&language_id, &workspace_root_uri); if let Some((identifier, ls_client)) = result { self.view_info .insert(view.get_id(), ViewInfo { version: 0, ls_identifier: identifier }); let mut ls_client = ls_client.lock().unwrap(); let document_uri = Url::from_file_path(path).unwrap(); if!ls_client.is_initialized { ls_client.send_initialize(workspace_root_uri, move |ls_client, result| { if let Ok(result) = result { let init_result: InitializeResult = serde_json::from_value(result).unwrap(); debug!("Init Result: {:?}", init_result); ls_client.server_capabilities = Some(init_result.capabilities); ls_client.is_initialized = true; ls_client.send_did_open(view_id, document_uri, document_text); } }); } else { ls_client.send_did_open(view_id, document_uri, document_text); } } } } fn config_changed(&mut self, _view: &mut View<Self::Cache>, _changes: &ConfigTable) {} fn get_hover(&mut self, view: &mut View<Self::Cache>, request_id: usize, position: usize) { let view_id = view.get_id(); let position_ls = get_position_of_offset(view, position); self.with_language_server_for_view(view, |ls_client| match position_ls { Ok(position) => ls_client.request_hover(view_id, position, move |ls_client, result| { let res = result .map_err(|e| LanguageResponseError::LanguageServerError(format!("{:?}", e))) .and_then(|h| { let hover: Option<Hover> = serde_json::from_value(h).unwrap(); hover.ok_or(LanguageResponseError::NullResponse) }); ls_client.result_queue.push_result(request_id, LspResponse::Hover(res)); ls_client.core.schedule_idle(view_id); }), Err(err) => { ls_client.result_queue.push_result(request_id, LspResponse::Hover(Err(err.into()))); ls_client.core.schedule_idle(view_id); } }); } fn idle(&mut self, view: &mut View<Self::Cache>) { let result = self.result_queue.pop_result(); if let Some((request_id, reponse)) = result { match reponse { LspResponse::Hover(res) => { let res = res.and_then(|h| core_hover_from_hover(view, h)).map_err(|e| e.into()); self.with_language_server_for_view(view, |ls_client| { ls_client.core.display_hover(view.get_id(), request_id, &res) }); } } } } } /// Util Methods impl LspPlugin { /// Get the Language Server Client given the Workspace root /// This method checks if a language server is running at the specified root /// and returns it else it tries to spawn a new language server and returns a /// Arc reference to it fn get_lsclient_from_workspace_root( &mut self, language_id: &str, workspace_root: &Option<Url>, ) -> Option<(String, Arc<Mutex<LanguageServerClient>>)> { workspace_root .clone() .map(|r| r.into_string()) .or_else(|| { let config = &self.config.language_config[language_id]; if config.supports_single_file { // A generic client is the one that supports single files i.e. 
// Non-Workspace projects as well Some(String::from("generic")) } else { None } }) .and_then(|language_server_identifier| { let contains = self.language_server_clients.contains_key(&language_server_identifier); if contains { let client = self.language_server_clients[&language_server_identifier].clone(); Some((language_server_identifier, client)) } else { let config = &self.config.language_config[language_id]; let client = start_new_server( config.start_command.clone(), config.start_arguments.clone(), config.extensions.clone(), language_id, // Unwrap is safe self.core.clone().unwrap(), self.result_queue.clone(), ); match client { Ok(client) => { let client_clone = client.clone(); self.language_server_clients .insert(language_server_identifier.clone(), client); Some((language_server_identifier, client_clone)) } Err(err) => { error!( "Error occured while starting server for Language: {}: {:?}", language_id, err ); None } } } }) } /// Tries to get language for the View using the extension of the document. /// Only searches for the languages supported by the Language Plugin as /// defined in the config fn get_language_for_view(&mut self, view: &View<ChunkCache>) -> Option<String> { view.get_path() .and_then(|path| path.extension()) .and_then(|extension| extension.to_str()) .and_then(|extension_str| { for (lang, config) in &self.config.language_config { if config.extensions.iter().any(|x| x == extension_str) { return Some(lang.clone()); } } None }) } fn with_language_server_for_view<F, R>(&mut self, view: &View<ChunkCache>, f: F) -> Option<R> where F: FnOnce(&mut LanguageServerClient) -> R, { let view_info = self.view_info.get_mut(&view.get_id())?; let ls_client_arc = &self.language_server_clients[&view_info.ls_identifier]; let mut ls_client = ls_client_arc.lock().unwrap(); Some(f(&mut ls_client)) } }
{ trace!("close view {}", view.get_id()); self.with_language_server_for_view(view, |ls_client| { ls_client.send_did_close(view.get_id()); }); }
identifier_body
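The get_language_for_view method in the records above resolves a view's language by matching the document's file extension against each configured language's extension list. Below is a small self-contained sketch of that lookup over a plain HashMap; the simplified LanguageConfig and the function name are assumptions, not the plugin's actual types.

use std::collections::HashMap;
use std::path::Path;

// Assumed, simplified stand-in for the plugin's per-language configuration.
struct LanguageConfig {
    extensions: Vec<String>,
}

fn language_for_path(
    configs: &HashMap<String, LanguageConfig>,
    path: &Path,
) -> Option<String> {
    // Bail out early if the path has no usable extension.
    let ext = path.extension()?.to_str()?;
    configs
        .iter()
        .find(|(_, cfg)| cfg.extensions.iter().any(|e| e == ext))
        .map(|(lang, _)| lang.clone())
}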
sphere.rs
// Copyright 2017 Dasein Phaos aka. Luxko // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use geometry::prelude::*; use super::Shape; use std; use serde; use serde::{Serialize, Deserialize}; use serde::ser::{Serializer, SerializeStruct}; use serde::de::{Deserializer, MapAccess, SeqAccess, Visitor}; /// A (possibly-partial) sphere, as a geometry definition #[derive(Copy, Clone, PartialEq)] pub struct Sphere { /// The radius of the sphere pub radius: Float, /// The lower bound xy-plane. Points with `z<zmin` being excluded. pub zmin: Float, /// The upper bound xy-plane. Points with `z>zmax` being excluded. pub zmax: Float, /// The maximum `phi`. Points with `phi>phimax` being excluded. pub phimax: Float, // These two are updated accordingly when `zmin` or `zmax` changes. thetamin: Float, thetamax: Float, } impl Serialize for Sphere { fn serialize<S: Serializer>(&self, s: S) -> Result<S::Ok, S::Error> { let mut state = s.serialize_struct("Sphere", 4)?; state.serialize_field("radius", &self.radius)?; state.serialize_field("zmin", &self.zmin)?; state.serialize_field("zmax", &self.zmax)?; state.serialize_field("phimax", &self.phimax)?; state.end() } } impl<'de> Deserialize<'de> for Sphere { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de> { #[derive(Deserialize)] #[serde(field_identifier, rename_all = "lowercase")] enum Field { Radius, Zmin, Zmax, Phimax } struct SamplerVisitor; impl<'de> Visitor<'de> for SamplerVisitor { type Value = Sphere; fn expecting(&self, fmter: &mut std::fmt::Formatter) -> std::fmt::Result { fmter.write_str("struct Sphere") } fn visit_seq<V>(self, mut seq: V) -> Result<Self::Value, V::Error> where V: SeqAccess<'de> { let radius = seq.next_element()? .ok_or_else(|| serde::de::Error::invalid_length(0, &self))?; let zmin = seq.next_element()? .ok_or_else(|| serde::de::Error::invalid_length(1, &self))?; let zmax = seq.next_element()? .ok_or_else(|| serde::de::Error::invalid_length(2, &self))?; let phimax = seq.next_element()? .ok_or_else(|| serde::de::Error::invalid_length(3, &self))?; Ok(Sphere::new(radius, zmin, zmax, phimax)) } fn visit_map<V>(self, mut map: V) -> Result<Self::Value, V::Error> where V: MapAccess<'de> { let mut radius = None; let mut zmin = None; let mut zmax = None; let mut phimax = None; while let Some(key) = map.next_key()? 
{ match key { Field::Radius => { if radius.is_some() { return Err(serde::de::Error::duplicate_field("radius")); } radius = Some(map.next_value()?); } Field::Zmin => { if zmin.is_some() { return Err(serde::de::Error::duplicate_field("zmin")); } zmin = Some(map.next_value()?); } Field::Zmax => { if zmax.is_some() { return Err(serde::de::Error::duplicate_field("zmax")); } zmax = Some(map.next_value()?); } Field::Phimax => { if phimax.is_some() { return Err(serde::de::Error::duplicate_field("phimax")); } phimax = Some(map.next_value()?); } } } let radius = radius.ok_or_else(|| serde::de::Error::missing_field("radius") )?; let zmin = zmin.ok_or_else(|| serde::de::Error::missing_field("zmin") )?; let zmax = zmax.ok_or_else(|| serde::de::Error::missing_field("znear") )?; let phimax = phimax.ok_or_else(|| serde::de::Error::missing_field("zfar") )?; Ok(Sphere::new( radius, zmin, zmax, phimax )) } } const FIELDS: &[&str] = &["transform", "screen", "znear", "zfar", "fov", "lens", "film"]; deserializer.deserialize_struct("Sphere", FIELDS, SamplerVisitor) } } impl Sphere { /// Constructs a new `Sphere`. pub fn new(radius: Float, mut zmin: Float, mut zmax: Float, mut phimax: Float) -> Sphere
thetamax: thetamax, phimax: phimax, } } /// Constructs a full sphere #[inline] pub fn full(radius: Float) -> Sphere { Sphere::new(radius, -radius, radius, float::pi() * (2.0 as Float)) } /// returns the local space bounding box #[inline] pub fn bounding(&self) -> BBox3f { BBox3f::new( Point3f::new(-self.radius, -self.radius, self.zmin), Point3f::new(self.radius, self.radius, self.zmax) ) } // /// test intersection in local frame, returns `t` when first hit // #[inline] // pub fn intersect_ray(&self, ray: &RawRay) -> Option<Float> // { // if let Some(t) = Sphere::intersect_ray_full(self.radius, ray) { // let p = ray.evaluate(t); // // TODO: refine sphere intersection // let mut phi = p.y.atan2(p.x); // if phi < (0.0 as Float) { phi += (2.0 as Float) * float::pi(); } // if p.z < self.zmin || p.z > self.zmax || phi > self.phimax { // None // } else { // Some(t) // } // } else { // None // } // } /// test intersection against the full sphere pub fn intersect_ray_full(radius: Float, ray: &RawRay) -> Option<Float> { let origin = ray.origin().to_vec(); let direction = ray.direction(); let a = direction.magnitude2(); let b = (direction.mul_element_wise(origin) * (2.0 as Float)).sum(); let c = origin.magnitude2() - radius * radius; let delta = b* b - (4.0 as Float) * a * c; if delta < (0.0 as Float) { return None; } let invert_2a = (1.0 as Float) / ((2.0 as Float) * a); let d1 = delta.sqrt() * invert_2a; let d0 = -b * invert_2a; let(t0, t1) = if invert_2a > 0.0 as Float { (d0-d1, d0+d1) } else { (d0+d1, d0-d1) }; let tmax = ray.max_extend(); if t0 > tmax || t1 < (0.0 as Float) { return None; } if t0 > (0.0 as Float) { Some(t0) } else if t1 > tmax { None } else { Some(t1) } } } impl Shape for Sphere { #[inline] fn bbox_local(&self) -> BBox3f { self.bounding() } #[inline] fn intersect_ray(&self, ray: &RawRay) -> Option<(Float, SurfaceInteraction)> { if let Some(t) = Sphere::intersect_ray_full(self.radius, &ray) { let mut p = ray.evaluate(t).to_vec(); // refine sphere intersection p = p* self.radius / p.magnitude(); if p.x == 0.0 as Float && p.y == 0.0 as Float { p.x = 1e-5 as Float * self.radius; } let p = Point3f::from_vec(p); let mut phi = p.y.atan2(p.x); if phi < (0.0 as Float) { phi += (2.0 as Float) * float::pi(); } // TODO: refine test against clipping if p.z < self.zmin || p.z > self.zmax || phi > self.phimax { None } else { let phimax = self.phimax; let thetamax = self.thetamax; let thetamin = self.thetamin; let thetadelta = thetamax - thetamin; let u = phi / phimax; let theta = (p.z / self.radius).acos(); let v = (theta - thetamin) / thetadelta; let inv_z_radius = (1.0 as Float) / (p.x * p.x + p.y * p.y).sqrt(); let cos_phi = p.x * inv_z_radius; let sin_phi = p.y * inv_z_radius; let dpdu = Vector3f::new(-phimax * p.y, phimax * p.x, 0.0 as Float); let dpdv = thetadelta * Vector3f::new(p.z * cos_phi, p.z * sin_phi, -self.radius * theta.sin()); let (dndu, dndv) = { let dppduu = - phimax * phimax * Vector3f::new(p.x, p.y, 0.0 as Float); let dppduv = thetadelta * p.z * phimax * Vector3f::new(-sin_phi, cos_phi, 0.0 as Float); let dppdvv = -thetadelta * thetadelta * Vector3f::new(p.x, p.y, p.z); let e = dpdu.dot(dpdu); let f = dpdu.dot(dpdv); let g = dpdv.dot(dpdv); let n = dpdu.cross(dpdv).normalize(); let ee = n.dot(dppduu); let ff = n.dot(dppduv); let gg = n.dot(dppdvv); let inv = (1.0 as Float) / (e * g - f * f); ( (ff*f - ee*g) * inv * dpdu + (ee*f - ff*e) * inv * dpdv, (gg*f - ff*g) * inv * dpdu + (ff*f - gg*e) * inv * dpdv ) }; Some(( t, SurfaceInteraction::new( p, // FIXME: wrong 
Vector3f::zero(), -ray.direction(), Point2f::new(u, v), DuvInfo{ dpdu: dpdu, dpdv: dpdv, dndu: dndu, dndv: dndv, }, ) )) } } else { None } } #[inline] fn surface_area(&self) -> Float { self.phimax * self.radius * (self.zmax - self.zmin) } fn sample(&self, sample: Point2f) -> (Point3f, Vector3f, Float) { // sample.x scaled to [0, phimax] let phi = sample.x * self.phimax; // sample.y scaled to [thetamin, thetamax] let theta = sample.y * (self.thetamax - self.thetamin) + self.thetamin; let dir = Sphericalf::new(theta, phi).to_vec(); let pos = Point3f::from_vec(dir*self.radius); (pos, dir, 1. as Float / self.surface_area()) // use sample::sample_uniform_sphere; // let dir = sample_uniform_sphere(sample); // let pos = Point3f::from_vec(dir*self.radius); // (pos, dir, 1. as Float / self.surface_area()) } }
{
    assert!(radius > (0.0 as Float), "Sphere radius should be positive");
    assert!(zmin < zmax, "zmin should be lower than zmax");
    if zmin < -radius { zmin = -radius; }
    if zmax > radius { zmax = radius; }
    if phimax < (0.0 as Float) { phimax = 0.0 as Float; }
    let twopi = float::pi() * (2.0 as Float);
    if phimax > twopi { phimax = twopi; }
    // TODO: double check
    let thetamin = (zmin/radius).acos();
    let thetamax = (zmax/radius).acos();
    Sphere {
        radius: radius,
        zmin: zmin,
        zmax: zmax,
        thetamin: thetamin,
identifier_body
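The identifier_body above (the body of Sphere::new) clamps zmin, zmax and phimax into range and then derives the polar-angle bounds from z = r * cos(theta), i.e. theta = acos(z / r). A standalone sketch of just that step follows, using f64 where the crate uses its Float alias and a hypothetical function name. Because acos is decreasing on [-1, 1], the value computed from zmin is actually the larger angle, which may be what the snippet's "// TODO: double check" refers to.

// Clamp partial-sphere parameters and derive theta at each z bound,
// assuming points on the sphere satisfy z = radius * cos(theta).
fn theta_at_z_bounds(radius: f64, zmin: f64, zmax: f64) -> (f64, f64) {
    assert!(radius > 0.0, "radius must be positive");
    assert!(zmin < zmax, "zmin must be below zmax");
    let zmin = zmin.max(-radius);
    let zmax = zmax.min(radius);
    // Mirrors the constructor: theta evaluated at zmin, then at zmax.
    // Note that acos is decreasing, so the first value is the larger angle.
    ((zmin / radius).acos(), (zmax / radius).acos())
}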
sphere.rs
// Copyright 2017 Dasein Phaos aka. Luxko // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use geometry::prelude::*; use super::Shape; use std; use serde; use serde::{Serialize, Deserialize}; use serde::ser::{Serializer, SerializeStruct}; use serde::de::{Deserializer, MapAccess, SeqAccess, Visitor}; /// A (possibly-partial) sphere, as a geometry definition #[derive(Copy, Clone, PartialEq)] pub struct Sphere { /// The radius of the sphere pub radius: Float, /// The lower bound xy-plane. Points with `z<zmin` being excluded. pub zmin: Float, /// The upper bound xy-plane. Points with `z>zmax` being excluded. pub zmax: Float, /// The maximum `phi`. Points with `phi>phimax` being excluded. pub phimax: Float, // These two are updated accordingly when `zmin` or `zmax` changes. thetamin: Float, thetamax: Float, } impl Serialize for Sphere { fn serialize<S: Serializer>(&self, s: S) -> Result<S::Ok, S::Error> { let mut state = s.serialize_struct("Sphere", 4)?; state.serialize_field("radius", &self.radius)?; state.serialize_field("zmin", &self.zmin)?; state.serialize_field("zmax", &self.zmax)?; state.serialize_field("phimax", &self.phimax)?; state.end() } } impl<'de> Deserialize<'de> for Sphere { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de> { #[derive(Deserialize)] #[serde(field_identifier, rename_all = "lowercase")] enum Field { Radius, Zmin, Zmax, Phimax } struct SamplerVisitor; impl<'de> Visitor<'de> for SamplerVisitor { type Value = Sphere; fn expecting(&self, fmter: &mut std::fmt::Formatter) -> std::fmt::Result { fmter.write_str("struct Sphere") } fn visit_seq<V>(self, mut seq: V) -> Result<Self::Value, V::Error> where V: SeqAccess<'de> { let radius = seq.next_element()? .ok_or_else(|| serde::de::Error::invalid_length(0, &self))?; let zmin = seq.next_element()? .ok_or_else(|| serde::de::Error::invalid_length(1, &self))?; let zmax = seq.next_element()? .ok_or_else(|| serde::de::Error::invalid_length(2, &self))?; let phimax = seq.next_element()? .ok_or_else(|| serde::de::Error::invalid_length(3, &self))?; Ok(Sphere::new(radius, zmin, zmax, phimax)) } fn visit_map<V>(self, mut map: V) -> Result<Self::Value, V::Error> where V: MapAccess<'de> { let mut radius = None; let mut zmin = None; let mut zmax = None; let mut phimax = None; while let Some(key) = map.next_key()? 
{ match key { Field::Radius => { if radius.is_some() { return Err(serde::de::Error::duplicate_field("radius")); } radius = Some(map.next_value()?); } Field::Zmin => { if zmin.is_some() { return Err(serde::de::Error::duplicate_field("zmin")); } zmin = Some(map.next_value()?); } Field::Zmax => { if zmax.is_some() { return Err(serde::de::Error::duplicate_field("zmax")); } zmax = Some(map.next_value()?); } Field::Phimax => { if phimax.is_some() { return Err(serde::de::Error::duplicate_field("phimax")); } phimax = Some(map.next_value()?); } } } let radius = radius.ok_or_else(|| serde::de::Error::missing_field("radius") )?; let zmin = zmin.ok_or_else(|| serde::de::Error::missing_field("zmin") )?; let zmax = zmax.ok_or_else(|| serde::de::Error::missing_field("znear") )?; let phimax = phimax.ok_or_else(|| serde::de::Error::missing_field("zfar") )?; Ok(Sphere::new( radius, zmin, zmax, phimax )) } } const FIELDS: &[&str] = &["transform", "screen", "znear", "zfar", "fov", "lens", "film"]; deserializer.deserialize_struct("Sphere", FIELDS, SamplerVisitor) } } impl Sphere { /// Constructs a new `Sphere`. pub fn new(radius: Float, mut zmin: Float, mut zmax: Float, mut phimax: Float) -> Sphere { assert!(radius>(0.0 as Float), "Sphere radius should be positive"); assert!(zmin<zmax, "zmin should be lower than zmax"); if zmin < -radius { zmin = -radius; } if zmax > radius { zmax = radius; } if phimax < (0.0 as Float) { phimax = 0.0 as Float; } let twopi = float::pi() * (2.0 as Float); if phimax > twopi { phimax = twopi; } // TODO: double check let thetamin = (zmin/radius).acos(); let thetamax = (zmax/radius).acos(); Sphere { radius: radius, zmin: zmin, zmax: zmax, thetamin: thetamin, thetamax: thetamax, phimax: phimax, } } /// Constructs a full sphere #[inline] pub fn full(radius: Float) -> Sphere { Sphere::new(radius, -radius, radius, float::pi() * (2.0 as Float)) } /// returns the local space bounding box #[inline] pub fn bounding(&self) -> BBox3f { BBox3f::new( Point3f::new(-self.radius, -self.radius, self.zmin),
// #[inline] // pub fn intersect_ray(&self, ray: &RawRay) -> Option<Float> // { // if let Some(t) = Sphere::intersect_ray_full(self.radius, ray) { // let p = ray.evaluate(t); // // TODO: refine sphere intersection // let mut phi = p.y.atan2(p.x); // if phi < (0.0 as Float) { phi += (2.0 as Float) * float::pi(); } // if p.z < self.zmin || p.z > self.zmax || phi > self.phimax { // None // } else { // Some(t) // } // } else { // None // } // } /// test intersection against the full sphere pub fn intersect_ray_full(radius: Float, ray: &RawRay) -> Option<Float> { let origin = ray.origin().to_vec(); let direction = ray.direction(); let a = direction.magnitude2(); let b = (direction.mul_element_wise(origin) * (2.0 as Float)).sum(); let c = origin.magnitude2() - radius * radius; let delta = b* b - (4.0 as Float) * a * c; if delta < (0.0 as Float) { return None; } let invert_2a = (1.0 as Float) / ((2.0 as Float) * a); let d1 = delta.sqrt() * invert_2a; let d0 = -b * invert_2a; let(t0, t1) = if invert_2a > 0.0 as Float { (d0-d1, d0+d1) } else { (d0+d1, d0-d1) }; let tmax = ray.max_extend(); if t0 > tmax || t1 < (0.0 as Float) { return None; } if t0 > (0.0 as Float) { Some(t0) } else if t1 > tmax { None } else { Some(t1) } } } impl Shape for Sphere { #[inline] fn bbox_local(&self) -> BBox3f { self.bounding() } #[inline] fn intersect_ray(&self, ray: &RawRay) -> Option<(Float, SurfaceInteraction)> { if let Some(t) = Sphere::intersect_ray_full(self.radius, &ray) { let mut p = ray.evaluate(t).to_vec(); // refine sphere intersection p = p* self.radius / p.magnitude(); if p.x == 0.0 as Float && p.y == 0.0 as Float { p.x = 1e-5 as Float * self.radius; } let p = Point3f::from_vec(p); let mut phi = p.y.atan2(p.x); if phi < (0.0 as Float) { phi += (2.0 as Float) * float::pi(); } // TODO: refine test against clipping if p.z < self.zmin || p.z > self.zmax || phi > self.phimax { None } else { let phimax = self.phimax; let thetamax = self.thetamax; let thetamin = self.thetamin; let thetadelta = thetamax - thetamin; let u = phi / phimax; let theta = (p.z / self.radius).acos(); let v = (theta - thetamin) / thetadelta; let inv_z_radius = (1.0 as Float) / (p.x * p.x + p.y * p.y).sqrt(); let cos_phi = p.x * inv_z_radius; let sin_phi = p.y * inv_z_radius; let dpdu = Vector3f::new(-phimax * p.y, phimax * p.x, 0.0 as Float); let dpdv = thetadelta * Vector3f::new(p.z * cos_phi, p.z * sin_phi, -self.radius * theta.sin()); let (dndu, dndv) = { let dppduu = - phimax * phimax * Vector3f::new(p.x, p.y, 0.0 as Float); let dppduv = thetadelta * p.z * phimax * Vector3f::new(-sin_phi, cos_phi, 0.0 as Float); let dppdvv = -thetadelta * thetadelta * Vector3f::new(p.x, p.y, p.z); let e = dpdu.dot(dpdu); let f = dpdu.dot(dpdv); let g = dpdv.dot(dpdv); let n = dpdu.cross(dpdv).normalize(); let ee = n.dot(dppduu); let ff = n.dot(dppduv); let gg = n.dot(dppdvv); let inv = (1.0 as Float) / (e * g - f * f); ( (ff*f - ee*g) * inv * dpdu + (ee*f - ff*e) * inv * dpdv, (gg*f - ff*g) * inv * dpdu + (ff*f - gg*e) * inv * dpdv ) }; Some(( t, SurfaceInteraction::new( p, // FIXME: wrong Vector3f::zero(), -ray.direction(), Point2f::new(u, v), DuvInfo{ dpdu: dpdu, dpdv: dpdv, dndu: dndu, dndv: dndv, }, ) )) } } else { None } } #[inline] fn surface_area(&self) -> Float { self.phimax * self.radius * (self.zmax - self.zmin) } fn sample(&self, sample: Point2f) -> (Point3f, Vector3f, Float) { // sample.x scaled to [0, phimax] let phi = sample.x * self.phimax; // sample.y scaled to [thetamin, thetamax] let theta = sample.y * (self.thetamax - self.thetamin) 
+ self.thetamin; let dir = Sphericalf::new(theta, phi).to_vec(); let pos = Point3f::from_vec(dir*self.radius); (pos, dir, 1. as Float / self.surface_area()) // use sample::sample_uniform_sphere; // let dir = sample_uniform_sphere(sample); // let pos = Point3f::from_vec(dir*self.radius); // (pos, dir, 1. as Float / self.surface_area()) } }
            Point3f::new(self.radius, self.radius, self.zmax)
        )
    }

    // /// test intersection in local frame, returns `t` when first hit
random_line_split
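intersect_ray_full in these sphere.rs records reduces the ray-sphere test to a quadratic: substituting p = o + t*d into |p|^2 = r^2 gives a*t^2 + b*t + c = 0 with a = d.d, b = 2*o.d and c = o.o - r^2. The sketch below is a plain-f64, array-based version of that test under an invented name; like the snippet, it returns the nearest root inside [0, t_max].

fn dot(a: [f64; 3], b: [f64; 3]) -> f64 {
    a[0] * b[0] + a[1] * b[1] + a[2] * b[2]
}

// Nearest hit of the ray o + t*d with a sphere of radius r centred at the
// origin, restricted to 0 <= t <= t_max.
fn ray_sphere(o: [f64; 3], d: [f64; 3], r: f64, t_max: f64) -> Option<f64> {
    let a = dot(d, d);
    let b = 2.0 * dot(o, d);
    let c = dot(o, o) - r * r;
    let delta = b * b - 4.0 * a * c;
    if delta < 0.0 {
        return None; // the ray misses the sphere entirely
    }
    let sqrt_delta = delta.sqrt();
    let t0 = (-b - sqrt_delta) / (2.0 * a); // nearer root (a > 0)
    let t1 = (-b + sqrt_delta) / (2.0 * a); // farther root
    if t0 > t_max || t1 < 0.0 {
        return None; // both hits lie outside the allowed range
    }
    if t0 >= 0.0 {
        Some(t0)
    } else if t1 <= t_max {
        Some(t1) // origin is inside the sphere, exit point is the hit
    } else {
        None
    }
}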
sphere.rs
// Copyright 2017 Dasein Phaos aka. Luxko // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use geometry::prelude::*; use super::Shape; use std; use serde; use serde::{Serialize, Deserialize}; use serde::ser::{Serializer, SerializeStruct}; use serde::de::{Deserializer, MapAccess, SeqAccess, Visitor}; /// A (possibly-partial) sphere, as a geometry definition #[derive(Copy, Clone, PartialEq)] pub struct Sphere { /// The radius of the sphere pub radius: Float, /// The lower bound xy-plane. Points with `z<zmin` being excluded. pub zmin: Float, /// The upper bound xy-plane. Points with `z>zmax` being excluded. pub zmax: Float, /// The maximum `phi`. Points with `phi>phimax` being excluded. pub phimax: Float, // These two are updated accordingly when `zmin` or `zmax` changes. thetamin: Float, thetamax: Float, } impl Serialize for Sphere { fn serialize<S: Serializer>(&self, s: S) -> Result<S::Ok, S::Error> { let mut state = s.serialize_struct("Sphere", 4)?; state.serialize_field("radius", &self.radius)?; state.serialize_field("zmin", &self.zmin)?; state.serialize_field("zmax", &self.zmax)?; state.serialize_field("phimax", &self.phimax)?; state.end() } } impl<'de> Deserialize<'de> for Sphere { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de> { #[derive(Deserialize)] #[serde(field_identifier, rename_all = "lowercase")] enum Field { Radius, Zmin, Zmax, Phimax } struct SamplerVisitor; impl<'de> Visitor<'de> for SamplerVisitor { type Value = Sphere; fn expecting(&self, fmter: &mut std::fmt::Formatter) -> std::fmt::Result { fmter.write_str("struct Sphere") } fn visit_seq<V>(self, mut seq: V) -> Result<Self::Value, V::Error> where V: SeqAccess<'de> { let radius = seq.next_element()? .ok_or_else(|| serde::de::Error::invalid_length(0, &self))?; let zmin = seq.next_element()? .ok_or_else(|| serde::de::Error::invalid_length(1, &self))?; let zmax = seq.next_element()? .ok_or_else(|| serde::de::Error::invalid_length(2, &self))?; let phimax = seq.next_element()? .ok_or_else(|| serde::de::Error::invalid_length(3, &self))?; Ok(Sphere::new(radius, zmin, zmax, phimax)) } fn visit_map<V>(self, mut map: V) -> Result<Self::Value, V::Error> where V: MapAccess<'de> { let mut radius = None; let mut zmin = None; let mut zmax = None; let mut phimax = None; while let Some(key) = map.next_key()? 
{ match key { Field::Radius => { if radius.is_some() { return Err(serde::de::Error::duplicate_field("radius")); } radius = Some(map.next_value()?); } Field::Zmin => { if zmin.is_some() { return Err(serde::de::Error::duplicate_field("zmin")); } zmin = Some(map.next_value()?); } Field::Zmax => { if zmax.is_some() { return Err(serde::de::Error::duplicate_field("zmax")); } zmax = Some(map.next_value()?); } Field::Phimax => { if phimax.is_some() { return Err(serde::de::Error::duplicate_field("phimax")); } phimax = Some(map.next_value()?); } } } let radius = radius.ok_or_else(|| serde::de::Error::missing_field("radius") )?; let zmin = zmin.ok_or_else(|| serde::de::Error::missing_field("zmin") )?; let zmax = zmax.ok_or_else(|| serde::de::Error::missing_field("znear") )?; let phimax = phimax.ok_or_else(|| serde::de::Error::missing_field("zfar") )?; Ok(Sphere::new( radius, zmin, zmax, phimax )) } } const FIELDS: &[&str] = &["transform", "screen", "znear", "zfar", "fov", "lens", "film"]; deserializer.deserialize_struct("Sphere", FIELDS, SamplerVisitor) } } impl Sphere { /// Constructs a new `Sphere`. pub fn new(radius: Float, mut zmin: Float, mut zmax: Float, mut phimax: Float) -> Sphere { assert!(radius>(0.0 as Float), "Sphere radius should be positive"); assert!(zmin<zmax, "zmin should be lower than zmax"); if zmin < -radius { zmin = -radius; } if zmax > radius { zmax = radius; } if phimax < (0.0 as Float) { phimax = 0.0 as Float; } let twopi = float::pi() * (2.0 as Float); if phimax > twopi { phimax = twopi; } // TODO: double check let thetamin = (zmin/radius).acos(); let thetamax = (zmax/radius).acos(); Sphere { radius: radius, zmin: zmin, zmax: zmax, thetamin: thetamin, thetamax: thetamax, phimax: phimax, } } /// Constructs a full sphere #[inline] pub fn full(radius: Float) -> Sphere { Sphere::new(radius, -radius, radius, float::pi() * (2.0 as Float)) } /// returns the local space bounding box #[inline] pub fn bounding(&self) -> BBox3f { BBox3f::new( Point3f::new(-self.radius, -self.radius, self.zmin), Point3f::new(self.radius, self.radius, self.zmax) ) } // /// test intersection in local frame, returns `t` when first hit // #[inline] // pub fn intersect_ray(&self, ray: &RawRay) -> Option<Float> // { // if let Some(t) = Sphere::intersect_ray_full(self.radius, ray) { // let p = ray.evaluate(t); // // TODO: refine sphere intersection // let mut phi = p.y.atan2(p.x); // if phi < (0.0 as Float) { phi += (2.0 as Float) * float::pi(); } // if p.z < self.zmin || p.z > self.zmax || phi > self.phimax { // None // } else { // Some(t) // } // } else { // None // } // } /// test intersection against the full sphere pub fn
(radius: Float, ray: &RawRay) -> Option<Float> { let origin = ray.origin().to_vec(); let direction = ray.direction(); let a = direction.magnitude2(); let b = (direction.mul_element_wise(origin) * (2.0 as Float)).sum(); let c = origin.magnitude2() - radius * radius; let delta = b* b - (4.0 as Float) * a * c; if delta < (0.0 as Float) { return None; } let invert_2a = (1.0 as Float) / ((2.0 as Float) * a); let d1 = delta.sqrt() * invert_2a; let d0 = -b * invert_2a; let(t0, t1) = if invert_2a > 0.0 as Float { (d0-d1, d0+d1) } else { (d0+d1, d0-d1) }; let tmax = ray.max_extend(); if t0 > tmax || t1 < (0.0 as Float) { return None; } if t0 > (0.0 as Float) { Some(t0) } else if t1 > tmax { None } else { Some(t1) } } } impl Shape for Sphere { #[inline] fn bbox_local(&self) -> BBox3f { self.bounding() } #[inline] fn intersect_ray(&self, ray: &RawRay) -> Option<(Float, SurfaceInteraction)> { if let Some(t) = Sphere::intersect_ray_full(self.radius, &ray) { let mut p = ray.evaluate(t).to_vec(); // refine sphere intersection p = p* self.radius / p.magnitude(); if p.x == 0.0 as Float && p.y == 0.0 as Float { p.x = 1e-5 as Float * self.radius; } let p = Point3f::from_vec(p); let mut phi = p.y.atan2(p.x); if phi < (0.0 as Float) { phi += (2.0 as Float) * float::pi(); } // TODO: refine test against clipping if p.z < self.zmin || p.z > self.zmax || phi > self.phimax { None } else { let phimax = self.phimax; let thetamax = self.thetamax; let thetamin = self.thetamin; let thetadelta = thetamax - thetamin; let u = phi / phimax; let theta = (p.z / self.radius).acos(); let v = (theta - thetamin) / thetadelta; let inv_z_radius = (1.0 as Float) / (p.x * p.x + p.y * p.y).sqrt(); let cos_phi = p.x * inv_z_radius; let sin_phi = p.y * inv_z_radius; let dpdu = Vector3f::new(-phimax * p.y, phimax * p.x, 0.0 as Float); let dpdv = thetadelta * Vector3f::new(p.z * cos_phi, p.z * sin_phi, -self.radius * theta.sin()); let (dndu, dndv) = { let dppduu = - phimax * phimax * Vector3f::new(p.x, p.y, 0.0 as Float); let dppduv = thetadelta * p.z * phimax * Vector3f::new(-sin_phi, cos_phi, 0.0 as Float); let dppdvv = -thetadelta * thetadelta * Vector3f::new(p.x, p.y, p.z); let e = dpdu.dot(dpdu); let f = dpdu.dot(dpdv); let g = dpdv.dot(dpdv); let n = dpdu.cross(dpdv).normalize(); let ee = n.dot(dppduu); let ff = n.dot(dppduv); let gg = n.dot(dppdvv); let inv = (1.0 as Float) / (e * g - f * f); ( (ff*f - ee*g) * inv * dpdu + (ee*f - ff*e) * inv * dpdv, (gg*f - ff*g) * inv * dpdu + (ff*f - gg*e) * inv * dpdv ) }; Some(( t, SurfaceInteraction::new( p, // FIXME: wrong Vector3f::zero(), -ray.direction(), Point2f::new(u, v), DuvInfo{ dpdu: dpdu, dpdv: dpdv, dndu: dndu, dndv: dndv, }, ) )) } } else { None } } #[inline] fn surface_area(&self) -> Float { self.phimax * self.radius * (self.zmax - self.zmin) } fn sample(&self, sample: Point2f) -> (Point3f, Vector3f, Float) { // sample.x scaled to [0, phimax] let phi = sample.x * self.phimax; // sample.y scaled to [thetamin, thetamax] let theta = sample.y * (self.thetamax - self.thetamin) + self.thetamin; let dir = Sphericalf::new(theta, phi).to_vec(); let pos = Point3f::from_vec(dir*self.radius); (pos, dir, 1. as Float / self.surface_area()) // use sample::sample_uniform_sphere; // let dir = sample_uniform_sphere(sample); // let pos = Point3f::from_vec(dir*self.radius); // (pos, dir, 1. as Float / self.surface_area()) } }
intersect_ray_full
identifier_name
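The intersection code in the preceding records converts a hit point into surface coordinates via phi = atan2(y, x) wrapped into [0, 2*pi), theta = acos(z / r), u = phi / phimax and v = (theta - thetamin) / (thetamax - thetamin). A minimal sketch of that mapping alone follows, with plain f64 values and an invented function name.

use std::f64::consts::PI;

// Map a point on a partial sphere to (u, v) surface coordinates.
fn sphere_uv(
    p: [f64; 3],
    radius: f64,
    phimax: f64,
    thetamin: f64,
    thetamax: f64,
) -> (f64, f64) {
    let mut phi = p[1].atan2(p[0]);
    if phi < 0.0 {
        phi += 2.0 * PI; // wrap atan2's (-pi, pi] range into [0, 2*pi)
    }
    let theta = (p[2] / radius).acos();
    (phi / phimax, (theta - thetamin) / (thetamax - thetamin))
}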
node.rs
use std::convert::TryFrom; use std::sync::Arc; use std::time::Duration; use bytes::Bytes; use casbin::prelude::{Enforcer, MgmtApi}; use dashmap::DashMap; use http::Uri; use prost::Message; use raft::prelude::*; use raft::{Config, RawNode}; use slog::Logger; use tokio::sync::mpsc::*; use tokio::sync::RwLock; use tokio::time::*; use tonic::Request; use crate::cluster::{self, InternalRaftMessage, RaftRequest}; use crate::network::{create_client, RpcClient}; use crate::storage::{MemStorage, Storage}; use crate::utils; pub struct CasbinRaft { pub id: u64, pub node: RawNode<MemStorage>, pub logger: Logger, pub mailbox_sender: Sender<cluster::Message>, pub mailbox_recv: Receiver<cluster::Message>, pub conf_sender: Sender<ConfChange>, pub conf_recv: Receiver<ConfChange>, pub peers: Arc<DashMap<u64, RpcClient>>, pub heartbeat: usize, pub enforcer: Arc<RwLock<Enforcer>>, } impl CasbinRaft { pub fn new( id: u64, cfg: Config, logger: Logger, peers: Arc<DashMap<u64, RpcClient>>, mailbox_sender: Sender<cluster::Message>, mailbox_recv: Receiver<cluster::Message>, enforcer: Arc<RwLock<Enforcer>>, ) -> Result<Self, crate::Error> { cfg.validate()?; let storage = MemStorage::new(); let node = RawNode::new(&cfg, storage, &logger)?; let (conf_sender, conf_recv) = channel(1024); Ok(Self { id, node, logger: logger.clone(), mailbox_sender, mailbox_recv, conf_sender, conf_recv, heartbeat: cfg.heartbeat_tick, peers, enforcer, }) } pub fn tick(&mut self) -> bool { self.node.tick() } pub fn propose_conf_change( &mut self, context: Vec<u8>, cc: ConfChange, ) -> Result<(), raft::Error> { Ok(self.node.propose_conf_change(context, cc)?) } pub fn become_leader(&mut self) { self.node.raft.raft_log.committed = 0; self.node.raft.become_candidate(); self.node.raft.become_leader(); } fn set_hard_state( &mut self, commit: u64, term: u64, ) -> Result<(), crate::error::Error> { self.node.raft.mut_store().set_hard_state(commit, term); Ok(()) } #[allow(irrefutable_let_patterns)] pub async fn run( mut self, ) -> Result<(), Box<dyn std::error::Error + Send + Sync +'static>> { while let _ = interval(Duration::from_millis(self.heartbeat as u64)) .tick() .await { let msg = match timeout(Duration::from_millis(100), self.mailbox_recv.recv()) .await { Ok(Some(msg)) => Some(msg), Ok(None) => None, Err(_) => None, }; if let Some(msg) = msg { slog::info!(self.logger, "Inbound raft message: {:?}", msg); self.node.step(msg.into())?; } match timeout(Duration::from_millis(100), self.conf_recv.recv()).await { Ok(Some(cc)) => { let ccc = cc.clone(); let state = self.node.apply_conf_change(&cc)?; self.node.mut_store().set_conf_state(state); let p = self.peers.clone(); let logger = self.logger.clone(); tokio::spawn(async move { let uri = Uri::try_from(&ccc.context[..]).unwrap(); let client: RpcClient = create_client(uri.clone(), Some(logger.clone())) .await .unwrap(); p.insert(ccc.node_id, client); slog::info!( logger, "Added client: {:?} - {:?}", ccc.node_id, &uri ); }); } Ok(None) => (), Err(_) => (), }; if self.node.has_ready() { slog::info!(self.logger, "I'm ready!"); self.ready().await?; } self.node.tick(); } Ok(()) } pub async fn
( &mut self, ) -> Result<(), Box<dyn std::error::Error + Send + Sync +'static>> { let mut ready = self.node.ready(); let is_leader = self.node.raft.leader_id == self.node.raft.id; slog::info!( self.logger, "Leader ID: {}, Node ID: {}", self.node.raft.leader_id, self.node.raft.id ); slog::info!(self.logger, "Am I leader?: {}", is_leader); if!Snapshot::is_empty(ready.snapshot()) { let snap = ready.snapshot().clone(); slog::info!(self.logger, "Got a snap: {:?}", snap); self.node.mut_store().apply_snapshot(snap)?; } if!ready.entries().is_empty() { let entries = ready .entries() .iter() .cloned() .filter(|e|!e.get_data().is_empty()) .collect::<Vec<Entry>>(); slog::info!(self.logger, "Entries?: {}", entries.len()); self.node.mut_store().append(&entries)?; } if let Some(hs) = ready.hs() { slog::info!(self.logger, "HS?: {:?}", hs); self.node.mut_store().set_hard_state(hs.commit, hs.term); // self.node.mut_store().state.hard_state = (*hs).clone(); // self.node.mut_store().commit()?; } for mut msg in ready.messages.drain(..) { slog::info!(self.logger, "LOGMSG==={:?}", msg); let to = msg.to; msg.from = self.id; msg.log_term = self.node.store().hard_state().term; msg.commit = self.node.store().hard_state().commit; if let Some(client) = self.peers.get(&to) { let mut msg_bytes = vec![]; msg.encode(&mut msg_bytes).unwrap(); let req = Request::new(RaftRequest { tpe: 0, message: msg_bytes, }); let req = client.clone().raft(req).await?; slog::info!(self.logger, "RESP={:?}", req); } self.append_entries(&msg.entries).await?; } if let Some(committed_entries) = ready.committed_entries.take() { for entry in committed_entries.clone() { slog::info!(self.logger, "Committing: {:?}", entry); if entry.data.is_empty() { // From new elected leaders. continue; } let mut internal_raft_message = InternalRaftMessage::default(); internal_raft_message .merge(Bytes::from(entry.data.clone())) .unwrap(); if let Err(error) = self.apply(internal_raft_message) { slog::error!(self.logger, "Unable to apply entry. {:?}", error); // TODO: return an error to the user } } if let Some(entry) = committed_entries.last() { self.set_hard_state(entry.index, entry.term)?; } } self.node.advance(ready); Ok(()) } pub fn propose( &mut self, ctx: Vec<u8>, entry: Vec<u8>, ) -> Result<(), Box<dyn std::error::Error>> { Ok(self.node.propose(ctx, entry)?) 
} pub async fn send(&mut self, msg: cluster::Message) -> Result<(), crate::Error> { slog::info!(self.logger, "SEND = {:?}", msg); self.mailbox_sender.send(msg).await.unwrap(); Ok(()) } pub async fn append_entries( &mut self, entries: &[Entry], ) -> Result<(), crate::Error> { for entry in entries { if entry.data.is_empty() { continue; } slog::info!(self.logger, "APPEND={:?}", entry); match EntryType::from_i32(entry.entry_type) { Some(EntryType::EntryConfChange) => { let mut cc = ConfChange::default(); cc.merge(Bytes::from(entry.data.clone()))?; let cs = self.node.apply_conf_change(&cc)?; self.node.mut_store().set_conf_state(cs); } Some(EntryType::EntryNormal) => { let mut e = Entry::default(); e.merge(Bytes::from(entry.data.clone()))?; self.node.mut_store().append(&[e])?; } Some(EntryType::EntryConfChangeV2) => panic!("Conf2"), None => panic!(":-("), } } Ok(()) } pub fn apply(&mut self, request: InternalRaftMessage) -> Result<(), crate::Error> { if let Some(policy_request) = request.policy { let op = utils::string_to_static_str(policy_request.op.to_lowercase()); // self.db.insert(put.key, put.value)?; match op { "add" => { let cloned_enforcer = self.enforcer.clone(); let p_type = "p".to_string(); let policy = policy_request.params; Box::pin(async move { let mut lock = cloned_enforcer.write().await; lock.add_named_policy(&p_type, policy).await.unwrap(); }); } "remove" => { let cloned_enforcer = self.enforcer.clone(); let policy = policy_request.params; Box::pin(async move { let mut lock = cloned_enforcer.write().await; lock.remove_policy(policy).await.unwrap(); }); } _ => panic!(":-("), } } Ok(()) } }
ready
identifier_name
node.rs
use std::convert::TryFrom; use std::sync::Arc; use std::time::Duration; use bytes::Bytes; use casbin::prelude::{Enforcer, MgmtApi}; use dashmap::DashMap; use http::Uri; use prost::Message; use raft::prelude::*; use raft::{Config, RawNode}; use slog::Logger; use tokio::sync::mpsc::*; use tokio::sync::RwLock; use tokio::time::*; use tonic::Request; use crate::cluster::{self, InternalRaftMessage, RaftRequest}; use crate::network::{create_client, RpcClient}; use crate::storage::{MemStorage, Storage}; use crate::utils; pub struct CasbinRaft { pub id: u64, pub node: RawNode<MemStorage>, pub logger: Logger, pub mailbox_sender: Sender<cluster::Message>, pub mailbox_recv: Receiver<cluster::Message>, pub conf_sender: Sender<ConfChange>, pub conf_recv: Receiver<ConfChange>, pub peers: Arc<DashMap<u64, RpcClient>>, pub heartbeat: usize, pub enforcer: Arc<RwLock<Enforcer>>, } impl CasbinRaft { pub fn new( id: u64, cfg: Config, logger: Logger, peers: Arc<DashMap<u64, RpcClient>>, mailbox_sender: Sender<cluster::Message>, mailbox_recv: Receiver<cluster::Message>, enforcer: Arc<RwLock<Enforcer>>, ) -> Result<Self, crate::Error> { cfg.validate()?; let storage = MemStorage::new(); let node = RawNode::new(&cfg, storage, &logger)?; let (conf_sender, conf_recv) = channel(1024); Ok(Self { id, node, logger: logger.clone(), mailbox_sender, mailbox_recv, conf_sender, conf_recv, heartbeat: cfg.heartbeat_tick, peers, enforcer, }) } pub fn tick(&mut self) -> bool { self.node.tick() } pub fn propose_conf_change( &mut self, context: Vec<u8>, cc: ConfChange, ) -> Result<(), raft::Error> { Ok(self.node.propose_conf_change(context, cc)?) } pub fn become_leader(&mut self) { self.node.raft.raft_log.committed = 0; self.node.raft.become_candidate(); self.node.raft.become_leader(); } fn set_hard_state( &mut self, commit: u64, term: u64, ) -> Result<(), crate::error::Error> { self.node.raft.mut_store().set_hard_state(commit, term); Ok(()) } #[allow(irrefutable_let_patterns)] pub async fn run( mut self, ) -> Result<(), Box<dyn std::error::Error + Send + Sync +'static>> { while let _ = interval(Duration::from_millis(self.heartbeat as u64)) .tick() .await { let msg = match timeout(Duration::from_millis(100), self.mailbox_recv.recv()) .await { Ok(Some(msg)) => Some(msg), Ok(None) => None, Err(_) => None, }; if let Some(msg) = msg
match timeout(Duration::from_millis(100), self.conf_recv.recv()).await { Ok(Some(cc)) => { let ccc = cc.clone(); let state = self.node.apply_conf_change(&cc)?; self.node.mut_store().set_conf_state(state); let p = self.peers.clone(); let logger = self.logger.clone(); tokio::spawn(async move { let uri = Uri::try_from(&ccc.context[..]).unwrap(); let client: RpcClient = create_client(uri.clone(), Some(logger.clone())) .await .unwrap(); p.insert(ccc.node_id, client); slog::info!( logger, "Added client: {:?} - {:?}", ccc.node_id, &uri ); }); } Ok(None) => (), Err(_) => (), }; if self.node.has_ready() { slog::info!(self.logger, "I'm ready!"); self.ready().await?; } self.node.tick(); } Ok(()) } pub async fn ready( &mut self, ) -> Result<(), Box<dyn std::error::Error + Send + Sync +'static>> { let mut ready = self.node.ready(); let is_leader = self.node.raft.leader_id == self.node.raft.id; slog::info!( self.logger, "Leader ID: {}, Node ID: {}", self.node.raft.leader_id, self.node.raft.id ); slog::info!(self.logger, "Am I leader?: {}", is_leader); if!Snapshot::is_empty(ready.snapshot()) { let snap = ready.snapshot().clone(); slog::info!(self.logger, "Got a snap: {:?}", snap); self.node.mut_store().apply_snapshot(snap)?; } if!ready.entries().is_empty() { let entries = ready .entries() .iter() .cloned() .filter(|e|!e.get_data().is_empty()) .collect::<Vec<Entry>>(); slog::info!(self.logger, "Entries?: {}", entries.len()); self.node.mut_store().append(&entries)?; } if let Some(hs) = ready.hs() { slog::info!(self.logger, "HS?: {:?}", hs); self.node.mut_store().set_hard_state(hs.commit, hs.term); // self.node.mut_store().state.hard_state = (*hs).clone(); // self.node.mut_store().commit()?; } for mut msg in ready.messages.drain(..) { slog::info!(self.logger, "LOGMSG==={:?}", msg); let to = msg.to; msg.from = self.id; msg.log_term = self.node.store().hard_state().term; msg.commit = self.node.store().hard_state().commit; if let Some(client) = self.peers.get(&to) { let mut msg_bytes = vec![]; msg.encode(&mut msg_bytes).unwrap(); let req = Request::new(RaftRequest { tpe: 0, message: msg_bytes, }); let req = client.clone().raft(req).await?; slog::info!(self.logger, "RESP={:?}", req); } self.append_entries(&msg.entries).await?; } if let Some(committed_entries) = ready.committed_entries.take() { for entry in committed_entries.clone() { slog::info!(self.logger, "Committing: {:?}", entry); if entry.data.is_empty() { // From new elected leaders. continue; } let mut internal_raft_message = InternalRaftMessage::default(); internal_raft_message .merge(Bytes::from(entry.data.clone())) .unwrap(); if let Err(error) = self.apply(internal_raft_message) { slog::error!(self.logger, "Unable to apply entry. {:?}", error); // TODO: return an error to the user } } if let Some(entry) = committed_entries.last() { self.set_hard_state(entry.index, entry.term)?; } } self.node.advance(ready); Ok(()) } pub fn propose( &mut self, ctx: Vec<u8>, entry: Vec<u8>, ) -> Result<(), Box<dyn std::error::Error>> { Ok(self.node.propose(ctx, entry)?) 
} pub async fn send(&mut self, msg: cluster::Message) -> Result<(), crate::Error> { slog::info!(self.logger, "SEND = {:?}", msg); self.mailbox_sender.send(msg).await.unwrap(); Ok(()) } pub async fn append_entries( &mut self, entries: &[Entry], ) -> Result<(), crate::Error> { for entry in entries { if entry.data.is_empty() { continue; } slog::info!(self.logger, "APPEND={:?}", entry); match EntryType::from_i32(entry.entry_type) { Some(EntryType::EntryConfChange) => { let mut cc = ConfChange::default(); cc.merge(Bytes::from(entry.data.clone()))?; let cs = self.node.apply_conf_change(&cc)?; self.node.mut_store().set_conf_state(cs); } Some(EntryType::EntryNormal) => { let mut e = Entry::default(); e.merge(Bytes::from(entry.data.clone()))?; self.node.mut_store().append(&[e])?; } Some(EntryType::EntryConfChangeV2) => panic!("Conf2"), None => panic!(":-("), } } Ok(()) } pub fn apply(&mut self, request: InternalRaftMessage) -> Result<(), crate::Error> { if let Some(policy_request) = request.policy { let op = utils::string_to_static_str(policy_request.op.to_lowercase()); // self.db.insert(put.key, put.value)?; match op { "add" => { let cloned_enforcer = self.enforcer.clone(); let p_type = "p".to_string(); let policy = policy_request.params; Box::pin(async move { let mut lock = cloned_enforcer.write().await; lock.add_named_policy(&p_type, policy).await.unwrap(); }); } "remove" => { let cloned_enforcer = self.enforcer.clone(); let policy = policy_request.params; Box::pin(async move { let mut lock = cloned_enforcer.write().await; lock.remove_policy(policy).await.unwrap(); }); } _ => panic!(":-("), } } Ok(()) } }
{ slog::info!(self.logger, "Inbound raft message: {:?}", msg); self.node.step(msg.into())?; }
conditional_block
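The conditional_block held out above sits inside run()'s heartbeat loop, which ticks a tokio interval and polls the inbound mailbox with a short timeout so that an idle channel never stalls the Raft tick. Below is a self-contained sketch of that polling shape, with plain String messages standing in for raft messages and arbitrary example durations.

use std::time::Duration;
use tokio::sync::mpsc;
use tokio::time::{interval, timeout};

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::channel::<String>(16);
    tx.send("hello".to_string()).await.unwrap();
    drop(tx); // close the channel so the loop below can finish

    let mut ticker = interval(Duration::from_millis(100));
    loop {
        ticker.tick().await; // heartbeat tick
        // Poll the mailbox, but give up quickly so the tick cadence is kept.
        match timeout(Duration::from_millis(10), rx.recv()).await {
            Ok(Some(msg)) => println!("inbound message: {}", msg),
            Ok(None) => break, // channel closed, stop the loop
            Err(_) => {}       // nothing arrived within the timeout
        }
    }
}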
node.rs
use std::convert::TryFrom; use std::sync::Arc; use std::time::Duration; use bytes::Bytes; use casbin::prelude::{Enforcer, MgmtApi}; use dashmap::DashMap; use http::Uri; use prost::Message; use raft::prelude::*; use raft::{Config, RawNode}; use slog::Logger; use tokio::sync::mpsc::*; use tokio::sync::RwLock; use tokio::time::*; use tonic::Request; use crate::cluster::{self, InternalRaftMessage, RaftRequest}; use crate::network::{create_client, RpcClient}; use crate::storage::{MemStorage, Storage}; use crate::utils; pub struct CasbinRaft { pub id: u64, pub node: RawNode<MemStorage>, pub logger: Logger, pub mailbox_sender: Sender<cluster::Message>, pub mailbox_recv: Receiver<cluster::Message>, pub conf_sender: Sender<ConfChange>, pub conf_recv: Receiver<ConfChange>, pub peers: Arc<DashMap<u64, RpcClient>>, pub heartbeat: usize, pub enforcer: Arc<RwLock<Enforcer>>, } impl CasbinRaft { pub fn new( id: u64, cfg: Config, logger: Logger, peers: Arc<DashMap<u64, RpcClient>>, mailbox_sender: Sender<cluster::Message>, mailbox_recv: Receiver<cluster::Message>, enforcer: Arc<RwLock<Enforcer>>, ) -> Result<Self, crate::Error> { cfg.validate()?; let storage = MemStorage::new(); let node = RawNode::new(&cfg, storage, &logger)?; let (conf_sender, conf_recv) = channel(1024); Ok(Self { id, node, logger: logger.clone(), mailbox_sender, mailbox_recv, conf_sender, conf_recv, heartbeat: cfg.heartbeat_tick, peers, enforcer, }) } pub fn tick(&mut self) -> bool { self.node.tick() } pub fn propose_conf_change( &mut self, context: Vec<u8>, cc: ConfChange, ) -> Result<(), raft::Error> { Ok(self.node.propose_conf_change(context, cc)?) } pub fn become_leader(&mut self)
fn set_hard_state( &mut self, commit: u64, term: u64, ) -> Result<(), crate::error::Error> { self.node.raft.mut_store().set_hard_state(commit, term); Ok(()) } #[allow(irrefutable_let_patterns)] pub async fn run( mut self, ) -> Result<(), Box<dyn std::error::Error + Send + Sync +'static>> { while let _ = interval(Duration::from_millis(self.heartbeat as u64)) .tick() .await { let msg = match timeout(Duration::from_millis(100), self.mailbox_recv.recv()) .await { Ok(Some(msg)) => Some(msg), Ok(None) => None, Err(_) => None, }; if let Some(msg) = msg { slog::info!(self.logger, "Inbound raft message: {:?}", msg); self.node.step(msg.into())?; } match timeout(Duration::from_millis(100), self.conf_recv.recv()).await { Ok(Some(cc)) => { let ccc = cc.clone(); let state = self.node.apply_conf_change(&cc)?; self.node.mut_store().set_conf_state(state); let p = self.peers.clone(); let logger = self.logger.clone(); tokio::spawn(async move { let uri = Uri::try_from(&ccc.context[..]).unwrap(); let client: RpcClient = create_client(uri.clone(), Some(logger.clone())) .await .unwrap(); p.insert(ccc.node_id, client); slog::info!( logger, "Added client: {:?} - {:?}", ccc.node_id, &uri ); }); } Ok(None) => (), Err(_) => (), }; if self.node.has_ready() { slog::info!(self.logger, "I'm ready!"); self.ready().await?; } self.node.tick(); } Ok(()) } pub async fn ready( &mut self, ) -> Result<(), Box<dyn std::error::Error + Send + Sync +'static>> { let mut ready = self.node.ready(); let is_leader = self.node.raft.leader_id == self.node.raft.id; slog::info!( self.logger, "Leader ID: {}, Node ID: {}", self.node.raft.leader_id, self.node.raft.id ); slog::info!(self.logger, "Am I leader?: {}", is_leader); if!Snapshot::is_empty(ready.snapshot()) { let snap = ready.snapshot().clone(); slog::info!(self.logger, "Got a snap: {:?}", snap); self.node.mut_store().apply_snapshot(snap)?; } if!ready.entries().is_empty() { let entries = ready .entries() .iter() .cloned() .filter(|e|!e.get_data().is_empty()) .collect::<Vec<Entry>>(); slog::info!(self.logger, "Entries?: {}", entries.len()); self.node.mut_store().append(&entries)?; } if let Some(hs) = ready.hs() { slog::info!(self.logger, "HS?: {:?}", hs); self.node.mut_store().set_hard_state(hs.commit, hs.term); // self.node.mut_store().state.hard_state = (*hs).clone(); // self.node.mut_store().commit()?; } for mut msg in ready.messages.drain(..) { slog::info!(self.logger, "LOGMSG==={:?}", msg); let to = msg.to; msg.from = self.id; msg.log_term = self.node.store().hard_state().term; msg.commit = self.node.store().hard_state().commit; if let Some(client) = self.peers.get(&to) { let mut msg_bytes = vec![]; msg.encode(&mut msg_bytes).unwrap(); let req = Request::new(RaftRequest { tpe: 0, message: msg_bytes, }); let req = client.clone().raft(req).await?; slog::info!(self.logger, "RESP={:?}", req); } self.append_entries(&msg.entries).await?; } if let Some(committed_entries) = ready.committed_entries.take() { for entry in committed_entries.clone() { slog::info!(self.logger, "Committing: {:?}", entry); if entry.data.is_empty() { // From new elected leaders. continue; } let mut internal_raft_message = InternalRaftMessage::default(); internal_raft_message .merge(Bytes::from(entry.data.clone())) .unwrap(); if let Err(error) = self.apply(internal_raft_message) { slog::error!(self.logger, "Unable to apply entry. 
{:?}", error); // TODO: return an error to the user } } if let Some(entry) = committed_entries.last() { self.set_hard_state(entry.index, entry.term)?; } } self.node.advance(ready); Ok(()) } pub fn propose( &mut self, ctx: Vec<u8>, entry: Vec<u8>, ) -> Result<(), Box<dyn std::error::Error>> { Ok(self.node.propose(ctx, entry)?) } pub async fn send(&mut self, msg: cluster::Message) -> Result<(), crate::Error> { slog::info!(self.logger, "SEND = {:?}", msg); self.mailbox_sender.send(msg).await.unwrap(); Ok(()) } pub async fn append_entries( &mut self, entries: &[Entry], ) -> Result<(), crate::Error> { for entry in entries { if entry.data.is_empty() { continue; } slog::info!(self.logger, "APPEND={:?}", entry); match EntryType::from_i32(entry.entry_type) { Some(EntryType::EntryConfChange) => { let mut cc = ConfChange::default(); cc.merge(Bytes::from(entry.data.clone()))?; let cs = self.node.apply_conf_change(&cc)?; self.node.mut_store().set_conf_state(cs); } Some(EntryType::EntryNormal) => { let mut e = Entry::default(); e.merge(Bytes::from(entry.data.clone()))?; self.node.mut_store().append(&[e])?; } Some(EntryType::EntryConfChangeV2) => panic!("Conf2"), None => panic!(":-("), } } Ok(()) } pub fn apply(&mut self, request: InternalRaftMessage) -> Result<(), crate::Error> { if let Some(policy_request) = request.policy { let op = utils::string_to_static_str(policy_request.op.to_lowercase()); // self.db.insert(put.key, put.value)?; match op { "add" => { let cloned_enforcer = self.enforcer.clone(); let p_type = "p".to_string(); let policy = policy_request.params; Box::pin(async move { let mut lock = cloned_enforcer.write().await; lock.add_named_policy(&p_type, policy).await.unwrap(); }); } "remove" => { let cloned_enforcer = self.enforcer.clone(); let policy = policy_request.params; Box::pin(async move { let mut lock = cloned_enforcer.write().await; lock.remove_policy(policy).await.unwrap(); }); } _ => panic!(":-("), } } Ok(()) } }
{ self.node.raft.raft_log.committed = 0; self.node.raft.become_candidate(); self.node.raft.become_leader(); }
identifier_body
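The record above is an identifier_body example: the held-out middle is the complete body of become_leader, while the prefix and suffix carry the rest of node.rs. As a minimal, hypothetical sketch (the struct and field names below simply mirror the dataset columns and are not part of the corpus), a row can be stitched back into source text by concatenating its three pieces:

```rust
/// Hypothetical helper mirroring the dataset columns (prefix, middle, suffix).
/// Reassembling a row is plain concatenation: prefix + middle + suffix.
struct FimExample {
    prefix: String,
    middle: String,
    suffix: String,
}

impl FimExample {
    fn reassemble(&self) -> String {
        format!("{}{}{}", self.prefix, self.middle, self.suffix)
    }
}

fn main() {
    let ex = FimExample {
        prefix: "pub fn become_leader(&mut self) ".to_string(),
        middle: "{ self.node.raft.become_leader(); }".to_string(),
        suffix: "\nfn set_hard_state() {}".to_string(),
    };
    assert!(ex.reassemble().contains("become_leader"));
}
```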
node.rs
use std::convert::TryFrom; use std::sync::Arc; use std::time::Duration; use bytes::Bytes; use casbin::prelude::{Enforcer, MgmtApi}; use dashmap::DashMap; use http::Uri; use prost::Message; use raft::prelude::*; use raft::{Config, RawNode}; use slog::Logger; use tokio::sync::mpsc::*; use tokio::sync::RwLock; use tokio::time::*; use tonic::Request; use crate::cluster::{self, InternalRaftMessage, RaftRequest}; use crate::network::{create_client, RpcClient}; use crate::storage::{MemStorage, Storage}; use crate::utils; pub struct CasbinRaft { pub id: u64, pub node: RawNode<MemStorage>, pub logger: Logger, pub mailbox_sender: Sender<cluster::Message>, pub mailbox_recv: Receiver<cluster::Message>, pub conf_sender: Sender<ConfChange>, pub conf_recv: Receiver<ConfChange>, pub peers: Arc<DashMap<u64, RpcClient>>, pub heartbeat: usize, pub enforcer: Arc<RwLock<Enforcer>>, } impl CasbinRaft { pub fn new( id: u64, cfg: Config, logger: Logger, peers: Arc<DashMap<u64, RpcClient>>, mailbox_sender: Sender<cluster::Message>, mailbox_recv: Receiver<cluster::Message>, enforcer: Arc<RwLock<Enforcer>>, ) -> Result<Self, crate::Error> { cfg.validate()?; let storage = MemStorage::new(); let node = RawNode::new(&cfg, storage, &logger)?; let (conf_sender, conf_recv) = channel(1024); Ok(Self { id, node, logger: logger.clone(), mailbox_sender, mailbox_recv, conf_sender, conf_recv, heartbeat: cfg.heartbeat_tick, peers, enforcer, }) } pub fn tick(&mut self) -> bool { self.node.tick() } pub fn propose_conf_change( &mut self, context: Vec<u8>, cc: ConfChange, ) -> Result<(), raft::Error> { Ok(self.node.propose_conf_change(context, cc)?) } pub fn become_leader(&mut self) { self.node.raft.raft_log.committed = 0; self.node.raft.become_candidate(); self.node.raft.become_leader(); } fn set_hard_state( &mut self, commit: u64, term: u64, ) -> Result<(), crate::error::Error> { self.node.raft.mut_store().set_hard_state(commit, term); Ok(()) } #[allow(irrefutable_let_patterns)] pub async fn run( mut self, ) -> Result<(), Box<dyn std::error::Error + Send + Sync +'static>> { while let _ = interval(Duration::from_millis(self.heartbeat as u64)) .tick() .await { let msg = match timeout(Duration::from_millis(100), self.mailbox_recv.recv()) .await { Ok(Some(msg)) => Some(msg), Ok(None) => None, Err(_) => None, }; if let Some(msg) = msg { slog::info!(self.logger, "Inbound raft message: {:?}", msg); self.node.step(msg.into())?; } match timeout(Duration::from_millis(100), self.conf_recv.recv()).await { Ok(Some(cc)) => { let ccc = cc.clone(); let state = self.node.apply_conf_change(&cc)?; self.node.mut_store().set_conf_state(state); let p = self.peers.clone(); let logger = self.logger.clone(); tokio::spawn(async move { let uri = Uri::try_from(&ccc.context[..]).unwrap(); let client: RpcClient = create_client(uri.clone(), Some(logger.clone())) .await .unwrap(); p.insert(ccc.node_id, client); slog::info!( logger, "Added client: {:?} - {:?}", ccc.node_id, &uri ); }); } Ok(None) => (), Err(_) => (), }; if self.node.has_ready() { slog::info!(self.logger, "I'm ready!"); self.ready().await?; } self.node.tick(); } Ok(()) } pub async fn ready( &mut self, ) -> Result<(), Box<dyn std::error::Error + Send + Sync +'static>> { let mut ready = self.node.ready(); let is_leader = self.node.raft.leader_id == self.node.raft.id; slog::info!( self.logger, "Leader ID: {}, Node ID: {}", self.node.raft.leader_id, self.node.raft.id ); slog::info!(self.logger, "Am I leader?: {}", is_leader); if!Snapshot::is_empty(ready.snapshot()) { let snap = 
ready.snapshot().clone(); slog::info!(self.logger, "Got a snap: {:?}", snap); self.node.mut_store().apply_snapshot(snap)?; } if!ready.entries().is_empty() { let entries = ready .entries() .iter() .cloned() .filter(|e|!e.get_data().is_empty()) .collect::<Vec<Entry>>(); slog::info!(self.logger, "Entries?: {}", entries.len()); self.node.mut_store().append(&entries)?; } if let Some(hs) = ready.hs() { slog::info!(self.logger, "HS?: {:?}", hs); self.node.mut_store().set_hard_state(hs.commit, hs.term); // self.node.mut_store().state.hard_state = (*hs).clone(); // self.node.mut_store().commit()?; } for mut msg in ready.messages.drain(..) { slog::info!(self.logger, "LOGMSG==={:?}", msg); let to = msg.to; msg.from = self.id; msg.log_term = self.node.store().hard_state().term; msg.commit = self.node.store().hard_state().commit; if let Some(client) = self.peers.get(&to) { let mut msg_bytes = vec![]; msg.encode(&mut msg_bytes).unwrap(); let req = Request::new(RaftRequest { tpe: 0, message: msg_bytes, }); let req = client.clone().raft(req).await?; slog::info!(self.logger, "RESP={:?}", req); } self.append_entries(&msg.entries).await?; } if let Some(committed_entries) = ready.committed_entries.take() { for entry in committed_entries.clone() { slog::info!(self.logger, "Committing: {:?}", entry); if entry.data.is_empty() { // From new elected leaders. continue; }
internal_raft_message .merge(Bytes::from(entry.data.clone())) .unwrap(); if let Err(error) = self.apply(internal_raft_message) { slog::error!(self.logger, "Unable to apply entry. {:?}", error); // TODO: return an error to the user } } if let Some(entry) = committed_entries.last() { self.set_hard_state(entry.index, entry.term)?; } } self.node.advance(ready); Ok(()) } pub fn propose( &mut self, ctx: Vec<u8>, entry: Vec<u8>, ) -> Result<(), Box<dyn std::error::Error>> { Ok(self.node.propose(ctx, entry)?) } pub async fn send(&mut self, msg: cluster::Message) -> Result<(), crate::Error> { slog::info!(self.logger, "SEND = {:?}", msg); self.mailbox_sender.send(msg).await.unwrap(); Ok(()) } pub async fn append_entries( &mut self, entries: &[Entry], ) -> Result<(), crate::Error> { for entry in entries { if entry.data.is_empty() { continue; } slog::info!(self.logger, "APPEND={:?}", entry); match EntryType::from_i32(entry.entry_type) { Some(EntryType::EntryConfChange) => { let mut cc = ConfChange::default(); cc.merge(Bytes::from(entry.data.clone()))?; let cs = self.node.apply_conf_change(&cc)?; self.node.mut_store().set_conf_state(cs); } Some(EntryType::EntryNormal) => { let mut e = Entry::default(); e.merge(Bytes::from(entry.data.clone()))?; self.node.mut_store().append(&[e])?; } Some(EntryType::EntryConfChangeV2) => panic!("Conf2"), None => panic!(":-("), } } Ok(()) } pub fn apply(&mut self, request: InternalRaftMessage) -> Result<(), crate::Error> { if let Some(policy_request) = request.policy { let op = utils::string_to_static_str(policy_request.op.to_lowercase()); // self.db.insert(put.key, put.value)?; match op { "add" => { let cloned_enforcer = self.enforcer.clone(); let p_type = "p".to_string(); let policy = policy_request.params; Box::pin(async move { let mut lock = cloned_enforcer.write().await; lock.add_named_policy(&p_type, policy).await.unwrap(); }); } "remove" => { let cloned_enforcer = self.enforcer.clone(); let policy = policy_request.params; Box::pin(async move { let mut lock = cloned_enforcer.write().await; lock.remove_policy(policy).await.unwrap(); }); } _ => panic!(":-("), } } Ok(()) } }
let mut internal_raft_message = InternalRaftMessage::default();
random_line_split
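The run() loop in this node.rs record waits at most 100 ms for a mailbox message on each heartbeat tick, so the raft node keeps ticking even when the channel is idle. A minimal sketch of that bounded-wait receive, assuming a tokio mpsc channel as in the record (the function name is illustrative):

```rust
use std::time::Duration;

use tokio::sync::mpsc::Receiver;
use tokio::time::timeout;

/// Wait up to 100 ms for one inbound message; fall through on timeout or
/// channel close so the caller can still tick the raft node this round.
async fn try_recv_one<T>(rx: &mut Receiver<T>) -> Option<T> {
    match timeout(Duration::from_millis(100), rx.recv()).await {
        Ok(Some(msg)) => Some(msg), // a message arrived in time
        Ok(None) => None,           // channel closed
        Err(_) => None,             // timed out; nothing this round
    }
}
```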
index_file_deleter.rs
self.inc_ref_files(&sis.files(true)); } } // We keep commits list in sorted order (oldest to newest): self.commits.sort(); // refCounts only includes "normal" filenames (does not include write.lock) { let ref_counts = self.ref_counts.read()?; let files: Vec<&str> = ref_counts.keys().map(|s| s.as_str()).collect(); Self::inflate_gens(segment_infos, files)?; } // Now delete anything with ref count at 0. These are // presumably abandoned files eg due to crash of // IndexWriter. { let mut to_delete = HashSet::new(); for (filename, rc) in &*self.ref_counts.read()? { if rc.count == 0 { // A segments_N file should never have ref count 0 on init if filename.starts_with(INDEX_FILE_SEGMENTS) { bail!(ErrorKind::IllegalState(format!( "file '{}' has ref_count=0, shouldn't happen on init", filename ))); } to_delete.insert(filename.clone()); } } self.delete_files(&to_delete, false)?; } // Finally, give policy a chance to remove things on // startup: { let mut commits: Vec<&mut CommitPoint> = Vec::with_capacity(self.commits.len()); for i in &mut self.commits { commits.push(i); } self.policy.on_init(commits)?; } // Always protect the incoming segmentInfos since // sometime it may not be the most recent commit self.checkpoint(segment_infos, false)?; let mut starting_commit_deleted = false; if let Some(idx) = current_commit_point_idx { if self.commits[idx].deleted { starting_commit_deleted = true; } } self.delete_commits()?; self.inited = true; Ok(starting_commit_deleted) } /// Set all gens beyond what we currently see in the directory, to avoid double-write /// in cases where the previous IndexWriter did not gracefully close/rollback (e.g. /// os/machine crashed or lost power). fn inflate_gens<C: Codec>(infos: &mut SegmentInfos<D, C>, files: Vec<&str>) -> Result<()> { let mut max_segment_gen = i64::min_value(); let mut max_segment_name = i32::min_value(); // Confusingly, this is the union of live_docs, field infos, doc values // (and maybe others, in the future) gens. 
This is somewhat messy, // since it means DV updates will suddenly write to the next gen after // live docs' gen, for example, but we don't have the APIs to ask the // codec which file is which: let mut max_per_segment_gen = HashMap::new(); for filename in files { if filename == INDEX_FILE_OLD_SEGMENT_GEN { // do nothing } else if filename.starts_with(INDEX_FILE_SEGMENTS) { // trash file: we have to handle this since we allow anything // starting with'segments' here if let Ok(gen) = generation_from_segments_file_name(filename) { max_segment_gen = max_segment_gen.max(gen); } } else if filename.starts_with(INDEX_FILE_PENDING_SEGMENTS) { // the first 8 bytes is "pending_", so the slice operation is safe if let Ok(gen) = generation_from_segments_file_name(&filename[8..]) { max_segment_gen = max_segment_gen.max(gen); } } else { let segment_name = parse_segment_name(filename); debug_assert!(segment_name.starts_with('_')); if filename.to_lowercase().ends_with(".tmp") { // A temp file: don't try to look at its gen continue; } max_segment_name = max_segment_name.max(i32::from_str_radix(&segment_name[1..], 36)?); let mut cur_gen = max_per_segment_gen.get(segment_name).map_or(0, |x| *x); if let Ok(gen) = parse_generation(filename) { cur_gen = cur_gen.max(gen); } max_per_segment_gen.insert(segment_name.to_string(), cur_gen); } } // Generation is advanced before write: let next_write_gen = max_segment_gen.max(infos.generation); infos.set_next_write_generation(next_write_gen)?; if infos.counter < max_segment_name + 1 { infos.counter = max_segment_name } for info in &mut infos.segments { let gen = max_per_segment_gen[&info.info.name]; if info.next_write_del_gen() < gen + 1 { info.set_next_write_del_gen(gen + 1); } if info.next_write_field_infos_gen() < gen + 1 { info.set_next_write_field_infos_gen(gen + 1); } if info.next_write_doc_values_gen() < gen + 1 { info.set_next_write_doc_values_gen(gen + 1); } } Ok(()) } /// For definition of "check point" see IndexWriter comments: /// "Clarification: Check Points (and commits)". /// /// Writer calls this when it has made a "consistent /// change" to the index, meaning new files are written to /// the index and the in-memory SegmentInfos have been /// modified to point to those files. /// /// This may or may not be a commit (segments_N may or may /// not have been written). /// /// We simply incref the files referenced by the new /// SegmentInfos and decref the files we had previously /// seen (if any). /// /// If this is a commit, we also call the policy to give it /// a chance to remove other commits. If any commits are /// removed, we decref their files as well. 
pub fn checkpoint<C: Codec>( &mut self, segment_infos: &SegmentInfos<D, C>, is_commit: bool, ) -> Result<()> { // incref the files: self.inc_ref_files(&segment_infos.files(is_commit)); if is_commit { // Append to our commits list: let p = CommitPoint::new( segment_infos.generation, segment_infos.segment_file_name().unwrap_or("".to_string()), segment_infos.files(true), segment_infos.has_dv_updates(), ); self.commits.push(p); // Tell policy so it can remove commits: { let mut commits: Vec<&mut CommitPoint> = Vec::with_capacity(self.commits.len()); for i in &mut self.commits { commits.push(i); } self.policy.on_commit(commits)?; } // DecRef file for commits that were deleted by the policy self.delete_commits() } else { let res = self.dec_ref_files(&self.last_files); self.last_files.clear(); res?; // Save files so we can decr on next checkpoint/commit: self.last_files.extend(segment_infos.files(false)); Ok(()) } } pub fn exists(&self, filename: &str) -> bool { if!self.ref_counts.read().unwrap().contains_key(filename) { false } else { self.ensure_ref_count(filename); self.ref_counts.read().unwrap()[filename].count > 0 } } fn ensure_ref_count(&self, file_name: &str) { let mut ref_counts = self.ref_counts.write().unwrap(); if!ref_counts.contains_key(file_name) { ref_counts.insert(file_name.to_string(), RefCount::default()); } } pub fn inc_ref_files(&self, files: &HashSet<String>) { for file in files { self.ensure_ref_count(file); self.ref_counts .write() .unwrap() .get_mut(file) .unwrap() .inc_ref(); } } /// Decrefs all provided files, even on exception; throws first exception hit, if any. pub fn dec_ref_files(&self, files: &HashSet<String>) -> Result<()> { let mut to_delete = HashSet::new(); for f in files { if self.dec_ref(f) { to_delete.insert(f.clone()); } } self.delete_files(&to_delete, false) } fn _dec_ref_files_by_commit(&self, files: &HashSet<String>) -> Result<()> { let mut to_delete = HashSet::new(); for f in files { if self.dec_ref(f) { to_delete.insert(f.clone()); } } self.delete_files(&to_delete, true) } pub fn dec_ref_files_no_error(&self, files: &HashSet<String>) { if let Err(e) = self.dec_ref_files(files) { warn!("dec_ref_files_no_error failed with '{:?}'", e); } } /// Returns true if the file should now be deleted. fn dec_ref(&self, filename: &str) -> bool { self.ensure_ref_count(filename); let mut ref_counts = self.ref_counts.write().unwrap(); if ref_counts.get_mut(filename).unwrap().dec_ref() == 0 { // This file is no longer referenced by any past // commit points nor by the in-memory SegmentInfos: ref_counts.remove(filename); true } else { false } } /// Remove the CommitPoints in the commitsToDelete List by /// DecRef'ing all files from each SegmentInfos. fn delete_commits(&mut self) -> Result<()> { let mut res = Ok(()); // First decref all files that had been referred to by // the now-deleted commits: for commit in &self.commits { if commit.deleted { res = self.dec_ref_files(&commit.files); } } // NOTE: does nothing if not err if res.is_err() { return res; } // Now compact commits to remove deleted ones (preserving the sort): let size = self.commits.len(); let mut read_from = 0; let mut write_to = 0; while read_from < size { if!self.commits[read_from].deleted { if write_to!= read_from { self.commits.swap(read_from, write_to); } write_to += 1; } read_from += 1; } self.commits.truncate(write_to); Ok(()) } fn delete_files(&self, files: &HashSet<String>, do_commit_filter: bool) -> Result<()> { // We make two passes, first deleting any segments_N files, second // deleting the rest. 
We do this so that if we throw exc or JVM // crashes during deletions, even when not on Windows, we don't // leave the index in an "apparently corrupt" state: let mut copys = vec![]; for file in files { copys.push(file); if!file.starts_with(INDEX_FILE_SEGMENTS) { continue; } self.delete_file(file)?; } if do_commit_filter { self.filter_dv_update_files(&mut copys); } for file in copys { if file.starts_with(INDEX_FILE_SEGMENTS) { continue; } self.delete_file(file)?; } Ok(()) } fn filter_dv_update_files(&self, candidates: &mut Vec<&String>) { let dv_update_files: Vec<String> = candidates .drain_filter(|f| -> bool { self.fnm_pattern.is_match(f) || self.dv_pattern.is_match(f) }) .map(|f| f.clone()) .collect(); let to_deletes: Vec<Vec<String>>; { let mut l = self.delayed_dv_update_files.lock(); let old_dv_update_files = l.as_mut().unwrap(); let tm_now = SystemTime::now() .duration_since(UNIX_EPOCH) .unwrap() .as_secs(); to_deletes = old_dv_update_files .drain_filter(|(x, _)| -> bool { *x < tm_now }) .map(|(_, y)| y) .collect(); old_dv_update_files.push((tm_now + 60, dv_update_files)); } for files in to_deletes { for file in files { self.delete_file(&file).unwrap_or(()); } } } fn delete_file(&self, filename: &str) -> Result<()> { // panic!("wrong deleted files"); self.directory.delete_file(filename) } /// Deletes the specified files, but only if they are new /// (have not yes been incref'd). pub fn delete_new_files(&self, files: &HashSet<String>) -> Result<()> { let mut filtered = HashSet::with_capacity(files.len()); let ref_counts = self.ref_counts.read().unwrap(); for file in files { // NOTE: it's very unusual yet possible for the // refCount to be present and 0: it can happen if you // open IW on a crashed index, and it removes a bunch // of unref'd files, and then you add new docs / do // merging, and it reuses that segment name. // TestCrash.testCrashAfterReopen can hit this: if!ref_counts.contains_key(file) || ref_counts[file].count == 0 { filtered.insert(file.clone()); } } self.delete_files(&filtered, false) } /// Writer calls this when it has hit an error and had to /// roll back, to tell us that there may now be /// unreferenced files in the filesystem. So we re-list /// the filesystem and delete such files. If segmentName /// is non-null, we will only delete files corresponding to /// that segment. 
pub fn refresh(&mut self) -> Result<()> { debug_assert!(self.inited); let files = self.directory.list_all()?; let mut to_delete = HashSet::new(); let pattern = Regex::new(CODEC_FILE_PATTERN).unwrap(); for filename in &files { if!self.ref_counts.read()?.contains_key(filename) && (pattern.is_match(filename) || filename.starts_with(INDEX_FILE_SEGMENTS) || filename.starts_with(INDEX_FILE_PENDING_SEGMENTS)) { // Unreferenced file, so remove it to_delete.insert(filename.clone()); } } self.delete_files(&to_delete, false) } pub fn close(&mut self) -> Result<()> { if!self.last_files.is_empty() { let files = mem::replace(&mut self.last_files, HashSet::new()); self.dec_ref_files(&files)?; } Ok(()) } } struct RefCount { inited: bool, count: u32, } impl Default for RefCount { fn default() -> Self { RefCount { inited: false, count: 0, } } } impl RefCount { fn inc_ref(&mut self) -> u32 { if!self.inited { self.inited = true; } else { debug_assert!(self.count > 0); } self.count += 1; self.count } fn dec_ref(&mut self) -> u32 { debug_assert!(self.count > 0); self.count -= 1; self.count } } impl fmt::Display for RefCount { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.count) } } impl fmt::Debug for RefCount { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.count) } } /// Expert: represents a single commit into an index as seen by the /// {@link IndexDeletionPolicy} or {@link IndexReader}. /// /// Changes to the content of an index are made visible /// only after the writer who made that change commits by /// writing a new segments file /// (`segments_N</code`). This point in time, when the /// action of writing of a new segments file to the directory /// is completed, is an index commit. /// /// Each index commit point has a unique segments file /// associated with it. The segments file associated with a /// later index commit point would have a larger N. /// /// Holds details for each commit point. This class is also passed to /// the deletion policy. Note: this class has a natural ordering that /// is inconsistent with equals. pub struct CommitPoint { generation: i64, segment_file_name: String, files: HashSet<String>, has_dv_updates: bool, deleted: bool, } impl CommitPoint { fn new( generation: i64, segment_file_name: String, files: HashSet<String>, has_dv_updates: bool, ) -> Self { CommitPoint { generation, segment_file_name, files, has_dv_updates, deleted: false, } } /// Get the segments file (`segments_N`) associated with this commit point pub fn segments_file_name(&self) -> &str { &self.segment_file_name } /// Delete this commit point. This only applies when using /// the commit point in the context of IndexWriter's /// IndexDeletionPolicy. /// /// Upon calling this, the writer is notified that this commit /// point should be deleted. /// /// Decision that a commit-point should be deleted is taken by the /// `IndexDeletionPolicy` in effect and therefore this should only /// be called by its `IndexDeletionPolicy#onInit on_init()` or /// `IndexDeletionPolicy#onCommit on_commit()` methods. pub fn delete(&mut self) -> Result<()> { self.deleted = true; Ok(())
} pub fn has_dv_updates(&self) -> bool { self.has_dv_updates }
random_line_split
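The index_file_deleter records revolve around per-file reference counts: every commit increfs the files it uses, and a file becomes deletable once its count returns to zero. A self-contained sketch of that bookkeeping (a standalone illustration, not the crate's IndexFileDeleter type):

```rust
use std::collections::{HashMap, HashSet};

/// Standalone model of the ref-counting idea: a file is only deletable
/// once no commit point (and no in-memory SegmentInfos) references it.
#[derive(Default)]
struct FileRefs {
    counts: HashMap<String, u32>,
}

impl FileRefs {
    fn inc(&mut self, file: &str) {
        *self.counts.entry(file.to_string()).or_insert(0) += 1;
    }

    /// Decrement each file and return those whose count dropped to zero.
    fn dec_all(&mut self, files: &HashSet<String>) -> HashSet<String> {
        let mut to_delete = HashSet::new();
        for f in files {
            if let Some(c) = self.counts.get_mut(f) {
                *c -= 1;
                if *c == 0 {
                    self.counts.remove(f);
                    to_delete.insert(f.clone());
                }
            }
        }
        to_delete
    }
}
```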
index_file_deleter.rs
<D>>, inited: bool, } impl<D: Directory> IndexFileDeleter<D> { pub fn new(directory: Arc<LockValidatingDirectoryWrapper<D>>) -> Self { IndexFileDeleter { ref_counts: Arc::new(RwLock::new(HashMap::new())), commits: vec![], last_files: HashSet::new(), policy: KeepOnlyLastCommitDeletionPolicy {}, delayed_dv_update_files: Arc::new(Mutex::new(Vec::new())), dv_pattern: Regex::new(CODEC_UPDATE_DV_PATTERN).unwrap(), fnm_pattern: Regex::new(CODEC_UPDATE_FNM_PATTERN).unwrap(), directory, inited: false, } } pub fn init<C: Codec>( &mut self, directory_orig: Arc<D>, files: &[String], segment_infos: &mut SegmentInfos<D, C>, initial_index_exists: bool, ) -> Result<bool> { let mut current_commit_point_idx: Option<usize> = None; if let Some(ref current_segments_file) = segment_infos.segment_file_name() { let pattern = Regex::new(CODEC_FILE_PATTERN).unwrap(); for filename in files { if pattern.is_match(filename) || filename.starts_with(INDEX_FILE_SEGMENTS) || filename.starts_with(INDEX_FILE_PENDING_SEGMENTS) { // Add this file to ref_counts with initial count 0. { if!self.ref_counts.read()?.contains_key(filename) { self.ref_counts .write()? .insert(filename.to_string(), RefCount::default()); } } if filename.starts_with(INDEX_FILE_SEGMENTS) && filename!= INDEX_FILE_OLD_SEGMENT_GEN { // This is a commit (segments or segments_N), and // it's valid (<= the max gen). Load it, then // incref all files it refers to: let sis: SegmentInfos<D, C> = SegmentInfos::read_commit(&directory_orig, filename)?; let commit_point = CommitPoint::new( sis.generation, sis.segment_file_name().unwrap_or("".to_string()), sis.files(true), sis.has_dv_updates(), ); self.commits.push(commit_point); if sis.generation == segment_infos.generation { current_commit_point_idx = Some(self.commits.len() - 1); } self.inc_ref_files(&sis.files(true)); } } } if current_commit_point_idx.is_none() && initial_index_exists { // We did not in fact see the segments_N file // corresponding to the segmentInfos that was passed // in. Yet, it must exist, because our caller holds // the write lock. This can happen when the directory // listing was stale (eg when index accessed via NFS // client with stale directory listing cache). So we // try now to explicitly open this commit point: let sis: SegmentInfos<D, C> = SegmentInfos::read_commit(&directory_orig, current_segments_file)?; let commit_point = CommitPoint::new( sis.generation, sis.segment_file_name().unwrap_or("".to_string()), sis.files(true), sis.has_dv_updates(), ); self.commits.push(commit_point); current_commit_point_idx = Some(self.commits.len() - 1); self.inc_ref_files(&sis.files(true)); } } // We keep commits list in sorted order (oldest to newest): self.commits.sort(); // refCounts only includes "normal" filenames (does not include write.lock) { let ref_counts = self.ref_counts.read()?; let files: Vec<&str> = ref_counts.keys().map(|s| s.as_str()).collect(); Self::inflate_gens(segment_infos, files)?; } // Now delete anything with ref count at 0. These are // presumably abandoned files eg due to crash of // IndexWriter. { let mut to_delete = HashSet::new(); for (filename, rc) in &*self.ref_counts.read()? 
{ if rc.count == 0 { // A segments_N file should never have ref count 0 on init if filename.starts_with(INDEX_FILE_SEGMENTS) { bail!(ErrorKind::IllegalState(format!( "file '{}' has ref_count=0, shouldn't happen on init", filename ))); } to_delete.insert(filename.clone()); } } self.delete_files(&to_delete, false)?; } // Finally, give policy a chance to remove things on // startup: { let mut commits: Vec<&mut CommitPoint> = Vec::with_capacity(self.commits.len()); for i in &mut self.commits { commits.push(i); } self.policy.on_init(commits)?; } // Always protect the incoming segmentInfos since // sometime it may not be the most recent commit self.checkpoint(segment_infos, false)?; let mut starting_commit_deleted = false; if let Some(idx) = current_commit_point_idx { if self.commits[idx].deleted { starting_commit_deleted = true; } } self.delete_commits()?; self.inited = true; Ok(starting_commit_deleted) } /// Set all gens beyond what we currently see in the directory, to avoid double-write /// in cases where the previous IndexWriter did not gracefully close/rollback (e.g. /// os/machine crashed or lost power). fn inflate_gens<C: Codec>(infos: &mut SegmentInfos<D, C>, files: Vec<&str>) -> Result<()> { let mut max_segment_gen = i64::min_value(); let mut max_segment_name = i32::min_value(); // Confusingly, this is the union of live_docs, field infos, doc values // (and maybe others, in the future) gens. This is somewhat messy, // since it means DV updates will suddenly write to the next gen after // live docs' gen, for example, but we don't have the APIs to ask the // codec which file is which: let mut max_per_segment_gen = HashMap::new(); for filename in files { if filename == INDEX_FILE_OLD_SEGMENT_GEN { // do nothing } else if filename.starts_with(INDEX_FILE_SEGMENTS) { // trash file: we have to handle this since we allow anything // starting with'segments' here if let Ok(gen) = generation_from_segments_file_name(filename) { max_segment_gen = max_segment_gen.max(gen); } } else if filename.starts_with(INDEX_FILE_PENDING_SEGMENTS) { // the first 8 bytes is "pending_", so the slice operation is safe if let Ok(gen) = generation_from_segments_file_name(&filename[8..]) { max_segment_gen = max_segment_gen.max(gen); } } else { let segment_name = parse_segment_name(filename); debug_assert!(segment_name.starts_with('_')); if filename.to_lowercase().ends_with(".tmp") { // A temp file: don't try to look at its gen continue; } max_segment_name = max_segment_name.max(i32::from_str_radix(&segment_name[1..], 36)?); let mut cur_gen = max_per_segment_gen.get(segment_name).map_or(0, |x| *x); if let Ok(gen) = parse_generation(filename) { cur_gen = cur_gen.max(gen); } max_per_segment_gen.insert(segment_name.to_string(), cur_gen); } } // Generation is advanced before write: let next_write_gen = max_segment_gen.max(infos.generation); infos.set_next_write_generation(next_write_gen)?; if infos.counter < max_segment_name + 1 { infos.counter = max_segment_name } for info in &mut infos.segments { let gen = max_per_segment_gen[&info.info.name]; if info.next_write_del_gen() < gen + 1 { info.set_next_write_del_gen(gen + 1); } if info.next_write_field_infos_gen() < gen + 1 { info.set_next_write_field_infos_gen(gen + 1); } if info.next_write_doc_values_gen() < gen + 1 { info.set_next_write_doc_values_gen(gen + 1); } } Ok(()) } /// For definition of "check point" see IndexWriter comments: /// "Clarification: Check Points (and commits)". 
/// /// Writer calls this when it has made a "consistent /// change" to the index, meaning new files are written to /// the index and the in-memory SegmentInfos have been /// modified to point to those files. /// /// This may or may not be a commit (segments_N may or may /// not have been written). /// /// We simply incref the files referenced by the new /// SegmentInfos and decref the files we had previously /// seen (if any). /// /// If this is a commit, we also call the policy to give it /// a chance to remove other commits. If any commits are /// removed, we decref their files as well. pub fn checkpoint<C: Codec>( &mut self, segment_infos: &SegmentInfos<D, C>, is_commit: bool, ) -> Result<()> { // incref the files: self.inc_ref_files(&segment_infos.files(is_commit)); if is_commit { // Append to our commits list: let p = CommitPoint::new( segment_infos.generation, segment_infos.segment_file_name().unwrap_or("".to_string()), segment_infos.files(true), segment_infos.has_dv_updates(), ); self.commits.push(p); // Tell policy so it can remove commits: { let mut commits: Vec<&mut CommitPoint> = Vec::with_capacity(self.commits.len()); for i in &mut self.commits { commits.push(i); } self.policy.on_commit(commits)?; } // DecRef file for commits that were deleted by the policy self.delete_commits() } else { let res = self.dec_ref_files(&self.last_files); self.last_files.clear(); res?; // Save files so we can decr on next checkpoint/commit: self.last_files.extend(segment_infos.files(false)); Ok(()) } } pub fn exists(&self, filename: &str) -> bool { if!self.ref_counts.read().unwrap().contains_key(filename) { false } else { self.ensure_ref_count(filename); self.ref_counts.read().unwrap()[filename].count > 0 } } fn ensure_ref_count(&self, file_name: &str) { let mut ref_counts = self.ref_counts.write().unwrap(); if!ref_counts.contains_key(file_name) { ref_counts.insert(file_name.to_string(), RefCount::default()); } } pub fn inc_ref_files(&self, files: &HashSet<String>) { for file in files { self.ensure_ref_count(file); self.ref_counts .write() .unwrap() .get_mut(file) .unwrap() .inc_ref(); } } /// Decrefs all provided files, even on exception; throws first exception hit, if any. pub fn dec_ref_files(&self, files: &HashSet<String>) -> Result<()> { let mut to_delete = HashSet::new(); for f in files { if self.dec_ref(f) { to_delete.insert(f.clone()); } } self.delete_files(&to_delete, false) } fn _dec_ref_files_by_commit(&self, files: &HashSet<String>) -> Result<()>
pub fn dec_ref_files_no_error(&self, files: &HashSet<String>) { if let Err(e) = self.dec_ref_files(files) { warn!("dec_ref_files_no_error failed with '{:?}'", e); } } /// Returns true if the file should now be deleted. fn dec_ref(&self, filename: &str) -> bool { self.ensure_ref_count(filename); let mut ref_counts = self.ref_counts.write().unwrap(); if ref_counts.get_mut(filename).unwrap().dec_ref() == 0 { // This file is no longer referenced by any past // commit points nor by the in-memory SegmentInfos: ref_counts.remove(filename); true } else { false } } /// Remove the CommitPoints in the commitsToDelete List by /// DecRef'ing all files from each SegmentInfos. fn delete_commits(&mut self) -> Result<()> { let mut res = Ok(()); // First decref all files that had been referred to by // the now-deleted commits: for commit in &self.commits { if commit.deleted { res = self.dec_ref_files(&commit.files); } } // NOTE: does nothing if not err if res.is_err() { return res; } // Now compact commits to remove deleted ones (preserving the sort): let size = self.commits.len(); let mut read_from = 0; let mut write_to = 0; while read_from < size { if!self.commits[read_from].deleted { if write_to!= read_from { self.commits.swap(read_from, write_to); } write_to += 1; } read_from += 1; } self.commits.truncate(write_to); Ok(()) } fn delete_files(&self, files: &HashSet<String>, do_commit_filter: bool) -> Result<()> { // We make two passes, first deleting any segments_N files, second // deleting the rest. We do this so that if we throw exc or JVM // crashes during deletions, even when not on Windows, we don't // leave the index in an "apparently corrupt" state: let mut copys = vec![]; for file in files { copys.push(file); if!file.starts_with(INDEX_FILE_SEGMENTS) { continue; } self.delete_file(file)?; } if do_commit_filter { self.filter_dv_update_files(&mut copys); } for file in copys { if file.starts_with(INDEX_FILE_SEGMENTS) { continue; } self.delete_file(file)?; } Ok(()) } fn filter_dv_update_files(&self, candidates: &mut Vec<&String>) { let dv_update_files: Vec<String> = candidates .drain_filter(|f| -> bool { self.fnm_pattern.is_match(f) || self.dv_pattern.is_match(f) }) .map(|f| f.clone()) .collect(); let to_deletes: Vec<Vec<String>>; { let mut l = self.delayed_dv_update_files.lock(); let old_dv_update_files = l.as_mut().unwrap(); let tm_now = SystemTime::now() .duration_since(UNIX_EPOCH) .unwrap() .as_secs(); to_deletes = old_dv_update_files .drain_filter(|(x, _)| -> bool { *x < tm_now }) .map(|(_, y)| y) .collect(); old_dv_update_files.push((tm_now + 60, dv_update_files)); } for files in to_deletes { for file in files { self.delete_file(&file).unwrap_or(()); } } } fn delete_file(&self, filename: &str) -> Result<()>
{ let mut to_delete = HashSet::new(); for f in files { if self.dec_ref(f) { to_delete.insert(f.clone()); } } self.delete_files(&to_delete, true) }
identifier_body
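delete_commits in these records compacts the commits vector in place, preserving order, using a read cursor and a write cursor. The same two-pointer compaction in isolation (Commit is a stand-in type used only for this sketch):

```rust
/// Stand-in type for this sketch only.
struct Commit {
    deleted: bool,
}

/// Keep live commits in their original order and truncate the rest.
fn compact(commits: &mut Vec<Commit>) {
    let size = commits.len();
    let (mut read_from, mut write_to) = (0, 0);
    while read_from < size {
        if !commits[read_from].deleted {
            if write_to != read_from {
                commits.swap(read_from, write_to);
            }
            write_to += 1;
        }
        read_from += 1;
    }
    commits.truncate(write_to);
}
```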
index_file_deleter.rs
); self.commits.push(commit_point); current_commit_point_idx = Some(self.commits.len() - 1); self.inc_ref_files(&sis.files(true)); } } // We keep commits list in sorted order (oldest to newest): self.commits.sort(); // refCounts only includes "normal" filenames (does not include write.lock) { let ref_counts = self.ref_counts.read()?; let files: Vec<&str> = ref_counts.keys().map(|s| s.as_str()).collect(); Self::inflate_gens(segment_infos, files)?; } // Now delete anything with ref count at 0. These are // presumably abandoned files eg due to crash of // IndexWriter. { let mut to_delete = HashSet::new(); for (filename, rc) in &*self.ref_counts.read()? { if rc.count == 0 { // A segments_N file should never have ref count 0 on init if filename.starts_with(INDEX_FILE_SEGMENTS) { bail!(ErrorKind::IllegalState(format!( "file '{}' has ref_count=0, shouldn't happen on init", filename ))); } to_delete.insert(filename.clone()); } } self.delete_files(&to_delete, false)?; } // Finally, give policy a chance to remove things on // startup: { let mut commits: Vec<&mut CommitPoint> = Vec::with_capacity(self.commits.len()); for i in &mut self.commits { commits.push(i); } self.policy.on_init(commits)?; } // Always protect the incoming segmentInfos since // sometime it may not be the most recent commit self.checkpoint(segment_infos, false)?; let mut starting_commit_deleted = false; if let Some(idx) = current_commit_point_idx { if self.commits[idx].deleted { starting_commit_deleted = true; } } self.delete_commits()?; self.inited = true; Ok(starting_commit_deleted) } /// Set all gens beyond what we currently see in the directory, to avoid double-write /// in cases where the previous IndexWriter did not gracefully close/rollback (e.g. /// os/machine crashed or lost power). fn inflate_gens<C: Codec>(infos: &mut SegmentInfos<D, C>, files: Vec<&str>) -> Result<()> { let mut max_segment_gen = i64::min_value(); let mut max_segment_name = i32::min_value(); // Confusingly, this is the union of live_docs, field infos, doc values // (and maybe others, in the future) gens. 
This is somewhat messy, // since it means DV updates will suddenly write to the next gen after // live docs' gen, for example, but we don't have the APIs to ask the // codec which file is which: let mut max_per_segment_gen = HashMap::new(); for filename in files { if filename == INDEX_FILE_OLD_SEGMENT_GEN { // do nothing } else if filename.starts_with(INDEX_FILE_SEGMENTS) { // trash file: we have to handle this since we allow anything // starting with'segments' here if let Ok(gen) = generation_from_segments_file_name(filename) { max_segment_gen = max_segment_gen.max(gen); } } else if filename.starts_with(INDEX_FILE_PENDING_SEGMENTS) { // the first 8 bytes is "pending_", so the slice operation is safe if let Ok(gen) = generation_from_segments_file_name(&filename[8..]) { max_segment_gen = max_segment_gen.max(gen); } } else { let segment_name = parse_segment_name(filename); debug_assert!(segment_name.starts_with('_')); if filename.to_lowercase().ends_with(".tmp") { // A temp file: don't try to look at its gen continue; } max_segment_name = max_segment_name.max(i32::from_str_radix(&segment_name[1..], 36)?); let mut cur_gen = max_per_segment_gen.get(segment_name).map_or(0, |x| *x); if let Ok(gen) = parse_generation(filename) { cur_gen = cur_gen.max(gen); } max_per_segment_gen.insert(segment_name.to_string(), cur_gen); } } // Generation is advanced before write: let next_write_gen = max_segment_gen.max(infos.generation); infos.set_next_write_generation(next_write_gen)?; if infos.counter < max_segment_name + 1 { infos.counter = max_segment_name } for info in &mut infos.segments { let gen = max_per_segment_gen[&info.info.name]; if info.next_write_del_gen() < gen + 1 { info.set_next_write_del_gen(gen + 1); } if info.next_write_field_infos_gen() < gen + 1 { info.set_next_write_field_infos_gen(gen + 1); } if info.next_write_doc_values_gen() < gen + 1 { info.set_next_write_doc_values_gen(gen + 1); } } Ok(()) } /// For definition of "check point" see IndexWriter comments: /// "Clarification: Check Points (and commits)". /// /// Writer calls this when it has made a "consistent /// change" to the index, meaning new files are written to /// the index and the in-memory SegmentInfos have been /// modified to point to those files. /// /// This may or may not be a commit (segments_N may or may /// not have been written). /// /// We simply incref the files referenced by the new /// SegmentInfos and decref the files we had previously /// seen (if any). /// /// If this is a commit, we also call the policy to give it /// a chance to remove other commits. If any commits are /// removed, we decref their files as well. 
pub fn checkpoint<C: Codec>( &mut self, segment_infos: &SegmentInfos<D, C>, is_commit: bool, ) -> Result<()> { // incref the files: self.inc_ref_files(&segment_infos.files(is_commit)); if is_commit { // Append to our commits list: let p = CommitPoint::new( segment_infos.generation, segment_infos.segment_file_name().unwrap_or("".to_string()), segment_infos.files(true), segment_infos.has_dv_updates(), ); self.commits.push(p); // Tell policy so it can remove commits: { let mut commits: Vec<&mut CommitPoint> = Vec::with_capacity(self.commits.len()); for i in &mut self.commits { commits.push(i); } self.policy.on_commit(commits)?; } // DecRef file for commits that were deleted by the policy self.delete_commits() } else { let res = self.dec_ref_files(&self.last_files); self.last_files.clear(); res?; // Save files so we can decr on next checkpoint/commit: self.last_files.extend(segment_infos.files(false)); Ok(()) } } pub fn exists(&self, filename: &str) -> bool { if!self.ref_counts.read().unwrap().contains_key(filename) { false } else { self.ensure_ref_count(filename); self.ref_counts.read().unwrap()[filename].count > 0 } } fn ensure_ref_count(&self, file_name: &str) { let mut ref_counts = self.ref_counts.write().unwrap(); if!ref_counts.contains_key(file_name) { ref_counts.insert(file_name.to_string(), RefCount::default()); } } pub fn inc_ref_files(&self, files: &HashSet<String>) { for file in files { self.ensure_ref_count(file); self.ref_counts .write() .unwrap() .get_mut(file) .unwrap() .inc_ref(); } } /// Decrefs all provided files, even on exception; throws first exception hit, if any. pub fn dec_ref_files(&self, files: &HashSet<String>) -> Result<()> { let mut to_delete = HashSet::new(); for f in files { if self.dec_ref(f) { to_delete.insert(f.clone()); } } self.delete_files(&to_delete, false) } fn _dec_ref_files_by_commit(&self, files: &HashSet<String>) -> Result<()> { let mut to_delete = HashSet::new(); for f in files { if self.dec_ref(f) { to_delete.insert(f.clone()); } } self.delete_files(&to_delete, true) } pub fn dec_ref_files_no_error(&self, files: &HashSet<String>) { if let Err(e) = self.dec_ref_files(files) { warn!("dec_ref_files_no_error failed with '{:?}'", e); } } /// Returns true if the file should now be deleted. fn dec_ref(&self, filename: &str) -> bool { self.ensure_ref_count(filename); let mut ref_counts = self.ref_counts.write().unwrap(); if ref_counts.get_mut(filename).unwrap().dec_ref() == 0 { // This file is no longer referenced by any past // commit points nor by the in-memory SegmentInfos: ref_counts.remove(filename); true } else { false } } /// Remove the CommitPoints in the commitsToDelete List by /// DecRef'ing all files from each SegmentInfos. fn delete_commits(&mut self) -> Result<()> { let mut res = Ok(()); // First decref all files that had been referred to by // the now-deleted commits: for commit in &self.commits { if commit.deleted { res = self.dec_ref_files(&commit.files); } } // NOTE: does nothing if not err if res.is_err() { return res; } // Now compact commits to remove deleted ones (preserving the sort): let size = self.commits.len(); let mut read_from = 0; let mut write_to = 0; while read_from < size { if!self.commits[read_from].deleted { if write_to!= read_from { self.commits.swap(read_from, write_to); } write_to += 1; } read_from += 1; } self.commits.truncate(write_to); Ok(()) } fn delete_files(&self, files: &HashSet<String>, do_commit_filter: bool) -> Result<()> { // We make two passes, first deleting any segments_N files, second // deleting the rest. 
We do this so that if we throw exc or JVM // crashes during deletions, even when not on Windows, we don't // leave the index in an "apparently corrupt" state: let mut copys = vec![]; for file in files { copys.push(file); if!file.starts_with(INDEX_FILE_SEGMENTS) { continue; } self.delete_file(file)?; } if do_commit_filter { self.filter_dv_update_files(&mut copys); } for file in copys { if file.starts_with(INDEX_FILE_SEGMENTS) { continue; } self.delete_file(file)?; } Ok(()) } fn filter_dv_update_files(&self, candidates: &mut Vec<&String>) { let dv_update_files: Vec<String> = candidates .drain_filter(|f| -> bool { self.fnm_pattern.is_match(f) || self.dv_pattern.is_match(f) }) .map(|f| f.clone()) .collect(); let to_deletes: Vec<Vec<String>>; { let mut l = self.delayed_dv_update_files.lock(); let old_dv_update_files = l.as_mut().unwrap(); let tm_now = SystemTime::now() .duration_since(UNIX_EPOCH) .unwrap() .as_secs(); to_deletes = old_dv_update_files .drain_filter(|(x, _)| -> bool { *x < tm_now }) .map(|(_, y)| y) .collect(); old_dv_update_files.push((tm_now + 60, dv_update_files)); } for files in to_deletes { for file in files { self.delete_file(&file).unwrap_or(()); } } } fn delete_file(&self, filename: &str) -> Result<()> { // panic!("wrong deleted files"); self.directory.delete_file(filename) } /// Deletes the specified files, but only if they are new /// (have not yes been incref'd). pub fn delete_new_files(&self, files: &HashSet<String>) -> Result<()> { let mut filtered = HashSet::with_capacity(files.len()); let ref_counts = self.ref_counts.read().unwrap(); for file in files { // NOTE: it's very unusual yet possible for the // refCount to be present and 0: it can happen if you // open IW on a crashed index, and it removes a bunch // of unref'd files, and then you add new docs / do // merging, and it reuses that segment name. // TestCrash.testCrashAfterReopen can hit this: if!ref_counts.contains_key(file) || ref_counts[file].count == 0 { filtered.insert(file.clone()); } } self.delete_files(&filtered, false) } /// Writer calls this when it has hit an error and had to /// roll back, to tell us that there may now be /// unreferenced files in the filesystem. So we re-list /// the filesystem and delete such files. If segmentName /// is non-null, we will only delete files corresponding to /// that segment. 
pub fn refresh(&mut self) -> Result<()> { debug_assert!(self.inited); let files = self.directory.list_all()?; let mut to_delete = HashSet::new(); let pattern = Regex::new(CODEC_FILE_PATTERN).unwrap(); for filename in &files { if!self.ref_counts.read()?.contains_key(filename) && (pattern.is_match(filename) || filename.starts_with(INDEX_FILE_SEGMENTS) || filename.starts_with(INDEX_FILE_PENDING_SEGMENTS)) { // Unreferenced file, so remove it to_delete.insert(filename.clone()); } } self.delete_files(&to_delete, false) } pub fn close(&mut self) -> Result<()> { if!self.last_files.is_empty() { let files = mem::replace(&mut self.last_files, HashSet::new()); self.dec_ref_files(&files)?; } Ok(()) } } struct RefCount { inited: bool, count: u32, } impl Default for RefCount { fn default() -> Self { RefCount { inited: false, count: 0, } } } impl RefCount { fn inc_ref(&mut self) -> u32 { if!self.inited { self.inited = true; } else { debug_assert!(self.count > 0); } self.count += 1; self.count } fn dec_ref(&mut self) -> u32 { debug_assert!(self.count > 0); self.count -= 1; self.count } } impl fmt::Display for RefCount { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.count) } } impl fmt::Debug for RefCount { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.count) } } /// Expert: represents a single commit into an index as seen by the /// {@link IndexDeletionPolicy} or {@link IndexReader}. /// /// Changes to the content of an index are made visible /// only after the writer who made that change commits by /// writing a new segments file /// (`segments_N</code`). This point in time, when the /// action of writing of a new segments file to the directory /// is completed, is an index commit. /// /// Each index commit point has a unique segments file /// associated with it. The segments file associated with a /// later index commit point would have a larger N. /// /// Holds details for each commit point. This class is also passed to /// the deletion policy. Note: this class has a natural ordering that /// is inconsistent with equals. pub struct CommitPoint { generation: i64, segment_file_name: String, files: HashSet<String>, has_dv_updates: bool, deleted: bool, } impl CommitPoint { fn new( generation: i64, segment_file_name: String, files: HashSet<String>, has_dv_updates: bool, ) -> Self { CommitPoint { generation, segment_file_name, files, has_dv_updates, deleted: false, } } /// Get the segments file (`segments_N`) associated with this commit point pub fn segments_file_name(&self) -> &str { &self.segment_file_name } /// Delete this commit point. This only applies when using /// the commit point in the context of IndexWriter's /// IndexDeletionPolicy. /// /// Upon calling this, the writer is notified that this commit /// point should be deleted. /// /// Decision that a commit-point should be deleted is taken by the /// `IndexDeletionPolicy` in effect and therefore this should only /// be called by its `IndexDeletionPolicy#onInit on_init()` or /// `IndexDeletionPolicy#onCommit on_commit()` methods. pub fn
delete
identifier_name
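inflate_gens derives the next segment counter from file names such as "_a1": everything after the leading underscore is parsed as a base-36 number. A small hedged sketch of just that parsing step (the helper name is made up for illustration):

```rust
/// Parse the base-36 counter embedded in a segment name like "_a1".
fn segment_counter(name: &str) -> Option<i32> {
    let digits = name.strip_prefix('_')?;
    i32::from_str_radix(digits, 36).ok()
}

fn main() {
    assert_eq!(segment_counter("_0"), Some(0));
    assert_eq!(segment_counter("_a"), Some(10));
    assert_eq!(segment_counter("_z1"), Some(35 * 36 + 1));
}
```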
index_file_deleter.rs
<D>>, inited: bool, } impl<D: Directory> IndexFileDeleter<D> { pub fn new(directory: Arc<LockValidatingDirectoryWrapper<D>>) -> Self { IndexFileDeleter { ref_counts: Arc::new(RwLock::new(HashMap::new())), commits: vec![], last_files: HashSet::new(), policy: KeepOnlyLastCommitDeletionPolicy {}, delayed_dv_update_files: Arc::new(Mutex::new(Vec::new())), dv_pattern: Regex::new(CODEC_UPDATE_DV_PATTERN).unwrap(), fnm_pattern: Regex::new(CODEC_UPDATE_FNM_PATTERN).unwrap(), directory, inited: false, } } pub fn init<C: Codec>( &mut self, directory_orig: Arc<D>, files: &[String], segment_infos: &mut SegmentInfos<D, C>, initial_index_exists: bool, ) -> Result<bool> { let mut current_commit_point_idx: Option<usize> = None; if let Some(ref current_segments_file) = segment_infos.segment_file_name() { let pattern = Regex::new(CODEC_FILE_PATTERN).unwrap(); for filename in files { if pattern.is_match(filename) || filename.starts_with(INDEX_FILE_SEGMENTS) || filename.starts_with(INDEX_FILE_PENDING_SEGMENTS) { // Add this file to ref_counts with initial count 0. { if!self.ref_counts.read()?.contains_key(filename) { self.ref_counts .write()? .insert(filename.to_string(), RefCount::default()); } } if filename.starts_with(INDEX_FILE_SEGMENTS) && filename!= INDEX_FILE_OLD_SEGMENT_GEN { // This is a commit (segments or segments_N), and // it's valid (<= the max gen). Load it, then // incref all files it refers to: let sis: SegmentInfos<D, C> = SegmentInfos::read_commit(&directory_orig, filename)?; let commit_point = CommitPoint::new( sis.generation, sis.segment_file_name().unwrap_or("".to_string()), sis.files(true), sis.has_dv_updates(), ); self.commits.push(commit_point); if sis.generation == segment_infos.generation { current_commit_point_idx = Some(self.commits.len() - 1); } self.inc_ref_files(&sis.files(true)); } } } if current_commit_point_idx.is_none() && initial_index_exists { // We did not in fact see the segments_N file // corresponding to the segmentInfos that was passed // in. Yet, it must exist, because our caller holds // the write lock. This can happen when the directory // listing was stale (eg when index accessed via NFS // client with stale directory listing cache). So we // try now to explicitly open this commit point: let sis: SegmentInfos<D, C> = SegmentInfos::read_commit(&directory_orig, current_segments_file)?; let commit_point = CommitPoint::new( sis.generation, sis.segment_file_name().unwrap_or("".to_string()), sis.files(true), sis.has_dv_updates(), ); self.commits.push(commit_point); current_commit_point_idx = Some(self.commits.len() - 1); self.inc_ref_files(&sis.files(true)); } } // We keep commits list in sorted order (oldest to newest): self.commits.sort(); // refCounts only includes "normal" filenames (does not include write.lock) { let ref_counts = self.ref_counts.read()?; let files: Vec<&str> = ref_counts.keys().map(|s| s.as_str()).collect(); Self::inflate_gens(segment_infos, files)?; } // Now delete anything with ref count at 0. These are // presumably abandoned files eg due to crash of // IndexWriter. { let mut to_delete = HashSet::new(); for (filename, rc) in &*self.ref_counts.read()? 
{ if rc.count == 0 { // A segments_N file should never have ref count 0 on init if filename.starts_with(INDEX_FILE_SEGMENTS) { bail!(ErrorKind::IllegalState(format!( "file '{}' has ref_count=0, shouldn't happen on init", filename ))); } to_delete.insert(filename.clone()); } } self.delete_files(&to_delete, false)?; } // Finally, give policy a chance to remove things on // startup: { let mut commits: Vec<&mut CommitPoint> = Vec::with_capacity(self.commits.len()); for i in &mut self.commits { commits.push(i); } self.policy.on_init(commits)?; } // Always protect the incoming segmentInfos since // sometime it may not be the most recent commit self.checkpoint(segment_infos, false)?; let mut starting_commit_deleted = false; if let Some(idx) = current_commit_point_idx { if self.commits[idx].deleted
} self.delete_commits()?; self.inited = true; Ok(starting_commit_deleted) } /// Set all gens beyond what we currently see in the directory, to avoid double-write /// in cases where the previous IndexWriter did not gracefully close/rollback (e.g. /// os/machine crashed or lost power). fn inflate_gens<C: Codec>(infos: &mut SegmentInfos<D, C>, files: Vec<&str>) -> Result<()> { let mut max_segment_gen = i64::min_value(); let mut max_segment_name = i32::min_value(); // Confusingly, this is the union of live_docs, field infos, doc values // (and maybe others, in the future) gens. This is somewhat messy, // since it means DV updates will suddenly write to the next gen after // live docs' gen, for example, but we don't have the APIs to ask the // codec which file is which: let mut max_per_segment_gen = HashMap::new(); for filename in files { if filename == INDEX_FILE_OLD_SEGMENT_GEN { // do nothing } else if filename.starts_with(INDEX_FILE_SEGMENTS) { // trash file: we have to handle this since we allow anything // starting with'segments' here if let Ok(gen) = generation_from_segments_file_name(filename) { max_segment_gen = max_segment_gen.max(gen); } } else if filename.starts_with(INDEX_FILE_PENDING_SEGMENTS) { // the first 8 bytes is "pending_", so the slice operation is safe if let Ok(gen) = generation_from_segments_file_name(&filename[8..]) { max_segment_gen = max_segment_gen.max(gen); } } else { let segment_name = parse_segment_name(filename); debug_assert!(segment_name.starts_with('_')); if filename.to_lowercase().ends_with(".tmp") { // A temp file: don't try to look at its gen continue; } max_segment_name = max_segment_name.max(i32::from_str_radix(&segment_name[1..], 36)?); let mut cur_gen = max_per_segment_gen.get(segment_name).map_or(0, |x| *x); if let Ok(gen) = parse_generation(filename) { cur_gen = cur_gen.max(gen); } max_per_segment_gen.insert(segment_name.to_string(), cur_gen); } } // Generation is advanced before write: let next_write_gen = max_segment_gen.max(infos.generation); infos.set_next_write_generation(next_write_gen)?; if infos.counter < max_segment_name + 1 { infos.counter = max_segment_name } for info in &mut infos.segments { let gen = max_per_segment_gen[&info.info.name]; if info.next_write_del_gen() < gen + 1 { info.set_next_write_del_gen(gen + 1); } if info.next_write_field_infos_gen() < gen + 1 { info.set_next_write_field_infos_gen(gen + 1); } if info.next_write_doc_values_gen() < gen + 1 { info.set_next_write_doc_values_gen(gen + 1); } } Ok(()) } /// For definition of "check point" see IndexWriter comments: /// "Clarification: Check Points (and commits)". /// /// Writer calls this when it has made a "consistent /// change" to the index, meaning new files are written to /// the index and the in-memory SegmentInfos have been /// modified to point to those files. /// /// This may or may not be a commit (segments_N may or may /// not have been written). /// /// We simply incref the files referenced by the new /// SegmentInfos and decref the files we had previously /// seen (if any). /// /// If this is a commit, we also call the policy to give it /// a chance to remove other commits. If any commits are /// removed, we decref their files as well. 
pub fn checkpoint<C: Codec>( &mut self, segment_infos: &SegmentInfos<D, C>, is_commit: bool, ) -> Result<()> { // incref the files: self.inc_ref_files(&segment_infos.files(is_commit)); if is_commit { // Append to our commits list: let p = CommitPoint::new( segment_infos.generation, segment_infos.segment_file_name().unwrap_or("".to_string()), segment_infos.files(true), segment_infos.has_dv_updates(), ); self.commits.push(p); // Tell policy so it can remove commits: { let mut commits: Vec<&mut CommitPoint> = Vec::with_capacity(self.commits.len()); for i in &mut self.commits { commits.push(i); } self.policy.on_commit(commits)?; } // DecRef file for commits that were deleted by the policy self.delete_commits() } else { let res = self.dec_ref_files(&self.last_files); self.last_files.clear(); res?; // Save files so we can decr on next checkpoint/commit: self.last_files.extend(segment_infos.files(false)); Ok(()) } } pub fn exists(&self, filename: &str) -> bool { if!self.ref_counts.read().unwrap().contains_key(filename) { false } else { self.ensure_ref_count(filename); self.ref_counts.read().unwrap()[filename].count > 0 } } fn ensure_ref_count(&self, file_name: &str) { let mut ref_counts = self.ref_counts.write().unwrap(); if!ref_counts.contains_key(file_name) { ref_counts.insert(file_name.to_string(), RefCount::default()); } } pub fn inc_ref_files(&self, files: &HashSet<String>) { for file in files { self.ensure_ref_count(file); self.ref_counts .write() .unwrap() .get_mut(file) .unwrap() .inc_ref(); } } /// Decrefs all provided files, even on exception; throws first exception hit, if any. pub fn dec_ref_files(&self, files: &HashSet<String>) -> Result<()> { let mut to_delete = HashSet::new(); for f in files { if self.dec_ref(f) { to_delete.insert(f.clone()); } } self.delete_files(&to_delete, false) } fn _dec_ref_files_by_commit(&self, files: &HashSet<String>) -> Result<()> { let mut to_delete = HashSet::new(); for f in files { if self.dec_ref(f) { to_delete.insert(f.clone()); } } self.delete_files(&to_delete, true) } pub fn dec_ref_files_no_error(&self, files: &HashSet<String>) { if let Err(e) = self.dec_ref_files(files) { warn!("dec_ref_files_no_error failed with '{:?}'", e); } } /// Returns true if the file should now be deleted. fn dec_ref(&self, filename: &str) -> bool { self.ensure_ref_count(filename); let mut ref_counts = self.ref_counts.write().unwrap(); if ref_counts.get_mut(filename).unwrap().dec_ref() == 0 { // This file is no longer referenced by any past // commit points nor by the in-memory SegmentInfos: ref_counts.remove(filename); true } else { false } } /// Remove the CommitPoints in the commitsToDelete List by /// DecRef'ing all files from each SegmentInfos. fn delete_commits(&mut self) -> Result<()> { let mut res = Ok(()); // First decref all files that had been referred to by // the now-deleted commits: for commit in &self.commits { if commit.deleted { res = self.dec_ref_files(&commit.files); } } // NOTE: does nothing if not err if res.is_err() { return res; } // Now compact commits to remove deleted ones (preserving the sort): let size = self.commits.len(); let mut read_from = 0; let mut write_to = 0; while read_from < size { if!self.commits[read_from].deleted { if write_to!= read_from { self.commits.swap(read_from, write_to); } write_to += 1; } read_from += 1; } self.commits.truncate(write_to); Ok(()) } fn delete_files(&self, files: &HashSet<String>, do_commit_filter: bool) -> Result<()> { // We make two passes, first deleting any segments_N files, second // deleting the rest. 
We do this so that if we throw exc or JVM // crashes during deletions, even when not on Windows, we don't // leave the index in an "apparently corrupt" state: let mut copys = vec![]; for file in files { copys.push(file); if!file.starts_with(INDEX_FILE_SEGMENTS) { continue; } self.delete_file(file)?; } if do_commit_filter { self.filter_dv_update_files(&mut copys); } for file in copys { if file.starts_with(INDEX_FILE_SEGMENTS) { continue; } self.delete_file(file)?; } Ok(()) } fn filter_dv_update_files(&self, candidates: &mut Vec<&String>) { let dv_update_files: Vec<String> = candidates .drain_filter(|f| -> bool { self.fnm_pattern.is_match(f) || self.dv_pattern.is_match(f) }) .map(|f| f.clone()) .collect(); let to_deletes: Vec<Vec<String>>; { let mut l = self.delayed_dv_update_files.lock(); let old_dv_update_files = l.as_mut().unwrap(); let tm_now = SystemTime::now() .duration_since(UNIX_EPOCH) .unwrap() .as_secs(); to_deletes = old_dv_update_files .drain_filter(|(x, _)| -> bool { *x < tm_now }) .map(|(_, y)| y) .collect(); old_dv_update_files.push((tm_now + 60, dv_update_files)); } for files in to_deletes { for file in files { self.delete_file(&file).unwrap_or(()); } } } fn delete_file(&self, filename: &str) -> Result<()>
{ starting_commit_deleted = true; }
conditional_block
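The record above comes from a Lucene-style index file deleter: each checkpoint increfs the files referenced by the new SegmentInfos, decrefs the files of the previous checkpoint, and any file whose count reaches zero becomes deletable. Below is a minimal standalone sketch of that bookkeeping, assuming nothing beyond std; the names (FileRefCounts, checkpoint, dec_ref) are illustrative and deliberately simplified — the real deleter also tracks commit points, a deletion policy, and segments_N ordering, which are omitted here.

use std::collections::{HashMap, HashSet};

// Illustrative stand-in for the deleter's ref-count table; not the crate's type.
#[derive(Default)]
struct FileRefCounts {
    counts: HashMap<String, usize>,
    last_files: HashSet<String>,
}

impl FileRefCounts {
    fn inc_ref(&mut self, file: &str) {
        *self.counts.entry(file.to_string()).or_insert(0) += 1;
    }

    /// Returns true when the file is no longer referenced and may be deleted.
    fn dec_ref(&mut self, file: &str) -> bool {
        let remaining = match self.counts.get_mut(file) {
            Some(c) => {
                *c -= 1;
                *c
            }
            None => return false,
        };
        if remaining == 0 {
            self.counts.remove(file);
            true
        } else {
            false
        }
    }

    /// Non-commit checkpoint: incref the new file set, decref the previous one,
    /// and report every file whose count dropped to zero.
    fn checkpoint(&mut self, files: HashSet<String>) -> Vec<String> {
        for f in &files {
            self.inc_ref(f);
        }
        let previous = std::mem::replace(&mut self.last_files, files);
        let mut deletable = Vec::new();
        for f in previous {
            if self.dec_ref(&f) {
                deletable.push(f);
            }
        }
        deletable
    }
}

fn main() {
    let mut refs = FileRefCounts::default();
    let gen1: HashSet<String> = ["_0.si", "_0.cfs"].iter().map(|s| s.to_string()).collect();
    let gen2: HashSet<String> = ["_0.si", "_0.cfs", "_1.si"].iter().map(|s| s.to_string()).collect();
    // Files shared with the newer checkpoint stay referenced, so nothing is deletable yet.
    assert!(refs.checkpoint(gen1).is_empty());
    assert!(refs.checkpoint(gen2).is_empty());
}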
ssh.rs
_from_slice(&tag_bytes[..TAG_LEN_BYTES]); tag } /// OpenSSH-supported ciphers. #[allow(clippy::enum_variant_names)] #[derive(Clone, Copy, Debug)] enum OpenSshCipher { Aes256Cbc, Aes128Ctr, Aes192Ctr, Aes256Ctr, Aes256Gcm, } impl OpenSshCipher { /// Returns the length of the authenticating part of the cipher (the tag of an AEAD). fn auth_len(self) -> usize { match self { OpenSshCipher::Aes256Cbc | OpenSshCipher::Aes128Ctr | OpenSshCipher::Aes192Ctr | OpenSshCipher::Aes256Ctr => 0, OpenSshCipher::Aes256Gcm => <Aes256Gcm as AeadCore>::TagSize::USIZE, } } fn
( self, kdf: &OpenSshKdf, p: SecretString, ct: &[u8], ) -> Result<Vec<u8>, DecryptError> { match self { OpenSshCipher::Aes256Cbc => decrypt::aes_cbc::<Aes256CbcDec>(kdf, p, ct), OpenSshCipher::Aes128Ctr => Ok(decrypt::aes_ctr::<Aes128Ctr>(kdf, p, ct)), OpenSshCipher::Aes192Ctr => Ok(decrypt::aes_ctr::<Aes192Ctr>(kdf, p, ct)), OpenSshCipher::Aes256Ctr => Ok(decrypt::aes_ctr::<Aes256Ctr>(kdf, p, ct)), OpenSshCipher::Aes256Gcm => decrypt::aes_gcm::<Aes256Gcm>(kdf, p, ct), } } } /// OpenSSH-supported KDFs. #[derive(Clone, Debug)] enum OpenSshKdf { Bcrypt { salt: Vec<u8>, rounds: u32 }, } impl OpenSshKdf { fn derive(&self, passphrase: SecretString, out_len: usize) -> Vec<u8> { match self { OpenSshKdf::Bcrypt { salt, rounds } => { let mut output = vec![0; out_len]; bcrypt_pbkdf(passphrase.expose_secret(), salt, *rounds, &mut output) .expect("parameters are valid"); output } } } } /// An encrypted SSH private key. #[derive(Clone)] pub struct EncryptedKey { ssh_key: Vec<u8>, cipher: OpenSshCipher, kdf: OpenSshKdf, encrypted: Vec<u8>, filename: Option<String>, } impl EncryptedKey { /// Decrypts this private key. pub fn decrypt( &self, passphrase: SecretString, ) -> Result<identity::UnencryptedKey, DecryptError> { let decrypted = self .cipher .decrypt(&self.kdf, passphrase, &self.encrypted)?; let mut parser = read_ssh::openssh_unencrypted_privkey(&self.ssh_key); match parser(&decrypted) .map(|(_, sk)| sk) .map_err(|_| DecryptError::KeyDecryptionFailed)? { Identity::Unencrypted(key) => Ok(key), Identity::Unsupported(_) => Err(DecryptError::KeyDecryptionFailed), Identity::Encrypted(_) => unreachable!(), } } } mod decrypt { use aes::cipher::{block_padding::NoPadding, BlockDecryptMut, KeyIvInit, StreamCipher}; use aes_gcm::aead::{AeadMut, KeyInit}; use age_core::secrecy::SecretString; use cipher::generic_array::{ArrayLength, GenericArray}; use super::OpenSshKdf; use crate::error::DecryptError; fn derive_key_material<KeySize: ArrayLength<u8>, IvSize: ArrayLength<u8>>( kdf: &OpenSshKdf, passphrase: SecretString, ) -> (GenericArray<u8, KeySize>, GenericArray<u8, IvSize>) { let kdf_output = kdf.derive(passphrase, KeySize::USIZE + IvSize::USIZE); let (key, iv) = kdf_output.split_at(KeySize::USIZE); ( GenericArray::from_exact_iter(key.iter().copied()).expect("key is correct length"), GenericArray::from_exact_iter(iv.iter().copied()).expect("iv is correct length"), ) } pub(super) fn aes_cbc<C: BlockDecryptMut + KeyIvInit>( kdf: &OpenSshKdf, passphrase: SecretString, ciphertext: &[u8], ) -> Result<Vec<u8>, DecryptError> { let (key, iv) = derive_key_material::<C::KeySize, C::IvSize>(kdf, passphrase); let cipher = C::new(&key, &iv); cipher .decrypt_padded_vec_mut::<NoPadding>(ciphertext) .map_err(|_| DecryptError::KeyDecryptionFailed) } pub(super) fn aes_ctr<C: StreamCipher + KeyIvInit>( kdf: &OpenSshKdf, passphrase: SecretString, ciphertext: &[u8], ) -> Vec<u8> { let (key, iv) = derive_key_material::<C::KeySize, C::IvSize>(kdf, passphrase); let mut cipher = C::new(&key, &iv); let mut plaintext = ciphertext.to_vec(); cipher.apply_keystream(&mut plaintext); plaintext } pub(super) fn aes_gcm<C: AeadMut + KeyInit>( kdf: &OpenSshKdf, passphrase: SecretString, ciphertext: &[u8], ) -> Result<Vec<u8>, DecryptError> { let (key, nonce) = derive_key_material::<C::KeySize, C::NonceSize>(kdf, passphrase); let mut cipher = C::new(&key); cipher .decrypt(&nonce, ciphertext) .map_err(|_| DecryptError::KeyDecryptionFailed) } } mod read_ssh { use age_core::secrecy::Secret; use curve25519_dalek::edwards::{CompressedEdwardsY, 
EdwardsPoint}; use nom::{ branch::alt, bytes::complete::{tag, take}, combinator::{flat_map, map, map_opt, map_parser, map_res, recognize, rest, verify}, multi::{length_data, length_value}, number::complete::be_u32, sequence::{delimited, pair, preceded, terminated, tuple}, IResult, }; use num_traits::Zero; use rsa::BigUint; use super::{ identity::{UnencryptedKey, UnsupportedKey}, EncryptedKey, Identity, OpenSshCipher, OpenSshKdf, SSH_ED25519_KEY_PREFIX, SSH_RSA_KEY_PREFIX, }; /// The SSH `string` [data type](https://tools.ietf.org/html/rfc4251#section-5). pub(crate) fn string(input: &[u8]) -> IResult<&[u8], &[u8]> { length_data(be_u32)(input) } /// Recognizes an SSH `string` matching a tag. #[allow(clippy::needless_lifetimes)] // false positive pub fn string_tag<'a>(value: &'a str) -> impl Fn(&'a [u8]) -> IResult<&'a [u8], &'a [u8]> { move |input: &[u8]| length_value(be_u32, tag(value))(input) } /// The SSH `mpint` data type, restricted to non-negative integers. /// /// From [RFC 4251](https://tools.ietf.org/html/rfc4251#section-5): /// ```text /// Represents multiple precision integers in two's complement format, /// stored as a string, 8 bits per byte, MSB first. Negative numbers /// have the value 1 as the most significant bit of the first byte of /// the data partition. If the most significant bit would be set for /// a positive number, the number MUST be preceded by a zero byte. /// Unnecessary leading bytes with the value 0 or 255 MUST NOT be /// included. The value zero MUST be stored as a string with zero /// bytes of data. /// ``` fn mpint(input: &[u8]) -> IResult<&[u8], BigUint> { map_opt(string, |bytes| { if bytes.is_empty() { Some(BigUint::zero()) } else { // Enforce canonicity let mut non_zero_bytes = bytes; while non_zero_bytes[0] == 0 { non_zero_bytes = &non_zero_bytes[1..]; } if non_zero_bytes.is_empty() { // Non-canonical zero return None; } if non_zero_bytes.len() + (non_zero_bytes[0] >> 7) as usize!= bytes.len() { // Negative number or non-canonical positive number return None; } Some(BigUint::from_bytes_be(bytes)) } })(input) } enum CipherResult { Supported(OpenSshCipher), Unsupported(String), } /// Parse a cipher and KDF. fn encryption_header(input: &[u8]) -> IResult<&[u8], Option<(CipherResult, OpenSshKdf)>> { alt(( // If either cipher or KDF is None, both must be. map( tuple((string_tag("none"), string_tag("none"), string_tag(""))), |_| None, ), map( tuple(( alt(( map(string_tag("aes256-cbc"), |_| { CipherResult::Supported(OpenSshCipher::Aes256Cbc) }), map(string_tag("aes128-ctr"), |_| { CipherResult::Supported(OpenSshCipher::Aes128Ctr) }), map(string_tag("aes192-ctr"), |_| { CipherResult::Supported(OpenSshCipher::Aes192Ctr) }), map(string_tag("aes256-ctr"), |_| { CipherResult::Supported(OpenSshCipher::Aes256Ctr) }), map(string_tag("[email protected]"), |_| { CipherResult::Supported(OpenSshCipher::Aes256Gcm) }), map(string, |s| { CipherResult::Unsupported(String::from_utf8_lossy(s).into_owned()) }), )), map_opt( preceded( string_tag("bcrypt"), map_parser(string, tuple((string, be_u32))), ), |(salt, rounds)| { if salt.is_empty() || rounds == 0 { // Invalid parameters None } else { Some(OpenSshKdf::Bcrypt { salt: salt.into(), rounds, }) } }, ), )), Some, ), ))(input) } /// Parses the comment from an OpenSSH privkey and verifies its deterministic padding. 
fn comment_and_padding(input: &[u8]) -> IResult<&[u8], &[u8]> { terminated( // Comment string, // Deterministic padding verify(rest, |padding: &[u8]| { padding.iter().enumerate().all(|(i, b)| *b == (i + 1) as u8) }), )(input) } /// Internal OpenSSH encoding of an RSA private key. /// /// - [OpenSSH serialization code](https://github.com/openssh/openssh-portable/blob/4103a3ec7c68493dbc4f0994a229507e943a86d3/sshkey.c#L3187-L3198) fn openssh_rsa_privkey(input: &[u8]) -> IResult<&[u8], rsa::RsaPrivateKey> { delimited( string_tag(SSH_RSA_KEY_PREFIX), map_res( tuple((mpint, mpint, mpint, mpint, mpint, mpint)), |(n, e, d, _iqmp, p, q)| rsa::RsaPrivateKey::from_components(n, e, d, vec![p, q]), ), comment_and_padding, )(input) } /// Internal OpenSSH encoding of an Ed25519 private key. /// /// - [OpenSSH serialization code](https://github.com/openssh/openssh-portable/blob/4103a3ec7c68493dbc4f0994a229507e943a86d3/sshkey.c#L3277-L3283) fn openssh_ed25519_privkey(input: &[u8]) -> IResult<&[u8], Secret<[u8; 64]>> { delimited( string_tag(SSH_ED25519_KEY_PREFIX), map_opt(tuple((string, string)), |(pubkey_bytes, privkey_bytes)| { if privkey_bytes.len() == 64 && pubkey_bytes == &privkey_bytes[32..64] { let mut privkey = [0; 64]; privkey.copy_from_slice(privkey_bytes); Some(Secret::new(privkey)) } else { None } }), comment_and_padding, )(input) } /// Unencrypted, padded list of private keys. /// /// From the [specification](https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key): /// ```text /// uint32 checkint /// uint32 checkint /// string privatekey1 /// string comment1 /// string privatekey2 /// string comment2 ///... /// string privatekeyN /// string commentN /// char 1 /// char 2 /// char 3 ///... /// char padlen % 255 /// ``` /// /// Note however that the `string` type for the private keys is wrong; it should be /// an opaque type, or the composite type `(string, byte[])`. /// /// We only support a single key, like OpenSSH. #[allow(clippy::needless_lifetimes)] pub(super) fn openssh_unencrypted_privkey<'a>( ssh_key: &[u8], ) -> impl FnMut(&'a [u8]) -> IResult<&'a [u8], Identity> { // We need to own, move, and clone these in order to keep them alive. let ssh_key_rsa = ssh_key.to_vec(); let ssh_key_ed25519 = ssh_key.to_vec(); preceded( // Repeated checkint, intended for verifying correct decryption. // Don't copy this idea into a new protocol; use an AEAD instead. map_opt(pair(take(4usize), take(4usize)), |(c1, c2)| { if c1 == c2 { Some(c1) } else { None } }), alt(( map(openssh_rsa_privkey, move |sk| { UnencryptedKey::SshRsa(ssh_key_rsa.clone(), Box::new(sk)).into() }), map(openssh_ed25519_privkey, move |privkey| { UnencryptedKey::SshEd25519(ssh_key_ed25519.clone(), privkey).into() }), map(string, |key_type| { UnsupportedKey::Type(String::from_utf8_lossy(key_type).to_string()).into() }), )), ) } /// An OpenSSH-formatted private key. /// /// - [Specification](https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key) pub(super) fn openssh_privkey(input: &[u8]) -> IResult<&[u8], Identity> { flat_map( pair( preceded(tag(b"openssh-key-v1\x00"), encryption_header), preceded( // We only support a single key, like OpenSSH: // https://github.com/openssh/openssh-portable/blob/4103a3ec/sshkey.c#L4171 tag(b"\x00\x00\x00\x01"), string, // The public key in SSH format ), ), openssh_privkey_inner, )(input) } /// Encrypted, padded list of private keys. 
fn openssh_privkey_inner<'a>( (encryption, ssh_key): (Option<(CipherResult, OpenSshKdf)>, &'a [u8]), ) -> impl FnMut(&'a [u8]) -> IResult<&'a [u8], Identity> { // `PROTOCOL.key` specifies that the encrypted list of private keys is encoded as // a `string`, but this is incorrect when AEAD ciphertexts are used. For what I // can only assume are backwards-compatibility reasons, the `string` part encodes // the ciphertext without tag, and the tag is just appended to the encoding. So // you can only parse the full data structure by interpreting the encryption // header. let expected_remainder = encryption.as_ref().map_or(0, |(cipher_res, _)| { if let CipherResult::Supported(cipher) = cipher_res { cipher.auth_len() } else { 0 } }); move |input: &[u8]| match &encryption { None => map_parser(string, openssh_unencrypted_privkey(ssh_key))(input), Some((cipher_res, kdf)) => map( map_parser( recognize(pair(string, take(expected_remainder))), preceded(be_u32, rest), ), |private| match cipher_res { CipherResult::Supported(cipher) => EncryptedKey { ssh_key: ssh_key.to_vec(), cipher: *cipher, kdf: kdf.clone(), encrypted: private.to_vec(), filename: None, } .into(), CipherResult::Unsupported(cipher) => { UnsupportedKey::EncryptedSsh(cipher.clone()).into() } }, )(input), } } /// An SSH-encoded RSA public key. /// /// From [RFC 4253](https://tools.ietf.org/html/rfc4253#section-6.6): /// ```text /// string "ssh-rsa" /// mpint e
decrypt
identifier_name
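The ssh.rs excerpt above derives one contiguous blob of key material from the bcrypt KDF and then splits it into an AES key and an IV (see derive_key_material). The following is a small, std-only sketch of that split; split_key_material is an illustrative helper name, not part of the crate, and the byte buffer stands in for a real bcrypt_pbkdf call.

// Illustrative helper, not part of the crate: mirrors how derive_key_material
// splits one KDF output buffer into an encryption key and an IV/nonce.
fn split_key_material(kdf_output: &[u8], key_len: usize, iv_len: usize) -> Option<(&[u8], &[u8])> {
    if kdf_output.len() != key_len + iv_len {
        return None;
    }
    Some(kdf_output.split_at(key_len))
}

fn main() {
    // 48 bytes of stand-in key material; the real code obtains these from
    // bcrypt_pbkdf using the salt and round count stored in the key file.
    let kdf_output: Vec<u8> = (0u8..48).collect();
    let (key, iv) = split_key_material(&kdf_output, 32, 16).unwrap();
    assert_eq!(key.len(), 32); // AES-256 key
    assert_eq!(iv.len(), 16); // block-sized IV for the CBC/CTR modes (GCM would use a 12-byte nonce instead)
}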
ssh.rs
copy_from_slice(&tag_bytes[..TAG_LEN_BYTES]); tag } /// OpenSSH-supported ciphers. #[allow(clippy::enum_variant_names)] #[derive(Clone, Copy, Debug)] enum OpenSshCipher { Aes256Cbc, Aes128Ctr, Aes192Ctr, Aes256Ctr, Aes256Gcm, } impl OpenSshCipher { /// Returns the length of the authenticating part of the cipher (the tag of an AEAD). fn auth_len(self) -> usize { match self { OpenSshCipher::Aes256Cbc | OpenSshCipher::Aes128Ctr | OpenSshCipher::Aes192Ctr | OpenSshCipher::Aes256Ctr => 0, OpenSshCipher::Aes256Gcm => <Aes256Gcm as AeadCore>::TagSize::USIZE, } } fn decrypt( self, kdf: &OpenSshKdf, p: SecretString, ct: &[u8], ) -> Result<Vec<u8>, DecryptError> { match self { OpenSshCipher::Aes256Cbc => decrypt::aes_cbc::<Aes256CbcDec>(kdf, p, ct), OpenSshCipher::Aes128Ctr => Ok(decrypt::aes_ctr::<Aes128Ctr>(kdf, p, ct)), OpenSshCipher::Aes192Ctr => Ok(decrypt::aes_ctr::<Aes192Ctr>(kdf, p, ct)), OpenSshCipher::Aes256Ctr => Ok(decrypt::aes_ctr::<Aes256Ctr>(kdf, p, ct)), OpenSshCipher::Aes256Gcm => decrypt::aes_gcm::<Aes256Gcm>(kdf, p, ct), } } } /// OpenSSH-supported KDFs. #[derive(Clone, Debug)] enum OpenSshKdf { Bcrypt { salt: Vec<u8>, rounds: u32 }, } impl OpenSshKdf { fn derive(&self, passphrase: SecretString, out_len: usize) -> Vec<u8> { match self { OpenSshKdf::Bcrypt { salt, rounds } => { let mut output = vec![0; out_len]; bcrypt_pbkdf(passphrase.expose_secret(), salt, *rounds, &mut output) .expect("parameters are valid"); output } } } } /// An encrypted SSH private key. #[derive(Clone)] pub struct EncryptedKey { ssh_key: Vec<u8>, cipher: OpenSshCipher, kdf: OpenSshKdf, encrypted: Vec<u8>, filename: Option<String>, } impl EncryptedKey { /// Decrypts this private key. pub fn decrypt( &self, passphrase: SecretString, ) -> Result<identity::UnencryptedKey, DecryptError> { let decrypted = self .cipher .decrypt(&self.kdf, passphrase, &self.encrypted)?; let mut parser = read_ssh::openssh_unencrypted_privkey(&self.ssh_key); match parser(&decrypted) .map(|(_, sk)| sk) .map_err(|_| DecryptError::KeyDecryptionFailed)? { Identity::Unencrypted(key) => Ok(key), Identity::Unsupported(_) => Err(DecryptError::KeyDecryptionFailed),
} } mod decrypt { use aes::cipher::{block_padding::NoPadding, BlockDecryptMut, KeyIvInit, StreamCipher}; use aes_gcm::aead::{AeadMut, KeyInit}; use age_core::secrecy::SecretString; use cipher::generic_array::{ArrayLength, GenericArray}; use super::OpenSshKdf; use crate::error::DecryptError; fn derive_key_material<KeySize: ArrayLength<u8>, IvSize: ArrayLength<u8>>( kdf: &OpenSshKdf, passphrase: SecretString, ) -> (GenericArray<u8, KeySize>, GenericArray<u8, IvSize>) { let kdf_output = kdf.derive(passphrase, KeySize::USIZE + IvSize::USIZE); let (key, iv) = kdf_output.split_at(KeySize::USIZE); ( GenericArray::from_exact_iter(key.iter().copied()).expect("key is correct length"), GenericArray::from_exact_iter(iv.iter().copied()).expect("iv is correct length"), ) } pub(super) fn aes_cbc<C: BlockDecryptMut + KeyIvInit>( kdf: &OpenSshKdf, passphrase: SecretString, ciphertext: &[u8], ) -> Result<Vec<u8>, DecryptError> { let (key, iv) = derive_key_material::<C::KeySize, C::IvSize>(kdf, passphrase); let cipher = C::new(&key, &iv); cipher .decrypt_padded_vec_mut::<NoPadding>(ciphertext) .map_err(|_| DecryptError::KeyDecryptionFailed) } pub(super) fn aes_ctr<C: StreamCipher + KeyIvInit>( kdf: &OpenSshKdf, passphrase: SecretString, ciphertext: &[u8], ) -> Vec<u8> { let (key, iv) = derive_key_material::<C::KeySize, C::IvSize>(kdf, passphrase); let mut cipher = C::new(&key, &iv); let mut plaintext = ciphertext.to_vec(); cipher.apply_keystream(&mut plaintext); plaintext } pub(super) fn aes_gcm<C: AeadMut + KeyInit>( kdf: &OpenSshKdf, passphrase: SecretString, ciphertext: &[u8], ) -> Result<Vec<u8>, DecryptError> { let (key, nonce) = derive_key_material::<C::KeySize, C::NonceSize>(kdf, passphrase); let mut cipher = C::new(&key); cipher .decrypt(&nonce, ciphertext) .map_err(|_| DecryptError::KeyDecryptionFailed) } } mod read_ssh { use age_core::secrecy::Secret; use curve25519_dalek::edwards::{CompressedEdwardsY, EdwardsPoint}; use nom::{ branch::alt, bytes::complete::{tag, take}, combinator::{flat_map, map, map_opt, map_parser, map_res, recognize, rest, verify}, multi::{length_data, length_value}, number::complete::be_u32, sequence::{delimited, pair, preceded, terminated, tuple}, IResult, }; use num_traits::Zero; use rsa::BigUint; use super::{ identity::{UnencryptedKey, UnsupportedKey}, EncryptedKey, Identity, OpenSshCipher, OpenSshKdf, SSH_ED25519_KEY_PREFIX, SSH_RSA_KEY_PREFIX, }; /// The SSH `string` [data type](https://tools.ietf.org/html/rfc4251#section-5). pub(crate) fn string(input: &[u8]) -> IResult<&[u8], &[u8]> { length_data(be_u32)(input) } /// Recognizes an SSH `string` matching a tag. #[allow(clippy::needless_lifetimes)] // false positive pub fn string_tag<'a>(value: &'a str) -> impl Fn(&'a [u8]) -> IResult<&'a [u8], &'a [u8]> { move |input: &[u8]| length_value(be_u32, tag(value))(input) } /// The SSH `mpint` data type, restricted to non-negative integers. /// /// From [RFC 4251](https://tools.ietf.org/html/rfc4251#section-5): /// ```text /// Represents multiple precision integers in two's complement format, /// stored as a string, 8 bits per byte, MSB first. Negative numbers /// have the value 1 as the most significant bit of the first byte of /// the data partition. If the most significant bit would be set for /// a positive number, the number MUST be preceded by a zero byte. /// Unnecessary leading bytes with the value 0 or 255 MUST NOT be /// included. The value zero MUST be stored as a string with zero /// bytes of data. 
/// ``` fn mpint(input: &[u8]) -> IResult<&[u8], BigUint> { map_opt(string, |bytes| { if bytes.is_empty() { Some(BigUint::zero()) } else { // Enforce canonicity let mut non_zero_bytes = bytes; while non_zero_bytes[0] == 0 { non_zero_bytes = &non_zero_bytes[1..]; } if non_zero_bytes.is_empty() { // Non-canonical zero return None; } if non_zero_bytes.len() + (non_zero_bytes[0] >> 7) as usize!= bytes.len() { // Negative number or non-canonical positive number return None; } Some(BigUint::from_bytes_be(bytes)) } })(input) } enum CipherResult { Supported(OpenSshCipher), Unsupported(String), } /// Parse a cipher and KDF. fn encryption_header(input: &[u8]) -> IResult<&[u8], Option<(CipherResult, OpenSshKdf)>> { alt(( // If either cipher or KDF is None, both must be. map( tuple((string_tag("none"), string_tag("none"), string_tag(""))), |_| None, ), map( tuple(( alt(( map(string_tag("aes256-cbc"), |_| { CipherResult::Supported(OpenSshCipher::Aes256Cbc) }), map(string_tag("aes128-ctr"), |_| { CipherResult::Supported(OpenSshCipher::Aes128Ctr) }), map(string_tag("aes192-ctr"), |_| { CipherResult::Supported(OpenSshCipher::Aes192Ctr) }), map(string_tag("aes256-ctr"), |_| { CipherResult::Supported(OpenSshCipher::Aes256Ctr) }), map(string_tag("[email protected]"), |_| { CipherResult::Supported(OpenSshCipher::Aes256Gcm) }), map(string, |s| { CipherResult::Unsupported(String::from_utf8_lossy(s).into_owned()) }), )), map_opt( preceded( string_tag("bcrypt"), map_parser(string, tuple((string, be_u32))), ), |(salt, rounds)| { if salt.is_empty() || rounds == 0 { // Invalid parameters None } else { Some(OpenSshKdf::Bcrypt { salt: salt.into(), rounds, }) } }, ), )), Some, ), ))(input) } /// Parses the comment from an OpenSSH privkey and verifies its deterministic padding. fn comment_and_padding(input: &[u8]) -> IResult<&[u8], &[u8]> { terminated( // Comment string, // Deterministic padding verify(rest, |padding: &[u8]| { padding.iter().enumerate().all(|(i, b)| *b == (i + 1) as u8) }), )(input) } /// Internal OpenSSH encoding of an RSA private key. /// /// - [OpenSSH serialization code](https://github.com/openssh/openssh-portable/blob/4103a3ec7c68493dbc4f0994a229507e943a86d3/sshkey.c#L3187-L3198) fn openssh_rsa_privkey(input: &[u8]) -> IResult<&[u8], rsa::RsaPrivateKey> { delimited( string_tag(SSH_RSA_KEY_PREFIX), map_res( tuple((mpint, mpint, mpint, mpint, mpint, mpint)), |(n, e, d, _iqmp, p, q)| rsa::RsaPrivateKey::from_components(n, e, d, vec![p, q]), ), comment_and_padding, )(input) } /// Internal OpenSSH encoding of an Ed25519 private key. /// /// - [OpenSSH serialization code](https://github.com/openssh/openssh-portable/blob/4103a3ec7c68493dbc4f0994a229507e943a86d3/sshkey.c#L3277-L3283) fn openssh_ed25519_privkey(input: &[u8]) -> IResult<&[u8], Secret<[u8; 64]>> { delimited( string_tag(SSH_ED25519_KEY_PREFIX), map_opt(tuple((string, string)), |(pubkey_bytes, privkey_bytes)| { if privkey_bytes.len() == 64 && pubkey_bytes == &privkey_bytes[32..64] { let mut privkey = [0; 64]; privkey.copy_from_slice(privkey_bytes); Some(Secret::new(privkey)) } else { None } }), comment_and_padding, )(input) } /// Unencrypted, padded list of private keys. /// /// From the [specification](https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key): /// ```text /// uint32 checkint /// uint32 checkint /// string privatekey1 /// string comment1 /// string privatekey2 /// string comment2 ///... /// string privatekeyN /// string commentN /// char 1 /// char 2 /// char 3 ///... 
/// char padlen % 255 /// ``` /// /// Note however that the `string` type for the private keys is wrong; it should be /// an opaque type, or the composite type `(string, byte[])`. /// /// We only support a single key, like OpenSSH. #[allow(clippy::needless_lifetimes)] pub(super) fn openssh_unencrypted_privkey<'a>( ssh_key: &[u8], ) -> impl FnMut(&'a [u8]) -> IResult<&'a [u8], Identity> { // We need to own, move, and clone these in order to keep them alive. let ssh_key_rsa = ssh_key.to_vec(); let ssh_key_ed25519 = ssh_key.to_vec(); preceded( // Repeated checkint, intended for verifying correct decryption. // Don't copy this idea into a new protocol; use an AEAD instead. map_opt(pair(take(4usize), take(4usize)), |(c1, c2)| { if c1 == c2 { Some(c1) } else { None } }), alt(( map(openssh_rsa_privkey, move |sk| { UnencryptedKey::SshRsa(ssh_key_rsa.clone(), Box::new(sk)).into() }), map(openssh_ed25519_privkey, move |privkey| { UnencryptedKey::SshEd25519(ssh_key_ed25519.clone(), privkey).into() }), map(string, |key_type| { UnsupportedKey::Type(String::from_utf8_lossy(key_type).to_string()).into() }), )), ) } /// An OpenSSH-formatted private key. /// /// - [Specification](https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key) pub(super) fn openssh_privkey(input: &[u8]) -> IResult<&[u8], Identity> { flat_map( pair( preceded(tag(b"openssh-key-v1\x00"), encryption_header), preceded( // We only support a single key, like OpenSSH: // https://github.com/openssh/openssh-portable/blob/4103a3ec/sshkey.c#L4171 tag(b"\x00\x00\x00\x01"), string, // The public key in SSH format ), ), openssh_privkey_inner, )(input) } /// Encrypted, padded list of private keys. fn openssh_privkey_inner<'a>( (encryption, ssh_key): (Option<(CipherResult, OpenSshKdf)>, &'a [u8]), ) -> impl FnMut(&'a [u8]) -> IResult<&'a [u8], Identity> { // `PROTOCOL.key` specifies that the encrypted list of private keys is encoded as // a `string`, but this is incorrect when AEAD ciphertexts are used. For what I // can only assume are backwards-compatibility reasons, the `string` part encodes // the ciphertext without tag, and the tag is just appended to the encoding. So // you can only parse the full data structure by interpreting the encryption // header. let expected_remainder = encryption.as_ref().map_or(0, |(cipher_res, _)| { if let CipherResult::Supported(cipher) = cipher_res { cipher.auth_len() } else { 0 } }); move |input: &[u8]| match &encryption { None => map_parser(string, openssh_unencrypted_privkey(ssh_key))(input), Some((cipher_res, kdf)) => map( map_parser( recognize(pair(string, take(expected_remainder))), preceded(be_u32, rest), ), |private| match cipher_res { CipherResult::Supported(cipher) => EncryptedKey { ssh_key: ssh_key.to_vec(), cipher: *cipher, kdf: kdf.clone(), encrypted: private.to_vec(), filename: None, } .into(), CipherResult::Unsupported(cipher) => { UnsupportedKey::EncryptedSsh(cipher.clone()).into() } }, )(input), } } /// An SSH-encoded RSA public key. /// /// From [RFC 4253](https://tools.ietf.org/html/rfc4253#section-6.6): /// ```text /// string "ssh-rsa" /// mpint e
Identity::Encrypted(_) => unreachable!(), }
random_line_split
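The parsers in the record above lean heavily on the RFC 4251 `string` type: a big-endian u32 length followed by exactly that many bytes. For readers who want the byte layout without the nom combinators, here is a dependency-free sketch of the same framing; read_ssh_string is an illustrative helper, not an API of the crate.

// Illustrative, dependency-free reader for the RFC 4251 `string` type; the
// crate itself uses nom's length_data(be_u32) for the same framing.
fn read_ssh_string(input: &[u8]) -> Option<(&[u8], &[u8])> {
    if input.len() < 4 {
        return None;
    }
    let len = u32::from_be_bytes([input[0], input[1], input[2], input[3]]) as usize;
    let rest = &input[4..];
    if rest.len() < len {
        return None;
    }
    // Returns (string contents, remaining input), like a nom parser would.
    Some((&rest[..len], &rest[len..]))
}

fn main() {
    // "\x00\x00\x00\x04none" encodes the 4-byte string "none", as seen in the
    // cipher/KDF header of an unencrypted key.
    let encoded = [0u8, 0, 0, 4, b'n', b'o', b'n', b'e'];
    let (value, rest) = read_ssh_string(&encoded).unwrap();
    assert_eq!(value, b"none");
    assert!(rest.is_empty());
}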
ssh.rs
_from_slice(&tag_bytes[..TAG_LEN_BYTES]); tag } /// OpenSSH-supported ciphers. #[allow(clippy::enum_variant_names)] #[derive(Clone, Copy, Debug)] enum OpenSshCipher { Aes256Cbc, Aes128Ctr, Aes192Ctr, Aes256Ctr, Aes256Gcm, } impl OpenSshCipher { /// Returns the length of the authenticating part of the cipher (the tag of an AEAD). fn auth_len(self) -> usize { match self { OpenSshCipher::Aes256Cbc | OpenSshCipher::Aes128Ctr | OpenSshCipher::Aes192Ctr | OpenSshCipher::Aes256Ctr => 0, OpenSshCipher::Aes256Gcm => <Aes256Gcm as AeadCore>::TagSize::USIZE, } } fn decrypt( self, kdf: &OpenSshKdf, p: SecretString, ct: &[u8], ) -> Result<Vec<u8>, DecryptError> { match self { OpenSshCipher::Aes256Cbc => decrypt::aes_cbc::<Aes256CbcDec>(kdf, p, ct), OpenSshCipher::Aes128Ctr => Ok(decrypt::aes_ctr::<Aes128Ctr>(kdf, p, ct)), OpenSshCipher::Aes192Ctr => Ok(decrypt::aes_ctr::<Aes192Ctr>(kdf, p, ct)), OpenSshCipher::Aes256Ctr => Ok(decrypt::aes_ctr::<Aes256Ctr>(kdf, p, ct)), OpenSshCipher::Aes256Gcm => decrypt::aes_gcm::<Aes256Gcm>(kdf, p, ct), } } } /// OpenSSH-supported KDFs. #[derive(Clone, Debug)] enum OpenSshKdf { Bcrypt { salt: Vec<u8>, rounds: u32 }, } impl OpenSshKdf { fn derive(&self, passphrase: SecretString, out_len: usize) -> Vec<u8> { match self { OpenSshKdf::Bcrypt { salt, rounds } => { let mut output = vec![0; out_len]; bcrypt_pbkdf(passphrase.expose_secret(), salt, *rounds, &mut output) .expect("parameters are valid"); output } } } } /// An encrypted SSH private key. #[derive(Clone)] pub struct EncryptedKey { ssh_key: Vec<u8>, cipher: OpenSshCipher, kdf: OpenSshKdf, encrypted: Vec<u8>, filename: Option<String>, } impl EncryptedKey { /// Decrypts this private key. pub fn decrypt( &self, passphrase: SecretString, ) -> Result<identity::UnencryptedKey, DecryptError> { let decrypted = self .cipher .decrypt(&self.kdf, passphrase, &self.encrypted)?; let mut parser = read_ssh::openssh_unencrypted_privkey(&self.ssh_key); match parser(&decrypted) .map(|(_, sk)| sk) .map_err(|_| DecryptError::KeyDecryptionFailed)? 
{ Identity::Unencrypted(key) => Ok(key), Identity::Unsupported(_) => Err(DecryptError::KeyDecryptionFailed), Identity::Encrypted(_) => unreachable!(), } } } mod decrypt { use aes::cipher::{block_padding::NoPadding, BlockDecryptMut, KeyIvInit, StreamCipher}; use aes_gcm::aead::{AeadMut, KeyInit}; use age_core::secrecy::SecretString; use cipher::generic_array::{ArrayLength, GenericArray}; use super::OpenSshKdf; use crate::error::DecryptError; fn derive_key_material<KeySize: ArrayLength<u8>, IvSize: ArrayLength<u8>>( kdf: &OpenSshKdf, passphrase: SecretString, ) -> (GenericArray<u8, KeySize>, GenericArray<u8, IvSize>) { let kdf_output = kdf.derive(passphrase, KeySize::USIZE + IvSize::USIZE); let (key, iv) = kdf_output.split_at(KeySize::USIZE); ( GenericArray::from_exact_iter(key.iter().copied()).expect("key is correct length"), GenericArray::from_exact_iter(iv.iter().copied()).expect("iv is correct length"), ) } pub(super) fn aes_cbc<C: BlockDecryptMut + KeyIvInit>( kdf: &OpenSshKdf, passphrase: SecretString, ciphertext: &[u8], ) -> Result<Vec<u8>, DecryptError> { let (key, iv) = derive_key_material::<C::KeySize, C::IvSize>(kdf, passphrase); let cipher = C::new(&key, &iv); cipher .decrypt_padded_vec_mut::<NoPadding>(ciphertext) .map_err(|_| DecryptError::KeyDecryptionFailed) } pub(super) fn aes_ctr<C: StreamCipher + KeyIvInit>( kdf: &OpenSshKdf, passphrase: SecretString, ciphertext: &[u8], ) -> Vec<u8> { let (key, iv) = derive_key_material::<C::KeySize, C::IvSize>(kdf, passphrase); let mut cipher = C::new(&key, &iv); let mut plaintext = ciphertext.to_vec(); cipher.apply_keystream(&mut plaintext); plaintext } pub(super) fn aes_gcm<C: AeadMut + KeyInit>( kdf: &OpenSshKdf, passphrase: SecretString, ciphertext: &[u8], ) -> Result<Vec<u8>, DecryptError> { let (key, nonce) = derive_key_material::<C::KeySize, C::NonceSize>(kdf, passphrase); let mut cipher = C::new(&key); cipher .decrypt(&nonce, ciphertext) .map_err(|_| DecryptError::KeyDecryptionFailed) } } mod read_ssh { use age_core::secrecy::Secret; use curve25519_dalek::edwards::{CompressedEdwardsY, EdwardsPoint}; use nom::{ branch::alt, bytes::complete::{tag, take}, combinator::{flat_map, map, map_opt, map_parser, map_res, recognize, rest, verify}, multi::{length_data, length_value}, number::complete::be_u32, sequence::{delimited, pair, preceded, terminated, tuple}, IResult, }; use num_traits::Zero; use rsa::BigUint; use super::{ identity::{UnencryptedKey, UnsupportedKey}, EncryptedKey, Identity, OpenSshCipher, OpenSshKdf, SSH_ED25519_KEY_PREFIX, SSH_RSA_KEY_PREFIX, }; /// The SSH `string` [data type](https://tools.ietf.org/html/rfc4251#section-5). pub(crate) fn string(input: &[u8]) -> IResult<&[u8], &[u8]> { length_data(be_u32)(input) } /// Recognizes an SSH `string` matching a tag. #[allow(clippy::needless_lifetimes)] // false positive pub fn string_tag<'a>(value: &'a str) -> impl Fn(&'a [u8]) -> IResult<&'a [u8], &'a [u8]> { move |input: &[u8]| length_value(be_u32, tag(value))(input) } /// The SSH `mpint` data type, restricted to non-negative integers. /// /// From [RFC 4251](https://tools.ietf.org/html/rfc4251#section-5): /// ```text /// Represents multiple precision integers in two's complement format, /// stored as a string, 8 bits per byte, MSB first. Negative numbers /// have the value 1 as the most significant bit of the first byte of /// the data partition. If the most significant bit would be set for /// a positive number, the number MUST be preceded by a zero byte. 
/// Unnecessary leading bytes with the value 0 or 255 MUST NOT be /// included. The value zero MUST be stored as a string with zero /// bytes of data. /// ``` fn mpint(input: &[u8]) -> IResult<&[u8], BigUint> { map_opt(string, |bytes| { if bytes.is_empty() { Some(BigUint::zero()) } else { // Enforce canonicity let mut non_zero_bytes = bytes; while non_zero_bytes[0] == 0 { non_zero_bytes = &non_zero_bytes[1..]; } if non_zero_bytes.is_empty() { // Non-canonical zero return None; } if non_zero_bytes.len() + (non_zero_bytes[0] >> 7) as usize!= bytes.len() { // Negative number or non-canonical positive number return None; } Some(BigUint::from_bytes_be(bytes)) } })(input) } enum CipherResult { Supported(OpenSshCipher), Unsupported(String), } /// Parse a cipher and KDF. fn encryption_header(input: &[u8]) -> IResult<&[u8], Option<(CipherResult, OpenSshKdf)>> { alt(( // If either cipher or KDF is None, both must be. map( tuple((string_tag("none"), string_tag("none"), string_tag(""))), |_| None, ), map( tuple(( alt(( map(string_tag("aes256-cbc"), |_| { CipherResult::Supported(OpenSshCipher::Aes256Cbc) }), map(string_tag("aes128-ctr"), |_| { CipherResult::Supported(OpenSshCipher::Aes128Ctr) }), map(string_tag("aes192-ctr"), |_| { CipherResult::Supported(OpenSshCipher::Aes192Ctr) }), map(string_tag("aes256-ctr"), |_| { CipherResult::Supported(OpenSshCipher::Aes256Ctr) }), map(string_tag("[email protected]"), |_| { CipherResult::Supported(OpenSshCipher::Aes256Gcm) }), map(string, |s| { CipherResult::Unsupported(String::from_utf8_lossy(s).into_owned()) }), )), map_opt( preceded( string_tag("bcrypt"), map_parser(string, tuple((string, be_u32))), ), |(salt, rounds)| { if salt.is_empty() || rounds == 0 { // Invalid parameters None } else { Some(OpenSshKdf::Bcrypt { salt: salt.into(), rounds, }) } }, ), )), Some, ), ))(input) } /// Parses the comment from an OpenSSH privkey and verifies its deterministic padding. fn comment_and_padding(input: &[u8]) -> IResult<&[u8], &[u8]> { terminated( // Comment string, // Deterministic padding verify(rest, |padding: &[u8]| { padding.iter().enumerate().all(|(i, b)| *b == (i + 1) as u8) }), )(input) } /// Internal OpenSSH encoding of an RSA private key. /// /// - [OpenSSH serialization code](https://github.com/openssh/openssh-portable/blob/4103a3ec7c68493dbc4f0994a229507e943a86d3/sshkey.c#L3187-L3198) fn openssh_rsa_privkey(input: &[u8]) -> IResult<&[u8], rsa::RsaPrivateKey> { delimited( string_tag(SSH_RSA_KEY_PREFIX), map_res( tuple((mpint, mpint, mpint, mpint, mpint, mpint)), |(n, e, d, _iqmp, p, q)| rsa::RsaPrivateKey::from_components(n, e, d, vec![p, q]), ), comment_and_padding, )(input) } /// Internal OpenSSH encoding of an Ed25519 private key. /// /// - [OpenSSH serialization code](https://github.com/openssh/openssh-portable/blob/4103a3ec7c68493dbc4f0994a229507e943a86d3/sshkey.c#L3277-L3283) fn openssh_ed25519_privkey(input: &[u8]) -> IResult<&[u8], Secret<[u8; 64]>> { delimited( string_tag(SSH_ED25519_KEY_PREFIX), map_opt(tuple((string, string)), |(pubkey_bytes, privkey_bytes)| { if privkey_bytes.len() == 64 && pubkey_bytes == &privkey_bytes[32..64] { let mut privkey = [0; 64]; privkey.copy_from_slice(privkey_bytes); Some(Secret::new(privkey)) } else { None } }), comment_and_padding, )(input) } /// Unencrypted, padded list of private keys. 
/// /// From the [specification](https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key): /// ```text /// uint32 checkint /// uint32 checkint /// string privatekey1 /// string comment1 /// string privatekey2 /// string comment2 ///... /// string privatekeyN /// string commentN /// char 1 /// char 2 /// char 3 ///... /// char padlen % 255 /// ``` /// /// Note however that the `string` type for the private keys is wrong; it should be /// an opaque type, or the composite type `(string, byte[])`. /// /// We only support a single key, like OpenSSH. #[allow(clippy::needless_lifetimes)] pub(super) fn openssh_unencrypted_privkey<'a>( ssh_key: &[u8], ) -> impl FnMut(&'a [u8]) -> IResult<&'a [u8], Identity> { // We need to own, move, and clone these in order to keep them alive. let ssh_key_rsa = ssh_key.to_vec(); let ssh_key_ed25519 = ssh_key.to_vec(); preceded( // Repeated checkint, intended for verifying correct decryption. // Don't copy this idea into a new protocol; use an AEAD instead. map_opt(pair(take(4usize), take(4usize)), |(c1, c2)| { if c1 == c2
else { None } }), alt(( map(openssh_rsa_privkey, move |sk| { UnencryptedKey::SshRsa(ssh_key_rsa.clone(), Box::new(sk)).into() }), map(openssh_ed25519_privkey, move |privkey| { UnencryptedKey::SshEd25519(ssh_key_ed25519.clone(), privkey).into() }), map(string, |key_type| { UnsupportedKey::Type(String::from_utf8_lossy(key_type).to_string()).into() }), )), ) } /// An OpenSSH-formatted private key. /// /// - [Specification](https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key) pub(super) fn openssh_privkey(input: &[u8]) -> IResult<&[u8], Identity> { flat_map( pair( preceded(tag(b"openssh-key-v1\x00"), encryption_header), preceded( // We only support a single key, like OpenSSH: // https://github.com/openssh/openssh-portable/blob/4103a3ec/sshkey.c#L4171 tag(b"\x00\x00\x00\x01"), string, // The public key in SSH format ), ), openssh_privkey_inner, )(input) } /// Encrypted, padded list of private keys. fn openssh_privkey_inner<'a>( (encryption, ssh_key): (Option<(CipherResult, OpenSshKdf)>, &'a [u8]), ) -> impl FnMut(&'a [u8]) -> IResult<&'a [u8], Identity> { // `PROTOCOL.key` specifies that the encrypted list of private keys is encoded as // a `string`, but this is incorrect when AEAD ciphertexts are used. For what I // can only assume are backwards-compatibility reasons, the `string` part encodes // the ciphertext without tag, and the tag is just appended to the encoding. So // you can only parse the full data structure by interpreting the encryption // header. let expected_remainder = encryption.as_ref().map_or(0, |(cipher_res, _)| { if let CipherResult::Supported(cipher) = cipher_res { cipher.auth_len() } else { 0 } }); move |input: &[u8]| match &encryption { None => map_parser(string, openssh_unencrypted_privkey(ssh_key))(input), Some((cipher_res, kdf)) => map( map_parser( recognize(pair(string, take(expected_remainder))), preceded(be_u32, rest), ), |private| match cipher_res { CipherResult::Supported(cipher) => EncryptedKey { ssh_key: ssh_key.to_vec(), cipher: *cipher, kdf: kdf.clone(), encrypted: private.to_vec(), filename: None, } .into(), CipherResult::Unsupported(cipher) => { UnsupportedKey::EncryptedSsh(cipher.clone()).into() } }, )(input), } } /// An SSH-encoded RSA public key. /// /// From [RFC 4253](https://tools.ietf.org/html/rfc4253#section-6.6): /// ```text /// string "ssh-rsa" /// mpint e
{ Some(c1) }
conditional_block
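Two details of the unencrypted key list parsed above are easy to miss: the duplicated checkint words act as a (weak) check that decryption used the right passphrase, and the trailing padding must be the deterministic sequence 1, 2, 3, .... A self-contained sketch of both checks follows; the helper names are illustrative only, not crate APIs.

// Illustrative helpers: the same checks the parser above performs with nom combinators.
fn checkints_match(plaintext: &[u8]) -> bool {
    // The two leading checkint words only agree if decryption used the right key.
    plaintext.len() >= 8 && plaintext[..4] == plaintext[4..8]
}

fn padding_is_valid(padding: &[u8]) -> bool {
    // Deterministic padding: byte i must hold the value i + 1 (1, 2, 3, ...).
    padding.iter().enumerate().all(|(i, b)| *b == (i + 1) as u8)
}

fn main() {
    assert!(checkints_match(&[0xAA, 0xBB, 0xCC, 0xDD, 0xAA, 0xBB, 0xCC, 0xDD]));
    assert!(!checkints_match(&[1, 2, 3, 4, 9, 9, 9, 9]));
    assert!(padding_is_valid(&[1, 2, 3, 4, 5]));
    assert!(!padding_is_valid(&[0, 1, 2]));
}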
wd.rs
//! WebDriver types and declarations. use crate::error; #[cfg(doc)] use crate::Client; use http::Method; use serde::{Deserialize, Serialize}; use std::borrow::Cow; use std::convert::TryFrom; use std::fmt; use std::fmt::Debug; use std::time::Duration; use url::{ParseError, Url}; use webdriver::command::TimeoutsParameters; /// A command that can be sent to the WebDriver. /// /// Anything that implements this command can be sent to [`Client::issue_cmd()`] in order /// to send custom commands to the WebDriver instance. pub trait WebDriverCompatibleCommand: Debug { /// The endpoint to send the request to. fn endpoint( &self, base_url: &url::Url, session_id: Option<&str>, ) -> Result<url::Url, url::ParseError>; /// The HTTP request method to use, and the request body for the request. /// /// The `url` will be the one returned from the `endpoint()` method above. fn method_and_body(&self, request_url: &url::Url) -> (http::Method, Option<String>); /// Return true if this command starts a new WebDriver session. fn is_new_session(&self) -> bool { false
} /// Return true if this session should only support the legacy webdriver protocol. /// /// This only applies to the obsolete JSON Wire Protocol and should return `false` /// for all implementations that follow the W3C specification. /// /// See <https://www.selenium.dev/documentation/legacy/json_wire_protocol/> for more /// details about JSON Wire Protocol. fn is_legacy(&self) -> bool { false } } /// Blanket implementation for &T, for better ergonomics. impl<T> WebDriverCompatibleCommand for &T where T: WebDriverCompatibleCommand, { fn endpoint(&self, base_url: &Url, session_id: Option<&str>) -> Result<Url, ParseError> { T::endpoint(self, base_url, session_id) } fn method_and_body(&self, request_url: &Url) -> (Method, Option<String>) { T::method_and_body(self, request_url) } fn is_new_session(&self) -> bool { T::is_new_session(self) } fn is_legacy(&self) -> bool { T::is_legacy(self) } } /// Blanket implementation for Box<T>, for better ergonomics. impl<T> WebDriverCompatibleCommand for Box<T> where T: WebDriverCompatibleCommand, { fn endpoint(&self, base_url: &Url, session_id: Option<&str>) -> Result<Url, ParseError> { T::endpoint(self, base_url, session_id) } fn method_and_body(&self, request_url: &Url) -> (Method, Option<String>) { T::method_and_body(self, request_url) } fn is_new_session(&self) -> bool { T::is_new_session(self) } fn is_legacy(&self) -> bool { T::is_legacy(self) } } /// A [handle][1] to a browser window. /// /// Should be obtained it via [`Client::window()`] method (or similar). /// /// [1]: https://www.w3.org/TR/webdriver/#dfn-window-handles #[derive(Clone, Debug, Eq, PartialEq)] pub struct WindowHandle(String); impl From<WindowHandle> for String { fn from(w: WindowHandle) -> Self { w.0 } } impl<'a> TryFrom<Cow<'a, str>> for WindowHandle { type Error = error::InvalidWindowHandle; /// Makes the given string a [`WindowHandle`]. /// /// Avoids allocation if possible. /// /// # Errors /// /// If the given string is [`"current"`][1]. /// /// [1]: https://www.w3.org/TR/webdriver/#dfn-window-handles fn try_from(s: Cow<'a, str>) -> Result<Self, Self::Error> { if s!= "current" { Ok(Self(s.into_owned())) } else { Err(error::InvalidWindowHandle) } } } impl TryFrom<String> for WindowHandle { type Error = error::InvalidWindowHandle; /// Makes the given [`String`] a [`WindowHandle`]. /// /// # Errors /// /// If the given [`String`] is [`"current"`][1]. /// /// [1]: https://www.w3.org/TR/webdriver/#dfn-window-handles fn try_from(s: String) -> Result<Self, Self::Error> { Self::try_from(Cow::Owned(s)) } } impl TryFrom<&str> for WindowHandle { type Error = error::InvalidWindowHandle; /// Makes the given string a [`WindowHandle`]. /// /// Allocates if succeeds. /// /// # Errors /// /// If the given string is [`"current"`][1]. /// /// [1]: https://www.w3.org/TR/webdriver/#dfn-window-handles fn try_from(s: &str) -> Result<Self, Self::Error> { Self::try_from(Cow::Borrowed(s)) } } /// A type of a new browser window. /// /// Returned by [`Client::new_window()`] method. /// /// [`Client::new_window()`]: crate::Client::new_window #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub enum NewWindowType { /// Opened in a tab. Tab, /// Opened in a separate window. Window, } impl fmt::Display for NewWindowType { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Self::Tab => write!(f, "tab"), Self::Window => write!(f, "window"), } } } /// Dynamic set of [WebDriver capabilities][1]. 
/// /// [1]: https://www.w3.org/TR/webdriver/#dfn-capability pub type Capabilities = serde_json::Map<String, serde_json::Value>; /// An element locator. /// /// See [the specification][1] for more details. /// /// [1]: https://www.w3.org/TR/webdriver1/#locator-strategies #[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Debug, Hash)] pub enum Locator<'a> { /// Find an element matching the given [CSS selector][1]. /// /// [1]: https://developer.mozilla.org/en-US/docs/Web/CSS/CSS_Selectors Css(&'a str), /// Find an element using the given [`id`][1]. /// /// [1]: https://developer.mozilla.org/en-US/docs/Web/HTML/Global_attributes/id Id(&'a str), /// Find a link element with the given link text. /// /// The text matching is exact. LinkText(&'a str), /// Find an element using the given [XPath expression][1]. /// /// You can address pretty much any element this way, if you're willing to /// put in the time to find the right XPath. /// /// [1]: https://developer.mozilla.org/en-US/docs/Web/XPath XPath(&'a str), } impl<'a> Locator<'a> { pub(crate) fn into_parameters(self) -> webdriver::command::LocatorParameters { use webdriver::command::LocatorParameters; use webdriver::common::LocatorStrategy; match self { Locator::Css(s) => LocatorParameters { using: LocatorStrategy::CSSSelector, value: s.to_string(), }, Locator::Id(s) => LocatorParameters { using: LocatorStrategy::XPath, value: format!("//*[@id=\"{}\"]", s), }, Locator::XPath(s) => LocatorParameters { using: LocatorStrategy::XPath, value: s.to_string(), }, Locator::LinkText(s) => LocatorParameters { using: LocatorStrategy::LinkText, value: s.to_string(), }, } } } /// The WebDriver status as returned by [`Client::status()`]. /// /// See [8.3 Status](https://www.w3.org/TR/webdriver1/#status) of the WebDriver standard. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct WebDriverStatus { /// True if the webdriver is ready to start a new session. /// /// NOTE: Geckodriver will return `false` if a session has already started, since it /// only supports a single session. pub ready: bool, /// The current status message. pub message: String, } /// Timeout configuration, for various timeout settings. /// /// Used by [`Client::get_timeouts()`] and [`Client::update_timeouts()`]. #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] pub struct TimeoutConfiguration { #[serde(skip_serializing_if = "Option::is_none")] script: Option<u64>, #[serde(rename = "pageLoad", skip_serializing_if = "Option::is_none")] page_load: Option<u64>, #[serde(skip_serializing_if = "Option::is_none")] implicit: Option<u64>, } impl Default for TimeoutConfiguration { fn default() -> Self { TimeoutConfiguration::new( Some(Duration::from_secs(60)), Some(Duration::from_secs(60)), Some(Duration::from_secs(0)), ) } } impl TimeoutConfiguration { /// Create new timeout configuration. /// /// The various settings are as follows: /// - script Determines when to interrupt a script that is being evaluated. /// Default is 60 seconds. /// - page_load Provides the timeout limit used to interrupt navigation of the browsing /// context. Default is 60 seconds. /// - implicit Gives the timeout of when to abort locating an element. Default is 0 seconds. /// /// NOTE: It is recommended to leave the `implicit` timeout at 0 seconds, because that makes /// it possible to check for the non-existence of an element without an implicit delay. /// Also see [`Client::wait()`] for element polling functionality. 
pub fn new( script: Option<Duration>, page_load: Option<Duration>, implicit: Option<Duration>, ) -> Self { TimeoutConfiguration { script: script.map(|x| x.as_millis() as u64), page_load: page_load.map(|x| x.as_millis() as u64), implicit: implicit.map(|x| x.as_millis() as u64), } } /// Get the script timeout. pub fn script(&self) -> Option<Duration> { self.script.map(Duration::from_millis) } /// Set the script timeout. pub fn set_script(&mut self, timeout: Option<Duration>) { self.script = timeout.map(|x| x.as_millis() as u64); } /// Get the page load timeout. pub fn page_load(&self) -> Option<Duration> { self.page_load.map(Duration::from_millis) } /// Set the page load timeout. pub fn set_page_load(&mut self, timeout: Option<Duration>) { self.page_load = timeout.map(|x| x.as_millis() as u64); } /// Get the implicit wait timeout. pub fn implicit(&self) -> Option<Duration> { self.implicit.map(Duration::from_millis) } /// Set the implicit wait timeout. pub fn set_implicit(&mut self, timeout: Option<Duration>) { self.implicit = timeout.map(|x| x.as_millis() as u64); } } impl TimeoutConfiguration { pub(crate) fn into_params(self) -> TimeoutsParameters { TimeoutsParameters { script: self.script.map(Some), page_load: self.page_load, implicit: self.implicit, } } }
random_line_split
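One detail of the Locator mapping in the record above: because the W3C locator strategies have no dedicated "by id" variant, Locator::Id is lowered to an XPath expression. A tiny sketch of that lowering follows; id_to_xpath is an illustrative helper name rather than the crate's API.

// Illustrative helper: the same formatting that Locator::Id(s).into_parameters()
// produces when it falls back to the XPath strategy.
fn id_to_xpath(id: &str) -> String {
    format!("//*[@id=\"{}\"]", id)
}

fn main() {
    assert_eq!(id_to_xpath("login-form"), "//*[@id=\"login-form\"]");
    // Caveat: an id containing a double quote would break this query; the simple
    // format string above mirrors wd.rs and does not attempt to escape it.
}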
wd.rs
//! WebDriver types and declarations. use crate::error; #[cfg(doc)] use crate::Client; use http::Method; use serde::{Deserialize, Serialize}; use std::borrow::Cow; use std::convert::TryFrom; use std::fmt; use std::fmt::Debug; use std::time::Duration; use url::{ParseError, Url}; use webdriver::command::TimeoutsParameters; /// A command that can be sent to the WebDriver. /// /// Anything that implements this command can be sent to [`Client::issue_cmd()`] in order /// to send custom commands to the WebDriver instance. pub trait WebDriverCompatibleCommand: Debug { /// The endpoint to send the request to. fn endpoint( &self, base_url: &url::Url, session_id: Option<&str>, ) -> Result<url::Url, url::ParseError>; /// The HTTP request method to use, and the request body for the request. /// /// The `url` will be the one returned from the `endpoint()` method above. fn method_and_body(&self, request_url: &url::Url) -> (http::Method, Option<String>); /// Return true if this command starts a new WebDriver session. fn is_new_session(&self) -> bool { false } /// Return true if this session should only support the legacy webdriver protocol. /// /// This only applies to the obsolete JSON Wire Protocol and should return `false` /// for all implementations that follow the W3C specification. /// /// See <https://www.selenium.dev/documentation/legacy/json_wire_protocol/> for more /// details about JSON Wire Protocol. fn is_legacy(&self) -> bool { false } } /// Blanket implementation for &T, for better ergonomics. impl<T> WebDriverCompatibleCommand for &T where T: WebDriverCompatibleCommand, { fn endpoint(&self, base_url: &Url, session_id: Option<&str>) -> Result<Url, ParseError> { T::endpoint(self, base_url, session_id) } fn method_and_body(&self, request_url: &Url) -> (Method, Option<String>) { T::method_and_body(self, request_url) } fn is_new_session(&self) -> bool { T::is_new_session(self) } fn is_legacy(&self) -> bool { T::is_legacy(self) } } /// Blanket implementation for Box<T>, for better ergonomics. impl<T> WebDriverCompatibleCommand for Box<T> where T: WebDriverCompatibleCommand, { fn endpoint(&self, base_url: &Url, session_id: Option<&str>) -> Result<Url, ParseError> { T::endpoint(self, base_url, session_id) } fn
(&self, request_url: &Url) -> (Method, Option<String>) { T::method_and_body(self, request_url) } fn is_new_session(&self) -> bool { T::is_new_session(self) } fn is_legacy(&self) -> bool { T::is_legacy(self) } } /// A [handle][1] to a browser window. /// /// Should be obtained it via [`Client::window()`] method (or similar). /// /// [1]: https://www.w3.org/TR/webdriver/#dfn-window-handles #[derive(Clone, Debug, Eq, PartialEq)] pub struct WindowHandle(String); impl From<WindowHandle> for String { fn from(w: WindowHandle) -> Self { w.0 } } impl<'a> TryFrom<Cow<'a, str>> for WindowHandle { type Error = error::InvalidWindowHandle; /// Makes the given string a [`WindowHandle`]. /// /// Avoids allocation if possible. /// /// # Errors /// /// If the given string is [`"current"`][1]. /// /// [1]: https://www.w3.org/TR/webdriver/#dfn-window-handles fn try_from(s: Cow<'a, str>) -> Result<Self, Self::Error> { if s!= "current" { Ok(Self(s.into_owned())) } else { Err(error::InvalidWindowHandle) } } } impl TryFrom<String> for WindowHandle { type Error = error::InvalidWindowHandle; /// Makes the given [`String`] a [`WindowHandle`]. /// /// # Errors /// /// If the given [`String`] is [`"current"`][1]. /// /// [1]: https://www.w3.org/TR/webdriver/#dfn-window-handles fn try_from(s: String) -> Result<Self, Self::Error> { Self::try_from(Cow::Owned(s)) } } impl TryFrom<&str> for WindowHandle { type Error = error::InvalidWindowHandle; /// Makes the given string a [`WindowHandle`]. /// /// Allocates if succeeds. /// /// # Errors /// /// If the given string is [`"current"`][1]. /// /// [1]: https://www.w3.org/TR/webdriver/#dfn-window-handles fn try_from(s: &str) -> Result<Self, Self::Error> { Self::try_from(Cow::Borrowed(s)) } } /// A type of a new browser window. /// /// Returned by [`Client::new_window()`] method. /// /// [`Client::new_window()`]: crate::Client::new_window #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub enum NewWindowType { /// Opened in a tab. Tab, /// Opened in a separate window. Window, } impl fmt::Display for NewWindowType { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Self::Tab => write!(f, "tab"), Self::Window => write!(f, "window"), } } } /// Dynamic set of [WebDriver capabilities][1]. /// /// [1]: https://www.w3.org/TR/webdriver/#dfn-capability pub type Capabilities = serde_json::Map<String, serde_json::Value>; /// An element locator. /// /// See [the specification][1] for more details. /// /// [1]: https://www.w3.org/TR/webdriver1/#locator-strategies #[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Debug, Hash)] pub enum Locator<'a> { /// Find an element matching the given [CSS selector][1]. /// /// [1]: https://developer.mozilla.org/en-US/docs/Web/CSS/CSS_Selectors Css(&'a str), /// Find an element using the given [`id`][1]. /// /// [1]: https://developer.mozilla.org/en-US/docs/Web/HTML/Global_attributes/id Id(&'a str), /// Find a link element with the given link text. /// /// The text matching is exact. LinkText(&'a str), /// Find an element using the given [XPath expression][1]. /// /// You can address pretty much any element this way, if you're willing to /// put in the time to find the right XPath. 
/// /// [1]: https://developer.mozilla.org/en-US/docs/Web/XPath XPath(&'a str), } impl<'a> Locator<'a> { pub(crate) fn into_parameters(self) -> webdriver::command::LocatorParameters { use webdriver::command::LocatorParameters; use webdriver::common::LocatorStrategy; match self { Locator::Css(s) => LocatorParameters { using: LocatorStrategy::CSSSelector, value: s.to_string(), }, Locator::Id(s) => LocatorParameters { using: LocatorStrategy::XPath, value: format!("//*[@id=\"{}\"]", s), }, Locator::XPath(s) => LocatorParameters { using: LocatorStrategy::XPath, value: s.to_string(), }, Locator::LinkText(s) => LocatorParameters { using: LocatorStrategy::LinkText, value: s.to_string(), }, } } } /// The WebDriver status as returned by [`Client::status()`]. /// /// See [8.3 Status](https://www.w3.org/TR/webdriver1/#status) of the WebDriver standard. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct WebDriverStatus { /// True if the webdriver is ready to start a new session. /// /// NOTE: Geckodriver will return `false` if a session has already started, since it /// only supports a single session. pub ready: bool, /// The current status message. pub message: String, } /// Timeout configuration, for various timeout settings. /// /// Used by [`Client::get_timeouts()`] and [`Client::update_timeouts()`]. #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] pub struct TimeoutConfiguration { #[serde(skip_serializing_if = "Option::is_none")] script: Option<u64>, #[serde(rename = "pageLoad", skip_serializing_if = "Option::is_none")] page_load: Option<u64>, #[serde(skip_serializing_if = "Option::is_none")] implicit: Option<u64>, } impl Default for TimeoutConfiguration { fn default() -> Self { TimeoutConfiguration::new( Some(Duration::from_secs(60)), Some(Duration::from_secs(60)), Some(Duration::from_secs(0)), ) } } impl TimeoutConfiguration { /// Create new timeout configuration. /// /// The various settings are as follows: /// - script Determines when to interrupt a script that is being evaluated. /// Default is 60 seconds. /// - page_load Provides the timeout limit used to interrupt navigation of the browsing /// context. Default is 60 seconds. /// - implicit Gives the timeout of when to abort locating an element. Default is 0 seconds. /// /// NOTE: It is recommended to leave the `implicit` timeout at 0 seconds, because that makes /// it possible to check for the non-existence of an element without an implicit delay. /// Also see [`Client::wait()`] for element polling functionality. pub fn new( script: Option<Duration>, page_load: Option<Duration>, implicit: Option<Duration>, ) -> Self { TimeoutConfiguration { script: script.map(|x| x.as_millis() as u64), page_load: page_load.map(|x| x.as_millis() as u64), implicit: implicit.map(|x| x.as_millis() as u64), } } /// Get the script timeout. pub fn script(&self) -> Option<Duration> { self.script.map(Duration::from_millis) } /// Set the script timeout. pub fn set_script(&mut self, timeout: Option<Duration>) { self.script = timeout.map(|x| x.as_millis() as u64); } /// Get the page load timeout. pub fn page_load(&self) -> Option<Duration> { self.page_load.map(Duration::from_millis) } /// Set the page load timeout. pub fn set_page_load(&mut self, timeout: Option<Duration>) { self.page_load = timeout.map(|x| x.as_millis() as u64); } /// Get the implicit wait timeout. pub fn implicit(&self) -> Option<Duration> { self.implicit.map(Duration::from_millis) } /// Set the implicit wait timeout. 
pub fn set_implicit(&mut self, timeout: Option<Duration>) { self.implicit = timeout.map(|x| x.as_millis() as u64); } } impl TimeoutConfiguration { pub(crate) fn into_params(self) -> TimeoutsParameters { TimeoutsParameters { script: self.script.map(Some), page_load: self.page_load, implicit: self.implicit, } } }
method_and_body
identifier_name
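The WindowHandle conversions in the record above all funnel into a single rule: any string is a valid handle except the reserved word "current". Below is a reduced, std-only sketch of that validation with simplified stand-in types; the real WindowHandle and error type live in wd.rs and carry more derives and Cow-based conversions.

use std::convert::TryFrom;

// Reduced stand-ins for the wd.rs types, kept only as detailed as the sketch needs.
#[derive(Debug, PartialEq)]
struct WindowHandle(String);

#[derive(Debug, PartialEq)]
struct InvalidWindowHandle;

impl TryFrom<&str> for WindowHandle {
    type Error = InvalidWindowHandle;

    fn try_from(s: &str) -> Result<Self, Self::Error> {
        // "current" is reserved by the WebDriver spec, so it can never be a handle.
        if s != "current" {
            Ok(WindowHandle(s.to_string()))
        } else {
            Err(InvalidWindowHandle)
        }
    }
}

fn main() {
    assert!(WindowHandle::try_from("CDwindow-1234").is_ok());
    assert_eq!(WindowHandle::try_from("current"), Err(InvalidWindowHandle));
}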
decoder.rs
use crate::ebml; use crate::schema::{Schema, SchemaDict}; use crate::vint::{read_vint, UnrepresentableLengthError}; use chrono::{DateTime, NaiveDateTime, Utc}; use err_derive::Error; use log_derive::{logfn, logfn_inputs}; use std::convert::TryFrom; pub trait ReadEbmlExt: std::io::Read { #[logfn(ok = "TRACE", err = "ERROR")] fn read_ebml_to_end<'a, D: SchemaDict<'a>>( &mut self, schema: &'a D, ) -> Result<Vec<ebml::ElementDetail>, DecodeError> { let mut decoder = Decoder::new(schema); let mut buf = vec![]; let _size = self.read_to_end(&mut buf).map_err(DecodeError::Io)?; let elms = decoder.decode(buf)?; Ok(elms) } } impl<R: std::io::Read +?Sized> ReadEbmlExt for R {} pub trait BufReadEbmlExt: std::io::BufRead { #[logfn(ok = "TRACE", err = "ERROR")] fn read<'a, D: SchemaDict<'a>>( &mut self, schema: &'a D, ) -> Result<Vec<ebml::ElementDetail>, DecodeError> { let mut decoder = Decoder::new(schema); let mut buf = vec![]; loop { let used = { let available = match self.fill_buf() { Ok(n) => n, Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => continue, Err(e) => return Err(DecodeError::Io(e)), }; buf.append(&mut decoder.decode(available.to_vec())?); available.len() }; self.consume(used); if used == 0 { break; } } Ok(buf) } } impl<R: std::io::BufRead +?Sized> BufReadEbmlExt for R {} #[derive(Debug, Error)] pub enum DecodeError { #[error(display = "{}", _0)] ReadVint(#[error(cause)] UnrepresentableLengthError), #[error(display = "UnknwonSizeNotAllowedInChildElement: pos {:?}", _0)] UnknwonSizeNotAllowedInChildElement(ebml::ElementPosition), #[error(display = "ReadContent")] ReadContent(#[error(cause)] ReadContentError), #[error(display = "UnknownEbmlId: {:?}", _0)] UnknownEbmlId(ebml::EbmlId), #[error(display = "Io")] Io(#[error(cause)] std::io::Error), } impl From<UnrepresentableLengthError> for DecodeError { fn from(o: UnrepresentableLengthError) -> Self { DecodeError::ReadVint(o) } } impl From<ReadContentError> for DecodeError { fn from(o: ReadContentError) -> Self { DecodeError::ReadContent(o) } } #[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] enum State { Tag, Size, Content, } pub struct Decoder<'a, D: SchemaDict<'a>> { schema: &'a D, state: State, buffer: Vec<u8>, cursor: usize, total: usize, stack: Vec<ebml::ElementPosition>, queue: Vec<ebml::ElementDetail>, } impl<'a, D: SchemaDict<'a>> Decoder<'a, D> { pub fn new(schema: &'a D) -> Self { Self { schema, state: State::Tag, buffer: vec![], cursor: 0, total: 0, stack: vec![], queue: vec![], } } #[logfn(ok = "TRACE", err = "ERROR")] pub fn decode(&mut self, chunk: Vec<u8>) -> Result<Vec<ebml::ElementDetail>, DecodeError> { self.read_chunk(chunk)?; let mut result = vec![]; std::mem::swap(&mut self.queue, &mut result); Ok(result) } #[logfn(ok = "TRACE", err = "ERROR")] fn read_chunk(&mut self, mut chunk: Vec<u8>) -> Result<(), DecodeError> { // 読みかけの(読めなかった) buffer と 新しい chunk を合わせて読み直す self.buffer.append(&mut chunk); while self.cursor < self.buffer.len() { match self.state { State::Tag => { if!self.read_tag()? { break; } } State::Size => { if!self.read_size()? { break; } } State::Content => { if!self.read_content()? 
                        break;
                    }
                }
            }
        }
        Ok(())
    }

    /// return false when waiting for more data
    #[logfn(ok = "TRACE", err = "ERROR")]
    fn read_tag(&mut self) -> Result<bool, DecodeError> {
        // tag is out of buffer
        if self.cursor >= self.buffer.len() {
            return Ok(false);
        }
        // read ebml id vint without first byte
        let opt_tag = read_vint(&self.buffer, self.cursor)?;
        // cannot read tag yet
        if opt_tag.is_none() {
            return Ok(false);
        }
        let tag_size = opt_tag.unwrap().length;
        let ebml_id = ebml::EbmlId(opt_tag.unwrap().value);
        let tag_start = self.total;
        let size_start = self.total + (tag_size as usize);
        let content_start = 0;
        let content_size = 0;
        let schema = self
            .schema
            .get(ebml_id)
            .ok_or_else(|| DecodeError::UnknownEbmlId(ebml_id))?;
        let pos = ebml::ElementPosition {
            level: schema.level(),
            r#type: schema.r#type(),
            ebml_id,
            tag_start,
            size_start,
            content_start,
            content_size,
        };
        self.stack.push(pos);
        // move cursor
        self.cursor += tag_size as usize;
        self.total += tag_size as usize;
        // change decoder state
        self.state = State::Size;
        Ok(true)
    }

    /// return false when waiting for more data
    #[logfn(ok = "TRACE", err = "ERROR")]
    fn read_size(&mut self) -> Result<bool, DecodeError> {
        if self.cursor >= self.buffer.len() {
            return Ok(false);
        }
        // read ebml datasize vint without first byte
        let opt_size = read_vint(&self.buffer, self.cursor)?;
        if opt_size.is_none() {
            return Ok(false);
        }
        let size = opt_size.unwrap();
        // decide current tag data size; the content begins right after the size vint
        let pos = self.stack.last_mut().unwrap();
        pos.content_start = pos.size_start + (size.length as usize);
        pos.content_size = size.value;
        // move cursor and change state
        self.cursor += size.length as usize;
        self.total += size.length as usize;
        self.state = State::Content;
        Ok(true)
    }

    #[logfn(ok = "TRACE", err = "ERROR")]
    fn read_content(&mut self) -> Result<bool, DecodeError> {
        let current_pos = self.stack.last().unwrap();
        // A master element holds child elements, so it carries no raw data of its own.
        if current_pos.r#type == 'm' {
            let elm = (
                ebml::MasterStartElement {
                    ebml_id: current_pos.ebml_id,
                    unknown_size: current_pos.content_size == -1,
                },
                *current_pos,
            )
                .into();
            self.queue.push(elm);
            self.state = State::Tag;
            // Is this master element empty?
            if current_pos.content_size == 0 {
                // Immediately push the matching end tag.
                self.queue.push(
                    (
                        ebml::MasterEndElement {
                            ebml_id: current_pos.ebml_id,
                        },
                        *current_pos,
                    )
                        .into(),
                );
                // Drop this tag from the stack.
                self.stack.pop();
            }
            return Ok(true);
        }
        // A non-master (child) element must not have an unknown size.
        if current_pos.content_size < 0 {
            return Err(DecodeError::UnknownSizeNotAllowedInChildElement(
                *current_pos,
            ));
        }
        use std::convert::TryFrom as _;
        let content_size = usize::try_from(current_pos.content_size).unwrap();
        // waiting for more data
        if self.buffer.len() < self.cursor + content_size {
            return Ok(false);
        }
        // The raw bytes that make up this tag's content.
        let content = self.buffer[self.cursor..self.cursor + content_size].to_vec();
        // Drop the consumed part of the buffer and keep only the part still being read.
        self.buffer = self.buffer.split_off(self.cursor + content_size);
        let child_elm = read_child_element(
            current_pos.ebml_id,
            current_pos.r#type,
            std::io::Cursor::new(content),
            content_size,
        )?;
        self.queue.push((child_elm, *current_pos).into());
        // Advance the overall position.
        self.total += content_size;
        // Go back to waiting for the next tag.
        self.state = State::Tag;
        self.cursor = 0;
        // remove the object from the stack
        self.stack.pop();
        while !self.stack.is_empty() {
            let parent_pos = self.stack.last().unwrap();
            // The parent has an unknown size, so no close tag can be expected.
            if parent_pos.content_size < 0 {
                self.stack.pop(); // discard the parent tag
                return Ok(true);
            }
            // Have we reached the position where the parent's close tag belongs?
            let parent_content_size = usize::try_from(parent_pos.content_size).unwrap();
            if self.total < parent_pos.content_start + parent_content_size {
                break;
            if parent_pos.r#type != 'm' {
                // throw new Error("parent element is not master element");
                unreachable!();
            }
            self.queue.push(
                (
                    ebml::MasterEndElement {
                        ebml_id: parent_pos.ebml_id,
                    },
                    *parent_pos,
                )
                    .into(),
            );
            // Drop this tag from the stack.
            self.stack.pop();
        }
        Ok(true)
    }
}

#[derive(Debug, Error)]
pub enum ReadContentError {
    #[error(display = "Date")]
    Date(#[error(cause)] std::io::Error),
    #[error(display = "Utf8")]
    Utf8(#[error(cause)] std::io::Error),
    #[error(display = "UnsignedInteger")]
    UnsignedInteger(#[error(cause)] std::io::Error),
    #[error(display = "Integer")]
    Integer(#[error(cause)] std::io::Error),
    #[error(display = "Float")]
    Float(#[error(cause)] std::io::Error),
    #[error(display = "Binary")]
    Binary(#[error(cause)] std::io::Error),
    #[error(display = "String")]
    String(#[error(cause)] std::io::Error),
    #[error(display = "Master")]
    Master(#[error(cause)] std::io::Error),
    #[error(display = "Unknown")]
    Unknown(#[error(cause)] std::io::Error),
}

#[logfn_inputs(TRACE)]
#[logfn(ok = "TRACE", err = "ERROR")]
fn read_child_element<C: std::io::Read + std::fmt::Debug>(
    ebml_id: ebml::EbmlId,
    r#type: char,
    mut content: C,
    content_size: usize,
) -> Result<ebml::ChildElement, ReadContentError> {
    use byteorder::{BigEndian, ReadBytesExt as _};
    use ReadContentError::{String as StringE, *};
    match r#type {
        // Unsigned Integer - Big-endian, any size from 1 to 8 octets
        'u' => {
            let value = content
                .read_uint::<BigEndian>(content_size)
                .map_err(UnsignedInteger)?;
            Ok(ebml::UnsignedIntegerElement { ebml_id, value }.into())
        }
        // Signed Integer - Big-endian, any size from 1 to 8 octets
        'i' => {
            let value = content
                .read_int::<BigEndian>(content_size)
                .map_err(Integer)?;
            Ok(ebml::IntegerElement { ebml_id, value }.into())
        }
        // Float - Big-endian, defined for 4 and 8 octets (32, 64 bits)
        'f' => {
            let value = if content_size == 4 {
                f64::from(content.read_f32::<BigEndian>().map_err(Float)?)
            } else if content_size == 8 {
                content.read_f64::<BigEndian>().map_err(Float)?
            } else {
                Err(Float(std::io::Error::new(
                    std::io::ErrorKind::Other,
                    format!("invalid float content_size: {}", content_size),
                )))?
            };
            Ok(ebml::FloatElement { ebml_id, value }.into())
        }
        // Printable ASCII (0x20 to 0x7E), zero-padded when needed
        's' => {
            let mut value = vec![0; content_size];
            content.read_exact(&mut value).map_err(StringE)?;
            Ok(ebml::StringElement { ebml_id, value }.into())
        }
        // Unicode string, zero padded when needed (RFC 2279)
        '8' => {
            let mut value = std::string::String::new();
            content.read_to_string(&mut value).map_err(Utf8)?;
            Ok(ebml::Utf8Element { ebml_id, value }.into())
        }
        // Binary - not interpreted by the parser
        'b' => {
            let mut value = vec![0; content_size];
            content.read_exact(&mut value).map_err(Binary)?;
            Ok(ebml::BinaryElement { ebml_id, value }.into())
        }
        // nano second; Date.UTC(2001,1,1,0,0,0,0) === 980985600000
        // new Date("2001-01-01T00:00:00.000Z").getTime() = 978307200000
        // Date - signed 8 octets integer in nanoseconds with 0 indicating
        // the precise beginning of the millennium (at 2001-01-01T00:00:00,000000000 UTC)
        'd' => {
            let nanos = content.read_i64::<BigEndian>().map_err(Date)?;
            // Shift from the EBML epoch (2001-01-01T00:00:00 UTC) to the unix epoch,
            // which lies 978_307_200 seconds earlier, then split into seconds + nanoseconds.
            let unix_time_nanos: i64 = nanos + 978_307_200 * 1_000_000_000;
            let unix_time_secs: i64 = unix_time_nanos.div_euclid(1_000_000_000);
            let nsecs: u32 = u32::try_from(unix_time_nanos.rem_euclid(1_000_000_000)).unwrap();
            let datetime = NaiveDateTime::from_timestamp(unix_time_secs, nsecs);
            let value = DateTime::from_utc(datetime, Utc);
            Ok(ebml::DateElement { ebml_id, value }.into())
        }
        // Master-Element - contains other EBML sub-elements of the next lower level
        'm' => Err(Master(std::io::Error::new(
            std::io::ErrorKind::Other,
            "cannot read master element as child element".to_string(),
        )))?,
        _ => Err(Unknown(std::io::Error::new(
            std::io::ErrorKind::Other,
            format!("unknown type: {}", r#type),
        )))?,
    }
}
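// Hedged worked example (not part of the original source): the epoch-shift arithmetic used
// in the 'd' branch above, shown standalone so it is easy to verify. It depends only on
// chrono and the standard library; this crate's element and schema types are not used.
#[cfg(test)]
mod ebml_date_sketch {
    use chrono::{DateTime, NaiveDateTime, Utc};
    use std::convert::TryFrom;

    // EBML dates are signed nanoseconds relative to 2001-01-01T00:00:00 UTC,
    // which is 978_307_200 seconds after the unix epoch.
    fn ebml_nanos_to_datetime(nanos: i64) -> DateTime<Utc> {
        let unix_time_nanos = nanos + 978_307_200 * 1_000_000_000;
        let secs = unix_time_nanos.div_euclid(1_000_000_000);
        let nsecs = u32::try_from(unix_time_nanos.rem_euclid(1_000_000_000)).unwrap();
        DateTime::from_utc(NaiveDateTime::from_timestamp(secs, nsecs), Utc)
    }

    #[test]
    fn zero_is_the_millennium() {
        // A raw value of 0 decodes to the EBML epoch itself.
        assert_eq!(
            ebml_nanos_to_datetime(0).to_rfc3339(),
            "2001-01-01T00:00:00+00:00"
        );
        // One second (1e9 ns) before the epoch lands on 2000-12-31T23:59:59 UTC.
        assert_eq!(
            ebml_nanos_to_datetime(-1_000_000_000).to_rfc3339(),
            "2000-12-31T23:59:59+00:00"
        );
    }
}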
} // the point where the close tag must be inserted has been reached
random_line_split
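// Hedged usage sketch for decoder.rs above (not part of the original source): how a caller
// might drive the streaming `Decoder` with arbitrarily sized chunks, mirroring what
// `BufReadEbmlExt::read` does internally. It is generic over `SchemaDict`, so only items
// defined in decoder.rs and its crate are assumed; no concrete schema type is invented here.
fn decode_in_chunks<'a, D: SchemaDict<'a>>(
    schema: &'a D,
    bytes: &[u8],
) -> Result<Vec<ebml::ElementDetail>, DecodeError> {
    let mut decoder = Decoder::new(schema);
    let mut elements = vec![];
    // Feed the input in small chunks; `decode` buffers partial tags, sizes, and contents
    // internally and only emits elements once they are complete.
    for chunk in bytes.chunks(4096) {
        elements.append(&mut decoder.decode(chunk.to_vec())?);
    }
    Ok(elements)
}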