Dataset schema:
file_name: large_string, lengths 4 to 69
prefix: large_string, lengths 0 to 26.7k
suffix: large_string, lengths 0 to 24.8k
middle: large_string, lengths 0 to 2.12k
fim_type: large_string, 4 distinct values
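The rows that follow are fill-in-the-middle (FIM) training examples: each source file is split so that prefix + middle + suffix reconstructs the original file, and fim_type records how the split point was chosen. A minimal sketch of loading and sanity-checking such rows; the Parquet file name is hypothetical, and the use of the Hugging Face datasets library is an assumption rather than something this dump specifies:

```python
from datasets import load_dataset

# Hypothetical file name: point data_files at wherever this dataset's shards live.
ds = load_dataset("parquet", data_files="fim_examples.parquet", split="train")

row = ds[0]
# FIM invariant: concatenating the three pieces reproduces the original file.
reconstructed = row["prefix"] + row["middle"] + row["suffix"]
print(row["file_name"], row["fim_type"], len(reconstructed))
```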
film.rs
use crate::core::geometry::point::{Point2i, Point2f}; use crate::core::spectrum::{Spectrum, xyz_to_rgb}; use crate::core::pbrt::{Float, Options, clamp, INFINITY}; use crate::core::filter::{Filters, Filter}; use crate::core::geometry::bounds::{Bounds2i, Bounds2f}; use crate::core::parallel::AtomicFloat; use std::sync::RwLock; use log::{info, error, warn}; use anyhow::Result; use crate::core::geometry::vector::Vector2f; use crate::core::paramset::ParamSet; use crate::core::imageio::write_image; use std::path::{PathBuf}; use smallvec::SmallVec; const FILTER_TABLE_WIDTH: usize = 16; #[derive(Default, Clone, Copy)] struct FilmTilePixel { contrib_sum : Spectrum, filter_weight_sum : Float } #[derive(Clone)] struct Pixel { xyz : [Float; 3], filter_weight_sum : Float, splat_xyz : [AtomicFloat; 3], _pad : Float } impl Default for Pixel { fn default() -> Self
} pub struct Film { pub full_resolution : Point2i, pub diagonal : Float, pub filter : Filters, pub filename : PathBuf, pub cropped_pixel_bounds: Bounds2i, pixels : RwLock<Vec<Pixel>>, filter_table : [Float; FILTER_TABLE_WIDTH * FILTER_TABLE_WIDTH], scale : Float, max_sample_luminance : Float } impl Film { pub fn new(resolution: &Point2i, crop_window: &Bounds2f, filt: Filters, diagonal: Float, filename: PathBuf, scale: Float, max_sample_luminance: Float) -> Self { let crop_pixel_bounds = Bounds2i::from_points( &Point2i::new( (resolution.x as Float * crop_window.p_min.x).ceil() as isize, (resolution.y as Float * crop_window.p_min.y).ceil() as isize), &Point2i::new( (resolution.x as Float * crop_window.p_max.x).ceil() as isize, (resolution.y as Float * crop_window.p_max.y).ceil() as isize ) ); info!("Created film with full resolution {}. Crop window of {} -> croppedPixelBounds {}", resolution, crop_window, crop_pixel_bounds); // Allocate film image storage let pixels = vec![Pixel::default(); crop_pixel_bounds.area() as usize]; // TODO: filmPixelMemory // Precompute filter weight table let mut offset = 0; let mut filter_table = [0.0; FILTER_TABLE_WIDTH * FILTER_TABLE_WIDTH]; for y in 0..FILTER_TABLE_WIDTH { for x in 0..FILTER_TABLE_WIDTH { let p = Point2f::new( (x as Float + 0.5) * filt.radius().x / FILTER_TABLE_WIDTH as Float, (y as Float + 0.5) * filt.radius().y / FILTER_TABLE_WIDTH as Float ); filter_table[offset] = filt.evaluate(&p); offset += 1; } } Self { full_resolution: *resolution, diagonal: diagonal * 0.001, filter: filt, filename: filename.to_owned(), scale, max_sample_luminance, cropped_pixel_bounds: crop_pixel_bounds, pixels: RwLock::new(pixels), filter_table } } pub fn get_sample_bounds(&self) -> Bounds2i { let p1 = (Point2f::from(self.cropped_pixel_bounds.p_min) + Vector2f::new(0.5, 0.5) - self.filter.radius()).floor(); let p2 = (Point2f::from(self.cropped_pixel_bounds.p_max) - Vector2f::new(0.5, 0.5) + self.filter.radius()).ceil(); Bounds2i::from_points(&Point2i::from(p1), &Point2i::from(p2)) } pub fn get_physical_extent(&self) -> Bounds2f { let aspect = self.full_resolution.y as Float / self.full_resolution.x as Float; let x = (self.diagonal * self.diagonal / (1.0 + aspect * aspect)).sqrt(); let y = aspect * x; Bounds2f::new( &Point2f::new(-x / 2.0, -y / 2.0), &Point2f::new(x / 2.0, y / 2.0) ) } pub fn get_film_tile(&self, sample_bounds: &Bounds2i) -> FilmTile { // Bound image pixels that samples in sampleBounds contribute to let half_pixel = Vector2f::new(0.5, 0.5); let float_bounds = Bounds2f { p_min: Point2f::from(sample_bounds.p_min), p_max: Point2f::from(sample_bounds.p_max) }; let p0f = (float_bounds.p_min - half_pixel - self.filter.radius()).ceil(); let p1f = (float_bounds.p_max - half_pixel + self.filter.radius()).floor(); let p0 = Point2i::from(p0f); let p1 = Point2i::from(p1f) + Point2i::new(1, 1); let tile_bounds = Bounds2i::from_points(&p0, &p1).intersect(&self.cropped_pixel_bounds); FilmTile::new(&tile_bounds, &self.filter.radius(), &self.filter_table, FILTER_TABLE_WIDTH, self.max_sample_luminance) } pub fn merge_film_tile(&self, tile: &mut FilmTile) { // TODO: ProfilePhase let mut pixels = self.pixels.write().unwrap(); info!("Merging film tile {}", tile.pixel_bounds); for p in &tile.get_pixel_bounds() { // Merge pixel into Film::pixels let tile_pixel = tile.get_pixel(&p); let offset = self.get_pixel(&p); let merge_pixel = &mut pixels[offset]; let xyz = tile_pixel.contrib_sum.to_xyz(); for i in 0..3 { merge_pixel.xyz[i] += xyz[i]; } merge_pixel.filter_weight_sum
+= tile_pixel.filter_weight_sum; } } fn get_pixel(&self, p: &Point2i) -> usize { assert!(self.cropped_pixel_bounds.inside_exclusive(p)); let width = self.cropped_pixel_bounds.p_max.x - self.cropped_pixel_bounds.p_min.x; let offset = (p.x - self.cropped_pixel_bounds.p_min.x) + (p.y - self.cropped_pixel_bounds.p_min.y) * width; offset as usize } pub fn set_image(&self, img: &[Spectrum]) { let npixels = self.cropped_pixel_bounds.area() as usize; let mut pixels = self.pixels.write().unwrap(); for i in 0..npixels { let p = &mut pixels[i]; p.xyz = img[i].to_xyz(); p.filter_weight_sum = 1.0; p.splat_xyz[0] = AtomicFloat::new(0.0); p.splat_xyz[1] = AtomicFloat::new(0.0); p.splat_xyz[2] = AtomicFloat::new(0.0); } } pub fn add_splat(&self, p: &Point2f, mut v: Spectrum) { // TODO: ProfilePhase if v.has_nans() { error!("Ignoring splatted spectrum with NaN values at ({}, {})", p.x, p.y); return; } else if v.y() < 0.0 { error!("Ignoring splatted spectrum with negative luminance {} at ({}, {})", v.y(), p.x, p.y); return } else if v.y().is_infinite() { error!("Ignoring splatted spectrum with infinite luminance at ({}, {})", p.x, p.y); return; } let pi = Point2i::from(p.floor()); if!self.cropped_pixel_bounds.inside_exclusive(&pi) { return; } if v.y() > self.max_sample_luminance { v *= self.max_sample_luminance / v.y(); } let mut pixels = self.pixels.write().unwrap(); let xyz = v.to_xyz(); let offset = self.get_pixel(&pi); let pixel = &mut pixels[offset]; for i in 0..3 { pixel.splat_xyz[i].add(xyz[i]); } } pub fn write_image(&self, splat_scale: Float) -> Result<()> { // Convert image to RGB and compute final pixel values info!("Converting image to RGB and computing final weighted pixel values"); let mut rgb = vec![0.0; (3 * self.cropped_pixel_bounds.area()) as usize]; let mut offset: usize; for p in &self.cropped_pixel_bounds { // Convert pixel XYZ color to RGB offset = self.get_pixel(&p); let pixel = &self.pixels.read().unwrap()[offset]; let start = offset * 3; let xyz = xyz_to_rgb(pixel.xyz); rgb[start] = xyz[0]; rgb[start + 1] = xyz[1]; rgb[start + 2] = xyz[2]; // Normalize pixel with weight sum let filter_weight_sum = pixel.filter_weight_sum; if filter_weight_sum!= 0.0 { let invwt = 1.0 / filter_weight_sum; rgb[start] = (rgb[start] * invwt).max(0.0); rgb[start + 1] = (rgb[start + 1] * invwt).max(0.0); rgb[start + 2] = (rgb[start + 2] * invwt).max(0.0); } // splat value at pixel let splat_xyz: [Float; 3] = [ pixel.splat_xyz[0].clone().into(), pixel.splat_xyz[1].clone().into(), pixel.splat_xyz[2].clone().into() ]; let splat_rgb = xyz_to_rgb(splat_xyz); rgb[start] += splat_scale * splat_rgb[0]; rgb[start + 1] += splat_scale * splat_rgb[1]; rgb[start + 2] += splat_scale * splat_rgb[2]; // Scale pixel value by scale rgb[start] *= self.scale; rgb[start + 1] *= self.scale; rgb[start + 2] *= self.scale; } info!("Writing image {} with bounds {}", self.filename.display(), self.cropped_pixel_bounds); // TODO: WriteImage write_image(&self.filename, &rgb, &self.cropped_pixel_bounds, &self.full_resolution) } } pub struct FilmTile<'a> { pub pixel_bounds : Bounds2i, filter_radius : Vector2f, inv_filter_radius : Vector2f, filter_table : &'a[Float], filter_table_size : usize, pixels : Vec<FilmTilePixel>, max_sample_luminance: Float } impl<'a> FilmTile<'a> { pub fn new(pixel_bounds: &Bounds2i, filter_radius: &Vector2f, filter_table: &'a[Float], filter_table_size: usize, max_sample_luminance: Float) -> Self { Self { filter_table, filter_table_size, max_sample_luminance, pixel_bounds: *pixel_bounds, filter_radius:
*filter_radius, inv_filter_radius: Vector2f::new(1.0 / filter_radius.x, 1.0 / filter_radius.y), pixels: vec![FilmTilePixel::default(); std::cmp::max(0, pixel_bounds.area() as usize)] } } pub fn add_sample(&mut self, pfilm: &Point2f, mut L: Spectrum, sample_weight: Float) { // TODO: ProfilePhase if L.y() > self.max_sample_luminance { L *= Spectrum::new(self.max_sample_luminance / L.y()); } // Compute sample's raster bounds; let pfilm_discrete = *pfilm - Vector2f::new(0.5, 0.5); let p0f = (pfilm_discrete - self.filter_radius).ceil(); let p1f = (pfilm_discrete + self.filter_radius).floor(); let mut p0 = Point2i::new(p0f.x as isize, p0f.y as isize); let mut p1 = Point2i::new(p1f.x as isize, p1f.y as isize) + Point2i::new(1, 1); p0 = p0.max(&self.pixel_bounds.p_min); p1 = p1.min(&self.pixel_bounds.p_max); // Loop over filter support and add sample to pixel arrays; let mut ifx: SmallVec<[usize; 16]> = SmallVec::with_capacity(p1.x as usize - p0.x as usize); let mut ify: SmallVec<[usize; 16]> = SmallVec::with_capacity(p1.y as usize - p0.y as usize); for x in p0.x..p1.x { let fx = ((x as Float - pfilm_discrete.x) * self.inv_filter_radius.x * self.filter_table_size as Float).abs(); ifx.push(std::cmp::min(fx.floor() as usize, self.filter_table_size - 1)); } for y in p0.y..p1.y { let fy = ((y as Float - pfilm_discrete.y) * self.inv_filter_radius.y * self.filter_table_size as Float).abs(); ify.push(std::cmp::min(fy.floor() as usize, self.filter_table_size - 1)); } for y in p0.y..p1.y { for x in p0.x..p1.x { // Evaluate filter value at (x, y) pixel let offset = ify[(y - p0.y) as usize] * self.filter_table_size + ifx[(x - p0.x) as usize]; let filter_weight = self.filter_table[offset]; // Update pixel values with filtered sample contribution let pixel = self.get_pixel(&Point2i::new(x, y)); pixel.contrib_sum += L * Spectrum::new(sample_weight) * Spectrum::new(filter_weight); pixel.filter_weight_sum += filter_weight; } } } fn get_pixel(&mut self, p: &Point2i) -> &mut FilmTilePixel { assert!(self.pixel_bounds.inside_exclusive(p)); let width = self.pixel_bounds.p_max.x - self.pixel_bounds.p_min.x; let offset = (p.x - self.pixel_bounds.p_min.x) + (p.y - self.pixel_bounds.p_min.y) * width; &mut self.pixels[offset as usize] } fn get_pixel_bounds(&self) -> Bounds2i { self.pixel_bounds } } pub fn create_film(params: &ParamSet, filter: Filters, opts: &Options) -> Film { let filename = if!opts.image_file.as_os_str().is_empty() { let params_filename = params.find_one_string("filename", "".to_owned()); if!params_filename.is_empty() { warn!("Output filename supplied on command line. \"{}\" is overriding filename provided in scene description file, \"{}\"", opts.image_file.display(), params_filename); } opts.image_file.clone() } else { let f = params.find_one_string("filename", "pbrt.exr".to_owned()); PathBuf::from(f) }; let mut xres = params.find_one_int("xresolution", 1280); let mut yres = params.find_one_int("yresolution", 720); if opts.quick_render { xres = std::cmp::max(1, xres / 4); yres = std::cmp::max(1, yres / 4); } let mut crop = Bounds2f::default(); let mut cwi = 0; let cr_some = params.find_float("cropwindow", &mut cwi); if cr_some.is_some() && cwi == 4 { let cr = cr_some.unwrap(); crop.p_min.x = clamp(cr[0].min(cr[1]), 0.0, 1.0); crop.p_max.x = clamp(cr[0].max(cr[1]), 0.0, 1.0); crop.p_min.y = clamp(cr[2].min(cr[3]), 0.0, 1.0); crop.p_max.y = clamp(cr[2].max(cr[3]), 0.0, 1.0); } else if cr_some.is_some() { error!("{} values supplied for \"cropwindow\". 
Expected 4.", cwi); } else { crop = Bounds2f::new( &Point2f::new( clamp(opts.crop_window[0][0], 0.0, 1.0), clamp(opts.crop_window[1][0], 0.0, 1.0)), &Point2f::new( clamp(opts.crop_window[0][1], 0.0, 1.0), clamp(opts.crop_window[1][1], 0.0, 1.0) ) ); } let scale = params.find_one_float("scale", 1.0); let diagonal = params.find_one_float("diagonal", 35.0); let max_sample_luminance = params.find_one_float("maxsampleluminance", INFINITY); Film::new( &Point2i::new(xres, yres), &crop, filter, diagonal, filename, scale, max_sample_luminance) }
{ Self { xyz: [0.0; 3], filter_weight_sum: 0.0, splat_xyz: [AtomicFloat::default(), AtomicFloat::default(), AtomicFloat::default()], _pad: 0.0 } }
identifier_body
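In the row above, fim_type is identifier_body: the held-out middle is the body of Pixel::default. The later rows illustrate identifier_name (the middle is a single identifier, such as a function name) and random_line_split (the file is cut at an arbitrary position). For training, the three fields are typically rearranged around sentinel tokens; the token names below are illustrative assumptions (models such as StarCoder define their own vocabulary), not part of this dataset:

```python
# Illustrative sentinel names; substitute the tokens of whatever model is being trained.
FIM_PREFIX, FIM_SUFFIX, FIM_MIDDLE = "<fim_prefix>", "<fim_suffix>", "<fim_middle>"

def to_psm_prompt(row: dict) -> str:
    # PSM (prefix-suffix-middle) ordering: the model conditions on the prefix
    # and suffix and learns to generate the held-out middle.
    return (FIM_PREFIX + row["prefix"]
            + FIM_SUFFIX + row["suffix"]
            + FIM_MIDDLE + row["middle"])
```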
auth.rs
Rejected(Response<Body>), LoggedIn(Response<Body>), } type AuthFuture<T> = Pin<Box<dyn Future<Output = Result<AuthResult<T>>> + Send>>; pub trait Authenticator: Send + Sync { type Credentials; fn authenticate(&self, req: RequestWrapper) -> AuthFuture<Self::Credentials>; } #[derive(Clone, Debug)] struct Secrets { shared_secret: String, server_secret: Vec<u8>, token_validity_hours: u32, } #[derive(Clone)] pub struct SharedSecretAuthenticator { secrets: Arc<Secrets>, } impl SharedSecretAuthenticator { pub fn new(shared_secret: String, server_secret: Vec<u8>, token_validity_hours: u32) -> Self { SharedSecretAuthenticator { secrets: Arc::new(Secrets { shared_secret, server_secret, token_validity_hours, }), } } } const COOKIE_NAME: &str = "audioserve_token"; const COOKIE_DELETE_DATE: &str = "Thu, 01 Jan 1970 00:00:00 GMT"; fn deny(req: &RequestWrapper) -> Result<AuthResult<()>> { let mut resp = resp::deny(); // delete cookie, if it was sent in request if req .headers() .typed_get::<Cookie>() .map(|c| c.get(COOKIE_NAME).is_some()) .unwrap_or(false) { resp.headers_mut().append( SET_COOKIE, HeaderValue::from_str(&format!( "{}=; Expires={}; {}", COOKIE_NAME, COOKIE_DELETE_DATE, cookie_params(req) )) .unwrap(), ); // unwrap is safe as we control } Ok(AuthResult::Rejected(resp)) } fn cookie_params(req: &RequestWrapper) -> &'static str { if req.is_https() && get_config().is_cors_enabled(&req.request) { "SameSite=None; Secure" } else { "SameSite=Lax" } } impl Authenticator for SharedSecretAuthenticator { type Credentials = (); fn authenticate(&self, mut req: RequestWrapper) -> AuthFuture<()> { // this is part where client can authenticate itself and get token if req.method() == Method::POST && req.path() == "/authenticate" { debug!("Authentication request"); let auth = self.secrets.clone(); return Box::pin(async move { match req.body_bytes().await { Err(e) => bail!(e), Ok(b) => { let content_type = req .headers() .get("Content-Type") .and_then(|v| v.to_str().ok()) .map(|s| s.to_lowercase()); let params = if let Some(ct) = content_type { if ct.starts_with("application/x-www-form-urlencoded") { form_urlencoded::parse(b.as_ref()) .into_owned() .collect::<HashMap<String, String>>() } else if ct.starts_with("application/json") { match serde_json::from_slice::<HashMap<String, String>>(&b) { Ok(m) => m, Err(e) => { error!("Invalid JSON: {}", e); return deny(&req); } } } else { error!("Invalid content type {}", ct); return deny(&req); } } else { error!("Content-Type header is missing"); return deny(&req); }; if let Some(secret) = params.get("secret") { debug!("Authenticating user"); if auth.auth_token_ok(secret) { debug!("Authentication success"); let token = auth.new_auth_token(); let resp = Response::builder() .typed_header(ContentType::text()) .typed_header(ContentLength(token.len() as u64)) .header( SET_COOKIE, format!( "{}={}; Max-Age={}; {}", COOKIE_NAME, token, get_config().token_validity_hours * 3600, cookie_params(&req) ) .as_str(), ); Ok(AuthResult::LoggedIn(resp.body(token.into()).unwrap())) } else { error!( "Invalid authentication: invalid shared secret, client: {:?}", req.remote_addr() ); // Let's not return failure immediately, because somebody is using wrong shared secret // Legitimate user can wait a bit, but for brute force attack it can be advantage not to reply quickly sleep(Duration::from_millis(500)).await; deny(&req) } } else { error!( "Invalid authentication: missing shared secret, client: {:?}", req.remote_addr() ); deny(&req) } } } }); } else { // And in this part we check token let mut 
token = req .headers() .typed_get::<Authorization<Bearer>>() .map(|a| a.0.token().to_owned()); if token.is_none() { token = req .headers() .typed_get::<Cookie>() .and_then(|c| c.get(COOKIE_NAME).map(borrow::ToOwned::to_owned)); } if token.is_none() { error!( "Invalid access: missing token on path {}, client: {:?}", req.path(), req.remote_addr() ); return Box::pin(future::ready(deny(&req))); } if!self.secrets.token_ok(&token.unwrap()) { error!( "Invalid access: invalid token on path {}, client: {:?}", req.path(), req.remote_addr() ); return Box::pin(future::ready(deny(&req))); } } // If everything is ok we return credentials (in this case they are just unit type) and we return back request Box::pin(future::ok(AuthResult::Authenticated { request: req, credentials: (), })) } } impl Secrets { fn auth_token_ok(&self, token: &str) -> bool { let parts = token .split('|') .filter_map(|s| match BASE64.decode(s.as_bytes()) { Ok(x) => Some(x), Err(e) => { error!( "Invalid base64 in authentication token {} in string {}", e, s ); None } }) .collect::<Vec<_>>(); if parts.len() == 2 { if parts[0].len()!= 32 { error!("Random salt must be 32 bytes"); return false; } let mut hash2 = self.shared_secret.clone().into_bytes(); let hash = &parts[1]; hash2.extend(&parts[0]); let hash2 = digest(&SHA256, &hash2); return hash2.as_ref() == &hash[..]; } else { error!("Incorrectly formed login token - {} parts", parts.len()) } false } fn new_auth_token(&self) -> String { Token::new(self.token_validity_hours, &self.server_secret).into() } fn token_ok(&self, token: &str) -> bool { match token.parse::<Token>() { Ok(token) => token.is_valid(&self.server_secret), Err(e) => { warn!("Invalid token: {}", e); false } } } } #[derive(Clone, PartialEq, Debug)] struct Token { random: [u8; 32], validity: [u8; 8], signature: [u8; 32], } fn prepare_data(r: &[u8; 32], v: [u8; 8]) -> [u8; 40] { let mut to_sign = [0u8; 40]; to_sign[0..32].copy_from_slice(&r[..]); to_sign[32..40].copy_from_slice(&v[..]); to_sign } fn now() -> u64 { SystemTime::now() .duration_since(UNIX_EPOCH) .expect("Invalid system time") .as_secs() } impl Token { fn new(token_validity_hours: u32, secret: &[u8]) -> Self { let mut random = [0u8; 32]; let rng = SystemRandom::new(); rng.fill(&mut random) .expect("Cannot generate random number"); let validity: u64 = now() + u64::from(token_validity_hours) * 3600; let validity: [u8; 8] = validity.to_be_bytes(); let to_sign = prepare_data(&random, validity); let key = hmac::Key::new(hmac::HMAC_SHA256, secret); let sig = hmac::sign(&key, &to_sign); let slice = sig.as_ref(); assert!(slice.len() == 32); let mut signature = [0u8; 32]; signature.copy_from_slice(slice); Token { random, validity, signature, } } fn is_valid(&self, secret: &[u8]) -> bool { let key = hmac::Key::new(hmac::HMAC_SHA256, secret); let data = prepare_data(&self.random, self.validity); if hmac::verify(&key, &data, &self.signature).is_err() { return false; }; self.validity() > now() } fn validity(&self) -> u64 { let ts: u64 = unsafe { ::std::mem::transmute_copy(&self.validity) }; u64::from_be(ts) } } impl From<Token> for String { fn from(token: Token) -> String { let data = [&token.random[..], &token.validity[..], &token.signature[..]].concat(); BASE64.encode(&data) } } #[derive(Error, Debug, PartialEq)] enum TokenError { #[error("Invalid token size")] InvalidSize, #[error("Invalid token encoding")] InvalidEncoding(#[from] ::data_encoding::DecodeError), } impl ::std::str::FromStr for Token { type Err = TokenError; fn from_str(s: &str) -> Result<Self, 
Self::Err> { let bytes = BASE64.decode(s.as_bytes())?; if bytes.len()!= 72 { return Err(TokenError::InvalidSize); }; let mut random = [0u8; 32]; let mut validity = [0u8; 8]; let mut signature = [0u8; 32]; random.copy_from_slice(&bytes[0..32]); validity.copy_from_slice(&bytes[32..40]); signature.copy_from_slice(&bytes[40..72]); Ok(Token { random, validity, signature, }) } } #[cfg(test)] mod tests { use super::*; use crate::config::init::init_default_config; use borrow::Cow; use hyper::{Request, StatusCode}; #[test] fn
() { let token = Token::new(24, b"my big secret"); assert!(token.is_valid(b"my big secret")); let orig_token = token.clone(); let serialized_token: String = token.into(); assert!(serialized_token.len() >= 72); let new_token: Token = serialized_token.parse().unwrap(); assert_eq!(orig_token, new_token); assert!(new_token.is_valid(b"my big secret")); assert!(!new_token.is_valid(b"wrong secret")); assert!(new_token.validity() - now() <= 24 * 3600); } fn build_request(body: impl Into<Body>, json: bool) -> RequestWrapper { let b = body.into(); let req = Request::builder() .method(Method::POST) .header( "Content-Type", if json { "application/json" } else { "application/x-www-form-urlencoded" }, ) .uri("/authenticate") .body(b) .unwrap(); RequestWrapper::new(req, None, [192, 168, 1, 2].into(), false).unwrap() } fn build_authenticated_request(token: &str) -> RequestWrapper { let req = Request::builder() .method(Method::GET) .uri("/neco") .header("Authorization", format!("Bearer {}", token)) .body(Body::from("Hey")) .unwrap(); RequestWrapper::new(req, None, [192, 168, 1, 2].into(), false).unwrap() } fn shared_secret(sec: &str) -> String { let mut salt = [0u8; 32]; let rng = SystemRandom::new(); rng.fill(&mut salt).expect("cannot generate random number"); let mut res = BASE64.encode(&salt); res.push('|'); let mut hash: Vec<u8> = sec.into(); hash.extend(&salt); let hash = digest(&SHA256, &hash); res.push_str(&BASE64.encode(hash.as_ref())); res } fn shared_secret_form(sec: &str) -> String { let ss = shared_secret(sec); let encoded_ss: Cow<str> = percent_encoding::percent_encode(ss.as_bytes(), percent_encoding::NON_ALPHANUMERIC) .into(); "secret=".to_string() + encoded_ss.as_ref() } #[tokio::test] async fn test_json_login() { env_logger::try_init().ok(); init_default_config(); let sec = "MamelukLetiNaMesic74328"; let aut = SharedSecretAuthenticator::new( sec.into(), (&b"kjhfdakjjhafjhshjkjyuewqy87jkhakcjdsjk"[..]).into(), 24, ); let mut smap = HashMap::new(); smap.insert("secret".to_string(), shared_secret(sec)); let body = serde_json::to_string(&smap).expect("JSON serialization error"); let req = build_request(body, true); let res = aut .authenticate(req) .await .expect("authentication procedure internal error"); if let AuthResult::LoggedIn(res) = res { assert_eq!(res.status(), StatusCode::OK); } else { panic!("Authentication failure"); } } #[tokio::test] async fn test_authenticator_login() { env_logger::try_init().ok(); let invalid_secret = "secret=aaaaa"; let shared = "kulisak"; init_default_config(); let ss = shared_secret_form(shared); let aut = SharedSecretAuthenticator::new(shared.into(), (&b"123456"[..]).into(), 24); let req = build_request(ss, false); let res = aut .authenticate(req) .await .expect("authentication procedure internal error"); if let AuthResult::LoggedIn(res) = res { assert_eq!(res.status(), StatusCode::
test_token
identifier_name
auth.rs
Rejected(Response<Body>), LoggedIn(Response<Body>), } type AuthFuture<T> = Pin<Box<dyn Future<Output = Result<AuthResult<T>>> + Send>>; pub trait Authenticator: Send + Sync { type Credentials; fn authenticate(&self, req: RequestWrapper) -> AuthFuture<Self::Credentials>; } #[derive(Clone, Debug)] struct Secrets { shared_secret: String, server_secret: Vec<u8>, token_validity_hours: u32, } #[derive(Clone)] pub struct SharedSecretAuthenticator { secrets: Arc<Secrets>, } impl SharedSecretAuthenticator { pub fn new(shared_secret: String, server_secret: Vec<u8>, token_validity_hours: u32) -> Self { SharedSecretAuthenticator { secrets: Arc::new(Secrets { shared_secret, server_secret, token_validity_hours, }), } } } const COOKIE_NAME: &str = "audioserve_token"; const COOKIE_DELETE_DATE: &str = "Thu, 01 Jan 1970 00:00:00 GMT"; fn deny(req: &RequestWrapper) -> Result<AuthResult<()>> { let mut resp = resp::deny(); // delete cookie, if it was sent in request if req .headers() .typed_get::<Cookie>() .map(|c| c.get(COOKIE_NAME).is_some()) .unwrap_or(false) { resp.headers_mut().append( SET_COOKIE, HeaderValue::from_str(&format!( "{}=; Expires={}; {}", COOKIE_NAME, COOKIE_DELETE_DATE,
); // unwrap is safe as we control } Ok(AuthResult::Rejected(resp)) } fn cookie_params(req: &RequestWrapper) -> &'static str { if req.is_https() && get_config().is_cors_enabled(&req.request) { "SameSite=None; Secure" } else { "SameSite=Lax" } } impl Authenticator for SharedSecretAuthenticator { type Credentials = (); fn authenticate(&self, mut req: RequestWrapper) -> AuthFuture<()> { // this is part where client can authenticate itself and get token if req.method() == Method::POST && req.path() == "/authenticate" { debug!("Authentication request"); let auth = self.secrets.clone(); return Box::pin(async move { match req.body_bytes().await { Err(e) => bail!(e), Ok(b) => { let content_type = req .headers() .get("Content-Type") .and_then(|v| v.to_str().ok()) .map(|s| s.to_lowercase()); let params = if let Some(ct) = content_type { if ct.starts_with("application/x-www-form-urlencoded") { form_urlencoded::parse(b.as_ref()) .into_owned() .collect::<HashMap<String, String>>() } else if ct.starts_with("application/json") { match serde_json::from_slice::<HashMap<String, String>>(&b) { Ok(m) => m, Err(e) => { error!("Invalid JSON: {}", e); return deny(&req); } } } else { error!("Invalid content type {}", ct); return deny(&req); } } else { error!("Content-Type header is missing"); return deny(&req); }; if let Some(secret) = params.get("secret") { debug!("Authenticating user"); if auth.auth_token_ok(secret) { debug!("Authentication success"); let token = auth.new_auth_token(); let resp = Response::builder() .typed_header(ContentType::text()) .typed_header(ContentLength(token.len() as u64)) .header( SET_COOKIE, format!( "{}={}; Max-Age={}; {}", COOKIE_NAME, token, get_config().token_validity_hours * 3600, cookie_params(&req) ) .as_str(), ); Ok(AuthResult::LoggedIn(resp.body(token.into()).unwrap())) } else { error!( "Invalid authentication: invalid shared secret, client: {:?}", req.remote_addr() ); // Let's not return failure immediately, because somebody is using wrong shared secret // Legitimate user can wait a bit, but for brute force attack it can be advantage not to reply quickly sleep(Duration::from_millis(500)).await; deny(&req) } } else { error!( "Invalid authentication: missing shared secret, client: {:?}", req.remote_addr() ); deny(&req) } } } }); } else { // And in this part we check token let mut token = req .headers() .typed_get::<Authorization<Bearer>>() .map(|a| a.0.token().to_owned()); if token.is_none() { token = req .headers() .typed_get::<Cookie>() .and_then(|c| c.get(COOKIE_NAME).map(borrow::ToOwned::to_owned)); } if token.is_none() { error!( "Invalid access: missing token on path {}, client: {:?}", req.path(), req.remote_addr() ); return Box::pin(future::ready(deny(&req))); } if!self.secrets.token_ok(&token.unwrap()) { error!( "Invalid access: invalid token on path {}, client: {:?}", req.path(), req.remote_addr() ); return Box::pin(future::ready(deny(&req))); } } // If everything is ok we return credentials (in this case they are just unit type) and we return back request Box::pin(future::ok(AuthResult::Authenticated { request: req, credentials: (), })) } } impl Secrets { fn auth_token_ok(&self, token: &str) -> bool { let parts = token .split('|') .filter_map(|s| match BASE64.decode(s.as_bytes()) { Ok(x) => Some(x), Err(e) => { error!( "Invalid base64 in authentication token {} in string {}", e, s ); None } }) .collect::<Vec<_>>(); if parts.len() == 2 { if parts[0].len()!= 32 { error!("Random salt must be 32 bytes"); return false; } let mut hash2 = 
self.shared_secret.clone().into_bytes(); let hash = &parts[1]; hash2.extend(&parts[0]); let hash2 = digest(&SHA256, &hash2); return hash2.as_ref() == &hash[..]; } else { error!("Incorrectly formed login token - {} parts", parts.len()) } false } fn new_auth_token(&self) -> String { Token::new(self.token_validity_hours, &self.server_secret).into() } fn token_ok(&self, token: &str) -> bool { match token.parse::<Token>() { Ok(token) => token.is_valid(&self.server_secret), Err(e) => { warn!("Invalid token: {}", e); false } } } } #[derive(Clone, PartialEq, Debug)] struct Token { random: [u8; 32], validity: [u8; 8], signature: [u8; 32], } fn prepare_data(r: &[u8; 32], v: [u8; 8]) -> [u8; 40] { let mut to_sign = [0u8; 40]; to_sign[0..32].copy_from_slice(&r[..]); to_sign[32..40].copy_from_slice(&v[..]); to_sign } fn now() -> u64 { SystemTime::now() .duration_since(UNIX_EPOCH) .expect("Invalid system time") .as_secs() } impl Token { fn new(token_validity_hours: u32, secret: &[u8]) -> Self { let mut random = [0u8; 32]; let rng = SystemRandom::new(); rng.fill(&mut random) .expect("Cannot generate random number"); let validity: u64 = now() + u64::from(token_validity_hours) * 3600; let validity: [u8; 8] = validity.to_be_bytes(); let to_sign = prepare_data(&random, validity); let key = hmac::Key::new(hmac::HMAC_SHA256, secret); let sig = hmac::sign(&key, &to_sign); let slice = sig.as_ref(); assert!(slice.len() == 32); let mut signature = [0u8; 32]; signature.copy_from_slice(slice); Token { random, validity, signature, } } fn is_valid(&self, secret: &[u8]) -> bool { let key = hmac::Key::new(hmac::HMAC_SHA256, secret); let data = prepare_data(&self.random, self.validity); if hmac::verify(&key, &data, &self.signature).is_err() { return false; }; self.validity() > now() } fn validity(&self) -> u64 { let ts: u64 = unsafe { ::std::mem::transmute_copy(&self.validity) }; u64::from_be(ts) } } impl From<Token> for String { fn from(token: Token) -> String { let data = [&token.random[..], &token.validity[..], &token.signature[..]].concat(); BASE64.encode(&data) } } #[derive(Error, Debug, PartialEq)] enum TokenError { #[error("Invalid token size")] InvalidSize, #[error("Invalid token encoding")] InvalidEncoding(#[from] ::data_encoding::DecodeError), } impl ::std::str::FromStr for Token { type Err = TokenError; fn from_str(s: &str) -> Result<Self, Self::Err> { let bytes = BASE64.decode(s.as_bytes())?; if bytes.len()!= 72 { return Err(TokenError::InvalidSize); }; let mut random = [0u8; 32]; let mut validity = [0u8; 8]; let mut signature = [0u8; 32]; random.copy_from_slice(&bytes[0..32]); validity.copy_from_slice(&bytes[32..40]); signature.copy_from_slice(&bytes[40..72]); Ok(Token { random, validity, signature, }) } } #[cfg(test)] mod tests { use super::*; use crate::config::init::init_default_config; use borrow::Cow; use hyper::{Request, StatusCode}; #[test] fn test_token() { let token = Token::new(24, b"my big secret"); assert!(token.is_valid(b"my big secret")); let orig_token = token.clone(); let serialized_token: String = token.into(); assert!(serialized_token.len() >= 72); let new_token: Token = serialized_token.parse().unwrap(); assert_eq!(orig_token, new_token); assert!(new_token.is_valid(b"my big secret")); assert!(!new_token.is_valid(b"wrong secret")); assert!(new_token.validity() - now() <= 24 * 3600); } fn build_request(body: impl Into<Body>, json: bool) -> RequestWrapper { let b = body.into(); let req = Request::builder() .method(Method::POST) .header( "Content-Type", if json { "application/json" } else { 
"application/x-www-form-urlencoded" }, ) .uri("/authenticate") .body(b) .unwrap(); RequestWrapper::new(req, None, [192, 168, 1, 2].into(), false).unwrap() } fn build_authenticated_request(token: &str) -> RequestWrapper { let req = Request::builder() .method(Method::GET) .uri("/neco") .header("Authorization", format!("Bearer {}", token)) .body(Body::from("Hey")) .unwrap(); RequestWrapper::new(req, None, [192, 168, 1, 2].into(), false).unwrap() } fn shared_secret(sec: &str) -> String { let mut salt = [0u8; 32]; let rng = SystemRandom::new(); rng.fill(&mut salt).expect("cannot generate random number"); let mut res = BASE64.encode(&salt); res.push('|'); let mut hash: Vec<u8> = sec.into(); hash.extend(&salt); let hash = digest(&SHA256, &hash); res.push_str(&BASE64.encode(hash.as_ref())); res } fn shared_secret_form(sec: &str) -> String { let ss = shared_secret(sec); let encoded_ss: Cow<str> = percent_encoding::percent_encode(ss.as_bytes(), percent_encoding::NON_ALPHANUMERIC) .into(); "secret=".to_string() + encoded_ss.as_ref() } #[tokio::test] async fn test_json_login() { env_logger::try_init().ok(); init_default_config(); let sec = "MamelukLetiNaMesic74328"; let aut = SharedSecretAuthenticator::new( sec.into(), (&b"kjhfdakjjhafjhshjkjyuewqy87jkhakcjdsjk"[..]).into(), 24, ); let mut smap = HashMap::new(); smap.insert("secret".to_string(), shared_secret(sec)); let body = serde_json::to_string(&smap).expect("JSON serialization error"); let req = build_request(body, true); let res = aut .authenticate(req) .await .expect("authentication procedure internal error"); if let AuthResult::LoggedIn(res) = res { assert_eq!(res.status(), StatusCode::OK); } else { panic!("Authentication failure"); } } #[tokio::test] async fn test_authenticator_login() { env_logger::try_init().ok(); let invalid_secret = "secret=aaaaa"; let shared = "kulisak"; init_default_config(); let ss = shared_secret_form(shared); let aut = SharedSecretAuthenticator::new(shared.into(), (&b"123456"[..]).into(), 24); let req = build_request(ss, false); let res = aut .authenticate(req) .await .expect("authentication procedure internal error"); if let AuthResult::LoggedIn(res) = res { assert_eq!(res.status(), StatusCode::OK);
cookie_params(req) )) .unwrap(),
random_line_split
cli.rs
// Copyright (c) The Libra Core Contributors // SPDX-License-Identifier: Apache-2.0 #![forbid(unsafe_code)] //! Functionality related to the command line interface of the Move prover. use anyhow::anyhow; use clap::{App, Arg}; use docgen::docgen::DocgenOptions; use log::LevelFilter; use serde::{Deserialize, Serialize}; use simplelog::{ CombinedLogger, Config, ConfigBuilder, LevelPadding, SimpleLogger, TermLogger, TerminalMode, }; use std::sync::atomic::{AtomicBool, Ordering}; /// Represents the virtual path to the boogie prelude which is inlined into the binary. pub const INLINE_PRELUDE: &str = "<inline-prelude>"; /// Default flags passed to boogie. Additional flags will be added to this via the -B option. const DEFAULT_BOOGIE_FLAGS: &[&str] = &[ "-doModSetAnalysis", "-noinfer", "-printVerifiedProceduresCount:0", "-printModel:4", // Right now, we let boogie only produce one error per procedure. The boogie wrapper isn't // capable to sort out multiple errors and associate them with models otherwise. "-errorLimit:1", ]; /// Atomic used to prevent re-initialization of logging. static LOGGER_CONFIGURED: AtomicBool = AtomicBool::new(false); /// Atomic used to detect whether we are running in test mode. static TEST_MODE: AtomicBool = AtomicBool::new(false); /// Default for what functions to verify. #[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)] pub enum VerificationScope { /// Verify only public functions. Public, /// Verify all functions. All, /// Verify no functions None, } impl Default for VerificationScope { fn default() -> Self { Self::Public } } /// Represents options provided to the tool. Most of those options are configured via a toml /// source; some over the command line flags. #[derive(Debug, Clone, Deserialize, Serialize)] #[serde(default, deny_unknown_fields)] pub struct Options { /// Path to the boogie prelude. The special string `INLINE_PRELUDE` is used to refer to /// a prelude build into this binary. pub prelude_path: String, /// The path to the boogie output which represents the verification problem. pub output_path: String, /// Verbosity level for logging. pub verbosity_level: LevelFilter, /// Whether to run the documentation generator instead of the prover. pub run_docgen: bool, /// An account address to use if none is specified in the source. pub account_address: String, /// The paths to the Move sources. pub move_sources: Vec<String>, /// The paths to any dependencies for the Move sources. Those will not be verified but /// can be used by `move_sources`. pub move_deps: Vec<String>, /// Options for the prover. pub prover: ProverOptions, /// Options for the prover backend. pub backend: BackendOptions, /// Options for the documentation generator. pub docgen: DocgenOptions, } impl Default for Options { fn default() -> Self { Self { prelude_path: INLINE_PRELUDE.to_string(), output_path: "output.bpl".to_string(), run_docgen: false, account_address: "0x234567".to_string(), verbosity_level: LevelFilter::Info, move_sources: vec![], move_deps: vec![], docgen: DocgenOptions::default(), prover: ProverOptions::default(), backend: BackendOptions::default(), } } } #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(default, deny_unknown_fields)] pub struct ProverOptions { /// Whether to only generate backend code. pub generate_only: bool, /// Whether to generate stubs for native functions. pub native_stubs: bool, /// Whether to minimize execution traces in errors. pub minimize_execution_trace: bool, /// Whether to omit debug information in generated model. 
pub omit_model_debug: bool, /// Whether output for e.g. diagnosis shall be stable/redacted so it can be used in test /// output. pub stable_test_output: bool, /// Scope of what functions to verify. pub verify_scope: VerificationScope, /// Whether to emit global axiom that resources are well-formed. pub resource_wellformed_axiom: bool, /// Whether to automatically debug trace values of specification expression leafs. pub debug_trace: bool, } impl Default for ProverOptions { fn default() -> Self { Self { generate_only: false, native_stubs: false, minimize_execution_trace: true, omit_model_debug: false, stable_test_output: false, verify_scope: VerificationScope::Public, resource_wellformed_axiom: true, debug_trace: false, } } } /// Backend options. #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(default, deny_unknown_fields)] pub struct BackendOptions { /// Path to the boogie executable. pub boogie_exe: String, /// Path to the z3 executable. pub z3_exe: String, /// Whether to use cvc4. pub use_cvc4: bool, /// Path to the cvc4 executable. pub cvc4_exe: String, /// List of flags to pass on to boogie. pub boogie_flags: Vec<String>, /// Whether to use native array theory. pub use_array_theory: bool, /// Whether to produce an SMT file for each verification problem. pub generate_smt: bool, /// Whether native instead of stratified equality should be used. pub native_equality: bool, /// A string determining the type of requires used for parameter type checks. Can be /// `"requires"` or `"free requires`". pub type_requires: String, /// The depth until which stratified functions are expanded. pub stratification_depth: usize, /// A string to be used to inline a function of medium size. Can be empty or `{:inline}`. pub aggressive_func_inline: String, /// A string to be used to inline a function of small size. Can be empty or `{:inline}`. pub func_inline: String, /// A bound to apply to the length of serialization results. pub serialize_bound: usize, /// How many times to call the prover backend for the verification problem. This is used for /// benchmarking. pub bench_repeat: usize, } impl Default for BackendOptions { fn default() -> Self { let get_env = |s| std::env::var(s).unwrap_or_else(|_| String::new()); Self { bench_repeat: 1, boogie_exe: get_env("BOOGIE_EXE"), z3_exe: get_env("Z3_EXE"), use_cvc4: false, cvc4_exe: get_env("CVC4_EXE"), boogie_flags: vec![], use_array_theory: false, generate_smt: false, native_equality: false, type_requires: "free requires".to_owned(), stratification_depth: 4, aggressive_func_inline: "".to_owned(), func_inline: "{:inline}".to_owned(), serialize_bound: 4, } } } impl Options { /// Creates options from toml configuration source. pub fn create_from_toml(toml_source: &str) -> anyhow::Result<Options> { Ok(toml::from_str(toml_source)?) } /// Creates options from toml configuration file. pub fn create_from_toml_file(toml_file: &str) -> anyhow::Result<Options> { Self::create_from_toml(&std::fs::read_to_string(toml_file)?) } // Creates options from command line arguments. This parses the arguments and terminates // the program on errors, printing usage information. The first argument is expected to be // the program name. pub fn create_from_args(args: &[String]) -> anyhow::Result<Options> { // Clap definition of the command line interface. 
let is_number = |s: String| { s.parse::<usize>() .map(|_| ()) .map_err(|_| "expected number".to_string()) }; let cli = App::new("mvp") .version("0.1.0") .about("The Move Prover") .author("The Libra Core Contributors") .arg( Arg::with_name("config") .short("c") .long("config") .takes_value(true) .value_name("TOML_FILE") .env("MOVE_PROVER_CONFIG") .help("path to a configuration file. Values in this file will be overridden by command line flags"), ) .arg( Arg::with_name("config-str") .conflicts_with("config") .short("C") .long("config-str") .takes_value(true) .multiple(true) .number_of_values(1) .value_name("TOML_STRING") .help("inline configuration string in toml syntax. Can be repeated. Use as in `-C=prover.opt=value -C=backend.opt=value`"), ) .arg( Arg::with_name("print-config") .long("print-config") .help("prints the effective toml configuration, then exits") ) .arg( Arg::with_name("output") .short("o") .long("output") .takes_value(true) .value_name("BOOGIE_FILE") .help("path to the boogie output which represents the verification problem"), ) .arg( Arg::with_name("verbosity") .short("v") .long("verbose") .takes_value(true) .possible_values(&["error", "warn", "info", "debug"]) .help("verbosity level."), ) .arg( Arg::with_name("generate-only") .short("g") .long("generate-only") .help("only generate boogie file but do not call boogie"), ) .arg( Arg::with_name("trace") .long("trace") .short("t") .help("enables automatic tracing of expressions in prover errors") ) .arg( Arg::with_name("docgen") .long("docgen") .help("run the documentation generator instead of the prover. Generated docs will be written into the directory `./doc` unless configured otherwise via toml"), ) .arg( Arg::with_name("verify") .long("verify") .takes_value(true) .possible_values(&["public", "all", "none"]) .value_name("SCOPE") .help("default scope of verification (can be overridden by `pragma verify=true|false`)"), ) .arg( Arg::with_name("bench-repeat") .long("bench-repeat") .takes_value(true) .value_name("COUNT") .validator(is_number) .help( "for benchmarking: how many times to call the backend on the verification problem", ), ) .arg( Arg::with_name("dependencies") .long("dependency") .short("d") .multiple(true) .number_of_values(1) .takes_value(true) .value_name("PATH_TO_DEPENDENCY") .help("path to a Move file, or a directory which will be searched for Move files, containing dependencies which will not be verified") ) .arg( Arg::with_name("sources") .multiple(true) .value_name("PATH_TO_SOURCE_FILE") .min_values(1) .help("the source files to verify"), ) .after_help("More options available via `--config file` or `--config-str str`. Use `--print-config` to see format and current values. See `move-prover/src/cli.rs::Option` for documentation."); // Parse the arguments. This will abort the program on parsing errors and print help. // It will also accept options like --help. let matches = cli.get_matches_from(args); // Initialize options. let get_vec = |s: &str| -> Vec<String> { match matches.values_of(s) { Some(vs) => vs.map(|v| v.to_string()).collect(), _ => vec![], } }; let mut options = if matches.is_present("config") { Self::create_from_toml_file(matches.value_of("config").unwrap())? } else if matches.is_present("config-str") { let config_lines = get_vec("config-str").join("\n"); Self::create_from_toml(&config_lines)? } else { Options::default() }; // Analyze arguments. 
if matches.is_present("output") { options.output_path = matches.value_of("output").unwrap().to_string(); } if matches.is_present("verbosity") { options.verbosity_level = match matches.value_of("verbosity").unwrap() { "error" => LevelFilter::Error, "warn" => LevelFilter::Warn, "info" => LevelFilter::Info, "debug" => LevelFilter::Debug, _ => unreachable!("should not happen"), } } if matches.occurrences_of("sources") > 0 { options.move_sources = get_vec("sources"); } if matches.occurrences_of("dependencies") > 0 { options.move_deps = get_vec("dependencies"); } if matches.is_present("verify") { options.prover.verify_scope = match matches.value_of("verify").unwrap() { "public" => VerificationScope::Public, "all" => VerificationScope::All, "none" => VerificationScope::None, _ => unreachable!("should not happen"), } } if matches.is_present("bench-repeat") { options.backend.bench_repeat = matches.value_of("bench-repeat").unwrap().parse::<usize>()?; } if matches.is_present("docgen") { options.run_docgen = true; } if matches.is_present("trace") { options.prover.debug_trace = true; } if matches.is_present("print-config") { println!("{}", toml::to_string(&options).unwrap()); Err(anyhow!("exiting")) } else { Ok(options) } } /// Sets up logging based on provided options. This should be called as early as possible /// and before any use of info!, warn! etc. pub fn setup_logging(&self) { CombinedLogger::init(vec![TermLogger::new( self.verbosity_level, ConfigBuilder::new() .set_time_level(LevelFilter::Debug) .set_level_padding(LevelPadding::Off) .build(), TerminalMode::Mixed, )]) .expect("Unexpected CombinedLogger init failure"); } pub fn setup_logging_for_test(&self) { // Loggers are global static, so we have to protect against reinitializing. if LOGGER_CONFIGURED.compare_and_swap(false, true, Ordering::Relaxed) { return; } TEST_MODE.store(true, Ordering::Relaxed); SimpleLogger::init(self.verbosity_level, Config::default()) .expect("UnexpectedSimpleLogger failure"); } /// Returns command line to call boogie. pub fn
(&self, boogie_file: &str) -> Vec<String> { let mut result = vec![self.backend.boogie_exe.clone()]; let mut add = |sl: &[&str]| result.extend(sl.iter().map(|s| (*s).to_string())); add(DEFAULT_BOOGIE_FLAGS); if self.backend.use_cvc4 { add(&[ "-proverOpt:SOLVER=cvc4", &format!("-proverOpt:PROVER_PATH={}", &self.backend.cvc4_exe), ]); } else { add(&[&format!("-proverOpt:PROVER_PATH={}", &self.backend.z3_exe)]); } if self.backend.use_array_theory { add(&["-useArrayTheory"]); } add(&["-proverOpt:O:smt.QI.EAGER_THRESHOLD=100"]); add(&["-proverOpt:O:smt.QI.LAZY_THRESHOLD=100"]); // TODO: see what we can make out of these flags. //add(&["-proverOpt:O:smt.QI.PROFILE=true"]); //add(&["-proverOpt:O:trace=true"]); //add(&["-proverOpt:VERBOSITY=3"]); //add(&["-proverOpt:C:-st"]); if self.backend.generate_smt { add(&["-proverLog:@PROC@.smt"]); } for f in &self.backend.boogie_flags { add(&[f.as_str()]); } add(&[boogie_file]); result } /// Returns name of file where to log boogie output. pub fn get_boogie_log_file(&self, boogie_file: &str) -> String { format!("{}.log", boogie_file) } }
get_boogie_command
identifier_name
cli.rs
// Copyright (c) The Libra Core Contributors // SPDX-License-Identifier: Apache-2.0 #![forbid(unsafe_code)] //! Functionality related to the command line interface of the Move prover. use anyhow::anyhow; use clap::{App, Arg}; use docgen::docgen::DocgenOptions; use log::LevelFilter; use serde::{Deserialize, Serialize}; use simplelog::{ CombinedLogger, Config, ConfigBuilder, LevelPadding, SimpleLogger, TermLogger, TerminalMode, }; use std::sync::atomic::{AtomicBool, Ordering}; /// Represents the virtual path to the boogie prelude which is inlined into the binary. pub const INLINE_PRELUDE: &str = "<inline-prelude>"; /// Default flags passed to boogie. Additional flags will be added to this via the -B option. const DEFAULT_BOOGIE_FLAGS: &[&str] = &[ "-doModSetAnalysis", "-noinfer", "-printVerifiedProceduresCount:0", "-printModel:4", // Right now, we let boogie only produce one error per procedure. The boogie wrapper isn't // capable to sort out multiple errors and associate them with models otherwise. "-errorLimit:1", ]; /// Atomic used to prevent re-initialization of logging. static LOGGER_CONFIGURED: AtomicBool = AtomicBool::new(false); /// Atomic used to detect whether we are running in test mode. static TEST_MODE: AtomicBool = AtomicBool::new(false); /// Default for what functions to verify. #[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)] pub enum VerificationScope { /// Verify only public functions. Public, /// Verify all functions. All, /// Verify no functions None, } impl Default for VerificationScope { fn default() -> Self { Self::Public } } /// Represents options provided to the tool. Most of those options are configured via a toml /// source; some over the command line flags. #[derive(Debug, Clone, Deserialize, Serialize)] #[serde(default, deny_unknown_fields)] pub struct Options { /// Path to the boogie prelude. The special string `INLINE_PRELUDE` is used to refer to /// a prelude build into this binary. pub prelude_path: String, /// The path to the boogie output which represents the verification problem. pub output_path: String, /// Verbosity level for logging. pub verbosity_level: LevelFilter, /// Whether to run the documentation generator instead of the prover. pub run_docgen: bool, /// An account address to use if none is specified in the source. pub account_address: String, /// The paths to the Move sources. pub move_sources: Vec<String>, /// The paths to any dependencies for the Move sources. Those will not be verified but /// can be used by `move_sources`. pub move_deps: Vec<String>, /// Options for the prover. pub prover: ProverOptions, /// Options for the prover backend. pub backend: BackendOptions, /// Options for the documentation generator. pub docgen: DocgenOptions, } impl Default for Options { fn default() -> Self { Self { prelude_path: INLINE_PRELUDE.to_string(), output_path: "output.bpl".to_string(), run_docgen: false, account_address: "0x234567".to_string(), verbosity_level: LevelFilter::Info, move_sources: vec![], move_deps: vec![], docgen: DocgenOptions::default(), prover: ProverOptions::default(), backend: BackendOptions::default(), } } } #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(default, deny_unknown_fields)] pub struct ProverOptions { /// Whether to only generate backend code.
/// Whether to minimize execution traces in errors. pub minimize_execution_trace: bool, /// Whether to omit debug information in generated model. pub omit_model_debug: bool, /// Whether output for e.g. diagnosis shall be stable/redacted so it can be used in test /// output. pub stable_test_output: bool, /// Scope of what functions to verify. pub verify_scope: VerificationScope, /// Whether to emit global axiom that resources are well-formed. pub resource_wellformed_axiom: bool, /// Whether to automatically debug trace values of specification expression leafs. pub debug_trace: bool, } impl Default for ProverOptions { fn default() -> Self { Self { generate_only: false, native_stubs: false, minimize_execution_trace: true, omit_model_debug: false, stable_test_output: false, verify_scope: VerificationScope::Public, resource_wellformed_axiom: true, debug_trace: false, } } } /// Backend options. #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(default, deny_unknown_fields)] pub struct BackendOptions { /// Path to the boogie executable. pub boogie_exe: String, /// Path to the z3 executable. pub z3_exe: String, /// Whether to use cvc4. pub use_cvc4: bool, /// Path to the cvc4 executable. pub cvc4_exe: String, /// List of flags to pass on to boogie. pub boogie_flags: Vec<String>, /// Whether to use native array theory. pub use_array_theory: bool, /// Whether to produce an SMT file for each verification problem. pub generate_smt: bool, /// Whether native instead of stratified equality should be used. pub native_equality: bool, /// A string determining the type of requires used for parameter type checks. Can be /// `"requires"` or `"free requires`". pub type_requires: String, /// The depth until which stratified functions are expanded. pub stratification_depth: usize, /// A string to be used to inline a function of medium size. Can be empty or `{:inline}`. pub aggressive_func_inline: String, /// A string to be used to inline a function of small size. Can be empty or `{:inline}`. pub func_inline: String, /// A bound to apply to the length of serialization results. pub serialize_bound: usize, /// How many times to call the prover backend for the verification problem. This is used for /// benchmarking. pub bench_repeat: usize, } impl Default for BackendOptions { fn default() -> Self { let get_env = |s| std::env::var(s).unwrap_or_else(|_| String::new()); Self { bench_repeat: 1, boogie_exe: get_env("BOOGIE_EXE"), z3_exe: get_env("Z3_EXE"), use_cvc4: false, cvc4_exe: get_env("CVC4_EXE"), boogie_flags: vec![], use_array_theory: false, generate_smt: false, native_equality: false, type_requires: "free requires".to_owned(), stratification_depth: 4, aggressive_func_inline: "".to_owned(), func_inline: "{:inline}".to_owned(), serialize_bound: 4, } } } impl Options { /// Creates options from toml configuration source. pub fn create_from_toml(toml_source: &str) -> anyhow::Result<Options> { Ok(toml::from_str(toml_source)?) } /// Creates options from toml configuration file. pub fn create_from_toml_file(toml_file: &str) -> anyhow::Result<Options> { Self::create_from_toml(&std::fs::read_to_string(toml_file)?) } // Creates options from command line arguments. This parses the arguments and terminates // the program on errors, printing usage information. The first argument is expected to be // the program name. pub fn create_from_args(args: &[String]) -> anyhow::Result<Options> { // Clap definition of the command line interface. 
let is_number = |s: String| { s.parse::<usize>() .map(|_| ()) .map_err(|_| "expected number".to_string()) }; let cli = App::new("mvp") .version("0.1.0") .about("The Move Prover") .author("The Libra Core Contributors") .arg( Arg::with_name("config") .short("c") .long("config") .takes_value(true) .value_name("TOML_FILE") .env("MOVE_PROVER_CONFIG") .help("path to a configuration file. Values in this file will be overridden by command line flags"), ) .arg( Arg::with_name("config-str") .conflicts_with("config") .short("C") .long("config-str") .takes_value(true) .multiple(true) .number_of_values(1) .value_name("TOML_STRING") .help("inline configuration string in toml syntax. Can be repeated. Use as in `-C=prover.opt=value -C=backend.opt=value`"), ) .arg( Arg::with_name("print-config") .long("print-config") .help("prints the effective toml configuration, then exits") ) .arg( Arg::with_name("output") .short("o") .long("output") .takes_value(true) .value_name("BOOGIE_FILE") .help("path to the boogie output which represents the verification problem"), ) .arg( Arg::with_name("verbosity") .short("v") .long("verbose") .takes_value(true) .possible_values(&["error", "warn", "info", "debug"]) .help("verbosity level."), ) .arg( Arg::with_name("generate-only") .short("g") .long("generate-only") .help("only generate boogie file but do not call boogie"), ) .arg( Arg::with_name("trace") .long("trace") .short("t") .help("enables automatic tracing of expressions in prover errors") ) .arg( Arg::with_name("docgen") .long("docgen") .help("run the documentation generator instead of the prover. Generated docs will be written into the directory `./doc` unless configured otherwise via toml"), ) .arg( Arg::with_name("verify") .long("verify") .takes_value(true) .possible_values(&["public", "all", "none"]) .value_name("SCOPE") .help("default scope of verification (can be overridden by `pragma verify=true|false`)"), ) .arg( Arg::with_name("bench-repeat") .long("bench-repeat") .takes_value(true) .value_name("COUNT") .validator(is_number) .help( "for benchmarking: how many times to call the backend on the verification problem", ), ) .arg( Arg::with_name("dependencies") .long("dependency") .short("d") .multiple(true) .number_of_values(1) .takes_value(true) .value_name("PATH_TO_DEPENDENCY") .help("path to a Move file, or a directory which will be searched for Move files, containing dependencies which will not be verified") ) .arg( Arg::with_name("sources") .multiple(true) .value_name("PATH_TO_SOURCE_FILE") .min_values(1) .help("the source files to verify"), ) .after_help("More options available via `--config file` or `--config-str str`. Use `--print-config` to see format and current values. See `move-prover/src/cli.rs::Option` for documentation."); // Parse the arguments. This will abort the program on parsing errors and print help. // It will also accept options like --help. let matches = cli.get_matches_from(args); // Initialize options. let get_vec = |s: &str| -> Vec<String> { match matches.values_of(s) { Some(vs) => vs.map(|v| v.to_string()).collect(), _ => vec![], } }; let mut options = if matches.is_present("config") { Self::create_from_toml_file(matches.value_of("config").unwrap())? } else if matches.is_present("config-str") { let config_lines = get_vec("config-str").join("\n"); Self::create_from_toml(&config_lines)? } else { Options::default() }; // Analyze arguments. 
if matches.is_present("output") { options.output_path = matches.value_of("output").unwrap().to_string(); } if matches.is_present("verbosity") { options.verbosity_level = match matches.value_of("verbosity").unwrap() { "error" => LevelFilter::Error, "warn" => LevelFilter::Warn, "info" => LevelFilter::Info, "debug" => LevelFilter::Debug, _ => unreachable!("should not happen"), } } if matches.occurrences_of("sources") > 0 { options.move_sources = get_vec("sources"); } if matches.occurrences_of("dependencies") > 0 { options.move_deps = get_vec("dependencies"); } if matches.is_present("verify") { options.prover.verify_scope = match matches.value_of("verify").unwrap() { "public" => VerificationScope::Public, "all" => VerificationScope::All, "none" => VerificationScope::None, _ => unreachable!("should not happen"), } } if matches.is_present("bench-repeat") { options.backend.bench_repeat = matches.value_of("bench-repeat").unwrap().parse::<usize>()?; } if matches.is_present("docgen") { options.run_docgen = true; } if matches.is_present("trace") { options.prover.debug_trace = true; } if matches.is_present("print-config") { println!("{}", toml::to_string(&options).unwrap()); Err(anyhow!("exiting")) } else { Ok(options) } } /// Sets up logging based on provided options. This should be called as early as possible /// and before any use of info!, warn! etc. pub fn setup_logging(&self) { CombinedLogger::init(vec![TermLogger::new( self.verbosity_level, ConfigBuilder::new() .set_time_level(LevelFilter::Debug) .set_level_padding(LevelPadding::Off) .build(), TerminalMode::Mixed, )]) .expect("Unexpected CombinedLogger init failure"); } pub fn setup_logging_for_test(&self) { // Loggers are global static, so we have to protect against reinitializing. if LOGGER_CONFIGURED.compare_and_swap(false, true, Ordering::Relaxed) { return; } TEST_MODE.store(true, Ordering::Relaxed); SimpleLogger::init(self.verbosity_level, Config::default()) .expect("UnexpectedSimpleLogger failure"); } /// Returns command line to call boogie. pub fn get_boogie_command(&self, boogie_file: &str) -> Vec<String> { let mut result = vec![self.backend.boogie_exe.clone()]; let mut add = |sl: &[&str]| result.extend(sl.iter().map(|s| (*s).to_string())); add(DEFAULT_BOOGIE_FLAGS); if self.backend.use_cvc4 { add(&[ "-proverOpt:SOLVER=cvc4", &format!("-proverOpt:PROVER_PATH={}", &self.backend.cvc4_exe), ]); } else { add(&[&format!("-proverOpt:PROVER_PATH={}", &self.backend.z3_exe)]); } if self.backend.use_array_theory { add(&["-useArrayTheory"]); } add(&["-proverOpt:O:smt.QI.EAGER_THRESHOLD=100"]); add(&["-proverOpt:O:smt.QI.LAZY_THRESHOLD=100"]); // TODO: see what we can make out of these flags. //add(&["-proverOpt:O:smt.QI.PROFILE=true"]); //add(&["-proverOpt:O:trace=true"]); //add(&["-proverOpt:VERBOSITY=3"]); //add(&["-proverOpt:C:-st"]); if self.backend.generate_smt { add(&["-proverLog:@[email protected]"]); } for f in &self.backend.boogie_flags { add(&[f.as_str()]); } add(&[boogie_file]); result } /// Returns name of file where to log boogie output. pub fn get_boogie_log_file(&self, boogie_file: &str) -> String { format!("{}.log", boogie_file) } }
pub generate_only: bool, /// Whether to generate stubs for native functions. pub native_stubs: bool,
random_line_split
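Because `Options` and its sub-structs are tagged `#[serde(default, deny_unknown_fields)]`, a partial toml fragment is enough to override individual knobs while everything else keeps its `Default` value, and misspelled keys fail fast. A minimal test sketch of `create_from_toml` under that assumption (the expected values follow from the `Default` impls above):

// Sketch: round-tripping a partial toml fragment through create_from_toml.
#[test]
fn partial_toml_overrides_keep_defaults() -> anyhow::Result<()> {
    let opts = Options::create_from_toml(
        "[backend]\nbench_repeat = 3\n\n[prover]\ndebug_trace = true\n",
    )?;
    assert_eq!(opts.backend.bench_repeat, 3);
    assert!(opts.prover.debug_trace);
    // Fields not mentioned in the fragment fall back to Default via #[serde(default)].
    assert_eq!(opts.backend.stratification_depth, 4);
    assert_eq!(opts.output_path, "output.bpl");
    // Unknown keys are rejected by deny_unknown_fields.
    assert!(Options::create_from_toml("[prover]\nno_such_option = 1\n").is_err());
    Ok(())
}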
cli.rs
// Copyright (c) The Libra Core Contributors // SPDX-License-Identifier: Apache-2.0 #![forbid(unsafe_code)] //! Functionality related to the command line interface of the Move prover. use anyhow::anyhow; use clap::{App, Arg}; use docgen::docgen::DocgenOptions; use log::LevelFilter; use serde::{Deserialize, Serialize}; use simplelog::{ CombinedLogger, Config, ConfigBuilder, LevelPadding, SimpleLogger, TermLogger, TerminalMode, }; use std::sync::atomic::{AtomicBool, Ordering}; /// Represents the virtual path to the boogie prelude which is inlined into the binary. pub const INLINE_PRELUDE: &str = "<inline-prelude>"; /// Default flags passed to boogie. Additional flags will be added to this via the -B option. const DEFAULT_BOOGIE_FLAGS: &[&str] = &[ "-doModSetAnalysis", "-noinfer", "-printVerifiedProceduresCount:0", "-printModel:4", // Right now, we let boogie only produce one error per procedure. The boogie wrapper isn't // capable of sorting out multiple errors and associating them with models otherwise. "-errorLimit:1", ]; /// Atomic used to prevent re-initialization of logging. static LOGGER_CONFIGURED: AtomicBool = AtomicBool::new(false); /// Atomic used to detect whether we are running in test mode. static TEST_MODE: AtomicBool = AtomicBool::new(false); /// Default for what functions to verify. #[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)] pub enum VerificationScope { /// Verify only public functions. Public, /// Verify all functions. All, /// Verify no functions. None, } impl Default for VerificationScope { fn default() -> Self { Self::Public } } /// Represents options provided to the tool. Most of these options are configured via a toml /// source; some via command line flags. #[derive(Debug, Clone, Deserialize, Serialize)] #[serde(default, deny_unknown_fields)] pub struct Options { /// Path to the boogie prelude. The special string `INLINE_PRELUDE` is used to refer to /// a prelude built into this binary. pub prelude_path: String, /// The path to the boogie output which represents the verification problem. pub output_path: String, /// Verbosity level for logging. pub verbosity_level: LevelFilter, /// Whether to run the documentation generator instead of the prover. pub run_docgen: bool, /// An account address to use if none is specified in the source. pub account_address: String, /// The paths to the Move sources. pub move_sources: Vec<String>, /// The paths to any dependencies for the Move sources. Those will not be verified but /// can be used by `move_sources`. pub move_deps: Vec<String>, /// Options for the prover. pub prover: ProverOptions, /// Options for the prover backend. pub backend: BackendOptions, /// Options for the documentation generator. pub docgen: DocgenOptions, } impl Default for Options { fn default() -> Self { Self { prelude_path: INLINE_PRELUDE.to_string(), output_path: "output.bpl".to_string(), run_docgen: false, account_address: "0x234567".to_string(), verbosity_level: LevelFilter::Info, move_sources: vec![], move_deps: vec![], docgen: DocgenOptions::default(), prover: ProverOptions::default(), backend: BackendOptions::default(), } } } #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(default, deny_unknown_fields)] pub struct ProverOptions { /// Whether to only generate backend code. pub generate_only: bool, /// Whether to generate stubs for native functions. pub native_stubs: bool, /// Whether to minimize execution traces in errors. pub minimize_execution_trace: bool, /// Whether to omit debug information in generated model. 
pub omit_model_debug: bool, /// Whether output for e.g. diagnosis shall be stable/redacted so it can be used in test /// output. pub stable_test_output: bool, /// Scope of what functions to verify. pub verify_scope: VerificationScope, /// Whether to emit a global axiom that resources are well-formed. pub resource_wellformed_axiom: bool, /// Whether to automatically debug trace values of specification expression leaves. pub debug_trace: bool, } impl Default for ProverOptions { fn default() -> Self { Self { generate_only: false, native_stubs: false, minimize_execution_trace: true, omit_model_debug: false, stable_test_output: false, verify_scope: VerificationScope::Public, resource_wellformed_axiom: true, debug_trace: false, } } } /// Backend options. #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(default, deny_unknown_fields)] pub struct BackendOptions { /// Path to the boogie executable. pub boogie_exe: String, /// Path to the z3 executable. pub z3_exe: String, /// Whether to use cvc4. pub use_cvc4: bool, /// Path to the cvc4 executable. pub cvc4_exe: String, /// List of flags to pass on to boogie. pub boogie_flags: Vec<String>, /// Whether to use native array theory. pub use_array_theory: bool, /// Whether to produce an SMT file for each verification problem. pub generate_smt: bool, /// Whether native instead of stratified equality should be used. pub native_equality: bool, /// A string determining the type of requires used for parameter type checks. Can be /// `"requires"` or `"free requires"`. pub type_requires: String, /// The depth until which stratified functions are expanded. pub stratification_depth: usize, /// A string to be used to inline a function of medium size. Can be empty or `{:inline}`. pub aggressive_func_inline: String, /// A string to be used to inline a function of small size. Can be empty or `{:inline}`. pub func_inline: String, /// A bound to apply to the length of serialization results. pub serialize_bound: usize, /// How many times to call the prover backend for the verification problem. This is used for /// benchmarking. pub bench_repeat: usize, } impl Default for BackendOptions { fn default() -> Self { let get_env = |s| std::env::var(s).unwrap_or_else(|_| String::new()); Self { bench_repeat: 1, boogie_exe: get_env("BOOGIE_EXE"), z3_exe: get_env("Z3_EXE"), use_cvc4: false, cvc4_exe: get_env("CVC4_EXE"), boogie_flags: vec![], use_array_theory: false, generate_smt: false, native_equality: false, type_requires: "free requires".to_owned(), stratification_depth: 4, aggressive_func_inline: "".to_owned(), func_inline: "{:inline}".to_owned(), serialize_bound: 4, } } } impl Options { /// Creates options from toml configuration source. pub fn create_from_toml(toml_source: &str) -> anyhow::Result<Options> { Ok(toml::from_str(toml_source)?) } /// Creates options from toml configuration file. pub fn create_from_toml_file(toml_file: &str) -> anyhow::Result<Options> { Self::create_from_toml(&std::fs::read_to_string(toml_file)?) } /// Creates options from command line arguments. This parses the arguments and terminates /// the program on errors, printing usage information. The first argument is expected to be /// the program name. pub fn create_from_args(args: &[String]) -> anyhow::Result<Options> { // Clap definition of the command line interface. 
let is_number = |s: String| { s.parse::<usize>() .map(|_| ()) .map_err(|_| "expected number".to_string()) }; let cli = App::new("mvp") .version("0.1.0") .about("The Move Prover") .author("The Libra Core Contributors") .arg( Arg::with_name("config") .short("c") .long("config") .takes_value(true) .value_name("TOML_FILE") .env("MOVE_PROVER_CONFIG") .help("path to a configuration file. \ Values in this file will be overridden by command line flags"), ) .arg( Arg::with_name("config-str") .conflicts_with("config") .short("C") .long("config-str") .takes_value(true) .multiple(true) .number_of_values(1) .value_name("TOML_STRING") .help("inline configuration string in toml syntax. Can be repeated. \ Use as in `-C=prover.opt=value -C=backend.opt=value`"), ) .arg( Arg::with_name("print-config") .long("print-config") .help("prints the effective toml configuration, then exits") ) .arg( Arg::with_name("output") .short("o") .long("output") .takes_value(true) .value_name("BOOGIE_FILE") .help("path to the boogie output which represents the verification problem"), ) .arg( Arg::with_name("verbosity") .short("v") .long("verbose") .takes_value(true) .possible_values(&["error", "warn", "info", "debug"]) .help("verbosity level."), ) .arg( Arg::with_name("generate-only") .short("g") .long("generate-only") .help("only generate boogie file but do not call boogie"), ) .arg( Arg::with_name("trace") .long("trace") .short("t") .help("enables automatic tracing of expressions in prover errors") ) .arg( Arg::with_name("docgen") .long("docgen") .help("run the documentation generator instead of the prover. \ Generated docs will be written into the directory `./doc` unless configured otherwise via toml"), ) .arg( Arg::with_name("verify") .long("verify") .takes_value(true) .possible_values(&["public", "all", "none"]) .value_name("SCOPE") .help("default scope of verification \ (can be overridden by `pragma verify=true|false`)"), ) .arg( Arg::with_name("bench-repeat") .long("bench-repeat") .takes_value(true) .value_name("COUNT") .validator(is_number) .help( "for benchmarking: how many times to call the backend on the verification problem", ), ) .arg( Arg::with_name("dependencies") .long("dependency") .short("d") .multiple(true) .number_of_values(1) .takes_value(true) .value_name("PATH_TO_DEPENDENCY") .help("path to a Move file, or a directory which will be searched for \ Move files, containing dependencies which will not be verified") ) .arg( Arg::with_name("sources") .multiple(true) .value_name("PATH_TO_SOURCE_FILE") .min_values(1) .help("the source files to verify"), ) .after_help("More options available via `--config file` or `--config-str str`. \ Use `--print-config` to see format and current values. \ See `move-prover/src/cli.rs::Option` for documentation."); // Parse the arguments. This will abort the program on parsing errors and print help. // It will also accept options like --help. let matches = cli.get_matches_from(args); // Initialize options. let get_vec = |s: &str| -> Vec<String> { match matches.values_of(s) { Some(vs) => vs.map(|v| v.to_string()).collect(), _ => vec![], } }; let mut options = if matches.is_present("config") { Self::create_from_toml_file(matches.value_of("config").unwrap())? } else if matches.is_present("config-str") { let config_lines = get_vec("config-str").join("\n"); Self::create_from_toml(&config_lines)? } else { Options::default() }; // Analyze arguments. 
if matches.is_present("output") { options.output_path = matches.value_of("output").unwrap().to_string(); } if matches.is_present("verbosity") { options.verbosity_level = match matches.value_of("verbosity").unwrap() { "error" => LevelFilter::Error, "warn" => LevelFilter::Warn, "info" => LevelFilter::Info, "debug" => LevelFilter::Debug, _ => unreachable!("should not happen"), } } if matches.occurrences_of("sources") > 0 { options.move_sources = get_vec("sources"); } if matches.occurrences_of("dependencies") > 0 { options.move_deps = get_vec("dependencies"); } if matches.is_present("verify") { options.prover.verify_scope = match matches.value_of("verify").unwrap() { "public" => VerificationScope::Public, "all" => VerificationScope::All, "none" => VerificationScope::None, _ => unreachable!("should not happen"), } } if matches.is_present("bench-repeat") { options.backend.bench_repeat = matches.value_of("bench-repeat").unwrap().parse::<usize>()?; } if matches.is_present("docgen") { options.run_docgen = true; } if matches.is_present("trace") { options.prover.debug_trace = true; } if matches.is_present("print-config") { println!("{}", toml::to_string(&options).unwrap()); Err(anyhow!("exiting")) } else
} /// Sets up logging based on provided options. This should be called as early as possible /// and before any use of info!, warn! etc. pub fn setup_logging(&self) { CombinedLogger::init(vec![TermLogger::new( self.verbosity_level, ConfigBuilder::new() .set_time_level(LevelFilter::Debug) .set_level_padding(LevelPadding::Off) .build(), TerminalMode::Mixed, )]) .expect("Unexpected CombinedLogger init failure"); } pub fn setup_logging_for_test(&self) { // Loggers are global statics, so we have to protect against reinitializing. if LOGGER_CONFIGURED.compare_and_swap(false, true, Ordering::Relaxed) { return; } TEST_MODE.store(true, Ordering::Relaxed); SimpleLogger::init(self.verbosity_level, Config::default()) .expect("Unexpected SimpleLogger failure"); } /// Returns the command line to call boogie. pub fn get_boogie_command(&self, boogie_file: &str) -> Vec<String> { let mut result = vec![self.backend.boogie_exe.clone()]; let mut add = |sl: &[&str]| result.extend(sl.iter().map(|s| (*s).to_string())); add(DEFAULT_BOOGIE_FLAGS); if self.backend.use_cvc4 { add(&[ "-proverOpt:SOLVER=cvc4", &format!("-proverOpt:PROVER_PATH={}", &self.backend.cvc4_exe), ]); } else { add(&[&format!("-proverOpt:PROVER_PATH={}", &self.backend.z3_exe)]); } if self.backend.use_array_theory { add(&["-useArrayTheory"]); } add(&["-proverOpt:O:smt.QI.EAGER_THRESHOLD=100"]); add(&["-proverOpt:O:smt.QI.LAZY_THRESHOLD=100"]); // TODO: see what we can make out of these flags. //add(&["-proverOpt:O:smt.QI.PROFILE=true"]); //add(&["-proverOpt:O:trace=true"]); //add(&["-proverOpt:VERBOSITY=3"]); //add(&["-proverOpt:C:-st"]); if self.backend.generate_smt { add(&["-proverLog:@PROC@.smt"]); } for f in &self.backend.boogie_flags { add(&[f.as_str()]); } add(&[boogie_file]); result } /// Returns the name of the file where boogie output is logged. pub fn get_boogie_log_file(&self, boogie_file: &str) -> String { format!("{}.log", boogie_file) } }
{ Ok(options) }
conditional_block
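The `{ Ok(options) }` arm above is the tail of the `--print-config` handling: after printing the effective toml, `create_from_args` deliberately returns `Err(anyhow!("exiting"))` so the caller unwinds without running the prover. A sketch of what a call site could look like under that convention (`run_prover` is an illustrative stand-in, not from this file):

fn main() {
    let args: Vec<String> = std::env::args().collect();
    match Options::create_from_args(&args) {
        Ok(options) => {
            options.setup_logging();
            // run_prover(&options); // stand-in for the real driver
        }
        Err(e) => {
            // --print-config lands here after the config was printed;
            // real parse errors land here too.
            eprintln!("{}", e);
            std::process::exit(1);
        }
    }
}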
main.rs
#![feature(entry_insert, destructuring_assignment)] use anyhow::{anyhow, bail, ensure}; use clap::{App, Arg}; use derive_more::From; use digits_iterator::*; use itertools::Itertools; use std::{collections::HashMap, convert::TryFrom, fmt, fs, iter, sync::Mutex}; use tokio::pin; use tokio_stream::{Stream, StreamExt}; fn main() -> Result<(), anyhow::Error> { let matches = App::new("2019-11") .arg(Arg::from_usage("[input] 'Problem input file'").default_value("input.txt")) .get_matches(); let input_filename = matches.value_of("input").unwrap(); let program_str = fs::read_to_string(input_filename)?.replace("\r\n", "\n"); let robot_program = parse_input(&program_str)?; let painted_hull = paint_hull(robot_program.clone(), HashMap::new(), Color::Black)?; println!( "Number of panels painted at least once: {}", painted_hull.len() ); let registration_id_hull = paint_hull( robot_program, iter::once((Point::origin(), Color::White)).collect(), Color::Black, )?; print_hull(&registration_id_hull, Color::Black); Ok(()) } fn print_hull(hull: &HashMap<Point, Color>, default_color: Color) { let ((min_x, max_x), (min_y, max_y)) = ( hull.keys() .map(|p| p.x) .minmax() .into_option() .unwrap_or_default(), hull.keys() .map(|p| p.y) .minmax() .into_option() .unwrap_or_default(), ); for y in (min_y..=max_y).rev() { for x in min_x..=max_x { if hull.get(&Point::new(x, y)).unwrap_or(&default_color) == &Color::Black { print!("█"); } else { print!(" "); } } println!() } } fn paint_hull( robot_program: Vec<isize>, starting_hull: HashMap<Point, Color>, default_color: Color, ) -> Result<HashMap<Point, Color>, anyhow::Error> { use Color::*; use Direction::*; // Basically, we're using Mutex as a way of telling Rust that we know // for sure we aren't gonna be accessing these values concurrently. // The borrow checker is then satisfied. 
let hull = Mutex::new(starting_hull); let current_location = Mutex::new(Point::origin()); let mut is_paint_output = true; let mut facing_direction = Up; futures_executor::block_on(run_program( robot_program, tokio_stream::iter(iter::from_fn(|| { let current_location = *(current_location.lock().unwrap()); Some( hull.lock() .unwrap() .get(&current_location) .copied() .unwrap_or(default_color), ) })) .map(|color| if color == Black { 0 } else { 1 }), |output| { let mut current_location = current_location.lock().unwrap(); if is_paint_output { hull.lock() .unwrap() .entry(*current_location) .insert(if output == 0 { Black } else { White }); } else { let turn_direction = if output == 0 { Left } else { Right }; (*current_location, facing_direction) = match (turn_direction, facing_direction) { (Left, Right) | (Right, Left) => { (Point::new(current_location.x, current_location.y + 1), Up) } (Left, Left) | (Right, Right) => { (Point::new(current_location.x, current_location.y - 1), Down) } (Left, Up) | (Right, Down) => { (Point::new(current_location.x - 1, current_location.y), Left) } (Left, Down) | (Right, Up) => ( Point::new(current_location.x + 1, current_location.y), Right, ), _ => unsafe { std::hint::unreachable_unchecked() }, } } is_paint_output = !is_paint_output; }, ))?; Ok(hull.into_inner().unwrap()) } #[derive(Clone, Copy, PartialEq, Eq)] enum Color { White, Black, } #[derive(Clone, Copy, PartialEq, Eq)] enum Direction { Up, Down, Left, Right, } #[derive(Clone, Copy, PartialEq, Eq, Hash, From)] struct Point { x: isize, y: isize, } impl fmt::Debug for Point { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_tuple("").field(&self.x).field(&self.y).finish() } } impl Point { fn origin() -> Self { Self::new(0, 0) } fn new(x: isize, y: isize) -> Self { Self::from((x, y)) } } async fn run_program( mut program: Vec<isize>, input: impl Stream<Item = isize>, mut output_fn: impl FnMut(isize), ) -> Result<Vec<isize>, anyhow::Error> { pin!(input); let mut instruction_pointer = 0; let mut relative_base = 0; loop { let opcode = usize::try_from(program[instruction_pointer]) .map_err(|_| anyhow!("Found a negative integer where an opcode was expected"))?; let parameter_modes = get_parameter_modes(opcode)?; let parameter_mode_of = |param: usize| { parameter_modes .get(param) .unwrap_or(&ParameterModes::Position) }; let mut get_param = |param: usize, need_write: bool| { let param_value = program .get(instruction_pointer + param + 1) .copied() .ok_or(anyhow!("Parameter not found"))?; let param_mode = parameter_mode_of(param); if need_write { ensure!( [ParameterModes::Position, ParameterModes::Relative].contains(param_mode), "Invalid argument for opcode {}: {}", opcode, param_value ); } Ok(match param_mode { ParameterModes::Position | ParameterModes::Relative => { let raw_idx = if param_mode == &ParameterModes::Relative { relative_base + param_value } else { param_value }; let idx = usize::try_from(raw_idx).map_err(|_| { anyhow!( "The program is attempting to access a negative index: {}", raw_idx ) })?; if idx >= program.len() { program.resize_with(idx + 1, || 0); } if !need_write { program[idx] } else { raw_idx } } ParameterModes::Immediate => param_value, }) }; // x % 100 gets the last 2 digits of a number, // no matter how long it is. match opcode % 100 { 1 | 2 | 7 | 8 => { let (x, y, result_idx) = ( get_param(0, false)?, get_param(1, false)?, get_param(2, true)? 
as usize, ); match opcode % 100 { 1 => program[result_idx] = x + y, 2 => program[result_idx] = x * y, 7 => program[result_idx] = (x < y) as isize, 8 => program[result_idx] = (x == y) as isize, _ => unsafe { std::hint::unreachable_unchecked() }, } instruction_pointer += 4; } 5 | 6 => { let (checked_value, jump_point) = ( get_param(0, false)?, usize::try_from(get_param(1, false)?).map_err(|_| { anyhow!("Found a negative integer where a jump point was expected") })?, ); let should_jump = match opcode % 100 { 5 => checked_value != 0, 6 => checked_value == 0, _ => unsafe { std::hint::unreachable_unchecked() }, }; if should_jump { instruction_pointer = jump_point; } else { instruction_pointer += 3; } } 3 | 4 | 9 => { match opcode % 100 { 3 => { let input = input .next() .await .ok_or(anyhow!("Found an input opcode but no input was provided"))?; let input_storage = get_param(0, true)? as usize; program[input_storage] = input; } 4 => output_fn(get_param(0, false)?), 9 => relative_base += get_param(0, false)?, _ => unsafe { std::hint::unreachable_unchecked() }, } instruction_pointer += 2; } 99 => return Ok(program), op => bail!("Encountered an unknown opcode: {}", op), } } } fn get_parameter_modes(opcode: usize) -> Result<Vec<ParameterModes>, anyhow::Error> { opcode .digits() .rev() .skip(2) .map(ParameterModes::try_from) .try_collect() } #[derive(Debug, PartialEq, Eq, Clone, Copy)] enum ParameterModes { Position, Immediate, Relative, } impl TryFrom<u8> for ParameterModes { type Error = anyhow::Error; fn try_from(value: u8) -> Result<Self, Self::Error> { Ok(match value { 0 => Self::Position, 1 => Self::Immediate, 2 => Self::Relative, _ => bail!("Unknown parameter mode: {}", value), }) } } fn parse_input(program_str: &str) -> Result<Vec<isize>, anyhow::Error> {
program_str .split(",") .map(|num_str| { num_str .trim() .parse() .map_err(|_| anyhow!("Could not parse number in program as isize: '{}'", num_str)) }) .try_collect() }
identifier_body
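As the `x % 100` comment in run_program notes, the two low digits select the instruction and the remaining digits, read right to left, are the per-parameter modes. A test sketch pinning that down for opcode 1002 (its digits reversed are [2, 0, 0, 1]; skipping the two instruction digits leaves modes [0, 1]):

// Sketch: decoding opcode 1002 with the helpers defined above.
#[test]
fn opcode_1002_decodes_to_position_then_immediate() -> Result<(), anyhow::Error> {
    assert_eq!(1002 % 100, 2); // instruction 02: multiply
    assert_eq!(
        get_parameter_modes(1002)?,
        vec![ParameterModes::Position, ParameterModes::Immediate]
    );
    // A third parameter would default to Position via parameter_mode_of.
    Ok(())
}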
main.rs
#![feature(entry_insert, destructuring_assignment)] use anyhow::{anyhow, bail, ensure}; use clap::{App, Arg}; use derive_more::From; use digits_iterator::*; use itertools::Itertools; use std::{collections::HashMap, convert::TryFrom, fmt, fs, iter, sync::Mutex}; use tokio::pin; use tokio_stream::{Stream, StreamExt}; fn main() -> Result<(), anyhow::Error> { let matches = App::new("2019-11") .arg(Arg::from_usage("[input] 'Problem input file'").default_value("input.txt")) .get_matches(); let input_filename = matches.value_of("input").unwrap(); let program_str = fs::read_to_string(input_filename)?.replace("\r\n", "\n"); let robot_program = parse_input(&program_str)?; let painted_hull = paint_hull(robot_program.clone(), HashMap::new(), Color::Black)?; println!( "Number of panels painted at least once: {}", painted_hull.len() ); let registration_id_hull = paint_hull( robot_program, iter::once((Point::origin(), Color::White)).collect(), Color::Black, )?; print_hull(&registration_id_hull, Color::Black); Ok(()) } fn print_hull(hull: &HashMap<Point, Color>, default_color: Color) { let ((min_x, max_x), (min_y, max_y)) = ( hull.keys() .map(|p| p.x) .minmax() .into_option() .unwrap_or_default(), hull.keys() .map(|p| p.y) .minmax() .into_option() .unwrap_or_default(), ); for y in (min_y..=max_y).rev() { for x in min_x..=max_x { if hull.get(&Point::new(x, y)).unwrap_or(&default_color) == &Color::Black { print!("█"); } else { print!(" "); } } println!() } } fn paint_hull( robot_program: Vec<isize>, starting_hull: HashMap<Point, Color>, default_color: Color, ) -> Result<HashMap<Point, Color>, anyhow::Error> { use Color::*; use Direction::*; // Basically, we're using Mutex as a way of telling Rust that we know // for sure we aren't gonna be accessing these values concurrently. // The borrow checker is then satisfied. 
let hull = Mutex::new(starting_hull); let current_location = Mutex::new(Point::origin()); let mut is_paint_output = true; let mut facing_direction = Up; futures_executor::block_on(run_program( robot_program, tokio_stream::iter(iter::from_fn(|| { let current_location = *(current_location.lock().unwrap()); Some( hull.lock() .unwrap() .get(&current_location) .copied() .unwrap_or(default_color), ) })) .map(|color| if color == Black { 0 } else { 1 }), |output| { let mut current_location = current_location.lock().unwrap(); if is_paint_output { hull.lock() .unwrap() .entry(*current_location) .insert(if output == 0 { Black } else { White }); } else { let turn_direction = if output == 0 { Left } else { Right }; (*current_location, facing_direction) = match (turn_direction, facing_direction) { (Left, Right) | (Right, Left) => { (Point::new(current_location.x, current_location.y + 1), Up) } (Left, Left) | (Right, Right) => { (Point::new(current_location.x, current_location.y - 1), Down) } (Left, Up) | (Right, Down) => { (Point::new(current_location.x - 1, current_location.y), Left) } (Left, Down) | (Right, Up) => ( Point::new(current_location.x + 1, current_location.y), Right, ), _ => unsafe { std::hint::unreachable_unchecked() }, } } is_paint_output = !is_paint_output; }, ))?; Ok(hull.into_inner().unwrap()) } #[derive(Clone, Copy, PartialEq, Eq)] enum Color { White, Black, } #[derive(Clone, Copy, PartialEq, Eq)] enum Direction { Up, Down, Left, Right, } #[derive(Clone, Copy, PartialEq, Eq, Hash, From)] struct Point { x: isize, y: isize, } impl fmt::Debug for Point { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_tuple("").field(&self.x).field(&self.y).finish() } } impl Point { fn origin() -> Self { Self::new(0, 0) } fn new(x: isize, y: isize) -> Self { Self::from((x, y)) } } async fn run_program( mut program: Vec<isize>, input: impl Stream<Item = isize>, mut output_fn: impl FnMut(isize), ) -> Result<Vec<isize>, anyhow::Error> { pin!(input); let mut instruction_pointer = 0; let mut relative_base = 0; loop { let opcode = usize::try_from(program[instruction_pointer]) .map_err(|_| anyhow!("Found a negative integer where an opcode was expected"))?; let parameter_modes = get_parameter_modes(opcode)?; let parameter_mode_of = |param: usize| { parameter_modes .get(param) .unwrap_or(&ParameterModes::Position) }; let mut get_param = |param: usize, need_write: bool| { let param_value = program .get(instruction_pointer + param + 1) .copied() .ok_or(anyhow!("Parameter not found"))?; let param_mode = parameter_mode_of(param); if need_write { ensure!( [ParameterModes::Position, ParameterModes::Relative].contains(param_mode), "Invalid argument for opcode {}: {}", opcode, param_value ); } Ok(match param_mode { ParameterModes::Position | ParameterModes::Relative => { let raw_idx = if param_mode == &ParameterModes::Relative { relative_base + param_value } else { param_value }; let idx = usize::try_from(raw_idx).map_err(|_| { anyhow!( "The program is attempting to access a negative index: {}", raw_idx ) })?; if idx >= program.len() { program.resize_with(idx + 1, || 0); } if !need_write { program[idx] } else { raw_idx } } ParameterModes::Immediate => param_value, }) }; // x % 100 gets the last 2 digits of a number, // no matter how long it is. match opcode % 100 { 1 | 2 | 7 | 8 => { let (x, y, result_idx) = ( get_param(0, false)?, get_param(1, false)?, get_param(2, true)? 
as usize, ); match opcode % 100 { 1 => program[result_idx] = x + y, 2 => program[result_idx] = x * y, 7 => program[result_idx] = (x < y) as isize, 8 => program[result_idx] = (x == y) as isize, _ => unsafe { std::hint::unreachable_unchecked() }, } instruction_pointer += 4; } 5 | 6 => { let (checked_value, jump_point) = ( get_param(0, false)?, usize::try_from(get_param(1, false)?).map_err(|_| { anyhow!("Found a negative integer where a jump point was expected") })?, ); let should_jump = match opcode % 100 { 5 => checked_value != 0, 6 => checked_value == 0, _ => unsafe { std::hint::unreachable_unchecked() }, }; if should_jump { instruction_pointer = jump_point; } else { instruction_pointer += 3; } } 3 | 4 | 9 => { match opcode % 100 { 3 => { let input = input .next() .await .ok_or(anyhow!("Found an input opcode but no input was provided"))?; let input_storage = get_param(0, true)? as usize; program[input_storage] = input; } 4 => output_fn(get_param(0, false)?), 9 => relative_base += get_param(0, false)?, _ => unsafe { std::hint::unreachable_unchecked() }, } instruction_pointer += 2; } 99 => return Ok(program), op => bail!("Encountered an unknown opcode: {}", op), } } } fn ge
pcode: usize) -> Result<Vec<ParameterModes>, anyhow::Error> { opcode .digits() .rev() .skip(2) .map(ParameterModes::try_from) .try_collect() } #[derive(Debug, PartialEq, Eq, Clone, Copy)] enum ParameterModes { Position, Immediate, Relative, } impl TryFrom<u8> for ParameterModes { type Error = anyhow::Error; fn try_from(value: u8) -> Result<Self, Self::Error> { Ok(match value { 0 => Self::Position, 1 => Self::Immediate, 2 => Self::Relative, _ => bail!("Unknown parameter mode: {}", value), }) } } fn parse_input(program_str: &str) -> Result<Vec<isize>, anyhow::Error> { program_str .split(",") .map(|num_str| { num_str .trim() .parse() .map_err(|_| anyhow!("Could not parse number in program as isize: '{}'", num_str)) }) .try_collect() }
t_parameter_modes(o
identifier_name
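The turn-and-step match in paint_hull encodes the robot's geometry: y grows upward, x grows rightward, and each turn output both rotates and advances one panel. A standalone restatement of the same table makes it easier to audit (`turn_and_step` is an illustrative helper, not part of the program):

// Illustrative restatement of the (turn, facing) table from paint_hull.
fn turn_and_step(turn: Direction, facing: Direction, x: isize, y: isize) -> (isize, isize, Direction) {
    use Direction::*;
    match (turn, facing) {
        (Left, Right) | (Right, Left) => (x, y + 1, Up),
        (Left, Left) | (Right, Right) => (x, y - 1, Down),
        (Left, Up) | (Right, Down) => (x - 1, y, Left),
        (Left, Down) | (Right, Up) => (x + 1, y, Right),
        // The turn output is always Left or Right, mirroring the
        // unreachable arm in paint_hull.
        _ => unreachable!("turn is always Left or Right"),
    }
}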
main.rs
#![feature(entry_insert, destructuring_assignment)] use anyhow::{anyhow, bail, ensure}; use clap::{App, Arg}; use derive_more::From; use digits_iterator::*; use itertools::Itertools; use std::{collections::HashMap, convert::TryFrom, fmt, fs, iter, sync::Mutex}; use tokio::pin; use tokio_stream::{Stream, StreamExt}; fn main() -> Result<(), anyhow::Error> { let matches = App::new("2019-11") .arg(Arg::from_usage("[input] 'Problem input file'").default_value("input.txt")) .get_matches(); let input_filename = matches.value_of("input").unwrap(); let program_str = fs::read_to_string(input_filename)?.replace("\r\n", "\n"); let robot_program = parse_input(&program_str)?; let painted_hull = paint_hull(robot_program.clone(), HashMap::new(), Color::Black)?; println!( "Number of panels painted at least once: {}", painted_hull.len() ); let registration_id_hull = paint_hull( robot_program, iter::once((Point::origin(), Color::White)).collect(), Color::Black, )?; print_hull(&registration_id_hull, Color::Black); Ok(()) } fn print_hull(hull: &HashMap<Point, Color>, default_color: Color) { let ((min_x, max_x), (min_y, max_y)) = ( hull.keys() .map(|p| p.x) .minmax() .into_option() .unwrap_or_default(), hull.keys() .map(|p| p.y) .minmax() .into_option() .unwrap_or_default(), ); for y in (min_y..=max_y).rev() { for x in min_x..=max_x { if hull.get(&Point::new(x, y)).unwrap_or(&default_color) == &Color::Black { print!("█"); } else { print!(" "); } } println!() } } fn paint_hull( robot_program: Vec<isize>, starting_hull: HashMap<Point, Color>, default_color: Color, ) -> Result<HashMap<Point, Color>, anyhow::Error> { use Color::*; use Direction::*; // Basically, we're using Mutex as a way of telling Rust that we know // for sure we aren't gonna be accessing these values concurrently. // The borrow checker is then satisfied. let hull = Mutex::new(starting_hull); let current_location = Mutex::new(Point::origin()); let mut is_paint_output = true; let mut facing_direction = Up; futures_executor::block_on(run_program( robot_program, tokio_stream::iter(iter::from_fn(|| { let current_location = *(current_location.lock().unwrap()); Some( hull.lock() .unwrap() .get(&current_location) .copied() .unwrap_or(default_color), ) })) .map(|color| if color == Black { 0 } else { 1 }), |output| { let mut current_location = current_location.lock().unwrap(); if is_paint_output { hull.lock() .unwrap() .entry(*current_location) .insert(if output == 0 { Black } else { White }); } else { let turn_direction = if output == 0 { Left } else { Right }; (*current_location, facing_direction) = match (turn_direction, facing_direction) { (Left, Right) | (Right, Left) => { (Point::new(current_location.x, current_location.y + 1), Up) } (Left, Left) | (Right, Right) => { (Point::new(current_location.x, current_location.y - 1), Down) } (Left, Up) | (Right, Down) => { (Point::new(current_location.x - 1, current_location.y), Left) } (Left, Down) | (Right, Up) => ( Point::new(current_location.x + 1, current_location.y), Right, ), _ => unsafe { std::hint::unreachable_unchecked() }, } } is_paint_output = !is_paint_output; }, ))?; Ok(hull.into_inner().unwrap()) } #[derive(Clone, Copy, PartialEq, Eq)] enum Color { White, Black,
Up, Down, Left, Right, } #[derive(Clone, Copy, PartialEq, Eq, Hash, From)] struct Point { x: isize, y: isize, } impl fmt::Debug for Point { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_tuple("").field(&self.x).field(&self.y).finish() } } impl Point { fn origin() -> Self { Self::new(0, 0) } fn new(x: isize, y: isize) -> Self { Self::from((x, y)) } } async fn run_program( mut program: Vec<isize>, input: impl Stream<Item = isize>, mut output_fn: impl FnMut(isize), ) -> Result<Vec<isize>, anyhow::Error> { pin!(input); let mut instruction_pointer = 0; let mut relative_base = 0; loop { let opcode = usize::try_from(program[instruction_pointer]) .map_err(|_| anyhow!("Found a negative integer where an opcode was expected"))?; let parameter_modes = get_parameter_modes(opcode)?; let parameter_mode_of = |param: usize| { parameter_modes .get(param) .unwrap_or(&ParameterModes::Position) }; let mut get_param = |param: usize, need_write: bool| { let param_value = program .get(instruction_pointer + param + 1) .copied() .ok_or(anyhow!("Parameter not found"))?; let param_mode = parameter_mode_of(param); if need_write { ensure!( [ParameterModes::Position, ParameterModes::Relative].contains(param_mode), "Invalid argument for opcode {}: {}", opcode, param_value ); } Ok(match param_mode { ParameterModes::Position | ParameterModes::Relative => { let raw_idx = if param_mode == &ParameterModes::Relative { relative_base + param_value } else { param_value }; let idx = usize::try_from(raw_idx).map_err(|_| { anyhow!( "The program is attempting to access a negative index: {}", raw_idx ) })?; if idx >= program.len() { program.resize_with(idx + 1, || 0); } if !need_write { program[idx] } else { raw_idx } } ParameterModes::Immediate => param_value, }) }; // x % 100 gets the last 2 digits of a number, // no matter how long it is. match opcode % 100 { 1 | 2 | 7 | 8 => { let (x, y, result_idx) = ( get_param(0, false)?, get_param(1, false)?, get_param(2, true)? as usize, ); match opcode % 100 { 1 => program[result_idx] = x + y, 2 => program[result_idx] = x * y, 7 => program[result_idx] = (x < y) as isize, 8 => program[result_idx] = (x == y) as isize, _ => unsafe { std::hint::unreachable_unchecked() }, } instruction_pointer += 4; } 5 | 6 => { let (checked_value, jump_point) = ( get_param(0, false)?, usize::try_from(get_param(1, false)?).map_err(|_| { anyhow!("Found a negative integer where a jump point was expected") })?, ); let should_jump = match opcode % 100 { 5 => checked_value != 0, 6 => checked_value == 0, _ => unsafe { std::hint::unreachable_unchecked() }, }; if should_jump { instruction_pointer = jump_point; } else { instruction_pointer += 3; } } 3 | 4 | 9 => { match opcode % 100 { 3 => { let input = input .next() .await .ok_or(anyhow!("Found an input opcode but no input was provided"))?; let input_storage = get_param(0, true)? 
as usize; program[input_storage] = input; } 4 => output_fn(get_param(0, false)?), 9 => relative_base += get_param(0, false)?, _ => unsafe { std::hint::unreachable_unchecked() }, } instruction_pointer += 2; } 99 => return Ok(program), op => bail!("Encountered an unknown opcode: {}", op), } } } fn get_parameter_modes(opcode: usize) -> Result<Vec<ParameterModes>, anyhow::Error> { opcode .digits() .rev() .skip(2) .map(ParameterModes::try_from) .try_collect() } #[derive(Debug, PartialEq, Eq, Clone, Copy)] enum ParameterModes { Position, Immediate, Relative, } impl TryFrom<u8> for ParameterModes { type Error = anyhow::Error; fn try_from(value: u8) -> Result<Self, Self::Error> { Ok(match value { 0 => Self::Position, 1 => Self::Immediate, 2 => Self::Relative, _ => bail!("Unknown parameter mode: {}", value), }) } } fn parse_input(program_str: &str) -> Result<Vec<isize>, anyhow::Error> { program_str .split(",") .map(|num_str| { num_str .trim() .parse() .map_err(|_| anyhow!("Could not parse number in program as isize: '{}'", num_str)) }) .try_collect() }
} #[derive(Clone, Copy, PartialEq, Eq)] enum Direction {
random_line_split
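parse_input trims each comma-separated token before parsing, so stray whitespace in the program text is tolerated while non-numeric tokens still fail with a descriptive error. A small test sketch:

// Sketch: exercising parse_input from the file above.
#[test]
fn parse_input_trims_tokens() -> Result<(), anyhow::Error> {
    assert_eq!(parse_input("1002, 4, 3,4,33")?, vec![1002, 4, 3, 4, 33]);
    assert!(parse_input("1002,x").is_err());
    Ok(())
}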
env.rs
use config::{self, Alias, ClickConfig, Config}; use error::KubeError; use kobj::{KObj, ObjType}; use kube::{ ConfigMapList, DeploymentList, JobList, Kluster, NodeList, PodList, ReplicaSetList, SecretList, ServiceList, StatefulSetList, }; use ansi_term::Colour::{Blue, Green, Red, Yellow}; use rustyline::config as rustyconfig; use tempdir::TempDir; use std::collections::BTreeMap; use std::fmt; use std::path::PathBuf; use std::process::Child; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::{Arc, Mutex}; pub enum LastList { None, PodList(PodList), NodeList(NodeList), DeploymentList(DeploymentList), ServiceList(ServiceList), ReplicaSetList(ReplicaSetList), StatefulSetList(StatefulSetList), ConfigMapList(ConfigMapList), SecretList(SecretList), JobList(JobList), } // TODO: Maybe make less of this pub /// An ongoing port forward pub struct PortForward { pub child: Child, pub pod: String, pub ports: Vec<String>, pub output: Arc<Mutex<String>>, } #[derive(Debug)] pub struct ExpandedAlias<'a> { pub expansion: Option<&'a Alias>, pub rest: &'a str, } #[derive(Debug, PartialEq)] pub enum
{ Single(KObj), Range(Vec<KObj>), None, } /// Keep track of our repl environment pub struct Env { pub config: Config, pub click_config: ClickConfig, click_config_path: PathBuf, pub quit: bool, pub need_new_editor: bool, pub kluster: Option<Kluster>, pub namespace: Option<String>, current_selection: ObjectSelection, last_objs: LastList, pub ctrlcbool: Arc<AtomicBool>, port_forwards: Vec<PortForward>, pub prompt: String, range_str: Option<String>, pub tempdir: std::io::Result<TempDir>, } lazy_static! { static ref CTC_BOOL: Arc<AtomicBool> = { let b = Arc::new(AtomicBool::new(false)); let r = b.clone(); ctrlc::set_handler(move || { r.store(true, Ordering::SeqCst); }) .expect("Error setting Ctrl-C handler"); b }; } impl Env { pub fn new(config: Config, click_config: ClickConfig, click_config_path: PathBuf) -> Env { let namespace = click_config.namespace.clone(); let context = click_config.context.clone(); let mut env = Env { config, click_config, click_config_path, quit: false, need_new_editor: false, kluster: None, namespace, current_selection: ObjectSelection::None, last_objs: LastList::None, ctrlcbool: CTC_BOOL.clone(), port_forwards: Vec::new(), prompt: format!( "[{}] [{}] [{}] > ", Red.paint("none"), Green.paint("none"), Yellow.paint("none") ), range_str: None, tempdir: TempDir::new("click"), }; env.set_context(context.as_deref()); env } pub fn current_selection(&self) -> &ObjectSelection { &self.current_selection } pub fn save_click_config(&mut self) { self.click_config.namespace = self.namespace.clone(); self.click_config.context = self.kluster.as_ref().map(|k| k.name.clone()); self.click_config .save_to_file(self.click_config_path.as_path().to_str().unwrap()) .unwrap(); } // sets the prompt string based on current settings fn set_prompt(&mut self) { self.prompt = format!( "[{}] [{}] [{}] > ", if let Some(ref k) = self.kluster { Red.bold().paint(k.name.as_str()) } else { Red.paint("none") }, if let Some(ref n) = self.namespace { Green.bold().paint(n.as_str()) } else { Green.paint("none") }, match self.current_selection { ObjectSelection::Single(ref obj) => obj.prompt_str(), ObjectSelection::Range(_) => Blue.paint(self.range_str.as_ref().unwrap()), ObjectSelection::None => Yellow.paint("none"), } ); } pub fn get_rustyline_conf(&self) -> rustyconfig::Config { self.click_config.get_rustyline_conf() } pub fn get_contexts(&self) -> &BTreeMap<String, ::config::ContextConf> { &self.config.contexts } pub fn set_context(&mut self, ctx: Option<&str>) { if let Some(cname) = ctx { self.kluster = match self.config.cluster_for_context(cname, &self.click_config) { Ok(k) => Some(k), Err(e) => { println!( "[WARN] Couldn't find/load context {}, now no current context. 
\ Error: {}", cname, e ); None } }; self.save_click_config(); self.set_prompt(); } } pub fn set_namespace(&mut self, namespace: Option<&str>) { let mut do_clear = false; if let (&Some(ref my_ns), Some(new_ns)) = (&self.namespace, namespace) { if my_ns.as_str() != new_ns { do_clear = true; // need to use bool since self is borrowed here } } if do_clear { self.clear_current(); } self.namespace = namespace.map(|n| n.to_owned()); self.set_prompt(); } pub fn set_editor(&mut self, editor: Option<&str>) { self.click_config.editor = editor.map(|s| s.to_string()); } pub fn set_terminal(&mut self, terminal: Option<&str>) { self.click_config.terminal = terminal.map(|s| s.to_string()); } pub fn set_completion_type(&mut self, comptype: config::CompletionType) { self.click_config.completiontype = comptype; self.need_new_editor = true; } pub fn set_edit_mode(&mut self, editmode: config::EditMode) { self.click_config.editmode = editmode; self.need_new_editor = true; } // Return the current position of the specified alias in the Vec, or None if it's not there fn alias_position(&self, alias: &str) -> Option<usize> { self.click_config .aliases .iter() .position(|a| a.alias == *alias) } pub fn add_alias(&mut self, alias: Alias) { self.remove_alias(&alias.alias); self.click_config.aliases.push(alias); self.save_click_config(); } pub fn remove_alias(&mut self, alias: &str) -> bool { match self.alias_position(alias) { Some(p) => { self.click_config.aliases.remove(p); self.save_click_config(); true } None => false, } } pub fn set_lastlist(&mut self, list: LastList) { self.last_objs = list; } pub fn clear_current(&mut self) { self.current_selection = ObjectSelection::None; self.range_str = None; self.set_prompt(); } /// get the item from the last list at the specified index pub fn item_at(&self, index: usize) -> Option<KObj> { match self.last_objs { LastList::None => { println!("No active object list"); None } LastList::PodList(ref pl) => pl.items.get(index).map(|pod| { let containers = pod .spec .containers .iter() .map(|cspec| cspec.name.clone()) .collect(); KObj::from_metadata(&pod.metadata, ObjType::Pod { containers }) }), LastList::NodeList(ref nl) => nl.items.get(index).map(|n| KObj { name: n.metadata.name.clone(), namespace: None, typ: ObjType::Node, }), LastList::DeploymentList(ref dl) => dl .items .get(index) .map(|dep| KObj::from_metadata(&dep.metadata, ObjType::Deployment)), LastList::ServiceList(ref sl) => sl .items .get(index) .map(|service| KObj::from_metadata(&service.metadata, ObjType::Service)), LastList::ReplicaSetList(ref rsl) => rsl .items .get(index) .and_then(|replicaset| KObj::from_value(replicaset, ObjType::ReplicaSet)), LastList::StatefulSetList(ref stfs) => stfs .items .get(index) .and_then(|statefulset| KObj::from_value(statefulset, ObjType::StatefulSet)), LastList::ConfigMapList(ref cml) => cml .items .get(index) .and_then(|cm| KObj::from_value(cm, ObjType::ConfigMap)), LastList::SecretList(ref sl) => sl .items .get(index) .and_then(|secret| KObj::from_value(secret, ObjType::Secret)), LastList::JobList(ref jl) => jl .items .get(index) .and_then(|job| KObj::from_value(job, ObjType::Job)), } } pub fn set_current(&mut self, num: usize) { self.current_selection = match self.item_at(num) { Some(obj) => ObjectSelection::Single(obj), None => ObjectSelection::None, }; self.range_str = None; self.set_prompt(); } pub fn set_range(&mut self, range: Vec<KObj>) { let range_str = if range.is_empty() { "Empty range".to_string() } else { let mut r = format!("{} {}", range.len(), 
range.get(0).unwrap().type_str()); if range.len() > 1 { r.push('s'); } r.push_str(" selected"); r }; self.current_selection = ObjectSelection::Range(range); self.range_str = Some(range_str); self.set_prompt(); } pub fn current_pod(&self) -> Option<&KObj> { match self.current_selection { ObjectSelection::Single(ref obj) => match obj.typ { ObjType::Pod { .. } => Some(obj), _ => None, }, _ => None, } } pub fn run_on_kluster<F, R>(&self, f: F) -> Option<R> where F: FnOnce(&Kluster) -> Result<R, KubeError>, { match self.kluster { Some(ref k) => match f(k) { Ok(r) => Some(r), Err(e) => { println!("{}", e); None } }, None => { println!("Need to have an active context"); None } } } /// Add a new task for the env to keep track of pub fn add_port_forward(&mut self, pf: PortForward) { self.port_forwards.push(pf); } pub fn get_port_forwards(&self) -> std::slice::Iter<PortForward> { self.port_forwards.iter() } pub fn get_port_forward(&mut self, i: usize) -> Option<&mut PortForward> { self.port_forwards.get_mut(i) } pub fn stop_port_forward(&mut self, i: usize) -> Result<(), std::io::Error> { if i < self.port_forwards.len() { let mut pf = self.port_forwards.remove(i); pf.child.kill() } else { Ok(()) } } pub fn stop_all_forwards(&mut self) { for pf in self.port_forwards.iter_mut() { pf.child.kill().unwrap(); } self.port_forwards = Vec::new(); } /// Try and expand alias. /// This function looks at the first word (whitespace delimited) of the /// line and checks if it matches an alias; if it does, it returns an ExpandedAlias with the /// expansion and the rest of the line, otherwise the expansion field will be None and rest will /// contain the whole line pub fn try_expand_alias<'a>( &'a self, line: &'a str, prev_word: Option<&'a str>, ) -> ExpandedAlias<'a> { let pos = line.find(char::is_whitespace).unwrap_or_else(|| line.len()); let word = &line[0..pos]; // don't expand if prev_word is Some, and is equal to my word // this means an alias maps to itself, and we want to stop expanding // to avoid an infinite loop if prev_word.filter(|pw| *pw == word).is_none() { for alias in self.click_config.aliases.iter() { if word == alias.alias.as_str() { return ExpandedAlias { expansion: Some(alias), rest: &line[pos..], }; } } } ExpandedAlias { expansion: None, rest: line, } } } impl fmt::Display for Env { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!( f, "Env {{ Current Context: {} Available Contexts: {:?} Kubernetes Config File(s): {} Completion Type: {} Edit Mode: {} Editor: {} Terminal: {} Range Separator: {} }}", if let Some(ref k) = self.kluster { Green.bold().paint(k.name.as_str()) } else { Green.paint("none") }, self.config.contexts.keys(), Green.paint(&self.config.source_file), { let ctstr: String = (&self.click_config.completiontype).into(); Green.paint(ctstr) }, { let emstr: String = (&self.click_config.editmode).into(); Green.paint(emstr) }, Green.paint( self.click_config .editor .as_ref() .unwrap_or(&"<unset, will use $EDITOR>".to_owned()) ), Green.paint( self.click_config .terminal .as_ref() .unwrap_or(&"<unset, will use xterm>".to_owned()) ), Green.paint(&self.click_config.range_separator), ) } } #[cfg(test)] mod tests { use super::*; use config::get_test_config; #[test] fn try_expand_alias() { let mut cc = ClickConfig::default(); let pn_alias = Alias { alias: "pn".to_string(), expanded: "pods --sort node".to_string(), }; let x_alias = Alias { alias: "x".to_string(), expanded: "xpand".to_string(), }; cc.aliases.push(pn_alias.clone()); cc.aliases.push(x_alias.clone()); let env = 
Env::new(get_test_config(), cc, PathBuf::from("/tmp/click.config")); let exp1 = env.try_expand_alias("pn", None); assert_eq!(exp1.expansion, Some(&pn_alias)); assert_eq!(exp1.rest, ""); let exp2 = env.try_expand_alias("x", None); assert_eq!(exp2.expansion, Some(&x_alias)); assert_eq!(exp2.rest, ""); let exp2 = env.try_expand_alias("x rest is this", None); assert_eq!(exp2.expansion, Some(&x_alias)); assert_eq!(exp2.rest, " rest is this"); let exp3 = env.try_expand_alias("no alias", None); assert_eq!(exp3.expansion, None); assert_eq!(exp3.rest, "no alias"); let exp4 = env.try_expand_alias("x", Some("x")); assert_eq!(exp4.expansion, None); assert_eq!(exp4.rest, "x"); } }
ObjectSelection
identifier_name
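try_expand_alias hands back the expansion and the untouched remainder of the line (including the delimiting whitespace), leaving concatenation to the caller. A sketch of that splice, using the pn alias from the test above (`splice` is an illustrative helper, not part of Env):

// Sketch: splicing an expansion back into a command line.
// With pn -> "pods --sort node", expanding "pn -r" yields
// expansion "pods --sort node" and rest " -r"; the concatenation
// below produces "pods --sort node -r".
fn splice(result: &ExpandedAlias, original: &str) -> String {
    match result.expansion {
        Some(alias) => format!("{}{}", alias.expanded, result.rest),
        None => original.to_string(),
    }
}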
env.rs
use config::{self, Alias, ClickConfig, Config}; use error::KubeError; use kobj::{KObj, ObjType}; use kube::{ ConfigMapList, DeploymentList, JobList, Kluster, NodeList, PodList, ReplicaSetList, SecretList, ServiceList, StatefulSetList, }; use ansi_term::Colour::{Blue, Green, Red, Yellow}; use rustyline::config as rustyconfig; use tempdir::TempDir; use std::collections::BTreeMap; use std::fmt; use std::path::PathBuf; use std::process::Child; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::{Arc, Mutex}; pub enum LastList { None, PodList(PodList), NodeList(NodeList), DeploymentList(DeploymentList), ServiceList(ServiceList), ReplicaSetList(ReplicaSetList), StatefulSetList(StatefulSetList), ConfigMapList(ConfigMapList), SecretList(SecretList), JobList(JobList), } // TODO: Maybe make less of this pub /// An ongoing port forward pub struct PortForward { pub child: Child, pub pod: String, pub ports: Vec<String>, pub output: Arc<Mutex<String>>, } #[derive(Debug)] pub struct ExpandedAlias<'a> { pub expansion: Option<&'a Alias>, pub rest: &'a str, } #[derive(Debug, PartialEq)] pub enum ObjectSelection { Single(KObj), Range(Vec<KObj>), None, } /// Keep track of our repl environment pub struct Env { pub config: Config, pub click_config: ClickConfig, click_config_path: PathBuf, pub quit: bool, pub need_new_editor: bool, pub kluster: Option<Kluster>, pub namespace: Option<String>, current_selection: ObjectSelection, last_objs: LastList, pub ctrlcbool: Arc<AtomicBool>, port_forwards: Vec<PortForward>, pub prompt: String, range_str: Option<String>, pub tempdir: std::io::Result<TempDir>, } lazy_static! { static ref CTC_BOOL: Arc<AtomicBool> = { let b = Arc::new(AtomicBool::new(false)); let r = b.clone(); ctrlc::set_handler(move || { r.store(true, Ordering::SeqCst); }) .expect("Error setting Ctrl-C handler"); b }; } impl Env { pub fn new(config: Config, click_config: ClickConfig, click_config_path: PathBuf) -> Env { let namespace = click_config.namespace.clone(); let context = click_config.context.clone(); let mut env = Env { config, click_config, click_config_path, quit: false, need_new_editor: false, kluster: None, namespace, current_selection: ObjectSelection::None, last_objs: LastList::None, ctrlcbool: CTC_BOOL.clone(), port_forwards: Vec::new(), prompt: format!( "[{}] [{}] [{}] > ", Red.paint("none"), Green.paint("none"), Yellow.paint("none") ), range_str: None, tempdir: TempDir::new("click"), }; env.set_context(context.as_deref()); env } pub fn current_selection(&self) -> &ObjectSelection { &self.current_selection } pub fn save_click_config(&mut self) { self.click_config.namespace = self.namespace.clone(); self.click_config.context = self.kluster.as_ref().map(|k| k.name.clone()); self.click_config .save_to_file(self.click_config_path.as_path().to_str().unwrap()) .unwrap(); } // sets the prompt string based on current settings fn set_prompt(&mut self) { self.prompt = format!( "[{}] [{}] [{}] > ", if let Some(ref k) = self.kluster { Red.bold().paint(k.name.as_str()) } else { Red.paint("none") }, if let Some(ref n) = self.namespace { Green.bold().paint(n.as_str()) } else { Green.paint("none") }, match self.current_selection { ObjectSelection::Single(ref obj) => obj.prompt_str(), ObjectSelection::Range(_) => Blue.paint(self.range_str.as_ref().unwrap()), ObjectSelection::None => Yellow.paint("none"), } ); } pub fn get_rustyline_conf(&self) -> rustyconfig::Config { self.click_config.get_rustyline_conf() } pub fn get_contexts(&self) -> &BTreeMap<String, ::config::ContextConf> { 
&self.config.contexts } pub fn set_context(&mut self, ctx: Option<&str>) { if let Some(cname) = ctx { self.kluster = match self.config.cluster_for_context(cname, &self.click_config) { Ok(k) => Some(k), Err(e) => { println!( "[WARN] Couldn't find/load context {}, now no current context. \ Error: {}", cname, e ); None } }; self.save_click_config(); self.set_prompt(); } } pub fn set_namespace(&mut self, namespace: Option<&str>) { let mut do_clear = false; if let (&Some(ref my_ns), Some(new_ns)) = (&self.namespace, namespace) { if my_ns.as_str() != new_ns { do_clear = true; // need to use bool since self is borrowed here } } if do_clear { self.clear_current(); } self.namespace = namespace.map(|n| n.to_owned()); self.set_prompt(); } pub fn set_editor(&mut self, editor: Option<&str>) { self.click_config.editor = editor.map(|s| s.to_string()); } pub fn set_terminal(&mut self, terminal: Option<&str>) { self.click_config.terminal = terminal.map(|s| s.to_string()); } pub fn set_completion_type(&mut self, comptype: config::CompletionType) { self.click_config.completiontype = comptype; self.need_new_editor = true; } pub fn set_edit_mode(&mut self, editmode: config::EditMode) { self.click_config.editmode = editmode; self.need_new_editor = true; } // Return the current position of the specified alias in the Vec, or None if it's not there fn alias_position(&self, alias: &str) -> Option<usize> { self.click_config .aliases .iter() .position(|a| a.alias == *alias) } pub fn add_alias(&mut self, alias: Alias) { self.remove_alias(&alias.alias); self.click_config.aliases.push(alias); self.save_click_config(); } pub fn remove_alias(&mut self, alias: &str) -> bool { match self.alias_position(alias) { Some(p) => { self.click_config.aliases.remove(p); self.save_click_config(); true } None => false, } } pub fn set_lastlist(&mut self, list: LastList) { self.last_objs = list; } pub fn clear_current(&mut self) { self.current_selection = ObjectSelection::None; self.range_str = None; self.set_prompt(); } /// get the item from the last list at the specified index pub fn item_at(&self, index: usize) -> Option<KObj> { match self.last_objs { LastList::None => { println!("No active object list"); None } LastList::PodList(ref pl) => pl.items.get(index).map(|pod| { let containers = pod .spec .containers .iter() .map(|cspec| cspec.name.clone()) .collect(); KObj::from_metadata(&pod.metadata, ObjType::Pod { containers }) }), LastList::NodeList(ref nl) => nl.items.get(index).map(|n| KObj { name: n.metadata.name.clone(), namespace: None, typ: ObjType::Node, }), LastList::DeploymentList(ref dl) => dl .items .get(index) .map(|dep| KObj::from_metadata(&dep.metadata, ObjType::Deployment)), LastList::ServiceList(ref sl) => sl .items .get(index) .map(|service| KObj::from_metadata(&service.metadata, ObjType::Service)), LastList::ReplicaSetList(ref rsl) => rsl .items .get(index) .and_then(|replicaset| KObj::from_value(replicaset, ObjType::ReplicaSet)), LastList::StatefulSetList(ref stfs) => stfs .items .get(index) .and_then(|statefulset| KObj::from_value(statefulset, ObjType::StatefulSet)), LastList::ConfigMapList(ref cml) => cml .items .get(index) .and_then(|cm| KObj::from_value(cm, ObjType::ConfigMap)), LastList::SecretList(ref sl) => sl .items .get(index) .and_then(|secret| KObj::from_value(secret, ObjType::Secret)), LastList::JobList(ref jl) => jl .items .get(index) .and_then(|job| KObj::from_value(job, ObjType::Job)), } } pub fn set_current(&mut self, num: usize) { self.current_selection = match self.item_at(num) { Some(obj) => 
ObjectSelection::Single(obj), None => ObjectSelection::None, }; self.range_str = None; self.set_prompt(); } pub fn set_range(&mut self, range: Vec<KObj>) { let range_str = if range.is_empty() { "Empty range".to_string() } else { let mut r = format!("{} {}", range.len(), range.get(0).unwrap().type_str()); if range.len() > 1 { r.push('s'); } r.push_str(" selected"); r }; self.current_selection = ObjectSelection::Range(range); self.range_str = Some(range_str); self.set_prompt(); } pub fn current_pod(&self) -> Option<&KObj> { match self.current_selection { ObjectSelection::Single(ref obj) => match obj.typ { ObjType::Pod {.. } => Some(obj), _ => None, }, _ => None, } } pub fn run_on_kluster<F, R>(&self, f: F) -> Option<R> where F: FnOnce(&Kluster) -> Result<R, KubeError>, { match self.kluster { Some(ref k) => match f(k) { Ok(r) => Some(r), Err(e) => { println!("{}", e); None } }, None => { println!("Need to have an active context"); None }
} /// Add a new task for the env to keep track of pub fn add_port_forward(&mut self, pf: PortForward) { self.port_forwards.push(pf); } pub fn get_port_forwards(&self) -> std::slice::Iter<PortForward> { self.port_forwards.iter() } pub fn get_port_forward(&mut self, i: usize) -> Option<&mut PortForward> { self.port_forwards.get_mut(i) } pub fn stop_port_forward(&mut self, i: usize) -> Result<(), std::io::Error> { if i < self.port_forwards.len() { let mut pf = self.port_forwards.remove(i); pf.child.kill() } else { Ok(()) } } pub fn stop_all_forwards(&mut self) { for pf in self.port_forwards.iter_mut() { pf.child.kill().unwrap(); } self.port_forwards = Vec::new(); } /// Try and expand alias. /// This function looks at the first word (whitespace delimited) of the /// line, checks if it matches an alias, if it does it returns and ExpandedAlias with the /// expansion and the rest of the line, otherwise the expansion field will be None and rest will /// contain the whole line pub fn try_expand_alias<'a>( &'a self, line: &'a str, prev_word: Option<&'a str>, ) -> ExpandedAlias<'a> { let pos = line.find(char::is_whitespace).unwrap_or_else(|| line.len()); let word = &line[0..pos]; // don't expand if prev_word is Some, and is equal to my word // this means an alias maps to itself, and we want to stop expanding // to avoid an infinite loop if prev_word.filter(|pw| *pw == word).is_none() { for alias in self.click_config.aliases.iter() { if word == alias.alias.as_str() { return ExpandedAlias { expansion: Some(alias), rest: &line[pos..], }; } } } ExpandedAlias { expansion: None, rest: line, } } } impl fmt::Display for Env { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!( f, "Env {{ Current Context: {} Availble Contexts: {:?} Kubernetes Config File(s): {} Completion Type: {} Edit Mode: {} Editor: {} Terminal: {} Range Separator: {} }}", if let Some(ref k) = self.kluster { Green.bold().paint(k.name.as_str()) } else { Green.paint("none") }, self.config.contexts.keys(), Green.paint(&self.config.source_file), { let ctstr: String = (&self.click_config.completiontype).into(); Green.paint(ctstr) }, { let emstr: String = (&self.click_config.editmode).into(); Green.paint(emstr) }, Green.paint( self.click_config .editor .as_ref() .unwrap_or(&"<unset, will use $EDITOR>".to_owned()) ), Green.paint( self.click_config .terminal .as_ref() .unwrap_or(&"<unset, will use xterm>".to_owned()) ), Green.paint(&self.click_config.range_separator), ) } } #[cfg(test)] mod tests { use super::*; use config::get_test_config; #[test] fn try_expand_alias() { let mut cc = ClickConfig::default(); let pn_alias = Alias { alias: "pn".to_string(), expanded: "pods --sort node".to_string(), }; let x_alias = Alias { alias: "x".to_string(), expanded: "xpand".to_string(), }; cc.aliases.push(pn_alias.clone()); cc.aliases.push(x_alias.clone()); let env = Env::new(get_test_config(), cc, PathBuf::from("/tmp/click.config")); let exp1 = env.try_expand_alias("pn", None); assert_eq!(exp1.expansion, Some(&pn_alias)); assert_eq!(exp1.rest, ""); let exp2 = env.try_expand_alias("x", None); assert_eq!(exp2.expansion, Some(&x_alias)); assert_eq!(exp2.rest, ""); let exp2 = env.try_expand_alias("x rest is this", None); assert_eq!(exp2.expansion, Some(&x_alias)); assert_eq!(exp2.rest, " rest is this"); let exp3 = env.try_expand_alias("no alias", None); assert_eq!(exp3.expansion, None); assert_eq!(exp3.rest, "no alias"); let exp4 = env.try_expand_alias("x", Some("x")); assert_eq!(exp4.expansion, None); assert_eq!(exp4.rest, "x"); } }
}
random_line_split
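Each record in this dump pairs a source file split into a prefix and a suffix with the held-out middle span and a fim_type label (random_line_split in the record above, where the middle is a single brace line). The apparent column layout (file name, then prefix, suffix, middle, fim_type) is inferred from the records themselves; the invariant is that the original file is always prefix + middle + suffix. A minimal sketch of that reconstruction, with a hypothetical FimRow type that is not dataset tooling:

// Hypothetical row type mirroring the apparent columns of this dump; it
// only demonstrates the invariant that re-inserting the held-out middle
// restores the original file.
struct FimRow {
    file_name: String,
    prefix: String,
    suffix: String,
    middle: String,
    fim_type: String, // "random_line_split", "identifier_body", ...
}

impl FimRow {
    /// The original source is always `prefix + middle + suffix`.
    fn reconstruct(&self) -> String {
        format!("{}{}{}", self.prefix, self.middle, self.suffix)
    }
}

fn main() {
    let row = FimRow {
        file_name: "env.rs".to_string(),
        prefix: "fn answer() -> u32 {\n".to_string(),
        middle: "    42\n".to_string(),
        suffix: "}\n".to_string(),
        fim_type: "random_line_split".to_string(),
    };
    assert_eq!(row.reconstruct(), "fn answer() -> u32 {\n    42\n}\n");
    println!("{} ({})", row.file_name, row.fim_type);
}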
env.rs
use config::{self, Alias, ClickConfig, Config}; use error::KubeError; use kobj::{KObj, ObjType}; use kube::{ ConfigMapList, DeploymentList, JobList, Kluster, NodeList, PodList, ReplicaSetList, SecretList, ServiceList, StatefulSetList, }; use ansi_term::Colour::{Blue, Green, Red, Yellow}; use rustyline::config as rustyconfig; use tempdir::TempDir; use std::collections::BTreeMap; use std::fmt; use std::path::PathBuf; use std::process::Child; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::{Arc, Mutex}; pub enum LastList { None, PodList(PodList), NodeList(NodeList), DeploymentList(DeploymentList), ServiceList(ServiceList), ReplicaSetList(ReplicaSetList), StatefulSetList(StatefulSetList), ConfigMapList(ConfigMapList), SecretList(SecretList), JobList(JobList), } // TODO: Maybe make less of this pub /// An ongoing port forward pub struct PortForward { pub child: Child, pub pod: String, pub ports: Vec<String>, pub output: Arc<Mutex<String>>, } #[derive(Debug)] pub struct ExpandedAlias<'a> { pub expansion: Option<&'a Alias>, pub rest: &'a str, } #[derive(Debug, PartialEq)] pub enum ObjectSelection { Single(KObj), Range(Vec<KObj>), None, } /// Keep track of our repl environment pub struct Env { pub config: Config, pub click_config: ClickConfig, click_config_path: PathBuf, pub quit: bool, pub need_new_editor: bool, pub kluster: Option<Kluster>, pub namespace: Option<String>, current_selection: ObjectSelection, last_objs: LastList, pub ctrlcbool: Arc<AtomicBool>, port_forwards: Vec<PortForward>, pub prompt: String, range_str: Option<String>, pub tempdir: std::io::Result<TempDir>, } lazy_static! { static ref CTC_BOOL: Arc<AtomicBool> = { let b = Arc::new(AtomicBool::new(false)); let r = b.clone(); ctrlc::set_handler(move || { r.store(true, Ordering::SeqCst); }) .expect("Error setting Ctrl-C handler"); b }; } impl Env { pub fn new(config: Config, click_config: ClickConfig, click_config_path: PathBuf) -> Env { let namespace = click_config.namespace.clone(); let context = click_config.context.clone(); let mut env = Env { config, click_config, click_config_path, quit: false, need_new_editor: false, kluster: None, namespace, current_selection: ObjectSelection::None, last_objs: LastList::None, ctrlcbool: CTC_BOOL.clone(), port_forwards: Vec::new(), prompt: format!( "[{}] [{}] [{}] > ", Red.paint("none"), Green.paint("none"), Yellow.paint("none") ), range_str: None, tempdir: TempDir::new("click"), }; env.set_context(context.as_deref()); env } pub fn current_selection(&self) -> &ObjectSelection { &self.current_selection } pub fn save_click_config(&mut self) { self.click_config.namespace = self.namespace.clone(); self.click_config.context = self.kluster.as_ref().map(|k| k.name.clone()); self.click_config .save_to_file(self.click_config_path.as_path().to_str().unwrap()) .unwrap(); } // sets the prompt string based on current settings fn set_prompt(&mut self) { self.prompt = format!( "[{}] [{}] [{}] > ", if let Some(ref k) = self.kluster { Red.bold().paint(k.name.as_str()) } else { Red.paint("none") }, if let Some(ref n) = self.namespace { Green.bold().paint(n.as_str()) } else { Green.paint("none") }, match self.current_selection { ObjectSelection::Single(ref obj) => obj.prompt_str(), ObjectSelection::Range(_) => Blue.paint(self.range_str.as_ref().unwrap()), ObjectSelection::None => Yellow.paint("none"), } ); } pub fn get_rustyline_conf(&self) -> rustyconfig::Config { self.click_config.get_rustyline_conf() } pub fn get_contexts(&self) -> &BTreeMap<String, ::config::ContextConf> { 
&self.config.contexts } pub fn set_context(&mut self, ctx: Option<&str>) { if let Some(cname) = ctx { self.kluster = match self.config.cluster_for_context(cname, &self.click_config) { Ok(k) => Some(k), Err(e) => { println!( "[WARN] Couldn't find/load context {}, now no current context. \ Error: {}", cname, e ); None } }; self.save_click_config(); self.set_prompt(); } } pub fn set_namespace(&mut self, namespace: Option<&str>) { let mut do_clear = false; if let (&Some(ref my_ns), Some(new_ns)) = (&self.namespace, namespace) { if my_ns.as_str()!= new_ns { do_clear = true; // need to use bool since self is borrowed here } } if do_clear { self.clear_current(); } self.namespace = namespace.map(|n| n.to_owned()); self.set_prompt(); } pub fn set_editor(&mut self, editor: Option<&str>) { self.click_config.editor = editor.map(|s| s.to_string()); } pub fn set_terminal(&mut self, terminal: Option<&str>) { self.click_config.terminal = terminal.map(|s| s.to_string()); } pub fn set_completion_type(&mut self, comptype: config::CompletionType) { self.click_config.completiontype = comptype; self.need_new_editor = true; } pub fn set_edit_mode(&mut self, editmode: config::EditMode) { self.click_config.editmode = editmode; self.need_new_editor = true; } // Return the current position of the specified alias in the Vec, or None if it's not there fn alias_position(&self, alias: &str) -> Option<usize> { self.click_config .aliases .iter() .position(|a| a.alias == *alias) } pub fn add_alias(&mut self, alias: Alias) { self.remove_alias(&alias.alias); self.click_config.aliases.push(alias); self.save_click_config(); } pub fn remove_alias(&mut self, alias: &str) -> bool { match self.alias_position(alias) { Some(p) => { self.click_config.aliases.remove(p); self.save_click_config(); true } None => false, } } pub fn set_lastlist(&mut self, list: LastList) { self.last_objs = list; } pub fn clear_current(&mut self) { self.current_selection = ObjectSelection::None; self.range_str = None; self.set_prompt(); } /// get the item from the last list at the specified index pub fn item_at(&self, index: usize) -> Option<KObj> { match self.last_objs { LastList::None => { println!("No active object list"); None } LastList::PodList(ref pl) => pl.items.get(index).map(|pod| { let containers = pod .spec .containers .iter() .map(|cspec| cspec.name.clone()) .collect(); KObj::from_metadata(&pod.metadata, ObjType::Pod { containers }) }), LastList::NodeList(ref nl) => nl.items.get(index).map(|n| KObj { name: n.metadata.name.clone(), namespace: None, typ: ObjType::Node, }), LastList::DeploymentList(ref dl) => dl .items .get(index) .map(|dep| KObj::from_metadata(&dep.metadata, ObjType::Deployment)), LastList::ServiceList(ref sl) => sl .items .get(index) .map(|service| KObj::from_metadata(&service.metadata, ObjType::Service)), LastList::ReplicaSetList(ref rsl) => rsl .items .get(index) .and_then(|replicaset| KObj::from_value(replicaset, ObjType::ReplicaSet)), LastList::StatefulSetList(ref stfs) => stfs .items .get(index) .and_then(|statefulset| KObj::from_value(statefulset, ObjType::StatefulSet)), LastList::ConfigMapList(ref cml) => cml .items .get(index) .and_then(|cm| KObj::from_value(cm, ObjType::ConfigMap)), LastList::SecretList(ref sl) => sl .items .get(index) .and_then(|secret| KObj::from_value(secret, ObjType::Secret)), LastList::JobList(ref jl) => jl .items .get(index) .and_then(|job| KObj::from_value(job, ObjType::Job)), } } pub fn set_current(&mut self, num: usize) { self.current_selection = match self.item_at(num) { Some(obj) => 
ObjectSelection::Single(obj), None => ObjectSelection::None, }; self.range_str = None; self.set_prompt(); } pub fn set_range(&mut self, range: Vec<KObj>) { let range_str = if range.is_empty() { "Empty range".to_string() } else { let mut r = format!("{} {}", range.len(), range.get(0).unwrap().type_str()); if range.len() > 1 { r.push('s'); } r.push_str(" selected"); r }; self.current_selection = ObjectSelection::Range(range); self.range_str = Some(range_str); self.set_prompt(); } pub fn current_pod(&self) -> Option<&KObj> { match self.current_selection { ObjectSelection::Single(ref obj) => match obj.typ { ObjType::Pod {.. } => Some(obj), _ => None, }, _ => None, } } pub fn run_on_kluster<F, R>(&self, f: F) -> Option<R> where F: FnOnce(&Kluster) -> Result<R, KubeError>, { match self.kluster { Some(ref k) => match f(k) { Ok(r) => Some(r), Err(e) => { println!("{}", e); None } }, None => { println!("Need to have an active context"); None } } } /// Add a new task for the env to keep track of pub fn add_port_forward(&mut self, pf: PortForward) { self.port_forwards.push(pf); } pub fn get_port_forwards(&self) -> std::slice::Iter<PortForward>
pub fn get_port_forward(&mut self, i: usize) -> Option<&mut PortForward> { self.port_forwards.get_mut(i) } pub fn stop_port_forward(&mut self, i: usize) -> Result<(), std::io::Error> { if i < self.port_forwards.len() { let mut pf = self.port_forwards.remove(i); pf.child.kill() } else { Ok(()) } } pub fn stop_all_forwards(&mut self) { for pf in self.port_forwards.iter_mut() { pf.child.kill().unwrap(); } self.port_forwards = Vec::new(); } /// Try and expand alias. /// This function looks at the first word (whitespace delimited) of the /// line, checks if it matches an alias, if it does it returns and ExpandedAlias with the /// expansion and the rest of the line, otherwise the expansion field will be None and rest will /// contain the whole line pub fn try_expand_alias<'a>( &'a self, line: &'a str, prev_word: Option<&'a str>, ) -> ExpandedAlias<'a> { let pos = line.find(char::is_whitespace).unwrap_or_else(|| line.len()); let word = &line[0..pos]; // don't expand if prev_word is Some, and is equal to my word // this means an alias maps to itself, and we want to stop expanding // to avoid an infinite loop if prev_word.filter(|pw| *pw == word).is_none() { for alias in self.click_config.aliases.iter() { if word == alias.alias.as_str() { return ExpandedAlias { expansion: Some(alias), rest: &line[pos..], }; } } } ExpandedAlias { expansion: None, rest: line, } } } impl fmt::Display for Env { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!( f, "Env {{ Current Context: {} Availble Contexts: {:?} Kubernetes Config File(s): {} Completion Type: {} Edit Mode: {} Editor: {} Terminal: {} Range Separator: {} }}", if let Some(ref k) = self.kluster { Green.bold().paint(k.name.as_str()) } else { Green.paint("none") }, self.config.contexts.keys(), Green.paint(&self.config.source_file), { let ctstr: String = (&self.click_config.completiontype).into(); Green.paint(ctstr) }, { let emstr: String = (&self.click_config.editmode).into(); Green.paint(emstr) }, Green.paint( self.click_config .editor .as_ref() .unwrap_or(&"<unset, will use $EDITOR>".to_owned()) ), Green.paint( self.click_config .terminal .as_ref() .unwrap_or(&"<unset, will use xterm>".to_owned()) ), Green.paint(&self.click_config.range_separator), ) } } #[cfg(test)] mod tests { use super::*; use config::get_test_config; #[test] fn try_expand_alias() { let mut cc = ClickConfig::default(); let pn_alias = Alias { alias: "pn".to_string(), expanded: "pods --sort node".to_string(), }; let x_alias = Alias { alias: "x".to_string(), expanded: "xpand".to_string(), }; cc.aliases.push(pn_alias.clone()); cc.aliases.push(x_alias.clone()); let env = Env::new(get_test_config(), cc, PathBuf::from("/tmp/click.config")); let exp1 = env.try_expand_alias("pn", None); assert_eq!(exp1.expansion, Some(&pn_alias)); assert_eq!(exp1.rest, ""); let exp2 = env.try_expand_alias("x", None); assert_eq!(exp2.expansion, Some(&x_alias)); assert_eq!(exp2.rest, ""); let exp2 = env.try_expand_alias("x rest is this", None); assert_eq!(exp2.expansion, Some(&x_alias)); assert_eq!(exp2.rest, " rest is this"); let exp3 = env.try_expand_alias("no alias", None); assert_eq!(exp3.expansion, None); assert_eq!(exp3.rest, "no alias"); let exp4 = env.try_expand_alias("x", Some("x")); assert_eq!(exp4.expansion, None); assert_eq!(exp4.rest, "x"); } }
{ self.port_forwards.iter() }
identifier_body
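The env.rs code in this record implements one-step alias expansion: only the first whitespace-delimited word of the line is matched, and expansion is suppressed when the previous round already produced the same word, so an alias that maps to itself cannot loop forever. A self-contained sketch of that rule; the simplified Alias type and the expand_once helper are illustrative stand-ins, not the real click types:

#[derive(Debug, Clone)]
struct Alias {
    alias: String,
    expanded: String,
}

fn expand_once<'a>(
    aliases: &'a [Alias],
    line: &'a str,
    prev_word: Option<&str>,
) -> (Option<&'a Alias>, &'a str) {
    // Match only the first whitespace-delimited word of the line.
    let pos = line.find(char::is_whitespace).unwrap_or(line.len());
    let word = &line[..pos];
    // Refuse to expand when the previous round already produced this word:
    // a self-referential alias would otherwise expand forever.
    if prev_word != Some(word) {
        if let Some(a) = aliases.iter().find(|a| a.alias == word) {
            return (Some(a), &line[pos..]);
        }
    }
    (None, line)
}

fn main() {
    let aliases = vec![Alias {
        alias: "pn".to_string(),
        expanded: "pods --sort node".to_string(),
    }];
    let (exp, rest) = expand_once(&aliases, "pn -n kube-system", None);
    assert_eq!(exp.map(|a| a.expanded.as_str()), Some("pods --sort node"));
    assert_eq!(rest, " -n kube-system");
    // Self-referential case: the prev_word guard stops a second expansion.
    let (exp2, rest2) = expand_once(&aliases, "pn", Some("pn"));
    assert!(exp2.is_none());
    assert_eq!(rest2, "pn");
}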
lib.rs
fn baseref_and_options( refname: &str, ) -> josh::JoshResult<(String, String, Vec<String>)> { let mut split = refname.splitn(2, '%'); let push_to = split.next().ok_or(josh::josh_error("no next"))?.to_owned(); let options = if let Some(options) = split.next() { options.split(',').map(|x| x.to_string()).collect() } else { vec![] }; let mut baseref = push_to.to_owned(); if baseref.starts_with("refs/for") { baseref = baseref.replacen("refs/for", "refs/heads", 1) } if baseref.starts_with("refs/drafts") { baseref = baseref.replacen("refs/drafts", "refs/heads", 1) } return Ok((baseref, push_to, options)); } #[derive(serde::Serialize, serde::Deserialize, Debug)] pub struct RepoUpdate { pub refs: std::collections::HashMap<String, (String, String)>, pub remote_url: String, pub username: String, pub password: HashedPassword, pub port: String, pub filter_spec: String, pub base_ns: String, pub git_ns: String, pub git_dir: String, } #[tracing::instrument(skip(credential_store))] pub fn process_repo_update( credential_store: std::sync::Arc<std::sync::RwLock<CredentialStore>>, repo_update: RepoUpdate, ) -> josh::JoshResult<String> { let mut resp = String::new(); let p = std::path::PathBuf::from(&repo_update.git_dir) .join("refs/namespaces") .join(&repo_update.git_ns) .join("push_options"); let push_options_string = std::fs::read_to_string(p)?; let push_options: Vec<&str> = push_options_string.split("\n").collect(); for (refname, (old, new)) in repo_update.refs.iter() { tracing::debug!("REPO_UPDATE env ok"); let transaction = josh::cache::Transaction::open( &std::path::Path::new(&repo_update.git_dir), )?; let old = git2::Oid::from_str(old)?; let (baseref, push_to, options) = baseref_and_options(refname)?; let josh_merge = push_options.contains(&"merge"); tracing::debug!("push options: {:?}", push_options); tracing::debug!("josh-merge: {:?}", josh_merge); let old = if old == git2::Oid::zero() { let rev = format!("refs/namespaces/{}/{}", repo_update.git_ns, &baseref); let oid = if let Ok(x) = transaction.repo().revparse_single(&rev) { x.id() } else { old }; tracing::debug!("push: old oid: {:?}, rev: {:?}", oid, rev); oid } else { tracing::debug!("push: old oid: {:?}, refname: {:?}", old, refname); old }; let unfiltered_old = { let rev = format!( "refs/josh/upstream/{}/{}", repo_update.base_ns, &baseref ); let oid = transaction .repo() .refname_to_id(&rev) .unwrap_or(git2::Oid::zero()); tracing::debug!( "push: unfiltered_old oid: {:?}, rev: {:?}", oid, rev ); oid }; let amends = { let gerrit_changes = format!( "refs/josh/upstream/{}/refs/gerrit_changes/all", repo_update.base_ns, ); let mut amends = std::collections::HashMap::new(); if let Ok(tree) = transaction .repo() .find_reference(&gerrit_changes) .and_then(|x| x.peel_to_commit()) .and_then(|x| x.tree()) { tree.walk(git2::TreeWalkMode::PreOrder, |_, entry| { if let Ok(commit) = transaction.repo().find_commit(entry.id()) { if let Some(id) = josh::get_change_id(&commit) { amends.insert(id, commit.id()); } } git2::TreeWalkResult::Ok })?; } amends }; let filterobj = josh::filter::parse(&repo_update.filter_spec)?; let new_oid = git2::Oid::from_str(&new)?; let backward_new_oid = { tracing::debug!("=== MORE"); tracing::debug!("=== processed_old {:?}", old); match josh::history::unapply_filter( &transaction, filterobj, unfiltered_old, old, new_oid, josh_merge, &amends, )? 
{ josh::UnapplyResult::Done(rewritten) => { tracing::debug!("rewritten"); rewritten } josh::UnapplyResult::BranchDoesNotExist => { return Err(josh::josh_error( "branch does not exist on remote", )); } josh::UnapplyResult::RejectMerge(parent_count) => { return Err(josh::josh_error(&format!( "rejecting merge with {} parents", parent_count ))); } josh::UnapplyResult::RejectAmend(msg) => { return Err(josh::josh_error(&format!( "rejecting to amend {:?} with conflicting changes", msg ))); } } }; let oid_to_push = if josh_merge { let rev = format!( "refs/josh/upstream/{}/{}", &repo_update.base_ns, &baseref ); let backward_commit = transaction.repo().find_commit(backward_new_oid)?; if let Ok(Ok(base_commit)) = transaction .repo() .revparse_single(&rev) .map(|x| x.peel_to_commit()) { let merged_tree = transaction .repo() .merge_commits(&base_commit, &backward_commit, None)? .write_tree_to(&transaction.repo())?; transaction.repo().commit( None, &backward_commit.author(), &backward_commit.committer(), &format!("Merge from {}", &repo_update.filter_spec), &transaction.repo().find_tree(merged_tree)?, &[&base_commit, &backward_commit], )? } else { return Err(josh::josh_error("josh_merge failed")); } } else { backward_new_oid }; let push_with_options = if options.len()!= 0 { push_to + "%" + &options.join(",") } else { push_to }; let password = credential_store .read()? .get(&repo_update.password) .unwrap_or(&Password { value: "".to_owned(), }) .to_owned(); let reapply = josh::filter::apply_to_commit( filterobj, &transaction.repo().find_commit(oid_to_push)?, &transaction, )?; resp = format!( "{}{}", resp, push_head_url( &transaction.repo(), oid_to_push, &push_with_options, &repo_update.remote_url, &repo_update.username, &password, &repo_update.git_ns, )? ); if new_oid!= reapply { transaction.repo().reference( &format!( "refs/josh/rewrites/{}/r_{}", repo_update.base_ns, reapply ), reapply, true, "reapply", )?; resp = format!("{}\nREWRITE({} -> {})", resp, new_oid, reapply); } } return Ok(resp); } fn push_head_url( repo: &git2::Repository, oid: git2::Oid, refname: &str, url: &str, username: &str, password: &Password, namespace: &str, ) -> josh::JoshResult<String> { let rn = format!("refs/{}", &namespace); let spec = format!("{}:{}", &rn, &refname); let shell = josh::shell::Shell { cwd: repo.path().to_owned(), }; let nurl = if username!= "" { let splitted: Vec<&str> = url.splitn(2, "://").collect(); let proto = splitted[0]; let rest = splitted[1]; format!("{}://{}@{}", &proto, &username, &rest) } else { url.to_owned() }; let cmd = format!("git push {} '{}'", &nurl, &spec); let mut fakehead = repo.reference(&rn, oid, true, "push_head_url")?; let (stdout, stderr, status) = shell.command_env(&cmd, &[], &[("GIT_PASSWORD", &password.value)]); fakehead.delete()?; tracing::debug!("{}", &stderr); tracing::debug!("{}", &stdout); let stderr = stderr.replace(&rn, "JOSH_PUSH"); if status!= 0 { return Err(josh::josh_error(&stderr)); } return Ok(stderr); } pub fn create_repo(path: &std::path::Path) -> josh::JoshResult<()> { tracing::debug!("init base repo: {:?}", path); std::fs::create_dir_all(path).expect("can't create_dir_all"); git2::Repository::init_bare(path)?; let shell = josh::shell::Shell { cwd: path.to_path_buf(), }; shell.command("git config http.receivepack true"); shell.command("git config uploadpack.allowsidebandall true"); shell.command("git config receive.advertisePushOptions true"); let ce = std::env::current_exe().expect("can't find path to exe"); shell.command("rm -Rf hooks"); shell.command("mkdir 
hooks"); std::os::unix::fs::symlink(ce.clone(), path.join("hooks").join("update")) .expect("can't symlink update hook"); std::os::unix::fs::symlink(ce, path.join("hooks").join("pre-receive")) .expect("can't symlink pre-receive hook"); shell.command(&format!( "git config credential.helper '!f() {{ echo \"password=\"$GIT_PASSWORD\"\"; }}; f'" )); shell.command(&"git config gc.auto 0"); if std::env::var_os("JOSH_KEEP_NS") == None { std::fs::remove_dir_all(path.join("refs/namespaces")).ok(); } tracing::info!("repo initialized"); return Ok(()); } #[tracing::instrument(skip(credential_store))] pub fn fetch_refs_from_url( path: &std::path::Path, upstream_repo: &str, url: &str, refs_prefixes: &[String], username: &str, password: &HashedPassword, credential_store: std::sync::Arc<std::sync::RwLock<CredentialStore>>, ) -> josh::JoshResult<bool> { let specs: Vec<_> = refs_prefixes .iter() .map(|r| { format!( "'+{}:refs/josh/upstream/{}/{}'", &r, josh::to_ns(upstream_repo), &r ) }) .collect(); let shell = josh::shell::Shell { cwd: path.to_owned(), }; let nurl = if username!= "" { let splitted: Vec<&str> = url.splitn(2, "://").collect(); let proto = splitted[0]; let rest = splitted[1]; format!("{}://{}@{}", &proto, &username, &rest) } else { let splitted: Vec<&str> = url.splitn(2, "://").collect(); let proto = splitted[0]; let rest = splitted[1]; format!("{}://{}@{}", &proto, "annonymous", &rest) }; let cmd = format!("git fetch --no-tags {} {}", &nurl, &specs.join(" ")); tracing::info!("fetch_refs_from_url {:?} {:?} {:?}", cmd, path, ""); let password = credential_store .read()? .get(&password) .unwrap_or(&Password { value: "".to_owned(), }) .to_owned(); let (_stdout, stderr, _) = shell.command_env(&cmd, &[], &[("GIT_PASSWORD", &password.value)]); tracing::debug!( "fetch_refs_from_url done {:?} {:?} {:?}", cmd, path, stderr ); if stderr.contains("fatal: Authentication failed") { return Ok(false); } if stderr.contains("fatal:") { return Err(josh::josh_error(&format!("git error: {:?}", stderr))); } if stderr.contains("error:") { return Err(josh::josh_error(&format!("git error: {:?}", stderr))); } return Ok(true); } // Wrapper struct for storing passwords to avoid having // them output to traces by accident #[derive(Clone)] pub struct Password { pub value: String, } #[derive(Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)] pub struct
{ pub hash: String, } pub type CredentialStore = std::collections::HashMap<HashedPassword, Password>; impl std::fmt::Debug for HashedPassword { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("HashedPassword") .field("value", &self.hash) .finish() } } pub struct TmpGitNamespace { name: String, repo_path: std::path::PathBuf, _span: tracing::Span, } impl TmpGitNamespace { pub fn new( repo_path: &std::path::Path, span: tracing::Span, ) -> TmpGitNamespace { let n = format!("request_{}", uuid::Uuid::new_v4()); let n2 = n.clone(); TmpGitNamespace { name: n, repo_path: repo_path.to_owned(), _span: tracing::span!( parent: span, tracing::Level::TRACE, "TmpGitNamespace", name = n2.as_str(), ), } } pub fn name(&self) -> &str { return &self.name; } pub fn reference(&self, refname: &str) -> String { return format!("refs/namespaces/{}/{}", &self.name, refname); } } impl std::fmt::Debug for TmpGitNamespace { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("JoshProxyService") .field("repo_path", &self.repo_path) .field("name", &self.name) .finish() } } impl Drop for TmpGitNamespace { fn drop(&mut self) { if std::env::var_os("JOSH_KEEP_NS")!= None { return; } let request_tmp_namespace = self.repo_path.join("refs/namespaces").join(&self.name); std::fs::remove_dir_all(&request_tmp_namespace).unwrap_or_else(|e| { tracing::error!( "remove_dir_all {:?} failed, error:{:?}", request_tmp_namespace, e ) }); } }
HashedPassword
identifier_name
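The masked identifier in this record names the hash wrapper from lib.rs, whose hand-written Debug impl prints only the hash, while the cleartext Password type deliberately derives no Debug at all so it cannot end up in traces. A common variant of the same idea, shown as an assumed illustration rather than the crate's actual code, is a Debug impl that redacts the secret field outright:

#[derive(Clone)]
pub struct Password {
    pub value: String,
}

// Redacting Debug: `{:?}` can appear in tracing spans and logs without
// leaking the secret. (Assumed illustration; the real crate omits Debug
// entirely for the cleartext type, which is stricter.)
impl std::fmt::Debug for Password {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Password")
            .field("value", &"<redacted>")
            .finish()
    }
}

fn main() {
    let p = Password { value: "hunter2".to_string() };
    assert_eq!(p.value.len(), 7);
    println!("{:?}", p); // Password { value: "<redacted>" }
}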
lib.rs
fn baseref_and_options( refname: &str, ) -> josh::JoshResult<(String, String, Vec<String>)> { let mut split = refname.splitn(2, '%'); let push_to = split.next().ok_or(josh::josh_error("no next"))?.to_owned(); let options = if let Some(options) = split.next() { options.split(',').map(|x| x.to_string()).collect() } else { vec![] }; let mut baseref = push_to.to_owned(); if baseref.starts_with("refs/for") { baseref = baseref.replacen("refs/for", "refs/heads", 1) } if baseref.starts_with("refs/drafts") { baseref = baseref.replacen("refs/drafts", "refs/heads", 1) } return Ok((baseref, push_to, options)); } #[derive(serde::Serialize, serde::Deserialize, Debug)] pub struct RepoUpdate { pub refs: std::collections::HashMap<String, (String, String)>, pub remote_url: String, pub username: String, pub password: HashedPassword, pub port: String, pub filter_spec: String, pub base_ns: String, pub git_ns: String, pub git_dir: String, } #[tracing::instrument(skip(credential_store))] pub fn process_repo_update( credential_store: std::sync::Arc<std::sync::RwLock<CredentialStore>>, repo_update: RepoUpdate, ) -> josh::JoshResult<String> { let mut resp = String::new(); let p = std::path::PathBuf::from(&repo_update.git_dir) .join("refs/namespaces") .join(&repo_update.git_ns) .join("push_options"); let push_options_string = std::fs::read_to_string(p)?; let push_options: Vec<&str> = push_options_string.split("\n").collect(); for (refname, (old, new)) in repo_update.refs.iter() { tracing::debug!("REPO_UPDATE env ok"); let transaction = josh::cache::Transaction::open( &std::path::Path::new(&repo_update.git_dir), )?; let old = git2::Oid::from_str(old)?; let (baseref, push_to, options) = baseref_and_options(refname)?; let josh_merge = push_options.contains(&"merge"); tracing::debug!("push options: {:?}", push_options); tracing::debug!("josh-merge: {:?}", josh_merge); let old = if old == git2::Oid::zero() { let rev = format!("refs/namespaces/{}/{}", repo_update.git_ns, &baseref); let oid = if let Ok(x) = transaction.repo().revparse_single(&rev) { x.id() } else { old }; tracing::debug!("push: old oid: {:?}, rev: {:?}", oid, rev); oid } else { tracing::debug!("push: old oid: {:?}, refname: {:?}", old, refname); old }; let unfiltered_old = { let rev = format!( "refs/josh/upstream/{}/{}", repo_update.base_ns, &baseref ); let oid = transaction .repo() .refname_to_id(&rev) .unwrap_or(git2::Oid::zero()); tracing::debug!( "push: unfiltered_old oid: {:?}, rev: {:?}", oid, rev ); oid }; let amends = { let gerrit_changes = format!( "refs/josh/upstream/{}/refs/gerrit_changes/all", repo_update.base_ns, ); let mut amends = std::collections::HashMap::new(); if let Ok(tree) = transaction .repo() .find_reference(&gerrit_changes) .and_then(|x| x.peel_to_commit()) .and_then(|x| x.tree()) { tree.walk(git2::TreeWalkMode::PreOrder, |_, entry| { if let Ok(commit) = transaction.repo().find_commit(entry.id()) { if let Some(id) = josh::get_change_id(&commit) { amends.insert(id, commit.id()); } } git2::TreeWalkResult::Ok })?; } amends }; let filterobj = josh::filter::parse(&repo_update.filter_spec)?; let new_oid = git2::Oid::from_str(&new)?; let backward_new_oid = { tracing::debug!("=== MORE"); tracing::debug!("=== processed_old {:?}", old); match josh::history::unapply_filter( &transaction, filterobj, unfiltered_old, old, new_oid, josh_merge, &amends, )? 
{ josh::UnapplyResult::Done(rewritten) => { tracing::debug!("rewritten"); rewritten } josh::UnapplyResult::BranchDoesNotExist => { return Err(josh::josh_error( "branch does not exist on remote", )); } josh::UnapplyResult::RejectMerge(parent_count) => { return Err(josh::josh_error(&format!( "rejecting merge with {} parents", parent_count ))); } josh::UnapplyResult::RejectAmend(msg) => { return Err(josh::josh_error(&format!( "rejecting to amend {:?} with conflicting changes", msg ))); } } }; let oid_to_push = if josh_merge {
&repo_update.base_ns, &baseref ); let backward_commit = transaction.repo().find_commit(backward_new_oid)?; if let Ok(Ok(base_commit)) = transaction .repo() .revparse_single(&rev) .map(|x| x.peel_to_commit()) { let merged_tree = transaction .repo() .merge_commits(&base_commit, &backward_commit, None)? .write_tree_to(&transaction.repo())?; transaction.repo().commit( None, &backward_commit.author(), &backward_commit.committer(), &format!("Merge from {}", &repo_update.filter_spec), &transaction.repo().find_tree(merged_tree)?, &[&base_commit, &backward_commit], )? } else { return Err(josh::josh_error("josh_merge failed")); } } else { backward_new_oid }; let push_with_options = if options.len()!= 0 { push_to + "%" + &options.join(",") } else { push_to }; let password = credential_store .read()? .get(&repo_update.password) .unwrap_or(&Password { value: "".to_owned(), }) .to_owned(); let reapply = josh::filter::apply_to_commit( filterobj, &transaction.repo().find_commit(oid_to_push)?, &transaction, )?; resp = format!( "{}{}", resp, push_head_url( &transaction.repo(), oid_to_push, &push_with_options, &repo_update.remote_url, &repo_update.username, &password, &repo_update.git_ns, )? ); if new_oid!= reapply { transaction.repo().reference( &format!( "refs/josh/rewrites/{}/r_{}", repo_update.base_ns, reapply ), reapply, true, "reapply", )?; resp = format!("{}\nREWRITE({} -> {})", resp, new_oid, reapply); } } return Ok(resp); } fn push_head_url( repo: &git2::Repository, oid: git2::Oid, refname: &str, url: &str, username: &str, password: &Password, namespace: &str, ) -> josh::JoshResult<String> { let rn = format!("refs/{}", &namespace); let spec = format!("{}:{}", &rn, &refname); let shell = josh::shell::Shell { cwd: repo.path().to_owned(), }; let nurl = if username!= "" { let splitted: Vec<&str> = url.splitn(2, "://").collect(); let proto = splitted[0]; let rest = splitted[1]; format!("{}://{}@{}", &proto, &username, &rest) } else { url.to_owned() }; let cmd = format!("git push {} '{}'", &nurl, &spec); let mut fakehead = repo.reference(&rn, oid, true, "push_head_url")?; let (stdout, stderr, status) = shell.command_env(&cmd, &[], &[("GIT_PASSWORD", &password.value)]); fakehead.delete()?; tracing::debug!("{}", &stderr); tracing::debug!("{}", &stdout); let stderr = stderr.replace(&rn, "JOSH_PUSH"); if status!= 0 { return Err(josh::josh_error(&stderr)); } return Ok(stderr); } pub fn create_repo(path: &std::path::Path) -> josh::JoshResult<()> { tracing::debug!("init base repo: {:?}", path); std::fs::create_dir_all(path).expect("can't create_dir_all"); git2::Repository::init_bare(path)?; let shell = josh::shell::Shell { cwd: path.to_path_buf(), }; shell.command("git config http.receivepack true"); shell.command("git config uploadpack.allowsidebandall true"); shell.command("git config receive.advertisePushOptions true"); let ce = std::env::current_exe().expect("can't find path to exe"); shell.command("rm -Rf hooks"); shell.command("mkdir hooks"); std::os::unix::fs::symlink(ce.clone(), path.join("hooks").join("update")) .expect("can't symlink update hook"); std::os::unix::fs::symlink(ce, path.join("hooks").join("pre-receive")) .expect("can't symlink pre-receive hook"); shell.command(&format!( "git config credential.helper '!f() {{ echo \"password=\"$GIT_PASSWORD\"\"; }}; f'" )); shell.command(&"git config gc.auto 0"); if std::env::var_os("JOSH_KEEP_NS") == None { std::fs::remove_dir_all(path.join("refs/namespaces")).ok(); } tracing::info!("repo initialized"); return Ok(()); } 
#[tracing::instrument(skip(credential_store))] pub fn fetch_refs_from_url( path: &std::path::Path, upstream_repo: &str, url: &str, refs_prefixes: &[String], username: &str, password: &HashedPassword, credential_store: std::sync::Arc<std::sync::RwLock<CredentialStore>>, ) -> josh::JoshResult<bool> { let specs: Vec<_> = refs_prefixes .iter() .map(|r| { format!( "'+{}:refs/josh/upstream/{}/{}'", &r, josh::to_ns(upstream_repo), &r ) }) .collect(); let shell = josh::shell::Shell { cwd: path.to_owned(), }; let nurl = if username!= "" { let splitted: Vec<&str> = url.splitn(2, "://").collect(); let proto = splitted[0]; let rest = splitted[1]; format!("{}://{}@{}", &proto, &username, &rest) } else { let splitted: Vec<&str> = url.splitn(2, "://").collect(); let proto = splitted[0]; let rest = splitted[1]; format!("{}://{}@{}", &proto, "annonymous", &rest) }; let cmd = format!("git fetch --no-tags {} {}", &nurl, &specs.join(" ")); tracing::info!("fetch_refs_from_url {:?} {:?} {:?}", cmd, path, ""); let password = credential_store .read()? .get(&password) .unwrap_or(&Password { value: "".to_owned(), }) .to_owned(); let (_stdout, stderr, _) = shell.command_env(&cmd, &[], &[("GIT_PASSWORD", &password.value)]); tracing::debug!( "fetch_refs_from_url done {:?} {:?} {:?}", cmd, path, stderr ); if stderr.contains("fatal: Authentication failed") { return Ok(false); } if stderr.contains("fatal:") { return Err(josh::josh_error(&format!("git error: {:?}", stderr))); } if stderr.contains("error:") { return Err(josh::josh_error(&format!("git error: {:?}", stderr))); } return Ok(true); } // Wrapper struct for storing passwords to avoid having // them output to traces by accident #[derive(Clone)] pub struct Password { pub value: String, } #[derive(Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)] pub struct HashedPassword { pub hash: String, } pub type CredentialStore = std::collections::HashMap<HashedPassword, Password>; impl std::fmt::Debug for HashedPassword { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("HashedPassword") .field("value", &self.hash) .finish() } } pub struct TmpGitNamespace { name: String, repo_path: std::path::PathBuf, _span: tracing::Span, } impl TmpGitNamespace { pub fn new( repo_path: &std::path::Path, span: tracing::Span, ) -> TmpGitNamespace { let n = format!("request_{}", uuid::Uuid::new_v4()); let n2 = n.clone(); TmpGitNamespace { name: n, repo_path: repo_path.to_owned(), _span: tracing::span!( parent: span, tracing::Level::TRACE, "TmpGitNamespace", name = n2.as_str(), ), } } pub fn name(&self) -> &str { return &self.name; } pub fn reference(&self, refname: &str) -> String { return format!("refs/namespaces/{}/{}", &self.name, refname); } } impl std::fmt::Debug for TmpGitNamespace { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("JoshProxyService") .field("repo_path", &self.repo_path) .field("name", &self.name) .finish() } } impl Drop for TmpGitNamespace { fn drop(&mut self) { if std::env::var_os("JOSH_KEEP_NS")!= None { return; } let request_tmp_namespace = self.repo_path.join("refs/namespaces").join(&self.name); std::fs::remove_dir_all(&request_tmp_namespace).unwrap_or_else(|e| { tracing::error!( "remove_dir_all {:?} failed, error:{:?}", request_tmp_namespace, e ) }); } }
let rev = format!( "refs/josh/upstream/{}/{}",
random_line_split
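This record's file opens with baseref_and_options, which splits Gerrit-style push refnames of the form refname%opt1,opt2 and rewrites refs/for/* and refs/drafts/* review refs to plain refs/heads/* branch refs. A standalone sketch of that parsing; the josh error plumbing is dropped so the sketch stays self-contained, with unwrap_or standing in for the josh_error path:

fn baseref_and_options(refname: &str) -> (String, String, Vec<String>) {
    let mut split = refname.splitn(2, '%');
    // splitn always yields at least one item, so this cannot fail.
    let push_to = split.next().unwrap_or("").to_owned();
    let options: Vec<String> = split
        .next()
        .map(|o| o.split(',').map(str::to_string).collect())
        .unwrap_or_default();
    let mut baseref = push_to.clone();
    if baseref.starts_with("refs/for") {
        baseref = baseref.replacen("refs/for", "refs/heads", 1);
    }
    if baseref.starts_with("refs/drafts") {
        baseref = baseref.replacen("refs/drafts", "refs/heads", 1);
    }
    (baseref, push_to, options)
}

fn main() {
    let (baseref, push_to, options) = baseref_and_options("refs/for/master%merge,topic=x");
    assert_eq!(baseref, "refs/heads/master");
    assert_eq!(push_to, "refs/for/master");
    assert_eq!(options, vec!["merge".to_string(), "topic=x".to_string()]);
    let (plain, _push, opts) = baseref_and_options("refs/heads/dev");
    assert_eq!(plain, "refs/heads/dev");
    assert!(opts.is_empty());
}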
lib.rs
fn baseref_and_options( refname: &str, ) -> josh::JoshResult<(String, String, Vec<String>)> { let mut split = refname.splitn(2, '%'); let push_to = split.next().ok_or(josh::josh_error("no next"))?.to_owned(); let options = if let Some(options) = split.next() { options.split(',').map(|x| x.to_string()).collect() } else { vec![] }; let mut baseref = push_to.to_owned(); if baseref.starts_with("refs/for") { baseref = baseref.replacen("refs/for", "refs/heads", 1) } if baseref.starts_with("refs/drafts") { baseref = baseref.replacen("refs/drafts", "refs/heads", 1) } return Ok((baseref, push_to, options)); } #[derive(serde::Serialize, serde::Deserialize, Debug)] pub struct RepoUpdate { pub refs: std::collections::HashMap<String, (String, String)>, pub remote_url: String, pub username: String, pub password: HashedPassword, pub port: String, pub filter_spec: String, pub base_ns: String, pub git_ns: String, pub git_dir: String, } #[tracing::instrument(skip(credential_store))] pub fn process_repo_update( credential_store: std::sync::Arc<std::sync::RwLock<CredentialStore>>, repo_update: RepoUpdate, ) -> josh::JoshResult<String> { let mut resp = String::new(); let p = std::path::PathBuf::from(&repo_update.git_dir) .join("refs/namespaces") .join(&repo_update.git_ns) .join("push_options"); let push_options_string = std::fs::read_to_string(p)?; let push_options: Vec<&str> = push_options_string.split("\n").collect(); for (refname, (old, new)) in repo_update.refs.iter() { tracing::debug!("REPO_UPDATE env ok"); let transaction = josh::cache::Transaction::open( &std::path::Path::new(&repo_update.git_dir), )?; let old = git2::Oid::from_str(old)?; let (baseref, push_to, options) = baseref_and_options(refname)?; let josh_merge = push_options.contains(&"merge"); tracing::debug!("push options: {:?}", push_options); tracing::debug!("josh-merge: {:?}", josh_merge); let old = if old == git2::Oid::zero() { let rev = format!("refs/namespaces/{}/{}", repo_update.git_ns, &baseref); let oid = if let Ok(x) = transaction.repo().revparse_single(&rev) { x.id() } else { old }; tracing::debug!("push: old oid: {:?}, rev: {:?}", oid, rev); oid } else { tracing::debug!("push: old oid: {:?}, refname: {:?}", old, refname); old }; let unfiltered_old = { let rev = format!( "refs/josh/upstream/{}/{}", repo_update.base_ns, &baseref ); let oid = transaction .repo() .refname_to_id(&rev) .unwrap_or(git2::Oid::zero()); tracing::debug!( "push: unfiltered_old oid: {:?}, rev: {:?}", oid, rev ); oid }; let amends = { let gerrit_changes = format!( "refs/josh/upstream/{}/refs/gerrit_changes/all", repo_update.base_ns, ); let mut amends = std::collections::HashMap::new(); if let Ok(tree) = transaction .repo() .find_reference(&gerrit_changes) .and_then(|x| x.peel_to_commit()) .and_then(|x| x.tree()) { tree.walk(git2::TreeWalkMode::PreOrder, |_, entry| { if let Ok(commit) = transaction.repo().find_commit(entry.id()) { if let Some(id) = josh::get_change_id(&commit) { amends.insert(id, commit.id()); } } git2::TreeWalkResult::Ok })?; } amends }; let filterobj = josh::filter::parse(&repo_update.filter_spec)?; let new_oid = git2::Oid::from_str(&new)?; let backward_new_oid = { tracing::debug!("=== MORE"); tracing::debug!("=== processed_old {:?}", old); match josh::history::unapply_filter( &transaction, filterobj, unfiltered_old, old, new_oid, josh_merge, &amends, )? 
{ josh::UnapplyResult::Done(rewritten) => { tracing::debug!("rewritten"); rewritten } josh::UnapplyResult::BranchDoesNotExist => { return Err(josh::josh_error( "branch does not exist on remote", )); } josh::UnapplyResult::RejectMerge(parent_count) => { return Err(josh::josh_error(&format!( "rejecting merge with {} parents", parent_count ))); } josh::UnapplyResult::RejectAmend(msg) => { return Err(josh::josh_error(&format!( "rejecting to amend {:?} with conflicting changes", msg ))); } } }; let oid_to_push = if josh_merge { let rev = format!( "refs/josh/upstream/{}/{}", &repo_update.base_ns, &baseref ); let backward_commit = transaction.repo().find_commit(backward_new_oid)?; if let Ok(Ok(base_commit)) = transaction .repo() .revparse_single(&rev) .map(|x| x.peel_to_commit()) { let merged_tree = transaction .repo() .merge_commits(&base_commit, &backward_commit, None)? .write_tree_to(&transaction.repo())?; transaction.repo().commit( None, &backward_commit.author(), &backward_commit.committer(), &format!("Merge from {}", &repo_update.filter_spec), &transaction.repo().find_tree(merged_tree)?, &[&base_commit, &backward_commit], )? } else { return Err(josh::josh_error("josh_merge failed")); } } else { backward_new_oid }; let push_with_options = if options.len()!= 0 { push_to + "%" + &options.join(",") } else { push_to }; let password = credential_store .read()? .get(&repo_update.password) .unwrap_or(&Password { value: "".to_owned(), }) .to_owned(); let reapply = josh::filter::apply_to_commit( filterobj, &transaction.repo().find_commit(oid_to_push)?, &transaction, )?; resp = format!( "{}{}", resp, push_head_url( &transaction.repo(), oid_to_push, &push_with_options, &repo_update.remote_url, &repo_update.username, &password, &repo_update.git_ns, )? ); if new_oid!= reapply { transaction.repo().reference( &format!( "refs/josh/rewrites/{}/r_{}", repo_update.base_ns, reapply ), reapply, true, "reapply", )?; resp = format!("{}\nREWRITE({} -> {})", resp, new_oid, reapply); } } return Ok(resp); } fn push_head_url( repo: &git2::Repository, oid: git2::Oid, refname: &str, url: &str, username: &str, password: &Password, namespace: &str, ) -> josh::JoshResult<String> { let rn = format!("refs/{}", &namespace); let spec = format!("{}:{}", &rn, &refname); let shell = josh::shell::Shell { cwd: repo.path().to_owned(), }; let nurl = if username!= "" { let splitted: Vec<&str> = url.splitn(2, "://").collect(); let proto = splitted[0]; let rest = splitted[1]; format!("{}://{}@{}", &proto, &username, &rest) } else
; let cmd = format!("git push {} '{}'", &nurl, &spec); let mut fakehead = repo.reference(&rn, oid, true, "push_head_url")?; let (stdout, stderr, status) = shell.command_env(&cmd, &[], &[("GIT_PASSWORD", &password.value)]); fakehead.delete()?; tracing::debug!("{}", &stderr); tracing::debug!("{}", &stdout); let stderr = stderr.replace(&rn, "JOSH_PUSH"); if status!= 0 { return Err(josh::josh_error(&stderr)); } return Ok(stderr); } pub fn create_repo(path: &std::path::Path) -> josh::JoshResult<()> { tracing::debug!("init base repo: {:?}", path); std::fs::create_dir_all(path).expect("can't create_dir_all"); git2::Repository::init_bare(path)?; let shell = josh::shell::Shell { cwd: path.to_path_buf(), }; shell.command("git config http.receivepack true"); shell.command("git config uploadpack.allowsidebandall true"); shell.command("git config receive.advertisePushOptions true"); let ce = std::env::current_exe().expect("can't find path to exe"); shell.command("rm -Rf hooks"); shell.command("mkdir hooks"); std::os::unix::fs::symlink(ce.clone(), path.join("hooks").join("update")) .expect("can't symlink update hook"); std::os::unix::fs::symlink(ce, path.join("hooks").join("pre-receive")) .expect("can't symlink pre-receive hook"); shell.command(&format!( "git config credential.helper '!f() {{ echo \"password=\"$GIT_PASSWORD\"\"; }}; f'" )); shell.command(&"git config gc.auto 0"); if std::env::var_os("JOSH_KEEP_NS") == None { std::fs::remove_dir_all(path.join("refs/namespaces")).ok(); } tracing::info!("repo initialized"); return Ok(()); } #[tracing::instrument(skip(credential_store))] pub fn fetch_refs_from_url( path: &std::path::Path, upstream_repo: &str, url: &str, refs_prefixes: &[String], username: &str, password: &HashedPassword, credential_store: std::sync::Arc<std::sync::RwLock<CredentialStore>>, ) -> josh::JoshResult<bool> { let specs: Vec<_> = refs_prefixes .iter() .map(|r| { format!( "'+{}:refs/josh/upstream/{}/{}'", &r, josh::to_ns(upstream_repo), &r ) }) .collect(); let shell = josh::shell::Shell { cwd: path.to_owned(), }; let nurl = if username!= "" { let splitted: Vec<&str> = url.splitn(2, "://").collect(); let proto = splitted[0]; let rest = splitted[1]; format!("{}://{}@{}", &proto, &username, &rest) } else { let splitted: Vec<&str> = url.splitn(2, "://").collect(); let proto = splitted[0]; let rest = splitted[1]; format!("{}://{}@{}", &proto, "annonymous", &rest) }; let cmd = format!("git fetch --no-tags {} {}", &nurl, &specs.join(" ")); tracing::info!("fetch_refs_from_url {:?} {:?} {:?}", cmd, path, ""); let password = credential_store .read()? 
.get(&password) .unwrap_or(&Password { value: "".to_owned(), }) .to_owned(); let (_stdout, stderr, _) = shell.command_env(&cmd, &[], &[("GIT_PASSWORD", &password.value)]); tracing::debug!( "fetch_refs_from_url done {:?} {:?} {:?}", cmd, path, stderr ); if stderr.contains("fatal: Authentication failed") { return Ok(false); } if stderr.contains("fatal:") { return Err(josh::josh_error(&format!("git error: {:?}", stderr))); } if stderr.contains("error:") { return Err(josh::josh_error(&format!("git error: {:?}", stderr))); } return Ok(true); } // Wrapper struct for storing passwords to avoid having // them output to traces by accident #[derive(Clone)] pub struct Password { pub value: String, } #[derive(Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)] pub struct HashedPassword { pub hash: String, } pub type CredentialStore = std::collections::HashMap<HashedPassword, Password>; impl std::fmt::Debug for HashedPassword { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("HashedPassword") .field("value", &self.hash) .finish() } } pub struct TmpGitNamespace { name: String, repo_path: std::path::PathBuf, _span: tracing::Span, } impl TmpGitNamespace { pub fn new( repo_path: &std::path::Path, span: tracing::Span, ) -> TmpGitNamespace { let n = format!("request_{}", uuid::Uuid::new_v4()); let n2 = n.clone(); TmpGitNamespace { name: n, repo_path: repo_path.to_owned(), _span: tracing::span!( parent: span, tracing::Level::TRACE, "TmpGitNamespace", name = n2.as_str(), ), } } pub fn name(&self) -> &str { return &self.name; } pub fn reference(&self, refname: &str) -> String { return format!("refs/namespaces/{}/{}", &self.name, refname); } } impl std::fmt::Debug for TmpGitNamespace { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("JoshProxyService") .field("repo_path", &self.repo_path) .field("name", &self.name) .finish() } } impl Drop for TmpGitNamespace { fn drop(&mut self) { if std::env::var_os("JOSH_KEEP_NS")!= None { return; } let request_tmp_namespace = self.repo_path.join("refs/namespaces").join(&self.name); std::fs::remove_dir_all(&request_tmp_namespace).unwrap_or_else(|e| { tracing::error!( "remove_dir_all {:?} failed, error:{:?}", request_tmp_namespace, e ) }); } }
{ url.to_owned() }
conditional_block
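The conditional_block masked in this record is the else arm of the URL rewrite in push_head_url: with a username, the URL is split once on "://" and the name spliced between protocol and host; without one, the URL passes through unchanged. A sketch of the branch with a hypothetical helper name; note the original indexes the split result directly and would panic on a URL without "://", whereas unwrap_or is used here to keep the sketch total:

fn with_username(url: &str, username: &str) -> String {
    if !username.is_empty() {
        let mut parts = url.splitn(2, "://");
        let proto = parts.next().unwrap_or("");
        let rest = parts.next().unwrap_or("");
        format!("{}://{}@{}", proto, username, rest)
    } else {
        // This arm is the masked conditional_block: pass the URL through.
        url.to_owned()
    }
}

fn main() {
    assert_eq!(
        with_username("https://example.com/repo.git", "josh"),
        "https://josh@example.com/repo.git"
    );
    assert_eq!(
        with_username("https://example.com/repo.git", ""),
        "https://example.com/repo.git"
    );
}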
lib.rs
fn baseref_and_options( refname: &str, ) -> josh::JoshResult<(String, String, Vec<String>)> { let mut split = refname.splitn(2, '%'); let push_to = split.next().ok_or(josh::josh_error("no next"))?.to_owned(); let options = if let Some(options) = split.next() { options.split(',').map(|x| x.to_string()).collect() } else { vec![] }; let mut baseref = push_to.to_owned(); if baseref.starts_with("refs/for") { baseref = baseref.replacen("refs/for", "refs/heads", 1) } if baseref.starts_with("refs/drafts") { baseref = baseref.replacen("refs/drafts", "refs/heads", 1) } return Ok((baseref, push_to, options)); } #[derive(serde::Serialize, serde::Deserialize, Debug)] pub struct RepoUpdate { pub refs: std::collections::HashMap<String, (String, String)>, pub remote_url: String, pub username: String, pub password: HashedPassword, pub port: String, pub filter_spec: String, pub base_ns: String, pub git_ns: String, pub git_dir: String, } #[tracing::instrument(skip(credential_store))] pub fn process_repo_update( credential_store: std::sync::Arc<std::sync::RwLock<CredentialStore>>, repo_update: RepoUpdate, ) -> josh::JoshResult<String> { let mut resp = String::new(); let p = std::path::PathBuf::from(&repo_update.git_dir) .join("refs/namespaces") .join(&repo_update.git_ns) .join("push_options"); let push_options_string = std::fs::read_to_string(p)?; let push_options: Vec<&str> = push_options_string.split("\n").collect(); for (refname, (old, new)) in repo_update.refs.iter() { tracing::debug!("REPO_UPDATE env ok"); let transaction = josh::cache::Transaction::open( &std::path::Path::new(&repo_update.git_dir), )?; let old = git2::Oid::from_str(old)?; let (baseref, push_to, options) = baseref_and_options(refname)?; let josh_merge = push_options.contains(&"merge"); tracing::debug!("push options: {:?}", push_options); tracing::debug!("josh-merge: {:?}", josh_merge); let old = if old == git2::Oid::zero() { let rev = format!("refs/namespaces/{}/{}", repo_update.git_ns, &baseref); let oid = if let Ok(x) = transaction.repo().revparse_single(&rev) { x.id() } else { old }; tracing::debug!("push: old oid: {:?}, rev: {:?}", oid, rev); oid } else { tracing::debug!("push: old oid: {:?}, refname: {:?}", old, refname); old }; let unfiltered_old = { let rev = format!( "refs/josh/upstream/{}/{}", repo_update.base_ns, &baseref ); let oid = transaction .repo() .refname_to_id(&rev) .unwrap_or(git2::Oid::zero()); tracing::debug!( "push: unfiltered_old oid: {:?}, rev: {:?}", oid, rev ); oid }; let amends = { let gerrit_changes = format!( "refs/josh/upstream/{}/refs/gerrit_changes/all", repo_update.base_ns, ); let mut amends = std::collections::HashMap::new(); if let Ok(tree) = transaction .repo() .find_reference(&gerrit_changes) .and_then(|x| x.peel_to_commit()) .and_then(|x| x.tree()) { tree.walk(git2::TreeWalkMode::PreOrder, |_, entry| { if let Ok(commit) = transaction.repo().find_commit(entry.id()) { if let Some(id) = josh::get_change_id(&commit) { amends.insert(id, commit.id()); } } git2::TreeWalkResult::Ok })?; } amends }; let filterobj = josh::filter::parse(&repo_update.filter_spec)?; let new_oid = git2::Oid::from_str(&new)?; let backward_new_oid = { tracing::debug!("=== MORE"); tracing::debug!("=== processed_old {:?}", old); match josh::history::unapply_filter( &transaction, filterobj, unfiltered_old, old, new_oid, josh_merge, &amends, )? 
{ josh::UnapplyResult::Done(rewritten) => { tracing::debug!("rewritten"); rewritten } josh::UnapplyResult::BranchDoesNotExist => { return Err(josh::josh_error( "branch does not exist on remote", )); } josh::UnapplyResult::RejectMerge(parent_count) => { return Err(josh::josh_error(&format!( "rejecting merge with {} parents", parent_count ))); } josh::UnapplyResult::RejectAmend(msg) => { return Err(josh::josh_error(&format!( "rejecting to amend {:?} with conflicting changes", msg ))); } } }; let oid_to_push = if josh_merge { let rev = format!( "refs/josh/upstream/{}/{}", &repo_update.base_ns, &baseref ); let backward_commit = transaction.repo().find_commit(backward_new_oid)?; if let Ok(Ok(base_commit)) = transaction .repo() .revparse_single(&rev) .map(|x| x.peel_to_commit()) { let merged_tree = transaction .repo() .merge_commits(&base_commit, &backward_commit, None)? .write_tree_to(&transaction.repo())?; transaction.repo().commit( None, &backward_commit.author(), &backward_commit.committer(), &format!("Merge from {}", &repo_update.filter_spec), &transaction.repo().find_tree(merged_tree)?, &[&base_commit, &backward_commit], )? } else { return Err(josh::josh_error("josh_merge failed")); } } else { backward_new_oid }; let push_with_options = if options.len()!= 0 { push_to + "%" + &options.join(",") } else { push_to }; let password = credential_store .read()? .get(&repo_update.password) .unwrap_or(&Password { value: "".to_owned(), }) .to_owned(); let reapply = josh::filter::apply_to_commit( filterobj, &transaction.repo().find_commit(oid_to_push)?, &transaction, )?; resp = format!( "{}{}", resp, push_head_url( &transaction.repo(), oid_to_push, &push_with_options, &repo_update.remote_url, &repo_update.username, &password, &repo_update.git_ns, )? ); if new_oid!= reapply { transaction.repo().reference( &format!( "refs/josh/rewrites/{}/r_{}", repo_update.base_ns, reapply ), reapply, true, "reapply", )?; resp = format!("{}\nREWRITE({} -> {})", resp, new_oid, reapply); } } return Ok(resp); } fn push_head_url( repo: &git2::Repository, oid: git2::Oid, refname: &str, url: &str, username: &str, password: &Password, namespace: &str, ) -> josh::JoshResult<String> { let rn = format!("refs/{}", &namespace); let spec = format!("{}:{}", &rn, &refname); let shell = josh::shell::Shell { cwd: repo.path().to_owned(), }; let nurl = if username!= "" { let splitted: Vec<&str> = url.splitn(2, "://").collect(); let proto = splitted[0]; let rest = splitted[1]; format!("{}://{}@{}", &proto, &username, &rest) } else { url.to_owned() }; let cmd = format!("git push {} '{}'", &nurl, &spec); let mut fakehead = repo.reference(&rn, oid, true, "push_head_url")?; let (stdout, stderr, status) = shell.command_env(&cmd, &[], &[("GIT_PASSWORD", &password.value)]); fakehead.delete()?; tracing::debug!("{}", &stderr); tracing::debug!("{}", &stdout); let stderr = stderr.replace(&rn, "JOSH_PUSH"); if status!= 0 { return Err(josh::josh_error(&stderr)); } return Ok(stderr); } pub fn create_repo(path: &std::path::Path) -> josh::JoshResult<()> { tracing::debug!("init base repo: {:?}", path); std::fs::create_dir_all(path).expect("can't create_dir_all"); git2::Repository::init_bare(path)?; let shell = josh::shell::Shell { cwd: path.to_path_buf(), }; shell.command("git config http.receivepack true"); shell.command("git config uploadpack.allowsidebandall true"); shell.command("git config receive.advertisePushOptions true"); let ce = std::env::current_exe().expect("can't find path to exe"); shell.command("rm -Rf hooks"); shell.command("mkdir 
hooks"); std::os::unix::fs::symlink(ce.clone(), path.join("hooks").join("update")) .expect("can't symlink update hook"); std::os::unix::fs::symlink(ce, path.join("hooks").join("pre-receive")) .expect("can't symlink pre-receive hook"); shell.command(&format!( "git config credential.helper '!f() {{ echo \"password=\"$GIT_PASSWORD\"\"; }}; f'" )); shell.command(&"git config gc.auto 0"); if std::env::var_os("JOSH_KEEP_NS") == None { std::fs::remove_dir_all(path.join("refs/namespaces")).ok(); } tracing::info!("repo initialized"); return Ok(()); } #[tracing::instrument(skip(credential_store))] pub fn fetch_refs_from_url( path: &std::path::Path, upstream_repo: &str, url: &str, refs_prefixes: &[String], username: &str, password: &HashedPassword, credential_store: std::sync::Arc<std::sync::RwLock<CredentialStore>>, ) -> josh::JoshResult<bool> { let specs: Vec<_> = refs_prefixes .iter() .map(|r| { format!( "'+{}:refs/josh/upstream/{}/{}'", &r, josh::to_ns(upstream_repo), &r ) }) .collect(); let shell = josh::shell::Shell { cwd: path.to_owned(), }; let nurl = if username!= "" { let splitted: Vec<&str> = url.splitn(2, "://").collect(); let proto = splitted[0]; let rest = splitted[1]; format!("{}://{}@{}", &proto, &username, &rest) } else { let splitted: Vec<&str> = url.splitn(2, "://").collect(); let proto = splitted[0]; let rest = splitted[1]; format!("{}://{}@{}", &proto, "annonymous", &rest) }; let cmd = format!("git fetch --no-tags {} {}", &nurl, &specs.join(" ")); tracing::info!("fetch_refs_from_url {:?} {:?} {:?}", cmd, path, ""); let password = credential_store .read()? .get(&password) .unwrap_or(&Password { value: "".to_owned(), }) .to_owned(); let (_stdout, stderr, _) = shell.command_env(&cmd, &[], &[("GIT_PASSWORD", &password.value)]); tracing::debug!( "fetch_refs_from_url done {:?} {:?} {:?}", cmd, path, stderr ); if stderr.contains("fatal: Authentication failed") { return Ok(false); } if stderr.contains("fatal:") { return Err(josh::josh_error(&format!("git error: {:?}", stderr))); } if stderr.contains("error:") { return Err(josh::josh_error(&format!("git error: {:?}", stderr))); } return Ok(true); } // Wrapper struct for storing passwords to avoid having // them output to traces by accident #[derive(Clone)] pub struct Password { pub value: String, } #[derive(Clone, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)] pub struct HashedPassword { pub hash: String, } pub type CredentialStore = std::collections::HashMap<HashedPassword, Password>; impl std::fmt::Debug for HashedPassword { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result
} pub struct TmpGitNamespace { name: String, repo_path: std::path::PathBuf, _span: tracing::Span, } impl TmpGitNamespace { pub fn new( repo_path: &std::path::Path, span: tracing::Span, ) -> TmpGitNamespace { let n = format!("request_{}", uuid::Uuid::new_v4()); let n2 = n.clone(); TmpGitNamespace { name: n, repo_path: repo_path.to_owned(), _span: tracing::span!( parent: span, tracing::Level::TRACE, "TmpGitNamespace", name = n2.as_str(), ), } } pub fn name(&self) -> &str { return &self.name; } pub fn reference(&self, refname: &str) -> String { return format!("refs/namespaces/{}/{}", &self.name, refname); } } impl std::fmt::Debug for TmpGitNamespace { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("TmpGitNamespace") .field("repo_path", &self.repo_path) .field("name", &self.name) .finish() } } impl Drop for TmpGitNamespace { fn drop(&mut self) { if std::env::var_os("JOSH_KEEP_NS") != None { return; } let request_tmp_namespace = self.repo_path.join("refs/namespaces").join(&self.name); std::fs::remove_dir_all(&request_tmp_namespace).unwrap_or_else(|e| { tracing::error!( "remove_dir_all {:?} failed, error: {:?}", request_tmp_namespace, e ) }); } }
{ f.debug_struct("HashedPassword") .field("value", &self.hash) .finish() }
identifier_body
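The `Password` / `HashedPassword` pair above keeps plaintext credentials out of traces: `Password` deliberately has no derived `Debug`, and `HashedPassword`'s hand-written `Debug` prints only the hash. Below is a minimal, self-contained sketch of that redaction pattern; the `Secret` type and the `<redacted>` placeholder are illustrative stand-ins, not part of josh's API.

use std::fmt;

// Stand-in for josh's `Password`: the raw value never reaches `Debug` output.
struct Secret { value: String }

impl fmt::Debug for Secret {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Emit a fixed placeholder so `{:?}` inside a tracing macro cannot leak the value.
        f.debug_struct("Secret").field("value", &"<redacted>").finish()
    }
}

fn main() {
    let p = Secret { value: "hunter2".to_owned() };
    println!("{:?}", p); // prints: Secret { value: "<redacted>" }
}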
build.rs
extern crate avr_mcu; use std::path::Path; fn main() { let crate_root = Path::new(env!("CARGO_MANIFEST_DIR")); let mcus = if cfg!(feature = "all_mcus") { avr_mcu::microcontrollers().to_owned() } else { // By default, when compiling for AVR we should raise a hard error if // the microcontroller is not specified. if cfg!(arch = "avr") { let current_mcu = avr_mcu::current::mcu() .expect("no target cpu set"); vec![current_mcu] } else { // On non-avr architectures, support all microcontrollers. avr_mcu::microcontrollers().to_owned() } }; // Useful for testing // let mcus = vec![ // avr_mcu::microcontroller("ata5795").clone(), // avr_mcu::microcontroller("atmega328").clone(), // required when compiling for PC // ]; gen::all(&crate_root.join("src").join("gen"), &mcus).unwrap(); } mod gen { use avr_mcu::*; use std::collections::{HashMap, HashSet}; use std::fs::{self, File}; use std::io::prelude::*; use std::io; use std::path::Path; pub fn all(path: &Path, mcus: &[Mcu]) -> Result<(), io::Error> { fs::create_dir_all(path)?; let mut module_names = Vec::new(); // Create modules for each mcu. for mcu in mcus.iter() { let module_name = self::mcu_module_name(mcu); let module_path = path.join(format!("{}.rs", module_name)); eprintln!("generating module for {}", mcu.device.name); generate_mcu_module(mcu, &module_path)?; module_names.push(module_name); } generate_entry_module(path, &module_names) } /// Generate a `mod.rs` file that binds a list of submodules. fn generate_entry_module(output_path: &Path, module_names: &[String]) -> Result<(), io::Error> { let mut mod_rs = File::create(output_path.join("mod.rs"))?; writeln!(mod_rs, "// Device definitions")?; for module_name in module_names { writeln!(mod_rs, "pub mod {};", module_name)?; } writeln!(mod_rs)?; const CURRENT_MOD_SUMMARY: &'static str = "Contains definitions for the current AVR device"; writeln!(mod_rs, "/// {}", CURRENT_MOD_SUMMARY)?; writeln!(mod_rs, "///")?; writeln!(mod_rs, "/// **NOTE**: We are showing the ATmega328 here, even though the library")?; writeln!(mod_rs, "/// is not targeting a real AVR device. If you compile this library for")?; writeln!(mod_rs, "/// a specific AVR MCU, the module for that device will be aliased here.")?; writeln!(mod_rs, "// If we are targeting a non-AVR device, just pick the ATmega328p so")?; writeln!(mod_rs, "// that users can see what the API would look like")?; writeln!(mod_rs, "//")?; writeln!(mod_rs, "// Note that we reexport rather than alias so that we can add a note about")?; writeln!(mod_rs, "// this behaviour to the documentation.")?; writeln!(mod_rs, "#[cfg(not(target_arch = \"avr\"))]")?; writeln!(mod_rs, "pub mod current {{ pub use super::atmega328::*; }}")?; writeln!(mod_rs)?; writeln!(mod_rs, "/// {}", CURRENT_MOD_SUMMARY)?; writeln!(mod_rs, "// If we are targeting AVR, look up the current device's module")?; writeln!(mod_rs, "// and alias it to the `current` module.")?; writeln!(mod_rs, "#[cfg(target_arch = \"avr\")]")?; writeln!(mod_rs, "pub mod current {{")?; writeln!(mod_rs, " // NOTE: 'target_cpu' is a cfg flag specific to the avr-rust fork")?; for module_name in module_names { writeln!(mod_rs, " #[cfg(target_cpu = \"{}\")] pub use super::{} as current;", module_name, module_name)?; } writeln!(mod_rs, "}}")?; Ok(()) } /// Generates a self-contained module for each individual mcu.
fn generate_mcu_module(mcu: &Mcu, path: &Path) -> Result<(), io::Error> { let mut file = File::create(path)?; self::mcu_module_doc(mcu, &mut file)?; writeln!(file)?; self::mcu_module_code(mcu, &mut file)?; Ok(()) } /// Gets the module name for an MCU. fn mcu_module_name(mcu: &Mcu) -> String { mcu.device.name.to_lowercase() } pub fn mcu_module_doc(mcu: &Mcu, w: &mut Write) -> Result<(), io::Error> { writeln!(w, "//! The AVR {} microcontroller", mcu.device.name)?; writeln!(w, "//!")?; writeln!(w, "//! # Variants")?; writeln!(w, "//! | | Pinout | Package | Operating temperature | Operating voltage | Max speed |")?; writeln!(w, "//! |--------|--------|---------|-----------------------|-------------------|-----------|")?; for variant in mcu.variants.iter() { let pinout_label = variant.pinout.as_ref().map(|p| p.replace('_', "-").to_owned()).unwrap_or_else(|| String::new()); let speed_mhz = variant.speed_max_hz / 1_000_000; writeln!(w, "//! | {} | {} | {} | {}°C - {}°C | {}V - {}V | {} MHz |", variant.name, pinout_label, variant.package, variant.temperature_min, variant.temperature_max, variant.voltage_min, variant.voltage_max, speed_mhz)?; } writeln!(w, "//!")?; Ok(()) } pub fn mcu_module_code(mcu: &Mcu, w: &mut Write) -> Result<(), io::Error> { let registers = ordered_registers(mcu); let register_bitfields = documentable_bitfields(&registers); writeln!(w, "#![allow(non_upper_case_globals)]")?; writeln!(w)?; for register in registers.iter() { let ty = integer_type(register.size); if !register.caption.is_empty() { let mut caption = register.caption.trim().to_owned(); if !caption.ends_with('.') { caption.push('.') } writeln!(w, "/// {}", caption)?; } else { writeln!(w, "/// {} register", register.name)?; } let mut bitfields = register_bitfields.iter().filter_map(|&(reg, bitfield)| { if reg == register { Some(bitfield) } else { None } }).peekable(); if bitfields.peek().is_some() { writeln!(w, "///")?; writeln!(w, "/// Bitfields:")?; writeln!(w, "///")?; writeln!(w, "/// | Name | Mask (binary) |")?; writeln!(w, "/// | ---- | ------------- |")?; while let Some(bitfield) = bitfields.next() { writeln!(w, "/// | {} | {:b} |", bitfield.name, bitfield.mask)?; } } writeln!(w, "pub const {}: *mut {} = {:#X} as *mut {};", register.name, ty, register.offset, ty)?; writeln!(w)?; } for (register, bitfield) in register_bitfields { let ty = integer_type(bitfield.size); writeln!(w, "/// Bitfield on register {}", register.name)?; writeln!(w, "pub const {}: *mut {} = {:#X} as *mut {};", bitfield.name, ty, bitfield.mask, ty)?; writeln!(w)?; } Ok(()) } fn ordered_registers(mcu: &Mcu) -> Vec<Register> { let mut unique_registers = self::unique_registers(mcu); insert_high_low_variants(&mut unique_registers); let mut registers: Vec<Register> = unique_registers.into_iter().map(|a| a.1).collect(); registers.sort_by_key(|r| r.offset); registers } fn insert_high_low_variants(registers: &mut HashMap<String, Register>) { let wide_registers: Vec<_> = registers.values() .filter(|r| r.size == 2) .cloned() .collect(); for r in wide_registers { let (high, low) = high_low_variants(&r); if !registers.contains_key(&high.name) { registers.insert(high.name.clone(), high); } if !registers.contains_key(&low.name) { registers.insert(low.name.clone(), low); } } } fn high_low_variants(r: &Register) -> (Register, Register) { assert_eq!(2, r.size, "only 16-bit registers have high low variants"); ( Register { name: r.name.clone() + "H", caption: r.caption.clone() + " high byte", offset: r.offset + 1, size: r.size / 2, mask: None, bitfields: Vec::new(), // these are already
// these are already in parent. rw: r.rw.clone() }, Register { name: r.name.clone() + "L", caption: r.caption.clone() + " low byte", offset: r.offset + 0, size: r.size / 2, mask: None, bitfields: Vec::new(), // these are already in parent. rw: r.rw.clone() }, ) } fn unique_registers(mcu: &Mcu) -> HashMap<String, Register> { let mut result = HashMap::new(); for module in mcu.modules.iter() { for register_group in module.register_groups.iter() { for register in register_group.registers.iter() { // Check if we've already seen this register. // Remove it if so and combine it with the current Register. let r: Register = if let Some(ref existing) = result.remove(&register.name) { register.union(existing) } else {
result.insert(r.name.clone(), r); } } } result } /// Gets the integer type of a specified width. fn integer_type(byte_count: u32) -> &'static str { match byte_count { 1 => "u8", 2 => "u16", 4 => "u32", _ => panic!("failed to get type of {}", byte_count), } } /// Collects all bitfields for a set of registers that are documentable. /// /// Some pack files (such as the one for atmega328p) do not have unique /// bitfield names. /// /// For example, all of the timer control registers (`TCCR0A`, `TCCR1B`, ...) /// on the ATmega328p individually define bitfields named `WGM1` with /// different masks but a shared name. Note that even though these fields /// are in the packfiles, they do not appear in AVR-GCC's `iom328p.h`. /// /// This function specifically considers bitfields with unique /// names as documentable. /// /// In the case where we have an ambiguous name for a bitfield, it /// should be skipped. fn documentable_bitfields(registers: &[Register]) -> Vec<(&Register, &Bitfield)> { let register_names: HashSet<&str> = registers.iter().map(|r| &r.name[..]).collect(); // A hash map of bitfield names to possible instantiations. let mut history: HashMap<&str, Vec<(&Register, &Bitfield)>> = HashMap::new(); for register in registers.iter() { for bitfield in register.bitfields.iter() { let bitfields = history.entry(&bitfield.name).or_insert_with(|| Vec::new()); // Record this instantiation of the bitfield name. bitfields.push((register, bitfield)); } } // Convert the hash map to a list and sort it so it is deterministic. let mut register_bitfields: Vec<_> = history.into_iter().map(|(_, register_bitfields)| register_bitfields).collect(); register_bitfields.sort_by_key(|register_bitfields| &register_bitfields[0].0.name); let unique_bitfields = register_bitfields.into_iter().filter_map(|register_bitfields| { if register_bitfields.len() == 1 { Some(register_bitfields.into_iter().next().unwrap()) } else { None } }); // Skip bitmasks that cover all bits or share the same name as their parent register. // There are strange cases like this in the packfiles. let bitfields = unique_bitfields.filter(|&(register, bitfield)| { let full_mask = match register.size { 1 => 0xff, 2 => 0xffff, _ => panic!("register is too large"), }; bitfield.mask != full_mask && bitfield.name != register.name && !register_names.contains(&bitfield.name[..]) }); bitfields.collect() } }
register.clone() };
conditional_block
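For a sense of what `mcu_module_code` produces, here is a hedged sketch of one generated register constant and the volatile access it is meant to enable. The `PORTB` name and the `0x25` address are invented for illustration; real names, offsets, and bitfield masks come from the vendor pack files.

// Hypothetical output of `mcu_module_code` for one 8-bit register:
pub const PORTB: *mut u8 = 0x25 as *mut u8;

fn toggle_portb_bit0() {
    unsafe {
        // Memory-mapped registers need volatile reads/writes so the
        // compiler cannot cache or elide the accesses.
        let old = core::ptr::read_volatile(PORTB);
        core::ptr::write_volatile(PORTB, old ^ 0x01);
    }
}

fn main() {
    // toggle_portb_bit0() is only meaningful on a real AVR target,
    // where address 0x25 is mapped I/O.
}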
eth_pubsub.rs
// Copyright 2015-2019 Parity Technologies (UK) Ltd. // This file is part of Parity Ethereum. // Parity Ethereum is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // Parity Ethereum is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>. //! Eth PUB-SUB rpc implementation. use std::sync::{Arc, Weak}; use std::collections::BTreeMap; use jsonrpc_core::{BoxFuture, Result, Error}; use jsonrpc_core::futures::{self, Future, IntoFuture, Stream, sync::mpsc}; use jsonrpc_pubsub::typed::{Sink, Subscriber}; use jsonrpc_pubsub::SubscriptionId; use v1::helpers::{errors, limit_logs, Subscribers}; use v1::helpers::light_fetch::LightFetch; use v1::metadata::Metadata; use v1::traits::EthPubSub; use v1::types::{pubsub, RichHeader, Log}; use sync::{SyncState, Notification}; use client_traits::{BlockChainClient, ChainNotify}; use ethereum_types::H256; use light::cache::Cache; use light::client::{LightChainClient, LightChainNotify}; use light::on_demand::OnDemandRequester; use parity_runtime::Executor; use parking_lot::{RwLock, Mutex}; use sync::{LightSyncProvider, LightNetworkDispatcher, ManageNetwork}; use types::{ chain_notify::{NewBlocks, ChainRouteType}, ids::BlockId, encoded, filter::Filter as EthFilter, }; type Client = Sink<pubsub::Result>; /// Eth PubSub implementation. pub struct EthPubSubClient<C> { handler: Arc<ChainNotificationHandler<C>>, heads_subscribers: Arc<RwLock<Subscribers<Client>>>, logs_subscribers: Arc<RwLock<Subscribers<(Client, EthFilter)>>>, transactions_subscribers: Arc<RwLock<Subscribers<Client>>>, sync_subscribers: Arc<RwLock<Subscribers<Client>>>, } impl<C> EthPubSubClient<C> where C: 'static + Send + Sync { /// Adds a sync notification channel to the pubsub client. pub fn add_sync_notifier<F>(&mut self, receiver: Notification<SyncState>, f: F) where F: 'static + Fn(SyncState) -> Option<pubsub::PubSubSyncStatus> + Send { let weak_handler = Arc::downgrade(&self.handler); self.handler.executor.spawn( receiver.for_each(move |state| { if let Some(status) = f(state) { if let Some(handler) = weak_handler.upgrade() { handler.notify_syncing(status); return Ok(()) } } Err(()) }) ) } } impl<C> EthPubSubClient<C> where C: 'static + Send + Sync { /// Creates a new `EthPubSubClient`.
pub fn new(client: Arc<C>, executor: Executor, pool_receiver: mpsc::UnboundedReceiver<Arc<Vec<H256>>>) -> Self { let heads_subscribers = Arc::new(RwLock::new(Subscribers::default())); let logs_subscribers = Arc::new(RwLock::new(Subscribers::default())); let transactions_subscribers = Arc::new(RwLock::new(Subscribers::default())); let sync_subscribers = Arc::new(RwLock::new(Subscribers::default())); let handler = Arc::new(ChainNotificationHandler { client, executor, heads_subscribers: heads_subscribers.clone(), logs_subscribers: logs_subscribers.clone(), transactions_subscribers: transactions_subscribers.clone(), sync_subscribers: sync_subscribers.clone(), }); let handler2 = Arc::downgrade(&handler); handler.executor.spawn(pool_receiver .for_each(move |hashes| { if let Some(handler2) = handler2.upgrade() { handler2.notify_new_transactions(&hashes.to_vec()); return Ok(()) } Err(()) }) ); EthPubSubClient { handler, sync_subscribers, heads_subscribers, logs_subscribers, transactions_subscribers, } } /// Returns a chain notification handler. pub fn handler(&self) -> Weak<ChainNotificationHandler<C>> { Arc::downgrade(&self.handler) } } impl<S, OD> EthPubSubClient<LightFetch<S, OD>> where S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static, OD: OnDemandRequester + 'static { /// Creates a new `EthPubSubClient` for `LightClient`. pub fn light( client: Arc<dyn LightChainClient>, on_demand: Arc<OD>, sync: Arc<S>, cache: Arc<Mutex<Cache>>, executor: Executor, gas_price_percentile: usize, pool_receiver: mpsc::UnboundedReceiver<Arc<Vec<H256>>> ) -> Self { let fetch = LightFetch { client, on_demand, sync, cache, gas_price_percentile, }; EthPubSubClient::new(Arc::new(fetch), executor, pool_receiver) } } /// PubSub Notification handler. pub struct ChainNotificationHandler<C> { client: Arc<C>, executor: Executor, heads_subscribers: Arc<RwLock<Subscribers<Client>>>, logs_subscribers: Arc<RwLock<Subscribers<(Client, EthFilter)>>>, transactions_subscribers: Arc<RwLock<Subscribers<Client>>>, sync_subscribers: Arc<RwLock<Subscribers<Client>>>, } impl<C> ChainNotificationHandler<C> { fn notify(executor: &Executor, subscriber: &Client, result: pubsub::Result) { executor.spawn(subscriber .notify(Ok(result)) .map(|_| ()) .map_err(|e| warn!(target: "rpc", "Unable to send notification: {}", e)) ); } fn notify_heads(&self, headers: &[(encoded::Header, BTreeMap<String, String>)]) { for subscriber in self.heads_subscribers.read().values() { for &(ref header, ref extra_info) in headers { Self::notify(&self.executor, subscriber, pubsub::Result::Header(Box::new(RichHeader { inner: header.into(), extra_info: extra_info.clone(), }))); } } } fn notify_syncing(&self, sync_status: pubsub::PubSubSyncStatus) { for subscriber in self.sync_subscribers.read().values() { Self::notify(&self.executor, subscriber, pubsub::Result::SyncState(sync_status.clone())); } } fn notify_logs<F, T, Ex>(&self, enacted: &[(H256, Ex)], logs: F) where F: Fn(EthFilter, &Ex) -> T, Ex: Send, T: IntoFuture<Item = Vec<Log>, Error = Error>, T::Future: Send + 'static, { for &(ref subscriber, ref filter) in self.logs_subscribers.read().values() { let logs = futures::future::join_all(enacted .iter()
logs(filter, ex).into_future() }) .collect::<Vec<_>>() ); let limit = filter.limit; let executor = self.executor.clone(); let subscriber = subscriber.clone(); self.executor.spawn(logs .map(move |logs| { let logs = logs.into_iter().flat_map(|log| log).collect(); for log in limit_logs(logs, limit) { Self::notify(&executor, &subscriber, pubsub::Result::Log(Box::new(log))) } }) .map_err(|e| warn!("Unable to fetch latest logs: {:?}", e)) ); } } /// Notify all subscribers about new transaction hashes. fn notify_new_transactions(&self, hashes: &[H256]) { for subscriber in self.transactions_subscribers.read().values() { for hash in hashes { Self::notify(&self.executor, subscriber, pubsub::Result::TransactionHash(*hash)); } } } } /// A light client wrapper struct. pub trait LightClient: Send + Sync { /// Get a recent block header. fn block_header(&self, id: BlockId) -> Option<encoded::Header>; /// Fetch logs. fn logs(&self, filter: EthFilter) -> BoxFuture<Vec<Log>>; } impl<S, OD> LightClient for LightFetch<S, OD> where S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static, OD: OnDemandRequester + 'static { fn block_header(&self, id: BlockId) -> Option<encoded::Header> { self.client.block_header(id) } fn logs(&self, filter: EthFilter) -> BoxFuture<Vec<Log>> { Box::new(LightFetch::logs(self, filter)) as BoxFuture<_> } } impl<C: LightClient> LightChainNotify for ChainNotificationHandler<C> { fn new_headers(&self, enacted: &[H256]) { let headers = enacted .iter() .filter_map(|hash| self.client.block_header(BlockId::Hash(*hash))) .map(|header| (header, Default::default())) .collect::<Vec<_>>(); self.notify_heads(&headers); self.notify_logs(&enacted.iter().map(|h| (*h, ())).collect::<Vec<_>>(), |filter, _| self.client.logs(filter)) } } impl<C: BlockChainClient> ChainNotify for ChainNotificationHandler<C> { fn new_blocks(&self, new_blocks: NewBlocks) { if self.heads_subscribers.read().is_empty() && self.logs_subscribers.read().is_empty() { return } const EXTRA_INFO_PROOF: &str = "Object exists in the blockchain (fetched earlier), extra_info is always available if object exists; qed"; let headers = new_blocks.route.route() .iter() .filter_map(|&(hash, ref typ)| { match typ { ChainRouteType::Retracted => None, ChainRouteType::Enacted => self.client.block_header(BlockId::Hash(hash)) } }) .map(|header| { let hash = header.hash(); (header, self.client.block_extra_info(BlockId::Hash(hash)).expect(EXTRA_INFO_PROOF)) }) .collect::<Vec<_>>(); // Headers self.notify_heads(&headers); // We notify about enacted and retracted logs in the order given by the route.
self.notify_logs(new_blocks.route.route(), |filter, ex| { match ex { ChainRouteType::Enacted => Ok(self.client.logs(filter).unwrap_or_default().into_iter().map(Into::into).collect()), ChainRouteType::Retracted => Ok(self.client.logs(filter).unwrap_or_default().into_iter().map(Into::into).map(|mut log: Log| { log.log_type = "removed".into(); log.removed = true; log }).collect()), } }); } } impl<C: Send + Sync + 'static> EthPubSub for EthPubSubClient<C> { type Metadata = Metadata; fn subscribe( &self, _meta: Metadata, subscriber: Subscriber<pubsub::Result>, kind: pubsub::Kind, params: Option<pubsub::Params>, ) { let error = match (kind, params) { (pubsub::Kind::NewHeads, None) => { self.heads_subscribers.write().push(subscriber); return; }, (pubsub::Kind::Syncing, None) => { self.sync_subscribers.write().push(subscriber); return; }, (pubsub::Kind::NewHeads, _) => { errors::invalid_params("newHeads", "Expected no parameters.") }, (pubsub::Kind::Logs, Some(pubsub::Params::Logs(filter))) => { match filter.try_into() { Ok(filter) => { self.logs_subscribers.write().push(subscriber, filter); return; }, Err(err) => err, } }, (pubsub::Kind::Logs, _) => { errors::invalid_params("logs", "Expected a filter object.") }, (pubsub::Kind::NewPendingTransactions, None) => { self.transactions_subscribers.write().push(subscriber); return; }, (pubsub::Kind::NewPendingTransactions, _) => { errors::invalid_params("newPendingTransactions", "Expected no parameters.") }, _ => { errors::unimplemented(None) }, }; let _ = subscriber.reject(error); } fn unsubscribe(&self, _: Option<Self::Metadata>, id: SubscriptionId) -> Result<bool> { let res = self.heads_subscribers.write().remove(&id).is_some(); let res2 = self.logs_subscribers.write().remove(&id).is_some(); let res3 = self.transactions_subscribers.write().remove(&id).is_some(); let res4 = self.sync_subscribers.write().remove(&id).is_some(); Ok(res || res2 || res3 || res4) } }
.map(|&(hash, ref ex)| { let mut filter = filter.clone(); filter.from_block = BlockId::Hash(hash); filter.to_block = filter.from_block;
random_line_split
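All of the `notify_*` methods above share one fan-out shape: take a read lock on the subscriber map, then push one message per subscriber. Below is a std-only sketch of that pattern, with an `mpsc` channel standing in for the jsonrpc `Sink` and a plain `HashMap` for the `Subscribers` helper; the `Handler` type and its fields are illustrative, not the Parity API.

use std::collections::HashMap;
use std::sync::{mpsc, RwLock};

struct Handler {
    // Stand-in for `Arc<RwLock<Subscribers<Client>>>` in the real code.
    heads_subscribers: RwLock<HashMap<u64, mpsc::Sender<String>>>,
}

impl Handler {
    fn notify_heads(&self, headers: &[String]) {
        // Read lock: notifications can fan out concurrently, while
        // subscribe/unsubscribe would take the write lock.
        for sink in self.heads_subscribers.read().unwrap().values() {
            for header in headers {
                // A failed send only means the subscriber went away.
                let _ = sink.send(header.clone());
            }
        }
    }
}

fn main() {
    let (tx, rx) = mpsc::channel();
    let handler = Handler { heads_subscribers: RwLock::new(HashMap::from([(1, tx)])) };
    handler.notify_heads(&["header #1".to_owned()]);
    assert_eq!(rx.recv().unwrap(), "header #1");
}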
{ if let Some(handler) = weak_handler.upgrade() { handler.notify_syncing(status); return Ok(()) } }
conditional_block
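The `EthPubSubClient::new` constructor above spawns a transaction-pool listener that holds only a `Weak` pointer to the notification handler, so the background task cannot keep the client alive: once the last `Arc` is dropped, `upgrade()` returns `None` and the task ends via the `Err(())` branch. Below is a minimal sketch of that pattern, assuming std threads and channels in place of the futures 0.1 executor; `Handler` and the `u64` hash payload are illustrative stand-ins, not the real types.

```rust
use std::sync::{mpsc, Arc, Weak};
use std::thread;
use std::time::Duration;

struct Handler;

impl Handler {
    fn notify_new_transactions(&self, hashes: &[u64]) {
        println!("notifying {} hashes", hashes.len());
    }
}

// Hold only a Weak handle in the background task: when the last Arc is
// dropped, upgrade() returns None and the loop exits, mirroring the
// Err(()) termination of the for_each future in the original.
fn spawn_pool_listener(handler: &Arc<Handler>, rx: mpsc::Receiver<Vec<u64>>) -> thread::JoinHandle<()> {
    let weak: Weak<Handler> = Arc::downgrade(handler);
    thread::spawn(move || {
        for hashes in rx {
            match weak.upgrade() {
                Some(h) => h.notify_new_transactions(&hashes),
                None => break, // client dropped; stop listening
            }
        }
    })
}

fn main() {
    let (tx, rx) = mpsc::channel();
    let handler = Arc::new(Handler);
    let _listener = spawn_pool_listener(&handler, rx);
    tx.send(vec![1, 2, 3]).unwrap();
    thread::sleep(Duration::from_millis(50)); // let the listener drain
}
```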
eth_pubsub.rs
// Copyright 2015-2019 Parity Technologies (UK) Ltd. // This file is part of Parity Ethereum. // Parity Ethereum is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // Parity Ethereum is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>. //! Eth PUB-SUB rpc implementation. use std::sync::{Arc, Weak}; use std::collections::BTreeMap; use jsonrpc_core::{BoxFuture, Result, Error}; use jsonrpc_core::futures::{self, Future, IntoFuture, Stream, sync::mpsc}; use jsonrpc_pubsub::typed::{Sink, Subscriber}; use jsonrpc_pubsub::SubscriptionId; use v1::helpers::{errors, limit_logs, Subscribers, }; use v1::helpers::light_fetch::LightFetch; use v1::metadata::Metadata; use v1::traits::EthPubSub; use v1::types::{pubsub, RichHeader, Log}; use sync::{SyncState, Notification}; use client_traits::{BlockChainClient, ChainNotify}; use ethereum_types::H256; use light::cache::Cache; use light::client::{LightChainClient, LightChainNotify}; use light::on_demand::OnDemandRequester; use parity_runtime::Executor; use parking_lot::{RwLock, Mutex}; use sync::{LightSyncProvider, LightNetworkDispatcher, ManageNetwork}; use types::{ chain_notify::{NewBlocks, ChainRouteType}, ids::BlockId, encoded, filter::Filter as EthFilter, }; type Client = Sink<pubsub::Result>; /// Eth PubSub implementation. pub struct EthPubSubClient<C> { handler: Arc<ChainNotificationHandler<C>>, heads_subscribers: Arc<RwLock<Subscribers<Client>>>, logs_subscribers: Arc<RwLock<Subscribers<(Client, EthFilter)>>>, transactions_subscribers: Arc<RwLock<Subscribers<Client>>>, sync_subscribers: Arc<RwLock<Subscribers<Client>>>, } impl<C> EthPubSubClient<C> where C:'static + Send + Sync { /// adds a sync notification channel to the pubsub client pub fn add_sync_notifier<F>(&mut self, receiver: Notification<SyncState>, f: F) where F:'static + Fn(SyncState) -> Option<pubsub::PubSubSyncStatus> + Send { let weak_handler = Arc::downgrade(&self.handler); self.handler.executor.spawn( receiver.for_each(move |state| { if let Some(status) = f(state) { if let Some(handler) = weak_handler.upgrade() { handler.notify_syncing(status); return Ok(()) } } Err(()) }) ) } } impl<C> EthPubSubClient<C> where C:'static + Send + Sync { /// Creates new `EthPubSubClient`. 
pub fn new(client: Arc<C>, executor: Executor, pool_receiver: mpsc::UnboundedReceiver<Arc<Vec<H256>>>) -> Self { let heads_subscribers = Arc::new(RwLock::new(Subscribers::default())); let logs_subscribers = Arc::new(RwLock::new(Subscribers::default())); let transactions_subscribers = Arc::new(RwLock::new(Subscribers::default())); let sync_subscribers = Arc::new(RwLock::new(Subscribers::default())); let handler = Arc::new(ChainNotificationHandler { client, executor, heads_subscribers: heads_subscribers.clone(), logs_subscribers: logs_subscribers.clone(), transactions_subscribers: transactions_subscribers.clone(), sync_subscribers: sync_subscribers.clone(), }); let handler2 = Arc::downgrade(&handler); handler.executor.spawn(pool_receiver .for_each(move |hashes| { if let Some(handler2) = handler2.upgrade() { handler2.notify_new_transactions(&hashes.to_vec()); return Ok(()) } Err(()) }) ); EthPubSubClient { handler, sync_subscribers, heads_subscribers, logs_subscribers, transactions_subscribers, } } /// Returns a chain notification handler. pub fn handler(&self) -> Weak<ChainNotificationHandler<C>> { Arc::downgrade(&self.handler) } } impl<S, OD> EthPubSubClient<LightFetch<S, OD>> where S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork +'static, OD: OnDemandRequester +'static { /// Creates a new `EthPubSubClient` for `LightClient`. pub fn light( client: Arc<dyn LightChainClient>, on_demand: Arc<OD>, sync: Arc<S>, cache: Arc<Mutex<Cache>>, executor: Executor, gas_price_percentile: usize, pool_receiver: mpsc::UnboundedReceiver<Arc<Vec<H256>>> ) -> Self { let fetch = LightFetch { client, on_demand, sync, cache, gas_price_percentile, }; EthPubSubClient::new(Arc::new(fetch), executor, pool_receiver) } } /// PubSub Notification handler. pub struct ChainNotificationHandler<C> { client: Arc<C>, executor: Executor, heads_subscribers: Arc<RwLock<Subscribers<Client>>>, logs_subscribers: Arc<RwLock<Subscribers<(Client, EthFilter)>>>, transactions_subscribers: Arc<RwLock<Subscribers<Client>>>, sync_subscribers: Arc<RwLock<Subscribers<Client>>>, } impl<C> ChainNotificationHandler<C> { fn notify(executor: &Executor, subscriber: &Client, result: pubsub::Result) { executor.spawn(subscriber .notify(Ok(result)) .map(|_| ()) .map_err(|e| warn!(target: "rpc", "Unable to send notification: {}", e)) ); } fn notify_heads(&self, headers: &[(encoded::Header, BTreeMap<String, String>)]) { for subscriber in self.heads_subscribers.read().values() { for &(ref header, ref extra_info) in headers { Self::notify(&self.executor, subscriber, pubsub::Result::Header(Box::new(RichHeader { inner: header.into(), extra_info: extra_info.clone(), }))); } } } fn notify_syncing(&self, sync_status: pubsub::PubSubSyncStatus) { for subscriber in self.sync_subscribers.read().values() { Self::notify(&self.executor, subscriber, pubsub::Result::SyncState(sync_status.clone())); } } fn notify_logs<F, T, Ex>(&self, enacted: &[(H256, Ex)], logs: F) where F: Fn(EthFilter, &Ex) -> T, Ex: Send, T: IntoFuture<Item = Vec<Log>, Error = Error>, T::Future: Send +'static, { for &(ref subscriber, ref filter) in self.logs_subscribers.read().values() { let logs = futures::future::join_all(enacted .iter() .map(|&(hash, ref ex)| { let mut filter = filter.clone(); filter.from_block = BlockId::Hash(hash); filter.to_block = filter.from_block; logs(filter, ex).into_future() }) .collect::<Vec<_>>() ); let limit = filter.limit; let executor = self.executor.clone(); let subscriber = subscriber.clone(); self.executor.spawn(logs .map(move |logs| { let logs = 
logs.into_iter().flat_map(|log| log).collect(); for log in limit_logs(logs, limit) { Self::notify(&executor, &subscriber, pubsub::Result::Log(Box::new(log))) } }) .map_err(|e| warn!("Unable to fetch latest logs: {:?}", e)) ); } } /// Notify all subscribers about new transaction hashes. fn notify_new_transactions(&self, hashes: &[H256]) { for subscriber in self.transactions_subscribers.read().values() { for hash in hashes { Self::notify(&self.executor, subscriber, pubsub::Result::TransactionHash(*hash)); } } } } /// A light client wrapper struct. pub trait LightClient: Send + Sync { /// Get a recent block header. fn block_header(&self, id: BlockId) -> Option<encoded::Header>; /// Fetch logs. fn logs(&self, filter: EthFilter) -> BoxFuture<Vec<Log>>; } impl<S, OD> LightClient for LightFetch<S, OD> where S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork +'static, OD: OnDemandRequester +'static { fn
(&self, id: BlockId) -> Option<encoded::Header> { self.client.block_header(id) } fn logs(&self, filter: EthFilter) -> BoxFuture<Vec<Log>> { Box::new(LightFetch::logs(self, filter)) as BoxFuture<_> } } impl<C: LightClient> LightChainNotify for ChainNotificationHandler<C> { fn new_headers(&self, enacted: &[H256]) { let headers = enacted .iter() .filter_map(|hash| self.client.block_header(BlockId::Hash(*hash))) .map(|header| (header, Default::default())) .collect::<Vec<_>>(); self.notify_heads(&headers); self.notify_logs(&enacted.iter().map(|h| (*h, ())).collect::<Vec<_>>(), |filter, _| self.client.logs(filter)) } } impl<C: BlockChainClient> ChainNotify for ChainNotificationHandler<C> { fn new_blocks(&self, new_blocks: NewBlocks) { if self.heads_subscribers.read().is_empty() && self.logs_subscribers.read().is_empty() { return } const EXTRA_INFO_PROOF: &str = "Object exists in in blockchain (fetched earlier), extra_info is always available if object exists; qed"; let headers = new_blocks.route.route() .iter() .filter_map(|&(hash, ref typ)| { match typ { ChainRouteType::Retracted => None, ChainRouteType::Enacted => self.client.block_header(BlockId::Hash(hash)) } }) .map(|header| { let hash = header.hash(); (header, self.client.block_extra_info(BlockId::Hash(hash)).expect(EXTRA_INFO_PROOF)) }) .collect::<Vec<_>>(); // Headers self.notify_heads(&headers); // We notify logs enacting and retracting as the order in route. self.notify_logs(new_blocks.route.route(), |filter, ex| { match ex { ChainRouteType::Enacted => Ok(self.client.logs(filter).unwrap_or_default().into_iter().map(Into::into).collect()), ChainRouteType::Retracted => Ok(self.client.logs(filter).unwrap_or_default().into_iter().map(Into::into).map(|mut log: Log| { log.log_type = "removed".into(); log.removed = true; log }).collect()), } }); } } impl<C: Send + Sync +'static> EthPubSub for EthPubSubClient<C> { type Metadata = Metadata; fn subscribe( &self, _meta: Metadata, subscriber: Subscriber<pubsub::Result>, kind: pubsub::Kind, params: Option<pubsub::Params>, ) { let error = match (kind, params) { (pubsub::Kind::NewHeads, None) => { self.heads_subscribers.write().push(subscriber); return; }, (pubsub::Kind::Syncing, None) => { self.sync_subscribers.write().push(subscriber); return; }, (pubsub::Kind::NewHeads, _) => { errors::invalid_params("newHeads", "Expected no parameters.") }, (pubsub::Kind::Logs, Some(pubsub::Params::Logs(filter))) => { match filter.try_into() { Ok(filter) => { self.logs_subscribers.write().push(subscriber, filter); return; }, Err(err) => err, } }, (pubsub::Kind::Logs, _) => { errors::invalid_params("logs", "Expected a filter object.") }, (pubsub::Kind::NewPendingTransactions, None) => { self.transactions_subscribers.write().push(subscriber); return; }, (pubsub::Kind::NewPendingTransactions, _) => { errors::invalid_params("newPendingTransactions", "Expected no parameters.") }, _ => { errors::unimplemented(None) }, }; let _ = subscriber.reject(error); } fn unsubscribe(&self, _: Option<Self::Metadata>, id: SubscriptionId) -> Result<bool> { let res = self.heads_subscribers.write().remove(&id).is_some(); let res2 = self.logs_subscribers.write().remove(&id).is_some(); let res3 = self.transactions_subscribers.write().remove(&id).is_some(); let res4 = self.sync_subscribers.write().remove(&id).is_some(); Ok(res || res2 || res3 || res4) } }
block_header
identifier_name
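`notify_logs` above clones each subscriber's filter once per enacted block and pins both `from_block` and `to_block` to that block's hash, so every future joined by `futures::future::join_all` fetches logs for exactly one block. A minimal sketch of that specialization step, with simplified stand-ins for `Filter` and `BlockId`:

```rust
// Simplified stand-ins for the types used by `notify_logs`.
#[derive(Clone, Copy, Debug, PartialEq)]
enum BlockId {
    Latest,
    Hash(u64),
}

#[derive(Clone, Debug)]
struct Filter {
    from_block: BlockId,
    to_block: BlockId,
}

// One filter per enacted block, pinned to that block's hash, as in the
// per-hash closure inside `notify_logs`.
fn specialize(base: &Filter, enacted: &[u64]) -> Vec<Filter> {
    enacted
        .iter()
        .map(|&hash| {
            let mut f = base.clone();
            f.from_block = BlockId::Hash(hash);
            f.to_block = f.from_block;
            f
        })
        .collect()
}

fn main() {
    let base = Filter { from_block: BlockId::Latest, to_block: BlockId::Latest };
    let per_block = specialize(&base, &[1, 2]);
    assert_eq!(per_block.len(), 2);
    assert_eq!(per_block[0].from_block, BlockId::Hash(1));
    assert_eq!(per_block[1].to_block, BlockId::Hash(2));
}
```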
eth_pubsub.rs
// Copyright 2015-2019 Parity Technologies (UK) Ltd. // This file is part of Parity Ethereum. // Parity Ethereum is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // Parity Ethereum is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>. //! Eth PUB-SUB rpc implementation. use std::sync::{Arc, Weak}; use std::collections::BTreeMap; use jsonrpc_core::{BoxFuture, Result, Error}; use jsonrpc_core::futures::{self, Future, IntoFuture, Stream, sync::mpsc}; use jsonrpc_pubsub::typed::{Sink, Subscriber}; use jsonrpc_pubsub::SubscriptionId; use v1::helpers::{errors, limit_logs, Subscribers, }; use v1::helpers::light_fetch::LightFetch; use v1::metadata::Metadata; use v1::traits::EthPubSub; use v1::types::{pubsub, RichHeader, Log}; use sync::{SyncState, Notification}; use client_traits::{BlockChainClient, ChainNotify}; use ethereum_types::H256; use light::cache::Cache; use light::client::{LightChainClient, LightChainNotify}; use light::on_demand::OnDemandRequester; use parity_runtime::Executor; use parking_lot::{RwLock, Mutex}; use sync::{LightSyncProvider, LightNetworkDispatcher, ManageNetwork}; use types::{ chain_notify::{NewBlocks, ChainRouteType}, ids::BlockId, encoded, filter::Filter as EthFilter, }; type Client = Sink<pubsub::Result>; /// Eth PubSub implementation. pub struct EthPubSubClient<C> { handler: Arc<ChainNotificationHandler<C>>, heads_subscribers: Arc<RwLock<Subscribers<Client>>>, logs_subscribers: Arc<RwLock<Subscribers<(Client, EthFilter)>>>, transactions_subscribers: Arc<RwLock<Subscribers<Client>>>, sync_subscribers: Arc<RwLock<Subscribers<Client>>>, } impl<C> EthPubSubClient<C> where C:'static + Send + Sync { /// adds a sync notification channel to the pubsub client pub fn add_sync_notifier<F>(&mut self, receiver: Notification<SyncState>, f: F) where F:'static + Fn(SyncState) -> Option<pubsub::PubSubSyncStatus> + Send { let weak_handler = Arc::downgrade(&self.handler); self.handler.executor.spawn( receiver.for_each(move |state| { if let Some(status) = f(state) { if let Some(handler) = weak_handler.upgrade() { handler.notify_syncing(status); return Ok(()) } } Err(()) }) ) } } impl<C> EthPubSubClient<C> where C:'static + Send + Sync { /// Creates new `EthPubSubClient`. 
pub fn new(client: Arc<C>, executor: Executor, pool_receiver: mpsc::UnboundedReceiver<Arc<Vec<H256>>>) -> Self { let heads_subscribers = Arc::new(RwLock::new(Subscribers::default())); let logs_subscribers = Arc::new(RwLock::new(Subscribers::default())); let transactions_subscribers = Arc::new(RwLock::new(Subscribers::default())); let sync_subscribers = Arc::new(RwLock::new(Subscribers::default())); let handler = Arc::new(ChainNotificationHandler { client, executor, heads_subscribers: heads_subscribers.clone(), logs_subscribers: logs_subscribers.clone(), transactions_subscribers: transactions_subscribers.clone(), sync_subscribers: sync_subscribers.clone(), }); let handler2 = Arc::downgrade(&handler); handler.executor.spawn(pool_receiver .for_each(move |hashes| { if let Some(handler2) = handler2.upgrade() { handler2.notify_new_transactions(&hashes.to_vec()); return Ok(()) } Err(()) }) ); EthPubSubClient { handler, sync_subscribers, heads_subscribers, logs_subscribers, transactions_subscribers, } } /// Returns a chain notification handler. pub fn handler(&self) -> Weak<ChainNotificationHandler<C>> { Arc::downgrade(&self.handler) } } impl<S, OD> EthPubSubClient<LightFetch<S, OD>> where S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork +'static, OD: OnDemandRequester +'static { /// Creates a new `EthPubSubClient` for `LightClient`. pub fn light( client: Arc<dyn LightChainClient>, on_demand: Arc<OD>, sync: Arc<S>, cache: Arc<Mutex<Cache>>, executor: Executor, gas_price_percentile: usize, pool_receiver: mpsc::UnboundedReceiver<Arc<Vec<H256>>> ) -> Self { let fetch = LightFetch { client, on_demand, sync, cache, gas_price_percentile, }; EthPubSubClient::new(Arc::new(fetch), executor, pool_receiver) } } /// PubSub Notification handler. pub struct ChainNotificationHandler<C> { client: Arc<C>, executor: Executor, heads_subscribers: Arc<RwLock<Subscribers<Client>>>, logs_subscribers: Arc<RwLock<Subscribers<(Client, EthFilter)>>>, transactions_subscribers: Arc<RwLock<Subscribers<Client>>>, sync_subscribers: Arc<RwLock<Subscribers<Client>>>, } impl<C> ChainNotificationHandler<C> { fn notify(executor: &Executor, subscriber: &Client, result: pubsub::Result) { executor.spawn(subscriber .notify(Ok(result)) .map(|_| ()) .map_err(|e| warn!(target: "rpc", "Unable to send notification: {}", e)) ); } fn notify_heads(&self, headers: &[(encoded::Header, BTreeMap<String, String>)]) { for subscriber in self.heads_subscribers.read().values() { for &(ref header, ref extra_info) in headers { Self::notify(&self.executor, subscriber, pubsub::Result::Header(Box::new(RichHeader { inner: header.into(), extra_info: extra_info.clone(), }))); } } } fn notify_syncing(&self, sync_status: pubsub::PubSubSyncStatus) { for subscriber in self.sync_subscribers.read().values() { Self::notify(&self.executor, subscriber, pubsub::Result::SyncState(sync_status.clone())); } } fn notify_logs<F, T, Ex>(&self, enacted: &[(H256, Ex)], logs: F) where F: Fn(EthFilter, &Ex) -> T, Ex: Send, T: IntoFuture<Item = Vec<Log>, Error = Error>, T::Future: Send +'static,
Self::notify(&executor, &subscriber, pubsub::Result::Log(Box::new(log))) } }) .map_err(|e| warn!("Unable to fetch latest logs: {:?}", e)) ); } } /// Notify all subscribers about new transaction hashes. fn notify_new_transactions(&self, hashes: &[H256]) { for subscriber in self.transactions_subscribers.read().values() { for hash in hashes { Self::notify(&self.executor, subscriber, pubsub::Result::TransactionHash(*hash)); } } } } /// A light client wrapper trait. pub trait LightClient: Send + Sync { /// Get a recent block header. fn block_header(&self, id: BlockId) -> Option<encoded::Header>; /// Fetch logs. fn logs(&self, filter: EthFilter) -> BoxFuture<Vec<Log>>; } impl<S, OD> LightClient for LightFetch<S, OD> where S: LightSyncProvider + LightNetworkDispatcher + ManageNetwork + 'static, OD: OnDemandRequester + 'static { fn block_header(&self, id: BlockId) -> Option<encoded::Header> { self.client.block_header(id) } fn logs(&self, filter: EthFilter) -> BoxFuture<Vec<Log>> { Box::new(LightFetch::logs(self, filter)) as BoxFuture<_> } } impl<C: LightClient> LightChainNotify for ChainNotificationHandler<C> { fn new_headers(&self, enacted: &[H256]) { let headers = enacted .iter() .filter_map(|hash| self.client.block_header(BlockId::Hash(*hash))) .map(|header| (header, Default::default())) .collect::<Vec<_>>(); self.notify_heads(&headers); self.notify_logs(&enacted.iter().map(|h| (*h, ())).collect::<Vec<_>>(), |filter, _| self.client.logs(filter)) } } impl<C: BlockChainClient> ChainNotify for ChainNotificationHandler<C> { fn new_blocks(&self, new_blocks: NewBlocks) { if self.heads_subscribers.read().is_empty() && self.logs_subscribers.read().is_empty() { return } const EXTRA_INFO_PROOF: &str = "Object exists in blockchain (fetched earlier), extra_info is always available if object exists; qed"; let headers = new_blocks.route.route() .iter() .filter_map(|&(hash, ref typ)| { match typ { ChainRouteType::Retracted => None, ChainRouteType::Enacted => self.client.block_header(BlockId::Hash(hash)) } }) .map(|header| { let hash = header.hash(); (header, self.client.block_extra_info(BlockId::Hash(hash)).expect(EXTRA_INFO_PROOF)) }) .collect::<Vec<_>>(); // Headers self.notify_heads(&headers); // We notify logs, enacted and retracted, in the order given by the route. 
self.notify_logs(new_blocks.route.route(), |filter, ex| { match ex { ChainRouteType::Enacted => Ok(self.client.logs(filter).unwrap_or_default().into_iter().map(Into::into).collect()), ChainRouteType::Retracted => Ok(self.client.logs(filter).unwrap_or_default().into_iter().map(Into::into).map(|mut log: Log| { log.log_type = "removed".into(); log.removed = true; log }).collect()), } }); } } impl<C: Send + Sync +'static> EthPubSub for EthPubSubClient<C> { type Metadata = Metadata; fn subscribe( &self, _meta: Metadata, subscriber: Subscriber<pubsub::Result>, kind: pubsub::Kind, params: Option<pubsub::Params>, ) { let error = match (kind, params) { (pubsub::Kind::NewHeads, None) => { self.heads_subscribers.write().push(subscriber); return; }, (pubsub::Kind::Syncing, None) => { self.sync_subscribers.write().push(subscriber); return; }, (pubsub::Kind::NewHeads, _) => { errors::invalid_params("newHeads", "Expected no parameters.") }, (pubsub::Kind::Logs, Some(pubsub::Params::Logs(filter))) => { match filter.try_into() { Ok(filter) => { self.logs_subscribers.write().push(subscriber, filter); return; }, Err(err) => err, } }, (pubsub::Kind::Logs, _) => { errors::invalid_params("logs", "Expected a filter object.") }, (pubsub::Kind::NewPendingTransactions, None) => { self.transactions_subscribers.write().push(subscriber); return; }, (pubsub::Kind::NewPendingTransactions, _) => { errors::invalid_params("newPendingTransactions", "Expected no parameters.") }, _ => { errors::unimplemented(None) }, }; let _ = subscriber.reject(error); } fn unsubscribe(&self, _: Option<Self::Metadata>, id: SubscriptionId) -> Result<bool> { let res = self.heads_subscribers.write().remove(&id).is_some(); let res2 = self.logs_subscribers.write().remove(&id).is_some(); let res3 = self.transactions_subscribers.write().remove(&id).is_some(); let res4 = self.sync_subscribers.write().remove(&id).is_some(); Ok(res || res2 || res3 || res4) } }
{ for &(ref subscriber, ref filter) in self.logs_subscribers.read().values() { let logs = futures::future::join_all(enacted .iter() .map(|&(hash, ref ex)| { let mut filter = filter.clone(); filter.from_block = BlockId::Hash(hash); filter.to_block = filter.from_block; logs(filter, ex).into_future() }) .collect::<Vec<_>>() ); let limit = filter.limit; let executor = self.executor.clone(); let subscriber = subscriber.clone(); self.executor.spawn(logs .map(move |logs| { let logs = logs.into_iter().flat_map(|log| log).collect(); for log in limit_logs(logs, limit) {
identifier_body
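In `new_blocks` above, retracted blocks are not simply dropped from log notification: their logs are re-emitted with `removed = true` and `log_type = "removed"` so subscribers can roll back state derived from the abandoned branch, while enacted logs pass through unchanged. A small sketch of that tagging rule, with simplified `Log` and `ChainRouteType` stand-ins:

```rust
#[derive(Debug)]
enum ChainRouteType {
    Enacted,
    Retracted,
}

#[derive(Debug, Clone)]
struct Log {
    log_type: String,
    removed: bool,
}

// Enacted logs pass through; retracted logs are re-labelled so
// subscribers can undo state derived from the abandoned branch.
fn mark(route: &ChainRouteType, logs: Vec<Log>) -> Vec<Log> {
    match route {
        ChainRouteType::Enacted => logs,
        ChainRouteType::Retracted => logs
            .into_iter()
            .map(|mut log| {
                log.log_type = "removed".into();
                log.removed = true;
                log
            })
            .collect(),
    }
}

fn main() {
    let logs = vec![Log { log_type: "mined".into(), removed: false }];
    let marked = mark(&ChainRouteType::Retracted, logs);
    assert!(marked[0].removed);
    assert_eq!(marked[0].log_type, "removed");
}
```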
time_client.rs
C2SLookupError(StoreError), TcpError(io::Error), TlsHandshakeError(io::Error), TlsSessionError(io::Error), NtskeProblem(ntske::NtskeProblem), NtskeNoCookies, CredentialSaveError(StoreError), CoreTickError(io::Error), CoreDepartureError(io::Error), UdpSocketError(io::Error), } impl RequestError { ///Level that this error should be logged at pub fn level(&self) -> log::Level { use log::Level::*; use RequestError::*; match self { ResolveError(_) => Warn, CookieLookupError(_) => Error, C2SLookupError(_) => Error, TcpError(_) => Warn, TlsHandshakeError(_) => Warn, TlsSessionError(_) => Warn, NtskeProblem(_) => Warn, NtskeNoCookies => Warn, CredentialSaveError(_) => Error, CoreTickError(_) => Error, CoreDepartureError(_) => Error, UdpSocketError(_) => Error, } } } impl fmt::Display for RequestError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { use RequestError::*; match self { ResolveError(e) => write!(f, "Resolving DNS: {}", e), CookieLookupError(e) => write!(f, "Looking up cookie from store: {}", e), C2SLookupError(e) => write!(f, "Looking up C2S key from store: {}", e), TcpError(e) => write!(f, "Establishing TCP connection for NTS-KE: {}", e), TlsHandshakeError(e) => write!(f, "During TLS handshake: {}", e), TlsSessionError(e) => write!(f, "In TLS session: {}", e), NtskeProblem(e) => write!(f, "In NTS-KE response: {:?}", e), NtskeNoCookies => write!(f, "NTS-KE succeeded but no cookies were returned"), CredentialSaveError(e) => write!(f, "Saving credentials to store: {}", e), CoreTickError(e) => write!(f, "Handling tick in core: {}", e), CoreDepartureError(e) => write!(f, "Updating origin timestamp: {}", e), UdpSocketError(e) => write!(f, "Sending on UDP socket: {}", e), } } } impl std::error::Error for RequestError {} pub fn serialize_time_request( out: &mut Vec<u8>, unique_id: &core::UniqueId, c2s: &Aes128SivKey, cookie: Vec<u8>, cookies_requested: usize, ) { let plaintext = wire::Request { num_cookies: cookies_requested as u32, }; trace!( "Encoding plaintext for time request {:x?}: {:?}", unique_id, plaintext ); let mut plaintext_serialized = Vec::with_capacity(plaintext.encoded_len()); plaintext .encode(&mut plaintext_serialized) .expect("Error encoding plaintext for time request"); let cookie_len = cookie.len(); let mut nonce = Aes128SivNonce::default(); rand::thread_rng().fill_bytes(nonce.as_mut_slice()); let ad = wire::RequestAd { unique_id: unique_id.to_vec(), cookie, }; trace!( "Encoding associated data for time request {:x?}: {:?}", unique_id, ad ); let mut ad_serialized = Vec::with_capacity(ad.encoded_len()); ad.encode(&mut ad_serialized) .expect("Error encoding associated data for time request"); let aead_c2s = Aes128SivAead::new(c2s); let ciphertext = aead_c2s .encrypt( &nonce, Payload { aad: &ad_serialized, msg: &plaintext_serialized, }, ) .expect("Error encrypting time request"); let padding = vec![0; cookie_len * cookies_requested.saturating_sub(1) + wire::EXTRA_PADDING]; let packet = wire::Packet { msg: Some(wire::packet::Msg::Request(wire::RequestEnvelope { nonce: nonce.to_vec(), ad: ad_serialized, ciphertext, padding, })), }; trace!( "Encoding packet for time request {:x?}: {:?}", unique_id, packet ); out.reserve(packet.encoded_len()); packet .encode(out) .expect("Error encoding packet for time request"); } ///Send a time request /// ///Resolve `peer_config.host` using `resolver`. Take keys and cookies /// from `secret_store`. If they aren't there, run NTS-KE to obtain /// them. 
Send a time request over `socket_mutex` and record in /// `core_state` that it's in flight. pub async fn send_time_request( resolver: &trust_dns_resolver::TokioAsyncResolver, socket: &tokio::net::UdpSocket, peer_name: &PeerName, peer_config: &PeerConfig, core_state: &RwLock<core::CoreState>, secret_store: &SecretStore, ) -> Result<(), RequestError> { let ip_addr = resolver .lookup_ip(peer_config.host.as_str()) .await .map_err(RequestError::ResolveError)? .into_iter() .next() .expect("Got empty iterator from DNS lookup"); debug!( "Resolved DNS for peer '{}': {} -> {}", peer_name, peer_config.host, ip_addr ); let peer_addr = SocketAddr::new(ip_addr, peer_config.port); //These two secret_store calls each use separate transactions, so // it's possible to get a cookie that doesn't correspond to to the // c2s key if the results of an NTS-KE exchange get committed in // between the two calls. This can be elicited in testing by // setting an extremely short polling interval. Preventing this // would be easy — just add a method to SecretStore that fetches // both the C2S key and the cookie in a single transaction — but // it wouldn't actually improve anything because the new S2C key // will still get committed right afterward and we won't be able // to decrypt the server's response. The problem is harmless in // any case because we'll just recover on the next tick. Worst // that happens is that NTS-KE gets run twice rather than just // once. let (c2s, cookie, cookies_left) = match ( secret_store .get_c2s_key(peer_name) .map_err(RequestError::C2SLookupError)?, secret_store .take_cookie(peer_name) .map_err(RequestError::CookieLookupError)?, ) { (Some(c2s), (Some(cookie), cookies_left)) => (c2s, cookie, cookies_left), _ => { let tcp_stream = net::TcpStream::connect(&peer_addr) .await .map_err(RequestError::TcpError)?; debug!( "TCP connection established for NTS-KE with peer '{}'", peer_name ); let mut tls_stream = peer_config .tls_connector .connect(peer_config.cert_name.as_ref(), tcp_stream) .await .map_err(RequestError::TlsHandshakeError)?; debug!("TLS handshake completed with peer '{}'", peer_name); let mut ntske_output = ntske::request_ntske(&mut tls_stream) .await .map_err(RequestError::TlsSessionError)? 
.map_err(RequestError::NtskeProblem)?; debug!("Successful NTS-KE with peer '{}'", peer_name); let my_cookie = ntske_output .cookies .pop() .ok_or(RequestError::NtskeNoCookies)?; let cookies_left = ntske_output.cookies.len(); secret_store .set_credentials( peer_name, &ntske_output.c2s, &ntske_output.s2c, ntske_output.cookies.as_slice(), ) .map_err(RequestError::CredentialSaveError)?; debug!( "Stored session keys and {} cookies for peer '{}'", cookies_left, peer_name ); (ntske_output.c2s, my_cookie, cookies_left) } }; let query = core_state .write() .unwrap() .on_tick(peer_name, &mut rand::thread_rng()) .map_err(RequestError::CoreTickError)?; let cookies_requested = if cookies_left > 7 { 1 } else { 8 - cookies_left }; let mut send_buf = Vec::new(); serialize_time_request( &mut send_buf, &query.unique_id, &c2s, cookie, cookies_requested, ); core_state .write() .unwrap() .on_departure(peer_name) .map_err(RequestError::CoreDepartureError)?; debug!("Sending time request to peer '{}'", peer_name); socket .send_to(send_buf.as_slice(), &peer_addr) .await .map_err(RequestError::UdpSocketError)?; Ok(()) } ///Enumeration of errors that can occur when processing a time response #[derive(Debug)] pub enum ResponseError { DestTimeError(io::Error), PacketDecodingError(prost::DecodeError), NotAResponse, AdDecodingError(prost::DecodeError), WrongNonceLength, WrongUniqueIdLength, UnrecognizedErrorResponse, NonMatchingUniqueId, S2CLookupError(PeerName, StoreError), S2CNotFound(PeerName), DecryptionFailure(PeerName), PlaintextDecodingError(PeerName, prost::DecodeError), WrongEraLength(PeerName), NoLocalClock(PeerName), NoGlobalOffset(PeerName), CoreError(PeerName, io::Error), StoreCookiesError(PeerName, StoreError), StoreClearError(PeerName, StoreError), } impl ResponseError { fn level(&self) -> log::Level { use log::Level::*; use ResponseError::*; match self { DestTimeError(_) => Error, PacketDecodingError(_) => Debug, NotAResponse => Debug, AdDecodingError(_) => Debug, WrongNonceLength => Debug, WrongUniqueIdLength => Debug, UnrecognizedErrorResponse => Debug, NonMatchingUniqueId => Debug, S2CLookupError(_, _) => Error, S2CNotFound(_) => Warn, DecryptionFailure(_) => Warn, PlaintextDecodingError(_, _) => Warn, WrongEraLength(_) => Warn, NoLocalClock(_) => Warn, NoGlobalOffset(_) => Warn, CoreError(_, _) => Error, StoreCookiesError(_, _) => Error, StoreClearError(_, _) => Error, } } } impl fmt::Display for ResponseError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { use ResponseError::*; match self { DestTimeError(e) => write!(f, "Getting destination timestamp: {}", e), PacketDecodingError(e) => write!(f, "Decoding packet: {}", e), NotAResponse => write!(f, "Not a response packet"), AdDecodingError(e) => write!(f, "Decoding associated data: {}", e), WrongNonceLength => write!(f, "Wrong nonce length"), WrongUniqueIdLength => write!(f, "Wrong unique-ID length"), UnrecognizedErrorResponse => write!(f, "Unrecognized error response"), NonMatchingUniqueId => { write!(f, "Unique-ID does not correspond to any in-flight request") } S2CLookupError(peer, e) => write!(f, "Looking up S2C for peer '{}': {}", peer, e), S2CNotFound(peer) => write!(f, "S2C key not found for peer '{}'", peer), DecryptionFailure(peer) => write!(f, "Failed to decrypt response from peer '{}'", peer), PlaintextDecodingError(peer, e) => { write!(f, "Decoding plaintext send by peer '{}': {}", peer, e) } WrongEraLength(peer) => write!( f, "Response from peer '{}' has an era of the wrong length", peer ), NoLocalClock(peer) => write!( f, 
"Response from peer '{}' is missing its local-clock field", peer ), NoGlobalOffset(peer) => write!( f, "Response from peer '{}' is missing its global-offset field", peer ), CoreError(peer, e) => write!( f, "Updating core state for response from peer '{}': {}", peer, e ), StoreCookiesError(peer, e) => write!( f, "Writing new cookies from peer '{}' to secret store: {}", peer, e ), StoreClearError(peer, e) => write!( f, "Clearing secret store in response to crypto-NAK from peer '{}': {}", peer, e ), } } } impl std::error::Error for ResponseError {} ///Data extracted from a [wire::ResponseEnvelope](../wire/struct.ResponseEnvelope.html) pub struct ResponseEnvelopeData { unique_id: core::UniqueId, nonce: Aes128SivNonce, ad: Vec<u8>, ciphertext: Vec<u8>, } ///Data extracted from a crypto-NAK response pub struct Cryp
unique_id: core::UniqueId, } ///Deserialize a time response as far as the envelope, but don't try to decrypt it pub fn deserialize_response_envelope<Response: Buf>( response: Response, ) -> Result<Result<ResponseEnvelopeData, CryptoNakData>, ResponseError> { let packet = wire::Packet::decode(response).map_err(ResponseError::PacketDecodingError)?; trace!("Deserialized time response packet: {:?}", packet); match packet.msg { Some(wire::packet::Msg::Response(envelope)) => { let ad = wire::ResponseAd::decode(envelope.ad.as_ref()) .map_err(ResponseError::AdDecodingError)?; let nonce = Aes128SivNonce::try_clone_from_slice(envelope.nonce.as_slice()) .map_err(|_| ResponseError::WrongNonceLength)?; let unique_id = core::UniqueId::try_from(ad.unique_id.as_slice()) .map_err(|_| ResponseError::WrongUniqueIdLength)?; Ok(Ok(ResponseEnvelopeData { unique_id, nonce, ad: envelope.ad, ciphertext: envelope.ciphertext, })) } Some(wire::packet::Msg::Error(error)) => { let unique_id = core::UniqueId::try_from(error.unique_id.as_slice()) .map_err(|_| ResponseError::WrongUniqueIdLength)?; match error.error { Some(wire::error::Error::CryptoNak(_)) => Ok(Err(CryptoNakData { unique_id })), _ => Err(ResponseError::UnrecognizedErrorResponse), } } _ => Err(ResponseError::NotAResponse), } } ///Deserialize the plaintext of a time response, returning cookies and /// a [`core::Response`](../core/struct.Response.html). pub fn deserialize_response_plaintext<Plaintext: Buf>( peer_name: &PeerName, unique_id: &core::UniqueId, plaintext: Plaintext, ) -> Result<(Vec<Vec<u8>>, core::Response), ResponseError> { let response = wire::Response::decode(plaintext) .map_err(|e| ResponseError::PlaintextDecodingError(peer_name.clone(), e))?; trace!("Deserialized time response plaintext: {:?}", response); let era = Era(<[u8; 16]>::try_from(response.era.as_slice()) .map_err(|_| ResponseError::WrongEraLength(peer_name.clone()))?); let global_offset = response .offset .ok_or_else(|| ResponseError::NoGlobalOffset(peer_name.clone()))?; let local_clock = response .local_clock .ok_or_else(|| ResponseError::NoLocalClock(peer_name.clone()))?; Ok(( response.cookies, core::Response { era, unique_id: *unique_id, global_offset: Timestamp::new( global_offset.seconds as i64, global_offset.nanoseconds as i64, ), local_clock: Timestamp::new(local_clock.seconds as i64, local_clock.nanoseconds as i64), }, )) } ///Process a time response /// ///Deserialize and decrypt the `response` using `secret_store` to look up keys. /// Pass the response to `core_state`. Add any returned cookies to the store. pub fn handle_time_response<Response: Buf>( response: Response, core_state: &RwLock<core::CoreState>, secret_store: &SecretStore, ) -> Result<(), ResponseError> { let dest_time = Timestamp::local_time().map_err(ResponseError::DestTimeError)?; match deserialize_response_envelope(response)? { Ok(envelope) => { let peer_name = core_state .read() .unwrap() .lookup_peer(&envelope.unique_id) .ok_or(ResponseError::NonMatchingUniqueId)?; //It's possible for S2CNotFound to happen when request B // crosses request A on the wire, and response B is a // crypto-NAK which causes us to clear our // credentials. This can readily be elicited in testing // setting an extremely short polling interval, but should // never normally happen in production, barring // adversarial behavior by the network or the peer. If it // does, it's harmless; we'll log it at WARN level and // recover on the next tick. 
let s2c = secret_store .get_s2c_key(&peer_name) .map_err(|e| ResponseError::S2CLookupError(peer_name.clone(), e))? .ok_or_else(|| ResponseError::S2CNotFound(peer_name.clone()))?; let aead_s2c = Aes128SivAead::new(&s2c); let plaintext = aead_s2c .decrypt( &envelope.nonce, Payload { aad: &envelope.ad, msg: &envelope.ciphertext, }, ) .map_err(|_| ResponseError::DecryptionFailure(peer_name.clone()))?; let (cookies, response) = deserialize_response_plaintext( &peer_name, &envelope.unique_id, plaintext.as_ref(), )?; core_state .write() .unwrap() .on_response(&response, dest_time) .map_err(|e| ResponseError::CoreError(peer_name.clone(), e))?; secret_store .give_cookies(&peer_name, cookies) .map_err(|e| ResponseError::StoreCookiesError(peer_name.clone(), e))?; debug!( "Successfully handled time response from peer '{}'", peer_name ); Ok(()) } Err(crypto_nak) => { let peer_name = core_state
toNakData {
identifier_name
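`serialize_time_request` above pads each request by `cookie_len * (cookies_requested - 1) + EXTRA_PADDING` zero bytes: the request carries one cookie while the response will carry `cookies_requested`, so this presumably keeps the request at least as large as the expected response and avoids turning the server into a traffic amplifier. A sketch of just that length rule, with an assumed stand-in value for `wire::EXTRA_PADDING`:

```rust
// Stand-in for wire::EXTRA_PADDING; the real value lives in the wire module.
const EXTRA_PADDING: usize = 4;

// Request padding length as computed in `serialize_time_request`: one
// cookie goes out, `cookies_requested` come back, so pad for the
// difference plus a fixed margin.
fn padding_len(cookie_len: usize, cookies_requested: usize) -> usize {
    cookie_len * cookies_requested.saturating_sub(1) + EXTRA_PADDING
}

fn main() {
    // Replacing a single cookie adds only the fixed margin.
    assert_eq!(padding_len(100, 1), EXTRA_PADDING);
    // Requesting a full batch of eight pads for the seven extra cookies.
    assert_eq!(padding_len(100, 8), 700 + EXTRA_PADDING);
}
```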
time_client.rs
), C2SLookupError(StoreError), TcpError(io::Error), TlsHandshakeError(io::Error), TlsSessionError(io::Error), NtskeProblem(ntske::NtskeProblem), NtskeNoCookies, CredentialSaveError(StoreError), CoreTickError(io::Error), CoreDepartureError(io::Error), UdpSocketError(io::Error), } impl RequestError { ///Level that this error should be logged at pub fn level(&self) -> log::Level { use log::Level::*; use RequestError::*; match self { ResolveError(_) => Warn, CookieLookupError(_) => Error, C2SLookupError(_) => Error, TcpError(_) => Warn, TlsHandshakeError(_) => Warn, TlsSessionError(_) => Warn, NtskeProblem(_) => Warn, NtskeNoCookies => Warn, CredentialSaveError(_) => Error, CoreTickError(_) => Error, CoreDepartureError(_) => Error, UdpSocketError(_) => Error, } } } impl fmt::Display for RequestError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { use RequestError::*; match self { ResolveError(e) => write!(f, "Resolving DNS: {}", e), CookieLookupError(e) => write!(f, "Looking up cookie from store: {}", e), C2SLookupError(e) => write!(f, "Looking up C2S key from store: {}", e), TcpError(e) => write!(f, "Establishing TCP connection for NTS-KE: {}", e), TlsHandshakeError(e) => write!(f, "During TLS handshake: {}", e), TlsSessionError(e) => write!(f, "In TLS session: {}", e), NtskeProblem(e) => write!(f, "In NTS-KE response: {:?}", e), NtskeNoCookies => write!(f, "NTS-KE succeeded but no cookies were returned"), CredentialSaveError(e) => write!(f, "Saving credentials to store: {}", e), CoreTickError(e) => write!(f, "Handling tick in core: {}", e), CoreDepartureError(e) => write!(f, "Updating origin timestamp: {}", e), UdpSocketError(e) => write!(f, "Sending on UDP socket: {}", e), } } } impl std::error::Error for RequestError {} pub fn serialize_time_request( out: &mut Vec<u8>, unique_id: &core::UniqueId, c2s: &Aes128SivKey, cookie: Vec<u8>, cookies_requested: usize, ) { let plaintext = wire::Request { num_cookies: cookies_requested as u32, }; trace!( "Encoding plaintext for time request {:x?}: {:?}", unique_id, plaintext ); let mut plaintext_serialized = Vec::with_capacity(plaintext.encoded_len()); plaintext .encode(&mut plaintext_serialized) .expect("Error encoding plaintext for time request"); let cookie_len = cookie.len(); let mut nonce = Aes128SivNonce::default(); rand::thread_rng().fill_bytes(nonce.as_mut_slice()); let ad = wire::RequestAd { unique_id: unique_id.to_vec(), cookie, }; trace!( "Encoding associated data for time request {:x?}: {:?}", unique_id, ad ); let mut ad_serialized = Vec::with_capacity(ad.encoded_len()); ad.encode(&mut ad_serialized) .expect("Error encoding associated data for time request"); let aead_c2s = Aes128SivAead::new(c2s); let ciphertext = aead_c2s .encrypt( &nonce, Payload { aad: &ad_serialized, msg: &plaintext_serialized, }, ) .expect("Error encrypting time request"); let padding = vec![0; cookie_len * cookies_requested.saturating_sub(1) + wire::EXTRA_PADDING]; let packet = wire::Packet { msg: Some(wire::packet::Msg::Request(wire::RequestEnvelope { nonce: nonce.to_vec(), ad: ad_serialized, ciphertext, padding, })), }; trace!( "Encoding packet for time request {:x?}: {:?}", unique_id, packet ); out.reserve(packet.encoded_len()); packet .encode(out) .expect("Error encoding packet for time request"); } ///Send a time request /// ///Resolve `peer_config.host` using `resolver`. Take keys and cookies /// from `secret_store`. If they aren't there, run NTS-KE to obtain /// them. 
Send a time request over `socket_mutex` and record in /// `core_state` that it's in flight. pub async fn send_time_request( resolver: &trust_dns_resolver::TokioAsyncResolver, socket: &tokio::net::UdpSocket, peer_name: &PeerName, peer_config: &PeerConfig, core_state: &RwLock<core::CoreState>, secret_store: &SecretStore, ) -> Result<(), RequestError> { let ip_addr = resolver .lookup_ip(peer_config.host.as_str()) .await .map_err(RequestError::ResolveError)? .into_iter() .next() .expect("Got empty iterator from DNS lookup"); debug!( "Resolved DNS for peer '{}': {} -> {}", peer_name, peer_config.host, ip_addr ); let peer_addr = SocketAddr::new(ip_addr, peer_config.port); //These two secret_store calls each use separate transactions, so // it's possible to get a cookie that doesn't correspond to to the // c2s key if the results of an NTS-KE exchange get committed in // between the two calls. This can be elicited in testing by // setting an extremely short polling interval. Preventing this // would be easy — just add a method to SecretStore that fetches // both the C2S key and the cookie in a single transaction — but // it wouldn't actually improve anything because the new S2C key // will still get committed right afterward and we won't be able // to decrypt the server's response. The problem is harmless in // any case because we'll just recover on the next tick. Worst // that happens is that NTS-KE gets run twice rather than just // once. let (c2s, cookie, cookies_left) = match ( secret_store .get_c2s_key(peer_name) .map_err(RequestError::C2SLookupError)?, secret_store .take_cookie(peer_name) .map_err(RequestError::CookieLookupError)?, ) { (Some(c2s), (Some(cookie), cookies_left)) => (c2s, cookie, cookies_left), _ => { let tcp_stream = net::TcpStream::connect(&peer_addr) .await .map_err(RequestError::TcpError)?; debug!( "TCP connection established for NTS-KE with peer '{}'", peer_name ); let mut tls_stream = peer_config .tls_connector .connect(peer_config.cert_name.as_ref(), tcp_stream) .await .map_err(RequestError::TlsHandshakeError)?; debug!("TLS handshake completed with peer '{}'", peer_name); let mut ntske_output = ntske::request_ntske(&mut tls_stream) .await .map_err(RequestError::TlsSessionError)? 
.map_err(RequestError::NtskeProblem)?; debug!("Successful NTS-KE with peer '{}'", peer_name); let my_cookie = ntske_output .cookies .pop() .ok_or(RequestError::NtskeNoCookies)?; let cookies_left = ntske_output.cookies.len(); secret_store .set_credentials( peer_name, &ntske_output.c2s, &ntske_output.s2c, ntske_output.cookies.as_slice(), ) .map_err(RequestError::CredentialSaveError)?; debug!( "Stored session keys and {} cookies for peer '{}'", cookies_left, peer_name ); (ntske_output.c2s, my_cookie, cookies_left) } }; let query = core_state .write() .unwrap() .on_tick(peer_name, &mut rand::thread_rng()) .map_err(RequestError::CoreTickError)?; let cookies_requested = if cookies_left > 7 { 1 } else { 8 - cookies_left }; let mut send_buf = Vec::new(); serialize_time_request( &mut send_buf, &query.unique_id, &c2s, cookie, cookies_requested, ); core_state .write() .unwrap() .on_departure(peer_name) .map_err(RequestError::CoreDepartureError)?; debug!("Sending time request to peer '{}'", peer_name); socket .send_to(send_buf.as_slice(), &peer_addr) .await .map_err(RequestError::UdpSocketError)?; Ok(()) } ///Enumeration of errors that can occur when processing a time response #[derive(Debug)] pub enum ResponseError { DestTimeError(io::Error), PacketDecodingError(prost::DecodeError), NotAResponse, AdDecodingError(prost::DecodeError), WrongNonceLength, WrongUniqueIdLength, UnrecognizedErrorResponse, NonMatchingUniqueId, S2CLookupError(PeerName, StoreError), S2CNotFound(PeerName), DecryptionFailure(PeerName), PlaintextDecodingError(PeerName, prost::DecodeError), WrongEraLength(PeerName), NoLocalClock(PeerName), NoGlobalOffset(PeerName), CoreError(PeerName, io::Error), StoreCookiesError(PeerName, StoreError), StoreClearError(PeerName, StoreError), } impl ResponseError { fn level(&self) -> log::Level { use log::Level::*; use ResponseError::*; match self { DestTimeError(_) => Error, PacketDecodingError(_) => Debug, NotAResponse => Debug, AdDecodingError(_) => Debug, WrongNonceLength => Debug, WrongUniqueIdLength => Debug, UnrecognizedErrorResponse => Debug, NonMatchingUniqueId => Debug, S2CLookupError(_, _) => Error, S2CNotFound(_) => Warn, DecryptionFailure(_) => Warn, PlaintextDecodingError(_, _) => Warn, WrongEraLength(_) => Warn, NoLocalClock(_) => Warn, NoGlobalOffset(_) => Warn, CoreError(_, _) => Error, StoreCookiesError(_, _) => Error, StoreClearError(_, _) => Error, } } } impl fmt::Display for ResponseError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { use ResponseError::*; match self { DestTimeError(e) => write!(f, "Getting destination timestamp: {}", e), PacketDecodingError(e) => write!(f, "Decoding packet: {}", e), NotAResponse => write!(f, "Not a response packet"), AdDecodingError(e) => write!(f, "Decoding associated data: {}", e), WrongNonceLength => write!(f, "Wrong nonce length"), WrongUniqueIdLength => write!(f, "Wrong unique-ID length"), UnrecognizedErrorResponse => write!(f, "Unrecognized error response"), NonMatchingUniqueId => { write!(f, "Unique-ID does not correspond to any in-flight request") } S2CLookupError(peer, e) => write!(f, "Looking up S2C for peer '{}': {}", peer, e), S2CNotFound(peer) => write!(f, "S2C key not found for peer '{}'", peer), DecryptionFailure(peer) => write!(f, "Failed to decrypt response from peer '{}'", peer), PlaintextDecodingError(peer, e) => { write!(f, "Decoding plaintext send by peer '{}': {}", peer, e) } WrongEraLength(peer) => write!( f, "Response from peer '{}' has an era of the wrong length", peer ), NoLocalClock(peer) => write!( f, 
"Response from peer '{}' is missing its local-clock field", peer ), NoGlobalOffset(peer) => write!( f, "Response from peer '{}' is missing its global-offset field", peer ), CoreError(peer, e) => write!( f, "Updating core state for response from peer '{}': {}", peer, e ), StoreCookiesError(peer, e) => write!( f, "Writing new cookies from peer '{}' to secret store: {}", peer, e ), StoreClearError(peer, e) => write!( f, "Clearing secret store in response to crypto-NAK from peer '{}': {}", peer, e ), } } } impl std::error::Error for ResponseError {} ///Data extracted from a [wire::ResponseEnvelope](../wire/struct.ResponseEnvelope.html) pub struct ResponseEnvelopeData { unique_id: core::UniqueId, nonce: Aes128SivNonce, ad: Vec<u8>, ciphertext: Vec<u8>, } ///Data extracted from a crypto-NAK response pub struct CryptoNakData { unique_id: core::UniqueId, } ///Deserialize a time response as far as the envelope, but don't try to decrypt it pub fn deserialize_response_envelope<Response: Buf>( response: Response, ) -> Result<Result<ResponseEnvelopeData, CryptoNakData>, ResponseError> { let packet = wire::Packet::decode(response).map_err(ResponseError::PacketDecodingError)?; trace!("Deserialized time response packet: {:?}", packet); match packet.msg { Some(wire::packet::Msg::Response(envelope)) => { let ad = wire::ResponseAd::decode(envelope.ad.as_ref()) .map_err(ResponseError::AdDecodingError)?; let nonce = Aes128SivNonce::try_clone_from_slice(envelope.nonce.as_slice()) .map_err(|_| ResponseError::WrongNonceLength)?; let unique_id = core::UniqueId::try_from(ad.unique_id.as_slice()) .map_err(|_| ResponseError::WrongUniqueIdLength)?; Ok(Ok(ResponseEnvelopeData { unique_id, nonce, ad: envelope.ad, ciphertext: envelope.ciphertext, })) } Some(wire::packet::Msg::Error(error)) => { let unique_id = core::UniqueId::try_from(error.unique_id.as_slice()) .map_err(|_| ResponseError::WrongUniqueIdLength)?; match error.error { Some(wire::error::Error::CryptoNak(_)) => Ok(Err(CryptoNakData { unique_id })), _ => Err(ResponseError::UnrecognizedErrorResponse), } } _ => Err(ResponseError::NotAResponse), } } ///Deserialize the plaintext of a time response, returning cookies and /// a [`core::Response`](../core/struct.Response.html). pub fn deserialize_response_plaintext<Plaintext: Buf>( peer_name: &PeerName, unique_id: &core::UniqueId, plaintext: Plaintext, ) -> Result<(Vec<Vec<u8>>, core::Response), ResponseError> { let response = wire::Response::decode(plaintext) .map_err(|e| ResponseError::PlaintextDecodingError(peer_name.clone(), e))?; trace!("Deserialized time response plaintext: {:?}", response); let era = Era(<[u8; 16]>::try_from(response.era.as_slice()) .map_err(|_| ResponseError::WrongEraLength(peer_name.clone()))?); let global_offset = response .offset .ok_or_else(|| ResponseError::NoGlobalOffset(peer_name.clone()))?; let local_clock = response .local_clock .ok_or_else(|| ResponseError::NoLocalClock(peer_name.clone()))?; Ok(( response.cookies, core::Response { era, unique_id: *unique_id, global_offset: Timestamp::new( global_offset.seconds as i64, global_offset.nanoseconds as i64, ), local_clock: Timestamp::new(local_clock.seconds as i64, local_clock.nanoseconds as i64), }, )) } ///Process a time response /// ///Deserialize and decrypt the `response` using `secret_store` to look up keys. /// Pass the response to `core_state`. Add any returned cookies to the store. 
pub fn handle_time_response<Response: Buf>( response: Response, core_state: &RwLock<core::CoreState>, secret_store: &SecretStore, ) -> Result<(), ResponseError> { let dest_time = Timestamp::local_time().map_err(ResponseError::DestTimeError)?; match deserialize_response_envelope(response)? { Ok(envelope) => { let peer_name = core_state .read() .unwrap() .lookup_peer(&envelope.unique_id) .ok_or(ResponseError::NonMatchingUniqueId)?; //It's possible for S2CNotFound to happen when request B // crosses request A on the wire, and response B is a // crypto-NAK which causes us to clear our // credentials. This can readily be elicited in testing // setting an extremely short polling interval, but should // never normally happen in production, barring // adversarial behavior by the network or the peer. If it // does, it's harmless; we'll log it at WARN level and // recover on the next tick. let s2c = secret_store .get_s2c_key(&peer_name) .map_err(|e| ResponseError::S2CLookupError(peer_name.clone(), e))? .ok_or_else(|| ResponseError::S2CNotFound(peer_name.clone()))?; let aead_s2c = Aes128SivAead::new(&s2c);
&envelope.nonce, Payload { aad: &envelope.ad, msg: &envelope.ciphertext, }, ) .map_err(|_| ResponseError::DecryptionFailure(peer_name.clone()))?; let (cookies, response) = deserialize_response_plaintext( &peer_name, &envelope.unique_id, plaintext.as_ref(), )?; core_state .write() .unwrap() .on_response(&response, dest_time) .map_err(|e| ResponseError::CoreError(peer_name.clone(), e))?; secret_store .give_cookies(&peer_name, cookies) .map_err(|e| ResponseError::StoreCookiesError(peer_name.clone(), e))?; debug!( "Successfully handled time response from peer '{}'", peer_name ); Ok(()) } Err(crypto_nak) => { let peer_name = core_state
let plaintext = aead_s2c .decrypt(
random_line_split
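`send_time_request` above decides how many fresh cookies to ask for with `if cookies_left > 7 { 1 } else { 8 - cookies_left }`: the client aims to keep eight cookies banked and always requests at least one replacement for the cookie it just spent. The policy in isolation:

```rust
// Cookie replenishment policy from `send_time_request`: keep eight
// cookies banked, and always request at least one per exchange.
fn cookies_to_request(cookies_left: usize) -> usize {
    if cookies_left > 7 { 1 } else { 8 - cookies_left }
}

fn main() {
    assert_eq!(cookies_to_request(0), 8); // fresh NTS-KE: refill the pool
    assert_eq!(cookies_to_request(7), 1); // nearly full: top it up
    assert_eq!(cookies_to_request(9), 1); // overfull: just replace the spent one
}
```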
time_client.rs
            CookieLookupError(e) => write!(f, "Looking up cookie from store: {}", e),
            C2SLookupError(e) => write!(f, "Looking up C2S key from store: {}", e),
            TcpError(e) => write!(f, "Establishing TCP connection for NTS-KE: {}", e),
            TlsHandshakeError(e) => write!(f, "During TLS handshake: {}", e),
            TlsSessionError(e) => write!(f, "In TLS session: {}", e),
            NtskeProblem(e) => write!(f, "In NTS-KE response: {:?}", e),
            NtskeNoCookies => write!(f, "NTS-KE succeeded but no cookies were returned"),
            CredentialSaveError(e) => write!(f, "Saving credentials to store: {}", e),
            CoreTickError(e) => write!(f, "Handling tick in core: {}", e),
            CoreDepartureError(e) => write!(f, "Updating origin timestamp: {}", e),
            UdpSocketError(e) => write!(f, "Sending on UDP socket: {}", e),
        }
    }
}

impl std::error::Error for RequestError {}

pub fn serialize_time_request(
    out: &mut Vec<u8>,
    unique_id: &core::UniqueId,
    c2s: &Aes128SivKey,
    cookie: Vec<u8>,
    cookies_requested: usize,
) {
    let plaintext = wire::Request {
        num_cookies: cookies_requested as u32,
    };
    trace!(
        "Encoding plaintext for time request {:x?}: {:?}",
        unique_id,
        plaintext
    );
    let mut plaintext_serialized = Vec::with_capacity(plaintext.encoded_len());
    plaintext
        .encode(&mut plaintext_serialized)
        .expect("Error encoding plaintext for time request");

    let cookie_len = cookie.len();
    let mut nonce = Aes128SivNonce::default();
    rand::thread_rng().fill_bytes(nonce.as_mut_slice());
    let ad = wire::RequestAd {
        unique_id: unique_id.to_vec(),
        cookie,
    };
    trace!(
        "Encoding associated data for time request {:x?}: {:?}",
        unique_id,
        ad
    );
    let mut ad_serialized = Vec::with_capacity(ad.encoded_len());
    ad.encode(&mut ad_serialized)
        .expect("Error encoding associated data for time request");

    let aead_c2s = Aes128SivAead::new(c2s);
    let ciphertext = aead_c2s
        .encrypt(
            &nonce,
            Payload {
                aad: &ad_serialized,
                msg: &plaintext_serialized,
            },
        )
        .expect("Error encrypting time request");

    let padding = vec![0; cookie_len * cookies_requested.saturating_sub(1) + wire::EXTRA_PADDING];
    let packet = wire::Packet {
        msg: Some(wire::packet::Msg::Request(wire::RequestEnvelope {
            nonce: nonce.to_vec(),
            ad: ad_serialized,
            ciphertext,
            padding,
        })),
    };
    trace!(
        "Encoding packet for time request {:x?}: {:?}",
        unique_id,
        packet
    );
    out.reserve(packet.encoded_len());
    packet
        .encode(out)
        .expect("Error encoding packet for time request");
}

///Send a time request
///
///Resolve `peer_config.host` using `resolver`. Take keys and cookies
/// from `secret_store`. If they aren't there, run NTS-KE to obtain
/// them. Send a time request over `socket` and record in
/// `core_state` that it's in flight.
pub async fn send_time_request(
    resolver: &trust_dns_resolver::TokioAsyncResolver,
    socket: &tokio::net::UdpSocket,
    peer_name: &PeerName,
    peer_config: &PeerConfig,
    core_state: &RwLock<core::CoreState>,
    secret_store: &SecretStore,
) -> Result<(), RequestError> {
    let ip_addr = resolver
        .lookup_ip(peer_config.host.as_str())
        .await
        .map_err(RequestError::ResolveError)?
        .into_iter()
        .next()
        .expect("Got empty iterator from DNS lookup");
    debug!(
        "Resolved DNS for peer '{}': {} -> {}",
        peer_name, peer_config.host, ip_addr
    );
    let peer_addr = SocketAddr::new(ip_addr, peer_config.port);

    //These two secret_store calls each use separate transactions, so
    // it's possible to get a cookie that doesn't correspond to the
    // c2s key if the results of an NTS-KE exchange get committed in
    // between the two calls. This can be elicited in testing by
    // setting an extremely short polling interval. Preventing this
    // would be easy (just add a method to SecretStore that fetches
    // both the C2S key and the cookie in a single transaction), but
    // it wouldn't actually improve anything, because the new S2C key
    // will still get committed right afterward and we won't be able
    // to decrypt the server's response. The problem is harmless in
    // any case because we'll just recover on the next tick. The worst
    // that happens is that NTS-KE gets run twice rather than just
    // once.
    let (c2s, cookie, cookies_left) = match (
        secret_store
            .get_c2s_key(peer_name)
            .map_err(RequestError::C2SLookupError)?,
        secret_store
            .take_cookie(peer_name)
            .map_err(RequestError::CookieLookupError)?,
    ) {
        (Some(c2s), (Some(cookie), cookies_left)) => (c2s, cookie, cookies_left),
        _ => {
            let tcp_stream = net::TcpStream::connect(&peer_addr)
                .await
                .map_err(RequestError::TcpError)?;
            debug!(
                "TCP connection established for NTS-KE with peer '{}'",
                peer_name
            );
            let mut tls_stream = peer_config
                .tls_connector
                .connect(peer_config.cert_name.as_ref(), tcp_stream)
                .await
                .map_err(RequestError::TlsHandshakeError)?;
            debug!("TLS handshake completed with peer '{}'", peer_name);
            let mut ntske_output = ntske::request_ntske(&mut tls_stream)
                .await
                .map_err(RequestError::TlsSessionError)?
                .map_err(RequestError::NtskeProblem)?;
            debug!("Successful NTS-KE with peer '{}'", peer_name);
            let my_cookie = ntske_output
                .cookies
                .pop()
                .ok_or(RequestError::NtskeNoCookies)?;
            let cookies_left = ntske_output.cookies.len();
            secret_store
                .set_credentials(
                    peer_name,
                    &ntske_output.c2s,
                    &ntske_output.s2c,
                    ntske_output.cookies.as_slice(),
                )
                .map_err(RequestError::CredentialSaveError)?;
            debug!(
                "Stored session keys and {} cookies for peer '{}'",
                cookies_left, peer_name
            );
            (ntske_output.c2s, my_cookie, cookies_left)
        }
    };

    let query = core_state
        .write()
        .unwrap()
        .on_tick(peer_name, &mut rand::thread_rng())
        .map_err(RequestError::CoreTickError)?;
    let cookies_requested = if cookies_left > 7 { 1 } else { 8 - cookies_left };
    let mut send_buf = Vec::new();
    serialize_time_request(
        &mut send_buf,
        &query.unique_id,
        &c2s,
        cookie,
        cookies_requested,
    );
    core_state
        .write()
        .unwrap()
        .on_departure(peer_name)
        .map_err(RequestError::CoreDepartureError)?;
    debug!("Sending time request to peer '{}'", peer_name);
    socket
        .send_to(send_buf.as_slice(), &peer_addr)
        .await
        .map_err(RequestError::UdpSocketError)?;
    Ok(())
}

///Enumeration of errors that can occur when processing a time response
#[derive(Debug)]
pub enum ResponseError {
    DestTimeError(io::Error),
    PacketDecodingError(prost::DecodeError),
    NotAResponse,
    AdDecodingError(prost::DecodeError),
    WrongNonceLength,
    WrongUniqueIdLength,
    UnrecognizedErrorResponse,
    NonMatchingUniqueId,
    S2CLookupError(PeerName, StoreError),
    S2CNotFound(PeerName),
    DecryptionFailure(PeerName),
    PlaintextDecodingError(PeerName, prost::DecodeError),
    WrongEraLength(PeerName),
    NoLocalClock(PeerName),
    NoGlobalOffset(PeerName),
    CoreError(PeerName, io::Error),
    StoreCookiesError(PeerName, StoreError),
    StoreClearError(PeerName, StoreError),
}

impl ResponseError {
    fn level(&self) -> log::Level {
        use log::Level::*;
        use ResponseError::*;
        match self {
            DestTimeError(_) => Error,
            PacketDecodingError(_) => Debug,
            NotAResponse => Debug,
            AdDecodingError(_) => Debug,
            WrongNonceLength => Debug,
            WrongUniqueIdLength => Debug,
            UnrecognizedErrorResponse => Debug,
            NonMatchingUniqueId => Debug,
            S2CLookupError(_, _) => Error,
            S2CNotFound(_) => Warn,
            DecryptionFailure(_) => Warn,
            PlaintextDecodingError(_, _) => Warn,
            WrongEraLength(_) => Warn,
            NoLocalClock(_) => Warn,
            NoGlobalOffset(_) => Warn,
            CoreError(_, _) => Error,
            StoreCookiesError(_, _) => Error,
            StoreClearError(_, _) => Error,
        }
    }
}

impl fmt::Display for ResponseError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        use ResponseError::*;
        match self {
            DestTimeError(e) => write!(f, "Getting destination timestamp: {}", e),
            PacketDecodingError(e) => write!(f, "Decoding packet: {}", e),
            NotAResponse => write!(f, "Not a response packet"),
            AdDecodingError(e) => write!(f, "Decoding associated data: {}", e),
            WrongNonceLength => write!(f, "Wrong nonce length"),
            WrongUniqueIdLength => write!(f, "Wrong unique-ID length"),
            UnrecognizedErrorResponse => write!(f, "Unrecognized error response"),
            NonMatchingUniqueId => {
                write!(f, "Unique-ID does not correspond to any in-flight request")
            }
            S2CLookupError(peer, e) => write!(f, "Looking up S2C for peer '{}': {}", peer, e),
            S2CNotFound(peer) => write!(f, "S2C key not found for peer '{}'", peer),
            DecryptionFailure(peer) => write!(f, "Failed to decrypt response from peer '{}'", peer),
            PlaintextDecodingError(peer, e) => {
                write!(f, "Decoding plaintext sent by peer '{}': {}", peer, e)
            }
            WrongEraLength(peer) => write!(
                f,
                "Response from peer '{}' has an era of the wrong length",
                peer
            ),
            NoLocalClock(peer) => write!(
                f,
                "Response from peer '{}' is missing its local-clock field",
                peer
            ),
            NoGlobalOffset(peer) => write!(
                f,
                "Response from peer '{}' is missing its global-offset field",
                peer
            ),
            CoreError(peer, e) => write!(
                f,
                "Updating core state for response from peer '{}': {}",
                peer, e
            ),
            StoreCookiesError(peer, e) => write!(
                f,
                "Writing new cookies from peer '{}' to secret store: {}",
                peer, e
            ),
            StoreClearError(peer, e) => write!(
                f,
                "Clearing secret store in response to crypto-NAK from peer '{}': {}",
                peer, e
            ),
        }
    }
}

impl std::error::Error for ResponseError {}

///Data extracted from a [wire::ResponseEnvelope](../wire/struct.ResponseEnvelope.html)
pub struct ResponseEnvelopeData {
    unique_id: core::UniqueId,
    nonce: Aes128SivNonce,
    ad: Vec<u8>,
    ciphertext: Vec<u8>,
}

///Data extracted from a crypto-NAK response
pub struct CryptoNakData {
    unique_id: core::UniqueId,
}

///Deserialize a time response as far as the envelope, but don't try to decrypt it
pub fn deserialize_response_envelope<Response: Buf>(
    response: Response,
) -> Result<Result<ResponseEnvelopeData, CryptoNakData>, ResponseError> {
    let packet = wire::Packet::decode(response).map_err(ResponseError::PacketDecodingError)?;
    trace!("Deserialized time response packet: {:?}", packet);
    match packet.msg {
        Some(wire::packet::Msg::Response(envelope)) => {
            let ad = wire::ResponseAd::decode(envelope.ad.as_ref())
                .map_err(ResponseError::AdDecodingError)?;
            let nonce = Aes128SivNonce::try_clone_from_slice(envelope.nonce.as_slice())
                .map_err(|_| ResponseError::WrongNonceLength)?;
            let unique_id = core::UniqueId::try_from(ad.unique_id.as_slice())
                .map_err(|_| ResponseError::WrongUniqueIdLength)?;
            Ok(Ok(ResponseEnvelopeData {
                unique_id,
                nonce,
                ad: envelope.ad,
                ciphertext: envelope.ciphertext,
            }))
        }
        Some(wire::packet::Msg::Error(error)) => {
            let unique_id = core::UniqueId::try_from(error.unique_id.as_slice())
                .map_err(|_| ResponseError::WrongUniqueIdLength)?;
            match error.error {
                Some(wire::error::Error::CryptoNak(_)) => Ok(Err(CryptoNakData { unique_id })),
                _ => Err(ResponseError::UnrecognizedErrorResponse),
            }
        }
        _ => Err(ResponseError::NotAResponse),
    }
}

///Deserialize the plaintext of a time response, returning cookies and
/// a [`core::Response`](../core/struct.Response.html).
pub fn deserialize_response_plaintext<Plaintext: Buf>(
    peer_name: &PeerName,
    unique_id: &core::UniqueId,
    plaintext: Plaintext,
) -> Result<(Vec<Vec<u8>>, core::Response), ResponseError> {
    let response = wire::Response::decode(plaintext)
        .map_err(|e| ResponseError::PlaintextDecodingError(peer_name.clone(), e))?;
    trace!("Deserialized time response plaintext: {:?}", response);
    let era = Era(<[u8; 16]>::try_from(response.era.as_slice())
        .map_err(|_| ResponseError::WrongEraLength(peer_name.clone()))?);
    let global_offset = response
        .offset
        .ok_or_else(|| ResponseError::NoGlobalOffset(peer_name.clone()))?;
    let local_clock = response
        .local_clock
        .ok_or_else(|| ResponseError::NoLocalClock(peer_name.clone()))?;
    Ok((
        response.cookies,
        core::Response {
            era,
            unique_id: *unique_id,
            global_offset: Timestamp::new(
                global_offset.seconds as i64,
                global_offset.nanoseconds as i64,
            ),
            local_clock: Timestamp::new(local_clock.seconds as i64, local_clock.nanoseconds as i64),
        },
    ))
}

///Process a time response
///
///Deserialize and decrypt the `response` using `secret_store` to look up keys.
/// Pass the response to `core_state`. Add any returned cookies to the store.
pub fn handle_time_response<Response: Buf>(
    response: Response,
    core_state: &RwLock<core::CoreState>,
    secret_store: &SecretStore,
) -> Result<(), ResponseError> {
    let dest_time = Timestamp::local_time().map_err(ResponseError::DestTimeError)?;
    match deserialize_response_envelope(response)? {
        Ok(envelope) => {
            let peer_name = core_state
                .read()
                .unwrap()
                .lookup_peer(&envelope.unique_id)
                .ok_or(ResponseError::NonMatchingUniqueId)?;
            //It's possible for S2CNotFound to happen when request B
            // crosses request A on the wire, and response B is a
            // crypto-NAK which causes us to clear our
            // credentials. This can readily be elicited in testing by
            // setting an extremely short polling interval, but should
            // never normally happen in production, barring
            // adversarial behavior by the network or the peer. If it
            // does, it's harmless; we'll log it at WARN level and
            // recover on the next tick.
            let s2c = secret_store
                .get_s2c_key(&peer_name)
                .map_err(|e| ResponseError::S2CLookupError(peer_name.clone(), e))?
                .ok_or_else(|| ResponseError::S2CNotFound(peer_name.clone()))?;
            let aead_s2c = Aes128SivAead::new(&s2c);
            let plaintext = aead_s2c
                .decrypt(
                    &envelope.nonce,
                    Payload {
                        aad: &envelope.ad,
                        msg: &envelope.ciphertext,
                    },
                )
                .map_err(|_| ResponseError::DecryptionFailure(peer_name.clone()))?;
            let (cookies, response) = deserialize_response_plaintext(
                &peer_name,
                &envelope.unique_id,
                plaintext.as_ref(),
            )?;
            core_state
                .write()
                .unwrap()
                .on_response(&response, dest_time)
                .map_err(|e| ResponseError::CoreError(peer_name.clone(), e))?;
            secret_store
                .give_cookies(&peer_name, cookies)
                .map_err(|e| ResponseError::StoreCookiesError(peer_name.clone(), e))?;
            debug!(
                "Successfully handled time response from peer '{}'",
                peer_name
            );
            Ok(())
        }
        Err(crypto_nak) => {
            let peer_name = core_state
                .read()
                .unwrap()
                .lookup_peer(&crypto_nak.unique_id)
                .ok_or(ResponseError::NonMatchingUniqueId)?;
            debug!("Received crypto-NAK from peer '{}'", peer_name);
            secret_store
                .clear_peer(&peer_name)
                .map_err(|e| ResponseError::StoreClearError(peer_name.clone(), e))?;
            Ok(())
        }
    }
}

///Listen for time responses and process them
///
///Listen forever on `socket`. Process any responses that come in. If any
/// errors occur, log them and continue.
pub async fn time_response_listener(
    socket: &tokio::net::UdpSocket,
    core_state: &RwLock<core::CoreState>,
    secret_store: &SecretStore,
) -> io::Result<()> {
    let mut recv_buf = [0; 65535];
    loop {
        let (recv_size, peer_addr) = socket.recv_from(&mut recv_buf).await?;
        if let Err(e) = handle_time_response(&recv_buf[0..recv_size], core_state, secret_store) {
            log!(
                e.level(),
                "Handling time response from {}: {}",
                peer_addr,
                e
            );
        }
    }
}
identifier_body
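Two sizing rules in `send_time_request` and `serialize_time_request` above are worth calling out: the client tries to keep a pool of eight cookies on hand, and it pads the request in proportion to the cookies it asks for, presumably so the request is never smaller than the response it solicits (an anti-amplification measure). A minimal sketch of that arithmetic; `EXTRA_PADDING` here is a stand-in for `wire::EXTRA_PADDING`, whose real value is not shown in this excerpt:

// Stand-in for wire::EXTRA_PADDING (assumed value, for illustration only).
const EXTRA_PADDING: usize = 0;

// Mirror of the pool top-up rule: aim for 8 cookies on hand. With a full
// pool, ask for one replacement; otherwise ask for enough to refill.
fn cookies_requested(cookies_left: usize) -> usize {
    if cookies_left > 7 { 1 } else { 8 - cookies_left }
}

// Mirror of the padding rule: the request already carries one cookie, so
// reserve room for the other `requested - 1` cookies expected back.
fn padding_len(cookie_len: usize, requested: usize) -> usize {
    cookie_len * requested.saturating_sub(1) + EXTRA_PADDING
}

fn main() {
    assert_eq!(cookies_requested(8), 1); // full pool: replace the one just spent
    assert_eq!(cookies_requested(3), 5); // depleted pool: refill to eight
    assert_eq!(padding_len(100, 5), 400 + EXTRA_PADDING);
}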
fst_builder.rs
            inited: false,
        }
    }

    // this should be called after FstBuilder::new
    pub fn init(&mut self) {
        if self.do_share_suffix {
            let reader = self.fst.bytes_store.get_reverse_reader();
            let dedup_hash = NodeHash::new(&mut self.fst, reader);
            self.dedup_hash = Some(dedup_hash);
        }
        for i in 0..10 {
            let node = UnCompiledNode::new(self, i);
            self.frontier.push(node);
        }
        self.inited = true;
    }

    pub fn term_count(&self) -> i64 {
        self.frontier[0].input_count
    }

    fn compile_node(&mut self, node_index: usize, tail_length: u32) -> Result<CompiledAddress> {
        debug_assert!(self.inited);
        let node: i64;
        let bytes_pos_start = self.fst.bytes_store.get_position();
        let builder = self as *mut FstBuilder<F>;
        unsafe {
            if let Some(ref mut dedup_hash) = self.dedup_hash {
                if (self.do_share_non_singleton_nodes || self.frontier[node_index].num_arcs <= 1)
                    && tail_length <= self.share_max_tail_length
                {
                    if self.frontier[node_index].num_arcs == 0 {
                        node = self.fst.add_node(&mut *builder, node_index)?;
                        self.last_frozen_node = node;
                    } else {
                        node = dedup_hash.add(&mut *builder, node_index)? as i64;
                    }
                } else {
                    node = self.fst.add_node(&mut *builder, node_index)?;
                }
            } else {
                node = self.fst.add_node(&mut *builder, node_index)?;
            }
        }
        assert_ne!(node, -2);
        let bytes_pos_end = self.fst.bytes_store.get_position();
        if bytes_pos_end != bytes_pos_start {
            // fst added a new node
            assert!(bytes_pos_end > bytes_pos_start);
            self.last_frozen_node = node;
        }
        self.frontier[node_index].clear();
        Ok(node)
    }

    #[allow(unused_assignments)]
    fn freeze_tail(&mut self, prefix_len_plus1: usize) -> Result<()> {
        debug_assert!(self.inited);
        let down_to = max(1, prefix_len_plus1);
        if self.last_input.length < down_to {
            return Ok(());
        }
        for i in 0..=self.last_input.length - down_to {
            let idx = self.last_input.length - i;
            let mut do_prune = false;
            let mut do_compile = false;
            let tmp = UnCompiledNode::new(self, 0);
            let mut parent = mem::replace(&mut self.frontier[idx - 1], tmp);
            if self.frontier[idx].input_count < self.min_suffix_count1 as i64 {
                do_prune = true;
                do_compile = true;
            } else if idx > prefix_len_plus1 {
                // prune if parent's input_count is less than min_suffix_count2
                if parent.input_count < self.min_suffix_count2 as i64
                    || (self.min_suffix_count2 == 1 && parent.input_count == 1 && idx > 1)
                {
                    // my parent, about to be compiled, doesn't make the cut, so
                    // I'm definitely pruned

                    // if min_suffix_count2 is 1, we keep only up
                    // until the 'distinguished edge', i.e. we keep only the
                    // 'divergent' part of the FST. If my parent, about to be
                    // compiled, has input_count 1 then we are already past the
                    // distinguished edge. NOTE: this only works if
                    // the FST outputs are not "compressible" (simple
                    // ords ARE compressible).
                    do_prune = true;
                } else {
                    // my parent, about to be compiled, does make the cut, so
                    // I'm definitely not pruned
                    do_prune = false;
                }
                do_compile = true;
            } else {
                // if pruning is disabled (count is 0) we can always
                // compile current node
                do_compile = self.min_suffix_count2 == 0;
            }
            if self.frontier[idx].input_count < self.min_suffix_count2 as i64
                || (self.min_suffix_count2 == 1 && self.frontier[idx].input_count == 1 && idx > 1)
            {
                // drop all arcs
                for arc_idx in 0..self.frontier[idx].num_arcs {
                    if let Node::UnCompiled(target) = self.frontier[idx].arcs[arc_idx].target {
                        self.frontier[target].clear();
                    }
                }
                self.frontier[idx].num_arcs = 0;
            }
            if do_prune {
                // this node doesn't make it -- deref it
                self.frontier[idx].clear();
                parent.delete_last(self.last_input.int_at(idx - 1), &Node::UnCompiled(idx));
            } else {
                if self.min_suffix_count2 != 0 {
                    let tail_len = self.last_input.length - idx;
                    self.compile_all_targets(idx, tail_len)?;
                }
                let next_final_output = self.frontier[idx].output.clone();
                // We "fake" the node as being final if it has no
                // outgoing arcs; in theory we could leave it
                // as non-final (the FST can represent this), but
                // FSTEnum, Util, etc., have trouble w/ non-final
                // dead-end states:
                let is_final = self.frontier[idx].is_final || self.frontier[idx].num_arcs == 0;
                if do_compile {
                    // this node makes it and we now compile it. first,
                    // compile any targets that were previously
                    // undecided:
                    let tail_len = (1 + self.last_input.length - idx) as u32;
                    let n = self.compile_node(idx, tail_len)?;
                    parent.replace_last(
                        self.last_input.int_at(idx - 1),
                        Node::Compiled(n),
                        next_final_output,
                        is_final,
                    );
                } else {
                    // replace_last just to install
                    // next_final_output/is_final onto the arc
                    parent.replace_last(
                        self.last_input.int_at(idx - 1),
                        Node::UnCompiled(0), // a stub node,
                        next_final_output,
                        is_final,
                    );
                    // this node will stay in play for now, since we are
                    // undecided on whether to prune it. later, it
                    // will be either compiled or pruned, so we must
                    // allocate a new node:
                    self.frontier[idx] = UnCompiledNode::new(self, idx as i32);
                }
            }
            self.frontier[idx - 1] = parent;
        }
        Ok(())
    }

    /// Add the next input/output pair. The provided input
    /// must be sorted after the previous one according to
    /// `IntsRef#compareTo`. It's also OK to add the same
    /// input twice in a row with different outputs, as long
    /// as `OutputFactory` implements the `OutputFactory#merge`
    /// method. Note that input is fully consumed after this
    /// method returns (so the caller is free to reuse it), but
    /// output is not. So if your outputs are changeable (e.g.
    /// `ByteSequenceOutputs`) then you cannot reuse across
    /// calls.
    pub fn add(&mut self, input: IntsRef, output: F::Value) -> Result<()> {
        debug_assert!(self.inited);
        assert!(self.last_input.length == 0 || input > self.last_input.get());
        let mut output = output;
        if self.frontier.len() < input.length + 1 {
            for i in self.frontier.len()..input.length + 2 {
                let node = UnCompiledNode::new(self, i as i32);
                self.frontier.push(node);
            }
        }
        if input.length == 0 {
            // empty input: only allowed as first input. we have
            // to special case this because the packed FST
            // format cannot represent the empty input since
            // 'finalness' is stored on the incoming arc, not on
            // the node
            self.frontier[0].input_count += 1;
            self.frontier[0].is_final = true;
            self.fst.set_empty_output(output);
            return Ok(());
        }
        // compare shared prefix length
        let mut pos1 = 0;
        let mut pos2 = input.offset;
        let pos1_stop = min(self.last_input.length, input.length);
        loop {
            self.frontier[pos1].input_count += 1;
            if pos1 >= pos1_stop || self.last_input.int_at(pos1) != input.ints()[pos2] {
                break;
            }
            pos1 += 1;
            pos2 += 1;
        }
        let prefix_len_plus1 = pos1 + 1;

        // minimize/compile states from previous input's
        // orphaned suffix
        self.freeze_tail(prefix_len_plus1)?;

        // init tail states for current input
        for i in prefix_len_plus1..=input.length {
            let node = Node::UnCompiled(i);
            self.frontier[i - 1].add_arc(input.ints()[input.offset + i - 1], node);
            self.frontier[i].input_count += 1;
        }

        let last_idx = input.length;
        if self.last_input.length != input.length || prefix_len_plus1 != input.length + 1 {
            self.frontier[last_idx].is_final = true;
            self.frontier[last_idx].output = self.no_output.clone();
        }

        // push conflicting outputs forward, only as far as needed
        for i in 1..prefix_len_plus1 {
            let last_output = self.frontier[i - 1]
                .get_last_output(input.ints()[input.offset + i - 1])
                .clone();
            let common_output_prefix: F::Value;
            if last_output != self.no_output {
                common_output_prefix = self.fst.outputs().common(&output, &last_output);
                let word_suffix = self
                    .fst
                    .outputs()
                    .subtract(&last_output, &common_output_prefix);
                self.frontier[i].prepend_output(word_suffix);
            } else {
                common_output_prefix = self.no_output.clone();
            }
            output = self.fst.outputs().subtract(&output, &common_output_prefix);
            if last_output != self.no_output {
                self.frontier[i - 1]
                    .set_last_output(input.ints()[input.offset + i - 1], common_output_prefix);
            }
        }

        if self.last_input.length == input.length && prefix_len_plus1 == input.length + 1 {
            // same input more than 1 time in a row, mapping to
            // multiple outputs
            self.frontier[last_idx].output = self
                .fst
                .outputs()
                .merge(&self.frontier[last_idx].output, &output);
        } else {
            // this new arc is private to this new input; set its
            // arc output to the leftover output:
            self.frontier[prefix_len_plus1 - 1]
                .set_last_output(input.ints()[input.offset + prefix_len_plus1 - 1], output);
        }

        // save last input
        self.last_input.copy_ints_ref(&input);
        Ok(())
    }

    // Returns the final FST. NOTE: this will return None if nothing is accepted by the fst
    pub fn finish(&mut self) -> Result<Option<FST<F>>> {
        debug_assert!(self.inited);
        // minimize nodes in the last word's suffix
        self.freeze_tail(0)?;
        if self.frontier[0].input_count < self.min_suffix_count1 as i64
            || self.frontier[0].input_count < self.min_suffix_count2 as i64
            || self.frontier[0].num_arcs == 0
        {
            if self.fst.empty_output.is_none()
                || (self.min_suffix_count1 > 0 || self.min_suffix_count2 > 0)
            {
                return Ok(None);
            }
        } else if self.min_suffix_count2 != 0 {
            let tail_len = self.last_input.length;
            self.compile_all_targets(0, tail_len)?;
        }
        let node = {
            let tail_len = self.last_input.length as u32;
            self.compile_node(0, tail_len)?
        };
        self.fst.finish(node)?;
        // create a tmp for mem replace
        let tmp_fst = FST::new(self.fst.input_type, self.fst.outputs().clone(), 1);
        let fst = mem::replace(&mut self.fst, tmp_fst);
        Ok(Some(fst))
    }

    fn compile_all_targets(&mut self, node_idx: usize, tail_length: usize) -> Result<()> {
        for i in 0..self.frontier[node_idx].num_arcs {
            if let Node::UnCompiled(index) = self.frontier[node_idx].arcs[i].target {
                // not yet compiled
                if self.frontier[index].num_arcs == 0 {
                    self.frontier[node_idx].arcs[i].is_final = true;
                    self.frontier[index].is_final = true;
                }
                self.frontier[node_idx].arcs[i].target =
                    Node::Compiled(self.compile_node(index, tail_length as u32 - 1)? as i64);
            }
        }
        Ok(())
    }
}

pub struct BuilderArc<F: OutputFactory> {
    pub label: i32,
    pub target: Node,
    pub is_final: bool,
    pub output: F::Value,
    pub next_final_output: F::Value,
}

impl<F: OutputFactory> fmt::Debug for BuilderArc<F> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let target = match self.target {
            Node::Compiled(c) => format!("Compiled({})", c),
            Node::UnCompiled(_) => "UnCompiled".to_string(),
        };
        write!(
            f,
            "BuilderArc(label: {}, is_final: {}, output: {:?}, next_final_output: {:?}, target: \
             {})",
            self.label, self.is_final, self.output, self.next_final_output, target
        )
    }
}

impl<F> Clone for BuilderArc<F>
where
    F: OutputFactory,
{
    fn clone(&self) -> Self {
        BuilderArc {
            label: self.label,
            target: self.target.clone(),
            is_final: self.is_final,
            output: self.output.clone(),
            next_final_output: self.next_final_output.clone(),
        }
    }
}

/// used to dedup states (lookup already-frozen states)
struct NodeHash<F: OutputFactory> {
    table: PagedGrowableWriter,
    count: usize,
    mask: usize,
    fst: *mut FST<F>,
    input: StoreBytesReader,
}

impl<F: OutputFactory> NodeHash<F> {
    pub fn new(fst: &mut FST<F>, input: StoreBytesReader) -> Self {
        let table = PagedGrowableWriter::new(16, 1 << 27, 8, COMPACT);
        NodeHash {
            table,
            count: 0,
            mask: 15,
            fst: fst as *mut FST<F>,
            input,
        }
    }

    #[allow(clippy::mut_from_ref)]
    fn fst(&self) -> &mut FST<F> {
        unsafe { &mut (*self.fst) }
    }

    fn nodes_equal(&mut self, node: &UnCompiledNode<F>, address: CompiledAddress) -> Result<bool> {
        let reader = &mut self.input as *mut StoreBytesReader;
        let mut scratch_arc = unsafe { self.fst().read_first_real_arc(address, &mut *reader)? };
        if scratch_arc.bytes_per_arc > 0 && node.num_arcs != scratch_arc.num_arcs {
            return Ok(false);
        }
        for idx in 0..node.num_arcs {
            let arc = &node.arcs[idx];
            if arc.label != scratch_arc.label || arc.is_final != scratch_arc.is_final() {
                return Ok(false);
{
    let no_output = outputs.empty();
    let fst = FST::new(input_type, outputs, bytes_page_bits as usize);
    FstBuilder {
        dedup_hash: None,
        fst,
        no_output,
        min_suffix_count1,
        min_suffix_count2,
        do_share_non_singleton_nodes,
        share_max_tail_length,
        last_input: IntsRefBuilder::new(),
        frontier: Vec::with_capacity(10),
        last_frozen_node: 0,
        reused_bytes_per_arc: Vec::with_capacity(4),
        arc_count: 0,
        node_count: 0,
        allow_array_arcs,
        do_share_suffix,
identifier_body
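The heart of `add` above is the shared-prefix step: each new input is compared against the previous one, everything past the common prefix of the previous input is frozen (`freeze_tail`), and fresh frontier nodes are created for the new suffix. A standalone sketch of just that prefix computation, simplified to plain slices (the real code walks `IntsRef` with an offset):

// Length of the longest common prefix of the previous and current inputs;
// mirrors the pos1/pos2 loop in add().
fn common_prefix_len(last: &[i32], cur: &[i32]) -> usize {
    let stop = last.len().min(cur.len());
    let mut n = 0;
    while n < stop && last[n] == cur[n] {
        n += 1;
    }
    n
}

fn main() {
    // "cat" vs "car": the suffix of "cat" past index 2 gets frozen, and a
    // new frontier arc is added for 'r'.
    assert_eq!(common_prefix_len(&[99, 97, 116], &[99, 97, 114]), 2);
}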
fst_builder.rs
    #[allow(clippy::mut_from_ref)]
    fn
(&self) -> &mut FST<F> {
        unsafe { &mut (*self.fst) }
    }

    fn nodes_equal(&mut self, node: &UnCompiledNode<F>, address: CompiledAddress) -> Result<bool> {
        let reader = &mut self.input as *mut StoreBytesReader;
        let mut scratch_arc = unsafe { self.fst().read_first_real_arc(address, &mut *reader)? };
        if scratch_arc.bytes_per_arc > 0 && node.num_arcs != scratch_arc.num_arcs {
            return Ok(false);
        }
        for idx in 0..node.num_arcs {
            let arc = &node.arcs[idx];
            if arc.label != scratch_arc.label || arc.is_final != scratch_arc.is_final() {
                return Ok(false);
            }
            if let Some(ref output) = scratch_arc.output {
                if output != &arc.output {
                    return Ok(false);
                }
            } else if !arc.output.is_empty() {
                return Ok(false);
            }
            if let Some(ref output) = scratch_arc.next_final_output {
                if output != &arc.next_final_output {
                    return Ok(false);
                }
            } else if !arc.next_final_output.is_empty() {
                return Ok(false);
            }
            if let Node::Compiled(ref node) = arc.target {
                if *node != scratch_arc.target {
                    return Ok(false);
                }
            }
            if scratch_arc.is_last() {
                return Ok(idx == node.num_arcs - 1);
            }
            unsafe { self.fst()
fst
identifier_name
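`nodes_equal` above exists because the real table stores only compiled addresses, so equality has to be checked by replaying the frozen arcs and comparing label, finality, output, next_final_output, and target. The same hash-consing idea, reduced to a `std::collections::HashMap` over a value-type node (illustrative only; the real code hashes into the FST's byte store and uses open addressing):

use std::collections::HashMap;

// Simplified stand-in for a frozen node: (label, is_final, target) per arc.
#[derive(Hash, PartialEq, Eq, Clone)]
struct FrozenNode {
    arcs: Vec<(i32, bool, u64)>,
}

struct SimpleNodeHash {
    table: HashMap<FrozenNode, u64>, // node -> compiled address
    next_addr: u64,
}

impl SimpleNodeHash {
    fn new() -> Self {
        SimpleNodeHash { table: HashMap::new(), next_addr: 0 }
    }

    // Return the address of an equivalent frozen node, "freezing" it if new.
    fn add(&mut self, node: FrozenNode) -> u64 {
        if let Some(&addr) = self.table.get(&node) {
            return addr; // suffix shared: reuse the existing compiled node
        }
        let addr = self.next_addr;
        self.next_addr += 1;
        self.table.insert(node, addr);
        addr
    }
}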
fst_builder.rs
for i in 0..10 { let node = UnCompiledNode::new(self, i); self.frontier.push(node); } self.inited = true; } pub fn term_count(&self) -> i64 { self.frontier[0].input_count } fn compile_node(&mut self, node_index: usize, tail_length: u32) -> Result<CompiledAddress> { debug_assert!(self.inited); let node: i64; let bytes_pos_start = self.fst.bytes_store.get_position(); let builder = self as *mut FstBuilder<F>; unsafe { if let Some(ref mut dedup_hash) = self.dedup_hash { if (self.do_share_non_singleton_nodes || self.frontier[node_index].num_arcs <= 1) && tail_length <= self.share_max_tail_length { if self.frontier[node_index].num_arcs == 0 { node = self.fst.add_node(&mut *builder, node_index)?; self.last_frozen_node = node; } else { node = dedup_hash.add(&mut *builder, node_index)? as i64; } } else { node = self.fst.add_node(&mut *builder, node_index)?; } } else { node = self.fst.add_node(&mut *builder, node_index)?; } } assert_ne!(node, -2); let bytes_pos_end = self.fst.bytes_store.get_position(); if bytes_pos_end!= bytes_pos_start { // fst added a new node assert!(bytes_pos_end > bytes_pos_start); self.last_frozen_node = node; } self.frontier[node_index].clear(); Ok(node) } #[allow(unused_assignments)] fn freeze_tail(&mut self, prefix_len_plus1: usize) -> Result<()> { debug_assert!(self.inited); let down_to = max(1, prefix_len_plus1); if self.last_input.length < down_to { return Ok(()); } for i in 0..=self.last_input.length - down_to { let idx = self.last_input.length - i; let mut do_prune = false; let mut do_compile = false; let tmp = UnCompiledNode::new(self, 0); let mut parent = mem::replace(&mut self.frontier[idx - 1], tmp); if self.frontier[idx].input_count < self.min_suffix_count1 as i64 { do_prune = true; do_compile = true; } else if idx > prefix_len_plus1 { // prune if parent's input_count is less than suffix_min_count2 if parent.input_count < self.min_suffix_count2 as i64 || (self.min_suffix_count2 == 1 && parent.input_count == 1 && idx > 1) { // my parent, about to be compiled, doesn't make the cut, so // I'm definitely pruned // if minSuffixCount2 is 1, we keep only up // until the 'distinguished edge', ie we keep only the // 'divergent' part of the FST. if my parent, about to be // compiled, has inputCount 1 then we are already past the // distinguished edge. NOTE: this only works if // the FST outputs are not "compressible" (simple // ords ARE compressible). 
do_prune = true; } else { // my parent, about to be compiled, does make the cut, so // I'm definitely not pruned do_prune = false; } do_compile = true; } else { // if pruning is disabled (count is 0) we can always // compile current node do_compile = self.min_suffix_count2 == 0; } if self.frontier[idx].input_count < self.min_suffix_count2 as i64 || (self.min_suffix_count2 == 1 && self.frontier[idx].input_count == 1 && idx > 1) { // drop all arcs for arc_idx in 0..self.frontier[idx].num_arcs { if let Node::UnCompiled(target) = self.frontier[idx].arcs[arc_idx].target { self.frontier[target].clear(); } } self.frontier[idx].num_arcs = 0; } if do_prune { // this node doesn't make it -- deref it self.frontier[idx].clear(); parent.delete_last(self.last_input.int_at(idx - 1), &Node::UnCompiled(idx)); } else { if self.min_suffix_count2!= 0 { let tail_len = self.last_input.length - idx; self.compile_all_targets(idx, tail_len)?; } let next_final_output = self.frontier[idx].output.clone(); // We "fake" the node as being final if it has no // outgoing arcs; in theory we could leave it // as non-final (the FST can represent this), but // FSTEnum, Util, etc., have trouble w/ non-final // dead-end states: let is_final = self.frontier[idx].is_final || self.frontier[idx].num_arcs == 0; if do_compile { // this node makes it and we now compile it. first, // compile any targets that were previously // undecided: let tail_len = (1 + self.last_input.length - idx) as u32; let n = self.compile_node(idx, tail_len)?; parent.replace_last( self.last_input.int_at(idx - 1), Node::Compiled(n), next_final_output, is_final, ); } else { // replaceLast just to install // next_final_output/is_final onto the arc parent.replace_last( self.last_input.int_at(idx - 1), Node::UnCompiled(0), // a stub node, next_final_output, is_final, ); // this node will stay in play for now, since we are // undecided on whether to prune it. later, it // will be either compiled or pruned, so we must // allocate a new node: self.frontier[idx] = UnCompiledNode::new(self, idx as i32); } } self.frontier[idx - 1] = parent; } Ok(()) } /// Add the next input/output pair. The provided input /// must be sorted after the previous one according to /// `IntsRef#compareTo`. It's also OK to add the same /// input twice in a row with different outputs, as long /// as `OutputFactory` implements the `OutputFactory#merge` /// method. Note that input is fully consumed after this /// method is returned (so caller is free to reuse), but /// output is not. So if your outputs are changeable (eg /// `ByteSequenceOutputs`) then you cannot reuse across /// calls. pub fn add(&mut self, input: IntsRef, output: F::Value) -> Result<()> { debug_assert!(self.inited); assert!(self.last_input.length == 0 || input > self.last_input.get()); let mut output = output; if self.frontier.len() < input.length + 1 { for i in self.frontier.len()..input.length + 2 { let node = UnCompiledNode::new(self, i as i32); self.frontier.push(node); } } if input.length == 0 { // empty input: only allowed as first input. 
we have // to special case this because the packed FST // format cannot represent the empty input since // 'finalness' is stored on the incoming arc, not on // the node self.frontier[0].input_count += 1; self.frontier[0].is_final = true; self.fst.set_empty_output(output); return Ok(()); } // compare shared prefix length let mut pos1 = 0; let mut pos2 = input.offset; let pos1_stop = min(self.last_input.length, input.length); loop { self.frontier[pos1].input_count += 1; if pos1 >= pos1_stop || self.last_input.int_at(pos1)!= input.ints()[pos2] { break; } pos1 += 1; pos2 += 1; } let prefix_len_plus1 = pos1 + 1; // minimize/compile states from previous input's // orphan'd suffix self.freeze_tail(prefix_len_plus1)?; // init tail states for current input for i in prefix_len_plus1..=input.length { let node = Node::UnCompiled(i); self.frontier[i - 1].add_arc(input.ints()[input.offset + i - 1], node); self.frontier[i].input_count += 1; } let last_idx = input.length; if self.last_input.length!= input.length || prefix_len_plus1!= input.length + 1 { self.frontier[last_idx].is_final = true; self.frontier[last_idx].output = self.no_output.clone(); } // push conflicting outputs forward, only as far as needed for i in 1..prefix_len_plus1 { let last_output = self.frontier[i - 1] .get_last_output(input.ints()[input.offset + i - 1]) .clone(); let common_output_prefix: F::Value; if last_output!= self.no_output { common_output_prefix = self.fst.outputs().common(&output, &last_output); let word_suffix = self .fst .outputs() .subtract(&last_output, &common_output_prefix); self.frontier[i].prepend_output(word_suffix); } else { common_output_prefix = self.no_output.clone(); } output = self.fst.outputs().subtract(&output, &common_output_prefix); if last_output!= self.no_output { self.frontier[i - 1] .set_last_output(input.ints()[input.offset + i - 1], common_output_prefix); } } if self.last_input.length == input.length && prefix_len_plus1 == input.length + 1 { // same input more than 1 time in a row, mapping to // multiple outputs self.frontier[last_idx].output = self .fst .outputs() .merge(&self.frontier[last_idx].output, &output); } else { // this new arc is private to this new input; set its // arc output to the leftover output: self.frontier[prefix_len_plus1 - 1] .set_last_output(input.ints()[input.offset + prefix_len_plus1 - 1], output); } // save last input self.last_input.copy_ints_ref(&input); Ok(()) } // Returns final FST. NOTE: this will return None if nothing is accepted by the fst pub fn finish(&mut self) -> Result<Option<FST<F>>> { debug_assert!(self.inited); // minimize nodes in the last word's suffix self.freeze_tail(0)?; if self.frontier[0].input_count < self.min_suffix_count1 as i64 || self.frontier[0].input_count < self.min_suffix_count2 as i64 || self.frontier[0].num_arcs == 0 { if self.fst.empty_output.is_none() || (self.min_suffix_count1 > 0 || self.min_suffix_count2 > 0) { return Ok(None); } } else if self.min_suffix_count2!= 0 { let tail_len = self.last_input.length; self.compile_all_targets(0, tail_len)?; } let node = { let tail_len = self.last_input.length as u32; self.compile_node(0, tail_len)? 
}; self.fst.finish(node)?; // create a tmp for mem replace let tmp_fst = FST::new(self.fst.input_type, self.fst.outputs().clone(), 1); let fst = mem::replace(&mut self.fst, tmp_fst); Ok(Some(fst)) } fn compile_all_targets(&mut self, node_idx: usize, tail_length: usize) -> Result<()> { for i in 0..self.frontier[node_idx].num_arcs { if let Node::UnCompiled(index) = self.frontier[node_idx].arcs[i].target { // not yet compiled if self.frontier[index].num_arcs == 0 { self.frontier[node_idx].arcs[i].is_final = true; self.frontier[index].is_final = true; } self.frontier[node_idx].arcs[i].target = Node::Compiled(self.compile_node(index, tail_length as u32 - 1)? as i64); } } Ok(()) } } pub struct BuilderArc<F: OutputFactory> { pub label: i32, pub target: Node, pub is_final: bool, pub output: F::Value, pub next_final_output: F::Value, } impl<F: OutputFactory> fmt::Debug for BuilderArc<F> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let target = match self.target { Node::Compiled(c) => format!("Compiled({})", c), Node::UnCompiled(_) => "UnCompiled".to_string(), }; write!( f, "BuilderArc(label: {}, is_final: {}, output: {:?}, next_final_output: {:?}, target: \ {})", self.label, self.is_final, self.output, self.next_final_output, target ) } } impl<F> Clone for BuilderArc<F> where F: OutputFactory, { fn clone(&self) -> Self { BuilderArc { label: self.label, target: self.target.clone(), is_final: self.is_final, output: self.output.clone(), next_final_output: self.next_final_output.clone(), } } } /// used to dedup states (lookup already-frozen states) struct NodeHash<F: OutputFactory> { table: PagedGrowableWriter, count: usize, mask: usize, fst: *mut FST<F>, input: StoreBytesReader, } impl<F: OutputFactory> NodeHash<F> { pub fn new(fst: &mut FST<F>, input: StoreBytesReader) -> Self { let table = PagedGrowableWriter::new(16, 1 << 27, 8, COMPACT); NodeHash { table, count: 0, mask: 15, fst: fst as *mut FST<F>, input, } } #[allow(clippy::mut_from_ref)] fn fst(&self) -> &mut FST<F> { unsafe { &mut (*self.fst) } } fn nodes_equal(&mut self, node: &UnCompiledNode<F>, address: CompiledAddress) -> Result<bool> { let reader = &mut self.input as *mut StoreBytesReader; let mut scratch_arc = unsafe { self.fst().read_first_real_arc(address, &mut *reader)? }; if scratch_arc.bytes_per_arc > 0 && node.num_arcs!= scratch_arc.num_arcs { return Ok(false); } for idx in 0..node.num_arcs { let arc = &node.arcs[idx]; if arc.label!= scratch_arc.label || arc.is_final!= scratch_arc.is_final() { return Ok(false); } if let Some(ref output) = scratch_arc.output { if output!= &arc.output { return Ok(false); } } else if!arc.output.is_empty() { return Ok(false); } if let Some(ref output) = scratch_arc.next_final_output { if output!= &arc.next_final_output { return Ok(false); } } else if!arc.next_final_output.is_empty() { return Ok(false); } if let Node::Compiled(ref node) = arc.target { if *node!= scratch_arc.target
} if scratch_arc.is_last() { return Ok(idx == node.num_arcs - 1); } unsafe { self.fst()
{ return Ok(false); }
conditional_block
fst_builder.rs
for i in 0..10 { let node = UnCompiledNode::new(self, i); self.frontier.push(node); } self.inited = true; } pub fn term_count(&self) -> i64 { self.frontier[0].input_count } fn compile_node(&mut self, node_index: usize, tail_length: u32) -> Result<CompiledAddress> { debug_assert!(self.inited); let node: i64; let bytes_pos_start = self.fst.bytes_store.get_position(); let builder = self as *mut FstBuilder<F>; unsafe { if let Some(ref mut dedup_hash) = self.dedup_hash { if (self.do_share_non_singleton_nodes || self.frontier[node_index].num_arcs <= 1) && tail_length <= self.share_max_tail_length { if self.frontier[node_index].num_arcs == 0 { node = self.fst.add_node(&mut *builder, node_index)?; self.last_frozen_node = node; } else { node = dedup_hash.add(&mut *builder, node_index)? as i64; } } else { node = self.fst.add_node(&mut *builder, node_index)?; } } else { node = self.fst.add_node(&mut *builder, node_index)?; } } assert_ne!(node, -2); let bytes_pos_end = self.fst.bytes_store.get_position(); if bytes_pos_end!= bytes_pos_start { // fst added a new node assert!(bytes_pos_end > bytes_pos_start); self.last_frozen_node = node; } self.frontier[node_index].clear(); Ok(node) } #[allow(unused_assignments)] fn freeze_tail(&mut self, prefix_len_plus1: usize) -> Result<()> { debug_assert!(self.inited); let down_to = max(1, prefix_len_plus1); if self.last_input.length < down_to { return Ok(()); } for i in 0..=self.last_input.length - down_to { let idx = self.last_input.length - i; let mut do_prune = false; let mut do_compile = false; let tmp = UnCompiledNode::new(self, 0); let mut parent = mem::replace(&mut self.frontier[idx - 1], tmp); if self.frontier[idx].input_count < self.min_suffix_count1 as i64 { do_prune = true; do_compile = true; } else if idx > prefix_len_plus1 { // prune if parent's input_count is less than suffix_min_count2 if parent.input_count < self.min_suffix_count2 as i64 || (self.min_suffix_count2 == 1 && parent.input_count == 1 && idx > 1) { // my parent, about to be compiled, doesn't make the cut, so // I'm definitely pruned // if minSuffixCount2 is 1, we keep only up // until the 'distinguished edge', ie we keep only the // 'divergent' part of the FST. if my parent, about to be // compiled, has inputCount 1 then we are already past the // distinguished edge. NOTE: this only works if // the FST outputs are not "compressible" (simple // ords ARE compressible). 
do_prune = true; } else { // my parent, about to be compiled, does make the cut, so // I'm definitely not pruned do_prune = false; } do_compile = true; } else { // if pruning is disabled (count is 0) we can always // compile current node do_compile = self.min_suffix_count2 == 0; } if self.frontier[idx].input_count < self.min_suffix_count2 as i64 || (self.min_suffix_count2 == 1 && self.frontier[idx].input_count == 1 && idx > 1) { // drop all arcs for arc_idx in 0..self.frontier[idx].num_arcs { if let Node::UnCompiled(target) = self.frontier[idx].arcs[arc_idx].target { self.frontier[target].clear(); } } self.frontier[idx].num_arcs = 0; } if do_prune { // this node doesn't make it -- deref it self.frontier[idx].clear(); parent.delete_last(self.last_input.int_at(idx - 1), &Node::UnCompiled(idx)); } else { if self.min_suffix_count2!= 0 { let tail_len = self.last_input.length - idx; self.compile_all_targets(idx, tail_len)?; } let next_final_output = self.frontier[idx].output.clone(); // We "fake" the node as being final if it has no // outgoing arcs; in theory we could leave it // as non-final (the FST can represent this), but // FSTEnum, Util, etc., have trouble w/ non-final // dead-end states: let is_final = self.frontier[idx].is_final || self.frontier[idx].num_arcs == 0; if do_compile { // this node makes it and we now compile it. first, // compile any targets that were previously // undecided: let tail_len = (1 + self.last_input.length - idx) as u32; let n = self.compile_node(idx, tail_len)?; parent.replace_last( self.last_input.int_at(idx - 1), Node::Compiled(n), next_final_output, is_final, ); } else { // replaceLast just to install // next_final_output/is_final onto the arc parent.replace_last( self.last_input.int_at(idx - 1), Node::UnCompiled(0), // a stub node, next_final_output, is_final, ); // this node will stay in play for now, since we are // undecided on whether to prune it. later, it // will be either compiled or pruned, so we must // allocate a new node: self.frontier[idx] = UnCompiledNode::new(self, idx as i32); } } self.frontier[idx - 1] = parent; } Ok(()) } /// Add the next input/output pair. The provided input /// must be sorted after the previous one according to /// `IntsRef#compareTo`. It's also OK to add the same /// input twice in a row with different outputs, as long /// as `OutputFactory` implements the `OutputFactory#merge` /// method. Note that input is fully consumed after this /// method is returned (so caller is free to reuse), but /// output is not. So if your outputs are changeable (eg /// `ByteSequenceOutputs`) then you cannot reuse across /// calls. pub fn add(&mut self, input: IntsRef, output: F::Value) -> Result<()> { debug_assert!(self.inited); assert!(self.last_input.length == 0 || input > self.last_input.get()); let mut output = output; if self.frontier.len() < input.length + 1 { for i in self.frontier.len()..input.length + 2 { let node = UnCompiledNode::new(self, i as i32); self.frontier.push(node); } } if input.length == 0 { // empty input: only allowed as first input. 
we have // to special case this because the packed FST // format cannot represent the empty input since // 'finalness' is stored on the incoming arc, not on // the node self.frontier[0].input_count += 1; self.frontier[0].is_final = true; self.fst.set_empty_output(output); return Ok(()); } // compare shared prefix length let mut pos1 = 0; let mut pos2 = input.offset; let pos1_stop = min(self.last_input.length, input.length); loop { self.frontier[pos1].input_count += 1; if pos1 >= pos1_stop || self.last_input.int_at(pos1)!= input.ints()[pos2] { break; } pos1 += 1; pos2 += 1; } let prefix_len_plus1 = pos1 + 1; // minimize/compile states from previous input's // orphan'd suffix self.freeze_tail(prefix_len_plus1)?; // init tail states for current input for i in prefix_len_plus1..=input.length { let node = Node::UnCompiled(i); self.frontier[i - 1].add_arc(input.ints()[input.offset + i - 1], node); self.frontier[i].input_count += 1; } let last_idx = input.length; if self.last_input.length!= input.length || prefix_len_plus1!= input.length + 1 { self.frontier[last_idx].is_final = true; self.frontier[last_idx].output = self.no_output.clone(); } // push conflicting outputs forward, only as far as needed for i in 1..prefix_len_plus1 { let last_output = self.frontier[i - 1] .get_last_output(input.ints()[input.offset + i - 1]) .clone(); let common_output_prefix: F::Value; if last_output!= self.no_output { common_output_prefix = self.fst.outputs().common(&output, &last_output); let word_suffix = self .fst .outputs() .subtract(&last_output, &common_output_prefix); self.frontier[i].prepend_output(word_suffix); } else { common_output_prefix = self.no_output.clone(); } output = self.fst.outputs().subtract(&output, &common_output_prefix); if last_output!= self.no_output { self.frontier[i - 1] .set_last_output(input.ints()[input.offset + i - 1], common_output_prefix); } } if self.last_input.length == input.length && prefix_len_plus1 == input.length + 1 { // same input more than 1 time in a row, mapping to // multiple outputs self.frontier[last_idx].output = self .fst .outputs() .merge(&self.frontier[last_idx].output, &output); } else { // this new arc is private to this new input; set its // arc output to the leftover output: self.frontier[prefix_len_plus1 - 1] .set_last_output(input.ints()[input.offset + prefix_len_plus1 - 1], output); } // save last input self.last_input.copy_ints_ref(&input); Ok(()) } // Returns final FST. NOTE: this will return None if nothing is accepted by the fst pub fn finish(&mut self) -> Result<Option<FST<F>>> { debug_assert!(self.inited); // minimize nodes in the last word's suffix self.freeze_tail(0)?; if self.frontier[0].input_count < self.min_suffix_count1 as i64 || self.frontier[0].input_count < self.min_suffix_count2 as i64 || self.frontier[0].num_arcs == 0 { if self.fst.empty_output.is_none() || (self.min_suffix_count1 > 0 || self.min_suffix_count2 > 0) { return Ok(None); } } else if self.min_suffix_count2!= 0 { let tail_len = self.last_input.length; self.compile_all_targets(0, tail_len)?; } let node = { let tail_len = self.last_input.length as u32;
// create a tmp for mem replace let tmp_fst = FST::new(self.fst.input_type, self.fst.outputs().clone(), 1); let fst = mem::replace(&mut self.fst, tmp_fst); Ok(Some(fst)) } fn compile_all_targets(&mut self, node_idx: usize, tail_length: usize) -> Result<()> { for i in 0..self.frontier[node_idx].num_arcs { if let Node::UnCompiled(index) = self.frontier[node_idx].arcs[i].target { // not yet compiled if self.frontier[index].num_arcs == 0 { self.frontier[node_idx].arcs[i].is_final = true; self.frontier[index].is_final = true; } self.frontier[node_idx].arcs[i].target = Node::Compiled(self.compile_node(index, tail_length as u32 - 1)? as i64); } } Ok(()) } } pub struct BuilderArc<F: OutputFactory> { pub label: i32, pub target: Node, pub is_final: bool, pub output: F::Value, pub next_final_output: F::Value, } impl<F: OutputFactory> fmt::Debug for BuilderArc<F> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let target = match self.target { Node::Compiled(c) => format!("Compiled({})", c), Node::UnCompiled(_) => "UnCompiled".to_string(), }; write!( f, "BuilderArc(label: {}, is_final: {}, output: {:?}, next_final_output: {:?}, target: \ {})", self.label, self.is_final, self.output, self.next_final_output, target ) } } impl<F> Clone for BuilderArc<F> where F: OutputFactory, { fn clone(&self) -> Self { BuilderArc { label: self.label, target: self.target.clone(), is_final: self.is_final, output: self.output.clone(), next_final_output: self.next_final_output.clone(), } } } /// used to dedup states (lookup already-frozen states) struct NodeHash<F: OutputFactory> { table: PagedGrowableWriter, count: usize, mask: usize, fst: *mut FST<F>, input: StoreBytesReader, } impl<F: OutputFactory> NodeHash<F> { pub fn new(fst: &mut FST<F>, input: StoreBytesReader) -> Self { let table = PagedGrowableWriter::new(16, 1 << 27, 8, COMPACT); NodeHash { table, count: 0, mask: 15, fst: fst as *mut FST<F>, input, } } #[allow(clippy::mut_from_ref)] fn fst(&self) -> &mut FST<F> { unsafe { &mut (*self.fst) } } fn nodes_equal(&mut self, node: &UnCompiledNode<F>, address: CompiledAddress) -> Result<bool> { let reader = &mut self.input as *mut StoreBytesReader; let mut scratch_arc = unsafe { self.fst().read_first_real_arc(address, &mut *reader)? }; if scratch_arc.bytes_per_arc > 0 && node.num_arcs!= scratch_arc.num_arcs { return Ok(false); } for idx in 0..node.num_arcs { let arc = &node.arcs[idx]; if arc.label!= scratch_arc.label || arc.is_final!= scratch_arc.is_final() { return Ok(false); } if let Some(ref output) = scratch_arc.output { if output!= &arc.output { return Ok(false); } } else if!arc.output.is_empty() { return Ok(false); } if let Some(ref output) = scratch_arc.next_final_output { if output!= &arc.next_final_output { return Ok(false); } } else if!arc.next_final_output.is_empty() { return Ok(false); } if let Node::Compiled(ref node) = arc.target { if *node!= scratch_arc.target { return Ok(false); } } if scratch_arc.is_last() { return Ok(idx == node.num_arcs - 1); } unsafe { self.fst()
            self.compile_node(0, tail_len)?
        };
        self.fst.finish(node)?;
random_line_split
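// Illustrative sketch (hypothetical helper, not from the crate above): the
// builder relies on inputs arriving in sorted order, so each new term shares
// some prefix with the previous one; only the orphaned suffix gets frozen
// (minimized), and conflicting outputs are pushed forward along the shared
// prefix. A minimal, self-contained version of that shared-prefix step:
fn common_prefix_len(last: &[i32], next: &[i32]) -> usize {
    last.iter().zip(next.iter()).take_while(|(a, b)| a == b).count()
}

fn main() {
    // "cat" -> "cats": the three shared states are reused, and only a final
    // 's' state is new. Adding "car" afterwards would break the required
    // sorted order ("car" < "cats").
    let cat: Vec<i32> = "cat".bytes().map(i32::from).collect();
    let cats: Vec<i32> = "cats".bytes().map(i32::from).collect();
    assert_eq!(common_prefix_len(&cat, &cats), 3);
}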
furnace.rs
use super::{
    items::item_to_str,
    structure::{Structure, StructureDynIter, StructureId},
    DropItem, FactorishState, FrameProcResult, Inventory, InventoryTrait, ItemType, Position,
    Recipe, TempEnt, COAL_POWER,
};
use once_cell::sync::Lazy;
use serde::{Deserialize, Serialize};
use web_sys::CanvasRenderingContext2d;

const FUEL_CAPACITY: usize = 10;

/// A list of fixed recipes, because dynamic get_recipes() can only return a Vec.
static RECIPES: Lazy<[Recipe; 2]> = Lazy::new(|| {
    [
        Recipe::new(
            hash_map!(ItemType::IronOre => 1usize),
            hash_map!(ItemType::IronPlate => 1usize),
            20.,
            50.,
        ),
        Recipe::new(
            hash_map!(ItemType::CopperOre => 1usize),
            hash_map!(ItemType::CopperPlate => 1usize),
            20.,
            50.,
        ),
    ]
});

#[derive(Serialize, Deserialize)]
pub(crate) struct Furnace {
    position: Position,
    input_inventory: Inventory,
    output_inventory: Inventory,
    progress: Option<f64>,
    power: f64,
    max_power: f64,
    recipe: Option<Recipe>,
}

impl Furnace {
    pub(crate) fn new(position: &Position) -> Self {
        Furnace {
            position: *position,
            input_inventory: Inventory::new(),
            output_inventory: Inventory::new(),
            progress: None,
            power: 20.,
            max_power: 20.,
            recipe: None,
        }
    }
}

impl Structure for Furnace {
    fn name(&self) -> &str {
        "Furnace"
    }

    fn position(&self) -> &Position {
        &self.position
    }

    fn draw(
        &self,
        state: &FactorishState,
        context: &CanvasRenderingContext2d,
        depth: i32,
        is_toolbar: bool,
    ) -> Result<(), JsValue> {
        if depth != 0 {
            return Ok(());
        };
        let (x, y) = (self.position.x as f64 * 32., self.position.y as f64 * 32.);
        match state.image_furnace.as_ref() {
            Some(img) => {
                let sx = if self.progress.is_some() && 0. < self.power {
                    ((((state.sim_time * 5.) as isize) % 2 + 1) * 32) as f64
                } else {
                    0.
                };
                context.draw_image_with_image_bitmap_and_sw_and_sh_and_dx_and_dy_and_dw_and_dh(
                    &img.bitmap,
                    sx,
                    0.,
                    32.,
                    32.,
                    x,
                    y,
                    32.,
                    32.,
                )?;
            }
            None => return Err(JsValue::from_str("furnace image not available")),
        }

        if !is_toolbar {
            crate::draw_fuel_alarm!(self, state, context);
        }

        Ok(())
    }

    fn desc(&self, _state: &FactorishState) -> String {
        format!(
            "{}<br>{}{}",
            if self.recipe.is_some() {
                // Progress bar
                format!("{}{}{}{}",
                    format!("Progress: {:.0}%<br>", self.progress.unwrap_or(0.) * 100.),
                    "<div style='position: relative; width: 100px; height: 10px; background-color: #001f1f; margin: 2px; border: 1px solid #3f3f3f'>",
                    format!("<div style='position: absolute; width: {}px; height: 10px; background-color: #ff00ff'></div></div>",
                        self.progress.unwrap_or(0.) * 100.),
                    format!(r#"Power: {:.1}kJ
                    <div style='position: relative; width: 100px; height: 10px; background-color: #001f1f; margin: 2px; border: 1px solid #3f3f3f'>
                    <div style='position: absolute; width: {}px; height: 10px; background-color: #ff00ff'></div></div>"#,
                        self.power,
                        if 0. < self.max_power { (self.power) / self.max_power * 100. } else { 0. }),
                )
            // getHTML(generateItemImage("time", true, this.recipe.time), true) + "<br>" +
            // "Outputs: <br>" +
            // getHTML(generateItemImage(this.recipe.output, true, 1), true) + "<br>";
            } else {
                String::from("No recipe")
            },
            format!("Input Items: <br>{}", self.input_inventory.describe()),
            format!("Output Items: <br>{}", self.output_inventory.describe())
        )
    }

    fn frame_proc(
        &mut self,
        _me: StructureId,
        state: &mut FactorishState,
        _structures: &mut StructureDynIter,
    ) -> Result<FrameProcResult, ()> {
        if self.recipe.is_none() {
            self.recipe = RECIPES
                .iter()
                .find(|recipe| {
                    recipe
                        .input
                        .iter()
                        .all(|(type_, count)| *count <= self.input_inventory.count_item(&type_))
                })
                .cloned();
        }
        if let Some(recipe) = &self.recipe {
            let mut ret = FrameProcResult::None;
            // First, check if we need to refill the energy buffer in order to continue the current work.
            if self.input_inventory.get(&ItemType::CoalOre).is_some() {
                // Refill the energy from the fuel
                if self.power < recipe.power_cost {
                    self.power += COAL_POWER;
                    self.max_power = self.power;
                    self.input_inventory.remove_item(&ItemType::CoalOre);
                    ret = FrameProcResult::InventoryChanged(self.position);
                }
            }

            if self.progress.is_none() {
                // First, check if we have enough ingredients to finish this recipe.
                // If we do, consume the ingredients and start the progress timer.
                // We can't start as soon as the recipe is set because we may not have enough ingredients
                // at the point we set the recipe.
                if recipe
                    .input
                    .iter()
                    .map(|(item, count)| count <= &self.input_inventory.count_item(item))
                    .all(|b| b)
                {
                    for (item, count) in &recipe.input {
                        self.input_inventory.remove_items(item, *count);
                    }
                    self.progress = Some(0.);
                    ret = FrameProcResult::InventoryChanged(self.position);
                } else {
                    self.recipe = None;
                    return Ok(FrameProcResult::None); // Return here to avoid borrow checker
                }
            }

            if let Some(prev_progress) = self.progress {
                // Proceed only if we have sufficient energy in the buffer.
                let progress = (self.power / recipe.power_cost)
                    .min(1. / recipe.recipe_time)
                    .min(1.);
                if state.rng.next() < progress * 10. {
                    state
                        .temp_ents
                        .push(TempEnt::new(&mut state.rng, self.position));
                }
                if 1. <= prev_progress + progress {
                    self.progress = None;

                    // Produce outputs into inventory
                    for output_item in &recipe.output {
                        self.output_inventory.add_item(&output_item.0);
                    }
                    return Ok(FrameProcResult::InventoryChanged(self.position));
                } else {
                    self.progress = Some(prev_progress + progress);
                    self.power -= progress * recipe.power_cost;
                }
            }
            return Ok(ret);
        }
        Ok(FrameProcResult::None)
    }

    fn input(&mut self, o: &DropItem) -> Result<(), JsValue> {
        // Fuels are always welcome.
        if o.type_ == ItemType::CoalOre
            && self.input_inventory.count_item(&ItemType::CoalOre) < FUEL_CAPACITY
        {
            self.input_inventory.add_item(&ItemType::CoalOre);
            return Ok(());
        }

        if self.recipe.is_none() {
            match o.type_ {
                ItemType::IronOre => {
                    self.recipe = Some(Recipe::new(
                        hash_map!(ItemType::IronOre => 1usize),
                        hash_map!(ItemType::IronPlate => 1usize),
                        20.,
                        50.,
                    ));
                }
                ItemType::CopperOre => {
                    self.recipe = Some(Recipe::new(
                        hash_map!(ItemType::CopperOre => 1usize),
                        hash_map!(ItemType::CopperPlate => 1usize),
                        20.,
                        50.,
                    ));
                }
                _ => {
                    return Err(JsValue::from_str(&format!(
                        "Cannot smelt {}",
                        item_to_str(&o.type_)
                    )))
                }
            }
        }

        if let Some(recipe) = &self.recipe {
            if 0 < recipe.input.count_item(&o.type_) || 0 < recipe.output.count_item(&o.type_) {
                self.input_inventory.add_item(&o.type_);
                return Ok(());
            } else {
                return Err(JsValue::from_str("Item is not part of recipe"));
            }
        }
        Err(JsValue::from_str("Recipe is not initialized"))
    }

    fn can_input(&self, item_type: &ItemType) -> bool {
        if *item_type == ItemType::CoalOre
            && self.input_inventory.count_item(item_type) < FUEL_CAPACITY
        {
            return true;
        }
        if let Some(recipe) = &self.recipe {
            recipe.input.get(item_type).is_some()
        } else {
            matches!(item_type, ItemType::IronOre | ItemType::CopperOre)
        }
    }

    fn can_output(&self, _structures: &StructureDynIter) -> Inventory {
        self.output_inventory.clone()
    }

    fn output(&mut self, _state: &mut FactorishState, item_type: &ItemType) -> Result<(), ()> {
        if self.output_inventory.remove_item(item_type) {
            Ok(())
        } else {
            Err(())
        }
    }

    fn inventory(&self, is_input: bool) -> Option<&Inventory> {
        Some(if is_input {
            &self.input_inventory
        } else {
            &self.output_inventory
        })
    }

    fn inventory_mut(&mut self, is_input: bool) -> Option<&mut Inventory> {
        Some(if is_input {
            &mut self.input_inventory
        } else {
            &mut self.output_inventory
        })
    }

    fn destroy_inventory(&mut self) -> Inventory {
        let mut ret = std::mem::take(&mut self.input_inventory);
        ret.merge(std::mem::take(&mut self.output_inventory));
        // Return the ingredients if it was in the middle of processing a recipe.
        if let Some(mut recipe) = self.recipe.take() {
            if self.progress.is_some() {
                ret.merge(std::mem::take(&mut recipe.input));
            }
        }
        ret
    }

    fn get_recipes(&self) -> std::borrow::Cow<[Recipe]> {
        std::borrow::Cow::from(&RECIPES[..])
    }

    fn get_selected_recipe(&self) -> Option<&Recipe> {
        self.recipe.as_ref()
    }

    fn serialize(&self) -> serde_json::Result<serde_json::Value> {
        serde_json::to_value(self)
    }
}
use wasm_bindgen::prelude::*;
random_line_split
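// Illustrative sketch (standalone, not part of furnace.rs): Furnace::frame_proc
// advances work by min(power / power_cost, 1 / recipe_time) per frame and
// drains progress * power_cost from the energy buffer. The 20/50 constants
// mirror the literals in RECIPES, assuming Recipe::new takes time then power
// cost in that order, which the excerpt above does not confirm.
fn main() {
    let (recipe_time, power_cost) = (20.0_f64, 50.0_f64);
    let mut power = 100.0_f64; // pretend a couple of coal refills happened
    let mut progress = 0.0_f64;
    let mut frames = 0;
    while progress < 1.0 {
        // Progress is capped by both the available power and the recipe's pace.
        let step = (power / power_cost).min(1.0 / recipe_time).min(1.0);
        progress += step;
        power -= step * power_cost;
        frames += 1;
    }
    // With ample power the cap is 1/recipe_time, so this takes ~20 frames
    // and consumes ~50 kJ in total.
    println!("finished in {frames} frames, {power:.1} kJ left");
}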
day18.rs
// day 18
use std::collections::HashMap;

struct MazeNode {
    obstacle: bool,
    key_index: i64,
    door_index: i64,
}

struct Key {
    x: usize,
    y: usize,
    symbol: char,
}

struct Door {
    _x: usize,
    _y: usize,
    symbol: char,
    key_index: usize,
}

struct Maze {
    grid: Vec<Vec<MazeNode>>,
    keys: Vec<Key>,
    doors: Vec<Door>,
    cached: HashMap<usize, HashMap<usize, CachedPath>>,
    width: usize,
    height: usize,
}

struct CachedPath {
    dist: i64,
    keys: Vec<usize>,
}

#[derive(Clone)]
struct DNode {
    x: usize,
    y: usize,
    dist: usize,
    parent_x: usize,
    parent_y: usize,
}

#[derive(Clone)]
struct DNodeB {
    at: Vec<usize>,
    keys: Vec<usize>,
    dist: usize,
}

fn intersect_count(vec_a: &Vec<usize>, vec_b: &Vec<usize>) -> usize {
    let mut count = 0;
    for i in 0..vec_a.len() {
        for j in 0..vec_b.len() {
            if vec_b[j] == vec_a[i] {
                count += 1;
                break;
            }
        }
    }
    return count;
}

fn keynodeindex(maze: &mut Maze, keys: &Vec<usize>, at: &Vec<usize>) -> String {
    let mut ret = String::from("");
    let mut keys2 = Vec::new();
    for i in 0..keys.len() {
        keys2.push((*maze).keys[(*keys)[i]].symbol);
    }
    keys2.sort();
    for i in 0..(*at).len() {
        ret.push((*maze).keys[(*at)[i]].symbol);
    }
    ret.push('|');
    for i in 0..keys2.len() {
        ret.push(keys2[i]);
    }
    return ret;
}

fn dijkstra_b(maze: &mut Maze, origins: &Vec<usize>) -> usize {
    let mut frontier: HashMap<String, DNodeB> = HashMap::new();
    let mut frontier_next: HashMap<String, DNodeB> = HashMap::new();
    let mut explored: HashMap<String, DNodeB> = HashMap::new();
    let mut candidates: HashMap<String, usize> = HashMap::new();
    let mut start = DNodeB { at: Vec::new(), keys: Vec::new(), dist: 0 };
    for i in 0..(*origins).len() {
        start.at.push((*origins)[i]);
        start.keys.push((*origins)[i]);
    }
    frontier_next.insert(keynodeindex(maze, &(start.keys), &(start.at)), start);
    while frontier_next.len() > 0 {
        frontier.clear();
        for key in frontier_next.keys() {
            let node = frontier_next.get(key).unwrap();
            let node2 = (*node).clone();
            frontier.insert(key.to_string(), node2);
        }
        frontier_next.clear();
        for key in frontier.keys() {
            //println!("Key {}", key);
            let node = frontier.get(key).unwrap();
            if (*node).keys.len() == (*maze).keys.len() {
                if let Some(candidate) = candidates.get_mut(key) {
                    if (*candidate) > (*node).dist {
                        *candidate = (*node).dist;
                    }
                } else {
                    candidates.insert(key.to_string(), (*node).dist);
                }
            }
            // add to explored or update
            if let Some(explored_node) = explored.get_mut(key) {
                if (*explored_node).dist > (*node).dist {
                    (*explored_node).keys.clear();
                    (*explored_node).at.clear();
                    for i in 0..(*node).keys.len() {
                        (*explored_node).keys.push((*node).keys[i]);
                    }
                    for i in 0..(*node).at.len() {
                        (*explored_node).at.push((*node).at[i]);
                    }
                    (*explored_node).dist = (*node).dist;
                }
            } else {
                let new_node = (*node).clone();
                explored.insert(key.to_string(), new_node);
            }
            // add all next steps from all positions
            for p in 0..(*node).at.len() {
                for k in 0..(*maze).keys.len() {
                    let mut present = false;
                    for j in 0..(*node).keys.len() {
                        if (*node).keys[j] == k {
                            present = true;
                            break;
                        }
                    }
                    if present {
                        continue;
                    }
                    let curr_key = (*node).at[p];
                    // if not accessible from current position
                    if (*maze).cached.get(&curr_key).unwrap().get(&k).unwrap().dist < 0 {
                        continue;
                    }
                    // if not accessible with current keys
                    let required_keys =
                        (*maze).cached.get(&curr_key).unwrap().get(&k).unwrap().keys.clone();
                    if intersect_count(&((*node).keys), &required_keys) < required_keys.len() {
                        continue;
                    }
                    let mut new_keys = (*node).keys.clone();
                    new_keys.push(k);
                    let mut new_at = (*node).at.clone();
                    new_at[p] = k;
                    let new_keys_index = keynodeindex(maze, &new_keys, &new_at);
                    let new_dist = (*node).dist
                        + ((*maze).cached.get(&curr_key).unwrap().get(&k).unwrap().dist as usize);
                    // if previously explored and not shorter
                    if explored.contains_key(&new_keys_index)
                        && explored.get(&new_keys_index).unwrap().dist < new_dist
                    {
                        continue;
                    }
                    // if previously added to the frontier
                    if frontier_next.contains_key(&new_keys_index)
                        && frontier_next.get(&new_keys_index).unwrap().dist < new_dist
                    {
                        continue;
                    }
                    // add to frontier
                    frontier_next.insert(
                        new_keys_index,
                        DNodeB { at: new_at, keys: new_keys, dist: new_dist },
                    );
                }
            }
        }
    }
    let mut min_dist = 0;
    for candidate_key in candidates.keys() {
        let candidate = candidates.get(candidate_key).unwrap();
        if min_dist == 0 || min_dist > *candidate {
            min_dist = *candidate;
        }
    }
    return min_dist;
}

fn exploredindex(maze: &mut Maze, x: usize, y: usize) -> usize {
    return ((*maze).width * y) + x;
}

fn dijkstra_a(
    maze: &mut Maze,
    start_x: usize,
    start_y: usize,
    end_x: usize,
    end_y: usize,
    doors: &mut Vec<usize>,
    keys: &mut Vec<usize>,
    ret_doors_keys: bool,
) -> i64 {
    let mut explored: HashMap<usize, DNode> = HashMap::new();
    let mut frontier: HashMap<usize, DNode> = HashMap::new();
    let mut frontier_next: HashMap<usize, DNode> = HashMap::new();
    frontier_next.insert(
        exploredindex(maze, start_x, start_y),
        DNode { x: start_x, y: start_y, dist: 0, parent_x: start_x, parent_y: start_y },
    );
    let dest_key = exploredindex(maze, end_x, end_y);
    while frontier_next.len() > 0 {
        frontier.clear();
        for key in frontier_next.keys() {
            let node = frontier_next.get(key).unwrap();
            let new_node = (*node).clone();
            frontier.insert(*key, new_node);
        }
        frontier_next.clear();
        for key in frontier.keys() {
            let node = frontier.get(key).unwrap();
            let exploredindex1 = exploredindex(maze, (*node).x, (*node).y);
            if explored.contains_key(&exploredindex1) {
                let last_dist = explored.get(&exploredindex1).unwrap().dist;
                if (*node).dist < last_dist {
                    let node2 = explored.get_mut(&exploredindex1).unwrap();
                    (*node2).dist = (*node).dist;
                    (*node2).parent_x = (*node).parent_x;
                    (*node2).parent_y = (*node).parent_y;
                }
            } else {
                let new_node = (*node).clone();
                explored.insert(exploredindex1, new_node);
            }
            let mut xd: i64 = 0;
            let mut yd: i64 = 0;
            for i in 0..4 {
                if i == 0 {
                    xd = -1;
                    yd = 0;
                } else if i == 1 {
                    xd = 1;
                    yd = 0;
                } else if i == 2 {
                    xd = 0;
                    yd = 1;
                } else if i == 3 {
                    xd = 0;
                    yd = -1;
                }
                let x1 = (*node).x as i64 + xd;
                let y1 = (*node).y as i64 + yd;
                if x1 < 0 || x1 >= (*maze).width as i64 || y1 < 0 || y1 >= (*maze).height as i64 {
                    continue;
                } else {
                    if (*maze).grid[y1 as usize][x1 as usize].obstacle {
                        continue;
                    }
                    let index = exploredindex(maze, x1 as usize, y1 as usize);
                    let new_dist = (*node).dist + 1;
                    if explored.contains_key(&index) && explored.get(&index).unwrap().dist <= new_dist {
                        continue;
                    }
                    if frontier_next.contains_key(&index)
                        && frontier_next.get(&index).unwrap().dist <= new_dist
                    {
                        continue;
                    }
                    frontier_next.insert(
                        index,
                        DNode {
                            x: x1 as usize,
                            y: y1 as usize,
                            dist: new_dist,
                            parent_x: (*node).x,
                            parent_y: (*node).y,
                        },
                    );
                }
            }
        }
        if explored.contains_key(&dest_key) {
            let end_node = explored.get(&dest_key).unwrap();
            if ret_doors_keys {
                let mut curr_x = end_node.parent_x;
                let mut curr_y = end_node.parent_y;
                while !(curr_x == start_x && curr_y == start_y) {
                    if (*maze).grid[curr_y][curr_x].key_index >= 0 {
                        (*keys).push((*maze).grid[curr_y][curr_x].key_index as usize);
                    }
                    if (*maze).grid[curr_y][curr_x].door_index >= 0 {
                        (*doors).push((*maze).grid[curr_y][curr_x].door_index as usize);
                    }
                    let index = exploredindex(maze, curr_x, curr_y);
                    let trace = explored.get(&index).unwrap();
                    curr_x = trace.parent_x;
                    curr_y = trace.parent_y;
                }
            }
            return end_node.dist as i64;
        }
    }
    return -1;
}

fn read_maze(input: Vec<String>, maze: &mut Maze) -> usize {
    (*maze).width = input[0].len();
    (*maze).height = input.len();
    // read origin, obstacles, doors and keys
    for y in 0..(*maze).height {
        (*maze).grid.push(Vec::new());
        for x in 0..(*maze).width {
            let byte = input[y].as_bytes()[x];
            match byte {
                35 => (*maze).grid[y].push(MazeNode { obstacle: true, door_index: -1, key_index: -1 }),
                46 => (*maze).grid[y].push(MazeNode { obstacle: false, door_index: -1, key_index: -1 }),
                65..=90 => {
                    (*maze).doors.push(Door { _x: x, _y: y, symbol: (byte as char), key_index: 0 });
                    (*maze).grid[y].push(MazeNode {
                        obstacle: false,
                        door_index: ((*maze).doors.len() - 1) as i64,
                        key_index: -1,
                    });
                }
                97..=122 => {
                    (*maze).keys.push(Key { x: x, y: y, symbol: ((byte - 32) as char) });
                    (*maze).grid[y].push(MazeNode {
                        obstacle: false,
                        door_index: -1,
                        key_index: ((*maze).keys.len() - 1) as i64,
                    });
                }
                _ => {
                    (*maze).keys.push(Key { x: x, y: y, symbol: (byte as char) });
                    (*maze).grid[y].push(MazeNode {
                        obstacle: false,
                        door_index: -1,
                        key_index: ((*maze).keys.len() - 1) as i64,
                    });
                }
            }
        }
    }
    // quick lookup for door/key correspondence
    for i in 0..(*maze).doors.len() {
        for j in 0..(*maze).keys.len() {
            if (*maze).keys[j].symbol == (*maze).doors[i].symbol {
                (*maze).doors[i].key_index = j;
                break;
            }
        }
    }
    // cache distances between each key
    for i in 0..(*maze).keys.len() {
        (*maze).cached.insert(i, HashMap::new());
    }
    for i in 0..(*maze).keys.len() {
        for j in 0..(*maze).keys.len() {
            if j == i {
                continue;
            }
            let mut doors = Vec::new();
            let mut keys = Vec::new();
            let dist = dijkstra_a(
                maze,
                (*maze).keys[i].x,
                (*maze).keys[i].y,
                (*maze).keys[j].x,
                (*maze).keys[j].y,
                &mut doors,
                &mut keys,
                true,
            );
            let mut doorkeys: Vec<usize> = Vec::new();
            for k in 0..doors.len() {
                doorkeys.push((*maze).doors[doors[k]].key_index);
            }
            (*maze)
                .cached
                .get_mut(&i)
                .unwrap()
                .insert(j, CachedPath { dist: dist, keys: doorkeys });
        }
    }
    let mut first_keys: Vec<usize> = Vec::new();
    for i in 0..(*maze).keys.len() {
        if (*maze).keys[i].symbol == '@' {
            first_keys.push(i);
        }
    }
    return dijkstra_b(maze, &first_keys);
}

pub fn run(file_path: &str) {
    let mut maze = Maze {
        grid: Vec::new(),
        keys: Vec::new(),
        doors: Vec::new(),
        cached: HashMap::new(),
        height: 0,
        width: 0,
    };
    let mut maze2 = Maze {
        grid: Vec::new(),
        keys: Vec::new(),
        doors: Vec::new(),
        cached: HashMap::new(),
        height: 0,
        width: 0,
    };
    let vec = super::utility::util_fread(file_path);
    let mut vec2: Vec<String> = Vec::new();
    let mut ox = 0;
    let mut oy = 0;
    if vec.len() == 0 {
        println!("Input not read properly");
        return;
    }
    // test if maze is set up for part B
    for line in 0..vec.len() {
        let bytes = vec[line].as_bytes();
        for pos in 0..bytes.len() {
            if bytes[pos] == '@' as u8 {
                ox = pos;
                oy = line;
            }
        }
    }
    let mut has_part_b = true;
    if ox + 1 >= vec[0].len() || (ox as i64 - 1) < 0 || oy + 1 >= vec.len() || (oy as i64 - 1) < 0 {
        has_part_b = false;
    } else {
        for y in oy - 1..=oy + 1 {
            let bytes = vec[y].as_bytes();
            if y == oy - 1
                && (bytes[ox - 1] != '.' as u8 || bytes[ox] != '.' as u8 || bytes[ox + 1] != '.' as u8)
            {
                has_part_b = false;
                break;
            } else if y == oy
                && (bytes[ox - 1] != '.' as u8 || bytes[ox] != '@' as u8 || bytes[ox + 1] != '.' as u8)
            {
                has_part_b = false;
                break;
            } else if y == oy + 1
                && (bytes[ox - 1] != '.' as u8 || bytes[ox] != '.' as u8 || bytes[ox + 1] != '.' as u8)
            {
                has_part_b = false;
                break;
            }
        }
    }
    if has_part_b
                }
            }
            vec2.push(line);
        }
    }
    let result_a = read_maze(vec, &mut maze);
    println!("Result A: {}", result_a);
    if has_part_b {
        let result_b = read_maze(vec2, &mut maze2);
        println!("Result B: {}", result_b);
    }
}
    {
        for y in 0..vec.len() {
            let mut line = String::from("");
            let bytes = vec[y].as_bytes();
            for x in 0..vec[y].len() {
                if (x == ox - 1 && y == oy - 1)
                    || (x == ox + 1 && y == oy - 1)
                    || (x == ox - 1 && y == oy + 1)
                    || (x == ox + 1 && y == oy + 1)
                {
                    line.push('@');
                } else if (x == ox && y == oy - 1)
                    || (x == ox && y == oy + 1)
                    || (x == ox - 1 && y == oy)
                    || (x == ox + 1 && y == oy)
                    || (x == ox && y == oy)
                {
                    line.push('#');
                } else {
                    line.push(bytes[x] as char);
conditional_block
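// Illustrative sketch (hypothetical helper, not from the solution above):
// dijkstra_b keys its search states by a string "<robot positions>|<sorted
// keys>". The same idea is often encoded as (positions, key bitmask), which is
// cheaper to build and hash. This sketch assumes at most 4 robots and fewer
// than 32 keys, which holds for AoC 2019 day 18 inputs.
fn state_key(positions: &[usize], keys_mask: u32) -> u64 {
    let mut k = keys_mask as u64;
    for (i, &p) in positions.iter().enumerate() {
        // Pack each robot's key-node index into its own byte above the mask bits.
        k |= (p as u64 & 0xff) << (32 + 8 * i);
    }
    k
}

fn main() {
    let a = state_key(&[3, 7], 0b101); // robots at key-nodes 3 and 7, holding keys 0 and 2
    let b = state_key(&[7, 3], 0b101); // same keys, swapped positions: a distinct state
    assert_ne!(a, b);
    println!("{a:#x} vs {b:#x}");
}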
day18.rs
// day 18 use std::collections::HashMap; struct MazeNode { obstacle: bool, key_index: i64, door_index: i64, } struct Key { x: usize, y: usize, symbol: char, } struct Door { _x: usize, _y: usize, symbol: char, key_index: usize, } struct Maze { grid: Vec<Vec<MazeNode>>, keys: Vec<Key>, doors: Vec<Door>, cached: HashMap<usize, HashMap<usize, CachedPath>>, width: usize, height: usize, } struct CachedPath { dist: i64, keys: Vec<usize>, } #[derive(Clone)] struct DNode { x: usize, y: usize, dist: usize, parent_x:usize, parent_y:usize } #[derive(Clone)] struct DNodeB { at:Vec<usize>, keys:Vec<usize>, dist:usize } fn intersect_count (vec_a:&Vec<usize>, vec_b:&Vec<usize>)->usize { let mut count = 0; for i in 0..vec_a.len() { for j in 0..vec_b.len() { if vec_b[j] == vec_a[i] { count+=1; break; } } } return count; } fn keynodeindex(maze:&mut Maze, keys: &Vec<usize>, at: &Vec<usize>)->String { let mut ret = String::from(""); let mut keys2 = Vec::new(); for i in 0..keys.len() { keys2.push((*maze).keys[(*keys)[i]].symbol); } keys2.sort(); for i in 0..(*at).len() { ret.push((*maze).keys[(*at)[i]].symbol); } ret.push('|'); for i in 0..keys2.len() { ret.push(keys2[i]); } return ret; } fn dijkstra_b(maze:&mut Maze, origins:&Vec<usize>)->usize { let mut frontier:HashMap<String, DNodeB> = HashMap::new(); let mut frontier_next:HashMap<String, DNodeB> = HashMap::new(); let mut explored:HashMap<String, DNodeB> = HashMap::new(); let mut candidates:HashMap<String, usize> = HashMap::new(); let mut start = DNodeB{at:Vec::new(), keys:Vec::new(), dist:0}; for i in 0..(*origins).len() { start.at.push((*origins)[i]); start.keys.push((*origins)[i]); } frontier_next.insert(keynodeindex(maze, &(start.keys), &(start.at)), start); while frontier_next.len() > 0 { frontier.clear(); for key in frontier_next.keys() { let node = frontier_next.get(key).unwrap(); let node2 = (*node).clone(); frontier.insert(key.to_string(), node2); } frontier_next.clear(); for key in frontier.keys() { //println!("Key {}", key); let node = frontier.get(key).unwrap(); if (*node).keys.len() == (*maze).keys.len() { if let Some(candidate) = candidates.get_mut(key) { if (*candidate) > (*node).dist { *candidate = (*node).dist; } } else { candidates.insert(key.to_string(), (*node).dist); } } // add to explored or update if let Some(explored_node) = explored.get_mut(key) { if (*explored_node).dist > (*node).dist { (*explored_node).keys.clear(); (*explored_node).at.clear(); for i in 0..(*node).keys.len() { (*explored_node).keys.push((*node).keys[i]); } for i in 0..(*node).at.len() { (*explored_node).at.push((*node).at[i]); } (*explored_node).dist = (*node).dist; } } else { let new_node = (*node).clone(); explored.insert(key.to_string(), new_node); } // add all next steps from all positions for p in 0..(*node).at.len() { for k in 0..(*maze).keys.len() { let mut present = false; for j in 0..(*node).keys.len() { if (*node).keys[j] == k { present = true; break; } } if present { continue; } let curr_key = (*node).at[p]; // if not accessible from current position if (*maze).cached.get(&curr_key).unwrap().get(&k).unwrap().dist < 0 { continue; } // if not accessible with current keys let required_keys = (*maze).cached.get(&curr_key).unwrap().get(&k).unwrap().keys.clone(); if intersect_count(&((*node).keys), &required_keys) < required_keys.len() { continue; } let mut new_keys = (*node).keys.clone(); new_keys.push(k); let mut new_at = (*node).at.clone(); new_at[p] = k; let new_keys_index = keynodeindex(maze, &new_keys, &new_at); let new_dist = (*node).dist + 
((*maze).cached.get(&curr_key).unwrap().get(&k).unwrap().dist as usize); // if previously explored and not shorter if explored.contains_key(&new_keys_index) && explored.get(&new_keys_index).unwrap().dist < new_dist { continue; } // if previously added to the frontier if frontier_next.contains_key(&new_keys_index) && frontier_next.get(&new_keys_index).unwrap().dist < new_dist { continue; } // add to frontier frontier_next.insert(new_keys_index, DNodeB{at:new_at, keys:new_keys, dist:new_dist}); } } } } let mut min_dist = 0; for candidate_key in candidates.keys() { let candidate = candidates.get(candidate_key).unwrap(); if min_dist == 0 || min_dist > *candidate { min_dist = *candidate; } } return min_dist; } fn exploredindex(maze: &mut Maze, x: usize, y:usize)->usize { return ((*maze).width * y) + x; } fn dijkstra_a(maze: &mut Maze, start_x:usize, start_y:usize, end_x:usize, end_y:usize, doors:&mut Vec<usize>, keys:&mut Vec<usize>, ret_doors_keys:bool)->i64
for key in frontier.keys() { let node = frontier.get(key).unwrap(); let exploredindex1 = exploredindex(maze, (*node).x, (*node).y); if explored.contains_key(&exploredindex1) { let last_dist = explored.get(&exploredindex1).unwrap().dist; if (*node).dist < last_dist { let node2 = explored.get_mut(&exploredindex1).unwrap(); (*node2).dist = (*node).dist; (*node2).parent_x = (*node).parent_x; (*node2).parent_y = (*node).parent_y; } } else { let new_node = (*node).clone(); explored.insert(exploredindex1, new_node); } let mut xd:i64 = 0; let mut yd:i64 = 0; for i in 0..4 { if i == 0 { xd = -1; yd = 0; } else if i == 1 { xd = 1; yd = 0; } else if i == 2 { xd = 0; yd = 1; } else if i == 3 { xd = 0; yd = -1; } let x1 = (*node).x as i64 + xd; let y1 = (*node).y as i64 + yd; if x1 < 0 || x1 >= (*maze).width as i64 || y1 < 0 || y1 >= (*maze).height as i64 { continue; } else { if (*maze).grid[y1 as usize][x1 as usize].obstacle { continue; } let index = exploredindex(maze, x1 as usize, y1 as usize); let new_dist = (*node).dist + 1; if explored.contains_key(&index) && explored.get(&index).unwrap().dist <= new_dist { continue; } if frontier_next.contains_key(&index) && frontier_next.get(&index).unwrap().dist <= new_dist { continue; } frontier_next.insert(index, DNode{x:x1 as usize, y:y1 as usize, dist:new_dist, parent_x:(*node).x, parent_y:(*node).y}); } } } if explored.contains_key(&dest_key) { let end_node = explored.get(&dest_key).unwrap(); if ret_doors_keys { let mut curr_x = end_node.parent_x; let mut curr_y = end_node.parent_y; while!(curr_x == start_x && curr_y == start_y) { if (*maze).grid[curr_y][curr_x].key_index >= 0 { (*keys).push((*maze).grid[curr_y][curr_x].key_index as usize); } if (*maze).grid[curr_y][curr_x].door_index >= 0 { (*doors).push((*maze).grid[curr_y][curr_x].door_index as usize); } let index = exploredindex(maze, curr_x, curr_y); let trace = explored.get(&index).unwrap(); curr_x = trace.parent_x; curr_y = trace.parent_y; } } return end_node.dist as i64; } } return -1; } fn read_maze(input: Vec<String>, maze:&mut Maze)->usize { (*maze).width = input[0].len(); (*maze).height = input.len(); // read origin, obstacles, doors and keys for y in 0..(*maze).height { (*maze).grid.push(Vec::new()); for x in 0..(*maze).width { let byte = input[y].as_bytes()[x]; match byte { 35=>(*maze).grid[y].push(MazeNode{obstacle:true, door_index: -1, key_index: -1}), 46=>(*maze).grid[y].push(MazeNode{obstacle:false, door_index: -1, key_index: -1}), 65..=90=> {(*maze).doors.push(Door{_x:x,_y:y,symbol:(byte as char),key_index:0}); (*maze).grid[y].push(MazeNode{obstacle:false, door_index: ((*maze).doors.len() - 1) as i64, key_index: -1}); }, 97..=122=> {(*maze).keys.push(Key{x:x,y:y,symbol:((byte-32) as char)}); (*maze).grid[y].push(MazeNode{obstacle:false, door_index: -1, key_index: ((*maze).keys.len() - 1) as i64}); }, _=>{(*maze).keys.push(Key{x:x, y:y, symbol:(byte as char)}); (*maze).grid[y].push(MazeNode{obstacle:false, door_index: -1, key_index: ((*maze).keys.len() - 1) as i64});}, } } } // quick lookup for door/key correspondance for i in 0..(*maze).doors.len() { for j in 0..(*maze).keys.len() { if (*maze).keys[j].symbol == (*maze).doors[i].symbol { (*maze).doors[i].key_index = j; break; } } } // cache distances between each key for i in 0..(*maze).keys.len() { (*maze).cached.insert(i, HashMap::new()); } for i in 0..(*maze).keys.len() { for j in 0..(*maze).keys.len() { if j == i { continue; } let mut doors = Vec::new(); let mut keys = Vec::new(); let dist = dijkstra_a(maze, (*maze).keys[i].x, 
(*maze).keys[i].y, (*maze).keys[j].x, (*maze).keys[j].y, &mut doors, &mut keys, true); let mut doorkeys:Vec<usize> = Vec::new(); for k in 0..doors.len() { doorkeys.push((*maze).doors[doors[k]].key_index); } (*maze).cached.get_mut(&i).unwrap().insert(j, CachedPath{dist: dist, keys: doorkeys}); } } let mut first_keys:Vec<usize> = Vec::new(); for i in 0..(*maze).keys.len() { if (*maze).keys[i].symbol == '@' { first_keys.push(i); } } return dijkstra_b(maze, &first_keys); } pub fn run(file_path:&str) { let mut maze = Maze{grid:Vec::new(), keys:Vec::new(), doors:Vec::new(), cached:HashMap::new(), height: 0, width: 0}; let mut maze2 = Maze{grid:Vec::new(), keys:Vec::new(), doors:Vec::new(), cached:HashMap::new(), height: 0, width: 0}; let vec = super::utility::util_fread(file_path); let mut vec2:Vec<String> = Vec::new(); let mut ox = 0; let mut oy = 0; if vec.len() == 0 { println!("Input not read properly"); return; } // test if maze is set up for part B for line in 0..vec.len() { let bytes = vec[line].as_bytes(); for pos in 0..bytes.len() { if bytes[pos] == '@' as u8 { ox = pos; oy = line; } } } let mut has_part_b = true; if ox + 1 >= vec[0].len() || (ox as i64 - 1) < 0 || oy + 1 >= vec.len() || (oy as i64 - 1) < 0 { has_part_b = false; } else { for y in oy-1..=oy+1 { let bytes = vec[y].as_bytes(); if y == oy-1 && (bytes[ox-1]!= '.' as u8 || bytes[ox]!= '.' as u8 || bytes[ox+1]!= '.' as u8) { has_part_b = false; break; } else if y == oy && (bytes[ox-1]!= '.' as u8 || bytes[ox]!= '@' as u8 || bytes[ox+1]!= '.' as u8) { has_part_b = false; break; } else if y == oy+1 && (bytes[ox-1]!= '.' as u8 || bytes[ox]!= '.' as u8 || bytes[ox+1]!= '.' as u8) { has_part_b = false; break; } } } if has_part_b { for y in 0..vec.len() { let mut line = String::from(""); let bytes = vec[y].as_bytes(); for x in 0..vec[y].len() { if (x == ox - 1 && y == oy - 1) || (x == ox + 1 && y == oy - 1) || (x == ox - 1 && y == oy + 1) || (x == ox + 1 && y == oy + 1) { line.push('@'); } else if (x == ox && y == oy - 1) || (x == ox && y == oy + 1) || (x == ox - 1 && y == oy) || (x == ox + 1 && y == oy) || (x == ox && y == oy) { line.push('#'); } else { line.push(bytes[x] as char); } } vec2.push(line); } } let result_a = read_maze(vec, &mut maze); println!("Result A: {}", result_a); if has_part_b { let result_b = read_maze(vec2, &mut maze2); println!("Result B: {}", result_b); } }
{ let mut explored:HashMap<usize, DNode> = HashMap::new(); let mut frontier:HashMap<usize,DNode> = HashMap::new(); let mut frontier_next:HashMap<usize,DNode> = HashMap::new(); frontier_next.insert(exploredindex(maze, start_x, start_y), DNode{x:start_x, y:start_y, dist:0, parent_x:start_x, parent_y:start_y}); let dest_key = exploredindex(maze, end_x, end_y); while frontier_next.len() > 0 { frontier.clear(); for key in frontier_next.keys() { let node = frontier_next.get(key).unwrap(); let new_node = (*node).clone(); frontier.insert(*key, new_node); } frontier_next.clear();
identifier_body
day18.rs
// day 18
use std::collections::HashMap;

struct MazeNode {
    obstacle: bool,
    key_index: i64,
    door_index: i64,
}

struct Key {
    x: usize,
    y: usize,
    symbol: char,
}

struct Door {
    _x: usize,
    _y: usize,
    symbol: char,
    key_index: usize,
}

struct Maze {
    grid: Vec<Vec<MazeNode>>,
    keys: Vec<Key>,
    doors: Vec<Door>,
    cached: HashMap<usize, HashMap<usize, CachedPath>>,
    width: usize,
    height: usize,
}

struct CachedPath {
    dist: i64,
    keys: Vec<usize>,
}

#[derive(Clone)]
struct DNode { x: usize, y: usize, dist: usize, parent_x: usize, parent_y: usize }

#[derive(Clone)]
struct DNodeB { at: Vec<usize>, keys: Vec<usize>, dist: usize }

fn intersect_count(vec_a: &Vec<usize>, vec_b: &Vec<usize>) -> usize {
    let mut count = 0;
    for i in 0..vec_a.len() {
        for j in 0..vec_b.len() {
            if vec_b[j] == vec_a[i] { count += 1; break; }
        }
    }
    return count;
}

fn keynodeindex(maze: &mut Maze, keys: &Vec<usize>, at: &Vec<usize>) -> String {
    let mut ret = String::from("");
    let mut keys2 = Vec::new();
    for i in 0..keys.len() { keys2.push((*maze).keys[(*keys)[i]].symbol); }
    keys2.sort();
    for i in 0..(*at).len() { ret.push((*maze).keys[(*at)[i]].symbol); }
    ret.push('|');
    for i in 0..keys2.len() { ret.push(keys2[i]); }
    return ret;
}

fn dijkstra_b(maze: &mut Maze, origins: &Vec<usize>) -> usize {
    let mut frontier: HashMap<String, DNodeB> = HashMap::new();
    let mut frontier_next: HashMap<String, DNodeB> = HashMap::new();
    let mut explored: HashMap<String, DNodeB> = HashMap::new();
    let mut candidates: HashMap<String, usize> = HashMap::new();
    let mut start = DNodeB { at: Vec::new(), keys: Vec::new(), dist: 0 };
    for i in 0..(*origins).len() {
        start.at.push((*origins)[i]);
        start.keys.push((*origins)[i]);
    }
    frontier_next.insert(keynodeindex(maze, &(start.keys), &(start.at)), start);
    while frontier_next.len() > 0 {
        frontier.clear();
        for key in frontier_next.keys() {
            let node = frontier_next.get(key).unwrap();
            let node2 = (*node).clone();
            frontier.insert(key.to_string(), node2);
        }
        frontier_next.clear();
        for key in frontier.keys() {
            //println!("Key {}", key);
            let node = frontier.get(key).unwrap();
            if (*node).keys.len() == (*maze).keys.len() {
                if let Some(candidate) = candidates.get_mut(key) {
                    if (*candidate) > (*node).dist { *candidate = (*node).dist; }
                } else {
                    candidates.insert(key.to_string(), (*node).dist);
                }
            }
            // add to explored or update
            if let Some(explored_node) = explored.get_mut(key) {
                if (*explored_node).dist > (*node).dist {
                    (*explored_node).keys.clear();
                    (*explored_node).at.clear();
                    for i in 0..(*node).keys.len() { (*explored_node).keys.push((*node).keys[i]); }
                    for i in 0..(*node).at.len() { (*explored_node).at.push((*node).at[i]); }
                    (*explored_node).dist = (*node).dist;
                }
            } else {
                let new_node = (*node).clone();
                explored.insert(key.to_string(), new_node);
            }
            // add all next steps from all positions
            for p in 0..(*node).at.len() {
                for k in 0..(*maze).keys.len() {
                    let mut present = false;
                    for j in 0..(*node).keys.len() {
                        if (*node).keys[j] == k { present = true; break; }
                    }
                    if present { continue; }
                    let curr_key = (*node).at[p];
                    // if not accessible from current position
                    if (*maze).cached.get(&curr_key).unwrap().get(&k).unwrap().dist < 0 { continue; }
                    // if not accessible with current keys
                    let required_keys = (*maze).cached.get(&curr_key).unwrap().get(&k).unwrap().keys.clone();
                    if intersect_count(&((*node).keys), &required_keys) < required_keys.len() { continue; }
                    let mut new_keys = (*node).keys.clone();
                    new_keys.push(k);
                    let mut new_at = (*node).at.clone();
                    new_at[p] = k;
                    let new_keys_index = keynodeindex(maze, &new_keys, &new_at);
                    let new_dist = (*node).dist + ((*maze).cached.get(&curr_key).unwrap().get(&k).unwrap().dist as usize);
                    // if previously explored and not shorter
                    if explored.contains_key(&new_keys_index) && explored.get(&new_keys_index).unwrap().dist < new_dist { continue; }
                    // if previously added to the frontier
                    if frontier_next.contains_key(&new_keys_index) && frontier_next.get(&new_keys_index).unwrap().dist < new_dist { continue; }
                    // add to frontier
                    frontier_next.insert(new_keys_index, DNodeB { at: new_at, keys: new_keys, dist: new_dist });
                }
            }
        }
    }
    let mut min_dist = 0;
    for candidate_key in candidates.keys() {
        let candidate = candidates.get(candidate_key).unwrap();
        if min_dist == 0 || min_dist > *candidate { min_dist = *candidate; }
    }
    return min_dist;
}

fn exploredindex(maze: &mut Maze, x: usize, y: usize) -> usize {
    return ((*maze).width * y) + x;
}

fn dijkstra_a(maze: &mut Maze, start_x: usize, start_y: usize, end_x: usize, end_y: usize,
              doors: &mut Vec<usize>, keys: &mut Vec<usize>, ret_doors_keys: bool) -> i64 {
    let mut explored: HashMap<usize, DNode> = HashMap::new();
    let mut frontier: HashMap<usize, DNode> = HashMap::new();
    let mut frontier_next: HashMap<usize, DNode> = HashMap::new();
    frontier_next.insert(exploredindex(maze, start_x, start_y),
        DNode { x: start_x, y: start_y, dist: 0, parent_x: start_x, parent_y: start_y });
    let dest_key = exploredindex(maze, end_x, end_y);
    while frontier_next.len() > 0 {
        frontier.clear();
        for key in frontier_next.keys() {
            let node = frontier_next.get(key).unwrap();
            let new_node = (*node).clone();
            frontier.insert(*key, new_node);
        }
        frontier_next.clear();
        for key in frontier.keys() {
            let node = frontier.get(key).unwrap();
            let exploredindex1 = exploredindex(maze, (*node).x, (*node).y);
            if explored.contains_key(&exploredindex1) {
                let last_dist = explored.get(&exploredindex1).unwrap().dist;
                if (*node).dist < last_dist {
                    let node2 = explored.get_mut(&exploredindex1).unwrap();
                    (*node2).dist = (*node).dist;
                    (*node2).parent_x = (*node).parent_x;
                    (*node2).parent_y = (*node).parent_y;
                }
            } else {
                let new_node = (*node).clone();
                explored.insert(exploredindex1, new_node);
            }
            let mut xd: i64 = 0;
            let mut yd: i64 = 0;
            for i in 0..4 {
                if i == 0 { xd = -1; yd = 0;
                } else if i == 3 { xd = 0; yd = -1; }
                let x1 = (*node).x as i64 + xd;
                let y1 = (*node).y as i64 + yd;
                if x1 < 0 || x1 >= (*maze).width as i64 || y1 < 0 || y1 >= (*maze).height as i64 {
                    continue;
                } else {
                    if (*maze).grid[y1 as usize][x1 as usize].obstacle { continue; }
                    let index = exploredindex(maze, x1 as usize, y1 as usize);
                    let new_dist = (*node).dist + 1;
                    if explored.contains_key(&index) && explored.get(&index).unwrap().dist <= new_dist { continue; }
                    if frontier_next.contains_key(&index) && frontier_next.get(&index).unwrap().dist <= new_dist { continue; }
                    frontier_next.insert(index, DNode { x: x1 as usize, y: y1 as usize, dist: new_dist,
                        parent_x: (*node).x, parent_y: (*node).y });
                }
            }
        }
        if explored.contains_key(&dest_key) {
            let end_node = explored.get(&dest_key).unwrap();
            if ret_doors_keys {
                let mut curr_x = end_node.parent_x;
                let mut curr_y = end_node.parent_y;
                while !(curr_x == start_x && curr_y == start_y) {
                    if (*maze).grid[curr_y][curr_x].key_index >= 0 {
                        (*keys).push((*maze).grid[curr_y][curr_x].key_index as usize);
                    }
                    if (*maze).grid[curr_y][curr_x].door_index >= 0 {
                        (*doors).push((*maze).grid[curr_y][curr_x].door_index as usize);
                    }
                    let index = exploredindex(maze, curr_x, curr_y);
                    let trace = explored.get(&index).unwrap();
                    curr_x = trace.parent_x;
                    curr_y = trace.parent_y;
                }
            }
            return end_node.dist as i64;
        }
    }
    return -1;
}

fn read_maze(input: Vec<String>, maze: &mut Maze) -> usize {
    (*maze).width = input[0].len();
    (*maze).height = input.len();
    // read origin, obstacles, doors and keys
    for y in 0..(*maze).height {
        (*maze).grid.push(Vec::new());
        for x in 0..(*maze).width {
            let byte = input[y].as_bytes()[x];
            match byte {
                35 => (*maze).grid[y].push(MazeNode { obstacle: true, door_index: -1, key_index: -1 }),
                46 => (*maze).grid[y].push(MazeNode { obstacle: false, door_index: -1, key_index: -1 }),
                65..=90 => {
                    (*maze).doors.push(Door { _x: x, _y: y, symbol: (byte as char), key_index: 0 });
                    (*maze).grid[y].push(MazeNode { obstacle: false, door_index: ((*maze).doors.len() - 1) as i64, key_index: -1 });
                },
                97..=122 => {
                    (*maze).keys.push(Key { x: x, y: y, symbol: ((byte - 32) as char) });
                    (*maze).grid[y].push(MazeNode { obstacle: false, door_index: -1, key_index: ((*maze).keys.len() - 1) as i64 });
                },
                _ => {
                    (*maze).keys.push(Key { x: x, y: y, symbol: (byte as char) });
                    (*maze).grid[y].push(MazeNode { obstacle: false, door_index: -1, key_index: ((*maze).keys.len() - 1) as i64 });
                },
            }
        }
    }
    // quick lookup for door/key correspondence
    for i in 0..(*maze).doors.len() {
        for j in 0..(*maze).keys.len() {
            if (*maze).keys[j].symbol == (*maze).doors[i].symbol {
                (*maze).doors[i].key_index = j;
                break;
            }
        }
    }
    // cache distances between each key
    for i in 0..(*maze).keys.len() {
        (*maze).cached.insert(i, HashMap::new());
    }
    for i in 0..(*maze).keys.len() {
        for j in 0..(*maze).keys.len() {
            if j == i { continue; }
            let mut doors = Vec::new();
            let mut keys = Vec::new();
            let dist = dijkstra_a(maze, (*maze).keys[i].x, (*maze).keys[i].y,
                (*maze).keys[j].x, (*maze).keys[j].y, &mut doors, &mut keys, true);
            let mut doorkeys: Vec<usize> = Vec::new();
            for k in 0..doors.len() {
                doorkeys.push((*maze).doors[doors[k]].key_index);
            }
            (*maze).cached.get_mut(&i).unwrap().insert(j, CachedPath { dist: dist, keys: doorkeys });
        }
    }
    let mut first_keys: Vec<usize> = Vec::new();
    for i in 0..(*maze).keys.len() {
        if (*maze).keys[i].symbol == '@' {
            first_keys.push(i);
        }
    }
    return dijkstra_b(maze, &first_keys);
}

pub fn run(file_path: &str) {
    let mut maze = Maze { grid: Vec::new(), keys: Vec::new(), doors: Vec::new(),
        cached: HashMap::new(), height: 0, width: 0 };
    let mut maze2 = Maze { grid: Vec::new(), keys: Vec::new(), doors: Vec::new(),
        cached: HashMap::new(), height: 0, width: 0 };
    let vec = super::utility::util_fread(file_path);
    let mut vec2: Vec<String> = Vec::new();
    let mut ox = 0;
    let mut oy = 0;
    if vec.len() == 0 {
        println!("Input not read properly");
        return;
    }
    // test if maze is set up for part B
    for line in 0..vec.len() {
        let bytes = vec[line].as_bytes();
        for pos in 0..bytes.len() {
            if bytes[pos] == '@' as u8 {
                ox = pos;
                oy = line;
            }
        }
    }
    let mut has_part_b = true;
    if ox + 1 >= vec[0].len() || (ox as i64 - 1) < 0 || oy + 1 >= vec.len() || (oy as i64 - 1) < 0 {
        has_part_b = false;
    } else {
        for y in oy-1..=oy+1 {
            let bytes = vec[y].as_bytes();
            if y == oy-1 && (bytes[ox-1] != '.' as u8 || bytes[ox] != '.' as u8 || bytes[ox+1] != '.' as u8) {
                has_part_b = false;
                break;
            } else if y == oy && (bytes[ox-1] != '.' as u8 || bytes[ox] != '@' as u8 || bytes[ox+1] != '.' as u8) {
                has_part_b = false;
                break;
            } else if y == oy+1 && (bytes[ox-1] != '.' as u8 || bytes[ox] != '.' as u8 || bytes[ox+1] != '.' as u8) {
                has_part_b = false;
                break;
            }
        }
    }
    if has_part_b {
        for y in 0..vec.len() {
            let mut line = String::from("");
            let bytes = vec[y].as_bytes();
            for x in 0..vec[y].len() {
                if (x == ox - 1 && y == oy - 1) || (x == ox + 1 && y == oy - 1)
                    || (x == ox - 1 && y == oy + 1) || (x == ox + 1 && y == oy + 1) {
                    line.push('@');
                } else if (x == ox && y == oy - 1) || (x == ox && y == oy + 1)
                    || (x == ox - 1 && y == oy) || (x == ox + 1 && y == oy) || (x == ox && y == oy) {
                    line.push('#');
                } else {
                    line.push(bytes[x] as char);
                }
            }
            vec2.push(line);
        }
    }
    let result_a = read_maze(vec, &mut maze);
    println!("Result A: {}", result_a);
    if has_part_b {
        let result_b = read_maze(vec2, &mut maze2);
        println!("Result B: {}", result_b);
    }
}
                } else if i == 1 { xd = 1; yd = 0;
                } else if i == 2 { xd = 0; yd = 1;
random_line_split
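The dijkstra_b routine above identifies each search state by a string of "positions | sorted collected-key symbols", rebuilt on every expansion. A cheaper encoding, common for this kind of key-collecting search, packs the collected-key set into a bitmask so states hash and compare in O(1). A minimal sketch of the idea, independent of the Maze types above; the key indices and the distance 7 are placeholder values, and it assumes at most 32 keys:

use std::collections::HashMap;

fn main() {
    // State = (position index, bitmask of collected keys); bit i set
    // means key i has been collected.
    let mut best: HashMap<(usize, u32), usize> = HashMap::new();
    let start = (0usize, 0u32); // at key 0, nothing collected yet
    best.insert(start, 0);

    // Collecting key 3 from the start state at a cached distance of 7:
    let (_pos, mask) = start;
    let next = (3usize, mask | (1 << 3));
    let dist = best[&start] + 7;
    // Keep only the cheapest distance seen for this (position, mask) pair.
    let entry = best.entry(next).or_insert(usize::MAX);
    if dist < *entry {
        *entry = dist;
    }
    // A mask with all n keys collected is (1 << n) - 1.
    assert_eq!(best[&next], 7);
    println!("{:?}", best);
}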
day18.rs
// day 18 use std::collections::HashMap; struct MazeNode { obstacle: bool, key_index: i64, door_index: i64, } struct Key { x: usize, y: usize, symbol: char, } struct Door { _x: usize, _y: usize, symbol: char, key_index: usize, } struct Maze { grid: Vec<Vec<MazeNode>>, keys: Vec<Key>, doors: Vec<Door>, cached: HashMap<usize, HashMap<usize, CachedPath>>, width: usize, height: usize, } struct CachedPath { dist: i64, keys: Vec<usize>, } #[derive(Clone)] struct
{ x: usize, y: usize, dist: usize, parent_x:usize, parent_y:usize } #[derive(Clone)] struct DNodeB { at:Vec<usize>, keys:Vec<usize>, dist:usize } fn intersect_count (vec_a:&Vec<usize>, vec_b:&Vec<usize>)->usize { let mut count = 0; for i in 0..vec_a.len() { for j in 0..vec_b.len() { if vec_b[j] == vec_a[i] { count+=1; break; } } } return count; } fn keynodeindex(maze:&mut Maze, keys: &Vec<usize>, at: &Vec<usize>)->String { let mut ret = String::from(""); let mut keys2 = Vec::new(); for i in 0..keys.len() { keys2.push((*maze).keys[(*keys)[i]].symbol); } keys2.sort(); for i in 0..(*at).len() { ret.push((*maze).keys[(*at)[i]].symbol); } ret.push('|'); for i in 0..keys2.len() { ret.push(keys2[i]); } return ret; } fn dijkstra_b(maze:&mut Maze, origins:&Vec<usize>)->usize { let mut frontier:HashMap<String, DNodeB> = HashMap::new(); let mut frontier_next:HashMap<String, DNodeB> = HashMap::new(); let mut explored:HashMap<String, DNodeB> = HashMap::new(); let mut candidates:HashMap<String, usize> = HashMap::new(); let mut start = DNodeB{at:Vec::new(), keys:Vec::new(), dist:0}; for i in 0..(*origins).len() { start.at.push((*origins)[i]); start.keys.push((*origins)[i]); } frontier_next.insert(keynodeindex(maze, &(start.keys), &(start.at)), start); while frontier_next.len() > 0 { frontier.clear(); for key in frontier_next.keys() { let node = frontier_next.get(key).unwrap(); let node2 = (*node).clone(); frontier.insert(key.to_string(), node2); } frontier_next.clear(); for key in frontier.keys() { //println!("Key {}", key); let node = frontier.get(key).unwrap(); if (*node).keys.len() == (*maze).keys.len() { if let Some(candidate) = candidates.get_mut(key) { if (*candidate) > (*node).dist { *candidate = (*node).dist; } } else { candidates.insert(key.to_string(), (*node).dist); } } // add to explored or update if let Some(explored_node) = explored.get_mut(key) { if (*explored_node).dist > (*node).dist { (*explored_node).keys.clear(); (*explored_node).at.clear(); for i in 0..(*node).keys.len() { (*explored_node).keys.push((*node).keys[i]); } for i in 0..(*node).at.len() { (*explored_node).at.push((*node).at[i]); } (*explored_node).dist = (*node).dist; } } else { let new_node = (*node).clone(); explored.insert(key.to_string(), new_node); } // add all next steps from all positions for p in 0..(*node).at.len() { for k in 0..(*maze).keys.len() { let mut present = false; for j in 0..(*node).keys.len() { if (*node).keys[j] == k { present = true; break; } } if present { continue; } let curr_key = (*node).at[p]; // if not accessible from current position if (*maze).cached.get(&curr_key).unwrap().get(&k).unwrap().dist < 0 { continue; } // if not accessible with current keys let required_keys = (*maze).cached.get(&curr_key).unwrap().get(&k).unwrap().keys.clone(); if intersect_count(&((*node).keys), &required_keys) < required_keys.len() { continue; } let mut new_keys = (*node).keys.clone(); new_keys.push(k); let mut new_at = (*node).at.clone(); new_at[p] = k; let new_keys_index = keynodeindex(maze, &new_keys, &new_at); let new_dist = (*node).dist + ((*maze).cached.get(&curr_key).unwrap().get(&k).unwrap().dist as usize); // if previously explored and not shorter if explored.contains_key(&new_keys_index) && explored.get(&new_keys_index).unwrap().dist < new_dist { continue; } // if previously added to the frontier if frontier_next.contains_key(&new_keys_index) && frontier_next.get(&new_keys_index).unwrap().dist < new_dist { continue; } // add to frontier frontier_next.insert(new_keys_index, DNodeB{at:new_at, 
keys:new_keys, dist:new_dist}); } } } } let mut min_dist = 0; for candidate_key in candidates.keys() { let candidate = candidates.get(candidate_key).unwrap(); if min_dist == 0 || min_dist > *candidate { min_dist = *candidate; } } return min_dist; } fn exploredindex(maze: &mut Maze, x: usize, y:usize)->usize { return ((*maze).width * y) + x; } fn dijkstra_a(maze: &mut Maze, start_x:usize, start_y:usize, end_x:usize, end_y:usize, doors:&mut Vec<usize>, keys:&mut Vec<usize>, ret_doors_keys:bool)->i64 { let mut explored:HashMap<usize, DNode> = HashMap::new(); let mut frontier:HashMap<usize,DNode> = HashMap::new(); let mut frontier_next:HashMap<usize,DNode> = HashMap::new(); frontier_next.insert(exploredindex(maze, start_x, start_y), DNode{x:start_x, y:start_y, dist:0, parent_x:start_x, parent_y:start_y}); let dest_key = exploredindex(maze, end_x, end_y); while frontier_next.len() > 0 { frontier.clear(); for key in frontier_next.keys() { let node = frontier_next.get(key).unwrap(); let new_node = (*node).clone(); frontier.insert(*key, new_node); } frontier_next.clear(); for key in frontier.keys() { let node = frontier.get(key).unwrap(); let exploredindex1 = exploredindex(maze, (*node).x, (*node).y); if explored.contains_key(&exploredindex1) { let last_dist = explored.get(&exploredindex1).unwrap().dist; if (*node).dist < last_dist { let node2 = explored.get_mut(&exploredindex1).unwrap(); (*node2).dist = (*node).dist; (*node2).parent_x = (*node).parent_x; (*node2).parent_y = (*node).parent_y; } } else { let new_node = (*node).clone(); explored.insert(exploredindex1, new_node); } let mut xd:i64 = 0; let mut yd:i64 = 0; for i in 0..4 { if i == 0 { xd = -1; yd = 0; } else if i == 1 { xd = 1; yd = 0; } else if i == 2 { xd = 0; yd = 1; } else if i == 3 { xd = 0; yd = -1; } let x1 = (*node).x as i64 + xd; let y1 = (*node).y as i64 + yd; if x1 < 0 || x1 >= (*maze).width as i64 || y1 < 0 || y1 >= (*maze).height as i64 { continue; } else { if (*maze).grid[y1 as usize][x1 as usize].obstacle { continue; } let index = exploredindex(maze, x1 as usize, y1 as usize); let new_dist = (*node).dist + 1; if explored.contains_key(&index) && explored.get(&index).unwrap().dist <= new_dist { continue; } if frontier_next.contains_key(&index) && frontier_next.get(&index).unwrap().dist <= new_dist { continue; } frontier_next.insert(index, DNode{x:x1 as usize, y:y1 as usize, dist:new_dist, parent_x:(*node).x, parent_y:(*node).y}); } } } if explored.contains_key(&dest_key) { let end_node = explored.get(&dest_key).unwrap(); if ret_doors_keys { let mut curr_x = end_node.parent_x; let mut curr_y = end_node.parent_y; while!(curr_x == start_x && curr_y == start_y) { if (*maze).grid[curr_y][curr_x].key_index >= 0 { (*keys).push((*maze).grid[curr_y][curr_x].key_index as usize); } if (*maze).grid[curr_y][curr_x].door_index >= 0 { (*doors).push((*maze).grid[curr_y][curr_x].door_index as usize); } let index = exploredindex(maze, curr_x, curr_y); let trace = explored.get(&index).unwrap(); curr_x = trace.parent_x; curr_y = trace.parent_y; } } return end_node.dist as i64; } } return -1; } fn read_maze(input: Vec<String>, maze:&mut Maze)->usize { (*maze).width = input[0].len(); (*maze).height = input.len(); // read origin, obstacles, doors and keys for y in 0..(*maze).height { (*maze).grid.push(Vec::new()); for x in 0..(*maze).width { let byte = input[y].as_bytes()[x]; match byte { 35=>(*maze).grid[y].push(MazeNode{obstacle:true, door_index: -1, key_index: -1}), 46=>(*maze).grid[y].push(MazeNode{obstacle:false, door_index: -1, key_index: 
-1}), 65..=90=> {(*maze).doors.push(Door{_x:x,_y:y,symbol:(byte as char),key_index:0}); (*maze).grid[y].push(MazeNode{obstacle:false, door_index: ((*maze).doors.len() - 1) as i64, key_index: -1}); }, 97..=122=> {(*maze).keys.push(Key{x:x,y:y,symbol:((byte-32) as char)}); (*maze).grid[y].push(MazeNode{obstacle:false, door_index: -1, key_index: ((*maze).keys.len() - 1) as i64}); }, _=>{(*maze).keys.push(Key{x:x, y:y, symbol:(byte as char)}); (*maze).grid[y].push(MazeNode{obstacle:false, door_index: -1, key_index: ((*maze).keys.len() - 1) as i64});}, } } } // quick lookup for door/key correspondance for i in 0..(*maze).doors.len() { for j in 0..(*maze).keys.len() { if (*maze).keys[j].symbol == (*maze).doors[i].symbol { (*maze).doors[i].key_index = j; break; } } } // cache distances between each key for i in 0..(*maze).keys.len() { (*maze).cached.insert(i, HashMap::new()); } for i in 0..(*maze).keys.len() { for j in 0..(*maze).keys.len() { if j == i { continue; } let mut doors = Vec::new(); let mut keys = Vec::new(); let dist = dijkstra_a(maze, (*maze).keys[i].x, (*maze).keys[i].y, (*maze).keys[j].x, (*maze).keys[j].y, &mut doors, &mut keys, true); let mut doorkeys:Vec<usize> = Vec::new(); for k in 0..doors.len() { doorkeys.push((*maze).doors[doors[k]].key_index); } (*maze).cached.get_mut(&i).unwrap().insert(j, CachedPath{dist: dist, keys: doorkeys}); } } let mut first_keys:Vec<usize> = Vec::new(); for i in 0..(*maze).keys.len() { if (*maze).keys[i].symbol == '@' { first_keys.push(i); } } return dijkstra_b(maze, &first_keys); } pub fn run(file_path:&str) { let mut maze = Maze{grid:Vec::new(), keys:Vec::new(), doors:Vec::new(), cached:HashMap::new(), height: 0, width: 0}; let mut maze2 = Maze{grid:Vec::new(), keys:Vec::new(), doors:Vec::new(), cached:HashMap::new(), height: 0, width: 0}; let vec = super::utility::util_fread(file_path); let mut vec2:Vec<String> = Vec::new(); let mut ox = 0; let mut oy = 0; if vec.len() == 0 { println!("Input not read properly"); return; } // test if maze is set up for part B for line in 0..vec.len() { let bytes = vec[line].as_bytes(); for pos in 0..bytes.len() { if bytes[pos] == '@' as u8 { ox = pos; oy = line; } } } let mut has_part_b = true; if ox + 1 >= vec[0].len() || (ox as i64 - 1) < 0 || oy + 1 >= vec.len() || (oy as i64 - 1) < 0 { has_part_b = false; } else { for y in oy-1..=oy+1 { let bytes = vec[y].as_bytes(); if y == oy-1 && (bytes[ox-1]!= '.' as u8 || bytes[ox]!= '.' as u8 || bytes[ox+1]!= '.' as u8) { has_part_b = false; break; } else if y == oy && (bytes[ox-1]!= '.' as u8 || bytes[ox]!= '@' as u8 || bytes[ox+1]!= '.' as u8) { has_part_b = false; break; } else if y == oy+1 && (bytes[ox-1]!= '.' as u8 || bytes[ox]!= '.' as u8 || bytes[ox+1]!= '.' as u8) { has_part_b = false; break; } } } if has_part_b { for y in 0..vec.len() { let mut line = String::from(""); let bytes = vec[y].as_bytes(); for x in 0..vec[y].len() { if (x == ox - 1 && y == oy - 1) || (x == ox + 1 && y == oy - 1) || (x == ox - 1 && y == oy + 1) || (x == ox + 1 && y == oy + 1) { line.push('@'); } else if (x == ox && y == oy - 1) || (x == ox && y == oy + 1) || (x == ox - 1 && y == oy) || (x == ox + 1 && y == oy) || (x == ox && y == oy) { line.push('#'); } else { line.push(bytes[x] as char); } } vec2.push(line); } } let result_a = read_maze(vec, &mut maze); println!("Result A: {}", result_a); if has_part_b { let result_b = read_maze(vec2, &mut maze2); println!("Result B: {}", result_b); } }
DNode
identifier_name
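Despite their names, dijkstra_a and dijkstra_b above expand the frontier in whole waves rather than always popping the cheapest pending state. For reference, textbook Dijkstra over a small weighted graph using std's BinaryHeap as a min-heap via Reverse; the adjacency list here is a stand-in, not the maze:

use std::cmp::Reverse;
use std::collections::{BinaryHeap, HashMap};

// Shortest distance from `start` to every reachable node of a weighted graph.
fn dijkstra(adj: &HashMap<usize, Vec<(usize, usize)>>, start: usize) -> HashMap<usize, usize> {
    let mut dist: HashMap<usize, usize> = HashMap::new();
    let mut heap = BinaryHeap::new();
    dist.insert(start, 0);
    heap.push(Reverse((0usize, start)));
    while let Some(Reverse((d, u))) = heap.pop() {
        if d > *dist.get(&u).unwrap_or(&usize::MAX) {
            continue; // stale heap entry, a shorter path was already found
        }
        for &(v, w) in adj.get(&u).into_iter().flatten() {
            let nd = d + w;
            if nd < *dist.get(&v).unwrap_or(&usize::MAX) {
                dist.insert(v, nd);
                heap.push(Reverse((nd, v)));
            }
        }
    }
    dist
}

fn main() {
    let mut adj = HashMap::new();
    adj.insert(0, vec![(1, 4), (2, 1)]);
    adj.insert(2, vec![(1, 2)]);
    let d = dijkstra(&adj, 0);
    assert_eq!(d[&1], 3); // 0 -> 2 -> 1 beats the direct edge of weight 4
}

Popping the minimum first guarantees a node is final the first time it leaves the heap, so stale entries can simply be skipped instead of maintained in per-wave frontier maps.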
main.rs
ability guarantees from other requests from the same process.
        // Sanitizes the path.
        let req = req.into_inner();
        let rx = {
            if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) {
                perms.touch()
            } else {
                return Err(Status::new(Code::Unauthenticated, "invalid process token"));
            }
        };
        if let Some(mut rx) = rx {
            debug!("warm process awaiting...");
            rx.recv().await;
        }
        if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) {
            if !perms.can_access_domain(&req.domain) {
                return Err(Status::new(Code::Unauthenticated, "invalid network access"));
            }
        }

        // Build the network request
        let method = match req.method.as_str() {
            "GET" => Method::GET,
            "POST" => Method::POST,
            "PUT" => Method::PUT,
            "DELETE" => Method::DELETE,
            "HEAD" => Method::HEAD,
            "OPTIONS" => Method::OPTIONS,
            "CONNECT" => Method::CONNECT,
            "PATCH" => Method::PATCH,
            "TRACE" => Method::TRACE,
            _ => { return Err(Status::new(Code::InvalidArgument, "invalid http method")) },
        };
        let mut builder = reqwest::Client::new()
            .request(method, req.domain.clone())
            .timeout(Duration::from_secs(1))
            .body(req.body.clone());
        for header in &req.headers {
            let key = HeaderName::from_bytes(&header.key[..])
                .map_err(|e| {
                    error!("{}", e);
                    Status::new(Code::InvalidArgument, "invalid header name")
                })?;
            builder = builder.header(key, &header.value[..]);
        }
        if self.mock_network {
            // Forward the network access to the controller.
            warn!("mock network request");
            warn!("finish diff_priv_pipeline (statistics sent): {:?}", Instant::now());
            self.api.forward_network(req).await?;
            Ok(Response::new(NetworkAccessResult::default()))
        } else {
            // Make the actual network access
            let handle = tokio::spawn(async move {
                let res = builder.send().await.map_err(|e| {
                    error!("{}", e);
                    Status::new(Code::Aborted, "http request failed")
                })?;
                let status_code = res.status().as_u16() as u32;
                let headers = res
                    .headers()
                    .iter()
                    .map(|(key, value)| KeyValuePair {
                        key: key.as_str().as_bytes().to_vec(),
                        value: value.as_bytes().to_vec(),
                    })
                    .collect::<Vec<_>>();
                let data = res.bytes().await.map_err(|e| {
                    error!("{}", e);
                    Status::new(Code::Unavailable, "error streaming response bytes")
                })?;
                Ok(Response::new(NetworkAccessResult {
                    status_code,
                    headers,
                    data: data.to_vec(),
                }))
            });
            // Forward the network access to the controller.
            self.api.forward_network(req).await?;
            // Return the result of the HTTP request.
            handle.await.map_err(|e| Status::new(Code::Internal, format!("{}", e)))?
        }
    }

    /// Validates the process is an existing process, and checks its
    /// permissions to see that the tag corresponds to a valid param.
    /// If the tag is valid and this is a stateless edge, only respond
    /// successfully if the module is trying to get the triggered data.
    /// If the tag is valid and this is a stateful edge, endorse the data
    /// with the host token and forward to the controller.
    async fn get(
        &self,
        req: Request<GetData>,
    ) -> Result<Response<GetDataResult>, Status> {
        debug!("get");
        // Validate the process is valid and has permissions to read the tag.
        // No serializability guarantees from other requests from the same process.
        let req = req.into_inner();
        let rx = {
            if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) {
                perms.touch()
            } else {
                warn!("get: invalid token {}", req.process_token);
                return Err(Status::new(Code::Unauthenticated, "invalid process token"));
            }
        };
        if let Some(mut rx) = rx {
            debug!("warm process awaiting...");
            rx.recv().await;
        }
        if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) {
            if perms.is_triggered(&req.tag) {
                // cached the triggered file
                if req.lower != req.upper {
                    debug!("get: {} invalid triggered timestamps", req.process_token);
                    return Ok(Response::new(GetDataResult::default()))
                } else if !self.pubsub_enabled {
                    debug!("get: {} pubsub disabled, fallthrough to read from data sink", req.process_token);
                    // fallthrough below
                } else if let Some(data) = perms.read_triggered(&req.lower) {
                    debug!("get: {} reading triggered data", req.process_token);
                    return Ok(Response::new(GetDataResult {
                        timestamps: vec![req.lower],
                        data: vec![data],
                    }))
                } else {
                    debug!("get: {} process was not triggered", req.process_token);
                    return Ok(Response::new(GetDataResult::default()))
                }
            } else if !perms.can_read(&req.tag) {
                warn!("get: {} cannot read {}", req.process_token, req.tag);
                return Err(Status::new(Code::Unauthenticated, "cannot read"));
            }
        }
        // Forward the file access to the controller and return the result
        debug!("get: {} forwarding tag={}", req.process_token, req.tag);
        self.api.forward_get(req).await
    }

    /// Validates the process is an existing process, and checks its
    /// permissions to see that the process is writing to a valid tag.
    /// If the tag is valid, endorse the data with the host token and
    /// If the tag corresponds to sensor state (say maybe it starts with #
    /// which is reserved for state tags), forward the request as a state
    /// change instead.
    async fn push(
        &self,
        req: Request<PushData>,
    ) -> Result<Response<()>, Status> {
        debug!("push");
        // Validate the process is valid and has permissions to write the file.
        // No serializability guarantees from other requests from the same process.
        // Sanitizes the path.
        let req = req.into_inner();
        let rx = {
            if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) {
                perms.touch()
            } else {
                return Err(Status::new(Code::Unauthenticated, "invalid process token"));
            }
        };
        if let Some(mut rx) = rx {
            debug!("warm process awaiting...");
            rx.recv().await;
        }
        let sensor_key = if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) {
            if !perms.can_write(&req.tag) {
                debug!("push: {} cannot write tag={}, silently failing", req.process_token, req.tag);
                return Ok(Response::new(()));
            }
            if state_tags::is_state_tag(&req.tag) {
                Some(state_tags::parse_state_tag(&req.tag))
            } else {
                None
            }
        } else {
            unreachable!()
        };
        if let Some((sensor, key)) = sensor_key {
            // Forward as state change if the tag changes state.
            debug!("push: {} forwarding state change tag={}", req.process_token, req.tag);
            let req = StateChange {
                host_token: String::new(),
                process_token: req.process_token,
                sensor_id: sensor,
                key,
                value: req.data,
            };
            self.api.forward_state(req).await
        } else {
            // Forward the file access to the controller and return the result
            debug!("push: {} forwarding push tag={}", req.process_token, req.tag);
            self.api.forward_push(req).await
        }
    }
}

impl Host {
    /// Generate a new host with a random ID.
    pub fn new(
        base_path: PathBuf,
        controller: &str,
        cold_cache_enabled: bool,
        warm_cache_enabled: bool,
        pubsub_enabled: bool,
        mock_network: bool,
    ) -> Self {
        use rand::Rng;
        let id: u32 = rand::thread_rng().gen();
        assert!(cold_cache_enabled || !warm_cache_enabled);
        // TODO: buffer size
        Self {
            id,
            api: crate::net::KarlHostAPI::new(controller),
            process_tokens: Arc::new(Mutex::new(HashMap::new())),
            warm_processes: Arc::new(Mutex::new(HashMap::new())),
            warm_cache_tx: None, // wish this didn't have to be wrapped
            path_manager: Arc::new(PathManager::new(base_path, id)),
            compute_lock: Arc::new(Mutex::new(())),
            cold_cache_enabled,
            warm_cache_enabled,
            pubsub_enabled,
            mock_network,
        }
    }

    /// Spawns a background process that sends heartbeats to the controller
    /// at the HEARTBEAT_INTERVAL.
    ///
    /// The constructor creates a directory at the <KARL_PATH> if it does
    /// not already exist. The working directory for any computation is at
    /// <KARL_PATH>/<LISTENER_ID>. When not doing computation, the working
    /// directory must be at <KARL_PATH>.
    ///
    /// Parameters:
    /// - port - The port to listen on.
    /// - password - The password to register with the controller.
    pub async fn start(&mut self, port: u16, password: &str) -> Result<(), Status> {
        self.api.register(self.id, port, password).await?;
        let api = self.api.clone();
        tokio::spawn(async move {
            // Every HEARTBEAT_INTERVAL seconds, this process wakes up and
            // sends a heartbeat message to the controller.
            loop {
                tokio::time::sleep(Duration::from_secs(HEARTBEAT_INTERVAL)).await;
                trace!("heartbeat");
                let res = api.heartbeat().await;
                if let Err(e) = res {
                    warn!("error sending heartbeat: {}", e);
                };
            }
        });
        // listener for spawning warm processes
        let (tx, mut rx) = mpsc::channel::<ComputeRequest>(100);
        self.warm_cache_tx = Some(tx);
        let host = self.clone();
        tokio::spawn(async move {
            loop {
                let req: ComputeRequest = rx.recv().await.unwrap();
                let is_warm = true;
                Host::spawn_new_process(
                    host.clone(),
                    req,
                    is_warm,
                    TRIGGERED_KEY.to_string(), // special value
                    TRIGGERED_KEY.to_string(), // special value
                ).await;
            }
        });
        Ok(())
    }

    async fn attach_warm_process(
        &self,
        req: &mut ComputeRequest,
    ) -> Option<ProcessToken> {
        let warm_process = {
            let mut warm_processes = self.warm_processes.lock().unwrap();
            let mut process_tokens = self.process_tokens.lock().unwrap();
            let mut process: Option<WarmProcess> = None;
            if let Some(processes) = warm_processes.get_mut(&req.module_id) {
                // reserve the process token
                process = processes.pop();
            }
            if let Some(process) = process {
                process_tokens
                    .get_mut(&process.process_token).unwrap()
                    .set_compute_request(req);
                process
            } else {
                return None;
            }
        };
        // permissions are set and warm process can continue
        info!("attaching: {} ({})", req.module_id, warm_process.process_token);
        warm_process.tx.send(()).await.unwrap();
        Some(warm_process.process_token)
    }

    async fn spawn_new_process(
        host: Host,
        mut req: ComputeRequest,
        is_warm: bool,
        triggered_tag: String,
        triggered_timestamp: String,
    ) -> ProcessToken {
        let process_token = Token::gen();
        let (perms, tx) = if !is_warm {
            info!("spawning cold process: {} ({})", req.module_id, process_token);
            (ProcessPerms::new(&mut req), None)
        } else {
            info!("spawning warm process: {} ({})", req.module_id, process_token);
            let (perms, tx) = ProcessPerms::new_warm_cache();
            (perms, Some(tx))
        };
        // Mark an active process
        {
            let mut process_tokens = host.process_tokens.lock().unwrap();
            assert!(!process_tokens.contains_key(&process_token));
            process_tokens.insert(process_token.clone(), perms);
        }
        // If it's warm, insert a sending channel to eventually notify
        // this process it is ready to continue
        if let Some(tx) = tx {
            host.warm_processes.lock().unwrap()
                .entry(req.module_id.clone())
                .or_insert(vec![])
                .push(WarmProcess {
                    process_token: process_token.clone(),
                    tx,
                });
        }
        // Handle the process asynchronously
        #[cfg(target_os = "linux")]
        {
            let binary_path = Path::new(&req.binary_path).to_path_buf();
            let process_token = process_token.clone();
            tokio::spawn(async move {
                let original_req = req.clone();
                if !triggered_tag.is_empty() {
                    req.envs.push(format!("TRIGGERED_TAG={}", &triggered_tag));
                }
                if !triggered_timestamp.is_empty() {
                    req.envs.push(format!("TRIGGERED_TIMESTAMP={}", &triggered_timestamp));
                }
                req.envs.push(format!("PROCESS_TOKEN={}", &process_token));
                if !req.params.is_empty() {
                    req.envs.push(format!("KARL_PARAMS={}", &req.params));
                }
                if !req.returns.is_empty() {
                    req.envs.push(format!("KARL_RETURNS={}", &req.returns));
                }
                let execution_time = Host::handle_compute(
                    host.compute_lock.clone(),
                    host.path_manager.clone(),
                    req.module_id,
                    req.cached,
                    host.cold_cache_enabled,
                    req.package,
                    binary_path,
                    req.args,
                    req.envs,
                ).unwrap();
                host.process_tokens.lock().unwrap().remove(&process_token);
                host.api.notify_end(process_token).await.unwrap();
                // Now that the compute request is finished, evaluate its
                // initialization time. If the initialization time was high,
                // recursively call this function but as a warm cache module.
                // We assume initialization time is high if the warm cache
                // is enabled.
                let _long_init_time = execution_time > Duration::from_secs(5);
                if host.warm_cache_enabled {
                    debug!("execution_time was {:?}, spawning warm modules anyway", execution_time);
                    host.warm_cache_tx.as_ref().unwrap().send(original_req).await.unwrap();
                }
            });
        }
        #[cfg(not(target_os = "linux"))]
        {
            unimplemented!()
        }
        process_token
    }

    /// Handle a compute request.
    ///
    /// Returns the execution time.
    #[cfg(target_os = "linux")]
    fn handle_compute(
        lock: Arc<Mutex<()>>,
        path_manager: Arc<PathManager>,
        module_id: ModuleID,
        cached: bool,
        cold_cache_enabled: bool,
        package: Vec<u8>,
        binary_path: PathBuf,
        args: Vec<String>,
        envs: Vec<String>,
    ) -> Result<Duration, Error> {
        let now = Instant::now();
        if cached && !cold_cache_enabled {
            return Err(Error::CacheError("caching is disabled".to_string()));
        }
        // TODO: lock on finer granularity, just the specific module
        // But gets a lock around the filesystem so multiple people
        // aren't handling compute requests that could be cached.
        // And so that each request can create a directory for its process.
        let (mount, paths) = {
            let lock = lock.lock().unwrap();
            debug!("cached={} cold_cache_enabled={}", cached, cold_cache_enabled);
            if cached && !path_manager.is_cached(&module_id) {
                // TODO: controller needs to handle this error
    /// forward to the controller.
    ///
random_line_split
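Each handler in main.rs repeats the same token dance: look up the permissions entry under the lock, call touch(), drop the lock, and await the warm-process channel if one is returned before taking the lock again. A hedged sketch of how that check could be factored out; Perms and the channel type here are stand-ins, not the crate's real types:

use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use tokio::sync::mpsc;

struct Perms { warm_rx: Option<mpsc::Receiver<()>> }

impl Perms {
    // Mirrors perms.touch() above: a warm process hands back a receiver
    // to await until its compute request has been attached.
    fn touch(&mut self) -> Option<mpsc::Receiver<()>> { self.warm_rx.take() }
}

async fn validate_token(
    tokens: &Arc<Mutex<HashMap<String, Perms>>>,
    token: &str,
) -> Result<(), String> {
    // First critical section: the token must exist; take the warm channel.
    let rx = {
        let mut guard = tokens.lock().unwrap();
        match guard.get_mut(token) {
            Some(perms) => perms.touch(),
            None => return Err("invalid process token".to_string()),
        }
    }; // lock released here, before any await
    if let Some(mut rx) = rx {
        rx.recv().await; // wait until the warm process is attached
    }
    Ok(())
}

Scoping the guard in a block, as the handlers do, matters: a std::sync::MutexGuard must be dropped before the .await, both for correctness and because the guard cannot be held across an await point in a Send future.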
main.rs
guarantees from other requests from the same process. // Sanitizes the path. let req = req.into_inner(); let rx = { if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) { perms.touch() } else { return Err(Status::new(Code::Unauthenticated, "invalid process token")); } }; if let Some(mut rx) = rx { debug!("warm process awaiting..."); rx.recv().await; } if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) { if!perms.can_access_domain(&req.domain) { return Err(Status::new(Code::Unauthenticated, "invalid network access")); } } // Build the network request let method = match req.method.as_str() { "GET" => Method::GET, "POST" => Method::POST, "PUT" => Method::PUT, "DELETE" => Method::DELETE, "HEAD" => Method::HEAD, "OPTIONS" => Method::OPTIONS, "CONNECT" => Method::CONNECT, "PATCH" => Method::PATCH, "TRACE" => Method::TRACE, _ => { return Err(Status::new(Code::InvalidArgument, "invalid http method")) }, }; let mut builder = reqwest::Client::new() .request(method, req.domain.clone()) .timeout(Duration::from_secs(1)) .body(req.body.clone()); for header in &req.headers { let key = HeaderName::from_bytes(&header.key[..]) .map_err(|e| { error!("{}", e); Status::new(Code::InvalidArgument, "invalid header name") })?; builder = builder.header(key, &header.value[..]); } if self.mock_network { // Forward the network access to the controller. warn!("mock network request"); warn!("finish diff_priv_pipeline (statistics sent): {:?}", Instant::now()); self.api.forward_network(req).await?; Ok(Response::new(NetworkAccessResult::default())) } else { // Make the actual network access let handle = tokio::spawn(async move { let res = builder.send().await.map_err(|e| { error!("{}", e); Status::new(Code::Aborted, "http request failed") })?; let status_code = res.status().as_u16() as u32; let headers = res .headers() .iter() .map(|(key, value)| KeyValuePair { key: key.as_str().as_bytes().to_vec(), value: value.as_bytes().to_vec(), }) .collect::<Vec<_>>(); let data = res.bytes().await.map_err(|e| { error!("{}", e); Status::new(Code::Unavailable, "error streaming response bytes") })?; Ok(Response::new(NetworkAccessResult { status_code, headers, data: data.to_vec(), })) }); // Forward the network access to the controller. self.api.forward_network(req).await?; // Return the result of the HTTP request. handle.await.map_err(|e| Status::new(Code::Internal, format!("{}", e)))? } } /// Validates the process is an existing process, and checks its /// permissions to see that the tag corresponds to a valid param. /// If the tag is valid and this is a stateless edge, only respond /// succesfully if the module is trying to get the triggered data. /// If the tag is valid and this is a stateful edge, endorse the data /// with the host token and forward to the controller. async fn get( &self, req: Request<GetData>, ) -> Result<Response<GetDataResult>, Status> { debug!("get"); // Validate the process is valid and has permissions to read the tag. // No serializability guarantees from other requests from the same process. 
let req = req.into_inner(); let rx = { if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) { perms.touch() } else { warn!("get: invalid token {}", req.process_token); return Err(Status::new(Code::Unauthenticated, "invalid process token")); } }; if let Some(mut rx) = rx { debug!("warm process awaiting..."); rx.recv().await; } if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) { if perms.is_triggered(&req.tag) { // cached the triggered file if req.lower!= req.upper { debug!("get: {} invalid triggered timestamps", req.process_token); return Ok(Response::new(GetDataResult::default())) } else if!self.pubsub_enabled { debug!("get: {} pubsub disabled, fallthrough to read from data sink", req.process_token); // fallthrough below } else if let Some(data) = perms.read_triggered(&req.lower) { debug!("get: {} reading triggered data", req.process_token); return Ok(Response::new(GetDataResult { timestamps: vec![req.lower], data: vec![data], })) } else { debug!("get: {} process was not triggered", req.process_token); return Ok(Response::new(GetDataResult::default())) } } else if!perms.can_read(&req.tag) { warn!("get: {} cannot read {}", req.process_token, req.tag); return Err(Status::new(Code::Unauthenticated, "cannot read")); } } // Forward the file access to the controller and return the result debug!("get: {} forwarding tag={}", req.process_token, req.tag); self.api.forward_get(req).await } /// Validates the process is an existing process, and checks its /// permissions to see that the process is writing to a valid tag. /// If the tag is valid, endorse the data with the host token and /// forward to the controller. /// /// If the tag corresponds to sensor state (say maybe it starts with # /// which is reserved for state tags), forward the request as a state /// change instead. async fn
( &self, req: Request<PushData>, ) -> Result<Response<()>, Status> { debug!("push"); // Validate the process is valid and has permissions to write the file. // No serializability guarantees from other requests from the same process. // Sanitizes the path. let req = req.into_inner(); let rx = { if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) { perms.touch() } else { return Err(Status::new(Code::Unauthenticated, "invalid process token")); } }; if let Some(mut rx) = rx { debug!("warm process awaiting..."); rx.recv().await; } let sensor_key = if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) { if!perms.can_write(&req.tag) { debug!("push: {} cannot write tag={}, silently failing", req.process_token, req.tag); return Ok(Response::new(())); } if state_tags::is_state_tag(&req.tag) { Some(state_tags::parse_state_tag(&req.tag)) } else { None } } else { unreachable!() }; if let Some((sensor, key)) = sensor_key { // Forward as state change if the tag changes state. debug!("push: {} forwarding state change tag={}", req.process_token, req.tag); let req = StateChange { host_token: String::new(), process_token: req.process_token, sensor_id: sensor, key, value: req.data, }; self.api.forward_state(req).await } else { // Forward the file access to the controller and return the result debug!("push: {} forwarding push tag={}", req.process_token, req.tag); self.api.forward_push(req).await } } } impl Host { /// Generate a new host with a random ID. pub fn new( base_path: PathBuf, controller: &str, cold_cache_enabled: bool, warm_cache_enabled: bool, pubsub_enabled: bool, mock_network: bool, ) -> Self { use rand::Rng; let id: u32 = rand::thread_rng().gen(); assert!(cold_cache_enabled ||!warm_cache_enabled); // TODO: buffer size Self { id, api: crate::net::KarlHostAPI::new(controller), process_tokens: Arc::new(Mutex::new(HashMap::new())), warm_processes: Arc::new(Mutex::new(HashMap::new())), warm_cache_tx: None, // wish this didn't have to be wrapped path_manager: Arc::new(PathManager::new(base_path, id)), compute_lock: Arc::new(Mutex::new(())), cold_cache_enabled, warm_cache_enabled, pubsub_enabled, mock_network, } } /// Spawns a background process that sends heartbeats to the controller /// at the HEARTBEAT_INTERVAL. /// /// The constructor creates a directory at the <KARL_PATH> if it does /// not already exist. The working directory for any computation is at /// <KARL_PATH>/<LISTENER_ID>. When not doing computation, the working /// directory must be at <KARL_PATH>. /// /// Parameters: /// - port - The port to listen on. /// - password - The password to register with the controller. pub async fn start(&mut self, port: u16, password: &str) -> Result<(), Status> { self.api.register(self.id, port, password).await?; let api = self.api.clone(); tokio::spawn(async move { // Every HEARTBEAT_INTERVAL seconds, this process wakes up // sends a heartbeat message to the controller. 
loop { tokio::time::sleep(Duration::from_secs(HEARTBEAT_INTERVAL)).await; trace!("heartbeat"); let res = api.heartbeat().await; if let Err(e) = res { warn!("error sending heartbeat: {}", e); }; } }); // listener for spawning warm processes let (tx, mut rx) = mpsc::channel::<ComputeRequest>(100); self.warm_cache_tx = Some(tx); let host = self.clone(); tokio::spawn(async move { loop { let req: ComputeRequest = rx.recv().await.unwrap(); let is_warm = true; Host::spawn_new_process( host.clone(), req, is_warm, TRIGGERED_KEY.to_string(), // special value TRIGGERED_KEY.to_string(), // special value ).await; } }); Ok(()) } async fn attach_warm_process( &self, req: &mut ComputeRequest, ) -> Option<ProcessToken> { let warm_process = { let mut warm_processes = self.warm_processes.lock().unwrap(); let mut process_tokens = self.process_tokens.lock().unwrap(); let mut process: Option<WarmProcess> = None; if let Some(processes) = warm_processes.get_mut(&req.module_id) { // reserve the process token process = processes.pop(); } if let Some(process) = process { process_tokens .get_mut(&process.process_token).unwrap() .set_compute_request(req); process } else { return None; } }; // permissions are set and warm process can continue info!("attaching: {} ({})", req.module_id, warm_process.process_token); warm_process.tx.send(()).await.unwrap(); Some(warm_process.process_token) } async fn spawn_new_process( host: Host, mut req: ComputeRequest, is_warm: bool, triggered_tag: String, triggered_timestamp: String, ) -> ProcessToken { let process_token = Token::gen(); let (perms, tx) = if!is_warm { info!("spawning cold process: {} ({})", req.module_id, process_token); (ProcessPerms::new(&mut req), None) } else { info!("spawning warm process: {} ({})", req.module_id, process_token); let (perms, tx) = ProcessPerms::new_warm_cache(); (perms, Some(tx)) }; // Mark an active process { let mut process_tokens = host.process_tokens.lock().unwrap(); assert!(!process_tokens.contains_key(&process_token)); process_tokens.insert(process_token.clone(), perms); } // If it's warm insert a sending channel to eventually notify // this process it is ready to continue if let Some(tx) = tx { host.warm_processes.lock().unwrap() .entry(req.module_id.clone()) .or_insert(vec![]) .push(WarmProcess { process_token: process_token.clone(), tx, }); } // Handle the process asynchronously #[cfg(target_os = "linux")] { let binary_path = Path::new(&req.binary_path).to_path_buf(); let process_token = process_token.clone(); tokio::spawn(async move { let original_req = req.clone(); if!triggered_tag.is_empty() { req.envs.push(format!("TRIGGERED_TAG={}", &triggered_tag)); } if!triggered_timestamp.is_empty() { req.envs.push(format!("TRIGGERED_TIMESTAMP={}", &triggered_timestamp)); } req.envs.push(format!("PROCESS_TOKEN={}", &process_token)); if!req.params.is_empty() { req.envs.push(format!("KARL_PARAMS={}", &req.params)); } if!req.returns.is_empty() { req.envs.push(format!("KARL_RETURNS={}", &req.returns)); } let execution_time = Host::handle_compute( host.compute_lock.clone(), host.path_manager.clone(), req.module_id, req.cached, host.cold_cache_enabled, req.package, binary_path, req.args, req.envs, ).unwrap(); host.process_tokens.lock().unwrap().remove(&process_token); host.api.notify_end(process_token).await.unwrap(); // Now that the compute request is finished, evaluate its // initialization time. If the initialization time was high, // recursively call this function but as a warm cache module. 
// We assume initialization time is high if the warm cache // is enabled. let _long_init_time = execution_time > Duration::from_secs(5); if host.warm_cache_enabled { debug!("execution_time was {:?}, spawning warm modules anyway", execution_time); host.warm_cache_tx.as_ref().unwrap().send(original_req).await.unwrap(); } }); } #[cfg(not(target_os = "linux"))] { unimplemented!() } process_token } /// Handle a compute request. /// /// Returns the execution time. #[cfg(target_os = "linux")] fn handle_compute( lock: Arc<Mutex<()>>, path_manager: Arc<PathManager>, module_id: ModuleID, cached: bool, cold_cache_enabled: bool, package: Vec<u8>, binary_path: PathBuf, args: Vec<String>, envs: Vec<String>, ) -> Result<Duration, Error> { let now = Instant::now(); if cached &&!cold_cache_enabled { return Err(Error::CacheError("caching is disabled".to_string())); } // TODO: lock on finer granularity, just the specific module // But gets a lock around the filesystem so multiple people // aren't handling compute requests that could be cached. // And so that each request can create a directory for its process. let (mount, paths) = { let lock = lock.lock().unwrap(); debug!("cached={} cold_cache_enabled={}", cached, cold_cache_enabled); if cached &&!path_manager.is_cached(&module_id) { // TODO: controller needs to handle this error
push
identifier_name
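push() relies on state_tags::is_state_tag and state_tags::parse_state_tag, which are not shown in this file. Going only by the doc comment ("say maybe it starts with #"), a hypothetical sketch of what such helpers could look like; the "#sensor.key" format is an assumption, not the crate's actual encoding:

// Hypothetical stand-ins for the state_tags module used by push() above.
fn is_state_tag(tag: &str) -> bool {
    tag.starts_with('#')
}

fn parse_state_tag(tag: &str) -> (String, String) {
    // "#camera.livestream" -> ("camera", "livestream")
    let rest = &tag[1..];
    let mut parts = rest.splitn(2, '.');
    let sensor = parts.next().unwrap_or("").to_string();
    let key = parts.next().unwrap_or("").to_string();
    (sensor, key)
}

fn main() {
    assert!(is_state_tag("#camera.livestream"));
    assert_eq!(parse_state_tag("#camera.livestream"),
               ("camera".to_string(), "livestream".to_string()));
}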
main.rs
guarantees from other requests from the same process. // Sanitizes the path. let req = req.into_inner(); let rx = { if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) { perms.touch() } else { return Err(Status::new(Code::Unauthenticated, "invalid process token")); } }; if let Some(mut rx) = rx { debug!("warm process awaiting..."); rx.recv().await; } if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) { if!perms.can_access_domain(&req.domain) { return Err(Status::new(Code::Unauthenticated, "invalid network access")); } } // Build the network request let method = match req.method.as_str() { "GET" => Method::GET, "POST" => Method::POST, "PUT" => Method::PUT, "DELETE" => Method::DELETE, "HEAD" => Method::HEAD, "OPTIONS" => Method::OPTIONS, "CONNECT" => Method::CONNECT, "PATCH" => Method::PATCH, "TRACE" => Method::TRACE, _ => { return Err(Status::new(Code::InvalidArgument, "invalid http method")) }, }; let mut builder = reqwest::Client::new() .request(method, req.domain.clone()) .timeout(Duration::from_secs(1)) .body(req.body.clone()); for header in &req.headers { let key = HeaderName::from_bytes(&header.key[..]) .map_err(|e| { error!("{}", e); Status::new(Code::InvalidArgument, "invalid header name") })?; builder = builder.header(key, &header.value[..]); } if self.mock_network { // Forward the network access to the controller. warn!("mock network request"); warn!("finish diff_priv_pipeline (statistics sent): {:?}", Instant::now()); self.api.forward_network(req).await?; Ok(Response::new(NetworkAccessResult::default())) } else { // Make the actual network access let handle = tokio::spawn(async move { let res = builder.send().await.map_err(|e| { error!("{}", e); Status::new(Code::Aborted, "http request failed") })?; let status_code = res.status().as_u16() as u32; let headers = res .headers() .iter() .map(|(key, value)| KeyValuePair { key: key.as_str().as_bytes().to_vec(), value: value.as_bytes().to_vec(), }) .collect::<Vec<_>>(); let data = res.bytes().await.map_err(|e| { error!("{}", e); Status::new(Code::Unavailable, "error streaming response bytes") })?; Ok(Response::new(NetworkAccessResult { status_code, headers, data: data.to_vec(), })) }); // Forward the network access to the controller. self.api.forward_network(req).await?; // Return the result of the HTTP request. handle.await.map_err(|e| Status::new(Code::Internal, format!("{}", e)))? } } /// Validates the process is an existing process, and checks its /// permissions to see that the tag corresponds to a valid param. /// If the tag is valid and this is a stateless edge, only respond /// succesfully if the module is trying to get the triggered data. /// If the tag is valid and this is a stateful edge, endorse the data /// with the host token and forward to the controller. async fn get( &self, req: Request<GetData>, ) -> Result<Response<GetDataResult>, Status> { debug!("get"); // Validate the process is valid and has permissions to read the tag. // No serializability guarantees from other requests from the same process. 
let req = req.into_inner(); let rx = { if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) { perms.touch() } else { warn!("get: invalid token {}", req.process_token); return Err(Status::new(Code::Unauthenticated, "invalid process token")); } }; if let Some(mut rx) = rx { debug!("warm process awaiting..."); rx.recv().await; } if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) { if perms.is_triggered(&req.tag) { // cached the triggered file if req.lower!= req.upper { debug!("get: {} invalid triggered timestamps", req.process_token); return Ok(Response::new(GetDataResult::default())) } else if!self.pubsub_enabled { debug!("get: {} pubsub disabled, fallthrough to read from data sink", req.process_token); // fallthrough below } else if let Some(data) = perms.read_triggered(&req.lower) { debug!("get: {} reading triggered data", req.process_token); return Ok(Response::new(GetDataResult { timestamps: vec![req.lower], data: vec![data], })) } else { debug!("get: {} process was not triggered", req.process_token); return Ok(Response::new(GetDataResult::default())) } } else if!perms.can_read(&req.tag) { warn!("get: {} cannot read {}", req.process_token, req.tag); return Err(Status::new(Code::Unauthenticated, "cannot read")); } } // Forward the file access to the controller and return the result debug!("get: {} forwarding tag={}", req.process_token, req.tag); self.api.forward_get(req).await } /// Validates the process is an existing process, and checks its /// permissions to see that the process is writing to a valid tag. /// If the tag is valid, endorse the data with the host token and /// forward to the controller. /// /// If the tag corresponds to sensor state (say maybe it starts with # /// which is reserved for state tags), forward the request as a state /// change instead. async fn push( &self, req: Request<PushData>, ) -> Result<Response<()>, Status>
return Ok(Response::new(())); } if state_tags::is_state_tag(&req.tag) { Some(state_tags::parse_state_tag(&req.tag)) } else { None } } else { unreachable!() }; if let Some((sensor, key)) = sensor_key { // Forward as state change if the tag changes state. debug!("push: {} forwarding state change tag={}", req.process_token, req.tag); let req = StateChange { host_token: String::new(), process_token: req.process_token, sensor_id: sensor, key, value: req.data, }; self.api.forward_state(req).await } else { // Forward the file access to the controller and return the result debug!("push: {} forwarding push tag={}", req.process_token, req.tag); self.api.forward_push(req).await } } } impl Host { /// Generate a new host with a random ID. pub fn new( base_path: PathBuf, controller: &str, cold_cache_enabled: bool, warm_cache_enabled: bool, pubsub_enabled: bool, mock_network: bool, ) -> Self { use rand::Rng; let id: u32 = rand::thread_rng().gen(); assert!(cold_cache_enabled ||!warm_cache_enabled); // TODO: buffer size Self { id, api: crate::net::KarlHostAPI::new(controller), process_tokens: Arc::new(Mutex::new(HashMap::new())), warm_processes: Arc::new(Mutex::new(HashMap::new())), warm_cache_tx: None, // wish this didn't have to be wrapped path_manager: Arc::new(PathManager::new(base_path, id)), compute_lock: Arc::new(Mutex::new(())), cold_cache_enabled, warm_cache_enabled, pubsub_enabled, mock_network, } } /// Spawns a background process that sends heartbeats to the controller /// at the HEARTBEAT_INTERVAL. /// /// The constructor creates a directory at the <KARL_PATH> if it does /// not already exist. The working directory for any computation is at /// <KARL_PATH>/<LISTENER_ID>. When not doing computation, the working /// directory must be at <KARL_PATH>. /// /// Parameters: /// - port - The port to listen on. /// - password - The password to register with the controller. pub async fn start(&mut self, port: u16, password: &str) -> Result<(), Status> { self.api.register(self.id, port, password).await?; let api = self.api.clone(); tokio::spawn(async move { // Every HEARTBEAT_INTERVAL seconds, this process wakes up // sends a heartbeat message to the controller. 
loop { tokio::time::sleep(Duration::from_secs(HEARTBEAT_INTERVAL)).await; trace!("heartbeat"); let res = api.heartbeat().await; if let Err(e) = res { warn!("error sending heartbeat: {}", e); }; } }); // listener for spawning warm processes let (tx, mut rx) = mpsc::channel::<ComputeRequest>(100); self.warm_cache_tx = Some(tx); let host = self.clone(); tokio::spawn(async move { loop { let req: ComputeRequest = rx.recv().await.unwrap(); let is_warm = true; Host::spawn_new_process( host.clone(), req, is_warm, TRIGGERED_KEY.to_string(), // special value TRIGGERED_KEY.to_string(), // special value ).await; } }); Ok(()) } async fn attach_warm_process( &self, req: &mut ComputeRequest, ) -> Option<ProcessToken> { let warm_process = { let mut warm_processes = self.warm_processes.lock().unwrap(); let mut process_tokens = self.process_tokens.lock().unwrap(); let mut process: Option<WarmProcess> = None; if let Some(processes) = warm_processes.get_mut(&req.module_id) { // reserve the process token process = processes.pop(); } if let Some(process) = process { process_tokens .get_mut(&process.process_token).unwrap() .set_compute_request(req); process } else { return None; } }; // permissions are set and warm process can continue info!("attaching: {} ({})", req.module_id, warm_process.process_token); warm_process.tx.send(()).await.unwrap(); Some(warm_process.process_token) } async fn spawn_new_process( host: Host, mut req: ComputeRequest, is_warm: bool, triggered_tag: String, triggered_timestamp: String, ) -> ProcessToken { let process_token = Token::gen(); let (perms, tx) = if!is_warm { info!("spawning cold process: {} ({})", req.module_id, process_token); (ProcessPerms::new(&mut req), None) } else { info!("spawning warm process: {} ({})", req.module_id, process_token); let (perms, tx) = ProcessPerms::new_warm_cache(); (perms, Some(tx)) }; // Mark an active process { let mut process_tokens = host.process_tokens.lock().unwrap(); assert!(!process_tokens.contains_key(&process_token)); process_tokens.insert(process_token.clone(), perms); } // If it's warm insert a sending channel to eventually notify // this process it is ready to continue if let Some(tx) = tx { host.warm_processes.lock().unwrap() .entry(req.module_id.clone()) .or_insert(vec![]) .push(WarmProcess { process_token: process_token.clone(), tx, }); } // Handle the process asynchronously #[cfg(target_os = "linux")] { let binary_path = Path::new(&req.binary_path).to_path_buf(); let process_token = process_token.clone(); tokio::spawn(async move { let original_req = req.clone(); if!triggered_tag.is_empty() { req.envs.push(format!("TRIGGERED_TAG={}", &triggered_tag)); } if!triggered_timestamp.is_empty() { req.envs.push(format!("TRIGGERED_TIMESTAMP={}", &triggered_timestamp)); } req.envs.push(format!("PROCESS_TOKEN={}", &process_token)); if!req.params.is_empty() { req.envs.push(format!("KARL_PARAMS={}", &req.params)); } if!req.returns.is_empty() { req.envs.push(format!("KARL_RETURNS={}", &req.returns)); } let execution_time = Host::handle_compute( host.compute_lock.clone(), host.path_manager.clone(), req.module_id, req.cached, host.cold_cache_enabled, req.package, binary_path, req.args, req.envs, ).unwrap(); host.process_tokens.lock().unwrap().remove(&process_token); host.api.notify_end(process_token).await.unwrap(); // Now that the compute request is finished, evaluate its // initialization time. If the initialization time was high, // recursively call this function but as a warm cache module. 
// We assume initialization time is high if the warm cache // is enabled. let _long_init_time = execution_time > Duration::from_secs(5); if host.warm_cache_enabled { debug!("execution_time was {:?}, spawning warm modules anyway", execution_time); host.warm_cache_tx.as_ref().unwrap().send(original_req).await.unwrap(); } }); } #[cfg(not(target_os = "linux"))] { unimplemented!() } process_token } /// Handle a compute request. /// /// Returns the execution time. #[cfg(target_os = "linux")] fn handle_compute( lock: Arc<Mutex<()>>, path_manager: Arc<PathManager>, module_id: ModuleID, cached: bool, cold_cache_enabled: bool, package: Vec<u8>, binary_path: PathBuf, args: Vec<String>, envs: Vec<String>, ) -> Result<Duration, Error> { let now = Instant::now(); if cached &&!cold_cache_enabled { return Err(Error::CacheError("caching is disabled".to_string())); } // TODO: lock on finer granularity, just the specific module // But gets a lock around the filesystem so multiple people // aren't handling compute requests that could be cached. // And so that each request can create a directory for its process. let (mount, paths) = { let lock = lock.lock().unwrap(); debug!("cached={} cold_cache_enabled={}", cached, cold_cache_enabled); if cached &&!path_manager.is_cached(&module_id) { // TODO: controller needs to handle this error
{ debug!("push"); // Validate the process is valid and has permissions to write the file. // No serializability guarantees from other requests from the same process. // Sanitizes the path. let req = req.into_inner(); let rx = { if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) { perms.touch() } else { return Err(Status::new(Code::Unauthenticated, "invalid process token")); } }; if let Some(mut rx) = rx { debug!("warm process awaiting..."); rx.recv().await; } let sensor_key = if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) { if !perms.can_write(&req.tag) { debug!("push: {} cannot write tag={}, silently failing", req.process_token, req.tag);
identifier_body
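A note on the host code above: every RPC handler gates on the same pattern, looking the caller's process token up in a Mutex-guarded map and failing with Unauthenticated when it is absent, before consulting per-process permissions. The following is a minimal, self-contained sketch of that gate; `Perms`, `check_push`, and the tag names are illustrative stand-ins for `ProcessPerms` and the real karl types, not the actual API.

use std::collections::HashMap;
use std::sync::{Arc, Mutex};

/// Hypothetical stand-in for ProcessPerms: tracks which tags may be written.
struct Perms { writable: Vec<String> }

impl Perms {
    fn can_write(&self, tag: &str) -> bool {
        self.writable.iter().any(|t| t == tag)
    }
}

#[derive(Debug)]
enum PushError { InvalidToken }

/// Validate the token, then check write permission, mirroring `push` above.
fn check_push(
    tokens: &Arc<Mutex<HashMap<String, Perms>>>,
    token: &str,
    tag: &str,
) -> Result<bool, PushError> {
    let tokens = tokens.lock().unwrap();
    let perms = tokens.get(token).ok_or(PushError::InvalidToken)?;
    // A denied write is not an error: the host "silently fails" the push.
    Ok(perms.can_write(tag))
}

fn main() {
    let tokens = Arc::new(Mutex::new(HashMap::from([(
        "tok-1".to_string(),
        Perms { writable: vec!["camera.motion".to_string()] },
    )])));
    assert_eq!(check_push(&tokens, "tok-1", "camera.motion").unwrap(), true);
    assert_eq!(check_push(&tokens, "tok-1", "other.tag").unwrap(), false);
    assert!(check_push(&tokens, "bad-token", "camera.motion").is_err());
}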
main.rs
guarantees from other requests from the same process. // Sanitizes the path. let req = req.into_inner(); let rx = { if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) { perms.touch() } else { return Err(Status::new(Code::Unauthenticated, "invalid process token")); } }; if let Some(mut rx) = rx { debug!("warm process awaiting..."); rx.recv().await; } if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) { if !perms.can_access_domain(&req.domain) { return Err(Status::new(Code::Unauthenticated, "invalid network access")); } } // Build the network request let method = match req.method.as_str() { "GET" => Method::GET, "POST" => Method::POST, "PUT" => Method::PUT, "DELETE" => Method::DELETE, "HEAD" => Method::HEAD, "OPTIONS" => Method::OPTIONS, "CONNECT" => Method::CONNECT, "PATCH" => Method::PATCH, "TRACE" => Method::TRACE, _ => { return Err(Status::new(Code::InvalidArgument, "invalid http method")) }, }; let mut builder = reqwest::Client::new() .request(method, req.domain.clone()) .timeout(Duration::from_secs(1)) .body(req.body.clone()); for header in &req.headers { let key = HeaderName::from_bytes(&header.key[..]) .map_err(|e| { error!("{}", e); Status::new(Code::InvalidArgument, "invalid header name") })?; builder = builder.header(key, &header.value[..]); } if self.mock_network { // Forward the network access to the controller. warn!("mock network request"); warn!("finish diff_priv_pipeline (statistics sent): {:?}", Instant::now()); self.api.forward_network(req).await?; Ok(Response::new(NetworkAccessResult::default())) } else { // Make the actual network access let handle = tokio::spawn(async move { let res = builder.send().await.map_err(|e| { error!("{}", e); Status::new(Code::Aborted, "http request failed") })?; let status_code = res.status().as_u16() as u32; let headers = res .headers() .iter() .map(|(key, value)| KeyValuePair { key: key.as_str().as_bytes().to_vec(), value: value.as_bytes().to_vec(), }) .collect::<Vec<_>>(); let data = res.bytes().await.map_err(|e| { error!("{}", e); Status::new(Code::Unavailable, "error streaming response bytes") })?; Ok(Response::new(NetworkAccessResult { status_code, headers, data: data.to_vec(), })) }); // Forward the network access to the controller. self.api.forward_network(req).await?; // Return the result of the HTTP request. handle.await.map_err(|e| Status::new(Code::Internal, format!("{}", e)))? } } /// Validates the process is an existing process, and checks its /// permissions to see that the tag corresponds to a valid param. /// If the tag is valid and this is a stateless edge, only respond /// successfully if the module is trying to get the triggered data. /// If the tag is valid and this is a stateful edge, endorse the data /// with the host token and forward to the controller. async fn get( &self, req: Request<GetData>, ) -> Result<Response<GetDataResult>, Status> { debug!("get"); // Validate the process is valid and has permissions to read the tag. // No serializability guarantees from other requests from the same process.
let req = req.into_inner(); let rx = { if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) { perms.touch() } else { warn!("get: invalid token {}", req.process_token); return Err(Status::new(Code::Unauthenticated, "invalid process token")); } }; if let Some(mut rx) = rx { debug!("warm process awaiting..."); rx.recv().await; } if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) { if perms.is_triggered(&req.tag) { // cached the triggered file if req.lower != req.upper { debug!("get: {} invalid triggered timestamps", req.process_token); return Ok(Response::new(GetDataResult::default())) } else if !self.pubsub_enabled { debug!("get: {} pubsub disabled, fallthrough to read from data sink", req.process_token); // fallthrough below } else if let Some(data) = perms.read_triggered(&req.lower) { debug!("get: {} reading triggered data", req.process_token); return Ok(Response::new(GetDataResult { timestamps: vec![req.lower], data: vec![data], })) } else { debug!("get: {} process was not triggered", req.process_token); return Ok(Response::new(GetDataResult::default())) } } else if !perms.can_read(&req.tag) { warn!("get: {} cannot read {}", req.process_token, req.tag); return Err(Status::new(Code::Unauthenticated, "cannot read")); } } // Forward the file access to the controller and return the result debug!("get: {} forwarding tag={}", req.process_token, req.tag); self.api.forward_get(req).await } /// Validates the process is an existing process, and checks its /// permissions to see that the process is writing to a valid tag. /// If the tag is valid, endorse the data with the host token and /// forward to the controller. /// /// If the tag corresponds to sensor state (say maybe it starts with # /// which is reserved for state tags), forward the request as a state /// change instead. async fn push( &self, req: Request<PushData>, ) -> Result<Response<()>, Status> { debug!("push"); // Validate the process is valid and has permissions to write the file. // No serializability guarantees from other requests from the same process. // Sanitizes the path. let req = req.into_inner(); let rx = { if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) { perms.touch() } else { return Err(Status::new(Code::Unauthenticated, "invalid process token")); } }; if let Some(mut rx) = rx { debug!("warm process awaiting..."); rx.recv().await; } let sensor_key = if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) { if !perms.can_write(&req.tag) { debug!("push: {} cannot write tag={}, silently failing", req.process_token, req.tag); return Ok(Response::new(())); } if state_tags::is_state_tag(&req.tag) { Some(state_tags::parse_state_tag(&req.tag)) } else { None } } else { unreachable!() }; if let Some((sensor, key)) = sensor_key { // Forward as state change if the tag changes state. debug!("push: {} forwarding state change tag={}", req.process_token, req.tag); let req = StateChange { host_token: String::new(), process_token: req.process_token, sensor_id: sensor, key, value: req.data, }; self.api.forward_state(req).await } else
} } impl Host { /// Generate a new host with a random ID. pub fn new( base_path: PathBuf, controller: &str, cold_cache_enabled: bool, warm_cache_enabled: bool, pubsub_enabled: bool, mock_network: bool, ) -> Self { use rand::Rng; let id: u32 = rand::thread_rng().gen(); assert!(cold_cache_enabled || !warm_cache_enabled); // TODO: buffer size Self { id, api: crate::net::KarlHostAPI::new(controller), process_tokens: Arc::new(Mutex::new(HashMap::new())), warm_processes: Arc::new(Mutex::new(HashMap::new())), warm_cache_tx: None, // wish this didn't have to be wrapped path_manager: Arc::new(PathManager::new(base_path, id)), compute_lock: Arc::new(Mutex::new(())), cold_cache_enabled, warm_cache_enabled, pubsub_enabled, mock_network, } } /// Spawns a background process that sends heartbeats to the controller /// at the HEARTBEAT_INTERVAL. /// /// The constructor creates a directory at the <KARL_PATH> if it does /// not already exist. The working directory for any computation is at /// <KARL_PATH>/<LISTENER_ID>. When not doing computation, the working /// directory must be at <KARL_PATH>. /// /// Parameters: /// - port - The port to listen on. /// - password - The password to register with the controller. pub async fn start(&mut self, port: u16, password: &str) -> Result<(), Status> { self.api.register(self.id, port, password).await?; let api = self.api.clone(); tokio::spawn(async move { // Every HEARTBEAT_INTERVAL seconds, this process wakes up and // sends a heartbeat message to the controller. loop { tokio::time::sleep(Duration::from_secs(HEARTBEAT_INTERVAL)).await; trace!("heartbeat"); let res = api.heartbeat().await; if let Err(e) = res { warn!("error sending heartbeat: {}", e); }; } }); // listener for spawning warm processes let (tx, mut rx) = mpsc::channel::<ComputeRequest>(100); self.warm_cache_tx = Some(tx); let host = self.clone(); tokio::spawn(async move { loop { let req: ComputeRequest = rx.recv().await.unwrap(); let is_warm = true; Host::spawn_new_process( host.clone(), req, is_warm, TRIGGERED_KEY.to_string(), // special value TRIGGERED_KEY.to_string(), // special value ).await; } }); Ok(()) } async fn attach_warm_process( &self, req: &mut ComputeRequest, ) -> Option<ProcessToken> { let warm_process = { let mut warm_processes = self.warm_processes.lock().unwrap(); let mut process_tokens = self.process_tokens.lock().unwrap(); let mut process: Option<WarmProcess> = None; if let Some(processes) = warm_processes.get_mut(&req.module_id) { // reserve the process token process = processes.pop(); } if let Some(process) = process { process_tokens .get_mut(&process.process_token).unwrap() .set_compute_request(req); process } else { return None; } }; // permissions are set and warm process can continue info!("attaching: {} ({})", req.module_id, warm_process.process_token); warm_process.tx.send(()).await.unwrap(); Some(warm_process.process_token) } async fn spawn_new_process( host: Host, mut req: ComputeRequest, is_warm: bool, triggered_tag: String, triggered_timestamp: String, ) -> ProcessToken { let process_token = Token::gen(); let (perms, tx) = if !is_warm { info!("spawning cold process: {} ({})", req.module_id, process_token); (ProcessPerms::new(&mut req), None) } else { info!("spawning warm process: {} ({})", req.module_id, process_token); let (perms, tx) = ProcessPerms::new_warm_cache(); (perms, Some(tx)) }; // Mark an active process { let mut process_tokens = host.process_tokens.lock().unwrap(); assert!(!process_tokens.contains_key(&process_token)); process_tokens.insert(process_token.clone(),
perms); } // If it's warm insert a sending channel to eventually notify // this process it is ready to continue if let Some(tx) = tx { host.warm_processes.lock().unwrap() .entry(req.module_id.clone()) .or_insert(vec![]) .push(WarmProcess { process_token: process_token.clone(), tx, }); } // Handle the process asynchronously #[cfg(target_os = "linux")] { let binary_path = Path::new(&req.binary_path).to_path_buf(); let process_token = process_token.clone(); tokio::spawn(async move { let original_req = req.clone(); if !triggered_tag.is_empty() { req.envs.push(format!("TRIGGERED_TAG={}", &triggered_tag)); } if !triggered_timestamp.is_empty() { req.envs.push(format!("TRIGGERED_TIMESTAMP={}", &triggered_timestamp)); } req.envs.push(format!("PROCESS_TOKEN={}", &process_token)); if !req.params.is_empty() { req.envs.push(format!("KARL_PARAMS={}", &req.params)); } if !req.returns.is_empty() { req.envs.push(format!("KARL_RETURNS={}", &req.returns)); } let execution_time = Host::handle_compute( host.compute_lock.clone(), host.path_manager.clone(), req.module_id, req.cached, host.cold_cache_enabled, req.package, binary_path, req.args, req.envs, ).unwrap(); host.process_tokens.lock().unwrap().remove(&process_token); host.api.notify_end(process_token).await.unwrap(); // Now that the compute request is finished, evaluate its // initialization time. If the initialization time was high, // recursively call this function but as a warm cache module. // We assume initialization time is high if the warm cache // is enabled. let _long_init_time = execution_time > Duration::from_secs(5); if host.warm_cache_enabled { debug!("execution_time was {:?}, spawning warm modules anyway", execution_time); host.warm_cache_tx.as_ref().unwrap().send(original_req).await.unwrap(); } }); } #[cfg(not(target_os = "linux"))] { unimplemented!() } process_token } /// Handle a compute request. /// /// Returns the execution time. #[cfg(target_os = "linux")] fn handle_compute( lock: Arc<Mutex<()>>, path_manager: Arc<PathManager>, module_id: ModuleID, cached: bool, cold_cache_enabled: bool, package: Vec<u8>, binary_path: PathBuf, args: Vec<String>, envs: Vec<String>, ) -> Result<Duration, Error> { let now = Instant::now(); if cached && !cold_cache_enabled { return Err(Error::CacheError("caching is disabled".to_string())); } // TODO: lock on finer granularity, just the specific module // But take a lock around the filesystem so multiple processes // aren't handling compute requests that could be cached. // And so that each request can create a directory for its process. let (mount, paths) = { let lock = lock.lock().unwrap(); debug!("cached={} cold_cache_enabled={}", cached, cold_cache_enabled); if cached && !path_manager.is_cached(&module_id) { // TODO: controller needs to handle this error
{ // Forward the file access to the controller and return the result debug!("push: {} forwarding push tag={}", req.process_token, req.tag); self.api.forward_push(req).await }
conditional_block
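The warm-cache flow in spawn_new_process / attach_warm_process above boils down to parking a pre-spawned process on a channel and waking it once a real request has been attached under its token. A stripped-down sketch of that handoff, assuming tokio is available and using a toy registry in place of warm_processes; names here are illustrative:

use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use tokio::sync::mpsc;

/// Hypothetical warm-process registry keyed by module id: each entry holds
/// a channel that wakes a parked process once a request is attached to it.
type Registry = Arc<Mutex<HashMap<String, Vec<mpsc::Sender<()>>>>>;

#[tokio::main]
async fn main() {
    let registry: Registry = Arc::new(Mutex::new(HashMap::new()));

    // "spawn_new_process" side: park a warm process until it is woken.
    let (tx, mut rx) = mpsc::channel::<()>(1);
    registry.lock().unwrap().entry("module-a".into()).or_insert_with(Vec::new).push(tx);
    let parked = tokio::spawn(async move {
        rx.recv().await; // blocks until attach signals readiness
        println!("warm process continuing with attached request");
    });

    // "attach_warm_process" side: pop a parked process and signal it.
    let woken = registry.lock().unwrap().get_mut("module-a").and_then(|v| v.pop());
    if let Some(tx) = woken {
        tx.send(()).await.unwrap();
    }
    parked.await.unwrap();
}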
parser.rs
// Copyright lowRISC contributors. // Licensed under the Apache License, Version 2.0, see LICENSE for details. // SPDX-License-Identifier: Apache-2.0 //! Parsing of Verilog vmem files into the [`Vmem`] representation. //! //! See the [srec_vmem] documentation for a description of the file format. //! //! To summarise: //! * Files specify hexadecimal data for sequential addresses. //! * Start addresses for a run can be specified in hex with '@____'. //! * Address and data values are separated by whitespace or comments. //! * C-style '//' and '/* */' comments are supported. //! //! [srec_vmem]: https://srecord.sourceforge.net/man/man5/srec_vmem.5.html use std::num::ParseIntError; use thiserror::Error; use super::{Section, Vmem}; pub type ParseResult<T> = Result<T, ParseError>; /// Errors that can occur when parsing vmem files. #[derive(Clone, Debug, Error, PartialEq, Eq)] pub enum ParseError { /// Failure to parse an integer from hexadecimal. #[error("failed to parse as hexadecimal integer")] ParseInt(#[from] ParseIntError), /// An opened comment was not closed. #[error("unclosed comment")] UnclosedComment, /// An address was started with an '@' character, but no address value followed. #[error("address is missing a value")] AddrMissingValue, /// Catch-all for any characters that don't belong in vmem files. #[error("unknown character '{0}'")] UnknownChar(char), } /// Representation of the possible tokens found in vmem files. #[derive(Clone, Copy, Debug, PartialEq, Eq)] enum Token { /// End of file. Eof, /// Address directive, e.g. `@123abc`. Addr(u32), /// Data value, e.g. `abc123`. Value(u32), /// Comments, e.g. `/* comment */` or `// comment`. Comment, /// Whitespace, including newlines. Whitespace, } /// Some span of the input text representing a token. #[derive(Clone, Copy, Debug, PartialEq, Eq)] struct Span { token: Token, len: usize, } /// Parser for vmem files. pub struct VmemParser; impl VmemParser { /// Parse a complete vmem file from a string. pub fn parse(mut s: &str) -> ParseResult<Vmem> { // Build up the vmem file as sections. let mut vmem = Vmem::default(); vmem.sections.push(Section::default()); loop { // Parse a token from the input string, and move along by its span. let Span { len, token } = Self::token(s)?; s = &s[len..]; match token { Token::Eof => break, Token::Addr(addr) => { // Add a new section to the `Vmem` at this address. // Here we translate from a "word index" to a byte address. vmem.sections.push(Section { addr: addr * 4, data: Vec::new(), }); } Token::Value(value) => { // Add the value to the current (last added) section's data. let section = vmem.sections.last_mut().unwrap(); section.data.push(value) } // Whitespace and comments are ignored. Token::Whitespace => continue, Token::Comment => continue, } } Ok(vmem) } /// Parse a single token from the beginning of a string. fn token(s: &str) -> ParseResult<Span> { let parsers = [ Self::parse_eof, Self::parse_addr, Self::parse_value, Self::parse_comment, Self::parse_whitespace, ]; // Run each parser in order, stopping when one gets a matching parse. let span = parsers.iter().find_map(|p| p(s).transpose()); // If no parsers succeeded, return an error. match span { Some(span) => span, None => Err(ParseError::UnknownChar(s.chars().next().unwrap())), } } /// Try to parse an EOF from the beginning of a string. fn parse_eof(s: &str) -> ParseResult<Option<Span>> { // Empty strings give a 0-length `Token::Eof` span.
match s.is_empty() { true => Ok(Some(Span { len: 0, token: Token::Eof, })), false => Ok(None), } } /// Try to parse an address from the beginning of a string. fn parse_addr(s: &str) -> ParseResult<Option<Span>> { // Check for the beginning '@' symbol. let Some(addr) = s.strip_prefix('@') else { return Ok(None); }; // Find the length of the actual address string. let addr_len = match addr.find(|c: char| !c.is_ascii_hexdigit()) { Some(0) => return Err(ParseError::AddrMissingValue), Some(len) => len, None => addr.len(), }; // Ensure the '@' is included in the span's length! let len = '@'.len_utf8() + addr_len; // Parse from hexadecimal. let val = u32::from_str_radix(&addr[..addr_len], 16)?; let token = Token::Addr(val); let span = Span { token, len }; Ok(Some(span)) } /// Try to parse a value from the beginning of a string. fn parse_value(s: &str) -> ParseResult<Option<Span>> { // Check for hexadecimal characters in the input. let len = match s.find(|c: char| !c.is_ascii_hexdigit()) { Some(0) => return Ok(None), Some(len) => len, None => s.len(), }; let val = u32::from_str_radix(&s[..len], 16)?; let token = Token::Value(val); let span = Span { token, len }; Ok(Some(span)) } /// Try to parse a comment from the beginning of a string. fn parse_comment(s: &str) -> ParseResult<Option<Span>> { // Look for comment identifiers and their closers. let len = match s { s if s.starts_with("//") => s.find('\n').unwrap_or(s.len()), s if s.starts_with("/*") => { // `find` gives us the _start_ of the `*/`, so include its length as well. s.find("*/").ok_or(ParseError::UnclosedComment)? + "*/".len() } _ => return Ok(None), }; let token = Token::Comment; let span = Span { token, len }; Ok(Some(span)) } /// Try to parse whitespace from the beginning of a string. fn parse_whitespace(s: &str) -> ParseResult<Option<Span>> { // Check for whitespace at the beginning of the input. let len = match s.find(|c: char| !c.is_whitespace()) { Some(0) => return Ok(None), Some(len) => len, None => s.len(), }; let token = Token::Whitespace; let span = Span { len, token }; Ok(Some(span)) } } #[cfg(test)] mod test { use super::*; #[test] fn parse() { let input = r#" AB // comment CD EF @42 12 /* comment */ 34 "#; let expected = Vmem { sections: vec![ Section { addr: 0x00, data: vec![0xAB, 0xCD, 0xEF], }, Section { addr: 0x108, data: vec![0x12, 0x34], }, ], }; assert_eq!(VmemParser::parse(input).unwrap(), expected); } #[test] fn
() { // Check we can pick out the correct token from a string: let expected = [ ("", Token::Eof, 0), ("@ff", Token::Addr(0xff), 3), ("ff", Token::Value(0xff), 2), ("// X", Token::Comment, 4), ("/* X */", Token::Comment, 7), (" ", Token::Whitespace, 2), ]; for (s, token, len) in expected { let span = Span { token, len }; assert_eq!(VmemParser::token(s), Ok(span)); } // Unknown non-token: assert_eq!(VmemParser::token("X"), Err(ParseError::UnknownChar('X'))); } #[test] fn eof() { // Not EOF: assert_eq!(VmemParser::parse_eof(" ").unwrap(), None); // EOF: let expected = Some(Span { len: 0, token: Token::Eof, }); assert_eq!(VmemParser::parse_eof("").unwrap(), expected); } #[test] fn addr() { // No address: assert_eq!(VmemParser::parse_addr("/* X */").unwrap(), None); let expected = Some(Span { len: 9, token: Token::Addr(0x0123abcd), }); // Partially an address: assert_eq!(VmemParser::parse_addr("@0123ABCD FF").unwrap(), expected); // Entirely an address: assert_eq!(VmemParser::parse_addr("@0123ABCD").unwrap(), expected); // Lower-case hex characters: assert_eq!(VmemParser::parse_addr("@0123abcd").unwrap(), expected); // u32 overflow: assert!(VmemParser::parse_addr("@123456789").is_err()); // Missing address after '@': assert!(VmemParser::parse_addr("@").is_err()); assert!(VmemParser::parse_addr("@ FF").is_err()); } #[test] fn value() { // No value: assert_eq!(VmemParser::parse_value("/* X */").unwrap(), None); let expected = Some(Span { len: 8, token: Token::Value(0x0123abcd), }); // Partially a value: assert_eq!(VmemParser::parse_value("0123ABCD FF").unwrap(), expected); // Entirely a value: assert_eq!(VmemParser::parse_value("0123ABCD").unwrap(), expected); // Lower-case hex characters: assert_eq!(VmemParser::parse_value("0123abcd").unwrap(), expected); // u32 overflow: assert!(VmemParser::parse_value("123456789").is_err()); } #[test] fn comment() { // No whitespace: assert_eq!(VmemParser::parse_comment("FF").unwrap(), None); let expected = Some(Span { len: 7, token: Token::Comment, }); // Partial block comment: assert_eq!(VmemParser::parse_comment("/* X */ FF").unwrap(), expected); // Entirely a block comment: assert_eq!(VmemParser::parse_comment("/* X */").unwrap(), expected); // Unclosed block comment: assert!(VmemParser::parse_comment("/* X").is_err()); // Line comment ending in newline: assert_eq!( VmemParser::parse_comment(concat!("// XXXX", '\n', "FF")).unwrap(), expected ); // Line comment ending at EOF: assert_eq!(VmemParser::parse_comment("// XXXX").unwrap(), expected); } #[test] fn whitespace() { // No whitespace: assert_eq!(VmemParser::parse_whitespace("FF").unwrap(), None); let expected = Some(Span { len: 2, token: Token::Whitespace, }); // Partial whitespace: assert_eq!(VmemParser::parse_whitespace(" FF").unwrap(), expected); // Entirely whitespace: assert_eq!(VmemParser::parse_whitespace(" ").unwrap(), expected); } }
token
identifier_name
parser.rs
// Copyright lowRISC contributors. // Licensed under the Apache License, Version 2.0, see LICENSE for details. // SPDX-License-Identifier: Apache-2.0 //! Parsing of Verilog vmem files into the [`Vmem`] representation. //! //! See the [srec_vmem] documentation for a description of the file format. //! //! To summarise: //! * Files specify hexadecimal data for sequential addresses. //! * Start addresses for a run can be specified in hex with '@____'. //! * Address and data values are separated by whitespace or comments. //! * C-style '//' and '/* */' comments are supported. //! //! [srec_vmem]: https://srecord.sourceforge.net/man/man5/srec_vmem.5.html use std::num::ParseIntError; use thiserror::Error; use super::{Section, Vmem}; pub type ParseResult<T> = Result<T, ParseError>; /// Errors that can occur when parsing vmem files. #[derive(Clone, Debug, Error, PartialEq, Eq)] pub enum ParseError { /// Failure to parse an integer from hexadecimal. #[error("failed to parse as hexadecimal integer")] ParseInt(#[from] ParseIntError), /// An opened comment was not closed. #[error("unclosed comment")] UnclosedComment, /// An address was started with an '@' character, but no address value followed. #[error("address is missing a value")]
/// Catch-all for any characters that don't belong in vmem files. #[error("unknown character '{0}'")] UnknownChar(char), } /// Representation of the possible tokens found in vmem files. #[derive(Clone, Copy, Debug, PartialEq, Eq)] enum Token { /// End of file. Eof, /// Address directive, e.g. `@123abc`. Addr(u32), /// Data value, e.g. `abc123`. Value(u32), /// Comments, e.g. `/* comment */` or `// comment`. Comment, /// Whitespace, including newlines. Whitespace, } /// Some span of the input text representing a token. #[derive(Clone, Copy, Debug, PartialEq, Eq)] struct Span { token: Token, len: usize, } /// Parser for vmem files. pub struct VmemParser; impl VmemParser { /// Parse a complete vmem file from a string. pub fn parse(mut s: &str) -> ParseResult<Vmem> { // Build up the vmem file as sections. let mut vmem = Vmem::default(); vmem.sections.push(Section::default()); loop { // Parse a token from the input string, and move along by its span. let Span { len, token } = Self::token(s)?; s = &s[len..]; match token { Token::Eof => break, Token::Addr(addr) => { // Add a new section to the `Vmem` at this address. // Here we translate from a "word index" to a byte address. vmem.sections.push(Section { addr: addr * 4, data: Vec::new(), }); } Token::Value(value) => { // Add the value to the current (last added) section's data. let section = vmem.sections.last_mut().unwrap(); section.data.push(value) } // Whitespace and comments are ignored. Token::Whitespace => continue, Token::Comment => continue, } } Ok(vmem) } /// Parse a single token from the beginning of a string. fn token(s: &str) -> ParseResult<Span> { let parsers = [ Self::parse_eof, Self::parse_addr, Self::parse_value, Self::parse_comment, Self::parse_whitespace, ]; // Run each parser in order, stopping when one gets a matching parse. let span = parsers.iter().find_map(|p| p(s).transpose()); // If no parsers succeeded, return an error. match span { Some(span) => span, None => Err(ParseError::UnknownChar(s.chars().next().unwrap())), } } /// Try to parse an EOF from the beginning of a string. fn parse_eof(s: &str) -> ParseResult<Option<Span>> { // Empty strings give a 0-length `Token::Eof` span. match s.is_empty() { true => Ok(Some(Span { len: 0, token: Token::Eof, })), false => Ok(None), } } /// Try to parse an address from the beginning of a string. fn parse_addr(s: &str) -> ParseResult<Option<Span>> { // Check for the beginning '@' symbol. let Some(addr) = s.strip_prefix('@') else { return Ok(None); }; // Find the length of the actual address string. let addr_len = match addr.find(|c: char| !c.is_ascii_hexdigit()) { Some(0) => return Err(ParseError::AddrMissingValue), Some(len) => len, None => addr.len(), }; // Ensure the '@' is included in the span's length! let len = '@'.len_utf8() + addr_len; // Parse from hexadecimal. let val = u32::from_str_radix(&addr[..addr_len], 16)?; let token = Token::Addr(val); let span = Span { token, len }; Ok(Some(span)) } /// Try to parse a value from the beginning of a string. fn parse_value(s: &str) -> ParseResult<Option<Span>> { // Check for hexadecimal characters in the input. let len = match s.find(|c: char| !c.is_ascii_hexdigit()) { Some(0) => return Ok(None), Some(len) => len, None => s.len(), }; let val = u32::from_str_radix(&s[..len], 16)?; let token = Token::Value(val); let span = Span { token, len }; Ok(Some(span)) } /// Try to parse a comment from the beginning of a string. fn parse_comment(s: &str) -> ParseResult<Option<Span>> { // Look for comment identifiers and their closers.
let len = match s { s if s.starts_with("//") => s.find('\n').unwrap_or(s.len()), s if s.starts_with("/*") => { // `find` gives us the _start_ of the `*/`, so include its length as well. s.find("*/").ok_or(ParseError::UnclosedComment)? + "*/".len() } _ => return Ok(None), }; let token = Token::Comment; let span = Span { token, len }; Ok(Some(span)) } /// Try to parse whitespace from the beginning of a string. fn parse_whitespace(s: &str) -> ParseResult<Option<Span>> { // Check for whitespace at the beginning of the input. let len = match s.find(|c: char| !c.is_whitespace()) { Some(0) => return Ok(None), Some(len) => len, None => s.len(), }; let token = Token::Whitespace; let span = Span { len, token }; Ok(Some(span)) } } #[cfg(test)] mod test { use super::*; #[test] fn parse() { let input = r#" AB // comment CD EF @42 12 /* comment */ 34 "#; let expected = Vmem { sections: vec![ Section { addr: 0x00, data: vec![0xAB, 0xCD, 0xEF], }, Section { addr: 0x108, data: vec![0x12, 0x34], }, ], }; assert_eq!(VmemParser::parse(input).unwrap(), expected); } #[test] fn token() { // Check we can pick out the correct token from a string: let expected = [ ("", Token::Eof, 0), ("@ff", Token::Addr(0xff), 3), ("ff", Token::Value(0xff), 2), ("// X", Token::Comment, 4), ("/* X */", Token::Comment, 7), (" ", Token::Whitespace, 2), ]; for (s, token, len) in expected { let span = Span { token, len }; assert_eq!(VmemParser::token(s), Ok(span)); } // Unknown non-token: assert_eq!(VmemParser::token("X"), Err(ParseError::UnknownChar('X'))); } #[test] fn eof() { // Not EOF: assert_eq!(VmemParser::parse_eof(" ").unwrap(), None); // EOF: let expected = Some(Span { len: 0, token: Token::Eof, }); assert_eq!(VmemParser::parse_eof("").unwrap(), expected); } #[test] fn addr() { // No address: assert_eq!(VmemParser::parse_addr("/* X */").unwrap(), None); let expected = Some(Span { len: 9, token: Token::Addr(0x0123abcd), }); // Partially an address: assert_eq!(VmemParser::parse_addr("@0123ABCD FF").unwrap(), expected); // Entirely an address: assert_eq!(VmemParser::parse_addr("@0123ABCD").unwrap(), expected); // Lower-case hex characters: assert_eq!(VmemParser::parse_addr("@0123abcd").unwrap(), expected); // u32 overflow: assert!(VmemParser::parse_addr("@123456789").is_err()); // Missing address after '@': assert!(VmemParser::parse_addr("@").is_err()); assert!(VmemParser::parse_addr("@ FF").is_err()); } #[test] fn value() { // No value: assert_eq!(VmemParser::parse_value("/* X */").unwrap(), None); let expected = Some(Span { len: 8, token: Token::Value(0x0123abcd), }); // Partially a value: assert_eq!(VmemParser::parse_value("0123ABCD FF").unwrap(), expected); // Entirely a value: assert_eq!(VmemParser::parse_value("0123ABCD").unwrap(), expected); // Lower-case hex characters: assert_eq!(VmemParser::parse_value("0123abcd").unwrap(), expected); // u32 overflow: assert!(VmemParser::parse_value("123456789").is_err()); } #[test] fn comment() { // No whitespace: assert_eq!(VmemParser::parse_comment("FF").unwrap(), None); let expected = Some(Span { len: 7, token: Token::Comment, }); // Partial block comment: assert_eq!(VmemParser::parse_comment("/* X */ FF").unwrap(), expected); // Entirely a block comment: assert_eq!(VmemParser::parse_comment("/* X */").unwrap(), expected); // Unclosed block comment: assert!(VmemParser::parse_comment("/* X").is_err()); // Line comment ending in newline: assert_eq!( VmemParser::parse_comment(concat!("// XXXX", '\n', "FF")).unwrap(), expected ); // Line comment ending at EOF:
assert_eq!(VmemParser::parse_comment("// XXXX").unwrap(), expected); } #[test] fn whitespace() { // No whitespace: assert_eq!(VmemParser::parse_whitespace("FF").unwrap(), None); let expected = Some(Span { len: 2, token: Token::Whitespace, }); // Partial whitespace: assert_eq!(VmemParser::parse_whitespace(" FF").unwrap(), expected); // Entirely whitespace: assert_eq!(VmemParser::parse_whitespace(" ").unwrap(), expected); } }
AddrMissingValue,
random_line_split
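One detail worth pinning down from parse() above: an '@' directive gives a word index, and each data word is 4 bytes, so a section's byte address is the directive value times four (the test's @42 becomes byte address 0x108). A self-contained check of that arithmetic, with a hypothetical helper name:

/// Word-index -> byte-address translation used when a vmem `@` directive
/// opens a new section: each data word is 4 bytes wide.
fn word_index_to_byte_addr(word_index: u32) -> u32 {
    word_index * 4
}

fn main() {
    // `@42` is hex, i.e. word index 0x42 = 66, so the section starts at
    // byte 66 * 4 = 264 = 0x108 -- matching the `parse` test above.
    assert_eq!(word_index_to_byte_addr(0x42), 0x108);
}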
parser.rs
// Copyright lowRISC contributors. // Licensed under the Apache License, Version 2.0, see LICENSE for details. // SPDX-License-Identifier: Apache-2.0 //! Parsing of Verilog vmem files into the [`Vmem`] representation. //! //! See the [srec_vmem] documentation for a description of the file format. //! //! To summarise: //! * Files specify hexadecimal data for sequential addresses. //! * Start addresses for a run can be specified in hex with '@____'. //! * Address and data values are separated by whitespace or comments. //! * C-style '//' and '/* */' comments are supported. //! //! [srec_vmem]: https://srecord.sourceforge.net/man/man5/srec_vmem.5.html use std::num::ParseIntError; use thiserror::Error; use super::{Section, Vmem}; pub type ParseResult<T> = Result<T, ParseError>; /// Errors that can occur when parsing vmem files. #[derive(Clone, Debug, Error, PartialEq, Eq)] pub enum ParseError { /// Failure to parse an integer from hexadecimal. #[error("failed to parse as hexadecimal integer")] ParseInt(#[from] ParseIntError), /// An opened comment was not closed. #[error("unclosed comment")] UnclosedComment, /// An address was started with an '@' character, but no address value followed. #[error("address is missing a value")] AddrMissingValue, /// Catch-all for any characters that don't belong in vmem files. #[error("unknown character '{0}'")] UnknownChar(char), } /// Representation of the possible tokens found in vmem files. #[derive(Clone, Copy, Debug, PartialEq, Eq)] enum Token { /// End of file. Eof, /// Address directive, e.g. `@123abc`. Addr(u32), /// Data value, e.g. `abc123`. Value(u32), /// Comments, e.g. `/* comment */` or `// comment`. Comment, /// Whitespace, including newlines. Whitespace, } /// Some span of the input text representing a token. #[derive(Clone, Copy, Debug, PartialEq, Eq)] struct Span { token: Token, len: usize, } /// Parser for vmem files. pub struct VmemParser; impl VmemParser { /// Parse a complete vmem file from a string. pub fn parse(mut s: &str) -> ParseResult<Vmem> { // Build up the vmem file as sections. let mut vmem = Vmem::default(); vmem.sections.push(Section::default()); loop { // Parse a token from the input string, and move along by its span. let Span { len, token } = Self::token(s)?; s = &s[len..]; match token { Token::Eof => break, Token::Addr(addr) => { // Add a new section to the `Vmem` at this address. // Here we translate from a "word index" to a byte address. vmem.sections.push(Section { addr: addr * 4, data: Vec::new(), }); } Token::Value(value) => { // Add the value to the current (last added) section's data. let section = vmem.sections.last_mut().unwrap(); section.data.push(value) } // Whitespace and comments are ignored. Token::Whitespace => continue, Token::Comment => continue, } } Ok(vmem) } /// Parse a single token from the beginning of a string. fn token(s: &str) -> ParseResult<Span> { let parsers = [ Self::parse_eof, Self::parse_addr, Self::parse_value, Self::parse_comment, Self::parse_whitespace, ]; // Run each parser in order, stopping when one gets a matching parse. let span = parsers.iter().find_map(|p| p(s).transpose()); // If no parsers succeeded, return an error. match span { Some(span) => span, None => Err(ParseError::UnknownChar(s.chars().next().unwrap())), } } /// Try to parse an EOF from the beginning of a string. fn parse_eof(s: &str) -> ParseResult<Option<Span>> { // Empty strings give a 0-length `Token::Eof` span.
match s.is_empty() { true => Ok(Some(Span { len: 0, token: Token::Eof, })), false => Ok(None), } } /// Try to parse an address from the beginning of a string. fn parse_addr(s: &str) -> ParseResult<Option<Span>> { // Check for the beginning '@' symbol. let Some(addr) = s.strip_prefix('@') else { return Ok(None); }; // Find the length of the actual address string. let addr_len = match addr.find(|c: char| !c.is_ascii_hexdigit()) { Some(0) => return Err(ParseError::AddrMissingValue), Some(len) => len, None => addr.len(), }; // Ensure the '@' is included in the span's length! let len = '@'.len_utf8() + addr_len; // Parse from hexadecimal. let val = u32::from_str_radix(&addr[..addr_len], 16)?; let token = Token::Addr(val); let span = Span { token, len }; Ok(Some(span)) } /// Try to parse a value from the beginning of a string. fn parse_value(s: &str) -> ParseResult<Option<Span>> { // Check for hexadecimal characters in the input. let len = match s.find(|c: char| !c.is_ascii_hexdigit()) { Some(0) => return Ok(None), Some(len) => len, None => s.len(), }; let val = u32::from_str_radix(&s[..len], 16)?; let token = Token::Value(val); let span = Span { token, len }; Ok(Some(span)) } /// Try to parse a comment from the beginning of a string. fn parse_comment(s: &str) -> ParseResult<Option<Span>> { // Look for comment identifiers and their closers. let len = match s { s if s.starts_with("//") => s.find('\n').unwrap_or(s.len()), s if s.starts_with("/*") => { // `find` gives us the _start_ of the `*/`, so include its length as well. s.find("*/").ok_or(ParseError::UnclosedComment)? + "*/".len() } _ => return Ok(None), }; let token = Token::Comment; let span = Span { token, len }; Ok(Some(span)) } /// Try to parse whitespace from the beginning of a string. fn parse_whitespace(s: &str) -> ParseResult<Option<Span>>
} #[cfg(test)] mod test { use super::*; #[test] fn parse() { let input = r#" AB // comment CD EF @42 12 /* comment */ 34 "#; let expected = Vmem { sections: vec![ Section { addr: 0x00, data: vec![0xAB, 0xCD, 0xEF], }, Section { addr: 0x108, data: vec![0x12, 0x34], }, ], }; assert_eq!(VmemParser::parse(input).unwrap(), expected); } #[test] fn token() { // Check we can pick out the correct token from a string: let expected = [ ("", Token::Eof, 0), ("@ff", Token::Addr(0xff), 3), ("ff", Token::Value(0xff), 2), ("// X", Token::Comment, 4), ("/* X */", Token::Comment, 7), (" ", Token::Whitespace, 2), ]; for (s, token, len) in expected { let span = Span { token, len }; assert_eq!(VmemParser::token(s), Ok(span)); } // Unknown non-token: assert_eq!(VmemParser::token("X"), Err(ParseError::UnknownChar('X'))); } #[test] fn eof() { // Not EOF: assert_eq!(VmemParser::parse_eof(" ").unwrap(), None); // EOF: let expected = Some(Span { len: 0, token: Token::Eof, }); assert_eq!(VmemParser::parse_eof("").unwrap(), expected); } #[test] fn addr() { // No address: assert_eq!(VmemParser::parse_addr("/* X */").unwrap(), None); let expected = Some(Span { len: 9, token: Token::Addr(0x0123abcd), }); // Partially an address: assert_eq!(VmemParser::parse_addr("@0123ABCD FF").unwrap(), expected); // Entirely an address: assert_eq!(VmemParser::parse_addr("@0123ABCD").unwrap(), expected); // Lower-case hex characters: assert_eq!(VmemParser::parse_addr("@0123abcd").unwrap(), expected); // u32 overflow: assert!(VmemParser::parse_addr("@123456789").is_err()); // Missing address after '@': assert!(VmemParser::parse_addr("@").is_err()); assert!(VmemParser::parse_addr("@ FF").is_err()); } #[test] fn value() { // No value: assert_eq!(VmemParser::parse_value("/* X */").unwrap(), None); let expected = Some(Span { len: 8, token: Token::Value(0x0123abcd), }); // Partially a value: assert_eq!(VmemParser::parse_value("0123ABCD FF").unwrap(), expected); // Entirely a value: assert_eq!(VmemParser::parse_value("0123ABCD").unwrap(), expected); // Lower-case hex characters: assert_eq!(VmemParser::parse_value("0123abcd").unwrap(), expected); // u32 overflow: assert!(VmemParser::parse_value("123456789").is_err()); } #[test] fn comment() { // No whitespace: assert_eq!(VmemParser::parse_comment("FF").unwrap(), None); let expected = Some(Span { len: 7, token: Token::Comment, }); // Partial block comment: assert_eq!(VmemParser::parse_comment("/* X */ FF").unwrap(), expected); // Entirely a block comment: assert_eq!(VmemParser::parse_comment("/* X */").unwrap(), expected); // Unclosed block comment: assert!(VmemParser::parse_comment("/* X").is_err()); // Line comment ending in newline: assert_eq!( VmemParser::parse_comment(concat!("// XXXX", '\n', "FF")).unwrap(), expected ); // Line comment ending at EOF: assert_eq!(VmemParser::parse_comment("// XXXX").unwrap(), expected); } #[test] fn whitespace() { // No whitespace: assert_eq!(VmemParser::parse_whitespace("FF").unwrap(), None); let expected = Some(Span { len: 2, token: Token::Whitespace, }); // Partial whitespace: assert_eq!(VmemParser::parse_whitespace(" FF").unwrap(), expected); // Entirely whitespace: assert_eq!(VmemParser::parse_whitespace(" ").unwrap(), expected); } }
{ // Check for whitespace at the beginning of the input. let len = match s.find(|c: char| !c.is_whitespace()) { Some(0) => return Ok(None), Some(len) => len, None => s.len(), }; let token = Token::Whitespace; let span = Span { len, token }; Ok(Some(span)) }
identifier_body
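VmemParser::token above dispatches by running each sub-parser in a fixed order; the trick is that every sub-parser returns ParseResult<Option<Span>>, and Result::transpose flips that into Option<ParseResult<Span>>, so find_map stops at the first parser that either matched or failed. A toy re-creation of that dispatch under the same pattern (the digit/space parsers here are made up for illustration, not the vmem ones):

// Each sub-parser returns Result<Option<T>, E>: Ok(None) means "no match,
// keep trying", Ok(Some(t)) means "matched", Err(e) means "matched but bad".
fn parse_digit(s: &str) -> Result<Option<char>, String> {
    Ok(s.chars().next().filter(|c| c.is_ascii_digit()))
}

fn parse_space(s: &str) -> Result<Option<char>, String> {
    Ok(s.chars().next().filter(|c| c.is_whitespace()))
}

fn first_token(s: &str) -> Result<char, String> {
    let parsers: [fn(&str) -> Result<Option<char>, String>; 2] = [parse_digit, parse_space];
    // `transpose` turns Result<Option<T>, E> into Option<Result<T, E>>,
    // so `find_map` short-circuits on the first match *or* error.
    parsers
        .iter()
        .find_map(|p| p(s).transpose())
        .unwrap_or_else(|| Err(format!("unknown character {:?}", s.chars().next())))
}

fn main() {
    assert_eq!(first_token("7a"), Ok('7'));
    assert_eq!(first_token(" x"), Ok(' '));
    assert!(first_token("x").is_err());
}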
main.rs
use oorandom; #[test] fn research_on_directionary() { // Part-of-speech classification: find the tags in the .dic file and 10 example ids per tag use std::fs; use std::collections::HashMap; let mut hash : HashMap<&str, Vec<&str>> = HashMap::new(); let raw_bytes = fs::read_to_string("resources/ansj_seg-master/default.dic").expect("failed to open directionary file"); let filterd_bytes = raw_bytes.replace(&['0','1','2','3','4','5','6','7','8','9','\n'][..], ""); let seperate_words : Vec<&str> = filterd_bytes.split('\t').collect(); let mut i : u64 = 0; let mut last_word : &str = ""; for tag in seperate_words { i = i + 1; // odd columns are words, even columns are tags if i % 2 != 0 { last_word = tag; continue; } let hash_get = hash.get_mut(tag); match hash_get { None => { let vec = vec!(last_word); hash.insert(tag, vec); } Some(vec) => { if vec.len() >= 10 {continue;} vec.push(last_word); } } } println!("{:?}", hash); } #[test] fn add_directionary() { let mut random_word = Word::from_literal("okay"); random_word.set_tag("v"); let mut random_wor2 = Word::from_literal("no"); random_wor2.set_tag("l"); let mut directionary = Directionary::new(); directionary.add_a_word(&random_word); directionary.add_a_word(&random_wor2); println!("{:?}", directionary); } #[test] fn create_directionary() { let directionary = Directionary::from_default(None, Some(500)); println!("generation finished!"); println!("result {:?}", directionary); } #[test] fn create_sentance() { let mut resolver = RandomResolver::from_seed(64u128); let directionary = Directionary::from_default(None, None); let mut sentance = SentanceNode::word("其实") .next( SentanceNode::icon(',') .next( SentanceNode::element("Noun") .next( SentanceNode::word("是") .next( SentanceNode::element("Noun") .next( SentanceNode::icon(',') .next( SentanceNode::word("你知道吗?") )))))); sentance.resolve_sentance(&mut resolver, &directionary); let output = sentance.to_string(); println!("result {}", output); } #[test] fn multi_sentance() { let mut resolver = RandomResolver::from_seed(1024u128); let directionary = Directionary::from_default(None, None); let generic_sentance = sentance!( [element="Who"][word="是"][element= "Adjective"][word="的"][element="Adjective"][element="IntranstiveVerb"][word="器."] ); for _ in 1..255 { let mut sentance = generic_sentance.clone(); sentance.resolve_sentance(&mut resolver, &directionary); let output = sentance.to_string(); println!("result {}", output); } } #[macro_export] macro_rules!
sentance { ([$function:ident=$string:expr]$([$nfunction:ident=$nstring:expr])+ ) => { SentanceNode::$function($string).next(sentance!($([$nfunction=$nstring])+)) }; ([$function:ident=$string:expr]) => { SentanceNode::$function($string) }; } #[derive(Debug)] struct Word { pub tag : String, pub literal : String, } impl Word { pub fn from_literal(init_literal : &str) -> Word { Word{ tag : String::new(), literal : String::from(init_literal) } } pub fn set_tag(&mut self, new_tag : &str) { self.tag = String::from(new_tag); } } #[derive(Debug)] struct TagMatcher { pub matchers_pool : std::collections::BTreeMap<String, Vec<String>>, } impl TagMatcher { pub fn new() -> TagMatcher { use std::collections::BTreeMap; TagMatcher{matchers_pool : BTreeMap::new()} } fn add(mut self, tag : &'static str, matchers : Vec<&'static str>) -> Self { let matcher_result = self.matchers_pool.get(tag); match matcher_result { Some(_) => { } None => { self.matchers_pool.insert(String::from(tag), Vec::new()); } } let matcher_vec = self.matchers_pool.get_mut(tag).unwrap(); for matcher in matchers { matcher_vec.push(String::from(matcher)); } self } pub fn resolve(&self, tag : String) -> Option<Vec<String>> { let mut ret_vec : Option<Vec<String>> = None; for (element, matchers) in &self.matchers_pool { for matcher in matchers { if *matcher == tag { match &mut ret_vec { Some(vec) => { vec.push(element.clone()); } None => { let vec = vec!(element.clone()); ret_vec = Some(vec); } } } } } ret_vec } } #[derive(Debug)] struct Directionary { // verbs : Vec<String>, // nouns : Vec<String>, // advs : Vec<String>, // adjs : Vec<String>, library : std::collections::HashMap<String, Vec<String>>, matcher : TagMatcher, } impl Directionary { pub fn new() -> Directionary { use std::collections::HashMap; // TODO FINISH THIS let tag_matcher = TagMatcher::new() .add("Location", vec!("nis","ntcb","ntcf","s","na","ns","ntc","nts","nth","ntch","nto","nit","nt","nsf","nz","f","ntu","nsf",)) .add("Name", vec!("nr","nba","nrfg","nrf","nrj",)) .add("Time", vec!("tg","t","Mg")) .add("GenericNoun", vec!("gb","vf","nnd","nhd","nmc","nbc","gc","nhm","ng","gg","gi","n","gp","gm","nnt",)) .add("AllNouns", vec!("vf","nis","ntcb","ntcf","gb","nhd","j","nr","nba","s","nmc","nnd","nrfg","na","ns","ntc","nbc","gc","nts","nth","x","ntch","nto","nit","nrf","nhm","ng","nrt","ntu","gg","gi","nt","nsf","nrj","nz","f","n","gp","gm","tg","nnt","t","Mg",)) .add("Numeral",vec!("m")) .add("Quantifier", vec!("qv","q","qt",)) .add("IndependentVerb", vec!("vl",)) .add("TranstiveVerb", vec!("pba","pbei","vyou","vshi","vd","vx","vq","vi","vn",)) .add("IntranstiveVerb", vec!("vg","uguo","v","vf",)) .add("AllVerbs", vec!("vyou","uguo","vd","v","vx","vi","pba","pbei","vl","vg","vq","vn","vshi","vf",)) .add("Adjective", vec!("b","mq","bl","a","z","al","ag","an","œa",)) .add("Adverb",vec!("b","bl","ad","d","dl","œa","dg",)) .add("AllPronouns", vec!("rr","rz","ryt","Rg","ry","rys","rzs","rzt","ryv","k",)) .add("AskWhen", vec!("rzt")) .add("When", vec!("ryt")) .add("AskHow", vec!("ryv")) .add("AskWhere", vec!("rys")) .add("Where", vec!("rzs")) .add("Who", vec!("rr","rz","Rg",)) .add("AskWho", vec!("ry")) .add("Conjunction", vec!("rzv","u","c","cc",)) .add("Preposition", vec!("r","uyy","udeng","p","udh",)) .add("Particle", vec!("uzhe","uls","ule","usuo","ulian","uzhi","ude",)) .add("AllModals", vec!("y","e","o",)) .add("PostFixModal", vec!("y")) .add("PreFixModal", vec!("e")) .add("Onomatopoeia", vec!("o")); let new_library = HashMap::new(); Directionary { library : new_library, 
matcher : tag_matcher, } } pub fn from_default(highest_input : Option<u32>, lowest_input : Option<u32>) -> Directionary { // TODO use std::fs; let highest_frequency : u32 = match highest_input { Some(frequency) => frequency, None => 2147483647, }; let lowest_frequency : u32 = match lowest_input { Some(frequency) => frequency, None => 0, }; let mut directionary = Directionary::new(); let raw_bytes = fs::read_to_string("resources/ansj_seg-master/default.dic").expect("failed to open directionary file"); let filterd_bytes = raw_bytes.replace(&['\n'][..], "\t"); let seperate_words : Vec<&str> = filterd_bytes.split('\t').collect(); let mut i : u64 = 0; let mut last_word : &str = ""; let mut last_tag : &str = ""; let mut frequency; for tag in seperate_words { i = i + 1; // column 1 is the word, column 2 the tag, column 3 the frequency let count = i % 3; match count { 0 => { // println!("tag{:?}, result{:?}", &tag, tag.parse::<u32>()); frequency = tag.parse::<u32>().unwrap(); }, // third column 1 => { last_word = tag; continue; }, // first column _ => { last_tag = tag; continue; }, // second column } if (frequency > highest_frequency) || (frequency < lowest_frequency) { continue; } let mut word = Word::from_literal(last_word); word.set_tag(last_tag); directionary.add_a_word(&word); } directionary } pub fn find_a_word(&self, element : &str, resolver : &mut RandomResolver) -> String { let library_vec = self.library.get(element); match library_vec { Some(_) => {} None => {panic!("failed to get element type {}", element);} } let library_vec = library_vec.unwrap(); let word = library_vec.get( resolver.get_pos(library_vec.len())).unwrap(); word.clone() } pub fn add_a_word(&mut self, new_word : &Word) { let matcher_result = self.matcher.resolve(new_word.tag.clone()); // print!("word :{:?}, result :{:?}", &new_word, matcher_result); match matcher_result { Some(element_vec) => { for element in element_vec{ let library_result = self.library.get_mut(element.as_str()); match library_result { Some(ele_vec) => { ele_vec.push(new_word.literal.clone()); } None => { self.library.insert(element.clone(), vec!(new_word.literal.clone())); } } } } None => {} } } } #[derive(Debug)] struct RandomResolver { rng : oorandom::Rand64, } impl RandomResolver { pub fn from_seed(seed : u128) -> Self { let rng = oorandom::Rand64::new(seed); RandomResolver{rng : rng} } fn resolve_pos(&mut self, vec : &Vec<f64>) -> usize { let float_result = self.rng.rand_float(); let mut sum = 0.0f64; let size = vec.len(); for i in 0..size { sum += vec[i]; if sum > float_result { return i; } } return size - 1; // clamp to the last index: float rounding can leave sum < draw } pub fn get_pos(&mut self, size : usize) -> usize { let float_result = self.rng.rand_float(); (size as f64 * float_result) as usize } } #[derive(Debug, Clone)] enum SentanceItem { Element(String), // element type to be resolved Word(String), Icon(char), } #[derive(Debug, Clone)] struct SentanceNode { item : SentanceItem, next : Option<Box<SentanceNode>>, } impl SentanceNode { pub fn element(element_name : &str) -> SentanceNode { SentanceNode { item : SentanceItem::Element(String::from(element_name)), next : None, } } pub fn icon(icon : char) -> SentanceNode{ SentanceNode { item : SentanceItem::Icon(icon), next : None, } } pub fn word(word : &str) -> SentanceNode{ SentanceNode { item : SentanceItem::Word(String::from(word)), next : None, } } // insert a node, panic if already has one pub fn next(mut self, next : SentanceNode) -> Self { match &self.next { Some(_) => { panic!("node {:?} already has a next node", &self); } None => { self.next = Some(Box::new(next)); } } self } // get the result from element
fn resolve(&mut self, resolver : &mut RandomResolver, dict : &Directionary) { match &self.item { SentanceItem::Element(element) => { self.item = SentanceItem::Word( dict.find_a_word(element.as_str(), resolver)); } _ => {} } } pub fn resolve_sentance(&mut self, resolver : &mut RandomResolver, dict : &Directionary) { self.resolve(resolver, dict); match &mut self.next { Some(node) => {node.resolve_sentance(resolver, dict);} None => {} } } pub fn to_string(self) -> String { let mut string = String::new(); match self.item { SentanceItem::Word(word) => string.push_str(word.as_str()), SentanceItem::Icon(icon) => string.push(icon), _ => {} } match self.next { Some(node) => string.push_str(node.to_string().as_str()), None => {} } string } } #[derive(Debug)] struct ComedyWriter { possibilitys : Vec<f64>, sentances : Vec<Box<SentanceNode>>, resolver : RandomResolver, directionary : Directionary, } impl ComedyWriter { pub fn from_seed(random_seed : u128, highest_frequency : Option<u32>, lowest_frequency : Option<u32>) -> ComedyWriter { ComedyWriter{ possibilitys : Vec::new(), sentances : Vec::new(), resolver : RandomResolver::from_seed(random_seed), directionary : Directionary::from_default( highest_frequency, lowest_frequency), } } pub fn add_node(&mut self, sentance : SentanceNode, posssibility : f64) -> &mut Self { self.sentances.push(Box::new(sentance)); self.possibilitys.push(posssibility); self } fn normalize(&mut self) { let mut sum = 0.0f64; for chance in &self.possibilitys { sum += chance; } for chance in &mut self.possibilitys { *chance /= sum; } } pub fn write(&mut self, number : u32) -> String { self.normalize(); let mut article = String::new(); for _ in 0..number { let pos = self.resolver.resolve_pos(&self.possibilitys); let mut sentance = self.sentances[pos].clone(); sentance.resolve_sentance(&mut self.resolver, &self.directionary); let string = sentance.to_string(); article.push_str(string.as_str()); } article } } fn main() { let mut writer = ComedyWriter::from_seed( 65536, None, None); writer .add_node(sentance
[element ="Who"][word = "是从"][element = "Location"][word="来的."] ), 1.8) .add_node(sentance!( [element="AskWho"][word="在"][element ="Time"][element="TranstiveVerb"] [element="Adjective"][word="的"][element="GenericNoun"][icon='?'] ), 0.4) .add_node(sentance!( [word = "建议"][element = "Location"][word="的"][element="Name"] [element="Time"][word="就"][element="IntranstiveVerb"][icon='.'] ), 0.2); let result = writer.write(50); println!("{}",result); }
!(
identifier_name
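ComedyWriter::write above picks a sentence template by normalizing the weights and walking a running sum until it passes a uniform draw from oorandom. A standalone sketch of that selection, assuming the oorandom crate from the file above; it scales the draw by the total instead of normalizing in place, and the final fallback clamps to the last index for the rare draw where floating-point rounding leaves the running sum just below the total:

/// Weighted pick: walk the cumulative sum of `weights` until it passes a
/// uniform draw -- the same scheme `normalize` + `resolve_pos` implement.
fn weighted_pick(rng: &mut oorandom::Rand64, weights: &[f64]) -> usize {
    let total: f64 = weights.iter().sum();
    let draw = rng.rand_float() * total; // rand_float() is uniform in [0, 1)
    let mut running = 0.0;
    for (i, w) in weights.iter().enumerate() {
        running += w;
        if running > draw {
            return i;
        }
    }
    weights.len() - 1 // numerical safety: fall back to the last template
}

fn main() {
    let mut rng = oorandom::Rand64::new(65536);
    let weights = [1.8, 0.4, 0.2]; // the template weights used in main() above
    let mut counts = [0usize; 3];
    for _ in 0..10_000 {
        counts[weighted_pick(&mut rng, &weights)] += 1;
    }
    println!("picks per template: {:?}", counts); // roughly 7500 / 1670 / 830
}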
main.rs
use oorandom; #[test] fn research_on_directionary() { // Part-of-speech classification: find the tags in the .dic file and 10 example ids per tag use std::fs; use std::collections::HashMap; let mut hash : HashMap<&str, Vec<&str>> = HashMap::new(); let raw_bytes = fs::read_to_string("resources/ansj_seg-master/default.dic").expect("failed to open directionary file"); let filterd_bytes = raw_bytes.replace(&['0','1','2','3','4','5','6','7','8','9','\n'][..], ""); let seperate_words : Vec<&str> = filterd_bytes.split('\t').collect(); let mut i : u64 = 0; let mut last_word : &str = ""; for tag in seperate_words {
continue; } let hash_get = hash.get_mut(tag); match hash_get { None => { let vec = vec!(last_word); hash.insert(tag, vec); } Some(vec) => { if vec.len() >= 10 {continue;} vec.push(last_word); } } } println!("{:?}", hash); } #[test] fn add_directionary() { let mut random_word = Word::from_literal("okay"); random_word.set_tag("v"); let mut random_wor2 = Word::from_literal("no"); random_wor2.set_tag("l"); let mut directionary = Directionary::new(); directionary.add_a_word(&random_word); directionary.add_a_word(&random_wor2); println!("{:?}", directionary); } #[test] fn create_directionary() { let directionary = Directionary::from_default(None, Some(500)); println!("generation finished!"); println!("result {:?}", directionary); } #[test] fn create_sentance() { let mut resolver = RandomResolver::from_seed(64u128); let directionary = Directionary::from_default(None, None); let mut sentance = SentanceNode::word("其实") .next( SentanceNode::icon(',') .next( SentanceNode::element("Noun") .next( SentanceNode::word("是") .next( SentanceNode::element("Noun") .next( SentanceNode::icon(',') .next( SentanceNode::word("你知道吗?") )))))); sentance.resolve_sentance(&mut resolver, &directionary); let output = sentance.to_string(); println!("result {}", output); } #[test] fn multi_sentance() { let mut resolver = RandomResolver::from_seed(1024u128); let directionary = Directionary::from_default(None, None); let generic_sentance = sentance!( [element="Who"][word="是"][element= "Adjective"][word="的"][element="Adjective"][element="IntranstiveVerb"][word="器."] ); for _ in 1..255 { let mut sentance = generic_sentance.clone(); sentance.resolve_sentance(&mut resolver, &directionary); let output = sentance.to_string(); println!("result {}", output); } } #[macro_export] macro_rules! sentance { ([$function:ident=$string:expr]$([$nfunction:ident=$nstring:expr])+ ) => { SentanceNode::$function($string).next(sentance!($([$nfunction=$nstring])+)) }; ([$function:ident=$string:expr]) => { SentanceNode::$function($string) }; } #[derive(Debug)] struct Word { pub tag : String, pub literal : String, } impl Word { pub fn from_literal(init_literal : &str) -> Word { Word{ tag : String::new(), literal : String::from(init_literal) } } pub fn set_tag(&mut self, new_tag : &str) { self.tag = String::from(new_tag); } } #[derive(Debug)] struct TagMatcher { pub matchers_pool : std::collections::BTreeMap<String, Vec<String>>, } impl TagMatcher { pub fn new() -> TagMatcher { use std::collections::BTreeMap; TagMatcher{matchers_pool : BTreeMap::new()} } fn add(mut self, tag : &'static str, matchers : Vec<&'static str>) -> Self { let matcher_result = self.matchers_pool.get(tag); match matcher_result { Some(_) => { } None => { self.matchers_pool.insert(String::from(tag), Vec::new()); } } let matcher_vec = self.matchers_pool.get_mut(tag).unwrap(); for matcher in matchers { matcher_vec.push(String::from(matcher)); } self } pub fn resolve(&self, tag : String) -> Option<Vec<String>> { let mut ret_vec : Option<Vec<String>> = None; for (element, matchers) in &self.matchers_pool { for matcher in matchers { if *matcher == tag { match &mut ret_vec { Some(vec) => { vec.push(element.clone()); } None => { let vec = vec!(element.clone()); ret_vec = Some(vec); } } } } } ret_vec } } #[derive(Debug)] struct Directionary { // verbs : Vec<String>, // nouns : Vec<String>, // advs : Vec<String>, // adjs : Vec<String>, library : std::collections::HashMap<String, Vec<String>>, matcher : TagMatcher, } impl Directionary { pub fn new() -> Directionary { use 
std::collections::HashMap; // TODO FINISH THIS let tag_matcher = TagMatcher::new() .add("Location", vec!("nis","ntcb","ntcf","s","na","ns","ntc","nts","nth","ntch","nto","nit","nt","nsf","nz","f","ntu","nsf",)) .add("Name", vec!("nr","nba","nrfg","nrf","nrj",)) .add("Time", vec!("tg","t","Mg")) .add("GenericNoun", vec!("gb","vf","nnd","nhd","nmc","nbc","gc","nhm","ng","gg","gi","n","gp","gm","nnt",)) .add("AllNouns", vec!("vf","nis","ntcb","ntcf","gb","nhd","j","nr","nba","s","nmc","nnd","nrfg","na","ns","ntc","nbc","gc","nts","nth","x","ntch","nto","nit","nrf","nhm","ng","nrt","ntu","gg","gi","nt","nsf","nrj","nz","f","n","gp","gm","tg","nnt","t","Mg",)) .add("Numeral",vec!("m")) .add("Quantifier", vec!("qv","q","qt",)) .add("IndependentVerb", vec!("vl",)) .add("TranstiveVerb", vec!("pba","pbei","vyou","vshi","vd","vx","vq","vi","vn",)) .add("IntranstiveVerb", vec!("vg","uguo","v","vf",)) .add("AllVerbs", vec!("vyou","uguo","vd","v","vx","vi","pba","pbei","vl","vg","vq","vn","vshi","vf",)) .add("Adjective", vec!("b","mq","bl","a","z","al","ag","an","œa",)) .add("Adverb",vec!("b","bl","ad","d","dl","œa","dg",)) .add("AllPronouns", vec!("rr","rz","ryt","Rg","ry","rys","rzs","rzt","ryv","k",)) .add("AskWhen", vec!("rzt")) .add("When", vec!("ryt")) .add("AskHow", vec!("ryv")) .add("AskWhere", vec!("rys")) .add("Where", vec!("rzs")) .add("Who", vec!("rr","rz","Rg",)) .add("AskWho", vec!("ry")) .add("Conjunction", vec!("rzv","u","c","cc",)) .add("Preposition", vec!("r","uyy","udeng","p","udh",)) .add("Particle", vec!("uzhe","uls","ule","usuo","ulian","uzhi","ude",)) .add("AllModals", vec!("y","e","o",)) .add("PostFixModal", vec!("y")) .add("PreFixModal", vec!("e")) .add("Onomatopoeia", vec!("o")); let new_library = HashMap::new(); Directionary { library : new_library, matcher : tag_matcher, } } pub fn from_default(highest_input : Option<u32>, lowest_input : Option<u32>) -> Directionary { // TODO use std::fs; let highest_frequency : u32 = match highest_input { Some(frequency) => frequency, None => 2147483647, }; let lowest_frequency : u32 = match lowest_input { Some(frequency) => frequency, None => 0, }; let mut directionary = Directionary::new(); let raw_bytes = fs::read_to_string("resources/ansj_seg-master/default.dic").expect("failed to open directionary file"); let filterd_bytes = raw_bytes.replace(&['\n'][..], "\t"); let seperate_words : Vec<&str> = filterd_bytes.split('\t').collect(); let mut i : u64 = 0; let mut last_word : &str = ""; let mut last_tag : &str = ""; let mut frequency; for tag in seperate_words { i = i + 1; // column 1 is the word, column 2 the tag, column 3 the frequency let count = i % 3; match count { 0 => { // println!("tag{:?}, result{:?}", &tag, tag.parse::<u32>()); frequency = tag.parse::<u32>().unwrap(); }, // third column 1 => { last_word = tag; continue; }, // first column _ => { last_tag = tag; continue; }, // second column } if (frequency > highest_frequency) || (frequency < lowest_frequency) { continue; } let mut word = Word::from_literal(last_word); word.set_tag(last_tag); directionary.add_a_word(&word); } directionary } pub fn find_a_word(&self, element : &str, resolver : &mut RandomResolver) -> String { let library_vec = self.library.get(element); match library_vec { Some(_) => {} None => {panic!("failed to get element type {}", element);} } let library_vec = library_vec.unwrap(); let word = library_vec.get( resolver.get_pos(library_vec.len())).unwrap(); word.clone() } pub fn add_a_word(&mut self, new_word : &Word) { let matcher_result = self.matcher.resolve(new_word.tag.clone()); // print!("word :{:?}, result
:{:?}", &new_word, matcher_result); match matcher_result { Some(element_vec) => { for element in element_vec{ let library_result = self.library.get_mut(element.as_str()); match library_result { Some(ele_vec) => { ele_vec.push(new_word.literal.clone()); } None => { self.library.insert(element.clone(), vec!(new_word.literal.clone())); } } } } None => {} } } } #[derive(Debug)] struct RandomResolver { rng : oorandom::Rand64, } impl RandomResolver { pub fn from_seed(seed : u128) -> Self { let rng = oorandom::Rand64::new(seed); RandomResolver{rng : rng} } fn resolve_pos(&mut self, vec : &Vec<f64>) -> usize { let float_result = self.rng.rand_float(); let mut sum = 0.0f64; let size = vec.len(); for i in 0..size { sum += vec[i]; if sum > float_result { return i; } } return size; } pub fn get_pos(&mut self, size : usize) -> usize { let float_result = self.rng.rand_float(); (size as f64 * float_result) as usize } } #[derive(Debug, Clone)] enum SentanceItem { Element(String), // element type to be resolve Word(String), Icon(char), } #[derive(Debug, Clone)] struct SentanceNode { item : SentanceItem, next : Option<Box<SentanceNode>>, } impl SentanceNode { pub fn element(element_name : &str) -> SentanceNode { SentanceNode { item : SentanceItem::Element(String::from(element_name)), next : None, } } pub fn icon(icon : char) -> SentanceNode{ SentanceNode { item : SentanceItem::Icon(icon), next : None, } } pub fn word(word : &str) -> SentanceNode{ SentanceNode { item : SentanceItem::Word(String::from(word)), next : None, } } // insert a node, panic if already has one pub fn next(mut self, next : SentanceNode) -> Self { match &self.next { Some(_) => { panic!("node {:?} already have a next node", &self); } None => { self.next = Some(Box::new(next)); } } self } // get the result from element fn resolve(&mut self, resolver : &mut RandomResolver, dict : &Directionary) { match &self.item { SentanceItem::Element(element) => { self.item = SentanceItem::Word( dict.find_a_word(element.as_str(), resolver)); } _ => {} } } pub fn resolve_sentance(&mut self, resolver : &mut RandomResolver, dict : &Directionary) { self.resolve(resolver, dict); match &mut self.next { Some(node) => {node.resolve_sentance(resolver, dict);} None => {} } } pub fn to_string(self) -> String { let mut string = String::new(); match self.item { SentanceItem::Word(word) => string.push_str(word.as_str()), SentanceItem::Icon(icon) => string.push(icon), _ => {} } match self.next { Some(node) => string.push_str(node.to_string().as_str()), None => {} } string } } #[derive(Debug)] struct ComedyWriter { possibilitys : Vec<f64>, sentances : Vec<Box<SentanceNode>>, resolver : RandomResolver, directionary : Directionary, } impl ComedyWriter { pub fn from_seed(random_seed : u128, highest_frequency : Option<u32>, lowest_frequency : Option<u32>) -> ComedyWriter { ComedyWriter{ possibilitys : Vec::new(), sentances : Vec::new(), resolver : RandomResolver::from_seed(random_seed), directionary : Directionary::from_default( highest_frequency, lowest_frequency), } } pub fn add_node(&mut self, sentance : SentanceNode, posssibility : f64) -> &mut Self { self.sentances.push(Box::new(sentance)); self.possibilitys.push(posssibility); self } fn normalize(&mut self) { let mut sum = 0.0f64; for chance in &self.possibilitys { sum += chance; } for chance in &mut self.possibilitys { *chance /= sum; } } pub fn write(&mut self, number : u32) -> String { self.normalize(); let mut article = String::new(); for _ in 0..number { let pos = self.resolver.resolve_pos(&self.possibilitys); 
let mut sentance = self.sentances[pos].clone(); sentance.resolve_sentance(&mut self.resolver, &self.directionary); let string = sentance.to_string(); article.push_str(string.as_str()); } article } } fn main() { let mut writer = ComedyWriter::from_seed( 65536, None, None); writer .add_node(sentance!( [element ="Who"][word = "是从"][element = "Location"][word="来的."] ), 1.8) .add_node(sentance!( [element="AskWho"][word="在"][element ="Time"][element="TranstiveVerb"] [element="Adjective"][word="的"][element="GenericNoun"][icon='?'] ), 0.4) .add_node(sentance!( [word = "建议"][element = "Location"][word="的"][element="Name"] [element="Time"][word="就"][element="IntranstiveVerb"][icon='.'] ), 0.2); let result = writer.write(50); println!("{}",result); }
i = i + 1; // odd-numbered columns are words, even-numbered columns are tags if i % 2 != 0 { last_word = tag;
random_line_split
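The dictionary loaders in the record above walk one flat list of tab-separated fields and dispatch on the column index modulo the record width. A minimal standalone sketch of that column-cycling parse, assuming the same word<TAB>tag<TAB>frequency layout as default.dic; the sample input here is made up:

fn main() {
    let raw = "你好\tn\t120\n走\tv\t80";
    // Flatten newlines into tabs so every field lands in a single list,
    // mirroring the replace/split pair used in from_default above.
    let flat = raw.replace('\n', "\t");
    let fields: Vec<&str> = flat.split('\t').collect();
    let (mut word, mut tag) = ("", "");
    for (i, field) in fields.into_iter().enumerate() {
        match (i + 1) % 3 {
            1 => word = field, // column 1: word
            2 => tag = field,  // column 2: tag
            _ => {             // column 3: frequency closes one record
                let freq: u32 = field.parse().unwrap();
                println!("{} ({}) -> {}", word, tag, freq);
            }
        }
    }
}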
main.rs
use oorandom; #[test] fn research_on_directionary() { // part-of-speech survey: find the tags used in the .dic file and up to 10 sample words per tag use std::fs; use std::collections::HashMap; let mut hash : HashMap<&str, Vec<&str>> = HashMap::new(); let raw_bytes = fs::read_to_string("resources/ansj_seg-master/default.dic").expect("failed to open directionary file"); let filterd_bytes = raw_bytes.replace(&['0','1','2','3','4','5','6','7','8','9','\n'][..], ""); let seperate_words : Vec<&str> = filterd_bytes.split('\t').collect(); let mut i : u64 = 0; let mut last_word : &str = ""; for tag in seperate_words { i = i + 1; // odd-numbered columns are words, even-numbered columns are tags if i % 2 != 0 { last_word = tag; continue; } let hash_get = hash.get_mut(tag); match hash_get { None => { let vec = vec!(last_word); hash.insert(tag, vec); } Some(vec) => { if vec.len() >= 10 {continue;} vec.push(last_word); } } } println!("{:?}", hash); } #[test] fn add_directionary() { let mut random_word = Word::from_literal("okay"); random_word.set_tag("v"); let mut random_wor2 = Word::from_literal("no"); random_wor2.set_tag("l"); let mut directionary = Directionary::new(); directionary.add_a_word(&random_word); directionary.add_a_word(&random_wor2); println!("{:?}", directionary); } #[test] fn create_directionary() { let directionary = Directionary::from_default(None, Some(500)); println!("generation finished!"); println!("result {:?}", directionary); } #[test] fn create_sentance() { let mut resolver = RandomResolver::from_seed(64u128); let directionary = Directionary::from_default(None, None); let mut sentance = SentanceNode::word("其实") .next( SentanceNode::icon(',') .next( SentanceNode::element("Noun") .next( SentanceNode::word("是") .next( SentanceNode::element("Noun") .next( SentanceNode::icon(',') .next( SentanceNode::word("你知道吗?") )))))); sentance.resolve_sentance(&mut resolver, &directionary); let output = sentance.to_string(); println!("result {}", output); } #[test] fn multi_sentance() { let mut resolver = RandomResolver::from_seed(1024u128); let directionary = Directionary::from_default(None, None); let generic_sentance = sentance!( [element="Who"][word="是"][element= "Adjective"][word="的"][element="Adjective"][element="IntranstiveVerb"][word="器."] ); for _ in 1..255 { let mut sentance = generic_sentance.clone(); sentance.resolve_sentance(&mut resolver, &directionary); let output = sentance.to_string(); println!("result {}", output); } } #[macro_export] macro_rules!
sentance { ([$function:ident=$string:expr]$([$nfunction:ident=$nstring:expr])+ ) => { SentanceNode::$function($string).next(sentance!($([$nfunction=$nstring])+)) }; ([$function:ident=$string:expr]) => { SentanceNode::$function($string) }; } #[derive(Debug)] struct Word { pub tag : String, pub literal : String, } impl Word { pub fn from_literal(init_literal : &str) -> Word { Word{ tag : String::new(), literal : String::from(init_literal) } } pub fn set_tag(&mut self, new_tag : &str) { self.tag = String::from(new_tag); } } #[derive(Debug)] struct TagMatcher { pub matchers_pool : std::collections::BTreeMap<String, Vec<String>>, } impl TagMatcher { pub fn new() -> TagMatcher { use std::collections::BTreeMap; TagMatcher{matchers_pool : BTreeMap::new()} } fn add(mut self, tag : &'static str, matchers : Vec<&'static str>) -> Self { let matcher_result = self.matchers_pool.get(tag); match matcher_result { Some(_) => { } None => { self.matchers_pool.insert(String::from(tag), Vec::new()); } } let matcher_vec = self.matchers_pool.get_mut(tag).unwrap(); for matcher in matchers { matcher_vec.push(String::from(matcher)); } self } pub fn resolve(&self, tag : String) -> Option<Vec<String>> { let mut ret_vec : Option<Vec<String>> = None; for (element, matchers) in &self.matchers_pool { for matcher in matchers { if *matcher == tag { match &mut ret_vec { Some(vec) => { vec.push(element.clone()); } None => { let vec = vec!(element.clone()); ret_vec = Some(vec); } } } } } ret_vec } } #[derive(Debug)] struct Directionary { // verbs : Vec<String>, // nouns : Vec<String>, // advs : Vec<String>, // adjs : Vec<String>, library : std::collections::HashMap<String, Vec<String>>, matcher : TagMatcher, } impl Directionary { pub fn new() -> Directionary { use std::collections::HashMap; // TODO FINISH THIS let tag_matcher = TagMatcher::new() .add("Location", vec!("nis","ntcb","ntcf","s","na","ns","ntc","nts","nth","ntch","nto","nit","nt","nsf","nz","f","ntu","nsf",)) .add("Name", vec!("nr","nba","nrfg","nrf","nrj",)) .add("Time", vec!("tg","t","Mg")) .add("GenericNoun", vec!("gb","vf","nnd","nhd","nmc","nbc","gc","nhm","ng","gg","gi","n","gp","gm","nnt",)) .add("AllNouns", vec!("vf","nis","ntcb","ntcf","gb","nhd","j","nr","nba","s","nmc","nnd","nrfg","na","ns","ntc","nbc","gc","nts","nth","x","ntch","nto","nit","nrf","nhm","ng","nrt","ntu","gg","gi","nt","nsf","nrj","nz","f","n","gp","gm","tg","nnt","t","Mg",)) .add("Numeral",vec!("m")) .add("Quantifier", vec!("qv","q","qt",)) .add("IndependentVerb", vec!("vl",)) .add("TranstiveVerb", vec!("pba","pbei","vyou","vshi","vd","vx","vq","vi","vn",)) .add("IntranstiveVerb", vec!("vg","uguo","v","vf",)) .add("AllVerbs", vec!("vyou","uguo","vd","v","vx","vi","pba","pbei","vl","vg","vq","vn","vshi","vf",)) .add("Adjective", vec!("b","mq","bl","a","z","al","ag","an","œa",)) .add("Adverb",vec!("b","bl","ad","d","dl","œa","dg",)) .add("AllPronouns", vec!("rr","rz","ryt","Rg","ry","rys","rzs","rzt","ryv","k",)) .add("AskWhen", vec!("rzt")) .add("When", vec!("ryt")) .add("AskHow", vec!("ryv")) .add("AskWhere", vec!("rys")) .add("Where", vec!("rzs")) .add("Who", vec!("rr","rz","Rg",)) .add("AskWho", vec!("ry")) .add("Conjunction", vec!("rzv","u","c","cc",)) .add("Preposition", vec!("r","uyy","udeng","p","udh",)) .add("Particle", vec!("uzhe","uls","ule","usuo","ulian","uzhi","ude",)) .add("AllModals", vec!("y","e","o",)) .add("PostFixModal", vec!("y")) .add("PreFixModal", vec!("e")) .add("Onomatopoeia", vec!("o")); let new_library = HashMap::new(); Directionary { library : new_library, 
matcher : tag_matcher, } } pub fn from_default(highest_input : Option<u32>, lowest_input : Option<u32>) -> Directionary { // TODO use std::fs; let highest_frequency : u32 = match highest_input { Some(frequency) => frequency, None => 2147483647, }; let lowest_frequency : u32 = match lowest_input { Some(frequency) => frequency, None => 0, }; let mut directionary = Directionary::new(); let raw_bytes = fs::read_to_string("resources/ansj_seg-master/default.dic").expect("failed to open directionary file"); let filterd_bytes = raw_bytes.replace(&['\n'][..], "\t"); let seperate_words : Vec<&str> = filterd_bytes.split('\t').collect(); let mut i : u64 = 0; let mut last_word : &str = ""; let mut last_tag : &str = ""; let mut frequency; for tag in seperate_words { i = i + 1; // column 1 is the word, column 2 the tag, column 3 the frequency let count = i % 3; match count { 0 => { // println!("tag{:?}, result{:?}", &tag, tag.parse::<u32>()); frequency = tag.parse::<u32>().unwrap(); }, // column 3 1 => { last_word = tag; continue; }, // column 1 _ => { last_tag = tag; continue; }, // column 2 } if (frequency > highest_frequency) || (frequency < lowest_frequency) { continue; } let mut word = Word::from_literal(last_word); word.set_tag(last_tag); directionary.add_a_word(&word); } directionary } pub fn find_a_word(&self, element : &str, resolver : &mut RandomResolver) -> String { let library_vec = self.library.get(element); match library_vec { Some(_) => {} None => {panic!("failed to get element type {}", element);} } let library_vec = library_vec.unwrap(); let word = library_vec.get( resolver.get_pos(library_vec.len())).unwrap(); word.clone() } pub fn add_a_word(&mut self, new_word : &Word) { let matcher_result = self.matcher.resolve(new_word.tag.clone()); // print!("word :{:?}, result :{:?}",
#[derive(Debug)] struct RandomResolver { rng : oorandom::Rand64, } impl RandomResolver { pub fn from_seed(seed : u128) -> Self { let rng = oorandom::Rand64::new(seed); RandomResolver{rng : rng} } fn resolve_pos(&mut self, vec : &Vec<f64>) -> usize { let float_result = self.rng.rand_float(); let mut sum = 0.0f64; let size = vec.len(); for i in 0..size { sum += vec[i]; if sum > float_result { return i; } } return size; } pub fn get_pos(&mut self, size : usize) -> usize { let float_result = self.rng.rand_float(); (size as f64 * float_result) as usize } } #[derive(Debug, Clone)] enum SentanceItem { Element(String), // element type to be resolve Word(String), Icon(char), } #[derive(Debug, Clone)] struct SentanceNode { item : SentanceItem, next : Option<Box<SentanceNode>>, } impl SentanceNode { pub fn element(element_name : &str) -> SentanceNode { SentanceNode { item : SentanceItem::Element(String::from(element_name)), next : None, } } pub fn icon(icon : char) -> SentanceNode{ SentanceNode { item : SentanceItem::Icon(icon), next : None, } } pub fn word(word : &str) -> SentanceNode{ SentanceNode { item : SentanceItem::Word(String::from(word)), next : None, } } // insert a node, panic if already has one pub fn next(mut self, next : SentanceNode) -> Self { match &self.next { Some(_) => { panic!("node {:?} already have a next node", &self); } None => { self.next = Some(Box::new(next)); } } self } // get the result from element fn resolve(&mut self, resolver : &mut RandomResolver, dict : &Directionary) { match &self.item { SentanceItem::Element(element) => { self.item = SentanceItem::Word( dict.find_a_word(element.as_str(), resolver)); } _ => {} } } pub fn resolve_sentance(&mut self, resolver : &mut RandomResolver, dict : &Directionary) { self.resolve(resolver, dict); match &mut self.next { Some(node) => {node.resolve_sentance(resolver, dict);} None => {} } } pub fn to_string(self) -> String { let mut string = String::new(); match self.item { SentanceItem::Word(word) => string.push_str(word.as_str()), SentanceItem::Icon(icon) => string.push(icon), _ => {} } match self.next { Some(node) => string.push_str(node.to_string().as_str()), None => {} } string } } #[derive(Debug)] struct ComedyWriter { possibilitys : Vec<f64>, sentances : Vec<Box<SentanceNode>>, resolver : RandomResolver, directionary : Directionary, } impl ComedyWriter { pub fn from_seed(random_seed : u128, highest_frequency : Option<u32>, lowest_frequency : Option<u32>) -> ComedyWriter { ComedyWriter{ possibilitys : Vec::new(), sentances : Vec::new(), resolver : RandomResolver::from_seed(random_seed), directionary : Directionary::from_default( highest_frequency, lowest_frequency), } } pub fn add_node(&mut self, sentance : SentanceNode, posssibility : f64) -> &mut Self { self.sentances.push(Box::new(sentance)); self.possibilitys.push(posssibility); self } fn normalize(&mut self) { let mut sum = 0.0f64; for chance in &self.possibilitys { sum += chance; } for chance in &mut self.possibilitys { *chance /= sum; } } pub fn write(&mut self, number : u32) -> String { self.normalize(); let mut article = String::new(); for _ in 0..number { let pos = self.resolver.resolve_pos(&self.possibilitys); let mut sentance = self.sentances[pos].clone(); sentance.resolve_sentance(&mut self.resolver, &self.directionary); let string = sentance.to_string(); article.push_str(string.as_str()); } article } } fn main() { let mut writer = ComedyWriter::from_seed( 65536, None, None); writer .add_node(sentance!( [element ="Who"][word = "是从"][element =
"Location"][word="来的."] ), 1.8) .add_node(sentance!( [element="AskWho"][word="在"][element ="Time"][element="TranstiveVerb"] [element="Adjective"][word="的"][element="GenericNoun"][icon='?'] ), 0.4) .add_node(sentance!( [word = "建议"][element = "Location"][word="的"][element="Name"] [element="Time"][word="就"][element="IntranstiveVerb"][icon='.'] ), 0.2); let result = writer.write(50); println!("{}",result); }
&new_word, matcher_result); match matcher_result { Some(element_vec) => { for element in element_vec{ let library_result = self.library.get_mut(element.as_str()); match library_result { Some(ele_vec) => { ele_vec.push(new_word.literal.clone()); } None => { self.library.insert(element.clone(), vec!(new_word.literal.clone())); } } } } None => {} } } }
identifier_body
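ComedyWriter::write above picks a sentence template by walking the normalized weight vector until the running sum passes a random draw, via RandomResolver::resolve_pos. A dependency-free sketch of that cumulative walk; the weights are assumed to be normalized already, and the random draw is passed in explicitly instead of coming from oorandom:

fn resolve_pos(weights: &[f64], rand_float: f64) -> usize {
    let mut sum = 0.0f64;
    for (i, w) in weights.iter().enumerate() {
        sum += w;
        if sum > rand_float {
            return i;
        }
    }
    weights.len() // only reachable through floating-point rounding
}

fn main() {
    let weights = [0.5, 0.3, 0.2];
    assert_eq!(resolve_pos(&weights, 0.40), 0); // 0.5 > 0.40
    assert_eq!(resolve_pos(&weights, 0.60), 1); // 0.5 + 0.3 > 0.60
    assert_eq!(resolve_pos(&weights, 0.95), 2); // 0.5 + 0.3 + 0.2 > 0.95
}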
virtio_constants.rs
#![allow(dead_code)] #![allow(clippy::all)] // Copied from the ixy C driver // Amended with updates from the newer Virtio spec v1.1 /*- * BSD LICENSE * * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * VirtIO Header, located in BAR 0. */ pub const VIRTIO_PCI_HOST_FEATURES: u64 = 0; /* host's supported features (32bit, RO)*/ pub const VIRTIO_PCI_GUEST_FEATURES: u64 = 4; /* guest's supported features (32, RW) */ pub const VIRTIO_PCI_QUEUE_PFN: u64 = 8; /* physical address of VQ (32, RW) */ pub const VIRTIO_PCI_QUEUE_NUM: u64 = 12; /* number of ring entries (16, RO) */ pub const VIRTIO_PCI_QUEUE_SEL: u64 = 14; /* current VQ selection (16, RW) */ pub const VIRTIO_PCI_QUEUE_NOTIFY: u64 = 16; /* notify host regarding VQ (16, RW) */ pub const VIRTIO_PCI_STATUS: u64 = 18; /* device status register (8, RW) */ pub const VIRTIO_PCI_ISR: u64 = 19; /* interrupt status register, reading also clears the register (8, RO) */ /* Only if MSIX is enabled: */ pub const VIRTIO_MSI_CONFIG_VECTOR: u64 = 20; /* configuration change vector (16, RW) */ pub const VIRTIO_MSI_QUEUE_VECTOR: u64 = 22; /* vector for selected VQ notifications (16, RW) */ /* Status byte for guest to report progress. */ pub const VIRTIO_CONFIG_STATUS_RESET: u8 = 0x00; pub const VIRTIO_CONFIG_STATUS_ACK: u8 = 0x01; pub const VIRTIO_CONFIG_STATUS_DRIVER: u8 = 0x02; pub const VIRTIO_CONFIG_STATUS_DRIVER_OK: u8 = 0x04; pub const VIRTIO_CONFIG_STATUS_FEATURES_OK: u8 = 0x08; pub const VIRTIO_CONFIG_STATUS_FAILED: u8 = 0x80; /* * How many bits to shift physical queue address written to QUEUE_PFN. * 12 is historical, and due to x86 page size. */ pub const VIRTIO_PCI_QUEUE_ADDR_SHIFT: usize = 12; /* This marks a buffer as continuing via the next field. */ pub const VIRTQ_DESC_F_NEXT: u16 = 1; /* This marks a buffer as write-only (otherwise read-only). */ pub const VIRTQ_DESC_F_WRITE: u16 = 2; /* This means the buffer contains a list of buffer descriptors. 
*/ pub const VIRTQ_DESC_F_INDIRECT: u16 = 4; /* The feature bitmap for virtio net */ pub const VIRTIO_NET_F_CSUM: usize = 0; /* Host handles pkts w/ partial csum */ pub const VIRTIO_NET_F_GUEST_CSUM: usize = 1; /* Guest handles pkts w/ partial csum */ pub const VIRTIO_NET_F_MTU: usize = 3; /* Initial MTU advice. */ pub const VIRTIO_NET_F_MAC: usize = 5; /* Host has given MAC address. */ pub const VIRTIO_NET_F_GUEST_TSO4: usize = 7; /* Guest can handle TSOv4 in. */ pub const VIRTIO_NET_F_GUEST_TSO6: usize = 8; /* Guest can handle TSOv6 in. */ pub const VIRTIO_NET_F_GUEST_ECN: usize = 9; /* Guest can handle TSO[6] w/ ECN in. */ pub const VIRTIO_NET_F_GUEST_UFO: usize = 10; /* Guest can handle UFO in. */ pub const VIRTIO_NET_F_HOST_TSO4: usize = 11; /* Host can handle TSOv4 in. */ pub const VIRTIO_NET_F_HOST_TSO6: usize = 12; /* Host can handle TSOv6 in. */ pub const VIRTIO_NET_F_HOST_ECN: usize = 13; /* Host can handle TSO[6] w/ ECN in. */ pub const VIRTIO_NET_F_HOST_UFO: usize = 14; /* Host can handle UFO in. */ pub const VIRTIO_NET_F_MRG_RXBUF: usize = 15; /* Host can merge receive buffers. */
pub const VIRTIO_NET_F_STATUS: usize = 16; /* virtio_net_config.status available */ pub const VIRTIO_NET_F_CTRL_VQ: usize = 17; /* Control channel available */ pub const VIRTIO_NET_F_CTRL_RX: usize = 18; /* Control channel RX mode support */ pub const VIRTIO_NET_F_CTRL_VLAN: usize = 19; /* Control channel VLAN filtering */ pub const VIRTIO_NET_F_CTRL_RX_EXTRA: usize = 20; /* Extra RX mode control support */ pub const VIRTIO_NET_F_GUEST_ANNOUNCE: usize = 21; /* Guest can announce device on the network */ pub const VIRTIO_NET_F_MQ: usize = 22; /* Device supports Receive Flow Steering */ pub const VIRTIO_NET_F_CTRL_MAC_ADDR: usize = 23; /* Set MAC address */ /* Do we get callbacks when the ring is completely used, even if we've suppressed them? */ pub const VIRTIO_F_NOTIFY_ON_EMPTY: usize = 24; /* Can the device handle any descriptor layout? */ pub const VIRTIO_F_ANY_LAYOUT: usize = 27; /* We support indirect buffer descriptors */ pub const VIRTIO_RING_F_INDIRECT_DESC: usize = 28; pub const VIRTIO_F_VERSION_1: usize = 32; pub const VIRTIO_F_IOMMU_PLATFORM: usize = 33; /** * Control the RX mode, ie. promiscuous, allmulti, etc... * All commands require an "out" sg entry containing a 1 byte * state value, zero = disable, non-zero = enable. Commands * 0 and 1 are supported with the VIRTIO_NET_F_CTRL_RX feature. * Commands 2-5 are added with VIRTIO_NET_F_CTRL_RX_EXTRA. */ pub const VIRTIO_NET_CTRL_RX: u8 = 0; pub const VIRTIO_NET_CTRL_RX_PROMISC: u8 = 0; pub const VIRTIO_NET_CTRL_RX_ALLMULTI: u8 = 1; pub const VIRTIO_NET_CTRL_RX_ALLUNI: u8 = 2; pub const VIRTIO_NET_CTRL_RX_NOMULTI: u8 = 3; pub const VIRTIO_NET_CTRL_RX_NOUNI: u8 = 4; pub const VIRTIO_NET_CTRL_RX_NOBCAST: u8 = 5; pub const VIRTIO_NET_OK: u8 = 0; pub const VIRTIO_NET_ERR: u8 = 1; pub const VIRTIO_MAX_CTRL_DATA: usize = 2048; /** * This is the first element of the scatter-gather list. If you don't * specify GSO or CSUM features, you can simply ignore the header. */ #[repr(C)] pub struct virtio_net_hdr { pub flags: u8, pub gso_type: u8, pub hdr_len: u16, // Ethernet + IP + tcp/udp hdrs pub gso_size: u16, // Bytes to append to hdr_len per frame pub csum_start: u16, // Position to start checksumming from pub csum_offset: u16, // Offset after that to place checksum } pub const VIRTIO_NET_HDR_F_NEEDS_CSUM: u8 = 1; /**< Use csum_start,csum_offset*/ pub const VIRTIO_NET_HDR_F_DATA_VALID: u8 = 2; /**< Checksum is valid */ pub const VIRTIO_NET_HDR_GSO_NONE: u8 = 0; /**< Not a GSO frame */ pub const VIRTIO_NET_HDR_GSO_TCPV4: u8 = 1; /**< GSO frame, IPv4 TCP (TSO) */ pub const VIRTIO_NET_HDR_GSO_UDP: u8 = 3; /**< GSO frame, IPv4 UDP (UFO) */ pub const VIRTIO_NET_HDR_GSO_TCPV6: u8 = 4; /**< GSO frame, IPv6 TCP */ pub const VIRTIO_NET_HDR_GSO_ECN: u8 = 0x80; /**< TCP has ECN set */ /* The Host uses this in used->flags to advise the Guest: don't kick me * when you add a buffer. It's unreliable, so it's simply an * optimization. Guest will still kick if it's out of buffers. */ pub const VIRTQ_USED_F_NO_NOTIFY: u16 = 1; /* The Guest uses this in avail->flags to advise the Host: don't * interrupt me when you consume a buffer. It's unreliable, so it's * simply an optimization. */ pub const VIRTQ_AVAIL_F_NO_INTERRUPT: u16 = 1; use std::num::Wrapping; /* VirtIO ring descriptors: 16 bytes. * These can chain together via "next". */ #[repr(C)] #[derive(Default)] pub struct VirtqDesc { pub addr: usize, /* Address (guest-physical). */ pub len: u32, /* Length. */ pub flags: u16, /* The flags as indicated above. 
*/ pub next: u16, /* We chain unused descriptors via this. */ } #[repr(C)] pub struct VirtqAvail { pub flags: u16, pub idx: Wrapping<u16>, pub ring: [u16; 0], } #[repr(C)] pub struct VirtqUsed { pub flags: u16, pub idx: Wrapping<u16>, pub ring: [VirtqUsedElem; 0], } #[repr(C)] #[derive(Clone, Default)] pub struct VirtqUsedElem { /* Index of start of used descriptor chain. */ pub id: u16, pub _padding: u16, /* Total length of the descriptor chain which was written to. */ pub len: u32, } pub trait Ring { type Element; fn ring(&self) -> *const Self::Element; fn ring_mut(&mut self) -> *mut Self::Element; } impl Ring for VirtqAvail { type Element = u16; fn ring(&self) -> *const u16 { self.ring.as_ptr() } fn ring_mut(&mut self) -> *mut u16 { self.ring.as_mut_ptr() } } impl Ring for VirtqUsed { type Element = VirtqUsedElem; fn ring(&self) -> *const VirtqUsedElem { self.ring.as_ptr() } fn ring_mut(&mut self) -> *mut VirtqUsedElem { self.ring.as_mut_ptr() } } #[repr(C)] #[derive(Debug)] pub struct VirtioNetCtrl<T: VirtioNetCtrlCommand> { pub class: u8, pub command: u8, pub command_data: T, pub ack: u8, } impl<T: VirtioNetCtrlCommand> From<T> for VirtioNetCtrl<T> { fn from(command_data: T) -> VirtioNetCtrl<T> { VirtioNetCtrl { class: T::CLASS, command: T::COMMAND, command_data, ack: 0, } } } /// A specific command to be sent through the control queue (wrapped in a [`VirtioNetCtrl`]) pub trait VirtioNetCtrlCommand { const CLASS: u8; const COMMAND: u8; } #[derive(Debug)] pub struct VirtioNetCtrlPromisc(u8); impl VirtioNetCtrlCommand for VirtioNetCtrlPromisc { const CLASS: u8 = VIRTIO_NET_CTRL_RX; const COMMAND: u8 = VIRTIO_NET_CTRL_RX_PROMISC; } impl VirtioNetCtrlPromisc { pub fn new(on: bool) -> VirtioNetCtrlPromisc { VirtioNetCtrlPromisc(on as u8) } } #[cfg(test)] mod tests { use super::*; use std::mem; #[test] fn static_type_sizes() { assert_eq!(mem::size_of::<VirtioNetCtrl<VirtioNetCtrlPromisc>>(), 4); } }
random_line_split
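The virtio feature constants in the record above are bit positions rather than masks, so a negotiated feature word is typically tested by shifting. A hedged sketch; `device_features` is a hypothetical value standing in for a read of the VIRTIO_PCI_HOST_FEATURES register, not something read from real hardware:

const VIRTIO_NET_F_MRG_RXBUF: usize = 15;
const VIRTIO_F_VERSION_1: usize = 32;

fn has_feature(features: u64, bit: usize) -> bool {
    (features >> bit) & 1 != 0
}

fn main() {
    let device_features: u64 =
        (1u64 << VIRTIO_NET_F_MRG_RXBUF) | (1u64 << VIRTIO_F_VERSION_1);
    assert!(has_feature(device_features, VIRTIO_NET_F_MRG_RXBUF));
    assert!(has_feature(device_features, VIRTIO_F_VERSION_1));
    assert!(!has_feature(device_features, 0)); // VIRTIO_NET_F_CSUM not offered
}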
virtio_constants.rs
#![allow(dead_code)] #![allow(clippy::all)] // Copied from the ixy C driver // Amended with updates from the newer Virtio spec v1.1 /*- * BSD LICENSE * * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * VirtIO Header, located in BAR 0. */ pub const VIRTIO_PCI_HOST_FEATURES: u64 = 0; /* host's supported features (32bit, RO)*/ pub const VIRTIO_PCI_GUEST_FEATURES: u64 = 4; /* guest's supported features (32, RW) */ pub const VIRTIO_PCI_QUEUE_PFN: u64 = 8; /* physical address of VQ (32, RW) */ pub const VIRTIO_PCI_QUEUE_NUM: u64 = 12; /* number of ring entries (16, RO) */ pub const VIRTIO_PCI_QUEUE_SEL: u64 = 14; /* current VQ selection (16, RW) */ pub const VIRTIO_PCI_QUEUE_NOTIFY: u64 = 16; /* notify host regarding VQ (16, RW) */ pub const VIRTIO_PCI_STATUS: u64 = 18; /* device status register (8, RW) */ pub const VIRTIO_PCI_ISR: u64 = 19; /* interrupt status register, reading also clears the register (8, RO) */ /* Only if MSIX is enabled: */ pub const VIRTIO_MSI_CONFIG_VECTOR: u64 = 20; /* configuration change vector (16, RW) */ pub const VIRTIO_MSI_QUEUE_VECTOR: u64 = 22; /* vector for selected VQ notifications (16, RW) */ /* Status byte for guest to report progress. */ pub const VIRTIO_CONFIG_STATUS_RESET: u8 = 0x00; pub const VIRTIO_CONFIG_STATUS_ACK: u8 = 0x01; pub const VIRTIO_CONFIG_STATUS_DRIVER: u8 = 0x02; pub const VIRTIO_CONFIG_STATUS_DRIVER_OK: u8 = 0x04; pub const VIRTIO_CONFIG_STATUS_FEATURES_OK: u8 = 0x08; pub const VIRTIO_CONFIG_STATUS_FAILED: u8 = 0x80; /* * How many bits to shift physical queue address written to QUEUE_PFN. * 12 is historical, and due to x86 page size. */ pub const VIRTIO_PCI_QUEUE_ADDR_SHIFT: usize = 12; /* This marks a buffer as continuing via the next field. */ pub const VIRTQ_DESC_F_NEXT: u16 = 1; /* This marks a buffer as write-only (otherwise read-only). */ pub const VIRTQ_DESC_F_WRITE: u16 = 2; /* This means the buffer contains a list of buffer descriptors. 
*/ pub const VIRTQ_DESC_F_INDIRECT: u16 = 4; /* The feature bitmap for virtio net */ pub const VIRTIO_NET_F_CSUM: usize = 0; /* Host handles pkts w/ partial csum */ pub const VIRTIO_NET_F_GUEST_CSUM: usize = 1; /* Guest handles pkts w/ partial csum */ pub const VIRTIO_NET_F_MTU: usize = 3; /* Initial MTU advice. */ pub const VIRTIO_NET_F_MAC: usize = 5; /* Host has given MAC address. */ pub const VIRTIO_NET_F_GUEST_TSO4: usize = 7; /* Guest can handle TSOv4 in. */ pub const VIRTIO_NET_F_GUEST_TSO6: usize = 8; /* Guest can handle TSOv6 in. */ pub const VIRTIO_NET_F_GUEST_ECN: usize = 9; /* Guest can handle TSO[6] w/ ECN in. */ pub const VIRTIO_NET_F_GUEST_UFO: usize = 10; /* Guest can handle UFO in. */ pub const VIRTIO_NET_F_HOST_TSO4: usize = 11; /* Host can handle TSOv4 in. */ pub const VIRTIO_NET_F_HOST_TSO6: usize = 12; /* Host can handle TSOv6 in. */ pub const VIRTIO_NET_F_HOST_ECN: usize = 13; /* Host can handle TSO[6] w/ ECN in. */ pub const VIRTIO_NET_F_HOST_UFO: usize = 14; /* Host can handle UFO in. */ pub const VIRTIO_NET_F_MRG_RXBUF: usize = 15; /* Host can merge receive buffers. */ pub const VIRTIO_NET_F_STATUS: usize = 16; /* virtio_net_config.status available */ pub const VIRTIO_NET_F_CTRL_VQ: usize = 17; /* Control channel available */ pub const VIRTIO_NET_F_CTRL_RX: usize = 18; /* Control channel RX mode support */ pub const VIRTIO_NET_F_CTRL_VLAN: usize = 19; /* Control channel VLAN filtering */ pub const VIRTIO_NET_F_CTRL_RX_EXTRA: usize = 20; /* Extra RX mode control support */ pub const VIRTIO_NET_F_GUEST_ANNOUNCE: usize = 21; /* Guest can announce device on the network */ pub const VIRTIO_NET_F_MQ: usize = 22; /* Device supports Receive Flow Steering */ pub const VIRTIO_NET_F_CTRL_MAC_ADDR: usize = 23; /* Set MAC address */ /* Do we get callbacks when the ring is completely used, even if we've suppressed them? */ pub const VIRTIO_F_NOTIFY_ON_EMPTY: usize = 24; /* Can the device handle any descriptor layout? */ pub const VIRTIO_F_ANY_LAYOUT: usize = 27; /* We support indirect buffer descriptors */ pub const VIRTIO_RING_F_INDIRECT_DESC: usize = 28; pub const VIRTIO_F_VERSION_1: usize = 32; pub const VIRTIO_F_IOMMU_PLATFORM: usize = 33; /** * Control the RX mode, ie. promiscuous, allmulti, etc... * All commands require an "out" sg entry containing a 1 byte * state value, zero = disable, non-zero = enable. Commands * 0 and 1 are supported with the VIRTIO_NET_F_CTRL_RX feature. * Commands 2-5 are added with VIRTIO_NET_F_CTRL_RX_EXTRA. */ pub const VIRTIO_NET_CTRL_RX: u8 = 0; pub const VIRTIO_NET_CTRL_RX_PROMISC: u8 = 0; pub const VIRTIO_NET_CTRL_RX_ALLMULTI: u8 = 1; pub const VIRTIO_NET_CTRL_RX_ALLUNI: u8 = 2; pub const VIRTIO_NET_CTRL_RX_NOMULTI: u8 = 3; pub const VIRTIO_NET_CTRL_RX_NOUNI: u8 = 4; pub const VIRTIO_NET_CTRL_RX_NOBCAST: u8 = 5; pub const VIRTIO_NET_OK: u8 = 0; pub const VIRTIO_NET_ERR: u8 = 1; pub const VIRTIO_MAX_CTRL_DATA: usize = 2048; /** * This is the first element of the scatter-gather list. If you don't * specify GSO or CSUM features, you can simply ignore the header. 
*/ #[repr(C)] pub struct virtio_net_hdr { pub flags: u8, pub gso_type: u8, pub hdr_len: u16, // Ethernet + IP + tcp/udp hdrs pub gso_size: u16, // Bytes to append to hdr_len per frame pub csum_start: u16, // Position to start checksumming from pub csum_offset: u16, // Offset after that to place checksum } pub const VIRTIO_NET_HDR_F_NEEDS_CSUM: u8 = 1; /**< Use csum_start,csum_offset*/ pub const VIRTIO_NET_HDR_F_DATA_VALID: u8 = 2; /**< Checksum is valid */ pub const VIRTIO_NET_HDR_GSO_NONE: u8 = 0; /**< Not a GSO frame */ pub const VIRTIO_NET_HDR_GSO_TCPV4: u8 = 1; /**< GSO frame, IPv4 TCP (TSO) */ pub const VIRTIO_NET_HDR_GSO_UDP: u8 = 3; /**< GSO frame, IPv4 UDP (UFO) */ pub const VIRTIO_NET_HDR_GSO_TCPV6: u8 = 4; /**< GSO frame, IPv6 TCP */ pub const VIRTIO_NET_HDR_GSO_ECN: u8 = 0x80; /**< TCP has ECN set */ /* The Host uses this in used->flags to advise the Guest: don't kick me * when you add a buffer. It's unreliable, so it's simply an * optimization. Guest will still kick if it's out of buffers. */ pub const VIRTQ_USED_F_NO_NOTIFY: u16 = 1; /* The Guest uses this in avail->flags to advise the Host: don't * interrupt me when you consume a buffer. It's unreliable, so it's * simply an optimization. */ pub const VIRTQ_AVAIL_F_NO_INTERRUPT: u16 = 1; use std::num::Wrapping; /* VirtIO ring descriptors: 16 bytes. * These can chain together via "next". */ #[repr(C)] #[derive(Default)] pub struct VirtqDesc { pub addr: usize, /* Address (guest-physical). */ pub len: u32, /* Length. */ pub flags: u16, /* The flags as indicated above. */ pub next: u16, /* We chain unused descriptors via this. */ } #[repr(C)] pub struct VirtqAvail { pub flags: u16, pub idx: Wrapping<u16>, pub ring: [u16; 0], } #[repr(C)] pub struct VirtqUsed { pub flags: u16, pub idx: Wrapping<u16>, pub ring: [VirtqUsedElem; 0], } #[repr(C)] #[derive(Clone, Default)] pub struct VirtqUsedElem { /* Index of start of used descriptor chain. */ pub id: u16, pub _padding: u16, /* Total length of the descriptor chain which was written to. */ pub len: u32, } pub trait Ring { type Element; fn ring(&self) -> *const Self::Element; fn ring_mut(&mut self) -> *mut Self::Element; } impl Ring for VirtqAvail { type Element = u16; fn ring(&self) -> *const u16 { self.ring.as_ptr() } fn ring_mut(&mut self) -> *mut u16 { self.ring.as_mut_ptr() } } impl Ring for VirtqUsed { type Element = VirtqUsedElem; fn ring(&self) -> *const VirtqUsedElem { self.ring.as_ptr() } fn ring_mut(&mut self) -> *mut VirtqUsedElem { self.ring.as_mut_ptr() } } #[repr(C)] #[derive(Debug)] pub struct VirtioNetCtrl<T: VirtioNetCtrlCommand> { pub class: u8, pub command: u8, pub command_data: T, pub ack: u8, } impl<T: VirtioNetCtrlCommand> From<T> for VirtioNetCtrl<T> { fn
(command_data: T) -> VirtioNetCtrl<T> { VirtioNetCtrl { class: T::CLASS, command: T::COMMAND, command_data, ack: 0, } } } /// A specific command to be sent through the control queue (wrapped in a [`VirtioNetCtrl`]) pub trait VirtioNetCtrlCommand { const CLASS: u8; const COMMAND: u8; } #[derive(Debug)] pub struct VirtioNetCtrlPromisc(u8); impl VirtioNetCtrlCommand for VirtioNetCtrlPromisc { const CLASS: u8 = VIRTIO_NET_CTRL_RX; const COMMAND: u8 = VIRTIO_NET_CTRL_RX_PROMISC; } impl VirtioNetCtrlPromisc { pub fn new(on: bool) -> VirtioNetCtrlPromisc { VirtioNetCtrlPromisc(on as u8) } } #[cfg(test)] mod tests { use super::*; use std::mem; #[test] fn static_type_sizes() { assert_eq!(mem::size_of::<VirtioNetCtrl<VirtioNetCtrlPromisc>>(), 4); } }
from
identifier_name
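As the comment on VIRTIO_PCI_QUEUE_ADDR_SHIFT in the record above notes, the legacy interface communicates the queue address as a page frame number. A minimal sketch of that encoding; the physical address used here is made up for illustration:

const VIRTIO_PCI_QUEUE_ADDR_SHIFT: usize = 12;

// Convert a page-aligned guest-physical queue address into the PFN
// value written to the VIRTIO_PCI_QUEUE_PFN register.
fn queue_pfn(queue_phys_addr: u64) -> u32 {
    (queue_phys_addr >> VIRTIO_PCI_QUEUE_ADDR_SHIFT) as u32
}

fn main() {
    // 0x1234_5000 is 4 KiB page-aligned, so the PFN round-trips exactly.
    assert_eq!(queue_pfn(0x1234_5000), 0x12_345);
}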
main.rs
6], } #[derive(Clone, Copy, Debug)] #[repr(C, align(16))] struct PerInstanceConst { translate: nalgebra::Vector3<f32>, dir: u32, } #[derive(Clone, Copy, Debug)] #[repr(C, align(16))] struct PerInstance { color: nalgebra::Vector3<f32>, pad: u32, } #[derive(Debug)] struct Camera { view: nalgebra::Projective3<f32>, // proj: nalgebra::Perspective3<f32>, proj: nalgebra::Matrix4<f32>, } struct Scene<B: hal::Backend> { camera: Camera, object_mesh: Option<Mesh<B>>, per_instance_const: Vec<PerInstanceConst>, per_instance: Vec<PerInstance>, } const UNIFORM_SIZE: u64 = size_of::<UniformArgs>() as u64; const NUM_INSTANCES: u64 = 1024 * 1024; const PER_INSTANCE_CONST_SIZE: u64 = size_of::<PerInstanceConst>() as u64; const PER_INSTANCE_SIZE: u64 = size_of::<PerInstance>() as u64; const fn align_to(s: u64, align: u64) -> u64 { ((s - 1) / align + 1) * align } const fn buffer_const_size(align: u64) -> u64 { align_to(PER_INSTANCE_CONST_SIZE * NUM_INSTANCES, align) } const fn buffer_frame_size(align: u64) -> u64 { align_to(UNIFORM_SIZE + PER_INSTANCE_SIZE * NUM_INSTANCES, align) } const fn buffer_size(align: u64, frames: u64) -> u64 { buffer_const_size(align) + buffer_frame_size(align) * frames } const fn uniform_offset(index: usize, align: u64) -> u64 { buffer_const_size(align) + buffer_frame_size(align) * index as u64 } const fn per_instance_offset(index: usize, align: u64) -> u64 { uniform_offset(index, align) + UNIFORM_SIZE } #[derive(Debug, Default)] struct MeshRenderPipelineDesc; #[derive(Debug)] struct MeshRenderPipeline<B: hal::Backend> { align: u64, buffer: Escape<Buffer<B>>, sets: Vec<Escape<DescriptorSet<B>>>, } struct ProfileTimer { label: std::string::String, start: std::time::Instant, } impl ProfileTimer { fn start(label: &str) -> Self { ProfileTimer { label: label.into(), start: std::time::Instant::now(), } } } // impl Drop for ProfileTimer { // fn drop(&mut self) { // println!("{}: {:?}", self.label, self.start.elapsed()); // } // } impl<B> SimpleGraphicsPipelineDesc<B, Scene<B>> for MeshRenderPipelineDesc where B: hal::Backend, { type Pipeline = MeshRenderPipeline<B>; fn load_shader_set( &self, factory: &mut Factory<B>, _scene: &Scene<B>, ) -> rendy_shader::ShaderSet<B> { SHADERS.build(factory, Default::default()).unwrap() } fn vertices( &self, ) -> Vec<( Vec<hal::pso::Element<hal::format::Format>>, hal::pso::ElemStride, hal::pso::VertexInputRate, )> { return vec![ SHADER_REFLECTION .attributes(&["position"]) .unwrap() .gfx_vertex_input_desc(hal::pso::VertexInputRate::Vertex), SHADER_REFLECTION .attributes(&["translate", "dir"]) .unwrap() .gfx_vertex_input_desc(hal::pso::VertexInputRate::Instance(1)), SHADER_REFLECTION .attributes(&["color", "pad"]) .unwrap() .gfx_vertex_input_desc(hal::pso::VertexInputRate::Instance(1)), ]; } fn layout(&self) -> Layout { return SHADER_REFLECTION.layout().unwrap(); } fn build<'a>( self, ctx: &GraphContext<B>, factory: &mut Factory<B>, _queue: QueueId, scene: &Scene<B>, buffers: Vec<NodeBuffer>, images: Vec<NodeImage>, set_layouts: &[Handle<DescriptorSetLayout<B>>], ) -> Result<MeshRenderPipeline<B>, rendy_core::hal::pso::CreationError> { assert!(buffers.is_empty()); assert!(images.is_empty()); assert_eq!(set_layouts.len(), 1); let frames = ctx.frames_in_flight as _; let align = factory .physical() .limits() .min_uniform_buffer_offset_alignment; let mut buffer = factory .create_buffer( BufferInfo { size: buffer_size(align, frames) as u64, usage: hal::buffer::Usage::UNIFORM | hal::buffer::Usage::INDIRECT | hal::buffer::Usage::VERTEX, }, Dynamic, ) .unwrap(); 
let mut sets = Vec::new(); for index in 0..frames { unsafe { let set = factory .create_descriptor_set(set_layouts[0].clone()) .unwrap(); factory.write_descriptor_sets(Some(hal::pso::DescriptorSetWrite { set: set.raw(), binding: 0, array_offset: 0, descriptors: Some(hal::pso::Descriptor::Buffer( buffer.raw(), Some(uniform_offset(index as usize, align)) ..Some(uniform_offset(index as usize, align) + UNIFORM_SIZE), )), })); sets.push(set); } } if !scene.per_instance_const.is_empty()
Ok(MeshRenderPipeline { align, buffer, sets, }) } } fn model_transform() -> nalgebra::Matrix4<f32> { let rot = nalgebra::UnitQuaternion::identity(); nalgebra::Similarity3::from_parts(Vector3::new(0.5, 0.5, 0.0).into(), rot, 0.5).into() } fn model_transform2() -> [nalgebra::Matrix4<f32>; 6] { let z_pos = nalgebra::UnitQuaternion::identity(); let z_neg = nalgebra::UnitQuaternion::face_towards( &Vector3::new(0.0, 0.0, -1.0), &Vector3::new(0.0, 1.0, 0.0), ); let x_pos = nalgebra::UnitQuaternion::face_towards( &Vector3::new(1.0, 0.0, 0.0), &Vector3::new(0.0, 1.0, 0.0), ); let x_neg = nalgebra::UnitQuaternion::face_towards( &Vector3::new(-1.0, 0.0, 0.0), &Vector3::new(0.0, 1.0, 0.0), ); let y_pos = nalgebra::UnitQuaternion::face_towards( &Vector3::new(0.0, 1.0, 0.0), &Vector3::new(0.0, 0.0, 1.0), ); let y_neg = nalgebra::UnitQuaternion::face_towards( &Vector3::new(0.0, -1.0, 0.0), &Vector3::new(0.0, 0.0, -1.0), ); // let unit = 0.125; let unit = 0.125; let scale = 0.125; [ nalgebra::Similarity3::from_parts(Vector3::new(0.0, 0.0, unit).into(), z_pos, scale).into(), nalgebra::Similarity3::from_parts(Vector3::new(0.0, 0.0, -unit).into(), z_neg, scale) .into(), nalgebra::Similarity3::from_parts(Vector3::new(unit, 0.0, 0.0).into(), x_pos, scale).into(), nalgebra::Similarity3::from_parts(Vector3::new(-unit, 0.0, 0.0).into(), x_neg, scale) .into(), nalgebra::Similarity3::from_parts(Vector3::new(0.0, unit, 0.0).into(), y_pos, scale).into(), nalgebra::Similarity3::from_parts(Vector3::new(0.0, -unit, 0.0).into(), y_neg, scale) .into(), ] } impl<B> SimpleGraphicsPipeline<B, Scene<B>> for MeshRenderPipeline<B> where B: hal::Backend, { type Desc = MeshRenderPipelineDesc; fn prepare( &mut self, factory: &Factory<B>, _queue: QueueId, _set_layouts: &[Handle<DescriptorSetLayout<B>>], index: usize, scene: &Scene<B>, ) -> PrepareResult { let pt = ProfileTimer::start("prepare"); // println!("index: {}", index); // println!( // "upload uniform {}: {}", // index, // std::mem::size_of::<UniformArgs>() // ); unsafe { factory .upload_visible_buffer( &mut self.buffer, uniform_offset(index, self.align), &[UniformArgs { // proj: scene.camera.proj.to_homogeneous(), proj: scene.camera.proj, view: scene.camera.view.to_homogeneous(), model: model_transform2(), }], ) .unwrap() }; // { // let per_instance = &scene.per_instance[..]; // println!( // "upload dyn {}: {}", // index, // // std::mem::size_of::<PerInstance>() * scene.per_instance.len(), // std::mem::size_of_val(per_instance) // ); // } if!scene.per_instance.is_empty() { unsafe { factory .upload_visible_buffer( &mut self.buffer, per_instance_offset(index, self.align), &scene.per_instance[..], ) .unwrap() }; } PrepareResult::DrawReuse } fn draw( &mut self, layout: &B::PipelineLayout, mut encoder: RenderPassEncoder<'_, B>, index: usize, scene: &Scene<B>, ) { println!("draw"); unsafe { encoder.bind_graphics_descriptor_sets( layout, 0, Some(self.sets[index].raw()), std::iter::empty(), ); let vertex = [SHADER_REFLECTION.attributes(&["position"]).unwrap()]; scene .object_mesh .as_ref() .unwrap() .bind(0, &vertex, &mut encoder) .unwrap(); encoder.bind_vertex_buffers(1, std::iter::once((self.buffer.raw(), 0))); encoder.bind_vertex_buffers( 2, std::iter::once((self.buffer.raw(), per_instance_offset(index, self.align))), ); encoder.draw_indexed( 0..scene.object_mesh.as_ref().unwrap().len(), 0 as i32, 0..scene.per_instance.len() as u32, ) } } fn dispose(self, _factory: &mut Factory<B>, _scene: &Scene<B>) {} } fn main() { env_logger::Builder::from_default_env() 
.filter_module("meshes", log::LevelFilter::Trace) .init(); let mut event_loop = EventLoop::new(); let window = WindowBuilder::new() .with_inner_size((960, 640).into()) .with_title("Rendy example"); let config: Config = Default::default(); let rendy = AnyWindowedRendy::init_auto(&config, window, &event_loop).unwrap(); rendy::with_any_windowed_rendy!((rendy) use back; (mut factory, mut families, surface, window) => { let mut graph_builder = GraphBuilder::<Backend, Scene<Backend>>::new(); let size = window.inner_size().to_physical(window.hidpi_factor()); let window_kind = hal::image::Kind::D2(size.width as u32, size.height as u32, 1, 1); let aspect = size.width / size.height; let depth = graph_builder.create_image( window_kind, 1, hal::format::Format::D32Sfloat, Some(hal::command::ClearValue { depth_stencil: hal::command::ClearDepthStencil { depth: 1.0, stencil: 0, }, }), ); let pass = graph_builder.add_node( MeshRenderPipeline::builder() .into_subpass() .with_color_surface() .with_depth_stencil(depth) .into_pass() .with_surface( surface, hal::window::Extent2D { width: size.width as _, height: size.height as _, }, Some(hal::command::ClearValue { color: hal::command::ClearColor { float32: [0.5, 0.5, 1.0, 1.0], }, }), ), ); let bm = crystal::read_map("hidden_ramp.txt").expect("could not read file"); let mut planes = crystal::PlanesSep::new(); planes.create_planes(&bm); let planes_copy : Vec<crystal::Plane> = planes.planes_iter().cloned().collect(); let mut scene = Scene { camera: Camera { proj: nalgebra::Perspective3::new(aspect as f32, 3.1415 / 4.0, 1.0, 200.0) .to_homogeneous(), view: nalgebra::Projective3::identity() * nalgebra::Translation3::new(0.0, 0.0, 10.0), }, object_mesh: None, per_instance: vec![], per_instance_const: vec![], }; // let mut rng = rand::thread_rng(); // let col_dist = Uniform::new(0.5, 1.0); let mut rc = RandomColor::new(); rc.luminosity(random_color::Luminosity::Bright); println!("planes: {}", planes_copy.len()); for i in 0..std::cmp::min(NUM_INSTANCES as usize,planes_copy.len()) { let color = rc.to_rgb_array(); let point = planes_copy[i].cell; let dir = match planes_copy[i].dir { crystal::Dir::ZxPos => 4, crystal::Dir::ZxNeg => 5, crystal::Dir::YzPos => 2, crystal::Dir::YzNeg => 3, crystal::Dir::XyPos => 0, crystal::Dir::XyNeg => 1, }; scene.per_instance_const.push(PerInstanceConst{ translate: nalgebra::Vector3::new(point[0] as f32 * 0.25, point[1] as f32 * 0.25, point[2] as f32 * 0.25), dir : dir, }); scene.per_instance.push(PerInstance{ color : nalgebra::Vector3::new( color[0] as f32 / 255.0, color[1] as f32 / 255.0, color[2] as f32 / 255.0, ), pad : 0, }); } let graph = graph_builder .build(&mut factory, &mut families, &scene) .unwrap(); // let icosphere = genmesh::generators::IcoSphere::subdivide(3); // let icosphere = genmesh::generators::Torus::new(1f32, 0.5f32, 32, 32); let icosphere = genmesh::generators::Plane::new(); // icosphere. 
let indices: Vec<_> = genmesh::Vertices::vertices(icosphere.indexed_polygon_iter().triangulate()) .map(|i| i as u32) .collect(); println!("indices: {}", indices.len()); let vertices: Vec<_> = icosphere .shared_vertex_iter() .map(|v| Position(v.pos.into())) .collect(); println!("vertices: {}", vertices.len()); for v in &vertices { println!("vert: {:?}", v); } scene.object_mesh = Some( Mesh::<Backend>::builder() .with_indices(&indices[..]) .with_vertices(&vertices[..]) .build(graph.node_queue(pass), &factory) .unwrap(), ); let started = time::Instant::now(); let mut frames = 0u64..; // let rxy = Uniform::new(-1.0, 1.0); // let rz = Uniform::new(0.0, 185.0); let mut checkpoint = started; let mut player_state = player::State::new(); let mut event_manager = player::EventManager::new(); let mut graph = Some(graph); event_loop.run(move |event, _, control_flow| { *control_flow = ControlFlow::Poll; match event { Event::WindowEvent { event,.. } => match event { WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit, _ => event_manager.window_event(event)
{ // println!( // "upload const: {}", // std::mem::size_of::<PerInstanceConst>() * scene.per_instance_const.len() // ); unsafe { factory .upload_visible_buffer(&mut buffer, 0, &scene.per_instance_const[..]) .expect("update const buffer failed") }; }
conditional_block
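The buffer layout in the record above hangs off align_to, which rounds a size up to the next multiple of the device's minimum alignment. A standalone check of that arithmetic with a typical 256-byte uniform alignment; the concrete value would come from min_uniform_buffer_offset_alignment at runtime:

const fn align_to(s: u64, align: u64) -> u64 {
    ((s - 1) / align + 1) * align
}

fn main() {
    assert_eq!(align_to(100, 256), 256); // rounds up to one full step
    assert_eq!(align_to(256, 256), 256); // exact multiples are unchanged
    assert_eq!(align_to(257, 256), 512); // one byte over costs a full step
}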
main.rs
>; 6], } #[derive(Clone, Copy, Debug)] #[repr(C, align(16))] struct PerInstanceConst { translate: nalgebra::Vector3<f32>, dir: u32, } #[derive(Clone, Copy, Debug)] #[repr(C, align(16))] struct PerInstance { color: nalgebra::Vector3<f32>, pad: u32, } #[derive(Debug)] struct Camera { view: nalgebra::Projective3<f32>, // proj: nalgebra::Perspective3<f32>, proj: nalgebra::Matrix4<f32>, } struct Scene<B: hal::Backend> { camera: Camera, object_mesh: Option<Mesh<B>>, per_instance_const: Vec<PerInstanceConst>, per_instance: Vec<PerInstance>, } const UNIFORM_SIZE: u64 = size_of::<UniformArgs>() as u64; const NUM_INSTANCES: u64 = 1024 * 1024; const PER_INSTANCE_CONST_SIZE: u64 = size_of::<PerInstanceConst>() as u64; const PER_INSTANCE_SIZE: u64 = size_of::<PerInstance>() as u64; const fn align_to(s: u64, align: u64) -> u64 { ((s - 1) / align + 1) * align } const fn buffer_const_size(align: u64) -> u64 { align_to(PER_INSTANCE_CONST_SIZE * NUM_INSTANCES, align) } const fn buffer_frame_size(align: u64) -> u64 { align_to(UNIFORM_SIZE + PER_INSTANCE_SIZE * NUM_INSTANCES, align) } const fn buffer_size(align: u64, frames: u64) -> u64 { buffer_const_size(align) + buffer_frame_size(align) * frames } const fn uniform_offset(index: usize, align: u64) -> u64 { buffer_const_size(align) + buffer_frame_size(align) * index as u64 } const fn per_instance_offset(index: usize, align: u64) -> u64 { uniform_offset(index, align) + UNIFORM_SIZE } #[derive(Debug, Default)] struct MeshRenderPipelineDesc; #[derive(Debug)] struct MeshRenderPipeline<B: hal::Backend> { align: u64, buffer: Escape<Buffer<B>>, sets: Vec<Escape<DescriptorSet<B>>>, } struct ProfileTimer { label: std::string::String, start: std::time::Instant, } impl ProfileTimer { fn start(label: &str) -> Self { ProfileTimer { label: label.into(), start: std::time::Instant::now(), } } } // impl Drop for ProfileTimer { // fn drop(&mut self) { // println!("{}: {:?}", self.label, self.start.elapsed()); // } // } impl<B> SimpleGraphicsPipelineDesc<B, Scene<B>> for MeshRenderPipelineDesc where B: hal::Backend, { type Pipeline = MeshRenderPipeline<B>; fn load_shader_set( &self, factory: &mut Factory<B>, _scene: &Scene<B>, ) -> rendy_shader::ShaderSet<B> { SHADERS.build(factory, Default::default()).unwrap() } fn vertices( &self, ) -> Vec<( Vec<hal::pso::Element<hal::format::Format>>, hal::pso::ElemStride, hal::pso::VertexInputRate, )> { return vec![ SHADER_REFLECTION .attributes(&["position"]) .unwrap() .gfx_vertex_input_desc(hal::pso::VertexInputRate::Vertex), SHADER_REFLECTION .attributes(&["translate", "dir"]) .unwrap() .gfx_vertex_input_desc(hal::pso::VertexInputRate::Instance(1)), SHADER_REFLECTION .attributes(&["color", "pad"]) .unwrap() .gfx_vertex_input_desc(hal::pso::VertexInputRate::Instance(1)), ]; } fn layout(&self) -> Layout { return SHADER_REFLECTION.layout().unwrap(); } fn build<'a>( self, ctx: &GraphContext<B>, factory: &mut Factory<B>, _queue: QueueId, scene: &Scene<B>, buffers: Vec<NodeBuffer>, images: Vec<NodeImage>, set_layouts: &[Handle<DescriptorSetLayout<B>>], ) -> Result<MeshRenderPipeline<B>, rendy_core::hal::pso::CreationError> { assert!(buffers.is_empty()); assert!(images.is_empty()); assert_eq!(set_layouts.len(), 1); let frames = ctx.frames_in_flight as _; let align = factory .physical() .limits() .min_uniform_buffer_offset_alignment; let mut buffer = factory .create_buffer( BufferInfo { size: buffer_size(align, frames) as u64, usage: hal::buffer::Usage::UNIFORM | hal::buffer::Usage::INDIRECT | hal::buffer::Usage::VERTEX, }, Dynamic, ) 
.unwrap(); let mut sets = Vec::new(); for index in 0..frames { unsafe { let set = factory .create_descriptor_set(set_layouts[0].clone()) .unwrap(); factory.write_descriptor_sets(Some(hal::pso::DescriptorSetWrite { set: set.raw(), binding: 0, array_offset: 0, descriptors: Some(hal::pso::Descriptor::Buffer( buffer.raw(), Some(uniform_offset(index as usize, align)) ..Some(uniform_offset(index as usize, align) + UNIFORM_SIZE), )), })); sets.push(set); } } if!scene.per_instance_const.is_empty() { // println!( // "upload const: {}", // std::mem::size_of::<PerInstanceConst>() * scene.per_instance_const.len() // ); unsafe { factory .upload_visible_buffer(&mut buffer, 0, &scene.per_instance_const[..]) .expect("update const buffer failed") }; } Ok(MeshRenderPipeline { align, buffer, sets, }) } } fn model_transform() -> nalgebra::Matrix4<f32> { let rot = nalgebra::UnitQuaternion::identity(); nalgebra::Similarity3::from_parts(Vector3::new(0.5, 0.5, 0.0).into(), rot, 0.5).into() } fn model_transform2() -> [nalgebra::Matrix4<f32>; 6] { let z_pos = nalgebra::UnitQuaternion::identity(); let z_neg = nalgebra::UnitQuaternion::face_towards( &Vector3::new(0.0, 0.0, -1.0), &Vector3::new(0.0, 1.0, 0.0), ); let x_pos = nalgebra::UnitQuaternion::face_towards( &Vector3::new(1.0, 0.0, 0.0), &Vector3::new(0.0, 1.0, 0.0), ); let x_neg = nalgebra::UnitQuaternion::face_towards( &Vector3::new(-1.0, 0.0, 0.0), &Vector3::new(0.0, 1.0, 0.0), ); let y_pos = nalgebra::UnitQuaternion::face_towards( &Vector3::new(0.0, 1.0, 0.0), &Vector3::new(0.0, 0.0, 1.0), ); let y_neg = nalgebra::UnitQuaternion::face_towards( &Vector3::new(0.0, -1.0, 0.0), &Vector3::new(0.0, 0.0, -1.0), ); // let unit = 0.125; let unit = 0.125; let scale = 0.125; [ nalgebra::Similarity3::from_parts(Vector3::new(0.0, 0.0, unit).into(), z_pos, scale).into(), nalgebra::Similarity3::from_parts(Vector3::new(0.0, 0.0, -unit).into(), z_neg, scale) .into(), nalgebra::Similarity3::from_parts(Vector3::new(unit, 0.0, 0.0).into(), x_pos, scale).into(), nalgebra::Similarity3::from_parts(Vector3::new(-unit, 0.0, 0.0).into(), x_neg, scale) .into(), nalgebra::Similarity3::from_parts(Vector3::new(0.0, unit, 0.0).into(), y_pos, scale).into(), nalgebra::Similarity3::from_parts(Vector3::new(0.0, -unit, 0.0).into(), y_neg, scale) .into(), ] } impl<B> SimpleGraphicsPipeline<B, Scene<B>> for MeshRenderPipeline<B> where B: hal::Backend, { type Desc = MeshRenderPipelineDesc; fn prepare( &mut self, factory: &Factory<B>, _queue: QueueId, _set_layouts: &[Handle<DescriptorSetLayout<B>>], index: usize, scene: &Scene<B>, ) -> PrepareResult { let pt = ProfileTimer::start("prepare"); // println!("index: {}", index); // println!( // "upload uniform {}: {}", // index, // std::mem::size_of::<UniformArgs>() // ); unsafe { factory .upload_visible_buffer( &mut self.buffer, uniform_offset(index, self.align), &[UniformArgs { // proj: scene.camera.proj.to_homogeneous(), proj: scene.camera.proj, view: scene.camera.view.to_homogeneous(), model: model_transform2(), }], ) .unwrap() }; // { // let per_instance = &scene.per_instance[..]; // println!( // "upload dyn {}: {}", // index, // // std::mem::size_of::<PerInstance>() * scene.per_instance.len(), // std::mem::size_of_val(per_instance) // ); // } if!scene.per_instance.is_empty() { unsafe { factory .upload_visible_buffer( &mut self.buffer, per_instance_offset(index, self.align), &scene.per_instance[..], ) .unwrap() }; } PrepareResult::DrawReuse } fn draw( &mut self, layout: &B::PipelineLayout, mut encoder: RenderPassEncoder<'_, B>, index: usize, 
scene: &Scene<B>, ) { println!("draw"); unsafe { encoder.bind_graphics_descriptor_sets( layout, 0, Some(self.sets[index].raw()), std::iter::empty(), ); let vertex = [SHADER_REFLECTION.attributes(&["position"]).unwrap()]; scene .object_mesh .as_ref() .unwrap() .bind(0, &vertex, &mut encoder) .unwrap(); encoder.bind_vertex_buffers(1, std::iter::once((self.buffer.raw(), 0))); encoder.bind_vertex_buffers( 2, std::iter::once((self.buffer.raw(), per_instance_offset(index, self.align))), ); encoder.draw_indexed( 0..scene.object_mesh.as_ref().unwrap().len(), 0 as i32, 0..scene.per_instance.len() as u32, ) } } fn dispose(self, _factory: &mut Factory<B>, _scene: &Scene<B>) {} } fn main() { env_logger::Builder::from_default_env() .filter_module("meshes", log::LevelFilter::Trace) .init(); let mut event_loop = EventLoop::new(); let window = WindowBuilder::new() .with_inner_size((960, 640).into()) .with_title("Rendy example"); let config: Config = Default::default(); let rendy = AnyWindowedRendy::init_auto(&config, window, &event_loop).unwrap(); rendy::with_any_windowed_rendy!((rendy) use back; (mut factory, mut families, surface, window) => { let mut graph_builder = GraphBuilder::<Backend, Scene<Backend>>::new(); let size = window.inner_size().to_physical(window.hidpi_factor()); let window_kind = hal::image::Kind::D2(size.width as u32, size.height as u32, 1, 1); let aspect = size.width / size.height; let depth = graph_builder.create_image( window_kind, 1, hal::format::Format::D32Sfloat, Some(hal::command::ClearValue { depth_stencil: hal::command::ClearDepthStencil { depth: 1.0, stencil: 0, }, }), ); let pass = graph_builder.add_node( MeshRenderPipeline::builder() .into_subpass() .with_color_surface() .with_depth_stencil(depth) .into_pass() .with_surface( surface, hal::window::Extent2D { width: size.width as _, height: size.height as _, }, Some(hal::command::ClearValue { color: hal::command::ClearColor { float32: [0.5, 0.5, 1.0, 1.0], }, }), ), ); let bm = crystal::read_map("hidden_ramp.txt").expect("could not read file");
let mut planes = crystal::PlanesSep::new(); planes.create_planes(&bm); let planes_copy : Vec<crystal::Plane> = planes.planes_iter().cloned().collect(); let mut scene = Scene { camera: Camera { proj: nalgebra::Perspective3::new(aspect as f32, 3.1415 / 4.0, 1.0, 200.0) .to_homogeneous(), view: nalgebra::Projective3::identity() * nalgebra::Translation3::new(0.0, 0.0, 10.0), }, object_mesh: None, per_instance: vec![], per_instance_const: vec![], }; // let mut rng = rand::thread_rng(); // let col_dist = Uniform::new(0.5, 1.0); let mut rc = RandomColor::new(); rc.luminosity(random_color::Luminosity::Bright); println!("planes: {}", planes_copy.len()); for i in 0..std::cmp::min(NUM_INSTANCES as usize,planes_copy.len()) { let color = rc.to_rgb_array(); let point = planes_copy[i].cell; let dir = match planes_copy[i].dir { crystal::Dir::ZxPos => 4, crystal::Dir::ZxNeg => 5, crystal::Dir::YzPos => 2, crystal::Dir::YzNeg => 3, crystal::Dir::XyPos => 0, crystal::Dir::XyNeg => 1, }; scene.per_instance_const.push(PerInstanceConst{ translate: nalgebra::Vector3::new(point[0] as f32 * 0.25, point[1] as f32 * 0.25, point[2] as f32 * 0.25), dir : dir, }); scene.per_instance.push(PerInstance{ color : nalgebra::Vector3::new( color[0] as f32 / 255.0, color[1] as f32 / 255.0, color[2] as f32 / 255.0, ), pad : 0, }); } let graph = graph_builder .build(&mut factory, &mut families, &scene) .unwrap(); // let icosphere = genmesh::generators::IcoSphere::subdivide(3); // let icosphere = genmesh::generators::Torus::new(1f32, 0.5f32, 32, 32); let icosphere = genmesh::generators::Plane::new(); // icosphere. let indices: Vec<_> = genmesh::Vertices::vertices(icosphere.indexed_polygon_iter().triangulate()) .map(|i| i as u32) .collect(); println!("indices: {}", indices.len()); let vertices: Vec<_> = icosphere .shared_vertex_iter() .map(|v| Position(v.pos.into())) .collect(); println!("vertices: {}", vertices.len()); for v in &vertices { println!("vert: {:?}", v); } scene.object_mesh = Some( Mesh::<Backend>::builder() .with_indices(&indices[..]) .with_vertices(&vertices[..]) .build(graph.node_queue(pass), &factory) .unwrap(), ); let started = time::Instant::now(); let mut frames = 0u64..; // let rxy = Uniform::new(-1.0, 1.0); // let rz = Uniform::new(0.0, 185.0); let mut checkpoint = started; let mut player_state = player::State::new(); let mut event_manager = player::EventManager::new(); let mut graph = Some(graph); event_loop.run(move |event, _, control_flow| { *control_flow = ControlFlow::Poll; match event { Event::WindowEvent { event,.. } => match event { WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit, _ => event_manager.window_event(event)
random_line_split
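// A minimal, self-contained sketch (not part of the example above) of why the
// instance structs in main.rs use #[repr(C, align(16))] plus an explicit
// trailing u32 (`dir`, `pad`): the assumption is that the GPU-side vertex
// input expects a 16-byte instance stride. `[f32; 3]` stands in for
// nalgebra::Vector3<f32>, which has the same size and layout.
#[allow(dead_code)]
#[repr(C, align(16))]
struct PerInstanceConst {
    translate: [f32; 3], // 12 bytes
    dir: u32,            // brings the struct up to a 16-byte stride
}

#[allow(dead_code)]
#[repr(C, align(16))]
struct PerInstance {
    color: [f32; 3],
    pad: u32,
}

fn main() {
    use std::mem::{align_of, size_of};
    assert_eq!(size_of::<PerInstanceConst>(), 16);
    assert_eq!(size_of::<PerInstance>(), 16);
    assert_eq!(align_of::<PerInstance>(), 16);
    println!("16-byte instance stride confirmed");
}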
main.rs
6], } #[derive(Clone, Copy, Debug)] #[repr(C, align(16))] struct PerInstanceConst { translate: nalgebra::Vector3<f32>, dir: u32, } #[derive(Clone, Copy, Debug)] #[repr(C, align(16))] struct PerInstance { color: nalgebra::Vector3<f32>, pad: u32, } #[derive(Debug)] struct Camera { view: nalgebra::Projective3<f32>, // proj: nalgebra::Perspective3<f32>, proj: nalgebra::Matrix4<f32>, } struct Scene<B: hal::Backend> { camera: Camera, object_mesh: Option<Mesh<B>>, per_instance_const: Vec<PerInstanceConst>, per_instance: Vec<PerInstance>, } const UNIFORM_SIZE: u64 = size_of::<UniformArgs>() as u64; const NUM_INSTANCES: u64 = 1024 * 1024; const PER_INSTANCE_CONST_SIZE: u64 = size_of::<PerInstanceConst>() as u64; const PER_INSTANCE_SIZE: u64 = size_of::<PerInstance>() as u64; const fn align_to(s: u64, align: u64) -> u64
const fn buffer_const_size(align: u64) -> u64 { align_to(PER_INSTANCE_CONST_SIZE * NUM_INSTANCES, align) } const fn buffer_frame_size(align: u64) -> u64 { align_to(UNIFORM_SIZE + PER_INSTANCE_SIZE * NUM_INSTANCES, align) } const fn buffer_size(align: u64, frames: u64) -> u64 { buffer_const_size(align) + buffer_frame_size(align) * frames } const fn uniform_offset(index: usize, align: u64) -> u64 { buffer_const_size(align) + buffer_frame_size(align) * index as u64 } const fn per_instance_offset(index: usize, align: u64) -> u64 { uniform_offset(index, align) + UNIFORM_SIZE } #[derive(Debug, Default)] struct MeshRenderPipelineDesc; #[derive(Debug)] struct MeshRenderPipeline<B: hal::Backend> { align: u64, buffer: Escape<Buffer<B>>, sets: Vec<Escape<DescriptorSet<B>>>, } struct ProfileTimer { label: std::string::String, start: std::time::Instant, } impl ProfileTimer { fn start(label: &str) -> Self { ProfileTimer { label: label.into(), start: std::time::Instant::now(), } } } // impl Drop for ProfileTimer { // fn drop(&mut self) { // println!("{}: {:?}", self.label, self.start.elapsed()); // } // } impl<B> SimpleGraphicsPipelineDesc<B, Scene<B>> for MeshRenderPipelineDesc where B: hal::Backend, { type Pipeline = MeshRenderPipeline<B>; fn load_shader_set( &self, factory: &mut Factory<B>, _scene: &Scene<B>, ) -> rendy_shader::ShaderSet<B> { SHADERS.build(factory, Default::default()).unwrap() } fn vertices( &self, ) -> Vec<( Vec<hal::pso::Element<hal::format::Format>>, hal::pso::ElemStride, hal::pso::VertexInputRate, )> { return vec![ SHADER_REFLECTION .attributes(&["position"]) .unwrap() .gfx_vertex_input_desc(hal::pso::VertexInputRate::Vertex), SHADER_REFLECTION .attributes(&["translate", "dir"]) .unwrap() .gfx_vertex_input_desc(hal::pso::VertexInputRate::Instance(1)), SHADER_REFLECTION .attributes(&["color", "pad"]) .unwrap() .gfx_vertex_input_desc(hal::pso::VertexInputRate::Instance(1)), ]; } fn layout(&self) -> Layout { return SHADER_REFLECTION.layout().unwrap(); } fn build<'a>( self, ctx: &GraphContext<B>, factory: &mut Factory<B>, _queue: QueueId, scene: &Scene<B>, buffers: Vec<NodeBuffer>, images: Vec<NodeImage>, set_layouts: &[Handle<DescriptorSetLayout<B>>], ) -> Result<MeshRenderPipeline<B>, rendy_core::hal::pso::CreationError> { assert!(buffers.is_empty()); assert!(images.is_empty()); assert_eq!(set_layouts.len(), 1); let frames = ctx.frames_in_flight as _; let align = factory .physical() .limits() .min_uniform_buffer_offset_alignment; let mut buffer = factory .create_buffer( BufferInfo { size: buffer_size(align, frames) as u64, usage: hal::buffer::Usage::UNIFORM | hal::buffer::Usage::INDIRECT | hal::buffer::Usage::VERTEX, }, Dynamic, ) .unwrap(); let mut sets = Vec::new(); for index in 0..frames { unsafe { let set = factory .create_descriptor_set(set_layouts[0].clone()) .unwrap(); factory.write_descriptor_sets(Some(hal::pso::DescriptorSetWrite { set: set.raw(), binding: 0, array_offset: 0, descriptors: Some(hal::pso::Descriptor::Buffer( buffer.raw(), Some(uniform_offset(index as usize, align)) ..Some(uniform_offset(index as usize, align) + UNIFORM_SIZE), )), })); sets.push(set); } } if!scene.per_instance_const.is_empty() { // println!( // "upload const: {}", // std::mem::size_of::<PerInstanceConst>() * scene.per_instance_const.len() // ); unsafe { factory .upload_visible_buffer(&mut buffer, 0, &scene.per_instance_const[..]) .expect("update const buffer failed") }; } Ok(MeshRenderPipeline { align, buffer, sets, }) } } fn model_transform() -> nalgebra::Matrix4<f32> { let rot = 
nalgebra::UnitQuaternion::identity(); nalgebra::Similarity3::from_parts(Vector3::new(0.5, 0.5, 0.0).into(), rot, 0.5).into() } fn model_transform2() -> [nalgebra::Matrix4<f32>; 6] { let z_pos = nalgebra::UnitQuaternion::identity(); let z_neg = nalgebra::UnitQuaternion::face_towards( &Vector3::new(0.0, 0.0, -1.0), &Vector3::new(0.0, 1.0, 0.0), ); let x_pos = nalgebra::UnitQuaternion::face_towards( &Vector3::new(1.0, 0.0, 0.0), &Vector3::new(0.0, 1.0, 0.0), ); let x_neg = nalgebra::UnitQuaternion::face_towards( &Vector3::new(-1.0, 0.0, 0.0), &Vector3::new(0.0, 1.0, 0.0), ); let y_pos = nalgebra::UnitQuaternion::face_towards( &Vector3::new(0.0, 1.0, 0.0), &Vector3::new(0.0, 0.0, 1.0), ); let y_neg = nalgebra::UnitQuaternion::face_towards( &Vector3::new(0.0, -1.0, 0.0), &Vector3::new(0.0, 0.0, -1.0), ); // let unit = 0.125; let unit = 0.125; let scale = 0.125; [ nalgebra::Similarity3::from_parts(Vector3::new(0.0, 0.0, unit).into(), z_pos, scale).into(), nalgebra::Similarity3::from_parts(Vector3::new(0.0, 0.0, -unit).into(), z_neg, scale) .into(), nalgebra::Similarity3::from_parts(Vector3::new(unit, 0.0, 0.0).into(), x_pos, scale).into(), nalgebra::Similarity3::from_parts(Vector3::new(-unit, 0.0, 0.0).into(), x_neg, scale) .into(), nalgebra::Similarity3::from_parts(Vector3::new(0.0, unit, 0.0).into(), y_pos, scale).into(), nalgebra::Similarity3::from_parts(Vector3::new(0.0, -unit, 0.0).into(), y_neg, scale) .into(), ] } impl<B> SimpleGraphicsPipeline<B, Scene<B>> for MeshRenderPipeline<B> where B: hal::Backend, { type Desc = MeshRenderPipelineDesc; fn prepare( &mut self, factory: &Factory<B>, _queue: QueueId, _set_layouts: &[Handle<DescriptorSetLayout<B>>], index: usize, scene: &Scene<B>, ) -> PrepareResult { let pt = ProfileTimer::start("prepare"); // println!("index: {}", index); // println!( // "upload uniform {}: {}", // index, // std::mem::size_of::<UniformArgs>() // ); unsafe { factory .upload_visible_buffer( &mut self.buffer, uniform_offset(index, self.align), &[UniformArgs { // proj: scene.camera.proj.to_homogeneous(), proj: scene.camera.proj, view: scene.camera.view.to_homogeneous(), model: model_transform2(), }], ) .unwrap() }; // { // let per_instance = &scene.per_instance[..]; // println!( // "upload dyn {}: {}", // index, // // std::mem::size_of::<PerInstance>() * scene.per_instance.len(), // std::mem::size_of_val(per_instance) // ); // } if!scene.per_instance.is_empty() { unsafe { factory .upload_visible_buffer( &mut self.buffer, per_instance_offset(index, self.align), &scene.per_instance[..], ) .unwrap() }; } PrepareResult::DrawReuse } fn draw( &mut self, layout: &B::PipelineLayout, mut encoder: RenderPassEncoder<'_, B>, index: usize, scene: &Scene<B>, ) { println!("draw"); unsafe { encoder.bind_graphics_descriptor_sets( layout, 0, Some(self.sets[index].raw()), std::iter::empty(), ); let vertex = [SHADER_REFLECTION.attributes(&["position"]).unwrap()]; scene .object_mesh .as_ref() .unwrap() .bind(0, &vertex, &mut encoder) .unwrap(); encoder.bind_vertex_buffers(1, std::iter::once((self.buffer.raw(), 0))); encoder.bind_vertex_buffers( 2, std::iter::once((self.buffer.raw(), per_instance_offset(index, self.align))), ); encoder.draw_indexed( 0..scene.object_mesh.as_ref().unwrap().len(), 0 as i32, 0..scene.per_instance.len() as u32, ) } } fn dispose(self, _factory: &mut Factory<B>, _scene: &Scene<B>) {} } fn main() { env_logger::Builder::from_default_env() .filter_module("meshes", log::LevelFilter::Trace) .init(); let mut event_loop = EventLoop::new(); let window = WindowBuilder::new() 
.with_inner_size((960, 640).into()) .with_title("Rendy example"); let config: Config = Default::default(); let rendy = AnyWindowedRendy::init_auto(&config, window, &event_loop).unwrap(); rendy::with_any_windowed_rendy!((rendy) use back; (mut factory, mut families, surface, window) => { let mut graph_builder = GraphBuilder::<Backend, Scene<Backend>>::new(); let size = window.inner_size().to_physical(window.hidpi_factor()); let window_kind = hal::image::Kind::D2(size.width as u32, size.height as u32, 1, 1); let aspect = size.width / size.height; let depth = graph_builder.create_image( window_kind, 1, hal::format::Format::D32Sfloat, Some(hal::command::ClearValue { depth_stencil: hal::command::ClearDepthStencil { depth: 1.0, stencil: 0, }, }), ); let pass = graph_builder.add_node( MeshRenderPipeline::builder() .into_subpass() .with_color_surface() .with_depth_stencil(depth) .into_pass() .with_surface( surface, hal::window::Extent2D { width: size.width as _, height: size.height as _, }, Some(hal::command::ClearValue { color: hal::command::ClearColor { float32: [0.5, 0.5, 1.0, 1.0], }, }), ), ); let bm = crystal::read_map("hidden_ramp.txt").expect("could not read file"); let mut planes = crystal::PlanesSep::new(); planes.create_planes(&bm); let planes_copy : Vec<crystal::Plane> = planes.planes_iter().cloned().collect(); let mut scene = Scene { camera: Camera { proj: nalgebra::Perspective3::new(aspect as f32, 3.1415 / 4.0, 1.0, 200.0) .to_homogeneous(), view: nalgebra::Projective3::identity() * nalgebra::Translation3::new(0.0, 0.0, 10.0), }, object_mesh: None, per_instance: vec![], per_instance_const: vec![], }; // let mut rng = rand::thread_rng(); // let col_dist = Uniform::new(0.5, 1.0); let mut rc = RandomColor::new(); rc.luminosity(random_color::Luminosity::Bright); println!("planes: {}", planes_copy.len()); for i in 0..std::cmp::min(NUM_INSTANCES as usize,planes_copy.len()) { let color = rc.to_rgb_array(); let point = planes_copy[i].cell; let dir = match planes_copy[i].dir { crystal::Dir::ZxPos => 4, crystal::Dir::ZxNeg => 5, crystal::Dir::YzPos => 2, crystal::Dir::YzNeg => 3, crystal::Dir::XyPos => 0, crystal::Dir::XyNeg => 1, }; scene.per_instance_const.push(PerInstanceConst{ translate: nalgebra::Vector3::new(point[0] as f32 * 0.25, point[1] as f32 * 0.25, point[2] as f32 * 0.25), dir : dir, }); scene.per_instance.push(PerInstance{ color : nalgebra::Vector3::new( color[0] as f32 / 255.0, color[1] as f32 / 255.0, color[2] as f32 / 255.0, ), pad : 0, }); } let graph = graph_builder .build(&mut factory, &mut families, &scene) .unwrap(); // let icosphere = genmesh::generators::IcoSphere::subdivide(3); // let icosphere = genmesh::generators::Torus::new(1f32, 0.5f32, 32, 32); let icosphere = genmesh::generators::Plane::new(); // icosphere. 
let indices: Vec<_> = genmesh::Vertices::vertices(icosphere.indexed_polygon_iter().triangulate()) .map(|i| i as u32) .collect(); println!("indices: {}", indices.len()); let vertices: Vec<_> = icosphere .shared_vertex_iter() .map(|v| Position(v.pos.into())) .collect(); println!("vertices: {}", vertices.len()); for v in &vertices { println!("vert: {:?}", v); } scene.object_mesh = Some( Mesh::<Backend>::builder() .with_indices(&indices[..]) .with_vertices(&vertices[..]) .build(graph.node_queue(pass), &factory) .unwrap(), ); let started = time::Instant::now(); let mut frames = 0u64..; // let rxy = Uniform::new(-1.0, 1.0); // let rz = Uniform::new(0.0, 185.0); let mut checkpoint = started; let mut player_state = player::State::new(); let mut event_manager = player::EventManager::new(); let mut graph = Some(graph); event_loop.run(move |event, _, control_flow| { *control_flow = ControlFlow::Poll; match event { Event::WindowEvent { event,.. } => match event { WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit, _ => event_manager.window_event(event)
{ ((s - 1) / align + 1) * align }
identifier_body
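// The `identifier_body` hole above fills in align_to's body, which rounds `s`
// up to the next multiple of `align`. A small standalone sketch of the same
// idiom; note that `(s - 1)` underflows when s == 0, which is tolerable here
// only on the assumption that callers always pass a non-zero size.
const fn align_to(s: u64, align: u64) -> u64 {
    ((s - 1) / align + 1) * align
}

fn main() {
    assert_eq!(align_to(1, 256), 256);
    assert_eq!(align_to(256, 256), 256);
    assert_eq!(align_to(257, 256), 512);
    // std offers an equivalent helper (u64::next_multiple_of):
    assert_eq!(257u64.next_multiple_of(256), 512);
}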
main.rs
6], } #[derive(Clone, Copy, Debug)] #[repr(C, align(16))] struct PerInstanceConst { translate: nalgebra::Vector3<f32>, dir: u32, } #[derive(Clone, Copy, Debug)] #[repr(C, align(16))] struct PerInstance { color: nalgebra::Vector3<f32>, pad: u32, } #[derive(Debug)] struct Camera { view: nalgebra::Projective3<f32>, // proj: nalgebra::Perspective3<f32>, proj: nalgebra::Matrix4<f32>, } struct Scene<B: hal::Backend> { camera: Camera, object_mesh: Option<Mesh<B>>, per_instance_const: Vec<PerInstanceConst>, per_instance: Vec<PerInstance>, } const UNIFORM_SIZE: u64 = size_of::<UniformArgs>() as u64; const NUM_INSTANCES: u64 = 1024 * 1024; const PER_INSTANCE_CONST_SIZE: u64 = size_of::<PerInstanceConst>() as u64; const PER_INSTANCE_SIZE: u64 = size_of::<PerInstance>() as u64; const fn align_to(s: u64, align: u64) -> u64 { ((s - 1) / align + 1) * align } const fn buffer_const_size(align: u64) -> u64 { align_to(PER_INSTANCE_CONST_SIZE * NUM_INSTANCES, align) } const fn buffer_frame_size(align: u64) -> u64 { align_to(UNIFORM_SIZE + PER_INSTANCE_SIZE * NUM_INSTANCES, align) } const fn buffer_size(align: u64, frames: u64) -> u64 { buffer_const_size(align) + buffer_frame_size(align) * frames } const fn
(index: usize, align: u64) -> u64 { buffer_const_size(align) + buffer_frame_size(align) * index as u64 } const fn per_instance_offset(index: usize, align: u64) -> u64 { uniform_offset(index, align) + UNIFORM_SIZE } #[derive(Debug, Default)] struct MeshRenderPipelineDesc; #[derive(Debug)] struct MeshRenderPipeline<B: hal::Backend> { align: u64, buffer: Escape<Buffer<B>>, sets: Vec<Escape<DescriptorSet<B>>>, } struct ProfileTimer { label: std::string::String, start: std::time::Instant, } impl ProfileTimer { fn start(label: &str) -> Self { ProfileTimer { label: label.into(), start: std::time::Instant::now(), } } } // impl Drop for ProfileTimer { // fn drop(&mut self) { // println!("{}: {:?}", self.label, self.start.elapsed()); // } // } impl<B> SimpleGraphicsPipelineDesc<B, Scene<B>> for MeshRenderPipelineDesc where B: hal::Backend, { type Pipeline = MeshRenderPipeline<B>; fn load_shader_set( &self, factory: &mut Factory<B>, _scene: &Scene<B>, ) -> rendy_shader::ShaderSet<B> { SHADERS.build(factory, Default::default()).unwrap() } fn vertices( &self, ) -> Vec<( Vec<hal::pso::Element<hal::format::Format>>, hal::pso::ElemStride, hal::pso::VertexInputRate, )> { return vec![ SHADER_REFLECTION .attributes(&["position"]) .unwrap() .gfx_vertex_input_desc(hal::pso::VertexInputRate::Vertex), SHADER_REFLECTION .attributes(&["translate", "dir"]) .unwrap() .gfx_vertex_input_desc(hal::pso::VertexInputRate::Instance(1)), SHADER_REFLECTION .attributes(&["color", "pad"]) .unwrap() .gfx_vertex_input_desc(hal::pso::VertexInputRate::Instance(1)), ]; } fn layout(&self) -> Layout { return SHADER_REFLECTION.layout().unwrap(); } fn build<'a>( self, ctx: &GraphContext<B>, factory: &mut Factory<B>, _queue: QueueId, scene: &Scene<B>, buffers: Vec<NodeBuffer>, images: Vec<NodeImage>, set_layouts: &[Handle<DescriptorSetLayout<B>>], ) -> Result<MeshRenderPipeline<B>, rendy_core::hal::pso::CreationError> { assert!(buffers.is_empty()); assert!(images.is_empty()); assert_eq!(set_layouts.len(), 1); let frames = ctx.frames_in_flight as _; let align = factory .physical() .limits() .min_uniform_buffer_offset_alignment; let mut buffer = factory .create_buffer( BufferInfo { size: buffer_size(align, frames) as u64, usage: hal::buffer::Usage::UNIFORM | hal::buffer::Usage::INDIRECT | hal::buffer::Usage::VERTEX, }, Dynamic, ) .unwrap(); let mut sets = Vec::new(); for index in 0..frames { unsafe { let set = factory .create_descriptor_set(set_layouts[0].clone()) .unwrap(); factory.write_descriptor_sets(Some(hal::pso::DescriptorSetWrite { set: set.raw(), binding: 0, array_offset: 0, descriptors: Some(hal::pso::Descriptor::Buffer( buffer.raw(), Some(uniform_offset(index as usize, align)) ..Some(uniform_offset(index as usize, align) + UNIFORM_SIZE), )), })); sets.push(set); } } if!scene.per_instance_const.is_empty() { // println!( // "upload const: {}", // std::mem::size_of::<PerInstanceConst>() * scene.per_instance_const.len() // ); unsafe { factory .upload_visible_buffer(&mut buffer, 0, &scene.per_instance_const[..]) .expect("update const buffer failed") }; } Ok(MeshRenderPipeline { align, buffer, sets, }) } } fn model_transform() -> nalgebra::Matrix4<f32> { let rot = nalgebra::UnitQuaternion::identity(); nalgebra::Similarity3::from_parts(Vector3::new(0.5, 0.5, 0.0).into(), rot, 0.5).into() } fn model_transform2() -> [nalgebra::Matrix4<f32>; 6] { let z_pos = nalgebra::UnitQuaternion::identity(); let z_neg = nalgebra::UnitQuaternion::face_towards( &Vector3::new(0.0, 0.0, -1.0), &Vector3::new(0.0, 1.0, 0.0), ); let x_pos = 
nalgebra::UnitQuaternion::face_towards( &Vector3::new(1.0, 0.0, 0.0), &Vector3::new(0.0, 1.0, 0.0), ); let x_neg = nalgebra::UnitQuaternion::face_towards( &Vector3::new(-1.0, 0.0, 0.0), &Vector3::new(0.0, 1.0, 0.0), ); let y_pos = nalgebra::UnitQuaternion::face_towards( &Vector3::new(0.0, 1.0, 0.0), &Vector3::new(0.0, 0.0, 1.0), ); let y_neg = nalgebra::UnitQuaternion::face_towards( &Vector3::new(0.0, -1.0, 0.0), &Vector3::new(0.0, 0.0, -1.0), ); // let unit = 0.125; let unit = 0.125; let scale = 0.125; [ nalgebra::Similarity3::from_parts(Vector3::new(0.0, 0.0, unit).into(), z_pos, scale).into(), nalgebra::Similarity3::from_parts(Vector3::new(0.0, 0.0, -unit).into(), z_neg, scale) .into(), nalgebra::Similarity3::from_parts(Vector3::new(unit, 0.0, 0.0).into(), x_pos, scale).into(), nalgebra::Similarity3::from_parts(Vector3::new(-unit, 0.0, 0.0).into(), x_neg, scale) .into(), nalgebra::Similarity3::from_parts(Vector3::new(0.0, unit, 0.0).into(), y_pos, scale).into(), nalgebra::Similarity3::from_parts(Vector3::new(0.0, -unit, 0.0).into(), y_neg, scale) .into(), ] } impl<B> SimpleGraphicsPipeline<B, Scene<B>> for MeshRenderPipeline<B> where B: hal::Backend, { type Desc = MeshRenderPipelineDesc; fn prepare( &mut self, factory: &Factory<B>, _queue: QueueId, _set_layouts: &[Handle<DescriptorSetLayout<B>>], index: usize, scene: &Scene<B>, ) -> PrepareResult { let pt = ProfileTimer::start("prepare"); // println!("index: {}", index); // println!( // "upload uniform {}: {}", // index, // std::mem::size_of::<UniformArgs>() // ); unsafe { factory .upload_visible_buffer( &mut self.buffer, uniform_offset(index, self.align), &[UniformArgs { // proj: scene.camera.proj.to_homogeneous(), proj: scene.camera.proj, view: scene.camera.view.to_homogeneous(), model: model_transform2(), }], ) .unwrap() }; // { // let per_instance = &scene.per_instance[..]; // println!( // "upload dyn {}: {}", // index, // // std::mem::size_of::<PerInstance>() * scene.per_instance.len(), // std::mem::size_of_val(per_instance) // ); // } if!scene.per_instance.is_empty() { unsafe { factory .upload_visible_buffer( &mut self.buffer, per_instance_offset(index, self.align), &scene.per_instance[..], ) .unwrap() }; } PrepareResult::DrawReuse } fn draw( &mut self, layout: &B::PipelineLayout, mut encoder: RenderPassEncoder<'_, B>, index: usize, scene: &Scene<B>, ) { println!("draw"); unsafe { encoder.bind_graphics_descriptor_sets( layout, 0, Some(self.sets[index].raw()), std::iter::empty(), ); let vertex = [SHADER_REFLECTION.attributes(&["position"]).unwrap()]; scene .object_mesh .as_ref() .unwrap() .bind(0, &vertex, &mut encoder) .unwrap(); encoder.bind_vertex_buffers(1, std::iter::once((self.buffer.raw(), 0))); encoder.bind_vertex_buffers( 2, std::iter::once((self.buffer.raw(), per_instance_offset(index, self.align))), ); encoder.draw_indexed( 0..scene.object_mesh.as_ref().unwrap().len(), 0 as i32, 0..scene.per_instance.len() as u32, ) } } fn dispose(self, _factory: &mut Factory<B>, _scene: &Scene<B>) {} } fn main() { env_logger::Builder::from_default_env() .filter_module("meshes", log::LevelFilter::Trace) .init(); let mut event_loop = EventLoop::new(); let window = WindowBuilder::new() .with_inner_size((960, 640).into()) .with_title("Rendy example"); let config: Config = Default::default(); let rendy = AnyWindowedRendy::init_auto(&config, window, &event_loop).unwrap(); rendy::with_any_windowed_rendy!((rendy) use back; (mut factory, mut families, surface, window) => { let mut graph_builder = GraphBuilder::<Backend, Scene<Backend>>::new(); 
let size = window.inner_size().to_physical(window.hidpi_factor()); let window_kind = hal::image::Kind::D2(size.width as u32, size.height as u32, 1, 1); let aspect = size.width / size.height; let depth = graph_builder.create_image( window_kind, 1, hal::format::Format::D32Sfloat, Some(hal::command::ClearValue { depth_stencil: hal::command::ClearDepthStencil { depth: 1.0, stencil: 0, }, }), ); let pass = graph_builder.add_node( MeshRenderPipeline::builder() .into_subpass() .with_color_surface() .with_depth_stencil(depth) .into_pass() .with_surface( surface, hal::window::Extent2D { width: size.width as _, height: size.height as _, }, Some(hal::command::ClearValue { color: hal::command::ClearColor { float32: [0.5, 0.5, 1.0, 1.0], }, }), ), ); let bm = crystal::read_map("hidden_ramp.txt").expect("could not read file"); let mut planes = crystal::PlanesSep::new(); planes.create_planes(&bm); let planes_copy : Vec<crystal::Plane> = planes.planes_iter().cloned().collect(); let mut scene = Scene { camera: Camera { proj: nalgebra::Perspective3::new(aspect as f32, 3.1415 / 4.0, 1.0, 200.0) .to_homogeneous(), view: nalgebra::Projective3::identity() * nalgebra::Translation3::new(0.0, 0.0, 10.0), }, object_mesh: None, per_instance: vec![], per_instance_const: vec![], }; // let mut rng = rand::thread_rng(); // let col_dist = Uniform::new(0.5, 1.0); let mut rc = RandomColor::new(); rc.luminosity(random_color::Luminosity::Bright); println!("planes: {}", planes_copy.len()); for i in 0..std::cmp::min(NUM_INSTANCES as usize,planes_copy.len()) { let color = rc.to_rgb_array(); let point = planes_copy[i].cell; let dir = match planes_copy[i].dir { crystal::Dir::ZxPos => 4, crystal::Dir::ZxNeg => 5, crystal::Dir::YzPos => 2, crystal::Dir::YzNeg => 3, crystal::Dir::XyPos => 0, crystal::Dir::XyNeg => 1, }; scene.per_instance_const.push(PerInstanceConst{ translate: nalgebra::Vector3::new(point[0] as f32 * 0.25, point[1] as f32 * 0.25, point[2] as f32 * 0.25), dir : dir, }); scene.per_instance.push(PerInstance{ color : nalgebra::Vector3::new( color[0] as f32 / 255.0, color[1] as f32 / 255.0, color[2] as f32 / 255.0, ), pad : 0, }); } let graph = graph_builder .build(&mut factory, &mut families, &scene) .unwrap(); // let icosphere = genmesh::generators::IcoSphere::subdivide(3); // let icosphere = genmesh::generators::Torus::new(1f32, 0.5f32, 32, 32); let icosphere = genmesh::generators::Plane::new(); // icosphere. let indices: Vec<_> = genmesh::Vertices::vertices(icosphere.indexed_polygon_iter().triangulate()) .map(|i| i as u32) .collect(); println!("indices: {}", indices.len()); let vertices: Vec<_> = icosphere .shared_vertex_iter() .map(|v| Position(v.pos.into())) .collect(); println!("vertices: {}", vertices.len()); for v in &vertices { println!("vert: {:?}", v); } scene.object_mesh = Some( Mesh::<Backend>::builder() .with_indices(&indices[..]) .with_vertices(&vertices[..]) .build(graph.node_queue(pass), &factory) .unwrap(), ); let started = time::Instant::now(); let mut frames = 0u64..; // let rxy = Uniform::new(-1.0, 1.0); // let rz = Uniform::new(0.0, 185.0); let mut checkpoint = started; let mut player_state = player::State::new(); let mut event_manager = player::EventManager::new(); let mut graph = Some(graph); event_loop.run(move |event, _, control_flow| { *control_flow = ControlFlow::Poll; match event { Event::WindowEvent { event,.. } => match event { WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit, _ => event_manager.window_event(event)
uniform_offset
identifier_name
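// The missing identifier above is `uniform_offset`, one helper in a
// single-buffer layout: an aligned block of per-instance constants, then one
// aligned region per frame in flight holding the uniforms followed by the
// dynamic per-instance data. A standalone sketch with assumed sizes (the real
// ones come from size_of::<UniformArgs>() and friends; 512 here assumes
// proj + view + 6 model matrices at 64 bytes each):
const UNIFORM_SIZE: u64 = 512;
const PER_INSTANCE_CONST_SIZE: u64 = 16;
const PER_INSTANCE_SIZE: u64 = 16;
const NUM_INSTANCES: u64 = 1024;

const fn align_to(s: u64, align: u64) -> u64 {
    ((s - 1) / align + 1) * align
}
const fn buffer_const_size(align: u64) -> u64 {
    align_to(PER_INSTANCE_CONST_SIZE * NUM_INSTANCES, align)
}
const fn buffer_frame_size(align: u64) -> u64 {
    align_to(UNIFORM_SIZE + PER_INSTANCE_SIZE * NUM_INSTANCES, align)
}
const fn uniform_offset(index: usize, align: u64) -> u64 {
    buffer_const_size(align) + buffer_frame_size(align) * index as u64
}
const fn per_instance_offset(index: usize, align: u64) -> u64 {
    uniform_offset(index, align) + UNIFORM_SIZE
}

fn main() {
    let align = 256; // a typical min_uniform_buffer_offset_alignment
    for frame in 0..3usize {
        // each frame's uniform region must start on a device-aligned offset
        assert_eq!(uniform_offset(frame, align) % align, 0);
        println!(
            "frame {}: uniform @ {}, instances @ {}",
            frame,
            uniform_offset(frame, align),
            per_instance_offset(frame, align)
        );
    }
}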
catalog.rs
use crate::block::BlockType; use crate::error::*; use crate::mutator::append::Append; use crate::params::{SourceId, CATALOG_METADATA, TIMESTAMP_COLUMN}; use crate::scanner::{Scan, ScanResult}; use crate::storage::manager::PartitionGroupManager; use crate::ty::{BlockStorage, ColumnId, ColumnIndexStorageMap}; use hyena_common::collections::HashMap; use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; use std::default::Default; use std::iter::FromIterator; use std::path::{Path, PathBuf}; use super::column::Column; use super::partition_group::PartitionGroup; use super::{ColumnMap, PartitionGroupMap}; #[derive(Debug, Serialize, Deserialize)] pub struct Catalog<'cat> { pub(crate) colmap: ColumnMap, pub(crate) groups: PartitionGroupMap<'cat>, pub(crate) indexes: ColumnIndexStorageMap, #[serde(skip)] pub(crate) data_root: PathBuf, } impl<'cat> Catalog<'cat> { pub fn new<P: AsRef<Path>>(root: P) -> Result<Catalog<'cat>> { let root = root.as_ref().to_path_buf(); let meta = root.join(CATALOG_METADATA); if meta.exists() { bail!("Catalog metadata already exists {:?}", meta); } let mut catalog = Catalog { colmap: Default::default(), groups: Default::default(), indexes: Default::default(), data_root: root, }; catalog.ensure_default_columns()?; Ok(catalog) } fn ensure_default_columns(&mut self) -> Result<()> { let ts_column = Column::new(BlockStorage::Memmap(BlockType::U64Dense), "timestamp"); let source_column = Column::new(BlockStorage::Memory(BlockType::I32Dense), "source_id"); let mut map = HashMap::new(); map.insert(TIMESTAMP_COLUMN, ts_column); map.insert(1, source_column); self.ensure_columns(map) } pub fn with_data<P: AsRef<Path>>(root: P) -> Result<Catalog<'cat>> { let root = root.as_ref().to_path_buf(); let meta = root.join(CATALOG_METADATA); Catalog::deserialize(&meta, &root) } pub fn open_or_create<P: AsRef<Path>>(root: P) -> Result<Catalog<'cat>> { Catalog::with_data(root.as_ref()).or_else(|_| Catalog::new(root.as_ref())) } pub fn columns(&self) -> &ColumnMap { &self.colmap } #[cfg(feature = "validate_append")] fn validate_append(&self, data: &Append) -> bool { let ts_len = data.ts.len(); data.data.iter().all(|(_col, fragment)| { // check fragment length for dense blocks if!fragment.is_sparse() { if ts_len!= fragment.len() { error!("Dense append fragment has different length than ts"); return false; } } else { if ts_len > fragment.len() { error!("Sparse append fragment longer than ts"); return false; } if fragment.iter().any(|(idx, _)| idx >= ts_len) { error!("Sparse append fragment has index greater than ts length"); return false; } } true }) } pub fn append(&self, data: &Append) -> Result<usize> { if data.is_empty() { bail!("Provided Append contains no data"); } #[cfg(feature = "validate_append")] { if!self.validate_append(&data) { bail!("Provided Append is not consistent"); } } // dispatch to proper PartitionGroup if let Some(pg) = self.groups.get(&data.source_id) { pg.append(&self, &data) } else { bail!("No PartitionGroup found for source_id = {}", data.source_id); } } pub fn scan(&self, scan: &Scan) -> Result<ScanResult> { let all_groups = if scan.groups.is_some() { None } else { Some(self.groups.keys().cloned().collect::<Vec<_>>()) }; if scan.groups.is_some() { scan.groups.as_ref().unwrap() } else { all_groups.as_ref().unwrap() } .chunks(2) .map(|group| { group .par_iter() .filter_map(|pgid| self.groups.get(pgid)) .map(|pg| pg.scan(&self, &scan)) // todo: this would potentially be better with some short-circuiting combinator // instead // need to bench with collect_into() 
.reduce( || Ok(ScanResult::merge_identity()), |a, b| { let mut a = a?; let b = b?; a.merge(b)?; Ok(a) }, ) }) .fold(Ok(ScanResult::merge_identity()), |a, b| { let mut a = a?; let b = b?; a.merge(b)?; Ok(a) }) } pub fn flush(&self) -> Result<()> { // TODO: add dirty flag let meta = self.data_root.join(CATALOG_METADATA); for pg in self.groups.values() { pg.flush()? } Catalog::serialize(self, &meta) } /// Extend internal column map without any sanitization checks. /// /// This function uses `std::iter::Extend` internally, /// so it allows redefinition of a column type. /// Use this feature with great caution. pub(crate) fn ensure_columns(&mut self, type_map: ColumnMap) -> Result<()> { self.colmap.extend(type_map); Ok(()) } /// Adds a column to the catalog. It verifies that the catalog does not already contain: /// a) column with the given id, or /// b) column with the given name. /// This function takes an all-or-nothing approach: /// either all columns are added, or no changes are applied. pub fn add_columns(&mut self, column_map: ColumnMap) -> Result<()> { for (id, column) in column_map.iter() { info!( "Adding column {}:{:?} with id {}", column.name, column.ty, id ); if self.colmap.contains_key(id) { bail!("Column Id already exists {}", *id); } if self.colmap.values().any(|col| col.name == column.name) { bail!("Column Name already exists '{}'", column.name); } } self.ensure_columns(column_map) } /// Extend internal index map without any sanitization checks. /// /// This function uses `std::iter::Extend` internally, /// so it allows redefinition of an index type. /// Also, the index's support for a given column is not checked. /// Use this feature with great caution. pub(crate) fn ensure_indexes(&mut self, index_map: ColumnIndexStorageMap) -> Result<()> { self.indexes.extend(&*index_map); Ok(()) } /// Adds an index to the catalog. It verifies that the catalog does not already contain: /// a) index for a column with the given id, or /// b) index for a column with the given name. /// This function takes an all-or-nothing approach: /// either all indexes are added, or no changes are applied. 
pub fn add_indexes(&mut self, index_map: ColumnIndexStorageMap) -> Result<()> { for (id, index) in index_map.iter() { let column = self .colmap .get(id) .ok_or_else(|| err_msg(format!("column not found {}", id)))?; info!( "Adding index {:?} for column {}[{}]:{:?}", index, column.name, id, column.ty ); if self.indexes.contains_key(id) { bail!("Index already exists {}", *id); } } self.ensure_indexes(index_map) } /// Fetch the first non-occupied column index /// /// todo: rethink this approach (max() every time) pub fn next_id(&self) -> usize { let default = 0; *self.colmap.keys().max().unwrap_or(&default) + 1 } /// Calculate an empty partition's capacity for given column set pub(super) fn space_for_blocks<'iter>( &self, indices: impl Iterator<Item = &'iter ColumnId>, ) -> usize { use crate::params::BLOCK_SIZE; indices .filter_map(|col_id| { if let Some(column) = self.colmap.get(col_id) { Some(BLOCK_SIZE / column.size_of()) } else { None } }) .min() // the default shouldn't ever happen, as there always should be ts block // but in case it happens, this will return 0 // which in turn will cause new partition to be used .unwrap_or_default() } pub(crate) fn ensure_group( &mut self, source_id: SourceId, ) -> Result<&mut PartitionGroup<'cat>> { let data_root = <_ as AsRef<Path>>::as_ref(&self.data_root); Ok(self.groups.entry(source_id).or_insert_with(|| { // this shouldn't fail in general let root = PartitionGroupManager::new(data_root, source_id) .with_context(|_| "Failed to create group manager") .unwrap(); let pg = PartitionGroup::new(&root, source_id) .with_context(|_| "Unable to create partition group") .unwrap(); pg.flush().unwrap(); pg })) } /// Add new partition group with given source id pub fn add_partition_group(&mut self, source_id: SourceId) -> Result<()> { let _ = self.ensure_group(source_id)?; Ok(()) } fn prepare_partition_groups<P, I>(root: P, ids: I) -> Result<PartitionGroupMap<'cat>> where P: AsRef<Path>, I: IntoIterator<Item = SourceId>, { ids.into_iter() .map(|source_id| { let path = PartitionGroupManager::new(&root, source_id).with_context(|_| { format!( "Unable to obtain data directory for partition group {}", source_id ) })?; let partition_group = PartitionGroup::with_data(path) .with_context(|_| format!("Unable to read partition group {:?}", source_id))?; Ok((source_id, partition_group)) }) .collect() } fn serialize<P: AsRef<Path>>(catalog: &Catalog<'cat>, meta: P) -> Result<()> { let meta = meta.as_ref(); let group_metas = Vec::from_iter(catalog.groups.keys()); let data = (catalog, group_metas); serialize!(file meta, &data) .with_context(|_| "Failed to serialize catalog metadata") .map_err(|e| e.into()) } fn deserialize<P: AsRef<Path>, R: AsRef<Path>>(meta: P, root: R) -> Result<Catalog<'cat>> {
bail!("Cannot find catalog metadata {:?}", meta); } let (mut catalog, group_metas): (Catalog, Vec<SourceId>) = deserialize!(file meta).with_context(|_| "Failed to read catalog metadata")?; catalog.groups = Catalog::prepare_partition_groups(&root, group_metas) .with_context(|_| "Failed to read partition data")?; catalog.data_root = root.as_ref().to_path_buf(); Ok(catalog) } } impl<'cat> Drop for Catalog<'cat> { fn drop(&mut self) { self.flush() .with_context(|_| "Failed to flush data during drop") .unwrap(); } } impl<'cat> AsRef<ColumnMap> for Catalog<'cat> { fn as_ref(&self) -> &ColumnMap { &self.colmap } } #[cfg(test)] mod tests { use super::*; use crate::datastore::tests::create_random_partitions; #[test] fn new() { let source_ids = [1, 5, 7]; let im_part_count = 8; let mut_part_count = 2; let root = tempdir!(); let mut cat = Catalog::new(&root) .with_context(|_| "Unable to create catalog") .unwrap(); for source_id in &source_ids { let pg = cat .ensure_group(*source_id) .with_context(|_| "Unable to retrieve partition group") .unwrap(); create_random_partitions(pg, im_part_count, mut_part_count); } } #[test] fn add_partition_group_idempotence() { let root = tempdir!(); let mut cat = Catalog::new(&root) .with_context(|_| "Unable to create catalog") .unwrap(); const PG_ID: SourceId = 10; cat.add_partition_group(PG_ID).unwrap(); cat.add_partition_group(PG_ID).unwrap(); assert_eq!(cat.groups.len(), 1); assert_eq!( cat.groups .iter() .nth(0) .expect("partition group not found") .0, &PG_ID ); } }
let meta = meta.as_ref(); if !meta.exists() {
random_line_split
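// scan() above combines per-group results by folding Result values: the first
// Err short-circuits through `?`, otherwise partial results are merged into an
// identity element. A toy standalone version, with Vec<u64> standing in for
// ScanResult (these names are illustrative, not the crate's API):
fn merge_identity() -> Vec<u64> {
    Vec::new()
}

fn merge_all<I>(parts: I) -> Result<Vec<u64>, String>
where
    I: IntoIterator<Item = Result<Vec<u64>, String>>,
{
    parts.into_iter().fold(Ok(merge_identity()), |acc, part| {
        let mut acc = acc?;          // propagate an earlier failure
        acc.extend(part?);           // or merge this partial result in
        Ok(acc)
    })
}

fn main() {
    assert_eq!(merge_all(vec![Ok(vec![1, 2]), Ok(vec![3])]), Ok(vec![1, 2, 3]));
    assert!(merge_all(vec![Ok(vec![1]), Err("scan failed".into())]).is_err());
}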
catalog.rs
use crate::block::BlockType; use crate::error::*; use crate::mutator::append::Append; use crate::params::{SourceId, CATALOG_METADATA, TIMESTAMP_COLUMN}; use crate::scanner::{Scan, ScanResult}; use crate::storage::manager::PartitionGroupManager; use crate::ty::{BlockStorage, ColumnId, ColumnIndexStorageMap}; use hyena_common::collections::HashMap; use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; use std::default::Default; use std::iter::FromIterator; use std::path::{Path, PathBuf}; use super::column::Column; use super::partition_group::PartitionGroup; use super::{ColumnMap, PartitionGroupMap}; #[derive(Debug, Serialize, Deserialize)] pub struct Catalog<'cat> { pub(crate) colmap: ColumnMap, pub(crate) groups: PartitionGroupMap<'cat>, pub(crate) indexes: ColumnIndexStorageMap, #[serde(skip)] pub(crate) data_root: PathBuf, } impl<'cat> Catalog<'cat> { pub fn new<P: AsRef<Path>>(root: P) -> Result<Catalog<'cat>> { let root = root.as_ref().to_path_buf(); let meta = root.join(CATALOG_METADATA); if meta.exists() { bail!("Catalog metadata already exists {:?}", meta); } let mut catalog = Catalog { colmap: Default::default(), groups: Default::default(), indexes: Default::default(), data_root: root, }; catalog.ensure_default_columns()?; Ok(catalog) } fn ensure_default_columns(&mut self) -> Result<()> { let ts_column = Column::new(BlockStorage::Memmap(BlockType::U64Dense), "timestamp"); let source_column = Column::new(BlockStorage::Memory(BlockType::I32Dense), "source_id"); let mut map = HashMap::new(); map.insert(TIMESTAMP_COLUMN, ts_column); map.insert(1, source_column); self.ensure_columns(map) } pub fn with_data<P: AsRef<Path>>(root: P) -> Result<Catalog<'cat>> { let root = root.as_ref().to_path_buf(); let meta = root.join(CATALOG_METADATA); Catalog::deserialize(&meta, &root) } pub fn open_or_create<P: AsRef<Path>>(root: P) -> Result<Catalog<'cat>> { Catalog::with_data(root.as_ref()).or_else(|_| Catalog::new(root.as_ref())) } pub fn columns(&self) -> &ColumnMap { &self.colmap } #[cfg(feature = "validate_append")] fn validate_append(&self, data: &Append) -> bool { let ts_len = data.ts.len(); data.data.iter().all(|(_col, fragment)| { // check fragment length for dense blocks if!fragment.is_sparse() { if ts_len!= fragment.len() { error!("Dense append fragment has different length than ts"); return false; } } else { if ts_len > fragment.len() { error!("Sparse append fragment longer than ts"); return false; } if fragment.iter().any(|(idx, _)| idx >= ts_len) { error!("Sparse append fragment has index greater than ts length"); return false; } } true }) } pub fn append(&self, data: &Append) -> Result<usize> { if data.is_empty() { bail!("Provided Append contains no data"); } #[cfg(feature = "validate_append")] { if!self.validate_append(&data) { bail!("Provided Append is not consistent"); } } // dispatch to proper PartitionGroup if let Some(pg) = self.groups.get(&data.source_id) { pg.append(&self, &data) } else { bail!("No PartitionGroup found for source_id = {}", data.source_id); } } pub fn scan(&self, scan: &Scan) -> Result<ScanResult> { let all_groups = if scan.groups.is_some() { None } else { Some(self.groups.keys().cloned().collect::<Vec<_>>()) }; if scan.groups.is_some() { scan.groups.as_ref().unwrap() } else { all_groups.as_ref().unwrap() } .chunks(2) .map(|group| { group .par_iter() .filter_map(|pgid| self.groups.get(pgid)) .map(|pg| pg.scan(&self, &scan)) // todo: this would potentially be better with some short-circuiting combinator // instead // need to bench with collect_into() 
.reduce( || Ok(ScanResult::merge_identity()), |a, b| { let mut a = a?; let b = b?; a.merge(b)?; Ok(a) }, ) }) .fold(Ok(ScanResult::merge_identity()), |a, b| { let mut a = a?; let b = b?; a.merge(b)?; Ok(a) }) } pub fn flush(&self) -> Result<()> { // TODO: add dirty flag let meta = self.data_root.join(CATALOG_METADATA); for pg in self.groups.values() { pg.flush()? } Catalog::serialize(self, &meta) } /// Extend internal column map without any sanitization checks. /// /// This function uses `std::iter::Extend` internally, /// so it allows redefinition of a column type. /// Use this feature with great caution. pub(crate) fn ensure_columns(&mut self, type_map: ColumnMap) -> Result<()> { self.colmap.extend(type_map); Ok(()) } /// Adds a column to the catalog. It verifies that the catalog does not already contain: /// a) column with the given id, or /// b) column with the given name. /// This function takes an all-or-nothing approach: /// either all columns are added, or no changes are applied. pub fn add_columns(&mut self, column_map: ColumnMap) -> Result<()> { for (id, column) in column_map.iter() { info!( "Adding column {}:{:?} with id {}", column.name, column.ty, id ); if self.colmap.contains_key(id) { bail!("Column Id already exists {}", *id); } if self.colmap.values().any(|col| col.name == column.name) { bail!("Column Name already exists '{}'", column.name); } } self.ensure_columns(column_map) } /// Extend internal index map without any sanitization checks. /// /// This function uses `std::iter::Extend` internally, /// so it allows redefinition of an index type. /// Also, the index's support for a given column is not checked. /// Use this feature with great caution. pub(crate) fn ensure_indexes(&mut self, index_map: ColumnIndexStorageMap) -> Result<()> { self.indexes.extend(&*index_map); Ok(()) } /// Adds an index to the catalog. It verifies that the catalog does not already contain: /// a) index for a column with the given id, or /// b) index for a column with the given name. /// This function takes an all-or-nothing approach: /// either all indexes are added, or no changes are applied. 
pub fn add_indexes(&mut self, index_map: ColumnIndexStorageMap) -> Result<()> { for (id, index) in index_map.iter() { let column = self .colmap .get(id) .ok_or_else(|| err_msg(format!("column not found {}", id)))?; info!( "Adding index {:?} for column {}[{}]:{:?}", index, column.name, id, column.ty ); if self.indexes.contains_key(id) { bail!("Index already exists {}", *id); } } self.ensure_indexes(index_map) } /// Fetch the first non-occupied column index /// /// todo: rethink this approach (max() every time) pub fn next_id(&self) -> usize { let default = 0; *self.colmap.keys().max().unwrap_or(&default) + 1 } /// Calculate an empty partition's capacity for given column set pub(super) fn space_for_blocks<'iter>( &self, indices: impl Iterator<Item = &'iter ColumnId>, ) -> usize { use crate::params::BLOCK_SIZE; indices .filter_map(|col_id| { if let Some(column) = self.colmap.get(col_id) { Some(BLOCK_SIZE / column.size_of()) } else { None } }) .min() // the default shouldn't ever happen, as there always should be ts block // but in case it happens, this will return 0 // which in turn will cause new partition to be used .unwrap_or_default() } pub(crate) fn ensure_group( &mut self, source_id: SourceId, ) -> Result<&mut PartitionGroup<'cat>> { let data_root = <_ as AsRef<Path>>::as_ref(&self.data_root); Ok(self.groups.entry(source_id).or_insert_with(|| { // this shouldn't fail in general let root = PartitionGroupManager::new(data_root, source_id) .with_context(|_| "Failed to create group manager") .unwrap(); let pg = PartitionGroup::new(&root, source_id) .with_context(|_| "Unable to create partition group") .unwrap(); pg.flush().unwrap(); pg })) } /// Add new partition group with given source id pub fn add_partition_group(&mut self, source_id: SourceId) -> Result<()>
fn prepare_partition_groups<P, I>(root: P, ids: I) -> Result<PartitionGroupMap<'cat>> where P: AsRef<Path>, I: IntoIterator<Item = SourceId>, { ids.into_iter() .map(|source_id| { let path = PartitionGroupManager::new(&root, source_id).with_context(|_| { format!( "Unable to obtain data directory for partition group {}", source_id ) })?; let partition_group = PartitionGroup::with_data(path) .with_context(|_| format!("Unable to read partition group {:?}", source_id))?; Ok((source_id, partition_group)) }) .collect() } fn serialize<P: AsRef<Path>>(catalog: &Catalog<'cat>, meta: P) -> Result<()> { let meta = meta.as_ref(); let group_metas = Vec::from_iter(catalog.groups.keys()); let data = (catalog, group_metas); serialize!(file meta, &data) .with_context(|_| "Failed to serialize catalog metadata") .map_err(|e| e.into()) } fn deserialize<P: AsRef<Path>, R: AsRef<Path>>(meta: P, root: R) -> Result<Catalog<'cat>> { let meta = meta.as_ref(); if!meta.exists() { bail!("Cannot find catalog metadata {:?}", meta); } let (mut catalog, group_metas): (Catalog, Vec<SourceId>) = deserialize!(file meta).with_context(|_| "Failed to read catalog metadata")?; catalog.groups = Catalog::prepare_partition_groups(&root, group_metas) .with_context(|_| "Failed to read partition data")?; catalog.data_root = root.as_ref().to_path_buf(); Ok(catalog) } } impl<'cat> Drop for Catalog<'cat> { fn drop(&mut self) { self.flush() .with_context(|_| "Failed to flush data during drop") .unwrap(); } } impl<'cat> AsRef<ColumnMap> for Catalog<'cat> { fn as_ref(&self) -> &ColumnMap { &self.colmap } } #[cfg(test)] mod tests { use super::*; use crate::datastore::tests::create_random_partitions; #[test] fn new() { let source_ids = [1, 5, 7]; let im_part_count = 8; let mut_part_count = 2; let root = tempdir!(); let mut cat = Catalog::new(&root) .with_context(|_| "Unable to create catalog") .unwrap(); for source_id in &source_ids { let pg = cat .ensure_group(*source_id) .with_context(|_| "Unable to retrieve partition group") .unwrap(); create_random_partitions(pg, im_part_count, mut_part_count); } } #[test] fn add_partition_group_idempotence() { let root = tempdir!(); let mut cat = Catalog::new(&root) .with_context(|_| "Unable to create catalog") .unwrap(); const PG_ID: SourceId = 10; cat.add_partition_group(PG_ID).unwrap(); cat.add_partition_group(PG_ID).unwrap(); assert_eq!(cat.groups.len(), 1); assert_eq!( cat.groups .iter() .nth(0) .expect("partition group not found") .0, &PG_ID ); } }
{ let _ = self.ensure_group(source_id)?; Ok(()) }
identifier_body
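// The hole above is add_partition_group's body, which just calls ensure_group
// and drops the returned reference. ensure_group relies on the HashMap Entry
// API so each group is created at most once, which is what makes
// add_partition_group idempotent (the add_partition_group_idempotence test in
// the suffix exercises exactly that). A std-only sketch of the pattern:
use std::collections::HashMap;

fn ensure_group(groups: &mut HashMap<u32, String>, source_id: u32) -> &mut String {
    groups
        .entry(source_id)
        // only runs when source_id is absent; the real code builds a
        // PartitionGroup here (and unwraps, since the closure cannot fail)
        .or_insert_with(|| format!("group-{}", source_id))
}

fn main() {
    let mut groups = HashMap::new();
    ensure_group(&mut groups, 10);
    ensure_group(&mut groups, 10); // second call reuses the existing entry
    assert_eq!(groups.len(), 1);
}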
catalog.rs
use crate::block::BlockType; use crate::error::*; use crate::mutator::append::Append; use crate::params::{SourceId, CATALOG_METADATA, TIMESTAMP_COLUMN}; use crate::scanner::{Scan, ScanResult}; use crate::storage::manager::PartitionGroupManager; use crate::ty::{BlockStorage, ColumnId, ColumnIndexStorageMap}; use hyena_common::collections::HashMap; use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; use std::default::Default; use std::iter::FromIterator; use std::path::{Path, PathBuf}; use super::column::Column; use super::partition_group::PartitionGroup; use super::{ColumnMap, PartitionGroupMap}; #[derive(Debug, Serialize, Deserialize)] pub struct Catalog<'cat> { pub(crate) colmap: ColumnMap, pub(crate) groups: PartitionGroupMap<'cat>, pub(crate) indexes: ColumnIndexStorageMap, #[serde(skip)] pub(crate) data_root: PathBuf, } impl<'cat> Catalog<'cat> { pub fn new<P: AsRef<Path>>(root: P) -> Result<Catalog<'cat>> { let root = root.as_ref().to_path_buf(); let meta = root.join(CATALOG_METADATA); if meta.exists() { bail!("Catalog metadata already exists {:?}", meta); } let mut catalog = Catalog { colmap: Default::default(), groups: Default::default(), indexes: Default::default(), data_root: root, }; catalog.ensure_default_columns()?; Ok(catalog) } fn
(&mut self) -> Result<()> { let ts_column = Column::new(BlockStorage::Memmap(BlockType::U64Dense), "timestamp"); let source_column = Column::new(BlockStorage::Memory(BlockType::I32Dense), "source_id"); let mut map = HashMap::new(); map.insert(TIMESTAMP_COLUMN, ts_column); map.insert(1, source_column); self.ensure_columns(map) } pub fn with_data<P: AsRef<Path>>(root: P) -> Result<Catalog<'cat>> { let root = root.as_ref().to_path_buf(); let meta = root.join(CATALOG_METADATA); Catalog::deserialize(&meta, &root) } pub fn open_or_create<P: AsRef<Path>>(root: P) -> Result<Catalog<'cat>> { Catalog::with_data(root.as_ref()).or_else(|_| Catalog::new(root.as_ref())) } pub fn columns(&self) -> &ColumnMap { &self.colmap } #[cfg(feature = "validate_append")] fn validate_append(&self, data: &Append) -> bool { let ts_len = data.ts.len(); data.data.iter().all(|(_col, fragment)| { // check fragment length for dense blocks if!fragment.is_sparse() { if ts_len!= fragment.len() { error!("Dense append fragment has different length than ts"); return false; } } else { if ts_len > fragment.len() { error!("Sparse append fragment longer than ts"); return false; } if fragment.iter().any(|(idx, _)| idx >= ts_len) { error!("Sparse append fragment has index greater than ts length"); return false; } } true }) } pub fn append(&self, data: &Append) -> Result<usize> { if data.is_empty() { bail!("Provided Append contains no data"); } #[cfg(feature = "validate_append")] { if!self.validate_append(&data) { bail!("Provided Append is not consistent"); } } // dispatch to proper PartitionGroup if let Some(pg) = self.groups.get(&data.source_id) { pg.append(&self, &data) } else { bail!("No PartitionGroup found for source_id = {}", data.source_id); } } pub fn scan(&self, scan: &Scan) -> Result<ScanResult> { let all_groups = if scan.groups.is_some() { None } else { Some(self.groups.keys().cloned().collect::<Vec<_>>()) }; if scan.groups.is_some() { scan.groups.as_ref().unwrap() } else { all_groups.as_ref().unwrap() } .chunks(2) .map(|group| { group .par_iter() .filter_map(|pgid| self.groups.get(pgid)) .map(|pg| pg.scan(&self, &scan)) // todo: this would potentially be better with some short-circuiting combinator // instead // need to bench with collect_into() .reduce( || Ok(ScanResult::merge_identity()), |a, b| { let mut a = a?; let b = b?; a.merge(b)?; Ok(a) }, ) }) .fold(Ok(ScanResult::merge_identity()), |a, b| { let mut a = a?; let b = b?; a.merge(b)?; Ok(a) }) } pub fn flush(&self) -> Result<()> { // TODO: add dirty flag let meta = self.data_root.join(CATALOG_METADATA); for pg in self.groups.values() { pg.flush()? } Catalog::serialize(self, &meta) } /// Extend internal column map without any sanitization checks. /// /// This function uses `std::iter::Extend` internally, /// so it allows redefinition of a column type. /// Use this feature with great caution. pub(crate) fn ensure_columns(&mut self, type_map: ColumnMap) -> Result<()> { self.colmap.extend(type_map); Ok(()) } /// Adds a column to the catalog. It verifies that catalog does not already contain: /// a) column with the given id, or /// b) column with the given name. /// This function takes all-or-nothing approach: /// either all columns are added, or no changes are applied. 
pub fn add_columns(&mut self, column_map: ColumnMap) -> Result<()> { for (id, column) in column_map.iter() { info!( "Adding column {}:{:?} with id {}", column.name, column.ty, id ); if self.colmap.contains_key(id) { bail!("Column Id already exists {}", *id); } if self.colmap.values().any(|col| col.name == column.name) { bail!("Column Name already exists '{}'", column.name); } } self.ensure_columns(column_map) } /// Extend internal index map without any sanitization checks. /// /// This function uses `std::iter::Extend` internally, /// so it allows redefinition of a index type. /// Also, the index' support for a given column is not checked. /// Use this feature with great caution. pub(crate) fn ensure_indexes(&mut self, index_map: ColumnIndexStorageMap) -> Result<()> { self.indexes.extend(&*index_map); Ok(()) } /// Adds index to the catalog. Verifies that catalog does not already contain: /// a) index for a column with the given id, or /// b) index for a column with the given name. /// This function takes all-or-nothing approach: /// either all indexes are added, or no changes are applied. pub fn add_indexes(&mut self, index_map: ColumnIndexStorageMap) -> Result<()> { for (id, index) in index_map.iter() { let column = self .colmap .get(id) .ok_or_else(|| err_msg(format!("column not found {}", id)))?; info!( "Adding index {:?} for column {}[{}]:{:?}", index, column.name, id, column.ty ); if self.indexes.contains_key(id) { bail!("Index already exists {}", *id); } } self.ensure_indexes(index_map) } /// Fetch the first non-occupied column index /// /// todo: rethink this approach (max() every time) pub fn next_id(&self) -> usize { let default = 0; *self.colmap.keys().max().unwrap_or(&default) + 1 } /// Calculate an empty partition's capacity for given column set pub(super) fn space_for_blocks<'iter>( &self, indices: impl Iterator<Item = &'iter ColumnId>, ) -> usize { use crate::params::BLOCK_SIZE; indices .filter_map(|col_id| { if let Some(column) = self.colmap.get(col_id) { Some(BLOCK_SIZE / column.size_of()) } else { None } }) .min() // the default shouldn't ever happen, as there always should be ts block // but in case it happens, this will return 0 // which in turn will cause new partition to be used .unwrap_or_default() } pub(crate) fn ensure_group( &mut self, source_id: SourceId, ) -> Result<&mut PartitionGroup<'cat>> { let data_root = <_ as AsRef<Path>>::as_ref(&self.data_root); Ok(self.groups.entry(source_id).or_insert_with(|| { // this shouldn't fail in general let root = PartitionGroupManager::new(data_root, source_id) .with_context(|_| "Failed to create group manager") .unwrap(); let pg = PartitionGroup::new(&root, source_id) .with_context(|_| "Unable to create partition group") .unwrap(); pg.flush().unwrap(); pg })) } /// Add new partition group with given source id pub fn add_partition_group(&mut self, source_id: SourceId) -> Result<()> { let _ = self.ensure_group(source_id)?; Ok(()) } fn prepare_partition_groups<P, I>(root: P, ids: I) -> Result<PartitionGroupMap<'cat>> where P: AsRef<Path>, I: IntoIterator<Item = SourceId>, { ids.into_iter() .map(|source_id| { let path = PartitionGroupManager::new(&root, source_id).with_context(|_| { format!( "Unable to obtain data directory for partition group {}", source_id ) })?; let partition_group = PartitionGroup::with_data(path) .with_context(|_| format!("Unable to read partition group {:?}", source_id))?; Ok((source_id, partition_group)) }) .collect() } fn serialize<P: AsRef<Path>>(catalog: &Catalog<'cat>, meta: P) -> Result<()> { let meta 
= meta.as_ref(); let group_metas = Vec::from_iter(catalog.groups.keys()); let data = (catalog, group_metas); serialize!(file meta, &data) .with_context(|_| "Failed to serialize catalog metadata") .map_err(|e| e.into()) } fn deserialize<P: AsRef<Path>, R: AsRef<Path>>(meta: P, root: R) -> Result<Catalog<'cat>> { let meta = meta.as_ref(); if!meta.exists() { bail!("Cannot find catalog metadata {:?}", meta); } let (mut catalog, group_metas): (Catalog, Vec<SourceId>) = deserialize!(file meta).with_context(|_| "Failed to read catalog metadata")?; catalog.groups = Catalog::prepare_partition_groups(&root, group_metas) .with_context(|_| "Failed to read partition data")?; catalog.data_root = root.as_ref().to_path_buf(); Ok(catalog) } } impl<'cat> Drop for Catalog<'cat> { fn drop(&mut self) { self.flush() .with_context(|_| "Failed to flush data during drop") .unwrap(); } } impl<'cat> AsRef<ColumnMap> for Catalog<'cat> { fn as_ref(&self) -> &ColumnMap { &self.colmap } } #[cfg(test)] mod tests { use super::*; use crate::datastore::tests::create_random_partitions; #[test] fn new() { let source_ids = [1, 5, 7]; let im_part_count = 8; let mut_part_count = 2; let root = tempdir!(); let mut cat = Catalog::new(&root) .with_context(|_| "Unable to create catalog") .unwrap(); for source_id in &source_ids { let pg = cat .ensure_group(*source_id) .with_context(|_| "Unable to retrieve partition group") .unwrap(); create_random_partitions(pg, im_part_count, mut_part_count); } } #[test] fn add_partition_group_idempotence() { let root = tempdir!(); let mut cat = Catalog::new(&root) .with_context(|_| "Unable to create catalog") .unwrap(); const PG_ID: SourceId = 10; cat.add_partition_group(PG_ID).unwrap(); cat.add_partition_group(PG_ID).unwrap(); assert_eq!(cat.groups.len(), 1); assert_eq!( cat.groups .iter() .nth(0) .expect("partition group not found") .0, &PG_ID ); } }
ensure_default_columns
identifier_name
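// The missing identifier above is `ensure_default_columns`, which seeds the
// column map with a memmapped timestamp column (at TIMESTAMP_COLUMN) and an
// in-memory source_id column (at id 1), then defers to ensure_columns.
// ensure_columns is a thin wrapper around Extend, so re-inserting an existing
// id silently overwrites it, which is why add_columns checks ids and names
// first. A std-only sketch of that overwrite behaviour and of the next_id()
// convention (assuming TIMESTAMP_COLUMN == 0 purely for illustration):
use std::collections::HashMap;

fn main() {
    let mut colmap: HashMap<usize, &str> = HashMap::new();
    colmap.extend([(0, "timestamp"), (1, "source_id")]);
    // Extend happily redefines column 1: no error, the old entry is gone.
    colmap.extend([(1, "renamed")]);
    assert_eq!(colmap[&1], "renamed");
    // next_id() style: the first unoccupied id is max key + 1
    let next_id = colmap.keys().max().map_or(0, |m| *m + 1);
    assert_eq!(next_id, 2);
}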
catalog.rs
use crate::block::BlockType; use crate::error::*; use crate::mutator::append::Append; use crate::params::{SourceId, CATALOG_METADATA, TIMESTAMP_COLUMN}; use crate::scanner::{Scan, ScanResult}; use crate::storage::manager::PartitionGroupManager; use crate::ty::{BlockStorage, ColumnId, ColumnIndexStorageMap}; use hyena_common::collections::HashMap; use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; use std::default::Default; use std::iter::FromIterator; use std::path::{Path, PathBuf}; use super::column::Column; use super::partition_group::PartitionGroup; use super::{ColumnMap, PartitionGroupMap}; #[derive(Debug, Serialize, Deserialize)] pub struct Catalog<'cat> { pub(crate) colmap: ColumnMap, pub(crate) groups: PartitionGroupMap<'cat>, pub(crate) indexes: ColumnIndexStorageMap, #[serde(skip)] pub(crate) data_root: PathBuf, } impl<'cat> Catalog<'cat> { pub fn new<P: AsRef<Path>>(root: P) -> Result<Catalog<'cat>> { let root = root.as_ref().to_path_buf(); let meta = root.join(CATALOG_METADATA); if meta.exists() { bail!("Catalog metadata already exists {:?}", meta); } let mut catalog = Catalog { colmap: Default::default(), groups: Default::default(), indexes: Default::default(), data_root: root, }; catalog.ensure_default_columns()?; Ok(catalog) } fn ensure_default_columns(&mut self) -> Result<()> { let ts_column = Column::new(BlockStorage::Memmap(BlockType::U64Dense), "timestamp"); let source_column = Column::new(BlockStorage::Memory(BlockType::I32Dense), "source_id"); let mut map = HashMap::new(); map.insert(TIMESTAMP_COLUMN, ts_column); map.insert(1, source_column); self.ensure_columns(map) } pub fn with_data<P: AsRef<Path>>(root: P) -> Result<Catalog<'cat>> { let root = root.as_ref().to_path_buf(); let meta = root.join(CATALOG_METADATA); Catalog::deserialize(&meta, &root) } pub fn open_or_create<P: AsRef<Path>>(root: P) -> Result<Catalog<'cat>> { Catalog::with_data(root.as_ref()).or_else(|_| Catalog::new(root.as_ref())) } pub fn columns(&self) -> &ColumnMap { &self.colmap } #[cfg(feature = "validate_append")] fn validate_append(&self, data: &Append) -> bool { let ts_len = data.ts.len(); data.data.iter().all(|(_col, fragment)| { // check fragment length for dense blocks if!fragment.is_sparse() { if ts_len!= fragment.len() { error!("Dense append fragment has different length than ts"); return false; } } else { if ts_len > fragment.len() { error!("Sparse append fragment longer than ts"); return false; } if fragment.iter().any(|(idx, _)| idx >= ts_len) { error!("Sparse append fragment has index greater than ts length"); return false; } } true }) } pub fn append(&self, data: &Append) -> Result<usize> { if data.is_empty() { bail!("Provided Append contains no data"); } #[cfg(feature = "validate_append")] { if!self.validate_append(&data) { bail!("Provided Append is not consistent"); } } // dispatch to proper PartitionGroup if let Some(pg) = self.groups.get(&data.source_id) { pg.append(&self, &data) } else { bail!("No PartitionGroup found for source_id = {}", data.source_id); } } pub fn scan(&self, scan: &Scan) -> Result<ScanResult> { let all_groups = if scan.groups.is_some() { None } else { Some(self.groups.keys().cloned().collect::<Vec<_>>()) }; if scan.groups.is_some() { scan.groups.as_ref().unwrap() } else { all_groups.as_ref().unwrap() } .chunks(2) .map(|group| { group .par_iter() .filter_map(|pgid| self.groups.get(pgid)) .map(|pg| pg.scan(&self, &scan)) // todo: this would potentially be better with some short-circuiting combinator // instead // need to bench with collect_into() 
.reduce( || Ok(ScanResult::merge_identity()), |a, b| { let mut a = a?; let b = b?; a.merge(b)?; Ok(a) }, ) }) .fold(Ok(ScanResult::merge_identity()), |a, b| { let mut a = a?; let b = b?; a.merge(b)?; Ok(a) }) } pub fn flush(&self) -> Result<()> { // TODO: add dirty flag let meta = self.data_root.join(CATALOG_METADATA); for pg in self.groups.values() { pg.flush()? } Catalog::serialize(self, &meta) } /// Extend internal column map without any sanitization checks. /// /// This function uses `std::iter::Extend` internally, /// so it allows redefinition of a column type. /// Use this feature with great caution. pub(crate) fn ensure_columns(&mut self, type_map: ColumnMap) -> Result<()> { self.colmap.extend(type_map); Ok(()) } /// Adds a column to the catalog. It verifies that the catalog does not already contain: /// a) a column with the given id, or /// b) a column with the given name. /// This function takes an all-or-nothing approach: /// either all columns are added, or no changes are applied. pub fn add_columns(&mut self, column_map: ColumnMap) -> Result<()> { for (id, column) in column_map.iter() { info!( "Adding column {}:{:?} with id {}", column.name, column.ty, id ); if self.colmap.contains_key(id) { bail!("Column Id already exists {}", *id); } if self.colmap.values().any(|col| col.name == column.name)
} self.ensure_columns(column_map) } /// Extend internal index map without any sanitization checks. /// /// This function uses `std::iter::Extend` internally, /// so it allows redefinition of an index type. /// Also, the index's support for a given column is not checked. /// Use this feature with great caution. pub(crate) fn ensure_indexes(&mut self, index_map: ColumnIndexStorageMap) -> Result<()> { self.indexes.extend(&*index_map); Ok(()) } /// Adds an index to the catalog. It verifies that the catalog does not already contain: /// a) an index for a column with the given id, or /// b) an index for a column with the given name. /// This function takes an all-or-nothing approach: /// either all indexes are added, or no changes are applied. pub fn add_indexes(&mut self, index_map: ColumnIndexStorageMap) -> Result<()> { for (id, index) in index_map.iter() { let column = self .colmap .get(id) .ok_or_else(|| err_msg(format!("column not found {}", id)))?; info!( "Adding index {:?} for column {}[{}]:{:?}", index, column.name, id, column.ty ); if self.indexes.contains_key(id) { bail!("Index already exists {}", *id); } } self.ensure_indexes(index_map) } /// Fetch the first non-occupied column index /// /// todo: rethink this approach (max() every time) pub fn next_id(&self) -> usize { let default = 0; *self.colmap.keys().max().unwrap_or(&default) + 1 } /// Calculate an empty partition's capacity for a given column set pub(super) fn space_for_blocks<'iter>( &self, indices: impl Iterator<Item = &'iter ColumnId>, ) -> usize { use crate::params::BLOCK_SIZE; indices .filter_map(|col_id| { if let Some(column) = self.colmap.get(col_id) { Some(BLOCK_SIZE / column.size_of()) } else { None } }) .min() // the default shouldn't ever happen, as there should always be a ts block // but in case it happens, this will return 0 // which in turn will cause a new partition to be used .unwrap_or_default() } pub(crate) fn ensure_group( &mut self, source_id: SourceId, ) -> Result<&mut PartitionGroup<'cat>> { let data_root = <_ as AsRef<Path>>::as_ref(&self.data_root); Ok(self.groups.entry(source_id).or_insert_with(|| { // this shouldn't fail in general let root = PartitionGroupManager::new(data_root, source_id) .with_context(|_| "Failed to create group manager") .unwrap(); let pg = PartitionGroup::new(&root, source_id) .with_context(|_| "Unable to create partition group") .unwrap(); pg.flush().unwrap(); pg })) } /// Add a new partition group with the given source id pub fn add_partition_group(&mut self, source_id: SourceId) -> Result<()> { let _ = self.ensure_group(source_id)?; Ok(()) } fn prepare_partition_groups<P, I>(root: P, ids: I) -> Result<PartitionGroupMap<'cat>> where P: AsRef<Path>, I: IntoIterator<Item = SourceId>, { ids.into_iter() .map(|source_id| { let path = PartitionGroupManager::new(&root, source_id).with_context(|_| { format!( "Unable to obtain data directory for partition group {}", source_id ) })?; let partition_group = PartitionGroup::with_data(path) .with_context(|_| format!("Unable to read partition group {:?}", source_id))?; Ok((source_id, partition_group)) }) .collect() } fn serialize<P: AsRef<Path>>(catalog: &Catalog<'cat>, meta: P) -> Result<()> { let meta = meta.as_ref(); let group_metas = Vec::from_iter(catalog.groups.keys()); let data = (catalog, group_metas); serialize!(file meta, &data) .with_context(|_| "Failed to serialize catalog metadata") .map_err(|e| e.into()) } fn deserialize<P: AsRef<Path>, R: AsRef<Path>>(meta: P, root: R) -> Result<Catalog<'cat>> { let meta = meta.as_ref(); if !meta.exists() { bail!("Cannot find
catalog metadata {:?}", meta); } let (mut catalog, group_metas): (Catalog, Vec<SourceId>) = deserialize!(file meta).with_context(|_| "Failed to read catalog metadata")?; catalog.groups = Catalog::prepare_partition_groups(&root, group_metas) .with_context(|_| "Failed to read partition data")?; catalog.data_root = root.as_ref().to_path_buf(); Ok(catalog) } } impl<'cat> Drop for Catalog<'cat> { fn drop(&mut self) { self.flush() .with_context(|_| "Failed to flush data during drop") .unwrap(); } } impl<'cat> AsRef<ColumnMap> for Catalog<'cat> { fn as_ref(&self) -> &ColumnMap { &self.colmap } } #[cfg(test)] mod tests { use super::*; use crate::datastore::tests::create_random_partitions; #[test] fn new() { let source_ids = [1, 5, 7]; let im_part_count = 8; let mut_part_count = 2; let root = tempdir!(); let mut cat = Catalog::new(&root) .with_context(|_| "Unable to create catalog") .unwrap(); for source_id in &source_ids { let pg = cat .ensure_group(*source_id) .with_context(|_| "Unable to retrieve partition group") .unwrap(); create_random_partitions(pg, im_part_count, mut_part_count); } } #[test] fn add_partition_group_idempotence() { let root = tempdir!(); let mut cat = Catalog::new(&root) .with_context(|_| "Unable to create catalog") .unwrap(); const PG_ID: SourceId = 10; cat.add_partition_group(PG_ID).unwrap(); cat.add_partition_group(PG_ID).unwrap(); assert_eq!(cat.groups.len(), 1); assert_eq!( cat.groups .iter() .nth(0) .expect("partition group not found") .0, &PG_ID ); } }
{ bail!("Column Name already exists '{}'", column.name); }
conditional_block
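The `Catalog::scan` path above fans work out with rayon and folds the partial results back together: groups are processed two at a time, each pair is reduced in parallel starting from a merge identity, and the per-pair results are folded sequentially. A minimal sketch of that reduce-then-fold shape, using a toy `Partial` type in place of the real `ScanResult` (all names here are illustrative, not from the crate):

```rust
use rayon::prelude::*;

#[derive(Debug, Default)]
struct Partial(Vec<u32>);

impl Partial {
    // Neutral element for the merge, like `ScanResult::merge_identity`.
    fn merge_identity() -> Self {
        Partial(Vec::new())
    }
    fn merge(&mut self, other: Partial) -> Result<(), String> {
        self.0.extend(other.0);
        Ok(())
    }
}

fn main() -> Result<(), String> {
    let groups: Vec<Vec<u32>> = vec![vec![1, 2], vec![3], vec![4, 5], vec![6]];
    let result = groups
        .chunks(2) // process groups two at a time, as the catalog does
        .map(|pair| {
            pair.par_iter()
                .map(|g| Ok(Partial(g.clone())))
                // parallel reduce within the pair, starting from the identity
                .reduce(
                    || Ok(Partial::merge_identity()),
                    |a, b| {
                        let mut a = a?;
                        a.merge(b?)?;
                        Ok(a)
                    },
                )
        })
        // sequential fold across the pairwise results
        .fold(Ok(Partial::merge_identity()), |a, b| {
            let mut a = a?;
            a.merge(b?)?;
            Ok(a)
        })?;
    println!("{:?}", result);
    Ok(())
}
```

Carrying `Result` through both the identity and the combining closures is what lets an error from any partition group propagate out through the merges instead of panicking mid-scan.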
credentials.rs
//! # Credentials for accessing the Firebase REST API //! This module contains the [`crate::credentials::Credentials`] type, used by [`crate::sessions`] to create and maintain //! authentication tokens for accessing the Firebase REST API. use chrono::Duration; use serde::{Deserialize, Serialize}; use serde_json; use std::collections::BTreeMap; use std::fs::File; use std::sync::Arc; use super::jwt::{create_jwt_encoded, download_google_jwks, verify_access_token, JWKSet, JWT_AUDIENCE_IDENTITY}; use crate::errors::FirebaseError; use std::io::BufReader; type Error = super::errors::FirebaseError; /// This is not defined in the json file and is computed #[derive(Default, Clone)] pub(crate) struct Keys { pub pub_key: BTreeMap<String, Arc<biscuit::jws::Secret>>, pub secret: Option<Arc<biscuit::jws::Secret>>, } /// Service account credentials /// /// In particular, the service account email is required to retrieve the public JSON web key set (JWKS) /// for verifying Google Firestore tokens. /// /// The api_key is necessary for interacting with the Firestore REST API. /// /// Internals: /// /// The private key is used for signing JWTs (JSON Web Tokens). /// A signed jwt, encoded as a base64 string, can be exchanged into a refresh and access token. #[derive(Serialize, Deserialize, Default, Clone)] pub struct Credentials { pub project_id: String, pub private_key_id: String, pub private_key: String, pub client_email: String, pub client_id: String, pub api_key: String, #[serde(default, skip)] pub(crate) keys: Keys, } /// Converts a PEM (ascii base64) encoded private key into the binary der representation pub fn pem_to_der(pem_file_contents: &str) -> Result<Vec<u8>, Error> { use base64::decode; let pem_file_contents = pem_file_contents .find("-----BEGIN") // Cut off the first BEGIN part .and_then(|i| Some(&pem_file_contents[i + 10..])) // Find the trailing ---- after BEGIN and cut that off .and_then(|str| str.find("-----").and_then(|i| Some(&str[i + 5..]))) // Cut off -----END .and_then(|str| str.rfind("-----END").and_then(|i| Some(&str[..i]))); if pem_file_contents.is_none() { return Err(FirebaseError::Generic( "Invalid private key in credentials file. Must be valid PEM.", )); } let base64_body = pem_file_contents.unwrap().replace("\n", ""); Ok(decode(&base64_body) .map_err(|_| FirebaseError::Generic("Invalid private key in credentials file. Expected Base64 data."))?)
} #[test] fn pem_to_der_test() { const INPUT: &str = r#"-----BEGIN PRIVATE KEY----- MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCTbt9Rs2niyIRE FIdrhIN757eq/1Ry/VhZALBXAveg+lt+ui/9EHtYPJH1A9NyyAwChs0UCRWqkkEo Amtz4dJQ1YlGi0/BGhK2lg== -----END PRIVATE KEY----- "#; const EXPECTED: [u8; 112] = [ 48, 130, 4, 188, 2, 1, 0, 48, 13, 6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 1, 5, 0, 4, 130, 4, 166, 48, 130, 4, 162, 2, 1, 0, 2, 130, 1, 1, 0, 147, 110, 223, 81, 179, 105, 226, 200, 132, 68, 20, 135, 107, 132, 131, 123, 231, 183, 170, 255, 84, 114, 253, 88, 89, 0, 176, 87, 2, 247, 160, 250, 91, 126, 186, 47, 253, 16, 123, 88, 60, 145, 245, 3, 211, 114, 200, 12, 2, 134, 205, 20, 9, 21, 170, 146, 65, 40, 2, 107, 115, 225, 210, 80, 213, 137, 70, 139, 79, 193, 26, 18, 182, 150, ]; assert_eq!(&EXPECTED[..], &pem_to_der(INPUT).unwrap()[..]); } impl Credentials { /// Create a [`Credentials`] object by parsing a google-service-account json string /// /// Example: /// /// Assuming that your firebase service account credentials file is called "service-account-test.json" and /// a downloaded jwk-set file is called "service-account-test.jwks" this example embeds /// the file content during compile time. This avoids and http or io calls. /// /// ``` /// use firestore_db_and_auth::{Credentials}; /// use firestore_db_and_auth::jwt::JWKSet; /// /// let c: Credentials = Credentials::new(include_str!("../tests/service-account-test.json"))? /// .with_jwkset(&JWKSet::new(include_str!("../tests/service-account-test.jwks"))?)?; /// # Ok::<(), firestore_db_and_auth::errors::FirebaseError>(()) /// ``` /// /// You need two JWKS files for this crate to work: /// * https://www.googleapis.com/service_accounts/v1/jwk/[email protected] /// * https://www.googleapis.com/service_accounts/v1/jwk/{your-service-account-email} pub fn new(credentials_file_content: &str) -> Result<Credentials, Error> { let mut credentials: Credentials = serde_json::from_str(credentials_file_content)?; credentials.compute_secret()?; Ok(credentials) } /// Create a [`Credentials`] object by reading and parsing a google-service-account json file. /// /// This is a convenience method, that reads in the given credentials file and acts otherwise the same as /// the [`Credentials::new`] method. pub fn from_file(credential_file: &str) -> Result<Self, Error> { let f = BufReader::new(File::open(credential_file)?); let mut credentials: Credentials = serde_json::from_reader(f)?; credentials.compute_secret()?; Ok(credentials) } /// Adds public-key JWKs to a credentials instance and returns it. /// /// This method will also verify that the given JWKs files allow verification of Google access tokens. /// This is a convenience method, you may also just use [`Credentials::add_jwks_public_keys`]. pub fn with_jwkset(mut self, jwks: &JWKSet) -> Result<Credentials, Error> { self.add_jwks_public_keys(jwks); self.verify()?; Ok(self) } /// The public keys to verify generated tokens will be downloaded, for the given service account as well as /// for "[email protected]". /// Do not use this option if additional downloads are not desired, /// for example in cloud functions that require fast cold boot start times. /// /// You can use [`Credentials::add_jwks_public_keys`] to manually add/replace public keys later on. /// /// Example: /// /// Assuming that your firebase service account credentials file is called "service-account-test.json". 
/// /// ```no_run /// use firestore_db_and_auth::{Credentials}; /// /// let c: Credentials = Credentials::new(include_str!("../tests/service-account-test.json"))? /// .download_jwkset()?; /// # Ok::<(), firestore_db_and_auth::errors::FirebaseError>(()) /// ``` pub fn download_jwkset(mut self) -> Result<Credentials, Error> { self.download_google_jwks()?; self.verify()?; Ok(self) } /// Verifies that creating access tokens is possible with the given credentials and public keys. /// Returns an empty result type on success. pub fn verify(&self) -> Result<(), Error> { let access_token = create_jwt_encoded( &self, Some(["admin"].iter()), Duration::hours(1), Some(self.client_id.clone()), None, JWT_AUDIENCE_IDENTITY, )?; verify_access_token(&self, &access_token)?; Ok(()) } /// Find the secret in the jwt set that matches the given key id, if any. /// Used for jws validation pub fn decode_secret(&self, kid: &str) -> Option<Arc<biscuit::jws::Secret>> { self.keys.pub_key.get(kid).and_then(|f| Some(f.clone())) } /// Add a JSON Web Key Set (JWKS) to allow verification of Google access tokens. /// /// Example: /// /// ``` /// use firestore_db_and_auth::credentials::Credentials; /// use firestore_db_and_auth::JWKSet; /// /// let mut c : Credentials = serde_json::from_str(include_str!("../tests/service-account-test.json"))?; /// c.add_jwks_public_keys(&JWKSet::new(include_str!("../tests/service-account-test.jwks"))?); /// c.compute_secret()?; /// c.verify()?; /// # Ok::<(), firestore_db_and_auth::errors::FirebaseError>(()) /// ``` pub fn add_jwks_public_keys(&mut self, jwkset: &JWKSet) { for entry in jwkset.keys.iter() { if!entry.headers.key_id.is_some() { continue; } let key_id = entry.headers.key_id.as_ref().unwrap().to_owned(); self.keys .pub_key .insert(key_id, Arc::new(entry.ne.jws_public_key_secret())); } } /// If you haven't called [`Credentials::add_jwks_public_keys`] to manually add public keys, /// this method will download one for your google service account and one for the oauth related /// [email protected] service account. pub fn download_google_jwks(&mut self) -> Result<(), Error> { let jwks = download_google_jwks(&self.client_email)?; self.add_jwks_public_keys(&JWKSet::new(&jwks)?); let jwks = download_google_jwks("[email protected]")?; self.add_jwks_public_keys(&JWKSet::new(&jwks)?); Ok(()) } /// Compute the Rsa keypair by using the private_key of the credentials file. /// You must call this if you have manually created a credentials object. /// /// This is automatically invoked if you use [`Credentials::new`] or [`Credentials::from_file`]. pub fn compute_secret(&mut self) -> Result<(), Error> { use biscuit::jws::Secret; use ring::signature; let vec = pem_to_der(&self.private_key)?; let key_pair = signature::RsaKeyPair::from_pkcs8(&vec)?; self.keys.secret = Some(Arc::new(Secret::RsaKeyPair(Arc::new(key_pair)))); Ok(()) } } #[doc(hidden)] #[allow(dead_code)] pub fn doctest_credentials() -> Credentials { let jwk_list = JWKSet::new(include_str!("../tests/service-account-test.jwks")).unwrap(); Credentials::new(include_str!("../tests/service-account-test.json")) .expect("Failed to deserialize credentials") .with_jwkset(&jwk_list) .expect("JWK public keys verification failed") } #[test] fn
() { let jwk_list = JWKSet::new(include_str!("../tests/service-account-test.jwks")).unwrap(); let c: Credentials = Credentials::new(include_str!("../tests/service-account-test.json")) .expect("Failed to deserialize credentials") .with_jwkset(&jwk_list) .expect("JWK public keys verification failed"); assert_eq!(c.api_key, "api_key"); use std::path::PathBuf; let mut credential_file = PathBuf::from(env!("CARGO_MANIFEST_DIR")); credential_file.push("tests/service-account-test.json"); let c = Credentials::from_file(credential_file.to_str().unwrap()) .expect("Failed to open credentials file") .with_jwkset(&jwk_list) .expect("JWK public keys verification failed"); assert_eq!(c.api_key, "api_key"); }
deserialize_credentials
identifier_name
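`pem_to_der` above peels the PEM armor with three successive string searches before base64-decoding the body. A standalone sketch of just the armor-stripping step, using only `std` (the real function feeds the result to `base64::decode`; the helper name here is invented for the sketch):

```rust
// Strip PEM armor: skip past "-----BEGIN", skip the rest of the header
// line ("... KEY-----"), then cut everything from "-----END" onward.
fn pem_body(pem: &str) -> Option<String> {
    let s = pem.find("-----BEGIN").map(|i| &pem[i + 10..])?;
    let s = s.find("-----").map(|i| &s[i + 5..])?;
    let s = s.rfind("-----END").map(|i| &s[..i])?;
    // Join the base64 lines back together, as pem_to_der does.
    Some(s.replace('\n', ""))
}

fn main() {
    let pem = "-----BEGIN PRIVATE KEY-----\nQUJD\n-----END PRIVATE KEY-----\n";
    assert_eq!(pem_body(pem).as_deref(), Some("QUJD"));
    // Malformed input falls out as None instead of panicking.
    assert_eq!(pem_body("not a pem"), None);
}
```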
credentials.rs
//! # Credentials for accessing the Firebase REST API //! This module contains the [`crate::credentials::Credentials`] type, used by [`crate::sessions`] to create and maintain //! authentication tokens for accessing the Firebase REST API. use chrono::Duration; use serde::{Deserialize, Serialize}; use serde_json; use std::collections::BTreeMap; use std::fs::File; use std::sync::Arc; use super::jwt::{create_jwt_encoded, download_google_jwks, verify_access_token, JWKSet, JWT_AUDIENCE_IDENTITY}; use crate::errors::FirebaseError; use std::io::BufReader; type Error = super::errors::FirebaseError; /// This is not defined in the json file and is computed #[derive(Default, Clone)] pub(crate) struct Keys { pub pub_key: BTreeMap<String, Arc<biscuit::jws::Secret>>, pub secret: Option<Arc<biscuit::jws::Secret>>, } /// Service account credentials /// /// In particular, the service account email is required to retrieve the public JSON web key set (JWKS) /// for verifying Google Firestore tokens. /// /// The api_key is necessary for interacting with the Firestore REST API. /// /// Internals: /// /// The private key is used for signing JWTs (JSON Web Tokens). /// A signed jwt, encoded as a base64 string, can be exchanged into a refresh and access token. #[derive(Serialize, Deserialize, Default, Clone)] pub struct Credentials { pub project_id: String, pub private_key_id: String, pub private_key: String, pub client_email: String, pub client_id: String, pub api_key: String, #[serde(default, skip)] pub(crate) keys: Keys, } /// Converts a PEM (ascii base64) encoded private key into the binary der representation pub fn pem_to_der(pem_file_contents: &str) -> Result<Vec<u8>, Error> { use base64::decode; let pem_file_contents = pem_file_contents .find("-----BEGIN") // Cut off the first BEGIN part .and_then(|i| Some(&pem_file_contents[i + 10..])) // Find the trailing ---- after BEGIN and cut that off .and_then(|str| str.find("-----").and_then(|i| Some(&str[i + 5..]))) // Cut off -----END .and_then(|str| str.rfind("-----END").and_then(|i| Some(&str[..i]))); if pem_file_contents.is_none() { return Err(FirebaseError::Generic( "Invalid private key in credentials file. Must be valid PEM.", )); } let base64_body = pem_file_contents.unwrap().replace("\n", ""); Ok(decode(&base64_body) .map_err(|_| FirebaseError::Generic("Invalid private key in credentials file. Expected Base64 data."))?)
} #[test] fn pem_to_der_test() { const INPUT: &str = r#"-----BEGIN PRIVATE KEY----- MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCTbt9Rs2niyIRE FIdrhIN757eq/1Ry/VhZALBXAveg+lt+ui/9EHtYPJH1A9NyyAwChs0UCRWqkkEo Amtz4dJQ1YlGi0/BGhK2lg== -----END PRIVATE KEY----- "#; const EXPECTED: [u8; 112] = [ 48, 130, 4, 188, 2, 1, 0, 48, 13, 6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 1, 5, 0, 4, 130, 4, 166, 48, 130, 4, 162, 2, 1, 0, 2, 130, 1, 1, 0, 147, 110, 223, 81, 179, 105, 226, 200, 132, 68, 20, 135, 107, 132, 131, 123, 231, 183, 170, 255, 84, 114, 253, 88, 89, 0, 176, 87, 2, 247, 160, 250, 91, 126, 186, 47, 253, 16, 123, 88, 60, 145, 245, 3, 211, 114, 200, 12, 2, 134, 205, 20, 9, 21, 170, 146, 65, 40, 2, 107, 115, 225, 210, 80, 213, 137, 70, 139, 79, 193, 26, 18, 182, 150, ]; assert_eq!(&EXPECTED[..], &pem_to_der(INPUT).unwrap()[..]); } impl Credentials { /// Create a [`Credentials`] object by parsing a google-service-account json string /// /// Example: /// /// Assuming that your firebase service account credentials file is called "service-account-test.json" and /// a downloaded jwk-set file is called "service-account-test.jwks" this example embeds /// the file content during compile time. This avoids and http or io calls. /// /// ``` /// use firestore_db_and_auth::{Credentials}; /// use firestore_db_and_auth::jwt::JWKSet; /// /// let c: Credentials = Credentials::new(include_str!("../tests/service-account-test.json"))? /// .with_jwkset(&JWKSet::new(include_str!("../tests/service-account-test.jwks"))?)?; /// # Ok::<(), firestore_db_and_auth::errors::FirebaseError>(()) /// ``` /// /// You need two JWKS files for this crate to work: /// * https://www.googleapis.com/service_accounts/v1/jwk/[email protected] /// * https://www.googleapis.com/service_accounts/v1/jwk/{your-service-account-email} pub fn new(credentials_file_content: &str) -> Result<Credentials, Error> { let mut credentials: Credentials = serde_json::from_str(credentials_file_content)?; credentials.compute_secret()?; Ok(credentials) } /// Create a [`Credentials`] object by reading and parsing a google-service-account json file. /// /// This is a convenience method, that reads in the given credentials file and acts otherwise the same as /// the [`Credentials::new`] method. pub fn from_file(credential_file: &str) -> Result<Self, Error> { let f = BufReader::new(File::open(credential_file)?); let mut credentials: Credentials = serde_json::from_reader(f)?; credentials.compute_secret()?; Ok(credentials) } /// Adds public-key JWKs to a credentials instance and returns it. /// /// This method will also verify that the given JWKs files allow verification of Google access tokens. /// This is a convenience method, you may also just use [`Credentials::add_jwks_public_keys`]. pub fn with_jwkset(mut self, jwks: &JWKSet) -> Result<Credentials, Error> { self.add_jwks_public_keys(jwks); self.verify()?; Ok(self) } /// The public keys to verify generated tokens will be downloaded, for the given service account as well as /// for "[email protected]". /// Do not use this option if additional downloads are not desired, /// for example in cloud functions that require fast cold boot start times. /// /// You can use [`Credentials::add_jwks_public_keys`] to manually add/replace public keys later on. /// /// Example: /// /// Assuming that your firebase service account credentials file is called "service-account-test.json". 
/// /// ```no_run /// use firestore_db_and_auth::{Credentials}; /// /// let c: Credentials = Credentials::new(include_str!("../tests/service-account-test.json"))? /// .download_jwkset()?; /// # Ok::<(), firestore_db_and_auth::errors::FirebaseError>(()) /// ``` pub fn download_jwkset(mut self) -> Result<Credentials, Error> { self.download_google_jwks()?; self.verify()?; Ok(self) } /// Verifies that creating access tokens is possible with the given credentials and public keys. /// Returns an empty result type on success. pub fn verify(&self) -> Result<(), Error> { let access_token = create_jwt_encoded( &self, Some(["admin"].iter()), Duration::hours(1), Some(self.client_id.clone()), None, JWT_AUDIENCE_IDENTITY, )?; verify_access_token(&self, &access_token)?; Ok(()) } /// Find the secret in the jwt set that matches the given key id, if any. /// Used for jws validation pub fn decode_secret(&self, kid: &str) -> Option<Arc<biscuit::jws::Secret>> { self.keys.pub_key.get(kid).and_then(|f| Some(f.clone())) } /// Add a JSON Web Key Set (JWKS) to allow verification of Google access tokens. /// /// Example: /// /// ``` /// use firestore_db_and_auth::credentials::Credentials; /// use firestore_db_and_auth::JWKSet; /// /// let mut c : Credentials = serde_json::from_str(include_str!("../tests/service-account-test.json"))?;
/// # Ok::<(), firestore_db_and_auth::errors::FirebaseError>(()) /// ``` pub fn add_jwks_public_keys(&mut self, jwkset: &JWKSet) { for entry in jwkset.keys.iter() { if!entry.headers.key_id.is_some() { continue; } let key_id = entry.headers.key_id.as_ref().unwrap().to_owned(); self.keys .pub_key .insert(key_id, Arc::new(entry.ne.jws_public_key_secret())); } } /// If you haven't called [`Credentials::add_jwks_public_keys`] to manually add public keys, /// this method will download one for your google service account and one for the oauth related /// [email protected] service account. pub fn download_google_jwks(&mut self) -> Result<(), Error> { let jwks = download_google_jwks(&self.client_email)?; self.add_jwks_public_keys(&JWKSet::new(&jwks)?); let jwks = download_google_jwks("[email protected]")?; self.add_jwks_public_keys(&JWKSet::new(&jwks)?); Ok(()) } /// Compute the Rsa keypair by using the private_key of the credentials file. /// You must call this if you have manually created a credentials object. /// /// This is automatically invoked if you use [`Credentials::new`] or [`Credentials::from_file`]. pub fn compute_secret(&mut self) -> Result<(), Error> { use biscuit::jws::Secret; use ring::signature; let vec = pem_to_der(&self.private_key)?; let key_pair = signature::RsaKeyPair::from_pkcs8(&vec)?; self.keys.secret = Some(Arc::new(Secret::RsaKeyPair(Arc::new(key_pair)))); Ok(()) } } #[doc(hidden)] #[allow(dead_code)] pub fn doctest_credentials() -> Credentials { let jwk_list = JWKSet::new(include_str!("../tests/service-account-test.jwks")).unwrap(); Credentials::new(include_str!("../tests/service-account-test.json")) .expect("Failed to deserialize credentials") .with_jwkset(&jwk_list) .expect("JWK public keys verification failed") } #[test] fn deserialize_credentials() { let jwk_list = JWKSet::new(include_str!("../tests/service-account-test.jwks")).unwrap(); let c: Credentials = Credentials::new(include_str!("../tests/service-account-test.json")) .expect("Failed to deserialize credentials") .with_jwkset(&jwk_list) .expect("JWK public keys verification failed"); assert_eq!(c.api_key, "api_key"); use std::path::PathBuf; let mut credential_file = PathBuf::from(env!("CARGO_MANIFEST_DIR")); credential_file.push("tests/service-account-test.json"); let c = Credentials::from_file(credential_file.to_str().unwrap()) .expect("Failed to open credentials file") .with_jwkset(&jwk_list) .expect("JWK public keys verification failed"); assert_eq!(c.api_key, "api_key"); }
/// c.add_jwks_public_keys(&JWKSet::new(include_str!("../tests/service-account-test.jwks"))?); /// c.compute_secret()?; /// c.verify()?;
random_line_split
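The doc examples in this file end with a hidden `# Ok::<(), firestore_db_and_auth::errors::FirebaseError>(())` line; that is the standard rustdoc idiom for using `?` inside a doctest: lines prefixed with `#` are run but not rendered, and the trailing `Ok` expression gives the doctest's implicit `main` a `Result` return type. A minimal illustration with a `std` error type (the function itself is a throwaway scaffold for the doc comment):

````rust
/// Parse a number. The hidden `#`-prefixed line below gives the doctest's
/// implicit `main` a `Result` return value, so `?` is allowed inside it.
///
/// ```
/// let n: i32 = "42".parse()?;
/// assert_eq!(n, 42);
/// # Ok::<(), std::num::ParseIntError>(())
/// ```
pub fn parse_example() {}

fn main() {}
````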
credentials.rs
//! # Credentials for accessing the Firebase REST API //! This module contains the [`crate::credentials::Credentials`] type, used by [`crate::sessions`] to create and maintain //! authentication tokens for accessing the Firebase REST API. use chrono::Duration; use serde::{Deserialize, Serialize}; use serde_json; use std::collections::BTreeMap; use std::fs::File; use std::sync::Arc; use super::jwt::{create_jwt_encoded, download_google_jwks, verify_access_token, JWKSet, JWT_AUDIENCE_IDENTITY}; use crate::errors::FirebaseError; use std::io::BufReader; type Error = super::errors::FirebaseError; /// This is not defined in the json file and is computed #[derive(Default, Clone)] pub(crate) struct Keys { pub pub_key: BTreeMap<String, Arc<biscuit::jws::Secret>>, pub secret: Option<Arc<biscuit::jws::Secret>>, } /// Service account credentials /// /// In particular, the service account email is required to retrieve the public JSON web key set (JWKS) /// for verifying Google Firestore tokens. /// /// The api_key is necessary for interacting with the Firestore REST API. /// /// Internals: /// /// The private key is used for signing JWTs (JSON Web Tokens). /// A signed jwt, encoded as a base64 string, can be exchanged into a refresh and access token. #[derive(Serialize, Deserialize, Default, Clone)] pub struct Credentials { pub project_id: String, pub private_key_id: String, pub private_key: String, pub client_email: String, pub client_id: String, pub api_key: String, #[serde(default, skip)] pub(crate) keys: Keys, } /// Converts a PEM (ascii base64) encoded private key into the binary der representation pub fn pem_to_der(pem_file_contents: &str) -> Result<Vec<u8>, Error> { use base64::decode; let pem_file_contents = pem_file_contents .find("-----BEGIN") // Cut off the first BEGIN part .and_then(|i| Some(&pem_file_contents[i + 10..])) // Find the trailing ---- after BEGIN and cut that off .and_then(|str| str.find("-----").and_then(|i| Some(&str[i + 5..]))) // Cut off -----END .and_then(|str| str.rfind("-----END").and_then(|i| Some(&str[..i]))); if pem_file_contents.is_none() { return Err(FirebaseError::Generic( "Invalid private key in credentials file. Must be valid PEM.", )); } let base64_body = pem_file_contents.unwrap().replace("\n", ""); Ok(decode(&base64_body) .map_err(|_| FirebaseError::Generic("Invalid private key in credentials file. Expected Base64 data."))?)
} #[test] fn pem_to_der_test() { const INPUT: &str = r#"-----BEGIN PRIVATE KEY----- MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCTbt9Rs2niyIRE FIdrhIN757eq/1Ry/VhZALBXAveg+lt+ui/9EHtYPJH1A9NyyAwChs0UCRWqkkEo Amtz4dJQ1YlGi0/BGhK2lg== -----END PRIVATE KEY----- "#; const EXPECTED: [u8; 112] = [ 48, 130, 4, 188, 2, 1, 0, 48, 13, 6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 1, 5, 0, 4, 130, 4, 166, 48, 130, 4, 162, 2, 1, 0, 2, 130, 1, 1, 0, 147, 110, 223, 81, 179, 105, 226, 200, 132, 68, 20, 135, 107, 132, 131, 123, 231, 183, 170, 255, 84, 114, 253, 88, 89, 0, 176, 87, 2, 247, 160, 250, 91, 126, 186, 47, 253, 16, 123, 88, 60, 145, 245, 3, 211, 114, 200, 12, 2, 134, 205, 20, 9, 21, 170, 146, 65, 40, 2, 107, 115, 225, 210, 80, 213, 137, 70, 139, 79, 193, 26, 18, 182, 150, ]; assert_eq!(&EXPECTED[..], &pem_to_der(INPUT).unwrap()[..]); } impl Credentials { /// Create a [`Credentials`] object by parsing a google-service-account json string /// /// Example: /// /// Assuming that your firebase service account credentials file is called "service-account-test.json" and /// a downloaded jwk-set file is called "service-account-test.jwks" this example embeds /// the file content during compile time. This avoids and http or io calls. /// /// ``` /// use firestore_db_and_auth::{Credentials}; /// use firestore_db_and_auth::jwt::JWKSet; /// /// let c: Credentials = Credentials::new(include_str!("../tests/service-account-test.json"))? /// .with_jwkset(&JWKSet::new(include_str!("../tests/service-account-test.jwks"))?)?; /// # Ok::<(), firestore_db_and_auth::errors::FirebaseError>(()) /// ``` /// /// You need two JWKS files for this crate to work: /// * https://www.googleapis.com/service_accounts/v1/jwk/[email protected] /// * https://www.googleapis.com/service_accounts/v1/jwk/{your-service-account-email} pub fn new(credentials_file_content: &str) -> Result<Credentials, Error> { let mut credentials: Credentials = serde_json::from_str(credentials_file_content)?; credentials.compute_secret()?; Ok(credentials) } /// Create a [`Credentials`] object by reading and parsing a google-service-account json file. /// /// This is a convenience method, that reads in the given credentials file and acts otherwise the same as /// the [`Credentials::new`] method. pub fn from_file(credential_file: &str) -> Result<Self, Error> { let f = BufReader::new(File::open(credential_file)?); let mut credentials: Credentials = serde_json::from_reader(f)?; credentials.compute_secret()?; Ok(credentials) } /// Adds public-key JWKs to a credentials instance and returns it. /// /// This method will also verify that the given JWKs files allow verification of Google access tokens. /// This is a convenience method, you may also just use [`Credentials::add_jwks_public_keys`]. pub fn with_jwkset(mut self, jwks: &JWKSet) -> Result<Credentials, Error> { self.add_jwks_public_keys(jwks); self.verify()?; Ok(self) } /// The public keys to verify generated tokens will be downloaded, for the given service account as well as /// for "[email protected]". /// Do not use this option if additional downloads are not desired, /// for example in cloud functions that require fast cold boot start times. /// /// You can use [`Credentials::add_jwks_public_keys`] to manually add/replace public keys later on. /// /// Example: /// /// Assuming that your firebase service account credentials file is called "service-account-test.json". 
/// /// ```no_run /// use firestore_db_and_auth::{Credentials}; /// /// let c: Credentials = Credentials::new(include_str!("../tests/service-account-test.json"))? /// .download_jwkset()?; /// # Ok::<(), firestore_db_and_auth::errors::FirebaseError>(()) /// ``` pub fn download_jwkset(mut self) -> Result<Credentials, Error> { self.download_google_jwks()?; self.verify()?; Ok(self) } /// Verifies that creating access tokens is possible with the given credentials and public keys. /// Returns an empty result type on success. pub fn verify(&self) -> Result<(), Error> { let access_token = create_jwt_encoded( &self, Some(["admin"].iter()), Duration::hours(1), Some(self.client_id.clone()), None, JWT_AUDIENCE_IDENTITY, )?; verify_access_token(&self, &access_token)?; Ok(()) } /// Find the secret in the jwt set that matches the given key id, if any. /// Used for jws validation pub fn decode_secret(&self, kid: &str) -> Option<Arc<biscuit::jws::Secret>> { self.keys.pub_key.get(kid).and_then(|f| Some(f.clone())) } /// Add a JSON Web Key Set (JWKS) to allow verification of Google access tokens. /// /// Example: /// /// ``` /// use firestore_db_and_auth::credentials::Credentials; /// use firestore_db_and_auth::JWKSet; /// /// let mut c : Credentials = serde_json::from_str(include_str!("../tests/service-account-test.json"))?; /// c.add_jwks_public_keys(&JWKSet::new(include_str!("../tests/service-account-test.jwks"))?); /// c.compute_secret()?; /// c.verify()?; /// # Ok::<(), firestore_db_and_auth::errors::FirebaseError>(()) /// ``` pub fn add_jwks_public_keys(&mut self, jwkset: &JWKSet) { for entry in jwkset.keys.iter() { if!entry.headers.key_id.is_some() { continue; } let key_id = entry.headers.key_id.as_ref().unwrap().to_owned(); self.keys .pub_key .insert(key_id, Arc::new(entry.ne.jws_public_key_secret())); } } /// If you haven't called [`Credentials::add_jwks_public_keys`] to manually add public keys, /// this method will download one for your google service account and one for the oauth related /// [email protected] service account. pub fn download_google_jwks(&mut self) -> Result<(), Error> { let jwks = download_google_jwks(&self.client_email)?; self.add_jwks_public_keys(&JWKSet::new(&jwks)?); let jwks = download_google_jwks("[email protected]")?; self.add_jwks_public_keys(&JWKSet::new(&jwks)?); Ok(()) } /// Compute the Rsa keypair by using the private_key of the credentials file. /// You must call this if you have manually created a credentials object. /// /// This is automatically invoked if you use [`Credentials::new`] or [`Credentials::from_file`]. pub fn compute_secret(&mut self) -> Result<(), Error> { use biscuit::jws::Secret; use ring::signature; let vec = pem_to_der(&self.private_key)?; let key_pair = signature::RsaKeyPair::from_pkcs8(&vec)?; self.keys.secret = Some(Arc::new(Secret::RsaKeyPair(Arc::new(key_pair)))); Ok(()) } } #[doc(hidden)] #[allow(dead_code)] pub fn doctest_credentials() -> Credentials { let jwk_list = JWKSet::new(include_str!("../tests/service-account-test.jwks")).unwrap(); Credentials::new(include_str!("../tests/service-account-test.json")) .expect("Failed to deserialize credentials") .with_jwkset(&jwk_list) .expect("JWK public keys verification failed") } #[test] fn deserialize_credentials()
{ let jwk_list = JWKSet::new(include_str!("../tests/service-account-test.jwks")).unwrap(); let c: Credentials = Credentials::new(include_str!("../tests/service-account-test.json")) .expect("Failed to deserialize credentials") .with_jwkset(&jwk_list) .expect("JWK public keys verification failed"); assert_eq!(c.api_key, "api_key"); use std::path::PathBuf; let mut credential_file = PathBuf::from(env!("CARGO_MANIFEST_DIR")); credential_file.push("tests/service-account-test.json"); let c = Credentials::from_file(credential_file.to_str().unwrap()) .expect("Failed to open credentials file") .with_jwkset(&jwk_list) .expect("JWK public keys verification failed"); assert_eq!(c.api_key, "api_key"); }
identifier_body
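The `deserialize_credentials` test body above locates its fixture through `env!("CARGO_MANIFEST_DIR")`, which resolves at compile time to the crate root, so the test works no matter what the working directory is when `cargo test` runs. The same pattern extracted into a small helper (the helper name is invented for the sketch):

```rust
use std::path::PathBuf;

// Build a path to a file under <crate root>/tests/, independent of cwd.
fn fixture(name: &str) -> PathBuf {
    let mut p = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
    p.push("tests");
    p.push(name);
    p
}

fn main() {
    let path = fixture("service-account-test.json");
    println!("{}", path.display());
}
```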
credentials.rs
//! # Credentials for accessing the Firebase REST API //! This module contains the [`crate::credentials::Credentials`] type, used by [`crate::sessions`] to create and maintain //! authentication tokens for accessing the Firebase REST API. use chrono::Duration; use serde::{Deserialize, Serialize}; use serde_json; use std::collections::BTreeMap; use std::fs::File; use std::sync::Arc; use super::jwt::{create_jwt_encoded, download_google_jwks, verify_access_token, JWKSet, JWT_AUDIENCE_IDENTITY}; use crate::errors::FirebaseError; use std::io::BufReader; type Error = super::errors::FirebaseError; /// This is not defined in the json file and is computed #[derive(Default, Clone)] pub(crate) struct Keys { pub pub_key: BTreeMap<String, Arc<biscuit::jws::Secret>>, pub secret: Option<Arc<biscuit::jws::Secret>>, } /// Service account credentials /// /// In particular, the service account email is required to retrieve the public JSON web key set (JWKS) /// for verifying Google Firestore tokens. /// /// The api_key is necessary for interacting with the Firestore REST API. /// /// Internals: /// /// The private key is used for signing JWTs (JSON Web Tokens). /// A signed jwt, encoded as a base64 string, can be exchanged into a refresh and access token. #[derive(Serialize, Deserialize, Default, Clone)] pub struct Credentials { pub project_id: String, pub private_key_id: String, pub private_key: String, pub client_email: String, pub client_id: String, pub api_key: String, #[serde(default, skip)] pub(crate) keys: Keys, } /// Converts a PEM (ascii base64) encoded private key into the binary der representation pub fn pem_to_der(pem_file_contents: &str) -> Result<Vec<u8>, Error> { use base64::decode; let pem_file_contents = pem_file_contents .find("-----BEGIN") // Cut off the first BEGIN part .and_then(|i| Some(&pem_file_contents[i + 10..])) // Find the trailing ---- after BEGIN and cut that off .and_then(|str| str.find("-----").and_then(|i| Some(&str[i + 5..]))) // Cut off -----END .and_then(|str| str.rfind("-----END").and_then(|i| Some(&str[..i]))); if pem_file_contents.is_none() { return Err(FirebaseError::Generic( "Invalid private key in credentials file. Must be valid PEM.", )); } let base64_body = pem_file_contents.unwrap().replace("\n", ""); Ok(decode(&base64_body) .map_err(|_| FirebaseError::Generic("Invalid private key in credentials file. Expected Base64 data."))?)
} #[test] fn pem_to_der_test() { const INPUT: &str = r#"-----BEGIN PRIVATE KEY----- MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCTbt9Rs2niyIRE FIdrhIN757eq/1Ry/VhZALBXAveg+lt+ui/9EHtYPJH1A9NyyAwChs0UCRWqkkEo Amtz4dJQ1YlGi0/BGhK2lg== -----END PRIVATE KEY----- "#; const EXPECTED: [u8; 112] = [ 48, 130, 4, 188, 2, 1, 0, 48, 13, 6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 1, 5, 0, 4, 130, 4, 166, 48, 130, 4, 162, 2, 1, 0, 2, 130, 1, 1, 0, 147, 110, 223, 81, 179, 105, 226, 200, 132, 68, 20, 135, 107, 132, 131, 123, 231, 183, 170, 255, 84, 114, 253, 88, 89, 0, 176, 87, 2, 247, 160, 250, 91, 126, 186, 47, 253, 16, 123, 88, 60, 145, 245, 3, 211, 114, 200, 12, 2, 134, 205, 20, 9, 21, 170, 146, 65, 40, 2, 107, 115, 225, 210, 80, 213, 137, 70, 139, 79, 193, 26, 18, 182, 150, ]; assert_eq!(&EXPECTED[..], &pem_to_der(INPUT).unwrap()[..]); } impl Credentials { /// Create a [`Credentials`] object by parsing a google-service-account json string /// /// Example: /// /// Assuming that your firebase service account credentials file is called "service-account-test.json" and /// a downloaded jwk-set file is called "service-account-test.jwks" this example embeds /// the file content during compile time. This avoids and http or io calls. /// /// ``` /// use firestore_db_and_auth::{Credentials}; /// use firestore_db_and_auth::jwt::JWKSet; /// /// let c: Credentials = Credentials::new(include_str!("../tests/service-account-test.json"))? /// .with_jwkset(&JWKSet::new(include_str!("../tests/service-account-test.jwks"))?)?; /// # Ok::<(), firestore_db_and_auth::errors::FirebaseError>(()) /// ``` /// /// You need two JWKS files for this crate to work: /// * https://www.googleapis.com/service_accounts/v1/jwk/[email protected] /// * https://www.googleapis.com/service_accounts/v1/jwk/{your-service-account-email} pub fn new(credentials_file_content: &str) -> Result<Credentials, Error> { let mut credentials: Credentials = serde_json::from_str(credentials_file_content)?; credentials.compute_secret()?; Ok(credentials) } /// Create a [`Credentials`] object by reading and parsing a google-service-account json file. /// /// This is a convenience method, that reads in the given credentials file and acts otherwise the same as /// the [`Credentials::new`] method. pub fn from_file(credential_file: &str) -> Result<Self, Error> { let f = BufReader::new(File::open(credential_file)?); let mut credentials: Credentials = serde_json::from_reader(f)?; credentials.compute_secret()?; Ok(credentials) } /// Adds public-key JWKs to a credentials instance and returns it. /// /// This method will also verify that the given JWKs files allow verification of Google access tokens. /// This is a convenience method, you may also just use [`Credentials::add_jwks_public_keys`]. pub fn with_jwkset(mut self, jwks: &JWKSet) -> Result<Credentials, Error> { self.add_jwks_public_keys(jwks); self.verify()?; Ok(self) } /// The public keys to verify generated tokens will be downloaded, for the given service account as well as /// for "[email protected]". /// Do not use this option if additional downloads are not desired, /// for example in cloud functions that require fast cold boot start times. /// /// You can use [`Credentials::add_jwks_public_keys`] to manually add/replace public keys later on. /// /// Example: /// /// Assuming that your firebase service account credentials file is called "service-account-test.json". 
/// /// ```no_run /// use firestore_db_and_auth::{Credentials}; /// /// let c: Credentials = Credentials::new(include_str!("../tests/service-account-test.json"))? /// .download_jwkset()?; /// # Ok::<(), firestore_db_and_auth::errors::FirebaseError>(()) /// ``` pub fn download_jwkset(mut self) -> Result<Credentials, Error> { self.download_google_jwks()?; self.verify()?; Ok(self) } /// Verifies that creating access tokens is possible with the given credentials and public keys. /// Returns an empty result type on success. pub fn verify(&self) -> Result<(), Error> { let access_token = create_jwt_encoded( &self, Some(["admin"].iter()), Duration::hours(1), Some(self.client_id.clone()), None, JWT_AUDIENCE_IDENTITY, )?; verify_access_token(&self, &access_token)?; Ok(()) } /// Find the secret in the jwt set that matches the given key id, if any. /// Used for jws validation pub fn decode_secret(&self, kid: &str) -> Option<Arc<biscuit::jws::Secret>> { self.keys.pub_key.get(kid).and_then(|f| Some(f.clone())) } /// Add a JSON Web Key Set (JWKS) to allow verification of Google access tokens. /// /// Example: /// /// ``` /// use firestore_db_and_auth::credentials::Credentials; /// use firestore_db_and_auth::JWKSet; /// /// let mut c : Credentials = serde_json::from_str(include_str!("../tests/service-account-test.json"))?; /// c.add_jwks_public_keys(&JWKSet::new(include_str!("../tests/service-account-test.jwks"))?); /// c.compute_secret()?; /// c.verify()?; /// # Ok::<(), firestore_db_and_auth::errors::FirebaseError>(()) /// ``` pub fn add_jwks_public_keys(&mut self, jwkset: &JWKSet) { for entry in jwkset.keys.iter() { if!entry.headers.key_id.is_some()
let key_id = entry.headers.key_id.as_ref().unwrap().to_owned(); self.keys .pub_key .insert(key_id, Arc::new(entry.ne.jws_public_key_secret())); } } /// If you haven't called [`Credentials::add_jwks_public_keys`] to manually add public keys, /// this method will download one for your google service account and one for the oauth related /// [email protected] service account. pub fn download_google_jwks(&mut self) -> Result<(), Error> { let jwks = download_google_jwks(&self.client_email)?; self.add_jwks_public_keys(&JWKSet::new(&jwks)?); let jwks = download_google_jwks("[email protected]")?; self.add_jwks_public_keys(&JWKSet::new(&jwks)?); Ok(()) } /// Compute the Rsa keypair by using the private_key of the credentials file. /// You must call this if you have manually created a credentials object. /// /// This is automatically invoked if you use [`Credentials::new`] or [`Credentials::from_file`]. pub fn compute_secret(&mut self) -> Result<(), Error> { use biscuit::jws::Secret; use ring::signature; let vec = pem_to_der(&self.private_key)?; let key_pair = signature::RsaKeyPair::from_pkcs8(&vec)?; self.keys.secret = Some(Arc::new(Secret::RsaKeyPair(Arc::new(key_pair)))); Ok(()) } } #[doc(hidden)] #[allow(dead_code)] pub fn doctest_credentials() -> Credentials { let jwk_list = JWKSet::new(include_str!("../tests/service-account-test.jwks")).unwrap(); Credentials::new(include_str!("../tests/service-account-test.json")) .expect("Failed to deserialize credentials") .with_jwkset(&jwk_list) .expect("JWK public keys verification failed") } #[test] fn deserialize_credentials() { let jwk_list = JWKSet::new(include_str!("../tests/service-account-test.jwks")).unwrap(); let c: Credentials = Credentials::new(include_str!("../tests/service-account-test.json")) .expect("Failed to deserialize credentials") .with_jwkset(&jwk_list) .expect("JWK public keys verification failed"); assert_eq!(c.api_key, "api_key"); use std::path::PathBuf; let mut credential_file = PathBuf::from(env!("CARGO_MANIFEST_DIR")); credential_file.push("tests/service-account-test.json"); let c = Credentials::from_file(credential_file.to_str().unwrap()) .expect("Failed to open credentials file") .with_jwkset(&jwk_list) .expect("JWK public keys verification failed"); assert_eq!(c.api_key, "api_key"); }
{ continue; }
conditional_block
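The conditional block filled in above (`{ continue; }`) guards the `key_id` unwrap in `add_jwks_public_keys`. The `is_some()` check followed by `unwrap()` can equally be written with `if let`, which binds and tests in one step; a sketch over a toy header type (not the biscuit types the crate actually uses):

```rust
struct Header {
    key_id: Option<String>,
}

fn key_ids(headers: &[Header]) -> Vec<String> {
    let mut out = Vec::new();
    for h in headers {
        // Bind and test in one step instead of `is_some()` + `unwrap()`.
        if let Some(kid) = &h.key_id {
            out.push(kid.clone());
        }
    }
    out
}

fn main() {
    let hs = [
        Header { key_id: Some("key-1".into()) },
        Header { key_id: None }, // skipped, as the `continue` branch does
    ];
    assert_eq!(key_ids(&hs), ["key-1"]);
}
```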
main.rs
1 2 3 4 4 3 2 1 = 1 2 2 1 unpack hi, min 1 2 2 1 1 1 2 2 = 1 1 2 1 */ let x = self.0; let y = _mm256_extractf128_ps(x, 1); let m1 = _mm_min_ps(_mm256_castps256_ps128(x), y); let m2 = _mm_permute_ps(m1, 27); let m2 = _mm_min_ps(m1, m2); let m3 = _mm_unpackhi_ps(m2, m2); let m = _mm_min_ps(m2, m3); _mm_cvtss_f32(m) } } fn splat(x: f32) -> Self { Self(unsafe { _mm256_set1_ps(x) }) } fn select(x: WideF32, y: WideF32, mask: WideF32) -> Self { Self(unsafe { _mm256_blendv_ps(x.0, y.0, mask.0) }) } fn sqrt(&self) -> Self { Self(unsafe { _mm256_sqrt_ps(self.0) }) } #[allow(dead_code)] fn rsqrt(&self) -> Self { Self(unsafe { _mm256_rsqrt_ps(self.0) }) } // approximate a sqrt using an inverse sqrt and one iteration of Newton-Raphson // https://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Iterative_methods_for_reciprocal_square_roots // Note: on many architectures this is significantly faster than the sqrt intrinsic. But this is not so on Skylake // for this program: approx_sqrt crowds the ports with additional mul/subs so is net slower #[allow(dead_code)] fn approx_sqrt(self) -> Self { let half = WideF32::splat(0.5); let three = WideF32::splat(3.0); let rsqrt = self.rsqrt(); let x = three - rsqrt * rsqrt * self; rsqrt * half * x * self } fn gt(&self, other: Self) -> Self { Self(unsafe { _mm256_cmp_ps(self.0, other.0, _CMP_GT_OQ) }) } fn lt(&self, other: Self) -> Self { Self(unsafe { _mm256_cmp_ps(self.0, other.0, _CMP_LT_OQ) }) } fn eq(&self, other: Self) -> Self { Self(unsafe { _mm256_cmp_ps(self.0, other.0, _CMP_EQ_OQ) }) } fn mul_add(x: Self, y: Self, z: Self) -> Self { Self(unsafe { _mm256_fmadd_ps(x.0, y.0, z.0) }) } fn mul_sub(x: Self, y: Self, z: Self) -> Self { Self(unsafe { _mm256_fmsub_ps(x.0, y.0, z.0) }) } } impl Add for WideF32 { type Output = Self; fn add(self, other: Self) -> Self { Self(unsafe { _mm256_add_ps(self.0, other.0) }) } } impl AddAssign for WideF32 { fn add_assign(&mut self, other: Self) { self.0 = unsafe { _mm256_add_ps(self.0, other.0) } } } impl BitAnd for WideF32 { type Output = Self; fn bitand(self, other: Self) -> Self { Self(unsafe { _mm256_and_ps(self.0, other.0) }) } } impl BitOr for WideF32 { type Output = Self; fn bitor(self, other: Self) -> Self { Self(unsafe { _mm256_or_ps(self.0, other.0) }) } } impl Div for WideF32 { type Output = Self; fn div(self, other: Self) -> Self { Self(unsafe { _mm256_div_ps(self.0, other.0) }) } } impl Sub for WideF32 { type Output = Self; fn sub(self, other: Self) -> Self { Self(unsafe { _mm256_sub_ps(self.0, other.0) }) } } impl Mul for WideF32 { type Output = Self; fn mul(self, other: Self) -> Self { Self(unsafe { _mm256_mul_ps(self.0, other.0) }) } } impl MulAssign for WideF32 { fn mul_assign(&mut self, other: Self) { self.0 = unsafe { _mm256_mul_ps(self.0, other.0) } } } impl Neg for WideF32 { type Output = Self; fn neg(self) -> Self { Self(unsafe { _mm256_xor_ps(self.0, _mm256_set1_ps(-0.0)) }) } } #[derive(Debug, Copy, Clone, PartialEq)] struct V3(f32, f32, f32); impl V3 { fn dot(self, other: V3) -> f32 { self.0 * other.0 + self.1 * other.1 + self.2 * other.2 } fn cross(self, other: V3) -> V3 { V3( self.1 * other.2 - self.2 * other.1, self.2 * other.0 - self.0 * other.2, self.0 * other.1 - self.1 * other.0, ) } fn normalize(self) -> V3 { self * (1.0 / self.len()) } fn reflect(self, normal: V3) -> V3 { self - normal * self.dot(normal) * 2.0 } fn len(self) -> f32 { self.dot(self).sqrt() } fn is_unit_vector(self) -> bool { (self.dot(self) - 1.0).abs() < TOLERANCE } } impl Add for V3 { type Output = Self; fn add(self, 
other: Self) -> Self { Self(self.0 + other.0, self.1 + other.1, self.2 + other.2) } } impl Add<f32> for V3 { type Output = Self; fn add(self, rhs: f32) -> Self { Self(self.0 + rhs, self.1 + rhs, self.2 + rhs) } } impl AddAssign for V3 { fn add_assign(&mut self, other: Self) { *self = Self(self.0 + other.0, self.1 + other.1, self.2 + other.2) } } impl Div<f32> for V3 { type Output = Self; fn div(self, rhs: f32) -> Self { Self(self.0 / rhs, self.1 / rhs, self.2 / rhs) } } impl Sub for V3 { type Output = Self; fn sub(self, other: Self) -> Self { Self(self.0 - other.0, self.1 - other.1, self.2 - other.2) } } impl Sub<f32> for V3 { type Output = Self; fn sub(self, rhs: f32) -> Self { Self(self.0 - rhs, self.1 - rhs, self.2 - rhs) } } impl Mul for V3 { type Output = Self; fn mul(self, other: Self) -> Self { Self(self.0 * other.0, self.1 * other.1, self.2 * other.2) } } impl Mul<f32> for V3 { type Output = Self; fn mul(self, rhs: f32) -> Self { Self(self.0 * rhs, self.1 * rhs, self.2 * rhs) } } impl MulAssign<f32> for V3 { fn mul_assign(&mut self, rhs: f32) { *self = Self(self.0 * rhs, self.1 * rhs, self.2 * rhs) } } impl MulAssign for V3 { fn mul_assign(&mut self, other: Self) { *self = Self(self.0 * other.0, self.1 * other.1, self.2 * other.2) } } #[derive(Debug)] struct Camera { origin: V3, x: V3, y: V3, z: V3, film_lower_left: V3, film_width: f32, film_height: f32, } impl Camera { fn new(look_from: V3, look_at: V3, aspect_ratio: f32) -> Camera { assert!(aspect_ratio > 1.0, "width must be greater than height"); let origin = look_from - look_at; let z = origin.normalize(); let x = V3(0.0, 0.0, 1.0).cross(z).normalize(); let y = z.cross(x).normalize(); let film_height = 1.0; let film_width = film_height * aspect_ratio; let film_lower_left = origin - z - y * 0.5 * film_height - x * 0.5 * film_width; Camera { origin, x, y, z, film_lower_left, film_width, film_height, } } } #[derive(Debug, Clone, PartialEq)] enum MaterialType { Diffuse, Specular, } #[derive(Debug, Clone, PartialEq)] struct Material { emit_color: V3, reflect_color: V3, t: MaterialType, } struct Sphere { p: V3, rsqrd: f32, m: Material, } impl Sphere { fn new(p: V3, r: f32, m: Material) -> Sphere { Sphere { p, rsqrd: r * r, m } } } struct Spheres { xs: Vec<f32>, ys: Vec<f32>, zs: Vec<f32>, rsqrds: Vec<f32>, mats: Vec<Material>, } impl Spheres { fn new(spheres: Vec<Sphere>) -> Self { let len = (spheres.len() + SIMD_WIDTH - 1) / SIMD_WIDTH * SIMD_WIDTH; let mut me = Self { xs: Vec::with_capacity(len), ys: Vec::with_capacity(len), zs: Vec::with_capacity(len), rsqrds: Vec::with_capacity(len), mats: Vec::with_capacity(len), }; for s in spheres { me.xs.push(s.p.0); me.ys.push(s.p.1); me.zs.push(s.p.2); me.rsqrds.push(s.rsqrd); me.mats.push(s.m); } // pad everything out to the simd width me.xs.resize(len, 0.0); me.ys.resize(len, 0.0); me.zs.resize(len, 0.0); me.rsqrds.resize(len, 0.0); let default_mat = Material { emit_color: V3(0.0, 0.0, 0.0), reflect_color: V3(0.0, 0.0, 0.0), t: MaterialType::Specular, }; me.mats.resize(len, default_mat); me } fn len(&self) -> usize { self.xs.len() } } // https://entropymine.com/imageworsener/srgbformula/ fn linear_to_srgb(x: f32) -> f32 { if x < 0.0 { 0.0 } else if x > 1.0 { 1.0 } else if x > 0.0031308 { 1.055 * x.powf(1.0 / 2.4) - 0.055 } else { x * 12.92 } } thread_local! 
{ static THREAD_RNG: Cell<u64> = { let mut buf = [0u8; 8]; getrandom::getrandom(&mut buf).unwrap(); Cell::new(u64::from_le_bytes(buf)) }; } fn rand_seed() -> u32 { let mut buf = [0u8; 4]; getrandom::getrandom(&mut buf).unwrap(); u32::from_le_bytes(buf) } #[allow(dead_code)] fn thread_rand() -> u32 { // TODO(eli): thread local perf is terrible; causes function call and branching THREAD_RNG.with(|rng_cell| { let mut state = rng_cell.get(); let randu = pcg(&mut state); rng_cell.set(state); randu }) } // Algorithm "xor" from p. 4 of Marsaglia, "Xorshift RNGs" // xorshift isn't great, but is good enough for our purposes and has two // nice properties: // 1. it only needs a u32 of state to generate a u32 // 2. it's easy to SIMD fn xorshift(state: &mut u32) -> u32 { debug_assert!(*state != 0, "xorshift cannot be seeded with 0"); let mut x = *state; x ^= x << 13; x ^= x >> 17; x ^= x << 5; *state = x; x } // pcg xsh rs 64/32 (mcg) #[allow(dead_code)] fn pcg(state: &mut u64) -> u32 { let s = *state; *state = s.wrapping_mul(6364136223846793005); (((s >> 22) ^ s) >> ((s >> 61) + 22)) as u32 } fn randf(state: &mut u32) -> f32 { let randu = (xorshift(state) >> 9) | 0x3f800000; let randf = f32::from_bits(randu) - 1.0; randf } fn randf_range(state: &mut u32, min: f32, max: f32) -> f32 { min + (max - min) * randf(state) } #[inline(always)] fn cast( rng_state: &mut u32, bg: &Material, spheres: &Spheres, mut origin: V3, mut dir: V3, mut bounces: u32, ) -> (V3, u32) { let mut color = V3(0.0, 0.0, 0.0); let mut reflectance = V3(1.0, 1.0, 1.0); let orig_bounces = bounces; loop { debug_assert!(dir.is_unit_vector()); let origin_xs = WideF32::splat(origin.0); let origin_ys = WideF32::splat(origin.1); let origin_zs = WideF32::splat(origin.2); let dir_x = WideF32::splat(dir.0); let dir_y = WideF32::splat(dir.1); let dir_z = WideF32::splat(dir.2); let mut hit_ids = WideI32::splat(-1); let mut hit_dists = WideF32::splat(f32::MAX); let mut iteration_ids = WideI32::new(7, 6, 5, 4, 3, 2, 1, 0); // TODO(eli): egregious bounds checking here for i in (0..spheres.len()).step_by(SIMD_WIDTH) { let sphere_xs = WideF32::load(&spheres.xs[i..i + SIMD_WIDTH]); let sphere_ys = WideF32::load(&spheres.ys[i..i + SIMD_WIDTH]); let sphere_zs = WideF32::load(&spheres.zs[i..i + SIMD_WIDTH]); let sphere_rsqrds = WideF32::load(&spheres.rsqrds[i..i + SIMD_WIDTH]); // this is sphere_relative_origin = origin - sphere_origin // but the math is flipped backwards because it saves us having to negate the b term let relative_xs = sphere_xs - origin_xs; let relative_ys = sphere_ys - origin_ys; let relative_zs = sphere_zs - origin_zs; let neg_b = dir_x * relative_xs; let neg_b = WideF32::mul_add(dir_y, relative_ys, neg_b); let neg_b = WideF32::mul_add(dir_z, relative_zs, neg_b); let c = WideF32::mul_sub(relative_xs, relative_xs, sphere_rsqrds); let c = WideF32::mul_add(relative_ys, relative_ys, c); let c = WideF32::mul_add(relative_zs, relative_zs, c); let discr = WideF32::mul_sub(neg_b, neg_b, c); let discrmask = discr.gt(WideF32::splat(0.0)); if discrmask.any() { let root_term = discr.sqrt(); let t0 = neg_b - root_term; let t1 = neg_b + root_term; // t0 if hit, else t1 let t = WideF32::select(t1, t0, t0.gt(WideF32::splat(TOLERANCE))); let mask = discrmask & t.gt(WideF32::splat(TOLERANCE)) & t.lt(hit_dists); hit_ids = WideI32::select(hit_ids, iteration_ids, mask); hit_dists = WideF32::select(hit_dists, t, mask); } iteration_ids += WideI32::splat(SIMD_WIDTH as i32); } let hmin = hit_dists.hmin(); if hmin < f32::MAX { let minmask = 
hit_dists.eq(WideF32::splat(hmin)).mask(); let min_idx = minmask.trailing_zeros() as usize; let hit_ids_arr: [i32; SIMD_WIDTH] = unsafe { std::mem::transmute(hit_ids.0) }; let hit_dists_arr: [f32; SIMD_WIDTH] = unsafe { std::mem::transmute(hit_dists.0) }; let id = hit_ids_arr[min_idx] as usize; let hit_dist = hit_dists_arr[min_idx]; let mat = &spheres.mats[id]; if bounces == 0 { color += reflectance * mat.emit_color; break; } else { bounces -= 1; color += reflectance * mat.emit_color; reflectance *= mat.reflect_color; let hit_point = origin + dir * hit_dist; origin = hit_point; dir = match mat.t { MaterialType::Specular => { let sp = V3(spheres.xs[id], spheres.ys[id], spheres.zs[id]); let hit_normal = (hit_point - sp).normalize(); dir.reflect(hit_normal) } MaterialType::Diffuse =>
} } } else { color += reflectance * bg.emit_color; break; } } (color, orig_bounces - bounces) } fn main() -> Result<(), Box<dyn std::error::Error>> { // flush denormals to zero unsafe { _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON) }; let mut args = Arguments::from_env(); let rays_per_pixel = args.opt_value_from_str(["-r", "--rays"])?.unwrap_or(100); let bounces = args.opt_value_from_str("--bounces")?.unwrap_or(8); let filename = args .opt_value_from_str("-o")? .unwrap_or("out.png".to_string()); args.finish()?; // Materials let bg = Material { emit_color: V3(0.3, 0.4, 0.8), reflect_color: V3(0.0, 0.0, 0.0), t: MaterialType::Specular, }; let ground = Material { emit_color: V3(0.0, 0.0, 0.0), reflect_color: V3(0.5, 0.5, 0.5), t: MaterialType::Diffuse, }; let left = Material { emit_color: V3(0.0, 0.0, 0.0), reflect_color: V3(1.0, 0.0, 0.0), t: MaterialType::Specular, }; let center = Material { emit_color: V3(0.4, 0.8, 0.9), reflect_color: V3(0.8, 0.8, 0.8), t: MaterialType::Specular, }; let right = Material { emit_color: V3(0.0, 0.0, 0.0), reflect_color: V3(0.95, 0.95, 0.95), t: MaterialType::Specular, }; let spheres = Spheres::new(vec![ Sphere::new(V3(0.0, 0.0, -100.0), 100.0, ground), Sphere::new(V3(0.0, 0.0, 1.0), 1.0, center), Sphere::new(V3(-2.0, -3.0, 1.5), 0.3, right.clone()), Sphere::new(V3(-3.0, -6.0, 0.0), 0.3, right.clone()), Sphere::new(V3(-3.0, -5.0, 2.0), 0.5, left.clone()), Sphere::new(V3(3.0, -3.0, 0.8), 1.0, right.clone()),
{ let a = randf_range(rng_state, 0.0, 2.0 * PI); let z = randf_range(rng_state, -1.0, 1.0); let r = (1.0 - z * z).sqrt(); V3(r * a.cos(), r * a.sin(), z) }
conditional_block
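The `randf` in the record above builds a uniform float without a divide, and the completed middle samples a uniform direction on the unit sphere. Below is a standalone sketch of both tricks (the seed value is illustrative): 0x3f800000 is the IEEE-754 bit pattern of 1.0f32 (sign 0, biased exponent 127, zero mantissa), so OR-ing in 23 random mantissa bits from `xorshift(state) >> 9` yields a uniform float in [1.0, 2.0), and subtracting 1.0 maps it to [0.0, 1.0). The sphere sampler picks z uniformly in [-1, 1] and an angle in [0, 2*PI); by the hat-box (cylinder) projection this is uniform over the sphere surface.

use std::f32::consts::PI;

// The record's xorshift plus a condensed randf, so the sketch runs standalone.
fn xorshift(state: &mut u32) -> u32 {
    debug_assert!(*state != 0, "xorshift cannot be seeded with 0");
    let mut x = *state;
    x ^= x << 13;
    x ^= x >> 17;
    x ^= x << 5;
    *state = x;
    x
}

fn randf(state: &mut u32) -> f32 {
    f32::from_bits((xorshift(state) >> 9) | 0x3f800000) - 1.0
}

fn main() {
    let mut state = 0x1234_5678; // illustrative nonzero seed
    for _ in 0..1_000 {
        // bit trick: always lands in [0, 1)
        let r = randf(&mut state);
        assert!((0.0..1.0).contains(&r));
        // unit-sphere sample, as in this record's middle
        let a = 2.0 * PI * randf(&mut state);
        let z = 2.0 * randf(&mut state) - 1.0;
        let r2 = 1.0 - z * z;
        let (x, y) = (r2.sqrt() * a.cos(), r2.sqrt() * a.sin());
        let len2 = x * x + y * y + z * z; // r2*cos^2 + r2*sin^2 + z^2 = 1
        assert!((len2 - 1.0).abs() < 1e-5);
    }
}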
main.rs
1 2 3 4 4 3 2 1 = 1 2 2 1 unpack hi, min 1 2 2 1 1 1 2 2 = 1 1 2 1 */ let x = self.0; let y = _mm256_extractf128_ps(x, 1); let m1 = _mm_min_ps(_mm256_castps256_ps128(x), y); let m2 = _mm_permute_ps(m1, 27); let m2 = _mm_min_ps(m1, m2); let m3 = _mm_unpackhi_ps(m2, m2); let m = _mm_min_ps(m2, m3); _mm_cvtss_f32(m) } } fn splat(x: f32) -> Self { Self(unsafe { _mm256_set1_ps(x) }) } fn select(x: WideF32, y: WideF32, mask: WideF32) -> Self { Self(unsafe { _mm256_blendv_ps(x.0, y.0, mask.0) }) } fn sqrt(&self) -> Self { Self(unsafe { _mm256_sqrt_ps(self.0) }) } #[allow(dead_code)] fn rsqrt(&self) -> Self { Self(unsafe { _mm256_rsqrt_ps(self.0) }) } // approximate a sqrt using an inverse sqrt and one iteration of Newton-Raphson // https://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Iterative_methods_for_reciprocal_square_roots // Note: on many architectures this is significantly faster than the sqrt intrinsic. But this is not so on Skylake // for this program: approx_sqrt crowds the ports with additional mul/subs so is net slower #[allow(dead_code)] fn approx_sqrt(self) -> Self { let half = WideF32::splat(0.5); let three = WideF32::splat(3.0); let rsqrt = self.rsqrt(); let x = three - rsqrt * rsqrt * self; rsqrt * half * x * self } fn gt(&self, other: Self) -> Self { Self(unsafe { _mm256_cmp_ps(self.0, other.0, _CMP_GT_OQ) }) } fn lt(&self, other: Self) -> Self { Self(unsafe { _mm256_cmp_ps(self.0, other.0, _CMP_LT_OQ) }) } fn eq(&self, other: Self) -> Self { Self(unsafe { _mm256_cmp_ps(self.0, other.0, _CMP_EQ_OQ) }) } fn mul_add(x: Self, y: Self, z: Self) -> Self { Self(unsafe { _mm256_fmadd_ps(x.0, y.0, z.0) }) } fn mul_sub(x: Self, y: Self, z: Self) -> Self { Self(unsafe { _mm256_fmsub_ps(x.0, y.0, z.0) }) } } impl Add for WideF32 { type Output = Self; fn add(self, other: Self) -> Self { Self(unsafe { _mm256_add_ps(self.0, other.0) }) } } impl AddAssign for WideF32 { fn add_assign(&mut self, other: Self) { self.0 = unsafe { _mm256_add_ps(self.0, other.0) } } } impl BitAnd for WideF32 { type Output = Self; fn bitand(self, other: Self) -> Self { Self(unsafe { _mm256_and_ps(self.0, other.0) }) } } impl BitOr for WideF32 { type Output = Self; fn bitor(self, other: Self) -> Self
} impl Div for WideF32 { type Output = Self; fn div(self, other: Self) -> Self { Self(unsafe { _mm256_div_ps(self.0, other.0) }) } } impl Sub for WideF32 { type Output = Self; fn sub(self, other: Self) -> Self { Self(unsafe { _mm256_sub_ps(self.0, other.0) }) } } impl Mul for WideF32 { type Output = Self; fn mul(self, other: Self) -> Self { Self(unsafe { _mm256_mul_ps(self.0, other.0) }) } } impl MulAssign for WideF32 { fn mul_assign(&mut self, other: Self) { self.0 = unsafe { _mm256_mul_ps(self.0, other.0) } } } impl Neg for WideF32 { type Output = Self; fn neg(self) -> Self { Self(unsafe { _mm256_xor_ps(self.0, _mm256_set1_ps(-0.0)) }) } } #[derive(Debug, Copy, Clone, PartialEq)] struct V3(f32, f32, f32); impl V3 { fn dot(self, other: V3) -> f32 { self.0 * other.0 + self.1 * other.1 + self.2 * other.2 } fn cross(self, other: V3) -> V3 { V3( self.1 * other.2 - self.2 * other.1, self.2 * other.0 - self.0 * other.2, self.0 * other.1 - self.1 * other.0, ) } fn normalize(self) -> V3 { self * (1.0 / self.len()) } fn reflect(self, normal: V3) -> V3 { self - normal * self.dot(normal) * 2.0 } fn len(self) -> f32 { self.dot(self).sqrt() } fn is_unit_vector(self) -> bool { (self.dot(self) - 1.0).abs() < TOLERANCE } } impl Add for V3 { type Output = Self; fn add(self, other: Self) -> Self { Self(self.0 + other.0, self.1 + other.1, self.2 + other.2) } } impl Add<f32> for V3 { type Output = Self; fn add(self, rhs: f32) -> Self { Self(self.0 + rhs, self.1 + rhs, self.2 + rhs) } } impl AddAssign for V3 { fn add_assign(&mut self, other: Self) { *self = Self(self.0 + other.0, self.1 + other.1, self.2 + other.2) } } impl Div<f32> for V3 { type Output = Self; fn div(self, rhs: f32) -> Self { Self(self.0 / rhs, self.1 / rhs, self.2 / rhs) } } impl Sub for V3 { type Output = Self; fn sub(self, other: Self) -> Self { Self(self.0 - other.0, self.1 - other.1, self.2 - other.2) } } impl Sub<f32> for V3 { type Output = Self; fn sub(self, rhs: f32) -> Self { Self(self.0 - rhs, self.1 - rhs, self.2 - rhs) } } impl Mul for V3 { type Output = Self; fn mul(self, other: Self) -> Self { Self(self.0 * other.0, self.1 * other.1, self.2 * other.2) } } impl Mul<f32> for V3 { type Output = Self; fn mul(self, rhs: f32) -> Self { Self(self.0 * rhs, self.1 * rhs, self.2 * rhs) } } impl MulAssign<f32> for V3 { fn mul_assign(&mut self, rhs: f32) { *self = Self(self.0 * rhs, self.1 * rhs, self.2 * rhs) } } impl MulAssign for V3 { fn mul_assign(&mut self, other: Self) { *self = Self(self.0 * other.0, self.1 * other.1, self.2 * other.2) } } #[derive(Debug)] struct Camera { origin: V3, x: V3, y: V3, z: V3, film_lower_left: V3, film_width: f32, film_height: f32, } impl Camera { fn new(look_from: V3, look_at: V3, aspect_ratio: f32) -> Camera { assert!(aspect_ratio > 1.0, "width must be greater than height"); let origin = look_from - look_at; let z = origin.normalize(); let x = V3(0.0, 0.0, 1.0).cross(z).normalize(); let y = z.cross(x).normalize(); let film_height = 1.0; let film_width = film_height * aspect_ratio; let film_lower_left = origin - z - y * 0.5 * film_height - x * 0.5 * film_width; Camera { origin, x, y, z, film_lower_left, film_width, film_height, } } } #[derive(Debug, Clone, PartialEq)] enum MaterialType { Diffuse, Specular, } #[derive(Debug, Clone, PartialEq)] struct Material { emit_color: V3, reflect_color: V3, t: MaterialType, } struct Sphere { p: V3, rsqrd: f32, m: Material, } impl Sphere { fn new(p: V3, r: f32, m: Material) -> Sphere { Sphere { p, rsqrd: r * r, m } } } struct Spheres { xs: Vec<f32>, ys: Vec<f32>, zs: 
Vec<f32>, rsqrds: Vec<f32>, mats: Vec<Material>, } impl Spheres { fn new(spheres: Vec<Sphere>) -> Self { let len = (spheres.len() + SIMD_WIDTH - 1) / SIMD_WIDTH * SIMD_WIDTH; let mut me = Self { xs: Vec::with_capacity(len), ys: Vec::with_capacity(len), zs: Vec::with_capacity(len), rsqrds: Vec::with_capacity(len), mats: Vec::with_capacity(len), }; for s in spheres { me.xs.push(s.p.0); me.ys.push(s.p.1); me.zs.push(s.p.2); me.rsqrds.push(s.rsqrd); me.mats.push(s.m); } // pad everything out to the simd width me.xs.resize(len, 0.0); me.ys.resize(len, 0.0); me.zs.resize(len, 0.0); me.rsqrds.resize(len, 0.0); let default_mat = Material { emit_color: V3(0.0, 0.0, 0.0), reflect_color: V3(0.0, 0.0, 0.0), t: MaterialType::Specular, }; me.mats.resize(len, default_mat); me } fn len(&self) -> usize { self.xs.len() } } // https://entropymine.com/imageworsener/srgbformula/ fn linear_to_srgb(x: f32) -> f32 { if x < 0.0 { 0.0 } else if x > 1.0 { 1.0 } else if x > 0.0031308 { 1.055 * x.powf(1.0 / 2.4) - 0.055 } else { x * 12.92 } } thread_local! { static THREAD_RNG: Cell<u64> = { let mut buf = [0u8; 8]; getrandom::getrandom(&mut buf).unwrap(); Cell::new(u64::from_le_bytes(buf)) }; } fn rand_seed() -> u32 { let mut buf = [0u8; 4]; getrandom::getrandom(&mut buf).unwrap(); u32::from_le_bytes(buf) } #[allow(dead_code)] fn thread_rand() -> u32 { // TODO(eli): thread local perf is terrible; causes function call and branching THREAD_RNG.with(|rng_cell| { let mut state = rng_cell.get(); let randu = pcg(&mut state); rng_cell.set(state); randu }) } // Algorithm "xor" from p. 4 of Marsaglia, "Xorshift RNGs" // xorshift isn't great, but is good enough for our purposes and has two // nice properties: // 1. it only needs a u32 of state to generate a u32 // 2. it's easy to SIMD fn xorshift(state: &mut u32) -> u32 { debug_assert!(*state != 0, "xorshift cannot be seeded with 0"); let mut x = *state; x ^= x << 13; x ^= x >> 17; x ^= x << 5; *state = x; x } // pcg xsh rs 64/32 (mcg) #[allow(dead_code)] fn pcg(state: &mut u64) -> u32 { let s = *state; *state = s.wrapping_mul(6364136223846793005); (((s >> 22) ^ s) >> ((s >> 61) + 22)) as u32 } fn randf(state: &mut u32) -> f32 { let randu = (xorshift(state) >> 9) | 0x3f800000; let randf = f32::from_bits(randu) - 1.0; randf } fn randf_range(state: &mut u32, min: f32, max: f32) -> f32 { min + (max - min) * randf(state) } #[inline(always)] fn cast( rng_state: &mut u32, bg: &Material, spheres: &Spheres, mut origin: V3, mut dir: V3, mut bounces: u32, ) -> (V3, u32) { let mut color = V3(0.0, 0.0, 0.0); let mut reflectance = V3(1.0, 1.0, 1.0); let orig_bounces = bounces; loop { debug_assert!(dir.is_unit_vector()); let origin_xs = WideF32::splat(origin.0); let origin_ys = WideF32::splat(origin.1); let origin_zs = WideF32::splat(origin.2); let dir_x = WideF32::splat(dir.0); let dir_y = WideF32::splat(dir.1); let dir_z = WideF32::splat(dir.2); let mut hit_ids = WideI32::splat(-1); let mut hit_dists = WideF32::splat(f32::MAX); let mut iteration_ids = WideI32::new(7, 6, 5, 4, 3, 2, 1, 0); // TODO(eli): egregious bounds checking here for i in (0..spheres.len()).step_by(SIMD_WIDTH) { let sphere_xs = WideF32::load(&spheres.xs[i..i + SIMD_WIDTH]); let sphere_ys = WideF32::load(&spheres.ys[i..i + SIMD_WIDTH]); let sphere_zs = WideF32::load(&spheres.zs[i..i + SIMD_WIDTH]); let sphere_rsqrds = WideF32::load(&spheres.rsqrds[i..i + SIMD_WIDTH]); // this is sphere_relative_origin = origin - sphere_origin // but the math is flipped backwards because it saves us having to negate the b term let
relative_xs = sphere_xs - origin_xs; let relative_ys = sphere_ys - origin_ys; let relative_zs = sphere_zs - origin_zs; let neg_b = dir_x * relative_xs; let neg_b = WideF32::mul_add(dir_y, relative_ys, neg_b); let neg_b = WideF32::mul_add(dir_z, relative_zs, neg_b); let c = WideF32::mul_sub(relative_xs, relative_xs, sphere_rsqrds); let c = WideF32::mul_add(relative_ys, relative_ys, c); let c = WideF32::mul_add(relative_zs, relative_zs, c); let discr = WideF32::mul_sub(neg_b, neg_b, c); let discrmask = discr.gt(WideF32::splat(0.0)); if discrmask.any() { let root_term = discr.sqrt(); let t0 = neg_b - root_term; let t1 = neg_b + root_term; // t0 if hit, else t1 let t = WideF32::select(t1, t0, t0.gt(WideF32::splat(TOLERANCE))); let mask = discrmask & t.gt(WideF32::splat(TOLERANCE)) & t.lt(hit_dists); hit_ids = WideI32::select(hit_ids, iteration_ids, mask); hit_dists = WideF32::select(hit_dists, t, mask); } iteration_ids += WideI32::splat(SIMD_WIDTH as i32); } let hmin = hit_dists.hmin(); if hmin < f32::MAX { let minmask = hit_dists.eq(WideF32::splat(hmin)).mask(); let min_idx = minmask.trailing_zeros() as usize; let hit_ids_arr: [i32; SIMD_WIDTH] = unsafe { std::mem::transmute(hit_ids.0) }; let hit_dists_arr: [f32; SIMD_WIDTH] = unsafe { std::mem::transmute(hit_dists.0) }; let id = hit_ids_arr[min_idx] as usize; let hit_dist = hit_dists_arr[min_idx]; let mat = &spheres.mats[id]; if bounces == 0 { color += reflectance * mat.emit_color; break; } else { bounces -= 1; color += reflectance * mat.emit_color; reflectance *= mat.reflect_color; let hit_point = origin + dir * hit_dist; origin = hit_point; dir = match mat.t { MaterialType::Specular => { let sp = V3(spheres.xs[id], spheres.ys[id], spheres.zs[id]); let hit_normal = (hit_point - sp).normalize(); dir.reflect(hit_normal) } MaterialType::Diffuse => { let a = randf_range(rng_state, 0.0, 2.0 * PI); let z = randf_range(rng_state, -1.0, 1.0); let r = (1.0 - z * z).sqrt(); V3(r * a.cos(), r * a.sin(), z) } } } } else { color += reflectance * bg.emit_color; break; } } (color, orig_bounces - bounces) } fn main() -> Result<(), Box<dyn std::error::Error>> { // flush denormals to zero unsafe { _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON) }; let mut args = Arguments::from_env(); let rays_per_pixel = args.opt_value_from_str(["-r", "--rays"])?.unwrap_or(100); let bounces = args.opt_value_from_str("--bounces")?.unwrap_or(8); let filename = args .opt_value_from_str("-o")? .unwrap_or("out.png".to_string()); args.finish()?; // Materials let bg = Material { emit_color: V3(0.3, 0.4, 0.8), reflect_color: V3(0.0, 0.0, 0.0), t: MaterialType::Specular, }; let ground = Material { emit_color: V3(0.0, 0.0, 0.0), reflect_color: V3(0.5, 0.5, 0.5), t: MaterialType::Diffuse, }; let left = Material { emit_color: V3(0.0, 0.0, 0.0), reflect_color: V3(1.0, 0.0, 0.0), t: MaterialType::Specular, }; let center = Material { emit_color: V3(0.4, 0.8, 0.9), reflect_color: V3(0.8, 0.8, 0.8), t: MaterialType::Specular, }; let right = Material { emit_color: V3(0.0, 0.0, 0.0), reflect_color: V3(0.95, 0.95, 0.95), t: MaterialType::Specular, }; let spheres = Spheres::new(vec![ Sphere::new(V3(0.0, 0.0, -100.0), 100.0, ground), Sphere::new(V3(0.0, 0.0, 1.0), 1.0, center), Sphere::new(V3(-2.0, -3.0, 1.5), 0.3, right.clone()), Sphere::new(V3(-3.0, -6.0, 0.0), 0.3, right.clone()), Sphere::new(V3(-3.0, -5.0, 2.0), 0.5, left.clone()), Sphere::new(V3(3.0, -3.0, 0.8), 1.0, right.clone()),
{ Self(unsafe { _mm256_or_ps(self.0, other.0) }) }
identifier_body
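The `hmin` in this record reduces eight lanes to a horizontal minimum with three shuffle+min steps, as the block comment's lane diagram shows. The dataflow is easier to check in scalar form; here is a minimal model using plain arrays instead of intrinsics (the pairing below mirrors the extractf128 / permute(27) / unpackhi sequence):

// Scalar model of the 8-lane horizontal minimum: each step halves the
// number of live candidates, so 8 lanes take log2(8) = 3 min steps.
fn hmin_scalar(lanes: [f32; 8]) -> f32 {
    // _mm256_extractf128_ps + min: fold the high 128-bit half onto the low (8 -> 4)
    let m4 = [
        lanes[0].min(lanes[4]),
        lanes[1].min(lanes[5]),
        lanes[2].min(lanes[6]),
        lanes[3].min(lanes[7]),
    ];
    // _mm_permute_ps(m1, 27) reverses the four lanes; the min pairs outer
    // with outer and inner with inner (4 -> 2 distinct values)
    let m2 = [m4[0].min(m4[3]), m4[1].min(m4[2])];
    // _mm_unpackhi_ps + min collapses the final pair (2 -> 1)
    m2[0].min(m2[1])
}

fn main() {
    let v = [5.0, 2.0, 8.0, 1.0, 9.0, 3.0, 7.0, 4.0];
    assert_eq!(hmin_scalar(v), 1.0);
}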
main.rs
1 2 3 4 4 3 2 1 = 1 2 2 1 unpack hi, min 1 2 2 1 1 1 2 2 = 1 1 2 1 */ let x = self.0; let y = _mm256_extractf128_ps(x, 1); let m1 = _mm_min_ps(_mm256_castps256_ps128(x), y); let m2 = _mm_permute_ps(m1, 27); let m2 = _mm_min_ps(m1, m2); let m3 = _mm_unpackhi_ps(m2, m2); let m = _mm_min_ps(m2, m3); _mm_cvtss_f32(m) } } fn splat(x: f32) -> Self { Self(unsafe { _mm256_set1_ps(x) }) } fn select(x: WideF32, y: WideF32, mask: WideF32) -> Self { Self(unsafe { _mm256_blendv_ps(x.0, y.0, mask.0) }) } fn sqrt(&self) -> Self { Self(unsafe { _mm256_sqrt_ps(self.0) }) } #[allow(dead_code)] fn rsqrt(&self) -> Self { Self(unsafe { _mm256_rsqrt_ps(self.0) }) } // approximate a sqrt using an inverse sqrt and one iteration of Newton-Raphson // https://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Iterative_methods_for_reciprocal_square_roots // Note: on many architectures this is significantly faster than the sqrt intrinsic. But this is not so on Skylake // for this program: approx_sqrt crowds the ports with additional mul/subs so is net slower #[allow(dead_code)] fn approx_sqrt(self) -> Self { let half = WideF32::splat(0.5); let three = WideF32::splat(3.0); let rsqrt = self.rsqrt(); let x = three - rsqrt * rsqrt * self; rsqrt * half * x * self } fn gt(&self, other: Self) -> Self { Self(unsafe { _mm256_cmp_ps(self.0, other.0, _CMP_GT_OQ) }) } fn lt(&self, other: Self) -> Self { Self(unsafe { _mm256_cmp_ps(self.0, other.0, _CMP_LT_OQ) }) } fn eq(&self, other: Self) -> Self { Self(unsafe { _mm256_cmp_ps(self.0, other.0, _CMP_EQ_OQ) }) } fn mul_add(x: Self, y: Self, z: Self) -> Self { Self(unsafe { _mm256_fmadd_ps(x.0, y.0, z.0) }) } fn mul_sub(x: Self, y: Self, z: Self) -> Self { Self(unsafe { _mm256_fmsub_ps(x.0, y.0, z.0) }) } } impl Add for WideF32 { type Output = Self; fn add(self, other: Self) -> Self { Self(unsafe { _mm256_add_ps(self.0, other.0) }) } } impl AddAssign for WideF32 { fn add_assign(&mut self, other: Self) { self.0 = unsafe { _mm256_add_ps(self.0, other.0) } } } impl BitAnd for WideF32 { type Output = Self; fn bitand(self, other: Self) -> Self { Self(unsafe { _mm256_and_ps(self.0, other.0) }) } } impl BitOr for WideF32 { type Output = Self; fn bitor(self, other: Self) -> Self { Self(unsafe { _mm256_or_ps(self.0, other.0) }) } } impl Div for WideF32 { type Output = Self; fn div(self, other: Self) -> Self { Self(unsafe { _mm256_div_ps(self.0, other.0) }) } } impl Sub for WideF32 { type Output = Self; fn sub(self, other: Self) -> Self { Self(unsafe { _mm256_sub_ps(self.0, other.0) }) } } impl Mul for WideF32 { type Output = Self; fn mul(self, other: Self) -> Self { Self(unsafe { _mm256_mul_ps(self.0, other.0) }) } } impl MulAssign for WideF32 { fn mul_assign(&mut self, other: Self) { self.0 = unsafe { _mm256_mul_ps(self.0, other.0) } } } impl Neg for WideF32 { type Output = Self; fn neg(self) -> Self { Self(unsafe { _mm256_xor_ps(self.0, _mm256_set1_ps(-0.0)) }) } } #[derive(Debug, Copy, Clone, PartialEq)] struct V3(f32, f32, f32); impl V3 { fn dot(self, other: V3) -> f32 { self.0 * other.0 + self.1 * other.1 + self.2 * other.2 } fn cross(self, other: V3) -> V3 { V3( self.1 * other.2 - self.2 * other.1, self.2 * other.0 - self.0 * other.2, self.0 * other.1 - self.1 * other.0, ) } fn normalize(self) -> V3 { self * (1.0 / self.len()) } fn reflect(self, normal: V3) -> V3 { self - normal * self.dot(normal) * 2.0 } fn len(self) -> f32 { self.dot(self).sqrt() } fn is_unit_vector(self) -> bool { (self.dot(self) - 1.0).abs() < TOLERANCE } } impl Add for V3 { type Output = Self; fn add(self, 
other: Self) -> Self { Self(self.0 + other.0, self.1 + other.1, self.2 + other.2) } } impl Add<f32> for V3 { type Output = Self; fn add(self, rhs: f32) -> Self { Self(self.0 + rhs, self.1 + rhs, self.2 + rhs) } } impl AddAssign for V3 { fn add_assign(&mut self, other: Self) { *self = Self(self.0 + other.0, self.1 + other.1, self.2 + other.2) } } impl Div<f32> for V3 { type Output = Self; fn div(self, rhs: f32) -> Self { Self(self.0 / rhs, self.1 / rhs, self.2 / rhs) } } impl Sub for V3 { type Output = Self; fn sub(self, other: Self) -> Self { Self(self.0 - other.0, self.1 - other.1, self.2 - other.2) } } impl Sub<f32> for V3 { type Output = Self; fn sub(self, rhs: f32) -> Self { Self(self.0 - rhs, self.1 - rhs, self.2 - rhs) } } impl Mul for V3 { type Output = Self; fn mul(self, other: Self) -> Self { Self(self.0 * other.0, self.1 * other.1, self.2 * other.2) } } impl Mul<f32> for V3 { type Output = Self; fn mul(self, rhs: f32) -> Self { Self(self.0 * rhs, self.1 * rhs, self.2 * rhs) } } impl MulAssign<f32> for V3 { fn mul_assign(&mut self, rhs: f32) { *self = Self(self.0 * rhs, self.1 * rhs, self.2 * rhs) } } impl MulAssign for V3 { fn mul_assign(&mut self, other: Self) { *self = Self(self.0 * other.0, self.1 * other.1, self.2 * other.2) } } #[derive(Debug)] struct Camera { origin: V3, x: V3, y: V3, z: V3, film_lower_left: V3, film_width: f32, film_height: f32, } impl Camera { fn new(look_from: V3, look_at: V3, aspect_ratio: f32) -> Camera { assert!(aspect_ratio > 1.0, "width must be greater than height"); let origin = look_from - look_at; let z = origin.normalize(); let x = V3(0.0, 0.0, 1.0).cross(z).normalize(); let y = z.cross(x).normalize(); let film_height = 1.0; let film_width = film_height * aspect_ratio; let film_lower_left = origin - z - y * 0.5 * film_height - x * 0.5 * film_width; Camera { origin, x, y, z, film_lower_left, film_width, film_height, } } } #[derive(Debug, Clone, PartialEq)] enum MaterialType { Diffuse, Specular, } #[derive(Debug, Clone, PartialEq)] struct Material { emit_color: V3, reflect_color: V3, t: MaterialType, } struct Sphere { p: V3, rsqrd: f32, m: Material, } impl Sphere { fn new(p: V3, r: f32, m: Material) -> Sphere { Sphere { p, rsqrd: r * r, m } } } struct
{ xs: Vec<f32>, ys: Vec<f32>, zs: Vec<f32>, rsqrds: Vec<f32>, mats: Vec<Material>, } impl Spheres { fn new(spheres: Vec<Sphere>) -> Self { let len = (spheres.len() + SIMD_WIDTH - 1) / SIMD_WIDTH * SIMD_WIDTH; let mut me = Self { xs: Vec::with_capacity(len), ys: Vec::with_capacity(len), zs: Vec::with_capacity(len), rsqrds: Vec::with_capacity(len), mats: Vec::with_capacity(len), }; for s in spheres { me.xs.push(s.p.0); me.ys.push(s.p.1); me.zs.push(s.p.2); me.rsqrds.push(s.rsqrd); me.mats.push(s.m); } // pad everything out to the simd width me.xs.resize(len, 0.0); me.ys.resize(len, 0.0); me.zs.resize(len, 0.0); me.rsqrds.resize(len, 0.0); let default_mat = Material { emit_color: V3(0.0, 0.0, 0.0), reflect_color: V3(0.0, 0.0, 0.0), t: MaterialType::Specular, }; me.mats.resize(len, default_mat); me } fn len(&self) -> usize { self.xs.len() } } // https://entropymine.com/imageworsener/srgbformula/ fn linear_to_srgb(x: f32) -> f32 { if x < 0.0 { 0.0 } else if x > 1.0 { 1.0 } else if x > 0.0031308 { 1.055 * x.powf(1.0 / 2.4) - 0.055 } else { x * 12.92 } } thread_local! { static THREAD_RNG: Cell<u64> = { let mut buf = [0u8; 8]; getrandom::getrandom(&mut buf).unwrap(); Cell::new(u64::from_le_bytes(buf)) }; } fn rand_seed() -> u32 { let mut buf = [0u8; 4]; getrandom::getrandom(&mut buf).unwrap(); u32::from_le_bytes(buf) } #[allow(dead_code)] fn thread_rand() -> u32 { // TODO(eli): thread local perf is terrible; causes function call and branching THREAD_RNG.with(|rng_cell| { let mut state = rng_cell.get(); let randu = pcg(&mut state); rng_cell.set(state); randu }) } // Algorithm "xor" from p. 4 of Marsaglia, "Xorshift RNGs" // xorshift isn't great, but is good enough for our purposes and has two // nice properties: // 1. it only needs a u32 of state to generate a u32 // 2. 
it's easy to SIMD fn xorshift(state: &mut u32) -> u32 { debug_assert!(*state != 0, "xorshift cannot be seeded with 0"); let mut x = *state; x ^= x << 13; x ^= x >> 17; x ^= x << 5; *state = x; x } // pcg xsh rs 64/32 (mcg) #[allow(dead_code)] fn pcg(state: &mut u64) -> u32 { let s = *state; *state = s.wrapping_mul(6364136223846793005); (((s >> 22) ^ s) >> ((s >> 61) + 22)) as u32 } fn randf(state: &mut u32) -> f32 { let randu = (xorshift(state) >> 9) | 0x3f800000; let randf = f32::from_bits(randu) - 1.0; randf } fn randf_range(state: &mut u32, min: f32, max: f32) -> f32 { min + (max - min) * randf(state) } #[inline(always)] fn cast( rng_state: &mut u32, bg: &Material, spheres: &Spheres, mut origin: V3, mut dir: V3, mut bounces: u32, ) -> (V3, u32) { let mut color = V3(0.0, 0.0, 0.0); let mut reflectance = V3(1.0, 1.0, 1.0); let orig_bounces = bounces; loop { debug_assert!(dir.is_unit_vector()); let origin_xs = WideF32::splat(origin.0); let origin_ys = WideF32::splat(origin.1); let origin_zs = WideF32::splat(origin.2); let dir_x = WideF32::splat(dir.0); let dir_y = WideF32::splat(dir.1); let dir_z = WideF32::splat(dir.2); let mut hit_ids = WideI32::splat(-1); let mut hit_dists = WideF32::splat(f32::MAX); let mut iteration_ids = WideI32::new(7, 6, 5, 4, 3, 2, 1, 0); // TODO(eli): egregious bounds checking here for i in (0..spheres.len()).step_by(SIMD_WIDTH) { let sphere_xs = WideF32::load(&spheres.xs[i..i + SIMD_WIDTH]); let sphere_ys = WideF32::load(&spheres.ys[i..i + SIMD_WIDTH]); let sphere_zs = WideF32::load(&spheres.zs[i..i + SIMD_WIDTH]); let sphere_rsqrds = WideF32::load(&spheres.rsqrds[i..i + SIMD_WIDTH]); // this is sphere_relative_origin = origin - sphere_origin // but the math is flipped backwards because it saves us having to negate the b term let relative_xs = sphere_xs - origin_xs; let relative_ys = sphere_ys - origin_ys; let relative_zs = sphere_zs - origin_zs; let neg_b = dir_x * relative_xs; let neg_b = WideF32::mul_add(dir_y, relative_ys, neg_b); let neg_b = WideF32::mul_add(dir_z, relative_zs, neg_b); let c = WideF32::mul_sub(relative_xs, relative_xs, sphere_rsqrds); let c = WideF32::mul_add(relative_ys, relative_ys, c); let c = WideF32::mul_add(relative_zs, relative_zs, c); let discr = WideF32::mul_sub(neg_b, neg_b, c); let discrmask = discr.gt(WideF32::splat(0.0)); if discrmask.any() { let root_term = discr.sqrt(); let t0 = neg_b - root_term; let t1 = neg_b + root_term; // t0 if hit, else t1 let t = WideF32::select(t1, t0, t0.gt(WideF32::splat(TOLERANCE))); let mask = discrmask & t.gt(WideF32::splat(TOLERANCE)) & t.lt(hit_dists); hit_ids = WideI32::select(hit_ids, iteration_ids, mask); hit_dists = WideF32::select(hit_dists, t, mask); } iteration_ids += WideI32::splat(SIMD_WIDTH as i32); } let hmin = hit_dists.hmin(); if hmin < f32::MAX { let minmask = hit_dists.eq(WideF32::splat(hmin)).mask(); let min_idx = minmask.trailing_zeros() as usize; let hit_ids_arr: [i32; SIMD_WIDTH] = unsafe { std::mem::transmute(hit_ids.0) }; let hit_dists_arr: [f32; SIMD_WIDTH] = unsafe { std::mem::transmute(hit_dists.0) }; let id = hit_ids_arr[min_idx] as usize; let hit_dist = hit_dists_arr[min_idx]; let mat = &spheres.mats[id]; if bounces == 0 { color += reflectance * mat.emit_color; break; } else { bounces -= 1; color += reflectance * mat.emit_color; reflectance *= mat.reflect_color; let hit_point = origin + dir * hit_dist; origin = hit_point; dir = match mat.t { MaterialType::Specular => { let sp = V3(spheres.xs[id], spheres.ys[id], spheres.zs[id]); let hit_normal = (hit_point -
sp).normalize(); dir.reflect(hit_normal) } MaterialType::Diffuse => { let a = randf_range(rng_state, 0.0, 2.0 * PI); let z = randf_range(rng_state, -1.0, 1.0); let r = (1.0 - z * z).sqrt(); V3(r * a.cos(), r * a.sin(), z) } } } } else { color += reflectance * bg.emit_color; break; } } (color, orig_bounces - bounces) } fn main() -> Result<(), Box<dyn std::error::Error>> { // flush denormals to zero unsafe { _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON) }; let mut args = Arguments::from_env(); let rays_per_pixel = args.opt_value_from_str(["-r", "--rays"])?.unwrap_or(100); let bounces = args.opt_value_from_str("--bounces")?.unwrap_or(8); let filename = args .opt_value_from_str("-o")? .unwrap_or("out.png".to_string()); args.finish()?; // Materials let bg = Material { emit_color: V3(0.3, 0.4, 0.8), reflect_color: V3(0.0, 0.0, 0.0), t: MaterialType::Specular, }; let ground = Material { emit_color: V3(0.0, 0.0, 0.0), reflect_color: V3(0.5, 0.5, 0.5), t: MaterialType::Diffuse, }; let left = Material { emit_color: V3(0.0, 0.0, 0.0), reflect_color: V3(1.0, 0.0, 0.0), t: MaterialType::Specular, }; let center = Material { emit_color: V3(0.4, 0.8, 0.9), reflect_color: V3(0.8, 0.8, 0.8), t: MaterialType::Specular, }; let right = Material { emit_color: V3(0.0, 0.0, 0.0), reflect_color: V3(0.95, 0.95, 0.95), t: MaterialType::Specular, }; let spheres = Spheres::new(vec![ Sphere::new(V3(0.0, 0.0, -100.0), 100.0, ground), Sphere::new(V3(0.0, 0.0, 1.0), 1.0, center), Sphere::new(V3(-2.0, -3.0, 1.5), 0.3, right.clone()), Sphere::new(V3(-3.0, -6.0, 0.0), 0.3, right.clone()), Sphere::new(V3(-3.0, -5.0, 2.0), 0.5, left.clone()), Sphere::new(V3(3.0, -3.0, 0.8), 1.0, right.clone()),
Spheres
identifier_name
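`Spheres::new` in this record stores the scene as structure-of-arrays and rounds every array up to a multiple of SIMD_WIDTH, so the 8-wide loads in `cast` never read past the end. The pad entries use rsqrd = 0.0, which can never produce a strictly positive discriminant (with a unit direction, (d . rel)^2 <= |rel|^2 by Cauchy-Schwarz), so they are inert. The round-up arithmetic in isolation (SIMD_WIDTH = 8 assumed here, matching the eight AVX lanes used elsewhere in the file):

// Round n up to the next multiple of W: adding W - 1 before the truncating
// integer division bumps every non-multiple across the boundary.
const SIMD_WIDTH: usize = 8;

fn padded_len(n: usize) -> usize {
    (n + SIMD_WIDTH - 1) / SIMD_WIDTH * SIMD_WIDTH
}

fn main() {
    assert_eq!(padded_len(0), 0);
    assert_eq!(padded_len(1), 8);
    assert_eq!(padded_len(8), 8);
    assert_eq!(padded_len(9), 16);
}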
main.rs
unsafe { _mm256_movemask_ps(self.0) } } fn hmin(&self) -> f32 { unsafe { /* This can be done entirely in avx with permute2f128, but that is allegedly very slow on AMD prior to Zen2 (and is anecdotally slower on my Intels as well) initial m256 1 2 3 4 5 6 7 8 extract half, cast the other half down to m128, min 1 2 3 4 5 6 7 8 = 1 2 3 4 permute backwards, min 1 2 3 4 4 3 2 1 = 1 2 2 1 unpack hi, min 1 2 2 1 1 1 2 2 = 1 1 2 1 */ let x = self.0; let y = _mm256_extractf128_ps(x, 1); let m1 = _mm_min_ps(_mm256_castps256_ps128(x), y); let m2 = _mm_permute_ps(m1, 27); let m2 = _mm_min_ps(m1, m2); let m3 = _mm_unpackhi_ps(m2, m2); let m = _mm_min_ps(m2, m3); _mm_cvtss_f32(m) } } fn splat(x: f32) -> Self { Self(unsafe { _mm256_set1_ps(x) }) } fn select(x: WideF32, y: WideF32, mask: WideF32) -> Self { Self(unsafe { _mm256_blendv_ps(x.0, y.0, mask.0) }) } fn sqrt(&self) -> Self { Self(unsafe { _mm256_sqrt_ps(self.0) }) } #[allow(dead_code)] fn rsqrt(&self) -> Self { Self(unsafe { _mm256_rsqrt_ps(self.0) }) } // approximate a sqrt using an inverse sqrt and one iteration of Newton-Raphson // https://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Iterative_methods_for_reciprocal_square_roots // Note: on many architectures this is significantly faster than the sqrt intrinsic. But this is not so on Skylake // for this program: approx_sqrt crowds the ports with additional mul/subs so is net slower #[allow(dead_code)] fn approx_sqrt(self) -> Self { let half = WideF32::splat(0.5); let three = WideF32::splat(3.0); let rsqrt = self.rsqrt(); let x = three - rsqrt * rsqrt * self; rsqrt * half * x * self } fn gt(&self, other: Self) -> Self { Self(unsafe { _mm256_cmp_ps(self.0, other.0, _CMP_GT_OQ) }) } fn lt(&self, other: Self) -> Self { Self(unsafe { _mm256_cmp_ps(self.0, other.0, _CMP_LT_OQ) }) } fn eq(&self, other: Self) -> Self { Self(unsafe { _mm256_cmp_ps(self.0, other.0, _CMP_EQ_OQ) }) } fn mul_add(x: Self, y: Self, z: Self) -> Self { Self(unsafe { _mm256_fmadd_ps(x.0, y.0, z.0) }) } fn mul_sub(x: Self, y: Self, z: Self) -> Self { Self(unsafe { _mm256_fmsub_ps(x.0, y.0, z.0) }) } } impl Add for WideF32 { type Output = Self; fn add(self, other: Self) -> Self { Self(unsafe { _mm256_add_ps(self.0, other.0) }) } } impl AddAssign for WideF32 { fn add_assign(&mut self, other: Self) { self.0 = unsafe { _mm256_add_ps(self.0, other.0) } } } impl BitAnd for WideF32 { type Output = Self; fn bitand(self, other: Self) -> Self { Self(unsafe { _mm256_and_ps(self.0, other.0) }) } } impl BitOr for WideF32 { type Output = Self; fn bitor(self, other: Self) -> Self { Self(unsafe { _mm256_or_ps(self.0, other.0) }) } } impl Div for WideF32 { type Output = Self; fn div(self, other: Self) -> Self { Self(unsafe { _mm256_div_ps(self.0, other.0) }) } } impl Sub for WideF32 { type Output = Self; fn sub(self, other: Self) -> Self { Self(unsafe { _mm256_sub_ps(self.0, other.0) }) } } impl Mul for WideF32 { type Output = Self; fn mul(self, other: Self) -> Self { Self(unsafe { _mm256_mul_ps(self.0, other.0) }) } } impl MulAssign for WideF32 { fn mul_assign(&mut self, other: Self) { self.0 = unsafe { _mm256_mul_ps(self.0, other.0) } } } impl Neg for WideF32 { type Output = Self; fn neg(self) -> Self { Self(unsafe { _mm256_xor_ps(self.0, _mm256_set1_ps(-0.0)) }) } } #[derive(Debug, Copy, Clone, PartialEq)] struct V3(f32, f32, f32); impl V3 { fn dot(self, other: V3) -> f32 { self.0 * other.0 + self.1 * other.1 + self.2 * other.2 } fn cross(self, other: V3) -> V3 { V3( self.1 * other.2 - self.2 * other.1, self.2 * other.0 - self.0 * 
other.2, self.0 * other.1 - self.1 * other.0, ) } fn normalize(self) -> V3 { self * (1.0 / self.len()) } fn reflect(self, normal: V3) -> V3 { self - normal * self.dot(normal) * 2.0 } fn len(self) -> f32 { self.dot(self).sqrt() } fn is_unit_vector(self) -> bool { (self.dot(self) - 1.0).abs() < TOLERANCE } } impl Add for V3 { type Output = Self; fn add(self, other: Self) -> Self { Self(self.0 + other.0, self.1 + other.1, self.2 + other.2) } } impl Add<f32> for V3 { type Output = Self; fn add(self, rhs: f32) -> Self { Self(self.0 + rhs, self.1 + rhs, self.2 + rhs) } } impl AddAssign for V3 { fn add_assign(&mut self, other: Self) { *self = Self(self.0 + other.0, self.1 + other.1, self.2 + other.2) } } impl Div<f32> for V3 { type Output = Self; fn div(self, rhs: f32) -> Self { Self(self.0 / rhs, self.1 / rhs, self.2 / rhs) } } impl Sub for V3 { type Output = Self; fn sub(self, other: Self) -> Self { Self(self.0 - other.0, self.1 - other.1, self.2 - other.2) } } impl Sub<f32> for V3 { type Output = Self; fn sub(self, rhs: f32) -> Self { Self(self.0 - rhs, self.1 - rhs, self.2 - rhs) } } impl Mul for V3 { type Output = Self; fn mul(self, other: Self) -> Self { Self(self.0 * other.0, self.1 * other.1, self.2 * other.2) } } impl Mul<f32> for V3 { type Output = Self; fn mul(self, rhs: f32) -> Self { Self(self.0 * rhs, self.1 * rhs, self.2 * rhs) } } impl MulAssign<f32> for V3 { fn mul_assign(&mut self, rhs: f32) { *self = Self(self.0 * rhs, self.1 * rhs, self.2 * rhs) } } impl MulAssign for V3 { fn mul_assign(&mut self, other: Self) { *self = Self(self.0 * other.0, self.1 * other.1, self.2 * other.2) } } #[derive(Debug)] struct Camera { origin: V3, x: V3, y: V3, z: V3, film_lower_left: V3, film_width: f32, film_height: f32, } impl Camera { fn new(look_from: V3, look_at: V3, aspect_ratio: f32) -> Camera { assert!(aspect_ratio > 1.0, "width must be greater than height"); let origin = look_from - look_at; let z = origin.normalize(); let x = V3(0.0, 0.0, 1.0).cross(z).normalize(); let y = z.cross(x).normalize(); let film_height = 1.0; let film_width = film_height * aspect_ratio; let film_lower_left = origin - z - y * 0.5 * film_height - x * 0.5 * film_width; Camera { origin, x, y, z, film_lower_left, film_width, film_height, } } } #[derive(Debug, Clone, PartialEq)] enum MaterialType { Diffuse, Specular, } #[derive(Debug, Clone, PartialEq)] struct Material { emit_color: V3, reflect_color: V3, t: MaterialType, } struct Sphere { p: V3, rsqrd: f32, m: Material, } impl Sphere { fn new(p: V3, r: f32, m: Material) -> Sphere { Sphere { p, rsqrd: r * r, m } } } struct Spheres { xs: Vec<f32>, ys: Vec<f32>, zs: Vec<f32>, rsqrds: Vec<f32>, mats: Vec<Material>, } impl Spheres { fn new(spheres: Vec<Sphere>) -> Self { let len = (spheres.len() + SIMD_WIDTH - 1) / SIMD_WIDTH * SIMD_WIDTH; let mut me = Self { xs: Vec::with_capacity(len), ys: Vec::with_capacity(len), zs: Vec::with_capacity(len), rsqrds: Vec::with_capacity(len), mats: Vec::with_capacity(len), }; for s in spheres { me.xs.push(s.p.0); me.ys.push(s.p.1); me.zs.push(s.p.2); me.rsqrds.push(s.rsqrd); me.mats.push(s.m); } // pad everything out to the simd width me.xs.resize(len, 0.0); me.ys.resize(len, 0.0); me.zs.resize(len, 0.0); me.rsqrds.resize(len, 0.0); let default_mat = Material { emit_color: V3(0.0, 0.0, 0.0), reflect_color: V3(0.0, 0.0, 0.0), t: MaterialType::Specular, }; me.mats.resize(len, default_mat); me } fn len(&self) -> usize { self.xs.len() } } // https://entropymine.com/imageworsener/srgbformula/ fn linear_to_srgb(x: f32) -> f32 { if x < 0.0 { 
0.0 } else if x > 1.0 { 1.0 } else if x > 0.0031308 { 1.055 * x.powf(1.0 / 2.4) - 0.055 } else { x * 12.92 } } thread_local! { static THREAD_RNG: Cell<u64> = { let mut buf = [0u8; 8]; getrandom::getrandom(&mut buf).unwrap(); Cell::new(u64::from_le_bytes(buf)) }; } fn rand_seed() -> u32 { let mut buf = [0u8; 4]; getrandom::getrandom(&mut buf).unwrap(); u32::from_le_bytes(buf) } #[allow(dead_code)] fn thread_rand() -> u32 { // TODO(eli): thread local perf is terrible; causes function call and branching THREAD_RNG.with(|rng_cell| { let mut state = rng_cell.get(); let randu = pcg(&mut state); rng_cell.set(state); randu }) } // Algorithm "xor" from p. 4 of Marsaglia, "Xorshift RNGs" // xorshift isn't great, but is good enough for our purposes and has two // nice properties: // 1. it only needs a u32 of state to generate a u32 // 2. it's easy to SIMD fn xorshift(state: &mut u32) -> u32 { debug_assert!(*state != 0, "xorshift cannot be seeded with 0"); let mut x = *state; x ^= x << 13; x ^= x >> 17; x ^= x << 5; *state = x; x } // pcg xsh rs 64/32 (mcg) #[allow(dead_code)] fn pcg(state: &mut u64) -> u32 { let s = *state; *state = s.wrapping_mul(6364136223846793005); (((s >> 22) ^ s) >> ((s >> 61) + 22)) as u32 } fn randf(state: &mut u32) -> f32 { let randu = (xorshift(state) >> 9) | 0x3f800000; let randf = f32::from_bits(randu) - 1.0; randf } fn randf_range(state: &mut u32, min: f32, max: f32) -> f32 { min + (max - min) * randf(state) } #[inline(always)] fn cast( rng_state: &mut u32, bg: &Material, spheres: &Spheres, mut origin: V3, mut dir: V3, mut bounces: u32, ) -> (V3, u32) { let mut color = V3(0.0, 0.0, 0.0); let mut reflectance = V3(1.0, 1.0, 1.0); let orig_bounces = bounces; loop { debug_assert!(dir.is_unit_vector()); let origin_xs = WideF32::splat(origin.0); let origin_ys = WideF32::splat(origin.1); let origin_zs = WideF32::splat(origin.2); let dir_x = WideF32::splat(dir.0); let dir_y = WideF32::splat(dir.1); let dir_z = WideF32::splat(dir.2); let mut hit_ids = WideI32::splat(-1); let mut hit_dists = WideF32::splat(f32::MAX); let mut iteration_ids = WideI32::new(7, 6, 5, 4, 3, 2, 1, 0); // TODO(eli): egregious bounds checking here for i in (0..spheres.len()).step_by(SIMD_WIDTH) { let sphere_xs = WideF32::load(&spheres.xs[i..i + SIMD_WIDTH]); let sphere_ys = WideF32::load(&spheres.ys[i..i + SIMD_WIDTH]); let sphere_zs = WideF32::load(&spheres.zs[i..i + SIMD_WIDTH]); let sphere_rsqrds = WideF32::load(&spheres.rsqrds[i..i + SIMD_WIDTH]); // this is sphere_relative_origin = origin - sphere_origin // but the math is flipped backwards because it saves us having to negate the b term let relative_xs = sphere_xs - origin_xs; let relative_ys = sphere_ys - origin_ys; let relative_zs = sphere_zs - origin_zs; let neg_b = dir_x * relative_xs; let neg_b = WideF32::mul_add(dir_y, relative_ys, neg_b); let neg_b = WideF32::mul_add(dir_z, relative_zs, neg_b); let c = WideF32::mul_sub(relative_xs, relative_xs, sphere_rsqrds); let c = WideF32::mul_add(relative_ys, relative_ys, c); let c = WideF32::mul_add(relative_zs, relative_zs, c); let discr = WideF32::mul_sub(neg_b, neg_b, c); let discrmask = discr.gt(WideF32::splat(0.0)); if discrmask.any() { let root_term = discr.sqrt(); let t0 = neg_b - root_term; let t1 = neg_b + root_term; // t0 if hit, else t1 let t = WideF32::select(t1, t0, t0.gt(WideF32::splat(TOLERANCE))); let mask = discrmask & t.gt(WideF32::splat(TOLERANCE)) & t.lt(hit_dists); hit_ids = WideI32::select(hit_ids, iteration_ids, mask); hit_dists = WideF32::select(hit_dists, t, mask); } iteration_ids
+= WideI32::splat(SIMD_WIDTH as i32); } let hmin = hit_dists.hmin(); if hmin < f32::MAX { let minmask = hit_dists.eq(WideF32::splat(hmin)).mask(); let min_idx = minmask.trailing_zeros() as usize; let hit_ids_arr: [i32; SIMD_WIDTH] = unsafe { std::mem::transmute(hit_ids.0) }; let hit_dists_arr: [f32; SIMD_WIDTH] = unsafe { std::mem::transmute(hit_dists.0) }; let id = hit_ids_arr[min_idx] as usize; let hit_dist = hit_dists_arr[min_idx]; let mat = &spheres.mats[id]; if bounces == 0 { color += reflectance * mat.emit_color; break; } else { bounces -= 1; color += reflectance * mat.emit_color; reflectance *= mat.reflect_color; let hit_point = origin + dir * hit_dist; origin = hit_point; dir = match mat.t { MaterialType::Specular => { let sp = V3(spheres.xs[id], spheres.ys[id], spheres.zs[id]); let hit_normal = (hit_point - sp).normalize(); dir.reflect(hit_normal) } MaterialType::Diffuse => { let a = randf_range(rng_state, 0.0, 2.0 * PI); let z = randf_range(rng_state, -1.0, 1.0); let r = (1.0 - z * z).sqrt(); V3(r * a.cos(), r * a.sin(), z) } } } } else { color += reflectance * bg.emit_color; break; } } (color, orig_bounces - bounces) } fn main() -> Result<(), Box<dyn std::error::Error>> { // flush denormals to zero unsafe { _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON) }; let mut args = Arguments::from_env(); let rays_per_pixel = args.opt_value_from_str(["-r", "--rays"])?.unwrap_or(100); let bounces = args.opt_value_from_str("--bounces")?.unwrap_or(8); let filename = args .opt_value_from_str("-o")? .unwrap_or("out.png".to_string()); args.finish()?; // Materials let bg = Material { emit_color: V3(0.3, 0.4, 0.8), reflect_color: V3(0.0, 0.0, 0.0), t: MaterialType::Specular, }; let ground = Material { emit_color: V3(0.0, 0.0, 0.0), reflect_color: V3(0.5, 0.5, 0.5), t: MaterialType::Diffuse, }; let left = Material { emit_color: V3(0.0, 0.0, 0.0), reflect_color: V3(1.0, 0.0, 0.0), t: MaterialType::Specular, }; let center = Material { emit_color: V3(0.4, 0.8, 0.9), reflect_color: V3(0.8, 0.8, 0.8), t: MaterialType::Specular, }; let right = Material {
self.mask() != 0 } fn mask(&self) -> i32 {
random_line_split
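The intersection loop in `cast` solves the ray-sphere quadratic in half-b form with the sign of b folded away: computing rel = sphere_center - origin instead of origin - center makes the linear coefficient come out already negated, which is what the "flipped backwards" comment refers to. A scalar reference of the same math (the value of TOLERANCE is not shown in this excerpt, so the constant below is an assumed stand-in):

// For a unit direction d, origin o, center c, and squared radius rr, solving
// |o + t*d - c|^2 = rr gives t = (d . rel) +/- sqrt((d . rel)^2 - (|rel|^2 - rr))
// with rel = c - o.
const TOLERANCE: f32 = 1e-4; // assumed stand-in; the real constant is defined elsewhere

fn dot(a: [f32; 3], b: [f32; 3]) -> f32 {
    a[0] * b[0] + a[1] * b[1] + a[2] * b[2]
}

fn hit_sphere(o: [f32; 3], d: [f32; 3], c: [f32; 3], rr: f32) -> Option<f32> {
    let rel = [c[0] - o[0], c[1] - o[1], c[2] - o[2]];
    let neg_b = dot(d, rel);
    let discr = neg_b * neg_b - (dot(rel, rel) - rr);
    if discr <= 0.0 {
        return None;
    }
    let root = discr.sqrt();
    let (t0, t1) = (neg_b - root, neg_b + root);
    // prefer the near root, as the SIMD select does
    let t = if t0 > TOLERANCE { t0 } else { t1 };
    (t > TOLERANCE).then_some(t)
}

fn main() {
    // Ray along +x toward a unit sphere centered at (3, 0, 0): near hit at t = 2.
    let t = hit_sphere([0.0; 3], [1.0, 0.0, 0.0], [3.0, 0.0, 0.0], 1.0).unwrap();
    assert!((t - 2.0).abs() < 1e-5);
}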
mod.rs
/* * Copyright 2018 The Starlark in Rust Authors. * Copyright (c) Facebook, Inc. and its affiliates. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ //! Defines a runtime Starlark value ([`Value`]) and traits for defining custom values ([`StarlarkValue`]). //! //! This module contains code for working with Starlark values: //! //! * Most code dealing with Starlark will use [`Value`], as it represents the fundamental values used in //! Starlark. When frozen, they become [`FrozenValue`]. //! * Values are garbage-collected, so a given [`Value`] lives on a [`Heap`]. //! * Rust values (e.g. [`String`], [`Vec`]) can be added to the [`Heap`] with [`AllocValue`], //! and deconstructed from a [`Value`] with //! * To define your own Rust data type that can live in a [`Value`] it must implement the [`StarlarkValue`] //! trait. //! * All the nested modules represent the built-in Starlark values. These are all defined using [`StarlarkValue`], //! so may serve as interesting inspiration for writing your own values, in addition to occurring in Starlark programs. pub use crate::values::{error::*, iter::*, layout::*, owned::*, traits::*, types::*, unpack::*}; use crate::{ collections::{Hashed, SmallHashResult}, eval::Evaluator, values::{function::FUNCTION_TYPE, types::function::FunctionInvoker}, }; pub use gazebo::{any::AnyLifetime, cell::ARef, prelude::*}; use indexmap::Equivalent; use std::{ cell::RefMut, cmp::Ordering, fmt, fmt::{Debug, Display}, }; #[macro_use] mod comparison; // Submodules mod error; pub(crate) mod fast_string; mod index; mod interpolation; mod iter; mod layout; mod owned; mod stack_guard; mod traits; mod types; mod typing; mod unpack; impl Display for Value<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", self.to_str()) } } impl Display for FrozenValue { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", Value::new_frozen(*self).to_str()) } } fn debug_value(typ: &str, v: Value, f: &mut fmt::Formatter) -> fmt::Result { f.debug_tuple(typ).field(v.get_aref().as_debug()).finish() } impl Debug for Value<'_> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { debug_value("Value", *self, f) } } impl Debug for FrozenValue { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { debug_value("FrozenValue", Value::new_frozen(*self), f) } } impl<'v> PartialEq for Value<'v> { fn eq(&self, other: &Value<'v>) -> bool { self.equals(*other).ok() == Some(true) } } impl PartialEq for FrozenValue { fn eq(&self, other: &FrozenValue) -> bool { let v: Value = Value::new_frozen(*self); let other: Value = Value::new_frozen(*other); v.equals(other).ok() == Some(true) } } impl Eq for Value<'_> {} impl Eq for FrozenValue {} impl Equivalent<FrozenValue> for Value<'_> { fn equivalent(&self, key: &FrozenValue) -> bool { key.equals(*self).unwrap() } } impl Equivalent<Value<'_>> for FrozenValue { fn equivalent(&self, key: &Value) -> bool { self.equals(*key).unwrap() } } /// Trait for things that can be allocated on a [`Heap`] producing a [`Value`].
pub trait AllocValue<'v> { fn alloc_value(self, heap: &'v Heap) -> Value<'v>; } impl<'v> AllocValue<'v> for Value<'v> { fn alloc_value(self, _heap: &'v Heap) -> Value<'v> { self } } /// Trait for things that can be allocated on a [`FrozenHeap`] producing a [`FrozenValue`]. pub trait AllocFrozenValue { fn alloc_frozen_value(self, heap: &FrozenHeap) -> FrozenValue; } impl FrozenHeap { /// Allocate a new value on a [`FrozenHeap`]. pub fn alloc<T: AllocFrozenValue>(&self, val: T) -> FrozenValue { val.alloc_frozen_value(self) } } impl Heap { /// Allocate a new value on a [`Heap`]. pub fn alloc<'v, T: AllocValue<'v>>(&'v self, x: T) -> Value<'v> { x.alloc_value(self) } } /// Abstract over [`Value`] and [`FrozenValue`]. /// /// The methods on this trait are those required to implement containers, /// allowing implementations of [`ComplexValue`] to be agnostic of their contained type. /// For details about each function, see the documentation for [`Value`], /// which provides the same functions (and more). pub trait ValueLike<'v>: Eq + Copy + Debug { /// Produce a [`Value`] regardless of the type you are starting with. fn to_value(self) -> Value<'v>; fn get_aref(self) -> ARef<'v, dyn StarlarkValue<'v>>; fn new_invoker(self, eval: &mut Evaluator<'v, '_>) -> anyhow::Result<FunctionInvoker<'v>> { self.to_value().new_invoker(eval) } fn get_hash(self) -> anyhow::Result<u64> { self.get_aref().get_hash() } fn get_hashed(self) -> anyhow::Result<Hashed<Self>> { Ok(Hashed::new_unchecked( SmallHashResult::new_unchecked(self.get_hash()?), self, )) } fn collect_repr(self, collector: &mut String) { self.get_aref().collect_repr(collector); } fn to_json(self) -> anyhow::Result<String> { self.get_aref().to_json() } fn equals(self, other: Value<'v>) -> anyhow::Result<bool> { if self.to_value().ptr_eq(other) { Ok(true) } else { let _guard = stack_guard::stack_guard()?; self.get_aref().equals(other) } } fn compare(self, other: Value<'v>) -> anyhow::Result<Ordering> { let _guard = stack_guard::stack_guard()?; self.get_aref().compare(other) } fn downcast_ref<T: AnyLifetime<'v>>(self) -> Option<ARef<'v, T>> { let any = ARef::map(self.get_aref(), |e| e.as_dyn_any()); if any.is::<T>() { Some(ARef::map(any, |any| any.downcast_ref::<T>().unwrap())) } else { None } } } impl<'v, V: ValueLike<'v>> Hashed<V> { pub(crate) fn to_hashed_value(&self) -> Hashed<Value<'v>> { // Safe because we know frozen values have the same hash as non-frozen ones Hashed::new_unchecked(self.hash(), self.key().to_value()) } } impl<'v> Hashed<Value<'v>> { fn freeze(&self, freezer: &Freezer) -> Hashed<FrozenValue> { // Safe because we know frozen values have the same hash as non-frozen ones let key = self.key().freeze(freezer); // But it's an easy mistake to make, so actually check it in debug debug_assert_eq!(Some(self.hash()), key.get_hashed().ok().map(|x| x.hash())); Hashed::new_unchecked(self.hash(), key) } } impl<'v> ValueLike<'v> for Value<'v> { fn get_aref(self) -> ARef<'v, dyn StarlarkValue<'v>> { Value::get_aref(self) } fn to_value(self) -> Value<'v> { self } } impl<'v> ValueLike<'v> for FrozenValue { fn get_aref(self) -> ARef<'v, dyn StarlarkValue<'v>> { ARef::new_ptr(self.get_ref()) } fn to_value(self) -> Value<'v> { Value::new_frozen(self) } } impl FrozenValue { /// Convert a [`FrozenValue`] back to a [`Value`]. pub fn to_value<'v>(self) -> Value<'v> { Value::new_frozen(self) } } /// How an attribute (e.g. `x.f`) should behave. 
#[derive(Clone, Copy, Dupe, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] pub enum AttrType { /// The attribute is a field, a direct value with no special behaviour. Field, /// The attribute is a method, which should be called passing the `x` value /// as its first argument. It will either be a function (which is transformed /// into a [`WrappedMethod`](crate::values::function::WrappedMethod)) or a /// [`NativeAttribute`](crate::values::function::NativeAttribute) /// (which is evaluated immediately). Method, } impl<'v> Value<'v> { /// Add two [`Value`]s together. Will first try using [`radd`](StarlarkValue::radd), /// before falling back to [`add`](StarlarkValue::add). pub fn add(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { let me = self.to_value(); if let Some(v) = other.get_aref().radd(me, heap) { v } else { self.get_aref().add(other, heap) } } /// Convert a value to a [`FrozenValue`] using a supplied [`Freezer`]. pub fn freeze(self, freezer: &Freezer) -> FrozenValue { freezer.freeze(self) } /// Implement the `str()` function - converts a string value to itself, /// otherwise uses `repr()`. pub fn to_str(self) -> String { match self.unpack_str() { None => self.to_repr(), Some(s) => s.to_owned(), } } /// Implement the `repr()` function. pub fn to_repr(self) -> String { let mut s = String::new(); self.collect_repr(&mut s); s } /// Forwards to [`ComplexValue::set_attr`]. pub fn set_attr( self, attribute: &str, alloc_value: Value<'v>, heap: &'v Heap, ) -> anyhow::Result<()> { self.get_ref_mut(heap)?.set_attr(attribute, alloc_value) } /// Forwards to [`ComplexValue::set_at`]. pub fn set_at( self, index: Value<'v>, alloc_value: Value<'v>, heap: &'v Heap, ) -> anyhow::Result<()> { self.get_ref_mut(heap)?.set_at(index, alloc_value) } /// Return the contents of an iterable collection, as an owned vector. pub fn iterate_collect(self, heap: &'v Heap) -> anyhow::Result<Vec<Value<'v>>> { // You might reasonably think this is mostly called on lists (I think it is), // and thus that a fast-path here would speed things up. But in my experiments // it's completely irrelevant (you pay a bit for the check, you save a bit on each step). Ok(self.iterate(heap)?.iter().collect()) } /// Produce an iterable from a value. pub fn iterate(self, heap: &'v Heap) -> anyhow::Result<RefIterable<'v>> { let me: ARef<'v, dyn StarlarkValue> = self.get_aref(); me.iterate()?; Ok(RefIterable::new( heap, ARef::map(me, |e| e.iterate().unwrap()), )) } /// Get the [`Hashed`] version of this [`Value`]. pub fn get_hashed(self) -> anyhow::Result<Hashed<Self>> { ValueLike::get_hashed(self) } /// Get a reference to underlying data or [`None`] /// if contained object has different type than requested. /// /// This function panics if the [`Value`] is borrowed mutably. /// /// In many cases you may wish to call [`FromValue`] instead, as that can /// get a non-frozen value from an underlying frozen value. pub fn downcast_ref<T: AnyLifetime<'v>>(self) -> Option<ARef<'v, T>> { ValueLike::downcast_ref(self) } /// Are two values equal. If the values are of different types it will /// return [`false`]. It will only error if there is excessive recursion. pub fn equals(self, other: Value<'v>) -> anyhow::Result<bool> { ValueLike::equals(self, other) } /// How are two values comparable. For values of different types will return [`Err`]. 
pub fn compare(self, other: Value<'v>) -> anyhow::Result<Ordering> { ValueLike::compare(self, other) } /// Get a mutable reference to underlying data or [`None`] /// if contained object has different type than requested. /// /// This function returns an [`Err`] if the [`Value`] is already borrowed, is frozen, /// or frozen for iteration. /// /// While this reference is active, any [`get_aref`](Value::get_aref) or similar on the value will /// _cause a panic_. Therefore, it's super important not to call any Starlark operations, /// even as simple as equality, while holding the [`RefMut`]. pub fn downcast_mut<T: AnyLifetime<'v>>( self, heap: &'v Heap, ) -> anyhow::Result<Option<RefMut<'_, T>>> { let vref = self.get_ref_mut(heap)?; let any: RefMut<'_, dyn AnyLifetime<'v>> = RefMut::map(vref, |v| v.as_dyn_any_mut()); Ok(if any.is::<T>() { Some(RefMut::map(any, |any| any.downcast_mut::<T>().unwrap())) } else { None }) } /// Describe the value, in order to get its metadata in a way that could be used /// to generate prototypes, help information or whatever other descriptive text /// is required. /// Plan is to make this return a data type at some point in the future, possibly /// move on to `StarlarkValue` and include data from members. pub fn describe(self, name: &str) -> String { if self.get_type() == FUNCTION_TYPE { format!("def {}: pass", self.to_repr().replace(" =...", " = None")) } else { format!("# {} = {}", name, self.to_repr()) } } /// Call `export_as` on the underlying value, but only if the type is mutable. /// Otherwise, does nothing. pub fn export_as(self, name: &str, heap: &'v Heap) { if let Some(mut mv) = self.get_ref_mut_already() { mv.export_as(heap, name) } } /// Return the attribute with the given name. Returns a pair of a boolean and the value. /// /// The type is [`AttrType::Method`] if the attribute was defined via [`StarlarkValue::get_methods`] /// and should be used as a signal that if the attribute is subsequently called, /// e.g. `object.attribute(argument)` then the `object` should be passed as the first /// argument to the function, e.g. `object.attribute(object, argument)`. pub fn get_attr( self, attribute: &str, heap: &'v Heap, ) -> anyhow::Result<(AttrType, Value<'v>)> { let aref = self.get_aref(); if let Some(methods) = aref.get_methods() { if let Some(v) = methods.get(attribute) { return Ok((AttrType::Method, v)); } } aref.get_attr(attribute, heap).map(|v| (AttrType::Field, v)) } /// Query whether an attribute exists on a type. Should be equivalent to whether /// [`get_attr`](Value::get_attr) succeeds, but potentially more efficient. pub fn has_attr(self, attribute: &str) -> bool { let aref = self.get_aref(); if let Some(methods) = aref.get_methods() { if methods.get(attribute).is_some() { return true; } } aref.has_attr(attribute) } /// Get a list of all the attributes this function supports, used to implement the /// `dir()` function. pub fn dir_attr(self) -> Vec<String> { let aref = self.get_aref(); let mut result = if let Some(methods) = aref.get_methods() { let mut res = methods.names(); res.extend(aref.dir_attr()); res } else { aref.dir_attr() }; result.sort(); result } } /// Methods that just forward to the underlying [`StarlarkValue`]. 
impl<'v> Value<'v> { pub fn get_type(self) -> &'static str { self.get_aref().get_type() } pub fn to_bool(self) -> bool { // Fast path for the common case if let Some(x) = self.unpack_bool() { x } else { self.get_aref().to_bool() } } pub fn to_int(self) -> anyhow::Result<i32> { // Fast path for the common case if let Some(x) = self.unpack_int() { Ok(x) } else { self.get_aref().to_int() } } pub fn at(self, index: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().at(index, heap) } pub fn slice( self, start: Option<Value<'v>>, stop: Option<Value<'v>>, stride: Option<Value<'v>>, heap: &'v Heap, ) -> anyhow::Result<Value<'v>> { self.get_aref().slice(start, stop, stride, heap) } pub fn length(self) -> anyhow::Result<i32> { self.get_aref().length() } pub fn is_in(self, other: Value<'v>) -> anyhow::Result<bool> { self.get_aref().is_in(other) } pub fn plus(self, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().plus(heap) } pub fn minus(self, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().minus(heap) } pub fn sub(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().sub(other, heap) } pub fn mul(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().mul(other, heap) } pub fn percent(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().percent(other, heap) } pub fn floor_div(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().floor_div(other, heap) } pub fn bit_and(self, other: Value<'v>) -> anyhow::Result<Value<'v>> { self.get_aref().bit_and(other) } pub fn bit_or(self, other: Value<'v>) -> anyhow::Result<Value<'v>> { self.get_aref().bit_or(other) } pub fn bit_xor(self, other: Value<'v>) -> anyhow::Result<Value<'v>> { self.get_aref().bit_xor(other) } pub fn left_shift(self, other: Value<'v>) -> anyhow::Result<Value<'v>> { self.get_aref().left_shift(other) } pub fn right_shift(self, other: Value<'v>) -> anyhow::Result<Value<'v>> { self.get_aref().right_shift(other) } pub fn new_invoker(self, eval: &mut Evaluator<'v, '_>) -> anyhow::Result<FunctionInvoker<'v>> { self.get_aref().new_invoker(self, eval) } pub fn get_type_value(self) -> &'static ConstFrozenValue
}
{ self.get_aref().get_type_value() }
identifier_body
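`Value::add` in this record implements a right-operand-first dispatch: the right-hand side gets first refusal through `radd` (returning Some to claim the operation), and only if it declines does the left-hand `add` run. Here is a minimal model of that dispatch shape using hypothetical stand-in types, not the crate's actual API:

// Hypothetical illustration of the radd-before-add pattern; the real trait
// is StarlarkValue and operates on heap-allocated Values.
trait Arith {
    fn add(&self, other: &dyn Arith) -> Result<i64, String>;
    // Reflected add: None means "no special handling for this left operand".
    fn radd(&self, _lhs: &dyn Arith) -> Option<Result<i64, String>> {
        None
    }
    fn as_int(&self) -> i64;
}

struct Int(i64);

impl Arith for Int {
    fn add(&self, other: &dyn Arith) -> Result<i64, String> {
        Ok(self.0 + other.as_int())
    }
    fn as_int(&self) -> i64 {
        self.0
    }
}

// Mirrors the shape of Value::add: try rhs.radd first, fall back to lhs.add.
fn dispatch_add(lhs: &dyn Arith, rhs: &dyn Arith) -> Result<i64, String> {
    if let Some(v) = rhs.radd(lhs) {
        v
    } else {
        lhs.add(rhs)
    }
}

fn main() {
    assert_eq!(dispatch_add(&Int(2), &Int(3)), Ok(5));
}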
mod.rs
/* * Copyright 2018 The Starlark in Rust Authors. * Copyright (c) Facebook, Inc. and its affiliates. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ //! Defines a runtime Starlark value ([`Value`]) and traits for defining custom values ([`StarlarkValue`]). //! //! This module contains code for working with Starlark values: //! //! * Most code dealing with Starlark will use [`Value`], as it represents the fundamental values used in //! Starlark. When frozen, they become [`FrozenValue`]. //! * Values are garbage-collected, so a given [`Value`] lives on a [`Heap`]. //! * Rust values (e.g. [`String`], [`Vec`]) can be added to the [`Heap`] with [`AllocValue`], //! and deconstructed from a [`Value`] with //! * To define your own Rust data type that can live in a [`Value`] it must implement the [`StarlarkValue`] //! trait. //! * All the nested modules represent the built-in Starlark values. These are all defined using [`StarlarkValue`], //! so may serve as interesting inspiration for writing your own values, in addition to occurring in Starlark programs. pub use crate::values::{error::*, iter::*, layout::*, owned::*, traits::*, types::*, unpack::*}; use crate::{ collections::{Hashed, SmallHashResult}, eval::Evaluator, values::{function::FUNCTION_TYPE, types::function::FunctionInvoker}, }; pub use gazebo::{any::AnyLifetime, cell::ARef, prelude::*}; use indexmap::Equivalent; use std::{ cell::RefMut, cmp::Ordering, fmt, fmt::{Debug, Display}, }; #[macro_use] mod comparison; // Submodules mod error; pub(crate) mod fast_string; mod index; mod interpolation; mod iter; mod layout; mod owned; mod stack_guard; mod traits; mod types; mod typing; mod unpack; impl Display for Value<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", self.to_str()) } } impl Display for FrozenValue { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", Value::new_frozen(*self).to_str()) } } fn debug_value(typ: &str, v: Value, f: &mut fmt::Formatter) -> fmt::Result { f.debug_tuple(typ).field(v.get_aref().as_debug()).finish() } impl Debug for Value<'_> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { debug_value("Value", *self, f) } } impl Debug for FrozenValue { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { debug_value("FrozenValue", Value::new_frozen(*self), f) } } impl<'v> PartialEq for Value<'v> { fn eq(&self, other: &Value<'v>) -> bool { self.equals(*other).ok() == Some(true) } } impl PartialEq for FrozenValue { fn eq(&self, other: &FrozenValue) -> bool { let v: Value = Value::new_frozen(*self); let other: Value = Value::new_frozen(*other); v.equals(other).ok() == Some(true) } } impl Eq for Value<'_> {} impl Eq for FrozenValue {} impl Equivalent<FrozenValue> for Value<'_> { fn equivalent(&self, key: &FrozenValue) -> bool { key.equals(*self).unwrap() } } impl Equivalent<Value<'_>> for FrozenValue { fn equivalent(&self, key: &Value) -> bool { self.equals(*key).unwrap() } } /// Trait for things that can be allocated on a [`Heap`] producing a [`Value`].
pub trait AllocValue<'v> { fn alloc_value(self, heap: &'v Heap) -> Value<'v>; } impl<'v> AllocValue<'v> for Value<'v> { fn alloc_value(self, _heap: &'v Heap) -> Value<'v> { self } } /// Trait for things that can be allocated on a [`FrozenHeap`] producing a [`FrozenValue`]. pub trait AllocFrozenValue { fn alloc_frozen_value(self, heap: &FrozenHeap) -> FrozenValue; } impl FrozenHeap { /// Allocate a new value on a [`FrozenHeap`]. pub fn alloc<T: AllocFrozenValue>(&self, val: T) -> FrozenValue { val.alloc_frozen_value(self) } } impl Heap { /// Allocate a new value on a [`Heap`]. pub fn alloc<'v, T: AllocValue<'v>>(&'v self, x: T) -> Value<'v> { x.alloc_value(self) } } /// Abstract over [`Value`] and [`FrozenValue`]. /// /// The methods on this trait are those required to implement containers, /// allowing implementations of [`ComplexValue`] to be agnostic of their contained type. /// For details about each function, see the documentation for [`Value`], /// which provides the same functions (and more). pub trait ValueLike<'v>: Eq + Copy + Debug { /// Produce a [`Value`] regardless of the type you are starting with. fn to_value(self) -> Value<'v>; fn get_aref(self) -> ARef<'v, dyn StarlarkValue<'v>>; fn new_invoker(self, eval: &mut Evaluator<'v, '_>) -> anyhow::Result<FunctionInvoker<'v>> { self.to_value().new_invoker(eval) } fn get_hash(self) -> anyhow::Result<u64> { self.get_aref().get_hash() } fn get_hashed(self) -> anyhow::Result<Hashed<Self>> { Ok(Hashed::new_unchecked( SmallHashResult::new_unchecked(self.get_hash()?), self, )) } fn collect_repr(self, collector: &mut String) { self.get_aref().collect_repr(collector); } fn to_json(self) -> anyhow::Result<String> { self.get_aref().to_json() } fn equals(self, other: Value<'v>) -> anyhow::Result<bool> { if self.to_value().ptr_eq(other) { Ok(true) } else { let _guard = stack_guard::stack_guard()?; self.get_aref().equals(other) } } fn compare(self, other: Value<'v>) -> anyhow::Result<Ordering> { let _guard = stack_guard::stack_guard()?; self.get_aref().compare(other) } fn downcast_ref<T: AnyLifetime<'v>>(self) -> Option<ARef<'v, T>> { let any = ARef::map(self.get_aref(), |e| e.as_dyn_any()); if any.is::<T>() { Some(ARef::map(any, |any| any.downcast_ref::<T>().unwrap())) } else { None } } } impl<'v, V: ValueLike<'v>> Hashed<V> { pub(crate) fn to_hashed_value(&self) -> Hashed<Value<'v>> { // Safe because we know frozen values have the same hash as non-frozen ones Hashed::new_unchecked(self.hash(), self.key().to_value()) } } impl<'v> Hashed<Value<'v>> { fn freeze(&self, freezer: &Freezer) -> Hashed<FrozenValue> { // Safe because we know frozen values have the same hash as non-frozen ones let key = self.key().freeze(freezer); // But it's an easy mistake to make, so actually check it in debug debug_assert_eq!(Some(self.hash()), key.get_hashed().ok().map(|x| x.hash())); Hashed::new_unchecked(self.hash(), key) } } impl<'v> ValueLike<'v> for Value<'v> { fn get_aref(self) -> ARef<'v, dyn StarlarkValue<'v>> { Value::get_aref(self) } fn to_value(self) -> Value<'v> { self } } impl<'v> ValueLike<'v> for FrozenValue { fn get_aref(self) -> ARef<'v, dyn StarlarkValue<'v>> { ARef::new_ptr(self.get_ref()) } fn to_value(self) -> Value<'v> { Value::new_frozen(self) } } impl FrozenValue { /// Convert a [`FrozenValue`] back to a [`Value`]. pub fn to_value<'v>(self) -> Value<'v> { Value::new_frozen(self) } } /// How an attribute (e.g. `x.f`) should behave. 
#[derive(Clone, Copy, Dupe, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] pub enum AttrType { /// The attribute is a field, a direct value with no special behaviour. Field, /// The attribute is a method, which should be called passing the `x` value /// as its first argument. It will either be a function (which is transformed /// into a [`WrappedMethod`](crate::values::function::WrappedMethod)) or a /// [`NativeAttribute`](crate::values::function::NativeAttribute) /// (which is evaluated immediately). Method, } impl<'v> Value<'v> { /// Add two [`Value`]s together. Will first try using [`radd`](StarlarkValue::radd), /// before falling back to [`add`](StarlarkValue::add). pub fn add(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { let me = self.to_value(); if let Some(v) = other.get_aref().radd(me, heap) { v } else { self.get_aref().add(other, heap) } } /// Convert a value to a [`FrozenValue`] using a supplied [`Freezer`]. pub fn freeze(self, freezer: &Freezer) -> FrozenValue { freezer.freeze(self) } /// Implement the `str()` function - converts a string value to itself, /// otherwise uses `repr()`. pub fn to_str(self) -> String { match self.unpack_str() { None => self.to_repr(), Some(s) => s.to_owned(), } } /// Implement the `repr()` function. pub fn to_repr(self) -> String { let mut s = String::new(); self.collect_repr(&mut s); s } /// Forwards to [`ComplexValue::set_attr`]. pub fn set_attr( self, attribute: &str, alloc_value: Value<'v>, heap: &'v Heap, ) -> anyhow::Result<()> { self.get_ref_mut(heap)?.set_attr(attribute, alloc_value) } /// Forwards to [`ComplexValue::set_at`]. pub fn set_at( self, index: Value<'v>, alloc_value: Value<'v>, heap: &'v Heap, ) -> anyhow::Result<()> { self.get_ref_mut(heap)?.set_at(index, alloc_value) } /// Return the contents of an iterable collection, as an owned vector. pub fn iterate_collect(self, heap: &'v Heap) -> anyhow::Result<Vec<Value<'v>>> { // You might reasonably think this is mostly called on lists (I think it is), // and thus that a fast-path here would speed things up. But in my experiments // it's completely irrelevant (you pay a bit for the check, you save a bit on each step). Ok(self.iterate(heap)?.iter().collect()) } /// Produce an iterable from a value. pub fn iterate(self, heap: &'v Heap) -> anyhow::Result<RefIterable<'v>> { let me: ARef<'v, dyn StarlarkValue> = self.get_aref(); me.iterate()?; Ok(RefIterable::new( heap, ARef::map(me, |e| e.iterate().unwrap()), )) } /// Get the [`Hashed`] version of this [`Value`]. pub fn get_hashed(self) -> anyhow::Result<Hashed<Self>> { ValueLike::get_hashed(self) } /// Get a reference to underlying data or [`None`] /// if contained object has different type than requested. /// /// This function panics if the [`Value`] is borrowed mutably. /// /// In many cases you may wish to call [`FromValue`] instead, as that can /// get a non-frozen value from an underlying frozen value. pub fn downcast_ref<T: AnyLifetime<'v>>(self) -> Option<ARef<'v, T>> { ValueLike::downcast_ref(self) } /// Are two values equal. If the values are of different types it will /// return [`false`]. It will only error if there is excessive recursion. pub fn equals(self, other: Value<'v>) -> anyhow::Result<bool> { ValueLike::equals(self, other) } /// How are two values comparable. For values of different types will return [`Err`]. 
pub fn compare(self, other: Value<'v>) -> anyhow::Result<Ordering> { ValueLike::compare(self, other) } /// Get a mutable reference to underlying data or [`None`] /// if contained object has different type than requested. /// /// This function returns an [`Err`] if the [`Value`] is already borrowed, is frozen, /// or frozen for iteration. /// /// While this reference is active, any [`get_aref`](Value::get_aref) or similar on the value will /// _cause a panic_. Therefore, it's super important not to call any Starlark operations, /// even as simple as equality, while holding the [`RefMut`]. pub fn downcast_mut<T: AnyLifetime<'v>>( self, heap: &'v Heap, ) -> anyhow::Result<Option<RefMut<'_, T>>> { let vref = self.get_ref_mut(heap)?; let any: RefMut<'_, dyn AnyLifetime<'v>> = RefMut::map(vref, |v| v.as_dyn_any_mut()); Ok(if any.is::<T>() { Some(RefMut::map(any, |any| any.downcast_mut::<T>().unwrap())) } else { None }) } /// Describe the value, in order to get its metadata in a way that could be used /// to generate prototypes, help information or whatever other descriptive text /// is required. /// Plan is to make this return a data type at some point in the future, possibly /// move on to `StarlarkValue` and include data from members. pub fn describe(self, name: &str) -> String { if self.get_type() == FUNCTION_TYPE { format!("def {}: pass", self.to_repr().replace(" =...", " = None")) } else { format!("# {} = {}", name, self.to_repr()) } } /// Call `export_as` on the underlying value, but only if the type is mutable. /// Otherwise, does nothing. pub fn
(self, name: &str, heap: &'v Heap) { if let Some(mut mv) = self.get_ref_mut_already() { mv.export_as(heap, name) } } /// Return the attribute with the given name. Returns a pair of a boolean and the value. /// /// The type is [`AttrType::Method`] if the attribute was defined via [`StarlarkValue::get_methods`] /// and should be used as a signal that if the attribute is subsequently called, /// e.g. `object.attribute(argument)` then the `object` should be passed as the first /// argument to the function, e.g. `object.attribute(object, argument)`. pub fn get_attr( self, attribute: &str, heap: &'v Heap, ) -> anyhow::Result<(AttrType, Value<'v>)> { let aref = self.get_aref(); if let Some(methods) = aref.get_methods() { if let Some(v) = methods.get(attribute) { return Ok((AttrType::Method, v)); } } aref.get_attr(attribute, heap).map(|v| (AttrType::Field, v)) } /// Query whether an attribute exists on a type. Should be equivalent to whether /// [`get_attr`](Value::get_attr) succeeds, but potentially more efficient. pub fn has_attr(self, attribute: &str) -> bool { let aref = self.get_aref(); if let Some(methods) = aref.get_methods() { if methods.get(attribute).is_some() { return true; } } aref.has_attr(attribute) } /// Get a list of all the attributes this function supports, used to implement the /// `dir()` function. pub fn dir_attr(self) -> Vec<String> { let aref = self.get_aref(); let mut result = if let Some(methods) = aref.get_methods() { let mut res = methods.names(); res.extend(aref.dir_attr()); res } else { aref.dir_attr() }; result.sort(); result } } /// Methods that just forward to the underlying [`StarlarkValue`]. impl<'v> Value<'v> { pub fn get_type(self) -> &'static str { self.get_aref().get_type() } pub fn to_bool(self) -> bool { // Fast path for the common case if let Some(x) = self.unpack_bool() { x } else { self.get_aref().to_bool() } } pub fn to_int(self) -> anyhow::Result<i32> { // Fast path for the common case if let Some(x) = self.unpack_int() { Ok(x) } else { self.get_aref().to_int() } } pub fn at(self, index: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().at(index, heap) } pub fn slice( self, start: Option<Value<'v>>, stop: Option<Value<'v>>, stride: Option<Value<'v>>, heap: &'v Heap, ) -> anyhow::Result<Value<'v>> { self.get_aref().slice(start, stop, stride, heap) } pub fn length(self) -> anyhow::Result<i32> { self.get_aref().length() } pub fn is_in(self, other: Value<'v>) -> anyhow::Result<bool> { self.get_aref().is_in(other) } pub fn plus(self, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().plus(heap) } pub fn minus(self, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().minus(heap) } pub fn sub(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().sub(other, heap) } pub fn mul(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().mul(other, heap) } pub fn percent(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().percent(other, heap) } pub fn floor_div(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().floor_div(other, heap) } pub fn bit_and(self, other: Value<'v>) -> anyhow::Result<Value<'v>> { self.get_aref().bit_and(other) } pub fn bit_or(self, other: Value<'v>) -> anyhow::Result<Value<'v>> { self.get_aref().bit_or(other) } pub fn bit_xor(self, other: Value<'v>) -> anyhow::Result<Value<'v>> { self.get_aref().bit_xor(other) } pub fn left_shift(self, other: Value<'v>) -> 
anyhow::Result<Value<'v>> { self.get_aref().left_shift(other) } pub fn right_shift(self, other: Value<'v>) -> anyhow::Result<Value<'v>> { self.get_aref().right_shift(other) } pub fn new_invoker(self, eval: &mut Evaluator<'v, '_>) -> anyhow::Result<FunctionInvoker<'v>> { self.get_aref().new_invoker(self, eval) } pub fn get_type_value(self) -> &'static ConstFrozenValue { self.get_aref().get_type_value() } }
export_as
identifier_name
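
The `Value::add` doc comment in the mod.rs sample above describes an operator-dispatch convention worth seeing in isolation: the right operand's `radd` gets first refusal before the left operand's `add` runs. Below is a minimal, self-contained Rust sketch of that pattern; `MiniValue`, `plus`, and the list-prepend semantics are hypothetical stand-ins, not starlark API, and the `Result` error handling of the real hooks is elided.

// Hypothetical stand-ins for StarlarkValue's add/radd hooks; not real starlark types.
#[derive(Clone, Debug, PartialEq)]
enum MiniValue {
    Int(i64),
    List(Vec<i64>),
}

impl MiniValue {
    // Like StarlarkValue::radd: the right operand may claim `lhs + self`.
    // Here a list on the right absorbs an int by prepending it.
    fn radd(&self, lhs: &MiniValue) -> Option<MiniValue> {
        match (lhs, self) {
            (MiniValue::Int(n), MiniValue::List(xs)) => {
                let mut out = vec![*n];
                out.extend_from_slice(xs);
                Some(MiniValue::List(out))
            }
            _ => None,
        }
    }

    // Like StarlarkValue::add: the left operand's ordinary addition.
    fn add(&self, rhs: &MiniValue) -> Option<MiniValue> {
        match (self, rhs) {
            (MiniValue::Int(a), MiniValue::Int(b)) => Some(MiniValue::Int(a + b)),
            _ => None,
        }
    }

    // Mirrors Value::add in the sample: rhs.radd gets first refusal,
    // and only if it declines do we fall back to the left operand's add.
    fn plus(&self, rhs: &MiniValue) -> Option<MiniValue> {
        rhs.radd(self).or_else(|| self.add(rhs))
    }
}

fn main() {
    let one = MiniValue::Int(1);
    assert_eq!(one.plus(&MiniValue::Int(2)), Some(MiniValue::Int(3)));
    // The List's radd wins before Int's add is even consulted.
    assert_eq!(
        one.plus(&MiniValue::List(vec![2, 3])),
        Some(MiniValue::List(vec![1, 2, 3]))
    );
}
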
mod.rs
/* * Copyright 2018 The Starlark in Rust Authors. * Copyright (c) Facebook, Inc. and its affiliates. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ //! Defines a runtime Starlark value ([`Value`]) and traits for defining custom values ([`StarlarkValue`]). //! //! This module contains code for working with Starlark values: //! //! * Most code dealing with Starlark will use [`Value`], as it represents the fundamental values used in //! Starlark. When frozen, they become [`FrozenValue`]. //! * Values are garbage-collected, so a given [`Value`] lives on a [`Heap`]. //! * Rust values (e.g. [`String`], [`Vec`]) can be added to the [`Heap`] with [`AllocValue`], //! and deconstructed from a [`Value`] with //! * To define your own Rust data type that can live in a [`Value`] it must implement the [`StarlarkValue`] //! trait. //! * All the nested modules represent the built-in Starlark values. These are all defined using [`StarlarkValue`], //! so may serve as interesting inspiration for writing your own values, in addition to occuring in Starlark programs. pub use crate::values::{error::*, iter::*, layout::*, owned::*, traits::*, types::*, unpack::*}; use crate::{ collections::{Hashed, SmallHashResult}, eval::Evaluator, values::{function::FUNCTION_TYPE, types::function::FunctionInvoker}, }; pub use gazebo::{any::AnyLifetime, cell::ARef, prelude::*}; use indexmap::Equivalent; use std::{ cell::RefMut, cmp::Ordering, fmt, fmt::{Debug, Display}, }; #[macro_use] mod comparison; // Submodules mod error; pub(crate) mod fast_string; mod index; mod interpolation; mod iter; mod layout; mod owned; mod stack_guard; mod traits; mod types; mod typing; mod unpack; impl Display for Value<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", self.to_str()) } } impl Display for FrozenValue { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", Value::new_frozen(*self).to_str()) } } fn debug_value(typ: &str, v: Value, f: &mut fmt::Formatter) -> fmt::Result { f.debug_tuple(typ).field(v.get_aref().as_debug()).finish() } impl Debug for Value<'_> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { debug_value("Value", *self, f) } } impl Debug for FrozenValue { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { debug_value("FrozenValue", Value::new_frozen(*self), f) } } impl<'v> PartialEq for Value<'v> { fn eq(&self, other: &Value<'v>) -> bool { self.equals(*other).ok() == Some(true) } } impl PartialEq for FrozenValue { fn eq(&self, other: &FrozenValue) -> bool { let v: Value = Value::new_frozen(*self); let other: Value = Value::new_frozen(*other); v.equals(other).ok() == Some(true) } } impl Eq for Value<'_> {} impl Eq for FrozenValue {} impl Equivalent<FrozenValue> for Value<'_> { fn equivalent(&self, key: &FrozenValue) -> bool { key.equals(*self).unwrap() } } impl Equivalent<Value<'_>> for FrozenValue { fn equivalent(&self, key: &Value) -> bool { self.equals(*key).unwrap() } } /// Trait for things that can be allocated on a [`Heap`] producing a [`Value`]. 
pub trait AllocValue<'v> { fn alloc_value(self, heap: &'v Heap) -> Value<'v>; } impl<'v> AllocValue<'v> for Value<'v> { fn alloc_value(self, _heap: &'v Heap) -> Value<'v> { self } } /// Trait for things that can be allocated on a [`FrozenHeap`] producing a [`FrozenValue`]. pub trait AllocFrozenValue { fn alloc_frozen_value(self, heap: &FrozenHeap) -> FrozenValue; } impl FrozenHeap { /// Allocate a new value on a [`FrozenHeap`]. pub fn alloc<T: AllocFrozenValue>(&self, val: T) -> FrozenValue { val.alloc_frozen_value(self) } } impl Heap { /// Allocate a new value on a [`Heap`]. pub fn alloc<'v, T: AllocValue<'v>>(&'v self, x: T) -> Value<'v> { x.alloc_value(self) } } /// Abstract over [`Value`] and [`FrozenValue`]. /// /// The methods on this trait are those required to implement containers, /// allowing implementations of [`ComplexValue`] to be agnostic of their contained type. /// For details about each function, see the documentation for [`Value`], /// which provides the same functions (and more). pub trait ValueLike<'v>: Eq + Copy + Debug { /// Produce a [`Value`] regardless of the type you are starting with. fn to_value(self) -> Value<'v>; fn get_aref(self) -> ARef<'v, dyn StarlarkValue<'v>>; fn new_invoker(self, eval: &mut Evaluator<'v, '_>) -> anyhow::Result<FunctionInvoker<'v>> { self.to_value().new_invoker(eval) } fn get_hash(self) -> anyhow::Result<u64> { self.get_aref().get_hash() } fn get_hashed(self) -> anyhow::Result<Hashed<Self>> { Ok(Hashed::new_unchecked( SmallHashResult::new_unchecked(self.get_hash()?), self, )) } fn collect_repr(self, collector: &mut String) { self.get_aref().collect_repr(collector); } fn to_json(self) -> anyhow::Result<String> { self.get_aref().to_json() } fn equals(self, other: Value<'v>) -> anyhow::Result<bool> { if self.to_value().ptr_eq(other) { Ok(true) } else
} fn compare(self, other: Value<'v>) -> anyhow::Result<Ordering> { let _guard = stack_guard::stack_guard()?; self.get_aref().compare(other) } fn downcast_ref<T: AnyLifetime<'v>>(self) -> Option<ARef<'v, T>> { let any = ARef::map(self.get_aref(), |e| e.as_dyn_any()); if any.is::<T>() { Some(ARef::map(any, |any| any.downcast_ref::<T>().unwrap())) } else { None } } } impl<'v, V: ValueLike<'v>> Hashed<V> { pub(crate) fn to_hashed_value(&self) -> Hashed<Value<'v>> { // Safe because we know frozen values have the same hash as non-frozen ones Hashed::new_unchecked(self.hash(), self.key().to_value()) } } impl<'v> Hashed<Value<'v>> { fn freeze(&self, freezer: &Freezer) -> Hashed<FrozenValue> { // Safe because we know frozen values have the same hash as non-frozen ones let key = self.key().freeze(freezer); // But it's an easy mistake to make, so actually check it in debug debug_assert_eq!(Some(self.hash()), key.get_hashed().ok().map(|x| x.hash())); Hashed::new_unchecked(self.hash(), key) } } impl<'v> ValueLike<'v> for Value<'v> { fn get_aref(self) -> ARef<'v, dyn StarlarkValue<'v>> { Value::get_aref(self) } fn to_value(self) -> Value<'v> { self } } impl<'v> ValueLike<'v> for FrozenValue { fn get_aref(self) -> ARef<'v, dyn StarlarkValue<'v>> { ARef::new_ptr(self.get_ref()) } fn to_value(self) -> Value<'v> { Value::new_frozen(self) } } impl FrozenValue { /// Convert a [`FrozenValue`] back to a [`Value`]. pub fn to_value<'v>(self) -> Value<'v> { Value::new_frozen(self) } } /// How an attribute (e.g. `x.f`) should behave. #[derive(Clone, Copy, Dupe, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] pub enum AttrType { /// The attribute is a field, a direct value with no special behaviour. Field, /// The attribute is a method, which should be called passing the `x` value /// as its first argument. It will either be a function (which is transformed /// into a [`WrappedMethod`](crate::values::function::WrappedMethod)) or a /// [`NativeAttribute`](crate::values::function::NativeAttribute) /// (which is evaluated immediately). Method, } impl<'v> Value<'v> { /// Add two [`Value`]s together. Will first try using [`radd`](StarlarkValue::radd), /// before falling back to [`add`](StarlarkValue::add). pub fn add(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { let me = self.to_value(); if let Some(v) = other.get_aref().radd(me, heap) { v } else { self.get_aref().add(other, heap) } } /// Convert a value to a [`FrozenValue`] using a supplied [`Freezer`]. pub fn freeze(self, freezer: &Freezer) -> FrozenValue { freezer.freeze(self) } /// Implement the `str()` function - converts a string value to itself, /// otherwise uses `repr()`. pub fn to_str(self) -> String { match self.unpack_str() { None => self.to_repr(), Some(s) => s.to_owned(), } } /// Implement the `repr()` function. pub fn to_repr(self) -> String { let mut s = String::new(); self.collect_repr(&mut s); s } /// Forwards to [`ComplexValue::set_attr`]. pub fn set_attr( self, attribute: &str, alloc_value: Value<'v>, heap: &'v Heap, ) -> anyhow::Result<()> { self.get_ref_mut(heap)?.set_attr(attribute, alloc_value) } /// Forwards to [`ComplexValue::set_at`]. pub fn set_at( self, index: Value<'v>, alloc_value: Value<'v>, heap: &'v Heap, ) -> anyhow::Result<()> { self.get_ref_mut(heap)?.set_at(index, alloc_value) } /// Return the contents of an iterable collection, as an owned vector. 
pub fn iterate_collect(self, heap: &'v Heap) -> anyhow::Result<Vec<Value<'v>>> { // You might reasonably think this is mostly called on lists (I think it is), // and thus that a fast-path here would speed things up. But in my experiments // it's completely irrelevant (you pay a bit for the check, you save a bit on each step). Ok(self.iterate(heap)?.iter().collect()) } /// Produce an iterable from a value. pub fn iterate(self, heap: &'v Heap) -> anyhow::Result<RefIterable<'v>> { let me: ARef<'v, dyn StarlarkValue> = self.get_aref(); me.iterate()?; Ok(RefIterable::new( heap, ARef::map(me, |e| e.iterate().unwrap()), )) } /// Get the [`Hashed`] version of this [`Value`]. pub fn get_hashed(self) -> anyhow::Result<Hashed<Self>> { ValueLike::get_hashed(self) } /// Get a reference to underlying data or [`None`] /// if contained object has different type than requested. /// /// This function panics if the [`Value`] is borrowed mutably. /// /// In many cases you may wish to call [`FromValue`] instead, as that can /// get a non-frozen value from an underlying frozen value. pub fn downcast_ref<T: AnyLifetime<'v>>(self) -> Option<ARef<'v, T>> { ValueLike::downcast_ref(self) } /// Are two values equal. If the values are of different types it will /// return [`false`]. It will only error if there is excessive recursion. pub fn equals(self, other: Value<'v>) -> anyhow::Result<bool> { ValueLike::equals(self, other) } /// How are two values comparable. For values of different types will return [`Err`]. pub fn compare(self, other: Value<'v>) -> anyhow::Result<Ordering> { ValueLike::compare(self, other) } /// Get a mutable reference to underlying data or [`None`] /// if contained object has different type than requested. /// /// This function returns an [`Err`] if the [`Value`] is already borrowed, is frozen, /// or frozen for iteration. /// /// While this reference is active, any [`get_aref`](Value::get_aref) or similar on the value will /// _cause a panic_. Therefore, it's super important not to call any Starlark operations, /// even as simple as equality, while holding the [`RefMut`]. pub fn downcast_mut<T: AnyLifetime<'v>>( self, heap: &'v Heap, ) -> anyhow::Result<Option<RefMut<'_, T>>> { let vref = self.get_ref_mut(heap)?; let any: RefMut<'_, dyn AnyLifetime<'v>> = RefMut::map(vref, |v| v.as_dyn_any_mut()); Ok(if any.is::<T>() { Some(RefMut::map(any, |any| any.downcast_mut::<T>().unwrap())) } else { None }) } /// Describe the value, in order to get its metadata in a way that could be used /// to generate prototypes, help information or whatever other descriptive text /// is required. /// Plan is to make this return a data type at some point in the future, possibly /// move on to `StarlarkValue` and include data from members. pub fn describe(self, name: &str) -> String { if self.get_type() == FUNCTION_TYPE { format!("def {}: pass", self.to_repr().replace(" =...", " = None")) } else { format!("# {} = {}", name, self.to_repr()) } } /// Call `export_as` on the underlying value, but only if the type is mutable. /// Otherwise, does nothing. pub fn export_as(self, name: &str, heap: &'v Heap) { if let Some(mut mv) = self.get_ref_mut_already() { mv.export_as(heap, name) } } /// Return the attribute with the given name. Returns a pair of a boolean and the value. /// /// The type is [`AttrType::Method`] if the attribute was defined via [`StarlarkValue::get_methods`] /// and should be used as a signal that if the attribute is subsequently called, /// e.g. 
`object.attribute(argument)` then the `object` should be passed as the first /// argument to the function, e.g. `object.attribute(object, argument)`. pub fn get_attr( self, attribute: &str, heap: &'v Heap, ) -> anyhow::Result<(AttrType, Value<'v>)> { let aref = self.get_aref(); if let Some(methods) = aref.get_methods() { if let Some(v) = methods.get(attribute) { return Ok((AttrType::Method, v)); } } aref.get_attr(attribute, heap).map(|v| (AttrType::Field, v)) } /// Query whether an attribute exists on a type. Should be equivalent to whether /// [`get_attr`](Value::get_attr) succeeds, but potentially more efficient. pub fn has_attr(self, attribute: &str) -> bool { let aref = self.get_aref(); if let Some(methods) = aref.get_methods() { if methods.get(attribute).is_some() { return true; } } aref.has_attr(attribute) } /// Get a list of all the attributes this function supports, used to implement the /// `dir()` function. pub fn dir_attr(self) -> Vec<String> { let aref = self.get_aref(); let mut result = if let Some(methods) = aref.get_methods() { let mut res = methods.names(); res.extend(aref.dir_attr()); res } else { aref.dir_attr() }; result.sort(); result } } /// Methods that just forward to the underlying [`StarlarkValue`]. impl<'v> Value<'v> { pub fn get_type(self) -> &'static str { self.get_aref().get_type() } pub fn to_bool(self) -> bool { // Fast path for the common case if let Some(x) = self.unpack_bool() { x } else { self.get_aref().to_bool() } } pub fn to_int(self) -> anyhow::Result<i32> { // Fast path for the common case if let Some(x) = self.unpack_int() { Ok(x) } else { self.get_aref().to_int() } } pub fn at(self, index: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().at(index, heap) } pub fn slice( self, start: Option<Value<'v>>, stop: Option<Value<'v>>, stride: Option<Value<'v>>, heap: &'v Heap, ) -> anyhow::Result<Value<'v>> { self.get_aref().slice(start, stop, stride, heap) } pub fn length(self) -> anyhow::Result<i32> { self.get_aref().length() } pub fn is_in(self, other: Value<'v>) -> anyhow::Result<bool> { self.get_aref().is_in(other) } pub fn plus(self, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().plus(heap) } pub fn minus(self, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().minus(heap) } pub fn sub(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().sub(other, heap) } pub fn mul(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().mul(other, heap) } pub fn percent(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().percent(other, heap) } pub fn floor_div(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().floor_div(other, heap) } pub fn bit_and(self, other: Value<'v>) -> anyhow::Result<Value<'v>> { self.get_aref().bit_and(other) } pub fn bit_or(self, other: Value<'v>) -> anyhow::Result<Value<'v>> { self.get_aref().bit_or(other) } pub fn bit_xor(self, other: Value<'v>) -> anyhow::Result<Value<'v>> { self.get_aref().bit_xor(other) } pub fn left_shift(self, other: Value<'v>) -> anyhow::Result<Value<'v>> { self.get_aref().left_shift(other) } pub fn right_shift(self, other: Value<'v>) -> anyhow::Result<Value<'v>> { self.get_aref().right_shift(other) } pub fn new_invoker(self, eval: &mut Evaluator<'v, '_>) -> anyhow::Result<FunctionInvoker<'v>> { self.get_aref().new_invoker(self, eval) } pub fn get_type_value(self) -> &'static ConstFrozenValue { self.get_aref().get_type_value() } }
{ let _guard = stack_guard::stack_guard()?; self.get_aref().equals(other) }
conditional_block
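
The `conditional_block` middle in this row is the body of `ValueLike::equals`: a pointer-identity fast path, then a recursion guard taken before the potentially deep structural comparison. The sketch below reproduces that shape with a hypothetical `Node` type and an explicit depth counter standing in for the crate's `stack_guard`; none of these names exist in starlark.

use std::rc::Rc;

// Stand-in for the stack guard's recursion budget; value is illustrative.
const MAX_DEPTH: usize = 64;

enum Node {
    Leaf(i64),
    Pair(Rc<Node>, Rc<Node>),
}

fn equals(a: &Rc<Node>, b: &Rc<Node>, depth: usize) -> Result<bool, &'static str> {
    // Fast path, like ptr_eq in the sample: the same heap object is
    // trivially equal to itself, no traversal needed.
    if Rc::ptr_eq(a, b) {
        return Ok(true);
    }
    // Stand-in for stack_guard(): error out instead of overflowing the stack.
    if depth >= MAX_DEPTH {
        return Err("recursion limit exceeded");
    }
    match (a.as_ref(), b.as_ref()) {
        (Node::Leaf(x), Node::Leaf(y)) => Ok(x == y),
        (Node::Pair(l1, r1), Node::Pair(l2, r2)) => {
            Ok(equals(l1, l2, depth + 1)? && equals(r1, r2, depth + 1)?)
        }
        _ => Ok(false),
    }
}

fn main() {
    let shared = Rc::new(Node::Leaf(7));
    let a = Rc::new(Node::Pair(shared.clone(), shared.clone()));
    // Identical pointers short-circuit without walking the structure.
    assert_eq!(equals(&a, &a, 0), Ok(true));
    // Structurally equal but distinct allocations take the slow path.
    let b = Rc::new(Node::Pair(Rc::new(Node::Leaf(7)), shared));
    assert_eq!(equals(&a, &b, 0), Ok(true));
}
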
mod.rs
/* * Copyright 2018 The Starlark in Rust Authors. * Copyright (c) Facebook, Inc. and its affiliates. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ //! Defines a runtime Starlark value ([`Value`]) and traits for defining custom values ([`StarlarkValue`]). //! //! This module contains code for working with Starlark values: //! //! * Most code dealing with Starlark will use [`Value`], as it represents the fundamental values used in //! Starlark. When frozen, they become [`FrozenValue`]. //! * Values are garbage-collected, so a given [`Value`] lives on a [`Heap`]. //! * Rust values (e.g. [`String`], [`Vec`]) can be added to the [`Heap`] with [`AllocValue`], //! and deconstructed from a [`Value`] with //! * To define your own Rust data type that can live in a [`Value`] it must implement the [`StarlarkValue`] //! trait. //! * All the nested modules represent the built-in Starlark values. These are all defined using [`StarlarkValue`], //! so may serve as interesting inspiration for writing your own values, in addition to occuring in Starlark programs. pub use crate::values::{error::*, iter::*, layout::*, owned::*, traits::*, types::*, unpack::*}; use crate::{ collections::{Hashed, SmallHashResult}, eval::Evaluator, values::{function::FUNCTION_TYPE, types::function::FunctionInvoker}, }; pub use gazebo::{any::AnyLifetime, cell::ARef, prelude::*}; use indexmap::Equivalent; use std::{ cell::RefMut, cmp::Ordering, fmt, fmt::{Debug, Display}, }; #[macro_use] mod comparison; // Submodules mod error; pub(crate) mod fast_string; mod index; mod interpolation; mod iter; mod layout; mod owned; mod stack_guard; mod traits; mod types; mod typing; mod unpack; impl Display for Value<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", self.to_str()) } } impl Display for FrozenValue { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", Value::new_frozen(*self).to_str()) } } fn debug_value(typ: &str, v: Value, f: &mut fmt::Formatter) -> fmt::Result { f.debug_tuple(typ).field(v.get_aref().as_debug()).finish() } impl Debug for Value<'_> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
} } impl Debug for FrozenValue { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { debug_value("FrozenValue", Value::new_frozen(*self), f) } } impl<'v> PartialEq for Value<'v> { fn eq(&self, other: &Value<'v>) -> bool { self.equals(*other).ok() == Some(true) } } impl PartialEq for FrozenValue { fn eq(&self, other: &FrozenValue) -> bool { let v: Value = Value::new_frozen(*self); let other: Value = Value::new_frozen(*other); v.equals(other).ok() == Some(true) } } impl Eq for Value<'_> {} impl Eq for FrozenValue {} impl Equivalent<FrozenValue> for Value<'_> { fn equivalent(&self, key: &FrozenValue) -> bool { key.equals(*self).unwrap() } } impl Equivalent<Value<'_>> for FrozenValue { fn equivalent(&self, key: &Value) -> bool { self.equals(*key).unwrap() } } /// Trait for things that can be allocated on a [`Heap`] producing a [`Value`]. pub trait AllocValue<'v> { fn alloc_value(self, heap: &'v Heap) -> Value<'v>; } impl<'v> AllocValue<'v> for Value<'v> { fn alloc_value(self, _heap: &'v Heap) -> Value<'v> { self } } /// Trait for things that can be allocated on a [`FrozenHeap`] producing a [`FrozenValue`]. pub trait AllocFrozenValue { fn alloc_frozen_value(self, heap: &FrozenHeap) -> FrozenValue; } impl FrozenHeap { /// Allocate a new value on a [`FrozenHeap`]. pub fn alloc<T: AllocFrozenValue>(&self, val: T) -> FrozenValue { val.alloc_frozen_value(self) } } impl Heap { /// Allocate a new value on a [`Heap`]. pub fn alloc<'v, T: AllocValue<'v>>(&'v self, x: T) -> Value<'v> { x.alloc_value(self) } } /// Abstract over [`Value`] and [`FrozenValue`]. /// /// The methods on this trait are those required to implement containers, /// allowing implementations of [`ComplexValue`] to be agnostic of their contained type. /// For details about each function, see the documentation for [`Value`], /// which provides the same functions (and more). pub trait ValueLike<'v>: Eq + Copy + Debug { /// Produce a [`Value`] regardless of the type you are starting with. 
fn to_value(self) -> Value<'v>; fn get_aref(self) -> ARef<'v, dyn StarlarkValue<'v>>; fn new_invoker(self, eval: &mut Evaluator<'v, '_>) -> anyhow::Result<FunctionInvoker<'v>> { self.to_value().new_invoker(eval) } fn get_hash(self) -> anyhow::Result<u64> { self.get_aref().get_hash() } fn get_hashed(self) -> anyhow::Result<Hashed<Self>> { Ok(Hashed::new_unchecked( SmallHashResult::new_unchecked(self.get_hash()?), self, )) } fn collect_repr(self, collector: &mut String) { self.get_aref().collect_repr(collector); } fn to_json(self) -> anyhow::Result<String> { self.get_aref().to_json() } fn equals(self, other: Value<'v>) -> anyhow::Result<bool> { if self.to_value().ptr_eq(other) { Ok(true) } else { let _guard = stack_guard::stack_guard()?; self.get_aref().equals(other) } } fn compare(self, other: Value<'v>) -> anyhow::Result<Ordering> { let _guard = stack_guard::stack_guard()?; self.get_aref().compare(other) } fn downcast_ref<T: AnyLifetime<'v>>(self) -> Option<ARef<'v, T>> { let any = ARef::map(self.get_aref(), |e| e.as_dyn_any()); if any.is::<T>() { Some(ARef::map(any, |any| any.downcast_ref::<T>().unwrap())) } else { None } } } impl<'v, V: ValueLike<'v>> Hashed<V> { pub(crate) fn to_hashed_value(&self) -> Hashed<Value<'v>> { // Safe because we know frozen values have the same hash as non-frozen ones Hashed::new_unchecked(self.hash(), self.key().to_value()) } } impl<'v> Hashed<Value<'v>> { fn freeze(&self, freezer: &Freezer) -> Hashed<FrozenValue> { // Safe because we know frozen values have the same hash as non-frozen ones let key = self.key().freeze(freezer); // But it's an easy mistake to make, so actually check it in debug debug_assert_eq!(Some(self.hash()), key.get_hashed().ok().map(|x| x.hash())); Hashed::new_unchecked(self.hash(), key) } } impl<'v> ValueLike<'v> for Value<'v> { fn get_aref(self) -> ARef<'v, dyn StarlarkValue<'v>> { Value::get_aref(self) } fn to_value(self) -> Value<'v> { self } } impl<'v> ValueLike<'v> for FrozenValue { fn get_aref(self) -> ARef<'v, dyn StarlarkValue<'v>> { ARef::new_ptr(self.get_ref()) } fn to_value(self) -> Value<'v> { Value::new_frozen(self) } } impl FrozenValue { /// Convert a [`FrozenValue`] back to a [`Value`]. pub fn to_value<'v>(self) -> Value<'v> { Value::new_frozen(self) } } /// How an attribute (e.g. `x.f`) should behave. #[derive(Clone, Copy, Dupe, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] pub enum AttrType { /// The attribute is a field, a direct value with no special behaviour. Field, /// The attribute is a method, which should be called passing the `x` value /// as its first argument. It will either be a function (which is transformed /// into a [`WrappedMethod`](crate::values::function::WrappedMethod)) or a /// [`NativeAttribute`](crate::values::function::NativeAttribute) /// (which is evaluated immediately). Method, } impl<'v> Value<'v> { /// Add two [`Value`]s together. Will first try using [`radd`](StarlarkValue::radd), /// before falling back to [`add`](StarlarkValue::add). pub fn add(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { let me = self.to_value(); if let Some(v) = other.get_aref().radd(me, heap) { v } else { self.get_aref().add(other, heap) } } /// Convert a value to a [`FrozenValue`] using a supplied [`Freezer`]. pub fn freeze(self, freezer: &Freezer) -> FrozenValue { freezer.freeze(self) } /// Implement the `str()` function - converts a string value to itself, /// otherwise uses `repr()`. 
pub fn to_str(self) -> String { match self.unpack_str() { None => self.to_repr(), Some(s) => s.to_owned(), } } /// Implement the `repr()` function. pub fn to_repr(self) -> String { let mut s = String::new(); self.collect_repr(&mut s); s } /// Forwards to [`ComplexValue::set_attr`]. pub fn set_attr( self, attribute: &str, alloc_value: Value<'v>, heap: &'v Heap, ) -> anyhow::Result<()> { self.get_ref_mut(heap)?.set_attr(attribute, alloc_value) } /// Forwards to [`ComplexValue::set_at`]. pub fn set_at( self, index: Value<'v>, alloc_value: Value<'v>, heap: &'v Heap, ) -> anyhow::Result<()> { self.get_ref_mut(heap)?.set_at(index, alloc_value) } /// Return the contents of an iterable collection, as an owned vector. pub fn iterate_collect(self, heap: &'v Heap) -> anyhow::Result<Vec<Value<'v>>> { // You might reasonably think this is mostly called on lists (I think it is), // and thus that a fast-path here would speed things up. But in my experiments // it's completely irrelevant (you pay a bit for the check, you save a bit on each step). Ok(self.iterate(heap)?.iter().collect()) } /// Produce an iterable from a value. pub fn iterate(self, heap: &'v Heap) -> anyhow::Result<RefIterable<'v>> { let me: ARef<'v, dyn StarlarkValue> = self.get_aref(); me.iterate()?; Ok(RefIterable::new( heap, ARef::map(me, |e| e.iterate().unwrap()), )) } /// Get the [`Hashed`] version of this [`Value`]. pub fn get_hashed(self) -> anyhow::Result<Hashed<Self>> { ValueLike::get_hashed(self) } /// Get a reference to underlying data or [`None`] /// if contained object has different type than requested. /// /// This function panics if the [`Value`] is borrowed mutably. /// /// In many cases you may wish to call [`FromValue`] instead, as that can /// get a non-frozen value from an underlying frozen value. pub fn downcast_ref<T: AnyLifetime<'v>>(self) -> Option<ARef<'v, T>> { ValueLike::downcast_ref(self) } /// Are two values equal. If the values are of different types it will /// return [`false`]. It will only error if there is excessive recursion. pub fn equals(self, other: Value<'v>) -> anyhow::Result<bool> { ValueLike::equals(self, other) } /// How are two values comparable. For values of different types will return [`Err`]. pub fn compare(self, other: Value<'v>) -> anyhow::Result<Ordering> { ValueLike::compare(self, other) } /// Get a mutable reference to underlying data or [`None`] /// if contained object has different type than requested. /// /// This function returns an [`Err`] if the [`Value`] is already borrowed, is frozen, /// or frozen for iteration. /// /// While this reference is active, any [`get_aref`](Value::get_aref) or similar on the value will /// _cause a panic_. Therefore, it's super important not to call any Starlark operations, /// even as simple as equality, while holding the [`RefMut`]. pub fn downcast_mut<T: AnyLifetime<'v>>( self, heap: &'v Heap, ) -> anyhow::Result<Option<RefMut<'_, T>>> { let vref = self.get_ref_mut(heap)?; let any: RefMut<'_, dyn AnyLifetime<'v>> = RefMut::map(vref, |v| v.as_dyn_any_mut()); Ok(if any.is::<T>() { Some(RefMut::map(any, |any| any.downcast_mut::<T>().unwrap())) } else { None }) } /// Describe the value, in order to get its metadata in a way that could be used /// to generate prototypes, help information or whatever other descriptive text /// is required. /// Plan is to make this return a data type at some point in the future, possibly /// move on to `StarlarkValue` and include data from members. 
pub fn describe(self, name: &str) -> String { if self.get_type() == FUNCTION_TYPE { format!("def {}: pass", self.to_repr().replace(" =...", " = None")) } else { format!("# {} = {}", name, self.to_repr()) } } /// Call `export_as` on the underlying value, but only if the type is mutable. /// Otherwise, does nothing. pub fn export_as(self, name: &str, heap: &'v Heap) { if let Some(mut mv) = self.get_ref_mut_already() { mv.export_as(heap, name) } } /// Return the attribute with the given name. Returns a pair of a boolean and the value. /// /// The type is [`AttrType::Method`] if the attribute was defined via [`StarlarkValue::get_methods`] /// and should be used as a signal that if the attribute is subsequently called, /// e.g. `object.attribute(argument)` then the `object` should be passed as the first /// argument to the function, e.g. `object.attribute(object, argument)`. pub fn get_attr( self, attribute: &str, heap: &'v Heap, ) -> anyhow::Result<(AttrType, Value<'v>)> { let aref = self.get_aref(); if let Some(methods) = aref.get_methods() { if let Some(v) = methods.get(attribute) { return Ok((AttrType::Method, v)); } } aref.get_attr(attribute, heap).map(|v| (AttrType::Field, v)) } /// Query whether an attribute exists on a type. Should be equivalent to whether /// [`get_attr`](Value::get_attr) succeeds, but potentially more efficient. pub fn has_attr(self, attribute: &str) -> bool { let aref = self.get_aref(); if let Some(methods) = aref.get_methods() { if methods.get(attribute).is_some() { return true; } } aref.has_attr(attribute) } /// Get a list of all the attributes this function supports, used to implement the /// `dir()` function. pub fn dir_attr(self) -> Vec<String> { let aref = self.get_aref(); let mut result = if let Some(methods) = aref.get_methods() { let mut res = methods.names(); res.extend(aref.dir_attr()); res } else { aref.dir_attr() }; result.sort(); result } } /// Methods that just forward to the underlying [`StarlarkValue`]. 
impl<'v> Value<'v> { pub fn get_type(self) -> &'static str { self.get_aref().get_type() } pub fn to_bool(self) -> bool { // Fast path for the common case if let Some(x) = self.unpack_bool() { x } else { self.get_aref().to_bool() } } pub fn to_int(self) -> anyhow::Result<i32> { // Fast path for the common case if let Some(x) = self.unpack_int() { Ok(x) } else { self.get_aref().to_int() } } pub fn at(self, index: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().at(index, heap) } pub fn slice( self, start: Option<Value<'v>>, stop: Option<Value<'v>>, stride: Option<Value<'v>>, heap: &'v Heap, ) -> anyhow::Result<Value<'v>> { self.get_aref().slice(start, stop, stride, heap) } pub fn length(self) -> anyhow::Result<i32> { self.get_aref().length() } pub fn is_in(self, other: Value<'v>) -> anyhow::Result<bool> { self.get_aref().is_in(other) } pub fn plus(self, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().plus(heap) } pub fn minus(self, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().minus(heap) } pub fn sub(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().sub(other, heap) } pub fn mul(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().mul(other, heap) } pub fn percent(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().percent(other, heap) } pub fn floor_div(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().floor_div(other, heap) } pub fn bit_and(self, other: Value<'v>) -> anyhow::Result<Value<'v>> { self.get_aref().bit_and(other) } pub fn bit_or(self, other: Value<'v>) -> anyhow::Result<Value<'v>> { self.get_aref().bit_or(other) } pub fn bit_xor(self, other: Value<'v>) -> anyhow::Result<Value<'v>> { self.get_aref().bit_xor(other) } pub fn left_shift(self, other: Value<'v>) -> anyhow::Result<Value<'v>> { self.get_aref().left_shift(other) } pub fn right_shift(self, other: Value<'v>) -> anyhow::Result<Value<'v>> { self.get_aref().right_shift(other) } pub fn new_invoker(self, eval: &mut Evaluator<'v, '_>) -> anyhow::Result<FunctionInvoker<'v>> { self.get_aref().new_invoker(self, eval) } pub fn get_type_value(self) -> &'static ConstFrozenValue { self.get_aref().get_type_value() } }
debug_value("Value", *self, f)
random_line_split
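
The `random_line_split` middle above lands inside `debug_value`, the helper both `Debug` impls share: `Value` and `FrozenValue` differ only in the type name passed to `f.debug_tuple`. Here is a small sketch of the same delegation pattern, with hypothetical `Wrapped`/`FrozenWrapped` types in place of the real ones:

use std::fmt;

struct Wrapped(i64);
struct FrozenWrapped(i64);

// One shared helper formats the payload; each wrapper's Debug impl
// supplies only its own type name, mirroring debug_value in the sample.
fn debug_wrapped(typ: &str, payload: &dyn fmt::Debug, f: &mut fmt::Formatter) -> fmt::Result {
    // Renders as e.g. `Wrapped(42)`.
    f.debug_tuple(typ).field(payload).finish()
}

impl fmt::Debug for Wrapped {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        debug_wrapped("Wrapped", &self.0, f)
    }
}

impl fmt::Debug for FrozenWrapped {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        debug_wrapped("FrozenWrapped", &self.0, f)
    }
}

fn main() {
    assert_eq!(format!("{:?}", Wrapped(42)), "Wrapped(42)");
    assert_eq!(format!("{:?}", FrozenWrapped(7)), "FrozenWrapped(7)");
}
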
backend.rs
`. use crate::memory::Memory; use cranelift_codegen::binemit::{Addend, CodeOffset, NullTrapSink, Reloc, RelocSink}; use cranelift_codegen::isa::TargetIsa; use cranelift_codegen::{self, ir, settings}; use cranelift_module::{ Backend, DataContext, DataDescription, Init, Linkage, ModuleNamespace, ModuleResult, }; use cranelift_native; use libc; use std::collections::HashMap; use std::ffi::CString; use std::io::Write; use std::ptr; use target_lexicon::PointerWidth; #[cfg(windows)] use winapi; const EXECUTABLE_DATA_ALIGNMENT: u8 = 0x10; const WRITABLE_DATA_ALIGNMENT: u8 = 0x8; const READONLY_DATA_ALIGNMENT: u8 = 0x1; /// A builder for `SimpleJITBackend`. pub struct SimpleJITBuilder { isa: Box<dyn TargetIsa>, symbols: HashMap<String, *const u8>, libcall_names: Box<dyn Fn(ir::LibCall) -> String>, } impl SimpleJITBuilder { /// Create a new `SimpleJITBuilder`. /// /// The `libcall_names` function provides a way to translate `cranelift_codegen`'s `ir::LibCall` /// enum to symbols. LibCalls are inserted in the IR as part of the legalization for certain /// floating point instructions, and for stack probes. If you don't know what to use for this /// argument, use `cranelift_module::default_libcall_names()`. pub fn new(libcall_names: Box<dyn Fn(ir::LibCall) -> String>) -> Self { let flag_builder = settings::builder(); let isa_builder = cranelift_native::builder().unwrap_or_else(|msg| { panic!("host machine is not supported: {}", msg); }); let isa = isa_builder.finish(settings::Flags::new(flag_builder)); Self::with_isa(isa, libcall_names) } /// Create a new `SimpleJITBuilder` with an arbitrary target. This is mainly /// useful for testing. /// /// SimpleJIT requires a `TargetIsa` configured for non-PIC. /// /// To create a `SimpleJITBuilder` for native use, use the `new` constructor /// instead. /// /// The `libcall_names` function provides a way to translate `cranelift_codegen`'s `ir::LibCall` /// enum to symbols. LibCalls are inserted in the IR as part of the legalization for certain /// floating point instructions, and for stack probes. If you don't know what to use for this /// argument, use `cranelift_module::default_libcall_names()`. pub fn with_isa( isa: Box<dyn TargetIsa>, libcall_names: Box<dyn Fn(ir::LibCall) -> String>, ) -> Self { debug_assert!(!isa.flags().is_pic(), "SimpleJIT requires non-PIC code"); let symbols = HashMap::new(); Self { isa, symbols, libcall_names, } } /// Define a symbol in the internal symbol table. /// /// The JIT will use the symbol table to resolve names that are declared, /// but not defined, in the module being compiled. A common example is /// external functions. With this method, functions and data can be exposed /// to the code being compiled which are defined by the host. /// /// If a symbol is defined more than once, the most recent definition will /// be retained. /// /// If the JIT fails to find a symbol in its internal table, it will fall /// back to a platform-specific search (this typically involves searching /// the current process for public symbols, followed by searching the /// platform's C runtime). pub fn symbol<K>(&mut self, name: K, ptr: *const u8) -> &Self where K: Into<String>, { self.symbols.insert(name.into(), ptr); self } /// Define multiple symbols in the internal symbol table. /// /// Using this is equivalent to calling `symbol` on each element. 
pub fn symbols<It, K>(&mut self, symbols: It) -> &Self where It: IntoIterator<Item = (K, *const u8)>, K: Into<String>, { for (name, ptr) in symbols { self.symbols.insert(name.into(), ptr); } self } } /// A `SimpleJITBackend` implements `Backend` and emits code and data into memory where it can be /// directly called and accessed. /// /// See the `SimpleJITBuilder` for a convenient way to construct `SimpleJITBackend` instances. pub struct SimpleJITBackend { isa: Box<dyn TargetIsa>, symbols: HashMap<String, *const u8>, libcall_names: Box<dyn Fn(ir::LibCall) -> String>, code_memory: Memory, readonly_memory: Memory, writable_memory: Memory, } /// A record of a relocation to perform. struct RelocRecord { offset: CodeOffset, reloc: Reloc, name: ir::ExternalName, addend: Addend, } pub struct SimpleJITCompiledFunction { code: *mut u8, size: usize, relocs: Vec<RelocRecord>, } pub struct SimpleJITCompiledData { storage: *mut u8, size: usize, relocs: Vec<RelocRecord>, } impl SimpleJITBackend { fn lookup_symbol(&self, name: &str) -> *const u8 { match self.symbols.get(name) { Some(&ptr) => ptr, None => lookup_with_dlsym(name), } } fn get_definition( &self, namespace: &ModuleNamespace<Self>, name: &ir::ExternalName, ) -> *const u8 { match *name { ir::ExternalName::User {.. } => { if namespace.is_function(name) { let (def, name_str, _signature) = namespace.get_function_definition(&name); match def { Some(compiled) => compiled.code, None => self.lookup_symbol(name_str), } } else { let (def, name_str, _writable) = namespace.get_data_definition(&name); match def { Some(compiled) => compiled.storage, None => self.lookup_symbol(name_str), } } } ir::ExternalName::LibCall(ref libcall) => { let sym = (self.libcall_names)(*libcall); self.lookup_symbol(&sym) } _ => panic!("invalid ExternalName {}", name), } } } impl<'simple_jit_backend> Backend for SimpleJITBackend { type Builder = SimpleJITBuilder; /// SimpleJIT compiled function and data objects may have outstanding /// relocations that need to be performed before the memory can be used. /// These relocations are performed within `finalize_function` and /// `finalize_data`. type CompiledFunction = SimpleJITCompiledFunction; type CompiledData = SimpleJITCompiledData; /// SimpleJIT emits code and data into memory, and provides raw pointers /// to them. type FinalizedFunction = *const u8; type FinalizedData = (*mut u8, usize); /// SimpleJIT emits code and data into memory as it processes them, so it /// doesn't need to provide anything after the `Module` is complete. type Product = (); /// Create a new `SimpleJITBackend`. fn new(builder: SimpleJITBuilder) -> Self { Self { isa: builder.isa, symbols: builder.symbols, libcall_names: builder.libcall_names, code_memory: Memory::new(), readonly_memory: Memory::new(), writable_memory: Memory::new(), } } fn isa(&self) -> &dyn TargetIsa { &*self.isa } fn declare_function(&mut self, _name: &str, _linkage: Linkage) { // Nothing to do. } fn declare_data( &mut self, _name: &str, _linkage: Linkage, _writable: bool, _align: Option<u8>, ) { // Nothing to do. 
} fn define_function( &mut self, name: &str, ctx: &cranelift_codegen::Context, _namespace: &ModuleNamespace<Self>, code_size: u32, ) -> ModuleResult<Self::CompiledFunction> { let size = code_size as usize; let ptr = self .code_memory .allocate(size, EXECUTABLE_DATA_ALIGNMENT) .expect("TODO: handle OOM etc."); if cfg!(target_os = "linux") && ::std::env::var_os("PERF_BUILDID_DIR").is_some() { let mut map_file = ::std::fs::OpenOptions::new() .create(true) .append(true) .open(format!("/tmp/perf-{}.map", ::std::process::id())) .unwrap(); let _ = writeln!(map_file, "{:x} {:x} {}", ptr as usize, code_size, name); } let mut reloc_sink = SimpleJITRelocSink::new(); // Ignore traps for now. For now, frontends should just avoid generating code // that traps. let mut trap_sink = NullTrapSink {}; unsafe { ctx.emit_to_memory(&*self.isa, ptr, &mut reloc_sink, &mut trap_sink) }; Ok(Self::CompiledFunction { code: ptr, size, relocs: reloc_sink.relocs, }) } fn define_data( &mut self, _name: &str, writable: bool, align: Option<u8>, data: &DataContext, _namespace: &ModuleNamespace<Self>, ) -> ModuleResult<Self::CompiledData> { let &DataDescription { ref init, ref function_decls, ref data_decls, ref function_relocs, ref data_relocs, } = data.description(); let size = init.size(); let storage = if writable { self.writable_memory .allocate(size, align.unwrap_or(WRITABLE_DATA_ALIGNMENT)) .expect("TODO: handle OOM etc.") } else { self.readonly_memory .allocate(size, align.unwrap_or(READONLY_DATA_ALIGNMENT)) .expect("TODO: handle OOM etc.") }; match *init { Init::Uninitialized => { panic!("data is not initialized yet"); } Init::Zeros {.. } => { unsafe { ptr::write_bytes(storage, 0, size) }; } Init::Bytes { ref contents } => { let src = contents.as_ptr(); unsafe { ptr::copy_nonoverlapping(src, storage, size) }; } } let reloc = match self.isa.triple().pointer_width().unwrap() { PointerWidth::U16 => panic!(), PointerWidth::U32 => Reloc::Abs4, PointerWidth::U64 => Reloc::Abs8, }; let mut relocs = Vec::new(); for &(offset, id) in function_relocs { relocs.push(RelocRecord { reloc, offset, name: function_decls[id].clone(), addend: 0, }); } for &(offset, id, addend) in data_relocs { relocs.push(RelocRecord { reloc, offset, name: data_decls[id].clone(), addend, }); } Ok(Self::CompiledData { storage, size, relocs, }) } fn write_data_funcaddr( &mut self, _data: &mut Self::CompiledData, _offset: usize, _what: ir::FuncRef, ) { unimplemented!(); } fn write_data_dataaddr( &mut self, _data: &mut Self::CompiledData, _offset: usize, _what: ir::GlobalValue, _usize: Addend, ) { unimplemented!(); } fn finalize_function( &mut self, func: &Self::CompiledFunction, namespace: &ModuleNamespace<Self>, ) -> Self::FinalizedFunction { use std::ptr::write_unaligned; for &RelocRecord { reloc, offset, ref name, addend, } in &func.relocs { let ptr = func.code; debug_assert!((offset as usize) < func.size); let at = unsafe { ptr.offset(offset as isize) }; let base = self.get_definition(namespace, name); // TODO: Handle overflow. let what = unsafe { base.offset(addend as isize) }; match reloc { Reloc::Abs4 => { // TODO: Handle overflow. #[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_ptr_alignment))] unsafe { write_unaligned(at as *mut u32, what as u32) }; } Reloc::Abs8 => { #[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_ptr_alignment))] unsafe { write_unaligned(at as *mut u64, what as u64) }; } Reloc::X86PCRel4 | Reloc::X86CallPCRel4 => { // TODO: Handle overflow. 
let pcrel = ((what as isize) - (at as isize)) as i32; #[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_ptr_alignment))] unsafe { write_unaligned(at as *mut i32, pcrel) }; } Reloc::X86GOTPCRel4 | Reloc::X86CallPLTRel4 => panic!("unexpected PIC relocation"), _ => unimplemented!(), } } func.code } fn
(&self, func: &Self::CompiledFunction) -> Self::FinalizedFunction { func.code } fn finalize_data( &mut self, data: &Self::CompiledData, namespace: &ModuleNamespace<Self>, ) -> Self::FinalizedData { use std::ptr::write_unaligned; for &RelocRecord { reloc, offset, ref name, addend, } in &data.relocs { let ptr = data.storage; debug_assert!((offset as usize) < data.size); let at = unsafe { ptr.offset(offset as isize) }; let base = self.get_definition(namespace, name); // TODO: Handle overflow. let what = unsafe { base.offset(addend as isize) }; match reloc { Reloc::Abs4 => { // TODO: Handle overflow. #[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_ptr_alignment))] unsafe { write_unaligned(at as *mut u32, what as u32) }; } Reloc::Abs8 => { #[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_ptr_alignment))] unsafe { write_unaligned(at as *mut u64, what as u64) }; } Reloc::X86PCRel4 | Reloc::X86CallPCRel4 | Reloc::X86GOTPCRel4 | Reloc::X86CallPLTRel4 => panic!("unexpected text relocation in data"), _ => unimplemented!(), } } (data.storage, data.size) } fn get_finalized_data(&self, data: &Self::CompiledData) -> Self::FinalizedData { (data.storage, data.size) } fn publish(&mut self) { // Now that we're done patching, prepare the memory for execution! self.readonly_memory.set_readonly(); self.code_memory.set_readable_and_executable(); } /// SimpleJIT emits code and data into memory as it processes them, so it /// doesn't need to provide anything after the `Module` is complete. fn finish(self) {} } #[cfg(not(windows))] fn lookup_with_dlsym(name: &str) -> *const u8 { let c_str = CString::new(name).unwrap(); let c_str_ptr = c_str.as_ptr(); let sym = unsafe { libc::dlsym(libc::RTLD_DEFAULT, c_str_ptr) }; if sym.is_null() { panic!("can't resolve symbol {}", name); } sym as *const u8 } #[cfg(windows)] fn lookup_with_dlsym(name: &str) -> *const u8 { const MSVCRT_DLL: &[u8] = b"msvcrt.dll\0"; let c_str = CString::new(name).unwrap(); let c_str_ptr = c_str.as_ptr(); unsafe { let handles = [ // try to find the searched symbol in the currently running executable ptr::null_mut(), // try to find the searched symbol in local c runtime winapi::um::libloaderapi::GetModuleHandleA(MSVCRT_DLL.as_ptr() as *const i8), ]; for handle in &handles { let addr = winapi::um::libloaderapi::GetProcAddress(*handle, c_str_ptr); if addr.is_null() { continue; } return addr as *const u8; } let msg = if handles[1].is_null() { "(msvcrt not loaded)" } else { "" }; panic!("cannot resolve address of symbol {} {}", name, msg); } } struct SimpleJITRelocSink { pub relocs: Vec<RelocRecord>, } impl SimpleJITRelocSink { pub fn new() -> Self { Self { relocs: Vec::new() } } } impl RelocSink for SimpleJITRelocSink { fn reloc_ebb(&mut self, _offset: CodeOffset, _reloc: Reloc, _ebb_offset: CodeOffset) { unimplemented!(); } fn reloc_external( &mut self, offset: CodeOffset, reloc: Reloc, name: &ir::ExternalName, addend: Addend, ) { self.relocs.push(RelocRecord { offset, reloc, name: name.clone(), addend, }); } fn reloc_jt(&mut self, _offset: CodeOffset, reloc: Reloc, _jt: ir::JumpTable) { match reloc { Reloc::X86PCRelRodata4 => { // Not necessary to record this unless we are going to split apart code and its // jumptbl/rodata. } _ => { panic!("Unhandled reloc"); }
get_finalized_function
identifier_name
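The `finalize_function` loop in these backend.rs records patches each recorded relocation in place once every definition has a final address. As a standalone illustration of the arithmetic behind the `Reloc::X86PCRel4` arm (not part of the original source; the `patch_pcrel4` helper name is hypothetical), the stored 32-bit value is simply the displacement from the relocation site to the target:

use std::ptr::write_unaligned;

/// Hypothetical helper: patch a 4-byte PC-relative relocation at `at`
/// so that it refers to `target`. Mirrors the Reloc::X86PCRel4 arm.
///
/// Safety: `at` must point at 4 writable bytes inside emitted code.
#[allow(clippy::cast_ptr_alignment)]
unsafe fn patch_pcrel4(at: *mut u8, target: *const u8) {
    // Displacement from the relocation site to the target. Overflow past
    // i32 range is not handled, matching the "TODO: Handle overflow"
    // comments in the original.
    let pcrel = (target as isize).wrapping_sub(at as isize) as i32;
    write_unaligned(at as *mut i32, pcrel);
}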
backend.rs
Backend`. use crate::memory::Memory; use cranelift_codegen::binemit::{Addend, CodeOffset, NullTrapSink, Reloc, RelocSink}; use cranelift_codegen::isa::TargetIsa; use cranelift_codegen::{self, ir, settings}; use cranelift_module::{ Backend, DataContext, DataDescription, Init, Linkage, ModuleNamespace, ModuleResult, }; use cranelift_native; use libc; use std::collections::HashMap; use std::ffi::CString; use std::io::Write; use std::ptr; use target_lexicon::PointerWidth; #[cfg(windows)] use winapi; const EXECUTABLE_DATA_ALIGNMENT: u8 = 0x10; const WRITABLE_DATA_ALIGNMENT: u8 = 0x8; const READONLY_DATA_ALIGNMENT: u8 = 0x1; /// A builder for `SimpleJITBackend`. pub struct SimpleJITBuilder { isa: Box<dyn TargetIsa>, symbols: HashMap<String, *const u8>, libcall_names: Box<dyn Fn(ir::LibCall) -> String>, } impl SimpleJITBuilder { /// Create a new `SimpleJITBuilder`. /// /// The `libcall_names` function provides a way to translate `cranelift_codegen`'s `ir::LibCall` /// enum to symbols. LibCalls are inserted in the IR as part of the legalization for certain /// floating point instructions, and for stack probes. If you don't know what to use for this /// argument, use `cranelift_module::default_libcall_names()`. pub fn new(libcall_names: Box<dyn Fn(ir::LibCall) -> String>) -> Self { let flag_builder = settings::builder(); let isa_builder = cranelift_native::builder().unwrap_or_else(|msg| { panic!("host machine is not supported: {}", msg); }); let isa = isa_builder.finish(settings::Flags::new(flag_builder)); Self::with_isa(isa, libcall_names) } /// Create a new `SimpleJITBuilder` with an arbitrary target. This is mainly /// useful for testing. /// /// SimpleJIT requires a `TargetIsa` configured for non-PIC. /// /// To create a `SimpleJITBuilder` for native use, use the `new` constructor /// instead. /// /// The `libcall_names` function provides a way to translate `cranelift_codegen`'s `ir::LibCall` /// enum to symbols. LibCalls are inserted in the IR as part of the legalization for certain /// floating point instructions, and for stack probes. If you don't know what to use for this /// argument, use `cranelift_module::default_libcall_names()`. pub fn with_isa( isa: Box<dyn TargetIsa>, libcall_names: Box<dyn Fn(ir::LibCall) -> String>, ) -> Self { debug_assert!(!isa.flags().is_pic(), "SimpleJIT requires non-PIC code"); let symbols = HashMap::new(); Self { isa, symbols, libcall_names, } } /// Define a symbol in the internal symbol table. /// /// The JIT will use the symbol table to resolve names that are declared, /// but not defined, in the module being compiled. A common example is /// external functions. With this method, functions and data can be exposed /// to the code being compiled which are defined by the host. /// /// If a symbol is defined more than once, the most recent definition will /// be retained. /// /// If the JIT fails to find a symbol in its internal table, it will fall /// back to a platform-specific search (this typically involves searching /// the current process for public symbols, followed by searching the /// platform's C runtime). pub fn symbol<K>(&mut self, name: K, ptr: *const u8) -> &Self where K: Into<String>, { self.symbols.insert(name.into(), ptr); self } /// Define multiple symbols in the internal symbol table. /// /// Using this is equivalent to calling `symbol` on each element. 
pub fn symbols<It, K>(&mut self, symbols: It) -> &Self where It: IntoIterator<Item = (K, *const u8)>, K: Into<String>, { for (name, ptr) in symbols { self.symbols.insert(name.into(), ptr); } self } } /// A `SimpleJITBackend` implements `Backend` and emits code and data into memory where it can be /// directly called and accessed. /// /// See the `SimpleJITBuilder` for a convenient way to construct `SimpleJITBackend` instances. pub struct SimpleJITBackend { isa: Box<dyn TargetIsa>, symbols: HashMap<String, *const u8>, libcall_names: Box<dyn Fn(ir::LibCall) -> String>, code_memory: Memory, readonly_memory: Memory, writable_memory: Memory, } /// A record of a relocation to perform. struct RelocRecord { offset: CodeOffset, reloc: Reloc, name: ir::ExternalName, addend: Addend, } pub struct SimpleJITCompiledFunction { code: *mut u8, size: usize, relocs: Vec<RelocRecord>, } pub struct SimpleJITCompiledData { storage: *mut u8, size: usize, relocs: Vec<RelocRecord>, } impl SimpleJITBackend { fn lookup_symbol(&self, name: &str) -> *const u8 { match self.symbols.get(name) { Some(&ptr) => ptr, None => lookup_with_dlsym(name), } } fn get_definition( &self, namespace: &ModuleNamespace<Self>, name: &ir::ExternalName, ) -> *const u8 { match *name { ir::ExternalName::User {.. } => { if namespace.is_function(name) { let (def, name_str, _signature) = namespace.get_function_definition(&name); match def { Some(compiled) => compiled.code, None => self.lookup_symbol(name_str), } } else { let (def, name_str, _writable) = namespace.get_data_definition(&name); match def { Some(compiled) => compiled.storage, None => self.lookup_symbol(name_str), } } } ir::ExternalName::LibCall(ref libcall) => { let sym = (self.libcall_names)(*libcall); self.lookup_symbol(&sym) } _ => panic!("invalid ExternalName {}", name), } } } impl<'simple_jit_backend> Backend for SimpleJITBackend { type Builder = SimpleJITBuilder; /// SimpleJIT compiled function and data objects may have outstanding /// relocations that need to be performed before the memory can be used. /// These relocations are performed within `finalize_function` and /// `finalize_data`. type CompiledFunction = SimpleJITCompiledFunction; type CompiledData = SimpleJITCompiledData; /// SimpleJIT emits code and data into memory, and provides raw pointers /// to them. type FinalizedFunction = *const u8; type FinalizedData = (*mut u8, usize); /// SimpleJIT emits code and data into memory as it processes them, so it /// doesn't need to provide anything after the `Module` is complete. type Product = (); /// Create a new `SimpleJITBackend`. fn new(builder: SimpleJITBuilder) -> Self { Self { isa: builder.isa, symbols: builder.symbols, libcall_names: builder.libcall_names, code_memory: Memory::new(), readonly_memory: Memory::new(), writable_memory: Memory::new(), } } fn isa(&self) -> &dyn TargetIsa { &*self.isa } fn declare_function(&mut self, _name: &str, _linkage: Linkage) { // Nothing to do. } fn declare_data( &mut self, _name: &str, _linkage: Linkage, _writable: bool, _align: Option<u8>, ) { // Nothing to do. 
} fn define_function( &mut self, name: &str, ctx: &cranelift_codegen::Context, _namespace: &ModuleNamespace<Self>, code_size: u32, ) -> ModuleResult<Self::CompiledFunction> { let size = code_size as usize; let ptr = self .code_memory .allocate(size, EXECUTABLE_DATA_ALIGNMENT) .expect("TODO: handle OOM etc."); if cfg!(target_os = "linux") && ::std::env::var_os("PERF_BUILDID_DIR").is_some() { let mut map_file = ::std::fs::OpenOptions::new() .create(true) .append(true) .open(format!("/tmp/perf-{}.map", ::std::process::id())) .unwrap(); let _ = writeln!(map_file, "{:x} {:x} {}", ptr as usize, code_size, name); } let mut reloc_sink = SimpleJITRelocSink::new(); // Ignore traps for now. For now, frontends should just avoid generating code // that traps. let mut trap_sink = NullTrapSink {}; unsafe { ctx.emit_to_memory(&*self.isa, ptr, &mut reloc_sink, &mut trap_sink) }; Ok(Self::CompiledFunction { code: ptr, size, relocs: reloc_sink.relocs, }) } fn define_data( &mut self, _name: &str, writable: bool, align: Option<u8>, data: &DataContext, _namespace: &ModuleNamespace<Self>, ) -> ModuleResult<Self::CompiledData> { let &DataDescription { ref init, ref function_decls, ref data_decls, ref function_relocs, ref data_relocs, } = data.description(); let size = init.size(); let storage = if writable { self.writable_memory .allocate(size, align.unwrap_or(WRITABLE_DATA_ALIGNMENT)) .expect("TODO: handle OOM etc.") } else { self.readonly_memory .allocate(size, align.unwrap_or(READONLY_DATA_ALIGNMENT)) .expect("TODO: handle OOM etc.") }; match *init { Init::Uninitialized => { panic!("data is not initialized yet"); } Init::Zeros {.. } => { unsafe { ptr::write_bytes(storage, 0, size) }; } Init::Bytes { ref contents } => { let src = contents.as_ptr(); unsafe { ptr::copy_nonoverlapping(src, storage, size) }; } } let reloc = match self.isa.triple().pointer_width().unwrap() { PointerWidth::U16 => panic!(), PointerWidth::U32 => Reloc::Abs4, PointerWidth::U64 => Reloc::Abs8, }; let mut relocs = Vec::new(); for &(offset, id) in function_relocs { relocs.push(RelocRecord { reloc, offset, name: function_decls[id].clone(), addend: 0, }); } for &(offset, id, addend) in data_relocs { relocs.push(RelocRecord { reloc, offset, name: data_decls[id].clone(), addend, }); } Ok(Self::CompiledData { storage, size, relocs, }) } fn write_data_funcaddr( &mut self, _data: &mut Self::CompiledData, _offset: usize, _what: ir::FuncRef, ) { unimplemented!(); } fn write_data_dataaddr( &mut self, _data: &mut Self::CompiledData, _offset: usize, _what: ir::GlobalValue, _usize: Addend, ) { unimplemented!(); } fn finalize_function( &mut self, func: &Self::CompiledFunction, namespace: &ModuleNamespace<Self>, ) -> Self::FinalizedFunction { use std::ptr::write_unaligned; for &RelocRecord { reloc, offset, ref name, addend, } in &func.relocs { let ptr = func.code; debug_assert!((offset as usize) < func.size); let at = unsafe { ptr.offset(offset as isize) }; let base = self.get_definition(namespace, name); // TODO: Handle overflow. let what = unsafe { base.offset(addend as isize) }; match reloc { Reloc::Abs4 => { // TODO: Handle overflow. #[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_ptr_alignment))] unsafe { write_unaligned(at as *mut u32, what as u32) }; } Reloc::Abs8 => { #[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_ptr_alignment))] unsafe { write_unaligned(at as *mut u64, what as u64) }; } Reloc::X86PCRel4 | Reloc::X86CallPCRel4 => { // TODO: Handle overflow. 
let pcrel = ((what as isize) - (at as isize)) as i32; #[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_ptr_alignment))] unsafe { write_unaligned(at as *mut i32, pcrel) }; } Reloc::X86GOTPCRel4 | Reloc::X86CallPLTRel4 => panic!("unexpected PIC relocation"), _ => unimplemented!(), } } func.code } fn get_finalized_function(&self, func: &Self::CompiledFunction) -> Self::FinalizedFunction { func.code } fn finalize_data( &mut self, data: &Self::CompiledData, namespace: &ModuleNamespace<Self>, ) -> Self::FinalizedData { use std::ptr::write_unaligned; for &RelocRecord { reloc, offset, ref name, addend, } in &data.relocs { let ptr = data.storage; debug_assert!((offset as usize) < data.size); let at = unsafe { ptr.offset(offset as isize) }; let base = self.get_definition(namespace, name); // TODO: Handle overflow. let what = unsafe { base.offset(addend as isize) }; match reloc { Reloc::Abs4 => { // TODO: Handle overflow. #[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_ptr_alignment))] unsafe { write_unaligned(at as *mut u32, what as u32) }; } Reloc::Abs8 => { #[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_ptr_alignment))] unsafe { write_unaligned(at as *mut u64, what as u64) }; } Reloc::X86PCRel4
| Reloc::X86GOTPCRel4 | Reloc::X86CallPLTRel4 => panic!("unexpected text relocation in data"), _ => unimplemented!(), } } (data.storage, data.size) } fn get_finalized_data(&self, data: &Self::CompiledData) -> Self::FinalizedData { (data.storage, data.size) } fn publish(&mut self) { // Now that we're done patching, prepare the memory for execution! self.readonly_memory.set_readonly(); self.code_memory.set_readable_and_executable(); } /// SimpleJIT emits code and data into memory as it processes them, so it /// doesn't need to provide anything after the `Module` is complete. fn finish(self) {} } #[cfg(not(windows))] fn lookup_with_dlsym(name: &str) -> *const u8 { let c_str = CString::new(name).unwrap(); let c_str_ptr = c_str.as_ptr(); let sym = unsafe { libc::dlsym(libc::RTLD_DEFAULT, c_str_ptr) }; if sym.is_null() { panic!("can't resolve symbol {}", name); } sym as *const u8 } #[cfg(windows)] fn lookup_with_dlsym(name: &str) -> *const u8 { const MSVCRT_DLL: &[u8] = b"msvcrt.dll\0"; let c_str = CString::new(name).unwrap(); let c_str_ptr = c_str.as_ptr(); unsafe { let handles = [ // try to find the searched symbol in the currently running executable ptr::null_mut(), // try to find the searched symbol in local c runtime winapi::um::libloaderapi::GetModuleHandleA(MSVCRT_DLL.as_ptr() as *const i8), ]; for handle in &handles { let addr = winapi::um::libloaderapi::GetProcAddress(*handle, c_str_ptr); if addr.is_null() { continue; } return addr as *const u8; } let msg = if handles[1].is_null() { "(msvcrt not loaded)" } else { "" }; panic!("cannot resolve address of symbol {} {}", name, msg); } } struct SimpleJITRelocSink { pub relocs: Vec<RelocRecord>, } impl SimpleJITRelocSink { pub fn new() -> Self { Self { relocs: Vec::new() } } } impl RelocSink for SimpleJITRelocSink { fn reloc_ebb(&mut self, _offset: CodeOffset, _reloc: Reloc, _ebb_offset: CodeOffset) { unimplemented!(); } fn reloc_external( &mut self, offset: CodeOffset, reloc: Reloc, name: &ir::ExternalName, addend: Addend, ) { self.relocs.push(RelocRecord { offset, reloc, name: name.clone(), addend, }); } fn reloc_jt(&mut self, _offset: CodeOffset, reloc: Reloc, _jt: ir::JumpTable) { match reloc { Reloc::X86PCRelRodata4 => { // Not necessary to record this unless we are going to split apart code and its // jumptbl/rodata. } _ => { panic!("Unhandled reloc"); }
| Reloc::X86CallPCRel4
random_line_split
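For context on how this backend is consumed: callers normally drive it through `cranelift_module::Module`, which invokes the `declare_*`, `define_*`, and finalize methods above in order and calls `publish()` at the end. The sketch below is an assumed usage outline based on the cranelift-module API of roughly this vintage (the crate and method names are believed correct for that era but may differ between releases; the IR-building step is elided):

use cranelift_module::{default_libcall_names, Linkage, Module};
use cranelift_simplejit::{SimpleJITBackend, SimpleJITBuilder};

fn jit_one_function() -> *const u8 {
    // Wrap the backend in a Module, which tracks declarations and
    // drives compilation and finalization.
    let builder = SimpleJITBuilder::new(default_libcall_names());
    let mut module: Module<SimpleJITBackend> = Module::new(builder);

    let mut ctx = module.make_context();
    // ... build ctx.func with cranelift-frontend here ...

    let id = module
        .declare_function("example", Linkage::Export, &ctx.func.signature)
        .expect("declare failed");
    module.define_function(id, &mut ctx).expect("define failed");
    module.clear_context(&mut ctx);

    // Applies the relocations recorded above and makes the code executable.
    module.finalize_definitions();
    module.get_finalized_function(id)
}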
backend.rs
`. use crate::memory::Memory; use cranelift_codegen::binemit::{Addend, CodeOffset, NullTrapSink, Reloc, RelocSink}; use cranelift_codegen::isa::TargetIsa; use cranelift_codegen::{self, ir, settings}; use cranelift_module::{ Backend, DataContext, DataDescription, Init, Linkage, ModuleNamespace, ModuleResult, }; use cranelift_native; use libc; use std::collections::HashMap; use std::ffi::CString; use std::io::Write; use std::ptr; use target_lexicon::PointerWidth; #[cfg(windows)] use winapi; const EXECUTABLE_DATA_ALIGNMENT: u8 = 0x10; const WRITABLE_DATA_ALIGNMENT: u8 = 0x8; const READONLY_DATA_ALIGNMENT: u8 = 0x1; /// A builder for `SimpleJITBackend`. pub struct SimpleJITBuilder { isa: Box<dyn TargetIsa>, symbols: HashMap<String, *const u8>, libcall_names: Box<dyn Fn(ir::LibCall) -> String>, } impl SimpleJITBuilder { /// Create a new `SimpleJITBuilder`. /// /// The `libcall_names` function provides a way to translate `cranelift_codegen`'s `ir::LibCall` /// enum to symbols. LibCalls are inserted in the IR as part of the legalization for certain /// floating point instructions, and for stack probes. If you don't know what to use for this /// argument, use `cranelift_module::default_libcall_names()`. pub fn new(libcall_names: Box<dyn Fn(ir::LibCall) -> String>) -> Self { let flag_builder = settings::builder(); let isa_builder = cranelift_native::builder().unwrap_or_else(|msg| { panic!("host machine is not supported: {}", msg); }); let isa = isa_builder.finish(settings::Flags::new(flag_builder)); Self::with_isa(isa, libcall_names) } /// Create a new `SimpleJITBuilder` with an arbitrary target. This is mainly /// useful for testing. /// /// SimpleJIT requires a `TargetIsa` configured for non-PIC. /// /// To create a `SimpleJITBuilder` for native use, use the `new` constructor /// instead. /// /// The `libcall_names` function provides a way to translate `cranelift_codegen`'s `ir::LibCall` /// enum to symbols. LibCalls are inserted in the IR as part of the legalization for certain /// floating point instructions, and for stack probes. If you don't know what to use for this /// argument, use `cranelift_module::default_libcall_names()`. pub fn with_isa( isa: Box<dyn TargetIsa>, libcall_names: Box<dyn Fn(ir::LibCall) -> String>, ) -> Self { debug_assert!(!isa.flags().is_pic(), "SimpleJIT requires non-PIC code"); let symbols = HashMap::new(); Self { isa, symbols, libcall_names, } } /// Define a symbol in the internal symbol table. /// /// The JIT will use the symbol table to resolve names that are declared, /// but not defined, in the module being compiled. A common example is /// external functions. With this method, functions and data can be exposed /// to the code being compiled which are defined by the host. /// /// If a symbol is defined more than once, the most recent definition will /// be retained. /// /// If the JIT fails to find a symbol in its internal table, it will fall /// back to a platform-specific search (this typically involves searching /// the current process for public symbols, followed by searching the /// platform's C runtime). pub fn symbol<K>(&mut self, name: K, ptr: *const u8) -> &Self where K: Into<String>, { self.symbols.insert(name.into(), ptr); self } /// Define multiple symbols in the internal symbol table. /// /// Using this is equivalent to calling `symbol` on each element. 
pub fn symbols<It, K>(&mut self, symbols: It) -> &Self where It: IntoIterator<Item = (K, *const u8)>, K: Into<String>, { for (name, ptr) in symbols { self.symbols.insert(name.into(), ptr); } self } } /// A `SimpleJITBackend` implements `Backend` and emits code and data into memory where it can be /// directly called and accessed. /// /// See the `SimpleJITBuilder` for a convenient way to construct `SimpleJITBackend` instances. pub struct SimpleJITBackend { isa: Box<dyn TargetIsa>, symbols: HashMap<String, *const u8>, libcall_names: Box<dyn Fn(ir::LibCall) -> String>, code_memory: Memory, readonly_memory: Memory, writable_memory: Memory, } /// A record of a relocation to perform. struct RelocRecord { offset: CodeOffset, reloc: Reloc, name: ir::ExternalName, addend: Addend, } pub struct SimpleJITCompiledFunction { code: *mut u8, size: usize, relocs: Vec<RelocRecord>, } pub struct SimpleJITCompiledData { storage: *mut u8, size: usize, relocs: Vec<RelocRecord>, } impl SimpleJITBackend { fn lookup_symbol(&self, name: &str) -> *const u8 { match self.symbols.get(name) { Some(&ptr) => ptr, None => lookup_with_dlsym(name), } } fn get_definition( &self, namespace: &ModuleNamespace<Self>, name: &ir::ExternalName, ) -> *const u8 { match *name { ir::ExternalName::User {.. } => { if namespace.is_function(name) { let (def, name_str, _signature) = namespace.get_function_definition(&name); match def { Some(compiled) => compiled.code, None => self.lookup_symbol(name_str), } } else { let (def, name_str, _writable) = namespace.get_data_definition(&name); match def { Some(compiled) => compiled.storage, None => self.lookup_symbol(name_str), } } } ir::ExternalName::LibCall(ref libcall) => { let sym = (self.libcall_names)(*libcall); self.lookup_symbol(&sym) } _ => panic!("invalid ExternalName {}", name), } } } impl<'simple_jit_backend> Backend for SimpleJITBackend { type Builder = SimpleJITBuilder; /// SimpleJIT compiled function and data objects may have outstanding /// relocations that need to be performed before the memory can be used. /// These relocations are performed within `finalize_function` and /// `finalize_data`. type CompiledFunction = SimpleJITCompiledFunction; type CompiledData = SimpleJITCompiledData; /// SimpleJIT emits code and data into memory, and provides raw pointers /// to them. type FinalizedFunction = *const u8; type FinalizedData = (*mut u8, usize); /// SimpleJIT emits code and data into memory as it processes them, so it /// doesn't need to provide anything after the `Module` is complete. type Product = (); /// Create a new `SimpleJITBackend`. fn new(builder: SimpleJITBuilder) -> Self { Self { isa: builder.isa, symbols: builder.symbols, libcall_names: builder.libcall_names, code_memory: Memory::new(), readonly_memory: Memory::new(), writable_memory: Memory::new(), } } fn isa(&self) -> &dyn TargetIsa
fn declare_function(&mut self, _name: &str, _linkage: Linkage) { // Nothing to do. } fn declare_data( &mut self, _name: &str, _linkage: Linkage, _writable: bool, _align: Option<u8>, ) { // Nothing to do. } fn define_function( &mut self, name: &str, ctx: &cranelift_codegen::Context, _namespace: &ModuleNamespace<Self>, code_size: u32, ) -> ModuleResult<Self::CompiledFunction> { let size = code_size as usize; let ptr = self .code_memory .allocate(size, EXECUTABLE_DATA_ALIGNMENT) .expect("TODO: handle OOM etc."); if cfg!(target_os = "linux") && ::std::env::var_os("PERF_BUILDID_DIR").is_some() { let mut map_file = ::std::fs::OpenOptions::new() .create(true) .append(true) .open(format!("/tmp/perf-{}.map", ::std::process::id())) .unwrap(); let _ = writeln!(map_file, "{:x} {:x} {}", ptr as usize, code_size, name); } let mut reloc_sink = SimpleJITRelocSink::new(); // Ignore traps for now. For now, frontends should just avoid generating code // that traps. let mut trap_sink = NullTrapSink {}; unsafe { ctx.emit_to_memory(&*self.isa, ptr, &mut reloc_sink, &mut trap_sink) }; Ok(Self::CompiledFunction { code: ptr, size, relocs: reloc_sink.relocs, }) } fn define_data( &mut self, _name: &str, writable: bool, align: Option<u8>, data: &DataContext, _namespace: &ModuleNamespace<Self>, ) -> ModuleResult<Self::CompiledData> { let &DataDescription { ref init, ref function_decls, ref data_decls, ref function_relocs, ref data_relocs, } = data.description(); let size = init.size(); let storage = if writable { self.writable_memory .allocate(size, align.unwrap_or(WRITABLE_DATA_ALIGNMENT)) .expect("TODO: handle OOM etc.") } else { self.readonly_memory .allocate(size, align.unwrap_or(READONLY_DATA_ALIGNMENT)) .expect("TODO: handle OOM etc.") }; match *init { Init::Uninitialized => { panic!("data is not initialized yet"); } Init::Zeros {.. } => { unsafe { ptr::write_bytes(storage, 0, size) }; } Init::Bytes { ref contents } => { let src = contents.as_ptr(); unsafe { ptr::copy_nonoverlapping(src, storage, size) }; } } let reloc = match self.isa.triple().pointer_width().unwrap() { PointerWidth::U16 => panic!(), PointerWidth::U32 => Reloc::Abs4, PointerWidth::U64 => Reloc::Abs8, }; let mut relocs = Vec::new(); for &(offset, id) in function_relocs { relocs.push(RelocRecord { reloc, offset, name: function_decls[id].clone(), addend: 0, }); } for &(offset, id, addend) in data_relocs { relocs.push(RelocRecord { reloc, offset, name: data_decls[id].clone(), addend, }); } Ok(Self::CompiledData { storage, size, relocs, }) } fn write_data_funcaddr( &mut self, _data: &mut Self::CompiledData, _offset: usize, _what: ir::FuncRef, ) { unimplemented!(); } fn write_data_dataaddr( &mut self, _data: &mut Self::CompiledData, _offset: usize, _what: ir::GlobalValue, _usize: Addend, ) { unimplemented!(); } fn finalize_function( &mut self, func: &Self::CompiledFunction, namespace: &ModuleNamespace<Self>, ) -> Self::FinalizedFunction { use std::ptr::write_unaligned; for &RelocRecord { reloc, offset, ref name, addend, } in &func.relocs { let ptr = func.code; debug_assert!((offset as usize) < func.size); let at = unsafe { ptr.offset(offset as isize) }; let base = self.get_definition(namespace, name); // TODO: Handle overflow. let what = unsafe { base.offset(addend as isize) }; match reloc { Reloc::Abs4 => { // TODO: Handle overflow. 
#[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_ptr_alignment))] unsafe { write_unaligned(at as *mut u32, what as u32) }; } Reloc::Abs8 => { #[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_ptr_alignment))] unsafe { write_unaligned(at as *mut u64, what as u64) }; } Reloc::X86PCRel4 | Reloc::X86CallPCRel4 => { // TODO: Handle overflow. let pcrel = ((what as isize) - (at as isize)) as i32; #[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_ptr_alignment))] unsafe { write_unaligned(at as *mut i32, pcrel) }; } Reloc::X86GOTPCRel4 | Reloc::X86CallPLTRel4 => panic!("unexpected PIC relocation"), _ => unimplemented!(), } } func.code } fn get_finalized_function(&self, func: &Self::CompiledFunction) -> Self::FinalizedFunction { func.code } fn finalize_data( &mut self, data: &Self::CompiledData, namespace: &ModuleNamespace<Self>, ) -> Self::FinalizedData { use std::ptr::write_unaligned; for &RelocRecord { reloc, offset, ref name, addend, } in &data.relocs { let ptr = data.storage; debug_assert!((offset as usize) < data.size); let at = unsafe { ptr.offset(offset as isize) }; let base = self.get_definition(namespace, name); // TODO: Handle overflow. let what = unsafe { base.offset(addend as isize) }; match reloc { Reloc::Abs4 => { // TODO: Handle overflow. #[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_ptr_alignment))] unsafe { write_unaligned(at as *mut u32, what as u32) }; } Reloc::Abs8 => { #[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_ptr_alignment))] unsafe { write_unaligned(at as *mut u64, what as u64) }; } Reloc::X86PCRel4 | Reloc::X86CallPCRel4 | Reloc::X86GOTPCRel4 | Reloc::X86CallPLTRel4 => panic!("unexpected text relocation in data"), _ => unimplemented!(), } } (data.storage, data.size) } fn get_finalized_data(&self, data: &Self::CompiledData) -> Self::FinalizedData { (data.storage, data.size) } fn publish(&mut self) { // Now that we're done patching, prepare the memory for execution! self.readonly_memory.set_readonly(); self.code_memory.set_readable_and_executable(); } /// SimpleJIT emits code and data into memory as it processes them, so it /// doesn't need to provide anything after the `Module` is complete. 
fn finish(self) {} } #[cfg(not(windows))] fn lookup_with_dlsym(name: &str) -> *const u8 { let c_str = CString::new(name).unwrap(); let c_str_ptr = c_str.as_ptr(); let sym = unsafe { libc::dlsym(libc::RTLD_DEFAULT, c_str_ptr) }; if sym.is_null() { panic!("can't resolve symbol {}", name); } sym as *const u8 } #[cfg(windows)] fn lookup_with_dlsym(name: &str) -> *const u8 { const MSVCRT_DLL: &[u8] = b"msvcrt.dll\0"; let c_str = CString::new(name).unwrap(); let c_str_ptr = c_str.as_ptr(); unsafe { let handles = [ // try to find the searched symbol in the currently running executable ptr::null_mut(), // try to find the searched symbol in local c runtime winapi::um::libloaderapi::GetModuleHandleA(MSVCRT_DLL.as_ptr() as *const i8), ]; for handle in &handles { let addr = winapi::um::libloaderapi::GetProcAddress(*handle, c_str_ptr); if addr.is_null() { continue; } return addr as *const u8; } let msg = if handles[1].is_null() { "(msvcrt not loaded)" } else { "" }; panic!("cannot resolve address of symbol {} {}", name, msg); } } struct SimpleJITRelocSink { pub relocs: Vec<RelocRecord>, } impl SimpleJITRelocSink { pub fn new() -> Self { Self { relocs: Vec::new() } } } impl RelocSink for SimpleJITRelocSink { fn reloc_ebb(&mut self, _offset: CodeOffset, _reloc: Reloc, _ebb_offset: CodeOffset) { unimplemented!(); } fn reloc_external( &mut self, offset: CodeOffset, reloc: Reloc, name: &ir::ExternalName, addend: Addend, ) { self.relocs.push(RelocRecord { offset, reloc, name: name.clone(), addend, }); } fn reloc_jt(&mut self, _offset: CodeOffset, reloc: Reloc, _jt: ir::JumpTable) { match reloc { Reloc::X86PCRelRodata4 => { // Not necessary to record this unless we are going to split apart code and its // jumptbl/rodata. } _ => { panic!("Unhandled reloc"); }
{ &*self.isa }
identifier_body
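`lookup_symbol` checks the builder's symbol table first and only then falls back to `lookup_with_dlsym` (or `GetProcAddress` on Windows). A minimal sketch of pre-registering a host function so JIT'd code can resolve it by name; `host_log` is a made-up example symbol, not part of the original source:

use cranelift_simplejit::SimpleJITBuilder;

extern "C" fn host_log(x: u64) {
    println!("jit says: {}", x);
}

fn builder_with_host_symbols() -> SimpleJITBuilder {
    let mut builder = SimpleJITBuilder::new(cranelift_module::default_libcall_names());
    // Names registered here shadow the dlsym/GetProcAddress fallback, so a
    // function declared as "host_log" but never defined in the module
    // resolves to this host pointer.
    builder.symbol("host_log", host_log as *const u8);
    builder
}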
token.rs
/** * An advanced fungible token implementation. * */ use near_sdk::serde_json::{self, json}; use near_sdk::borsh::{ self, BorshDeserialize, BorshSerialize}; use near_sdk::{ env, near_bindgen, ext_contract, AccountId, Balance, Promise, StorageUsage}; use near_sdk::collections::LookupMap; use near_sdk::json_types::U128; use crate::receiver::{ ext_token_receiver }; use crate::utils::{ is_promise_success }; // TODO: All gas stipends are more or less random - check thoroughly const SINGLE_CALL_GAS: u64 = 200000000000000; /** * A balance ledger that keeps track of rollbackable promise transactions. * * TODO: Currently we lock balance by account, but this is not very flexible. * What we really want is to lock balance by a promise chain. However, this * would need to be able to identify the originating transaction in NEAR, * and currently I am not sure if this information is exposed * on the smart contract level. * * https://stackoverflow.com/questions/64170363/tracking-promise-chains-in-near-smart-contract-protocol * */ #[derive(BorshDeserialize, BorshSerialize)] pub struct Ledger { // Total balances, including locked, for each user pub balances: LookupMap<AccountId, Balance>, /// Account has a pending promise chain in progress, /// and the balance locked in this chain cannot be withdrawn. /// If a promise chain is successful, free the locked balance. /// If a promise chain fails, then the send() gets undone pub locked_balances: LookupMap<AccountId, Balance>, /// Total supply of the token pub total_supply: Balance, /// Helper counter for testing to diagnose /// how many rollbacks have occurred pub rollbacks: u64, } impl Ledger { /// Helper method to get the balance for `owner_id`. fn get_balance(&self, owner_id: &AccountId) -> u128 { match self.balances.get(owner_id) { Some(x) => return x, None => return 0, } } /// Helper method to set the balance for `owner_id` in the state. fn set_balance(&mut self, owner_id: &AccountId, balance: Balance) { assert!(env::is_valid_account_id(owner_id.as_bytes()), "Owner's account ID is invalid"); self.balances.insert(owner_id, &balance); } /// Helper method to get the locked balance for `owner_id`. fn get_locked_balance(&self, owner_id: &AccountId) -> Balance { match self.locked_balances.get(owner_id) { Some(x) => return x, None => return 0, } } /** * Send tokens to a new owner. * * message is optional byte data that is passed to the receiving smart contract. * notify is a flag that tells if we are going to call a smart contract, because this cannot currently be resolved at run time * within a NEAR smart contract. */ pub fn send(&mut self, owner_id: AccountId, new_owner_id: AccountId, amount: Balance, message: Vec<u8>) { assert!( env::is_valid_account_id(new_owner_id.as_bytes()), "New owner's account ID is invalid" ); let amount = amount.into(); if amount == 0 { env::panic(b"Can't transfer 0 tokens"); } assert_ne!( owner_id, new_owner_id, "The new owner should be different from the current owner" ); // Retrieving the account from the state. 
let source_balance = self.get_balance(&owner_id); let source_lock = self.get_locked_balance(&owner_id); // Check that the account has enough total balance if source_balance < amount { env::panic(format!("Not enough balance, need {}, has {}", amount, source_balance).as_bytes()); } // Check that the unlocked balance covers the amount if source_balance < amount + source_lock { env::panic(format!("Cannot send {} tokens, as account has {} and in tx lock {}", amount, source_balance, source_lock).as_bytes()); } self.set_balance(&owner_id, source_balance - amount); // Deposit amount to the new owner and save the new account to the state. let target_balance = self.get_balance(&new_owner_id); let new_target_balance = target_balance + amount; self.set_balance(&new_owner_id, new_target_balance); // This much of the user balance is locked up in promise chains let target_lock = self.get_locked_balance(&new_owner_id); self.locked_balances.insert(&new_owner_id, &(target_lock + amount)); let promise0 = env::promise_create( new_owner_id.clone(), b"is_receiver", &[], 0, SINGLE_CALL_GAS/3, ); let promise1 = env::promise_then( promise0, env::current_account_id(), b"handle_receiver", json!({ "old_owner_id": owner_id, "new_owner_id": new_owner_id, "amount_received": amount.to_string(), "amount_total": new_target_balance.to_string(), "message": message, }).to_string().as_bytes(), 0, SINGLE_CALL_GAS/3, ); env::promise_return(promise1); } /// All promise chains have been successful, release balance from the lock /// and consider the promise chain final. pub fn finalise(&mut self, new_owner_id: AccountId, amount: Balance) { let target_lock = self.get_locked_balance(&new_owner_id); assert!( target_lock >= amount, "Locked balance cannot go to negative" ); let new_amount = target_lock - amount; self.locked_balances.insert(&new_owner_id, &new_amount); } /// Smart contract call failed. We need to roll back the balance update pub fn rollback(&mut self, old_owner_id: AccountId, new_owner_id: AccountId, amount: Balance) { let target_lock = self.get_locked_balance(&new_owner_id); let target_balance = self.get_balance(&new_owner_id); let source_balance = self.get_balance(&old_owner_id); env::log(format!("Rolling back send of {}, from {} to {}, currently locked {}", amount, old_owner_id, new_owner_id, target_lock).as_bytes()); env::log(format!("New owner balance {}, old owner balance {}", target_balance, source_balance).as_bytes()); assert!( target_lock >= amount, "Locked balance cannot go to negative" ); // Roll back lock let new_amount = target_lock - amount; self.locked_balances.insert(&new_owner_id, &new_amount); // Rollback new owner let new_target_balance = target_balance - amount; self.set_balance(&new_owner_id, new_target_balance); // Rollback old owner let new_source_balance = source_balance + amount; self.set_balance(&old_owner_id, new_source_balance); self.rollbacks += 1; } } /* * Information about the token. * * We hold the name, symbol and homepage readily available on chain, but other information must be * fetched from the JSON metadata. This way we do not bloat the chain size and also make upgrading the information * somewhat easier. * * All metadata fields are optional. 
*/ #[derive(BorshDeserialize, BorshSerialize)] pub struct Metadata { // Name of the token pub name: String, // Symbol of the token pub symbol: String, // URL to the human readable page about the token pub web_link: String, // URL to the metadata file with more information about the token, like different icon sets pub metadata_link: String, } /** * Represents one token. */ #[near_bindgen] #[derive(BorshDeserialize, BorshSerialize)] pub struct Token { pub ledger: Ledger, pub metadata: Metadata, } impl Default for Token { fn default() -> Self { panic!("Token should be initialized before usage") } } #[near_bindgen] impl Token { /// Initializes the contract with the given total supply owned by the given `owner_id`. #[init] pub fn new(owner_id: AccountId, total_supply: Balance) -> Self { assert!(!env::state_exists(), "Already initialized"); let total_supply = total_supply.into(); // Initialize the ledger with the initial total supply let ledger = Ledger { balances: LookupMap::new(b"bal".to_vec()), locked_balances: LookupMap::new(b"lck".to_vec()), total_supply, rollbacks: 0, }; // Currently the constructor does not support passing of metadata. // Start with empty metadata, owner needs to initialize this // after the token has been created in another transaction let metadata = Metadata { name: String::from(""), symbol: String::from(""), web_link: String::from(""), metadata_link: String::from(""), }; let mut token = Self { ledger, metadata }; token.ledger.set_balance(&owner_id, total_supply); return token; } /// Returns total supply of tokens. pub fn get_total_supply(&self) -> Balance { self.ledger.total_supply.into() } /// Returns balance of the `owner_id` account. pub fn get_balance(&self, owner_id: AccountId) -> Balance { self.ledger.get_balance(&owner_id).into() } /// Returns balance locked in pending transactions pub fn get_locked_balance(&self, owner_id: AccountId) -> Balance
/// How many rollbacks we have had pub fn get_rollback_count(&self) -> u64 { self.ledger.rollbacks } /// Returns the name of the token. pub fn get_name(&self) -> &str { return &self.metadata.name; } /// Send owner's tokens to another person or a smart contract #[payable] pub fn send(&mut self, new_owner_id: AccountId, amount: Balance, message: Vec<u8>) { self.ledger.send(env::predecessor_account_id(), new_owner_id, amount, message); } /** * Called after trying to ask the receiving smart contract whether it can receive tokens. * * We get the interface test promise back. If the account was not a smart contract, finalise the transaction. * Otherwise trigger the smart contract notifier. */ pub fn handle_receiver(&mut self, old_owner_id: AccountId, new_owner_id: AccountId, amount_received: U128, amount_total: U128, message: Vec<u8>) { // Only callable by self assert_eq!(env::current_account_id(), env::predecessor_account_id()); env::log(b"handle_receiver reached"); let uint_amount_received: u128 = amount_received.into(); let uint_amount_total: u128 = amount_total.into(); if is_promise_success() { // The send() was destined to a compatible receiver smart contract. // Build another promise that notifies the smart contract // that it has received new tokens. env::log(b"Constructing smart contract notifier promise"); let promise0 = env::promise_create( new_owner_id.clone(), b"on_token_received", json!({ "sender_id": old_owner_id, "amount_received": amount_received, "amount_total": amount_total, "message": message, }).to_string().as_bytes(), 0, SINGLE_CALL_GAS/10, ); // Construct the promise that calls back the // token contract to finalise the transaction let promise1 = env::promise_then( promise0, env::current_account_id(), b"handle_token_received", json!({ "old_owner_id": old_owner_id, "new_owner_id": new_owner_id, "amount_received": amount_received, }).to_string().as_bytes(), 0, SINGLE_CALL_GAS/10, ); env::promise_return(promise1); } else { // Non-code account // Finalise transaction now. 
self.ledger.finalise(new_owner_id, uint_amount_received); } } /// Smart contract notification succeeded; free up any locked balance /// TODO: Add functionality so that the smart contract that received tokens can trigger a new promise chain here pub fn handle_token_received(&mut self, old_owner_id: AccountId, new_owner_id: AccountId, amount_received: U128) { // Only callable by self assert_eq!(env::current_account_id(), env::predecessor_account_id()); env::log(b"Checking for the need to rollback smart contract transaction"); let amount_received: u128 = amount_received.into(); // TODO: Have some nice error code logic here if is_promise_success() { self.ledger.finalise(new_owner_id, amount_received); } else { self.ledger.rollback(old_owner_id, new_owner_id, amount_received); } } } #[cfg(test)] mod tests { use super::*; use near_sdk::MockedBlockchain; use near_sdk::{testing_env, VMContext}; fn alice() -> AccountId { "alice.near".to_string() } fn bob() -> AccountId { "bob.near".to_string() } fn carol() -> AccountId { "carol.near".to_string() } fn get_context(predecessor_account_id: AccountId) -> VMContext { VMContext { current_account_id: alice(), signer_account_id: bob(), signer_account_pk: vec![0, 1, 2], predecessor_account_id, input: vec![], block_index: 0, block_timestamp: 0, account_balance: 1_000_000_000_000_000_000_000_000_000u128, account_locked_balance: 0, storage_usage: 10u64.pow(6), attached_deposit: 0, prepaid_gas: 10u64.pow(18), random_seed: vec![0, 1, 2], is_view: false, output_data_receivers: vec![], epoch_height: 0, } } #[test] fn test_new() { let context = get_context(carol()); testing_env!(context); let total_supply = 1_000_000_000_000_000u128; let contract = Token::new(bob(), total_supply.into()); assert_eq!(contract.get_total_supply(), total_supply); assert_eq!(contract.get_balance(bob()), total_supply); } }
{ self.ledger.get_locked_balance(&owner_id).into() }
identifier_body
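The ledger in these token.rs records maintains a spendable-balance invariant: an account can send at most `balance - locked` tokens, which is exactly what the two checks at the top of `send()` enforce. A small self-contained model of that rule (plain Rust, independent of near-sdk; both helper names are hypothetical):

/// Spendable portion of a balance under the ledger's locking rule.
fn unlocked(balance: u128, locked: u128) -> u128 {
    balance.saturating_sub(locked)
}

/// Mirrors the checks in Ledger::send(): a non-zero amount that is
/// covered by the unlocked part of the sender's balance.
fn can_send(balance: u128, locked: u128, amount: u128) -> bool {
    amount > 0 && amount <= unlocked(balance, locked)
}

#[test]
fn lock_blocks_spending() {
    assert!(can_send(100, 0, 100));
    assert!(!can_send(100, 1, 100)); // locked funds cannot be sent
    assert!(!can_send(100, 0, 0));   // zero transfers are rejected
}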
token.rs
/** * An advanced fungible token implementation. * */ use near_sdk::serde_json::{self, json}; use near_sdk::borsh::{ self, BorshDeserialize, BorshSerialize}; use near_sdk::{ env, near_bindgen, ext_contract, AccountId, Balance, Promise, StorageUsage}; use near_sdk::collections::LookupMap; use near_sdk::json_types::U128; use crate::receiver::{ ext_token_receiver }; use crate::utils::{ is_promise_success }; // TODO: All gas stipends are more or less random - check thoroughly const SINGLE_CALL_GAS: u64 = 200000000000000; /** * A balance ledger that keeps track of rollbackable promise transactions. * * TODO: Currently we lock balance by account, but this is not very flexible. * What we really want is to lock balance by a promise chain. However, this * would need to be able to identify the originating transaction in NEAR, * and currently I am not sure if this information is exposed * on the smart contract level. * * https://stackoverflow.com/questions/64170363/tracking-promise-chains-in-near-smart-contract-protocol * */ #[derive(BorshDeserialize, BorshSerialize)] pub struct Ledger { // Total balances, including locked, for each user pub balances: LookupMap<AccountId, Balance>, /// Account has a pending promise chain in progress, /// and the balance locked in this chain cannot be withdrawn. /// If a promise chain is successful, free the locked balance. /// If a promise chain fails, then the send() gets undone pub locked_balances: LookupMap<AccountId, Balance>, /// Total supply of the token pub total_supply: Balance, /// Helper counter for testing to diagnose /// how many rollbacks have occurred pub rollbacks: u64, } impl Ledger { /// Helper method to get the balance for `owner_id`. fn get_balance(&self, owner_id: &AccountId) -> u128 { match self.balances.get(owner_id) { Some(x) => return x, None => return 0, } } /// Helper method to set the balance for `owner_id` in the state. fn set_balance(&mut self, owner_id: &AccountId, balance: Balance) { assert!(env::is_valid_account_id(owner_id.as_bytes()), "Owner's account ID is invalid"); self.balances.insert(owner_id, &balance); } /// Helper method to get the locked balance for `owner_id`. fn get_locked_balance(&self, owner_id: &AccountId) -> Balance { match self.locked_balances.get(owner_id) { Some(x) => return x, None => return 0, } } /** * Send tokens to a new owner. * * message is optional byte data that is passed to the receiving smart contract. * notify is a flag that tells if we are going to call a smart contract, because this cannot currently be resolved at run time * within a NEAR smart contract. */ pub fn send(&mut self, owner_id: AccountId, new_owner_id: AccountId, amount: Balance, message: Vec<u8>) { assert!( env::is_valid_account_id(new_owner_id.as_bytes()), "New owner's account ID is invalid" ); let amount = amount.into(); if amount == 0 { env::panic(b"Can't transfer 0 tokens"); } assert_ne!( owner_id, new_owner_id, "The new owner should be different from the current owner" ); // Retrieving the account from the state. 
let source_balance = self.get_balance(&owner_id); let source_lock = self.get_locked_balance(&owner_id); // Check that the account has enough total balance if source_balance < amount { env::panic(format!("Not enough balance, need {}, has {}", amount, source_balance).as_bytes()); } // Check that the unlocked balance covers the amount if source_balance < amount + source_lock { env::panic(format!("Cannot send {} tokens, as account has {} and in tx lock {}", amount, source_balance, source_lock).as_bytes()); } self.set_balance(&owner_id, source_balance - amount); // Deposit amount to the new owner and save the new account to the state. let target_balance = self.get_balance(&new_owner_id); let new_target_balance = target_balance + amount; self.set_balance(&new_owner_id, new_target_balance); // This much of the user balance is locked up in promise chains let target_lock = self.get_locked_balance(&new_owner_id); self.locked_balances.insert(&new_owner_id, &(target_lock + amount)); let promise0 = env::promise_create( new_owner_id.clone(), b"is_receiver", &[], 0, SINGLE_CALL_GAS/3, ); let promise1 = env::promise_then( promise0, env::current_account_id(), b"handle_receiver", json!({ "old_owner_id": owner_id, "new_owner_id": new_owner_id, "amount_received": amount.to_string(), "amount_total": new_target_balance.to_string(), "message": message, }).to_string().as_bytes(), 0, SINGLE_CALL_GAS/3, ); env::promise_return(promise1); } /// All promise chains have been successful, release balance from the lock /// and consider the promise chain final. pub fn finalise(&mut self, new_owner_id: AccountId, amount: Balance) { let target_lock = self.get_locked_balance(&new_owner_id); assert!( target_lock >= amount, "Locked balance cannot go to negative" ); let new_amount = target_lock - amount; self.locked_balances.insert(&new_owner_id, &new_amount); } /// Smart contract call failed. We need to roll back the balance update pub fn rollback(&mut self, old_owner_id: AccountId, new_owner_id: AccountId, amount: Balance) { let target_lock = self.get_locked_balance(&new_owner_id); let target_balance = self.get_balance(&new_owner_id); let source_balance = self.get_balance(&old_owner_id); env::log(format!("Rolling back send of {}, from {} to {}, currently locked {}", amount, old_owner_id, new_owner_id, target_lock).as_bytes()); env::log(format!("New owner balance {}, old owner balance {}", target_balance, source_balance).as_bytes()); assert!( target_lock >= amount, "Locked balance cannot go to negative" ); // Roll back lock let new_amount = target_lock - amount; self.locked_balances.insert(&new_owner_id, &new_amount); // Rollback new owner let new_target_balance = target_balance - amount; self.set_balance(&new_owner_id, new_target_balance); // Rollback old owner let new_source_balance = source_balance + amount; self.set_balance(&old_owner_id, new_source_balance); self.rollbacks += 1; } } /* * Information about the token. * * We hold the name, symbol and homepage readily available on chain, but other information must be * fetched from the JSON metadata. This way we do not bloat the chain size and also make upgrading the information * somewhat easier. * * All metadata fields are optional. 
*/ #[derive(BorshDeserialize, BorshSerialize)] pub struct Metadata { // Name of the token pub name: String, // Symbol of the token pub symbol: String, // URL to the human readable page about the token pub web_link: String, // URL to the metadata file with more information about the token, like different icon sets pub metadata_link: String, } /** * Represents one token. */ #[near_bindgen] #[derive(BorshDeserialize, BorshSerialize)] pub struct Token { pub ledger: Ledger, pub metadata: Metadata, } impl Default for Token { fn default() -> Self { panic!("Token should be initialized before usage") } } #[near_bindgen] impl Token { /// Initializes the contract with the given total supply owned by the given `owner_id`. #[init] pub fn new(owner_id: AccountId, total_supply: Balance) -> Self { assert!(!env::state_exists(), "Already initialized"); let total_supply = total_supply.into(); // Initialize the ledger with the initial total supply let ledger = Ledger { balances: LookupMap::new(b"bal".to_vec()), locked_balances: LookupMap::new(b"lck".to_vec()), total_supply, rollbacks: 0, }; // Currently the constructor does not support passing of metadata. // Start with empty metadata, owner needs to initialize this // after the token has been created in another transaction let metadata = Metadata { name: String::from(""), symbol: String::from(""), web_link: String::from(""), metadata_link: String::from(""), }; let mut token = Self { ledger, metadata }; token.ledger.set_balance(&owner_id, total_supply); return token; } /// Returns total supply of tokens. pub fn get_total_supply(&self) -> Balance { self.ledger.total_supply.into() } /// Returns balance of the `owner_id` account. pub fn get_balance(&self, owner_id: AccountId) -> Balance { self.ledger.get_balance(&owner_id).into() } /// Returns balance locked in pending transactions pub fn get_locked_balance(&self, owner_id: AccountId) -> Balance { self.ledger.get_locked_balance(&owner_id).into() } /// How many rollbacks we have had pub fn get_rollback_count(&self) -> u64 { self.ledger.rollbacks } /// Returns the name of the token. pub fn get_name(&self) -> &str { return &self.metadata.name; } /// Send owner's tokens to another person or a smart contract #[payable] pub fn send(&mut self, new_owner_id: AccountId, amount: Balance, message: Vec<u8>) { self.ledger.send(env::predecessor_account_id(), new_owner_id, amount, message); } /** * Called after trying to ask the receiving smart contract whether it can receive tokens. * * We get the interface test promise back. If the account was not a smart contract, finalise the transaction. * Otherwise trigger the smart contract notifier. */ pub fn handle_receiver(&mut self, old_owner_id: AccountId, new_owner_id: AccountId, amount_received: U128, amount_total: U128, message: Vec<u8>) { // Only callable by self assert_eq!(env::current_account_id(), env::predecessor_account_id()); env::log(b"handle_receiver reached"); let uint_amount_received: u128 = amount_received.into(); let uint_amount_total: u128 = amount_total.into(); if is_promise_success()
// Construct the promise that calls back the // token contract to finalise the transaction let promise1 = env::promise_then( promise0, env::current_account_id(), b"handle_token_received", json!({ "old_owner_id": old_owner_id, "new_owner_id": new_owner_id, "amount_received": amount_received, }).to_string().as_bytes(), 0, SINGLE_CALL_GAS/10, ); env::promise_return(promise1); } else { // Non-code account // Finalise transaction now. self.ledger.finalise(new_owner_id, uint_amount_received); } } /// Smart contract notification succeeded; free up any locked balance /// TODO: Add functionality so that the smart contract that received tokens can trigger a new promise chain here pub fn handle_token_received(&mut self, old_owner_id: AccountId, new_owner_id: AccountId, amount_received: U128) { // Only callable by self assert_eq!(env::current_account_id(), env::predecessor_account_id()); env::log(b"Checking for the need to rollback smart contract transaction"); let amount_received: u128 = amount_received.into(); // TODO: Have some nice error code logic here if is_promise_success() { self.ledger.finalise(new_owner_id, amount_received); } else { self.ledger.rollback(old_owner_id, new_owner_id, amount_received); } } } #[cfg(test)] mod tests { use super::*; use near_sdk::MockedBlockchain; use near_sdk::{testing_env, VMContext}; fn alice() -> AccountId { "alice.near".to_string() } fn bob() -> AccountId { "bob.near".to_string() } fn carol() -> AccountId { "carol.near".to_string() } fn get_context(predecessor_account_id: AccountId) -> VMContext { VMContext { current_account_id: alice(), signer_account_id: bob(), signer_account_pk: vec![0, 1, 2], predecessor_account_id, input: vec![], block_index: 0, block_timestamp: 0, account_balance: 1_000_000_000_000_000_000_000_000_000u128, account_locked_balance: 0, storage_usage: 10u64.pow(6), attached_deposit: 0, prepaid_gas: 10u64.pow(18), random_seed: vec![0, 1, 2], is_view: false, output_data_receivers: vec![], epoch_height: 0, } } #[test] fn test_new() { let context = get_context(carol()); testing_env!(context); let total_supply = 1_000_000_000_000_000u128; let contract = Token::new(bob(), total_supply.into()); assert_eq!(contract.get_total_supply(), total_supply); assert_eq!(contract.get_balance(bob()), total_supply); } }
{ // The send() was destined to a compatible receiver smart contract. // Build another promise that notifies the smart contract // that it has received new tokens. env::log(b"Constructing smart contract notifier promise"); let promise0 = env::promise_create( new_owner_id.clone(), b"on_token_received", json!({ "sender_id": old_owner_id, "amount_received": amount_received, "amount_total": amount_total, "message": message, }).to_string().as_bytes(), 0, SINGLE_CALL_GAS/10, );
conditional_block
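Both `handle_receiver` and `handle_token_received` branch on `is_promise_success()`, which is imported from `crate::utils` but not shown in this file. A common NEAR SDK implementation of such a callback helper looks like the sketch below; this is an assumption about the utils module, not its verbatim code:

use near_sdk::{env, PromiseResult};

/// Returns true when the single promise this callback waits on succeeded.
pub fn is_promise_success() -> bool {
    assert_eq!(
        env::promise_results_count(),
        1,
        "Callback expects exactly one promise result"
    );
    matches!(env::promise_result(0), PromiseResult::Successful(_))
}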
token.rs
/** * An advanced fungible token implementation. * */ use near_sdk::serde_json::{self, json}; use near_sdk::borsh::{ self, BorshDeserialize, BorshSerialize}; use near_sdk::{ env, near_bindgen, ext_contract, AccountId, Balance, Promise, StorageUsage}; use near_sdk::collections::LookupMap; use near_sdk::json_types::U128; use crate::receiver::{ ext_token_receiver }; use crate::utils::{ is_promise_success }; // TODO: All gas stipends are more or less random - check througfully const SINGLE_CALL_GAS: u64 = 200000000000000; /** * A balance ledger that keeps track of rollbackable promise transactions. * * TODO: Currently we lock balance by account, but this is not very flexible. * What we really want to is lock balancy by a promise chain. However, this * would need to be able to identify the originating transaction in NEAR * and currently I am not sure if this information is exposed * on the smart contract level. * * https://stackoverflow.com/questions/64170363/tracking-promise-chains-in-near-smart-contract-protocol * */ #[derive(BorshDeserialize, BorshSerialize)] pub struct Ledger { // Total balances, including locked, for each user pub balances: LookupMap<AccountId, Balance>, /// Account has a pending promise chain in progress /// and balance locked is this chain cannot be withdawn. /// If a promise chain is succesful free the locked balance. /// If a promise chain fails, then the send() gets undoed pub locked_balances: LookupMap<AccountId, Balance>, /// Total supply of the token pub total_supply: Balance, /// Helper counter for testing to diagnose /// how many rollbacks have occured pub rollbacks: u64, } impl Ledger { /// Helper method to get the account details for `owner_id`. fn get_balance(&self, owner_id: &AccountId) -> u128 { match self.balances.get(owner_id) { Some(x) => return x, None => return 0, } } /// Helper method to set the account details for `owner_id` to the state. fn set_balance(&mut self, owner_id: &AccountId, balance: Balance) { assert!(env::is_valid_account_id(owner_id.as_bytes()), "Owner's account ID is invalid"); self.balances.insert(owner_id, &balance); } /// Helper method to get the account details for `owner_id`. fn get_locked_balance(&self, owner_id: &AccountId) -> Balance { match self.locked_balances.get(owner_id) { Some(x) => return x, None => return 0, } } /** * Send tokens to a new owner. * * message is an optional byte data that is passed to the receiving smart contract. * notify is a flag to tell if we are going to call a smart contract, because this cannot be currently resolved run-time * within NEAR smart contract. */ pub fn send(&mut self, owner_id: AccountId, new_owner_id: AccountId, amount: Balance, message: Vec<u8>) { assert!( env::is_valid_account_id(new_owner_id.as_bytes()), "New owner's account ID is invalid" ); let amount = amount.into(); if amount == 0 { env::panic(b"Can't transfer 0 tokens"); } assert_ne!( owner_id, new_owner_id, "The new owner should be different from the current owner" ); // Retrieving the account from the state. 
let source_balance = self.get_balance(&owner_id); let source_lock = self.get_locked_balance(&owner_id); // Checking and updating unlocked balance if source_balance < amount { env::panic(format!("Not enough balance, need {}, has {}", amount, source_balance).as_bytes()); } // Checking and updating unlocked balance if source_balance < amount + source_lock { env::panic(format!("Cannot send {} tokens, as account has {} and in tx lock {}", amount, source_balance, source_lock).as_bytes()); } self.set_balance(&owner_id, source_balance - amount); // Deposit amount to the new owner and save the new account to the state. let target_balance = self.get_balance(&new_owner_id); let new_target_balance = target_balance + amount; self.set_balance(&new_owner_id, new_target_balance); // This much of user balance is lockedup in promise chains self.set_balance(&new_owner_id, new_target_balance); let target_lock = self.get_locked_balance(&new_owner_id); self.locked_balances.insert(&new_owner_id, &(target_lock + amount)); let promise0 = env::promise_create( new_owner_id.clone(), b"is_receiver", &[], 0, SINGLE_CALL_GAS/3, ); let promise1 = env::promise_then( promise0, env::current_account_id(), b"handle_receiver", json!({ "old_owner_id": owner_id, "new_owner_id": new_owner_id, "amount_received": amount.to_string(), "amount_total": new_target_balance.to_string(), "message": message, }).to_string().as_bytes(), 0, SINGLE_CALL_GAS/3, ); env::promise_return(promise1); } /// All promise chains have been successful, release balance from the lock /// and consider the promise chain final. pub fn finalise(&mut self, new_owner_id: AccountId, amount: Balance) { let target_lock = self.get_locked_balance(&new_owner_id); assert!( target_lock >= amount, "Locked balance cannot go to negative" ); let new_amount = target_lock - amount; self.locked_balances.insert(&new_owner_id, &new_amount); } /// Smart contract call failed. We need to roll back the balance update pub fn rollback(&mut self, old_owner_id: AccountId, new_owner_id: AccountId, amount: Balance) { let target_lock = self.get_locked_balance(&new_owner_id); let target_balance = self.get_balance(&new_owner_id); let source_balance = self.get_balance(&old_owner_id); env::log(format!("Rolling back back send of {}, from {} to {}, currently locked {}", amount, old_owner_id, new_owner_id, target_lock).as_bytes()); env::log(format!("New owner balance {}, old owner balance {}", target_balance, source_balance).as_bytes()); assert!( target_lock >= amount, "Locked balance cannot go to negative" ); // Roll back lock let new_amount = target_lock - amount; self.locked_balances.insert(&new_owner_id, &new_amount); self.balances.insert(&new_owner_id, &new_amount); // Rollback new owner let new_target_balance = target_balance - amount; self.set_balance(&new_owner_id, new_target_balance); // Rollback old owner let new_source_balance = source_balance + amount; self.set_balance(&old_owner_id, new_source_balance); let target_balance = self.get_balance(&new_owner_id); let source_balance = self.get_balance(&old_owner_id); self.rollbacks += 1; } } /* * Information about the token. * * We hold the name, symbol and homepage readibly available on chain, but other information must be * from the JSON data. This way we do not bloat the chain size and also make upgrading the information * somewhat easier. * * All metadata fields are optional. 
 */
#[derive(BorshDeserialize, BorshSerialize)]
pub struct Metadata {
    /// Name of the token
    pub name: String,

    /// Symbol of the token
    pub symbol: String,

    /// URL to the human readable page about the token
    pub web_link: String,

    /// URL to the metadata file with more information about the token, like different icon sets
    pub metadata_link: String,
}

/**
 * Represents one token.
 */
#[near_bindgen]
#[derive(BorshDeserialize, BorshSerialize)]
pub struct Token {
    pub ledger: Ledger,
    pub metadata: Metadata,
}

impl Default for Token {
    fn default() -> Self {
        panic!("Token should be initialized before usage")
    }
}

#[near_bindgen]
impl Token {
    /// Initializes the contract with the given total supply owned by the given `owner_id`.
    #[init]
    pub fn new(owner_id: AccountId, total_supply: Balance) -> Self {
        assert!(!env::state_exists(), "Already initialized");

        // Initialize the ledger with the initial total supply
        let ledger = Ledger {
            balances: LookupMap::new(b"bal".to_vec()),
            locked_balances: LookupMap::new(b"lck".to_vec()),
            total_supply,
            rollbacks: 0,
        };

        // Currently the constructor does not support passing of metadata.
        // Start with empty metadata; the owner needs to initialize this
        // after the token has been created in another transaction.
        let metadata = Metadata {
            name: String::from(""),
            symbol: String::from(""),
            web_link: String::from(""),
            metadata_link: String::from(""),
        };

        let mut token = Self { ledger, metadata };
        token.ledger.set_balance(&owner_id, total_supply);
        token
    }

    /// Returns total supply of tokens.
    pub fn get_total_supply(&self) -> Balance {
        self.ledger.total_supply
    }

    /// Returns balance of the `owner_id` account.
    pub fn get_balance(&self, owner_id: AccountId) -> Balance {
        self.ledger.get_balance(&owner_id)
    }

    /// Returns balance locked in pending transactions.
    pub fn get_locked_balance(&self, owner_id: AccountId) -> Balance {
        self.ledger.get_locked_balance(&owner_id)
    }

    /// How many rollbacks we have had.
    pub fn get_rollback_count(&self) -> u64 {
        self.ledger.rollbacks
    }

    /// Returns the name of the token.
    pub fn get_name(&self) -> &str {
        &self.metadata.name
    }

    /// Send owner's tokens to another person or a smart contract.
    #[payable]
    pub fn send(&mut self, new_owner_id: AccountId, amount: Balance, message: Vec<u8>) {
        self.ledger.send(env::predecessor_account_id(), new_owner_id, amount, message);
    }

    /**
     * Called after probing whether the receiving account can receive tokens.
     *
     * We get the interface test promise back. If the account was not a smart contract,
     * finalise the transaction. Otherwise trigger the smart contract notifier.
     */
    pub fn handle_receiver(&mut self, old_owner_id: AccountId, new_owner_id: AccountId, amount_received: U128, amount_total: U128, message: Vec<u8>) {
        // Only callable by self
        assert_eq!(env::current_account_id(), env::predecessor_account_id());

        env::log(b"handle_receiver reached");

        let uint_amount_received: u128 = amount_received.into();

        if is_promise_success() {
            // The send() was destined to a compatible receiver smart contract.
            // Build another promise that notifies the smart contract
            // that it has received new tokens.
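            // The full promise chain for a contract receiver (a sketch):
            //
            //   send()                          on this token contract
            //     -> is_receiver                on the receiver (interface probe)
            //     -> handle_receiver            back on this contract
            //          -> on_token_received     on the receiver (notification)
            //          -> handle_token_received back on this contract (finalise or rollback)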
env::log(b"Constructing smart contract notifier promise"); let promise0 = env::promise_create( new_owner_id.clone(), b"on_token_received", json!({ "sender_id": old_owner_id, "amount_received": amount_received, "amount_total": amount_total, "message": message, }).to_string().as_bytes(), 0, SINGLE_CALL_GAS/10, ); // Construct the promise that calls back the // token contract to finalise the transaction let promise1 = env::promise_then( promise0, env::current_account_id(), b"handle_token_received", json!({ "old_owner_id": old_owner_id, "new_owner_id": new_owner_id, "amount_received": amount_received, }).to_string().as_bytes(), 0, SINGLE_CALL_GAS/10, ); env::promise_return(promise1); } else { // Non-code account // Finalise transaction now. self.ledger.finalise(new_owner_id, uint_amount_received); } } /// Smart contract notify succeed, free up any locked balance /// TODO: Add functionality so that the smart contract that received tokens can trigger a new promise chain here pub fn handle_token_received(&mut self, old_owner_id: AccountId, new_owner_id: AccountId, amount_received: U128) { // Only callable by self assert_eq!(env::current_account_id(), env::predecessor_account_id()); env::log(b"Checking for the need to rollback smart contract transaction"); let amount_received: u128 = amount_received.into(); // TODO: Have some nice error code logic here if is_promise_success() { self.ledger.finalise(new_owner_id, amount_received); } else { self.ledger.rollback(old_owner_id, new_owner_id, amount_received); } } } #[cfg(test)] mod tests { use super::*; use near_sdk::MockedBlockchain; use near_sdk::{testing_env, VMContext}; fn alice() -> AccountId { "alice.near".to_string() } fn bob() -> AccountId { "bob.near".to_string() } fn carol() -> AccountId { "carol.near".to_string() } fn get_context(predecessor_account_id: AccountId) -> VMContext { VMContext { current_account_id: alice(), signer_account_id: bob(), signer_account_pk: vec![0, 1, 2], predecessor_account_id, input: vec![],
            block_index: 0,
            block_timestamp: 0,
            account_balance: 1_000_000_000_000_000_000_000_000_000u128,
            account_locked_balance: 0,
            storage_usage: 10u64.pow(6),
            attached_deposit: 0,
            prepaid_gas: 10u64.pow(18),
            random_seed: vec![0, 1, 2],
            is_view: false,
            output_data_receivers: vec![],
            epoch_height: 0,
        }
    }

    #[test]
    fn test_new() {
        let context = get_context(carol());
        testing_env!(context);
        let total_supply = 1_000_000_000_000_000u128;
        let contract = Token::new(bob(), total_supply.into());
        assert_eq!(contract.get_total_supply(), total_supply);
        assert_eq!(contract.get_balance(bob()), total_supply);
    }
}
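// A hedged sketch of a send-flow unit test: it assumes the mocked blockchain
// accepts `env::promise_create` during unit tests. It lives in its own module
// with its own context helper, because the items in `mod tests` above are
// private to that module.
#[cfg(test)]
mod send_tests {
    use super::*;
    use near_sdk::MockedBlockchain;
    use near_sdk::{testing_env, VMContext};

    fn context(predecessor_account_id: AccountId) -> VMContext {
        VMContext {
            current_account_id: "token.near".to_string(),
            signer_account_id: predecessor_account_id.clone(),
            signer_account_pk: vec![0, 1, 2],
            predecessor_account_id,
            input: vec![],
            block_index: 0,
            block_timestamp: 0,
            account_balance: 10u128.pow(27),
            account_locked_balance: 0,
            storage_usage: 10u64.pow(6),
            attached_deposit: 0,
            prepaid_gas: 10u64.pow(18),
            random_seed: vec![0, 1, 2],
            is_view: false,
            output_data_receivers: vec![],
            epoch_height: 0,
        }
    }

    #[test]
    fn send_locks_the_received_amount() {
        testing_env!(context("carol.near".to_string()));
        let mut contract = Token::new("carol.near".to_string(), 1_000_000);

        // send() starts a promise chain, so the received amount stays locked
        // until handle_receiver / handle_token_received resolve it.
        contract.send("bob.near".to_string(), 100, vec![]);

        assert_eq!(contract.get_balance("bob.near".to_string()), 100);
        assert_eq!(contract.get_locked_balance("bob.near".to_string()), 100);
        assert_eq!(contract.get_balance("carol.near".to_string()), 999_900);
    }
}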
processor.rs
    fn system_info_os_name(info: *const SystemInfo) -> *mut c_char;
    fn system_info_os_version(info: *const SystemInfo) -> *mut c_char;
    fn system_info_cpu_family(info: *const SystemInfo) -> *mut c_char;
    fn system_info_cpu_info(info: *const SystemInfo) -> *mut c_char;
    fn system_info_cpu_count(info: *const SystemInfo) -> u32;

    fn process_minidump(
        buffer: *const c_char,
        buffer_size: usize,
        symbols: *const SymbolEntry,
        symbol_count: usize,
        result: *mut ProcessResult,
    ) -> *mut IProcessState;
    fn process_state_delete(state: *mut IProcessState);
    fn process_state_threads(
        state: *const IProcessState,
        size_out: *mut usize,
    ) -> *const *const CallStack;
    fn process_state_requesting_thread(state: *const IProcessState) -> i32;
    fn process_state_timestamp(state: *const IProcessState) -> u64;
    fn process_state_crashed(state: *const IProcessState) -> bool;
    fn process_state_crash_address(state: *const IProcessState) -> u64;
    fn process_state_crash_reason(state: *const IProcessState) -> *mut c_char;
    fn process_state_assertion(state: *const IProcessState) -> *mut c_char;
    fn process_state_system_info(state: *const IProcessState) -> *mut SystemInfo;
}

/// An error returned when parsing invalid `CodeModuleId`s.
pub type ParseCodeModuleIdError = ParseDebugIdError;

/// Breakpad code module IDs.
///
/// **Example:**
///
/// ```
/// # extern crate symbolic_common;
/// # extern crate symbolic_minidump;
/// use std::str::FromStr;
/// use symbolic_minidump::processor::CodeModuleId;
/// # use symbolic_minidump::processor::ParseCodeModuleIdError;
///
/// # fn foo() -> Result<(), ParseCodeModuleIdError> {
/// let id = CodeModuleId::from_str("DFB8E43AF2423D73A453AEB6A777EF75a")?;
/// assert_eq!("DFB8E43AF2423D73A453AEB6A777EF75a".to_string(), id.to_string());
/// # Ok(())
/// # }
///
/// # fn main() { foo().unwrap() }
/// ```
#[derive(Debug, Default, Eq, PartialEq, Ord, PartialOrd, Hash, Clone, Copy)]
pub struct CodeModuleId {
    inner: DebugId,
}

impl CodeModuleId {
    /// Constructs a `CodeModuleId` from its `uuid` and `age` parts.
    pub fn from_parts(uuid: Uuid, age: u32) -> CodeModuleId {
        CodeModuleId {
            inner: DebugId::from_parts(uuid, age),
        }
    }

    /// Returns the UUID part of the code module id.
    pub fn uuid(&self) -> Uuid {
        self.inner.uuid()
    }

    /// Returns the appendix part of the code module id.
    ///
    /// On Windows, this is an incrementing counter to identify the build.
    /// On all other platforms, this value will always be zero.
    pub fn age(&self) -> u32 {
        self.inner.appendix()
    }

    /// Converts this code module id into a debug identifier.
    pub fn as_object_id(&self) -> DebugId {
        self.inner
    }
}

impl From<DebugId> for CodeModuleId {
    fn from(inner: DebugId) -> Self {
        CodeModuleId { inner }
    }
}

impl Into<DebugId> for CodeModuleId {
    fn into(self) -> DebugId {
        self.inner
    }
}

impl fmt::Display for CodeModuleId {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.inner.breakpad().fmt(f)
    }
}

impl str::FromStr for CodeModuleId {
    type Err = ParseCodeModuleIdError;

    fn from_str(string: &str) -> Result<CodeModuleId, ParseCodeModuleIdError> {
        Ok(CodeModuleId {
            inner: DebugId::from_breakpad(string)?,
        })
    }
}

#[cfg(feature = "with_serde")]
derive_deserialize_from_str!(CodeModuleId, "CodeModuleId");

#[cfg(feature = "with_serde")]
derive_serialize_from_display!(CodeModuleId);

/// Carries information about a code module loaded into the process during the
/// crash. The `debug_identifier` uniquely identifies this module.
#[repr(C)]
pub struct CodeModule(c_void);

impl CodeModule {
    /// Returns the unique identifier of this `CodeModule`.
    pub fn id(&self) -> Option<CodeModuleId> {
        CodeModuleId::from_str(&self.debug_identifier()).ok()
    }

    /// Returns the base address of this code module as it was loaded by the
    /// process. (uint64_t)-1 on error.
    pub fn base_address(&self) -> u64 {
        unsafe { code_module_base_address(self) }
    }

    /// The size of the code module. 0 on error.
    pub fn size(&self) -> u64 {
        unsafe { code_module_size(self) }
    }

    /// Returns the path or file name that the code module was loaded from.
    pub fn code_file(&self) -> String {
        unsafe {
            let ptr = code_module_code_file(self);
            utils::ptr_to_string(ptr)
        }
    }

    /// An identifying string used to discriminate between multiple versions and builds of the same
    /// code module.
    ///
    /// This may contain a UUID, timestamp, version number, or any combination of this or other
    /// information, in an implementation-defined format.
    pub fn code_identifier(&self) -> String {
        unsafe {
            let ptr = code_module_code_identifier(self);
            utils::ptr_to_string(ptr)
        }
    }

    /// Returns the filename containing debugging information of this code module.
    ///
    /// If debugging information is stored in a file separate from the code module itself (as is the
    /// case when .pdb or .dSYM files are used), this will be different from `code_file`. If
    /// debugging information is stored in the code module itself (possibly prior to stripping),
    /// this will be the same as `code_file`.
    pub fn debug_file(&self) -> String {
        unsafe {
            let ptr = code_module_debug_file(self);
            utils::ptr_to_string(ptr)
        }
    }

    /// Returns a string identifying the specific version and build of the associated debug file.
    ///
    /// This may be the same as `code_identifier` when the `debug_file` and `code_file` are
    /// identical or when the same identifier is used to identify distinct debug and code files.
    ///
    /// It usually comprises the library's UUID and an age field. On Windows, the age field is a
    /// generation counter, on all other platforms it is mostly zero.
    pub fn debug_identifier(&self) -> String {
        unsafe {
            let ptr = code_module_debug_identifier(self);
            utils::ptr_to_string(ptr)
        }
    }
}

impl Eq for CodeModule {}

impl PartialEq for CodeModule {
    fn eq(&self, other: &Self) -> bool {
        self.id() == other.id()
    }
}

impl Hash for CodeModule {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.id().hash(state)
    }
}

impl Ord for CodeModule {
    fn cmp(&self, other: &Self) -> Ordering {
        self.id().cmp(&other.id())
    }
}

impl PartialOrd for CodeModule {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl fmt::Debug for CodeModule {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("CodeModule")
            .field("id", &self.id())
            .field("base_address", &self.base_address())
            .field("size", &self.size())
            .field("code_file", &self.code_file())
            .field("code_identifier", &self.code_identifier())
            .field("debug_file", &self.debug_file())
            .field("debug_identifier", &self.debug_identifier())
            .finish()
    }
}

/// Indicates how well the instruction pointer derived during
/// stack walking is trusted. Since the stack walker can resort to
/// stack scanning, it can wind up with dubious frames.
///
/// In rough order of "trust metric".
#[repr(u32)]
#[derive(Debug)]
pub enum FrameTrust {
    /// Unknown trust.
    None,

    /// Scanned the stack, found this (lowest precision).
    Scan,

    /// Found while scanning stack using call frame info.
    CFIScan,

    /// Derived from frame pointer.
    FP,

    /// Derived from call frame info.
    CFI,

    /// Explicitly provided by some external stack walker.
    Prewalked,

    /// Given as instruction pointer in a context (highest precision).
    Context,
}

impl fmt::Display for FrameTrust {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let string = match *self {
            FrameTrust::None => "none",
            FrameTrust::Scan => "stack scanning",
            FrameTrust::CFIScan => "call frame info with scanning",
            FrameTrust::FP => "previous frame's frame pointer",
            FrameTrust::CFI => "call frame info",
            FrameTrust::Prewalked => "recovered by external stack walker",
            FrameTrust::Context => "given as instruction pointer in context",
        };

        write!(f, "{}", string)
    }
}

/// Contains information from the memory dump, especially the frame's instruction
/// pointer. Also references an optional `CodeModule` that contains the
/// instruction of this stack frame.
#[repr(C)]
pub struct StackFrame(c_void);

impl StackFrame {
    /// Returns the program counter location as an absolute virtual address.
    ///
    /// - For the innermost called frame in a stack, this will be an exact
    ///   program counter or instruction pointer value.
    ///
    /// - For all other frames, this address is within the instruction that
    ///   caused execution to branch to this frame's callee (although it may
    ///   not point to the exact beginning of that instruction). This ensures
    ///   that, when we look up the source code location for this frame, we
    ///   get the source location of the call, not of the point at which
    ///   control will resume when the call returns, which may be on the next
    ///   line. (If the compiler knows the callee never returns, it may even
    ///   place the call instruction at the very end of the caller's machine
    ///   code, such that the "return address" (which will never be used)
    ///   immediately after the call instruction is in an entirely different
    ///   function, perhaps even from a different source file.)
    ///
    /// On some architectures, the return address as saved on the stack or in
    /// a register is fine for looking up the point of the call. On others, it
    /// requires adjustment. ReturnAddress returns the address as saved by the
    /// machine.
    ///
    /// Use `trust` to obtain how trustworthy this instruction is.
    pub fn instruction(&self) -> u64 {
        unsafe { stack_frame_instruction(self) }
    }

    /// Returns the actual return address, as saved on the stack or in a
    /// register. See the comments for `StackFrame::instruction` for
    /// details.
    pub fn return_address(&self, arch: Arch) -> u64 {
        let address = unsafe { stack_frame_return_address(self) };

        // The return address reported for ARM* frames is actually the
        // instruction with heuristics from Breakpad applied already.
        // To resolve the original return address value, compensate
        // by adding the offsets applied in `StackwalkerARM::GetCallerFrame`
        // and `StackwalkerARM64::GetCallerFrame`.
        match arch.cpu_family() {
            CpuFamily::Arm32 => address + 2,
            CpuFamily::Arm64 => address + 4,
            _ => address,
        }
    }

    /// Returns the `CodeModule` that contains this frame's instruction.
    pub fn module(&self) -> Option<&CodeModule> {
        unsafe { stack_frame_module(self).as_ref() }
    }

    /// Returns how well the instruction pointer is trusted.
    pub fn trust(&self) -> FrameTrust {
        unsafe { stack_frame_trust(self) }
    }
}

impl fmt::Debug for StackFrame {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("StackFrame")
            .field("return_address", &self.return_address(Arch::Unknown))
            .field("instruction", &self.instruction())
            .field("trust", &self.trust())
            .field("module", &self.module())
            .finish()
    }
}

/// Represents a thread of the `ProcessState` which holds a list of `StackFrame`s.
#[repr(C)]
pub struct CallStack(c_void);

impl CallStack {
    /// Returns the thread identifier of this callstack.
    pub fn thread_id(&self) -> u32 {
        unsafe { call_stack_thread_id(self) }
    }

    /// Returns the list of `StackFrame`s in the call stack.
    pub fn frames(&self) -> &[&StackFrame] {
        unsafe {
            let mut size: usize = 0;
            let data = call_stack_frames(self, &mut size);
            let slice = slice::from_raw_parts(data, size);
            mem::transmute(slice)
        }
    }
}

impl fmt::Debug for CallStack {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("CallStack")
            .field("thread_id", &self.thread_id())
            .field("frames", &self.frames())
            .finish()
    }
}

/// Information about the CPU and OS on which a minidump was generated.
#[repr(C)]
pub struct SystemInfo(c_void);

impl SystemInfo {
    /// A string identifying the operating system, such as "Windows NT", "Mac OS X", or "Linux".
    ///
    /// If the information is present in the dump but its value is unknown, this field will contain
    /// a numeric value. If the information is not present in the dump, this field will be empty.
    pub fn os_name(&self) -> String {
        unsafe {
            let ptr = system_info_os_name(self);
            utils::ptr_to_string(ptr)
        }
    }

    /// Strings identifying the version and build number of the operating system.
    ///
    /// If the dump does not contain either information, the component will be empty. Tries to parse
    /// the version number from the build if it is not apparent from the version string.
    pub fn os_parts(&self) -> (String, String) {
        let string = unsafe {
            let ptr = system_info_os_version(self);
            utils::ptr_to_string(ptr)
        };

        let mut parts = string.splitn(2, ' ');
        let version = parts.next().unwrap_or("0.0.0");
        let build = parts.next().unwrap_or("");

        if version == "0.0.0" {
            // Try to parse the Linux build string. Breakpad and Crashpad run
            // `uname -srvmo` to generate it. This roughly resembles:
            // "Linux [version] [build...] [arch] GNU/Linux"
            if let Some(captures) = LINUX_BUILD_RE.captures(&build) {
                let version = captures.get(1).unwrap(); // uname -r portion
                let build = captures.get(2).unwrap(); // uname -v portion
                return (version.as_str().into(), build.as_str().into());
            }
        }

        (version.into(), build.into())
    }

    /// A string identifying the version of the operating system.
    ///
    /// The version will be formatted as three-component semantic version, such as "5.1.2600" or
    /// "10.4.8". If the dump does not contain this information, this field will contain "0.0.0".
    pub fn os_version(&self) -> String {
        self.os_parts().0
    }

    /// A string identifying the build of the operating system.
    ///
    /// This build version is platform dependent, such as "Service Pack 2" or "8L2127". If the dump
    /// does not contain this information, this field will be empty.
    pub fn os_build(&self) -> String {
        self.os_parts().1
    }

    /// A string identifying the basic CPU family, such as "x86" or "ppc".
    ///
    /// If this information is present in the dump but its value is unknown,
    /// this field will contain a numeric value. If the information is not
    /// present in the dump, this field will be empty.
    pub fn cpu_family(&self) -> String {
        unsafe {
            let ptr = system_info_cpu_family(self);
            utils::ptr_to_string(ptr)
        }
    }

    /// The architecture of the CPU parsed from `ProcessState::cpu_family`.
    ///
    /// If this information is present in the dump but its value is unknown
    /// or if the value is missing, this field will contain `Arch::Unknown`.
    pub fn cpu_arch(&self) -> Arch {
        Arch::from_breakpad(&self.cpu_family()).unwrap_or_default()
    }

    /// A string further identifying the specific CPU.
    ///
    /// This information depends on the CPU vendor, such as "GenuineIntel level 6 model 13 stepping
    /// 8".
    /// If the information is not present in the dump, or additional identifying information is
    /// not defined for the CPU family, this field will be empty.
    pub fn cpu_info(&self) -> String {
        unsafe {
            let ptr = system_info_cpu_info(self);
            utils::ptr_to_string(ptr)
        }
    }

    /// The number of processors in the system.
    ///
    /// Will be greater than one for multi-core systems.
    pub fn cpu_count(&self) -> u32 {
        unsafe { system_info_cpu_count(self) }
    }
}

impl fmt::Debug for SystemInfo {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("SystemInfo")
            .field("os_name", &self.os_name())
            .field("os_version", &self.os_version())
            .field("cpu_family", &self.cpu_family())
            .field("cpu_info", &self.cpu_info())
            .field("cpu_count", &self.cpu_count())
            .finish()
    }
}

/// Result of processing a Minidump or Microdump file.
///
/// Usually included in `ProcessMinidumpError` when the file cannot be processed.
#[repr(u32)]
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
pub enum ProcessResult {
    /// The dump was processed successfully.
    Ok,

    /// The minidump file was not found or the buffer was empty.
    MinidumpNotFound,

    /// The minidump file had no header.
    NoMinidumpHeader,

    /// The minidump file has no thread list.
    NoThreadList,

    /// There was an error getting one thread's data from the dump.
    InvalidThreadIndex,

    /// There was an error getting a thread id from the thread's data.
    InvalidThreadId,

    /// There was more than one requesting thread.
    DuplicateRequestingThreads,

    /// The dump processing was interrupted (not fatal).
    SymbolSupplierInterrupted,
}

impl fmt::Display for ProcessResult {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let formatted = match self {
            &ProcessResult::Ok => "dump processed successfully",
            &ProcessResult::MinidumpNotFound => "file could not be opened",
            &ProcessResult::NoMinidumpHeader => "minidump header missing",
            &ProcessResult::NoThreadList => "minidump has no thread list",
            &ProcessResult::InvalidThreadIndex => "could not get thread data",
            &ProcessResult::InvalidThreadId => "could not get a thread by id",
            &ProcessResult::DuplicateRequestingThreads => "multiple requesting threads",
            &ProcessResult::SymbolSupplierInterrupted => "processing was interrupted (not fatal)",
        };

        write!(f, "{}", formatted)
    }
}

/// An error generated when trying to process a minidump.
#[derive(Debug, Fail, Copy, Clone)]
#[fail(display = "minidump processing failed: {}", _0)]
pub struct ProcessMinidumpError(ProcessResult);

impl ProcessMinidumpError {
    /// Returns the kind of this error.
    pub fn kind(&self) -> ProcessResult {
        self.0
    }
}

/// Internal type used to transfer Breakpad symbols over FFI.
#[repr(C)]
struct SymbolEntry {
    debug_identifier: *const c_char,
    symbol_size: usize,
    symbol_data: *const u8,
}

/// Container for call frame information (CFI) of `CodeModules`.
///
/// This information is required by the stackwalker in case frame pointers are
/// missing in the raw stacktraces. Frame information is given as plain ASCII
/// text as specified in the Breakpad symbol file specification.
pub type FrameInfoMap<'a> = BTreeMap<CodeModuleId, ByteView<'a>>;

type IProcessState = c_void;

/// Snapshot of the state of a process during its crash. The object can be
/// obtained by processing Minidump or Microdump files.
pub struct ProcessState<'a> {
    internal: *mut IProcessState,
    _ty: PhantomData<ByteView<'a>>,
}

impl<'a> ProcessState<'a> {
    /// Processes a minidump supplied via raw binary data.
    ///
    /// Returns a `ProcessState` that contains information about the crashed
    /// process.
    /// The parameter `frame_infos` expects a map of Breakpad symbols
    /// containing STACK CFI and STACK WIN records to allow stackwalking with
    /// omitted frame pointers.
    pub fn from_minidump(
        buffer: &ByteView<'a>,
        frame_infos: Option<&FrameInfoMap>,
    ) -> Result<ProcessState<'a>, ProcessMinidumpError> {
        let cfi_count = frame_infos.map_or(0, |s| s.len());
        let mut result: ProcessResult = ProcessResult::Ok;

        // Keep a reference to all CStrings to extend their lifetime.
        let cfi_vec: Vec<_> = frame_infos.map_or(Vec::new(), |s| {
            s.iter()
                .map(|(k, v)| (CString::new(k.to_string()), v.len(), v.as_ptr()))
                .collect()
        });

        // Keep a reference to all symbol entries to extend their lifetime.
        let cfi_entries: Vec<_> = cfi_vec
            .iter()
            .map(|&(ref id, size, data)| SymbolEntry {
                debug_identifier: id.as_ref().map(|i| i.as_ptr()).unwrap_or(ptr::null()),
                symbol_size: size,
                symbol_data: data,
            })
            .collect();

        let internal = unsafe {
            process_minidump(
                buffer.as_ptr() as *const c_char,
                buffer.len(),
                cfi_entries.as_ptr(),
                cfi_count,
                &mut result,
            )
        };

        if result == ProcessResult::Ok && !internal.is_null() {
            Ok(ProcessState {
                internal,
                _ty: PhantomData,
            })
        } else {
            Err(ProcessMinidumpError(result))
        }
    }

    /// The index of the thread that requested a dump be written in the threads vector.
    ///
    /// If a dump was produced as a result of a crash, this will point to the thread that crashed.
    /// If the dump was produced by user code without crashing, and the dump contains extended
    /// Breakpad information, this will point to the thread that requested the dump. If the dump was
    /// not produced as a result of an exception and no extended Breakpad information is present,
    /// this field will be set to -1, indicating that the dump thread is not available.
    pub fn requesting_thread(&self) -> i32 {
        unsafe { process_state_requesting_thread(self.internal) }
    }
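    // A minimal usage sketch for this type (the dump path is hypothetical,
    // and the memory-mapping `ByteView::from_path` constructor is assumed
    // from symbolic_common):
    //
    //     let buffer = ByteView::from_path("crash.dmp")?;
    //     let state = ProcessState::from_minidump(&buffer, None)?;
    //     println!("requesting thread: {}", state.requesting_thread());
    //
    // Passing `None` for `frame_infos` supplies no CFI records, so the
    // stackwalker falls back to frame pointers and stack scanning.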
processor.rs
mut c_char; fn system_info_os_version(info: *const SystemInfo) -> *mut c_char; fn system_info_cpu_family(info: *const SystemInfo) -> *mut c_char; fn system_info_cpu_info(info: *const SystemInfo) -> *mut c_char; fn system_info_cpu_count(info: *const SystemInfo) -> u32; fn process_minidump( buffer: *const c_char, buffer_size: usize, symbols: *const SymbolEntry, symbol_count: usize, result: *mut ProcessResult, ) -> *mut IProcessState; fn process_state_delete(state: *mut IProcessState); fn process_state_threads( state: *const IProcessState, size_out: *mut usize, ) -> *const *const CallStack; fn process_state_requesting_thread(state: *const IProcessState) -> i32; fn process_state_timestamp(state: *const IProcessState) -> u64; fn process_state_crashed(state: *const IProcessState) -> bool; fn process_state_crash_address(state: *const IProcessState) -> u64; fn process_state_crash_reason(state: *const IProcessState) -> *mut c_char; fn process_state_assertion(state: *const IProcessState) -> *mut c_char; fn process_state_system_info(state: *const IProcessState) -> *mut SystemInfo; } /// An error returned when parsing invalid `CodeModuleId`s. pub type ParseCodeModuleIdError = ParseDebugIdError; /// Breakpad code module IDs. /// /// **Example:** /// /// ``` /// # extern crate symbolic_common; /// # extern crate symbolic_minidump; /// use std::str::FromStr; /// use symbolic_minidump::processor::CodeModuleId; /// # use symbolic_minidump::processor::ParseCodeModuleIdError; /// /// # fn foo() -> Result<(), ParseCodeModuleIdError> { /// let id = CodeModuleId::from_str("DFB8E43AF2423D73A453AEB6A777EF75a")?; /// assert_eq!("DFB8E43AF2423D73A453AEB6A777EF75a".to_string(), id.to_string()); /// # Ok(()) /// # } /// /// # fn main() { foo().unwrap() } /// ``` #[derive(Debug, Default, Eq, PartialEq, Ord, PartialOrd, Hash, Clone, Copy)] pub struct CodeModuleId { inner: DebugId, } impl CodeModuleId { /// Constructs a `CodeModuleId` from its `uuid` and `age` parts. pub fn from_parts(uuid: Uuid, age: u32) -> CodeModuleId { CodeModuleId { inner: DebugId::from_parts(uuid, age), } } /// Returns the UUID part of the code module id. pub fn uuid(&self) -> Uuid { self.inner.uuid() } /// Returns the appendix part of the code module id. /// /// On Windows, this is an incrementing counter to identify the build. /// On all other platforms, this value will always be zero. pub fn age(&self) -> u32 { self.inner.appendix() } /// Converts this code module id into a debug identifier. pub fn as_object_id(&self) -> DebugId { self.inner } } impl From<DebugId> for CodeModuleId { fn from(inner: DebugId) -> Self { CodeModuleId { inner } } } impl Into<DebugId> for CodeModuleId { fn into(self) -> DebugId { self.inner } } impl fmt::Display for CodeModuleId { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.inner.breakpad().fmt(f) } } impl str::FromStr for CodeModuleId { type Err = ParseCodeModuleIdError; fn from_str(string: &str) -> Result<CodeModuleId, ParseCodeModuleIdError> { Ok(CodeModuleId { inner: DebugId::from_breakpad(string)?, }) } } #[cfg(feature = "with_serde")] derive_deserialize_from_str!(CodeModuleId, "CodeModuleId"); #[cfg(feature = "with_serde")] derive_serialize_from_display!(CodeModuleId); /// Carries information about a code module loaded into the process during the /// crash. The `debug_identifier` uniquely identifies this module. #[repr(C)] pub struct CodeModule(c_void); impl CodeModule { /// Returns the unique identifier of this `CodeModule`. 
pub fn id(&self) -> Option<CodeModuleId> { CodeModuleId::from_str(&self.debug_identifier()).ok() } /// Returns the base address of this code module as it was loaded by the /// process. (uint64_t)-1 on error. pub fn base_address(&self) -> u64 { unsafe { code_module_base_address(self) } } /// The size of the code module. 0 on error. pub fn size(&self) -> u64 { unsafe { code_module_size(self) } } /// Returns the path or file name that the code module was loaded from. pub fn code_file(&self) -> String { unsafe { let ptr = code_module_code_file(self); utils::ptr_to_string(ptr) } } /// An identifying string used to discriminate between multiple versions and builds of the same /// code module. /// /// This may contain a UUID, timestamp, version number, or any combination of this or other /// information, in an implementation-defined format. pub fn code_identifier(&self) -> String { unsafe { let ptr = code_module_code_identifier(self); utils::ptr_to_string(ptr) } } /// Returns the filename containing debugging information of this code module. /// /// If debugging information is stored in a file separate from the code module itself (as is the /// case when.pdb or.dSYM files are used), this will be different from `code_file`. If /// debugging information is stored in the code module itself (possibly prior to stripping), /// this will be the same as code_file. pub fn debug_file(&self) -> String { unsafe { let ptr = code_module_debug_file(self); utils::ptr_to_string(ptr) } } /// Returns a string identifying the specific version and build of the associated debug file. /// /// This may be the same as `code_identifier` when the `debug_file` and `code_file` are /// identical or when the same identifier is used to identify distinct debug and code files. /// /// It usually comprises the library's UUID and an age field. On Windows, the age field is a /// generation counter, on all other platforms it is mostly zero. pub fn debug_identifier(&self) -> String { unsafe { let ptr = code_module_debug_identifier(self); utils::ptr_to_string(ptr) } } } impl Eq for CodeModule {} impl PartialEq for CodeModule { fn eq(&self, other: &Self) -> bool { self.id() == other.id() } } impl Hash for CodeModule { fn hash<H: Hasher>(&self, state: &mut H) { self.id().hash(state) } } impl Ord for CodeModule { fn cmp(&self, other: &Self) -> Ordering { self.id().cmp(&other.id()) } } impl PartialOrd for CodeModule { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) } } impl fmt::Debug for CodeModule { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("CodeModule") .field("id", &self.id()) .field("base_address", &self.base_address()) .field("size", &self.size()) .field("code_file", &self.code_file()) .field("code_identifier", &self.code_identifier()) .field("debug_file", &self.debug_file()) .field("debug_identifier", &self.debug_identifier()) .finish() } } /// Indicates how well the instruction pointer derived during /// stack walking is trusted. Since the stack walker can resort to /// stack scanning, it can wind up with dubious frames. /// /// In rough order of "trust metric". #[repr(u32)] #[derive(Debug)] pub enum FrameTrust { /// Unknown trust. None, /// Scanned the stack, found this (lowest precision). Scan, /// Found while scanning stack using call frame info. CFIScan, /// Derived from frame pointer. FP, /// Derived from call frame info. CFI, /// Explicitly provided by some external stack walker. Prewalked, /// Given as instruction pointer in a context (highest precision). 
Context, } impl fmt::Display for FrameTrust { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let string = match *self { FrameTrust::None => "none", FrameTrust::Scan => "stack scanning", FrameTrust::CFIScan => "call frame info with scanning", FrameTrust::FP => "previous frame's frame pointer", FrameTrust::CFI => "call frame info", FrameTrust::Prewalked => "recovered by external stack walker", FrameTrust::Context => "given as instruction pointer in context", }; write!(f, "{}", string) } } /// Contains information from the memorydump, especially the frame's instruction /// pointer. Also references an optional `CodeModule` that contains the /// instruction of this stack frame. #[repr(C)] pub struct StackFrame(c_void); impl StackFrame { /// Returns the program counter location as an absolute virtual address. /// /// - For the innermost called frame in a stack, this will be an exact /// program counter or instruction pointer value. /// /// - For all other frames, this address is within the instruction that /// caused execution to branch to this frame's callee (although it may /// not point to the exact beginning of that instruction). This ensures /// that, when we look up the source code location for this frame, we /// get the source location of the call, not of the point at which /// control will resume when the call returns, which may be on the next /// line. (If the compiler knows the callee never returns, it may even /// place the call instruction at the very end of the caller's machine /// code, such that the "return address" (which will never be used) /// immediately after the call instruction is in an entirely different /// function, perhaps even from a different source file.) /// /// On some architectures, the return address as saved on the stack or in /// a register is fine for looking up the point of the call. On others, it /// requires adjustment. ReturnAddress returns the address as saved by the /// machine. /// /// Use `trust` to obtain how trustworthy this instruction is. pub fn instruction(&self) -> u64 { unsafe { stack_frame_instruction(self) } } // Return the actual return address, as saved on the stack or in a // register. See the comments for `StackFrame::instruction' for // details. pub fn return_address(&self, arch: Arch) -> u64 { let address = unsafe { stack_frame_return_address(self) }; // The return address reported for ARM* frames is actually the // instruction with heuristics from Breakpad applied already. // To resolve the original return address value, compensate // by adding the offsets applied in `StackwalkerARM::GetCallerFrame` // and `StackwalkerARM64::GetCallerFrame`. match arch.cpu_family() { CpuFamily::Arm32 => address + 2, CpuFamily::Arm64 => address + 4, _ => address, } } /// Returns the `CodeModule` that contains this frame's instruction. pub fn module(&self) -> Option<&CodeModule> { unsafe { stack_frame_module(self).as_ref() } } /// Returns how well the instruction pointer is trusted. pub fn trust(&self) -> FrameTrust { unsafe { stack_frame_trust(self) } } } impl fmt::Debug for StackFrame { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("StackFrame") .field("return_address", &self.return_address(Arch::Unknown)) .field("instruction", &self.instruction()) .field("trust", &self.trust()) .field("module", &self.module()) .finish() } } /// Represents a thread of the `ProcessState` which holds a list of `StackFrame`s. #[repr(C)] pub struct CallStack(c_void); impl CallStack { /// Returns the thread identifier of this callstack. 
pub fn thread_id(&self) -> u32 { unsafe { call_stack_thread_id(self) } } /// Returns the list of `StackFrame`s in the call stack. pub fn frames(&self) -> &[&StackFrame] { unsafe { let mut size = 0 as usize; let data = call_stack_frames(self, &mut size); let slice = slice::from_raw_parts(data, size); mem::transmute(slice) } } } impl fmt::Debug for CallStack { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("CallStack") .field("thread_id", &self.thread_id()) .field("frames", &self.frames()) .finish() } } /// Information about the CPU and OS on which a minidump was generated. #[repr(C)] pub struct SystemInfo(c_void); impl SystemInfo { /// A string identifying the operating system, such as "Windows NT", "Mac OS X", or "Linux". /// /// If the information is present in the dump but its value is unknown, this field will contain /// a numeric value. If the information is not present in the dump, this field will be empty. pub fn os_name(&self) -> String { unsafe { let ptr = system_info_os_name(self); utils::ptr_to_string(ptr) } } /// Strings identifying the version and build number of the operating system. /// /// If the dump does not contain either information, the component will be empty. Tries to parse /// the version number from the build if it is not apparent from the version string. pub fn os_parts(&self) -> (String, String) { let string = unsafe { let ptr = system_info_os_version(self); utils::ptr_to_string(ptr) }; let mut parts = string.splitn(2,''); let version = parts.next().unwrap_or("0.0.0"); let build = parts.next().unwrap_or(""); if version == "0.0.0" { // Try to parse the Linux build string. Breakpad and Crashpad run // `uname -srvmo` to generate it. This roughtly resembles: // "Linux [version] [build...] [arch] Linux/GNU" if let Some(captures) = LINUX_BUILD_RE.captures(&build) { let version = captures.get(1).unwrap(); // uname -r portion let build = captures.get(2).unwrap(); // uname -v portion return (version.as_str().into(), build.as_str().into()); } } (version.into(), build.into()) } /// A string identifying the version of the operating system. /// /// The version will be formatted as three-component semantic version, such as "5.1.2600" or /// "10.4.8". If the dump does not contain this information, this field will contain "0.0.0". pub fn os_version(&self) -> String { self.os_parts().0 } /// A string identifying the build of the operating system. /// /// This build version is platform dependent, such as "Service Pack 2" or "8L2127". If the dump /// does not contain this information, this field will be empty. pub fn os_build(&self) -> String { self.os_parts().1 } /// A string identifying the basic CPU family, such as "x86" or "ppc". /// /// If this information is present in the dump but its value is unknown, /// this field will contain a numeric value. If the information is not /// present in the dump, this field will be empty. pub fn cpu_family(&self) -> String { unsafe { let ptr = system_info_cpu_family(self); utils::ptr_to_string(ptr) } } /// The architecture of the CPU parsed from `ProcessState::cpu_family`. /// /// If this information is present in the dump but its value is unknown /// or if the value is missing, this field will contain `Arch::Unknown`. pub fn cpu_arch(&self) -> Arch { Arch::from_breakpad(&self.cpu_family()).unwrap_or_default() } /// A string further identifying the specific CPU. /// /// This information depends on the CPU vendor, such as "GenuineIntel level 6 model 13 stepping /// 8". 
If the information is not present in the dump, or additional identifying information is /// not defined for the CPU family, this field will be empty. pub fn cpu_info(&self) -> String { unsafe { let ptr = system_info_cpu_info(self); utils::ptr_to_string(ptr) } } /// The number of processors in the system. /// /// Will be greater than one for multi-core systems. pub fn cpu_count(&self) -> u32 { unsafe { system_info_cpu_count(self) } } } impl fmt::Debug for SystemInfo { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("SystemInfo") .field("os_name", &self.os_name()) .field("os_version", &self.os_version()) .field("cpu_family", &self.cpu_family()) .field("cpu_info", &self.cpu_info()) .field("cpu_count", &self.cpu_count()) .finish() } } /// Result of processing a Minidump or Microdump file. /// /// Usually included in `ProcessError` when the file cannot be processed. #[repr(u32)] #[derive(Debug, Eq, PartialEq, Copy, Clone)] pub enum ProcessResult { /// The dump was processed successfully. Ok, /// The minidump file was not found or the buffer was empty. MinidumpNotFound, /// The minidump file had no header. NoMinidumpHeader, /// The minidump file has no thread list. NoThreadList, /// There was an error getting one thread's data from the dump. InvalidThreadIndex, /// There was an error getting a thread id from the thread's data. InvalidThreadId, /// There was more than one requesting thread. DuplicateRequestingThreads, /// The dump processing was interrupted (not fatal). SymbolSupplierInterrupted, } impl fmt::Display for ProcessResult { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let formatted = match self { &ProcessResult::Ok => "dump processed successfully", &ProcessResult::MinidumpNotFound => "file could not be opened", &ProcessResult::NoMinidumpHeader => "minidump header missing", &ProcessResult::NoThreadList => "minidump has no thread list", &ProcessResult::InvalidThreadIndex => "could not get thread data", &ProcessResult::InvalidThreadId => "could not get a thread by id", &ProcessResult::DuplicateRequestingThreads => "multiple requesting threads", &ProcessResult::SymbolSupplierInterrupted => "processing was interrupted (not fatal)", }; write!(f, "{}", formatted) } } /// An error generated when trying to process a minidump. #[derive(Debug, Fail, Copy, Clone)] #[fail(display = "minidump processing failed: {}", _0)] pub struct ProcessMinidumpError(ProcessResult); impl ProcessMinidumpError { /// Returns the kind of this error. pub fn kind(&self) -> ProcessResult { self.0 } } /// Internal type used to transfer Breakpad symbols over FFI. #[repr(C)] struct SymbolEntry { debug_identifier: *const c_char, symbol_size: usize, symbol_data: *const u8, } /// Container for call frame information (CFI) of `CodeModules`. /// /// This information is required by the stackwalker in case framepointers are /// missing in the raw stacktraces. Frame information is given as plain ASCII /// text as specified in the Breakpad symbol file specification. pub type FrameInfoMap<'a> = BTreeMap<CodeModuleId, ByteView<'a>>; type IProcessState = c_void; /// Snapshot of the state of a processes during its crash. The object can be /// obtained by processing Minidump or Microdump files. pub struct
<'a> { internal: *mut IProcessState, _ty: PhantomData<ByteView<'a>>, } impl<'a> ProcessState<'a> { /// Processes a minidump supplied via raw binary data. /// /// Returns a `ProcessState` that contains information about the crashed /// process. The parameter `frame_infos` expects a map of Breakpad symbols /// containing STACK CFI and STACK WIN records to allow stackwalking with /// omitted frame pointers. pub fn from_minidump( buffer: &ByteView<'a>, frame_infos: Option<&FrameInfoMap>, ) -> Result<ProcessState<'a>, ProcessMinidumpError> { let cfi_count = frame_infos.map_or(0, |s| s.len()); let mut result: ProcessResult = ProcessResult::Ok; // Keep a reference to all CStrings to extend their lifetime. let cfi_vec: Vec<_> = frame_infos.map_or(Vec::new(), |s| { s.iter() .map(|(k, v)| (CString::new(k.to_string()), v.len(), v.as_ptr())) .collect() }); // Keep a reference to all symbol entries to extend their lifetime. let cfi_entries: Vec<_> = cfi_vec .iter() .map(|&(ref id, size, data)| SymbolEntry { debug_identifier: id.as_ref().map(|i| i.as_ptr()).unwrap_or(ptr::null()), symbol_size: size, symbol_data: data, }) .collect(); let internal = unsafe { process_minidump( buffer.as_ptr() as *const c_char, buffer.len(), cfi_entries.as_ptr(), cfi_count, &mut result, ) }; if result == ProcessResult::Ok &&!internal.is_null() { Ok(ProcessState { internal, _ty: PhantomData, }) } else { Err(ProcessMinidumpError(result)) } } /// The index of the thread that requested a dump be written in the threads vector. /// /// If a dump was produced as a result of a crash, this will point to the thread that crashed. /// If the dump was produced as by user code without crashing, and the dump contains extended /// Breakpad information, this will point to the thread that requested the dump. If the dump was /// not produced as a result of an exception and no extended Breakpad information is present, /// this field will be set to -1, indicating that the dump thread is not available. pub fn requesting_thread(&self) -> i32 {
ProcessState
identifier_name
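A minimal usage sketch of the `ProcessState::from_minidump` API shown in the record above. The module id is taken from the `CodeModuleId` doc example, while the CFI record contents are placeholders and `ByteView::from_slice` is assumed to be available from `symbolic_common`; everything else uses only methods defined in this file.

use std::str::FromStr;

use symbolic_common::byteview::ByteView; // assumed import path
use symbolic_minidump::processor::{CodeModuleId, FrameInfoMap, ProcessState};

fn process_dump(buffer: &ByteView) {
    // CFI symbols are optional; an empty map makes the stackwalker fall
    // back to frame pointers and stack scanning.
    let mut frame_infos = FrameInfoMap::new();
    if let Ok(id) = CodeModuleId::from_str("DFB8E43AF2423D73A453AEB6A777EF75a") {
        // Hypothetical STACK CFI records for that module.
        frame_infos.insert(id, ByteView::from_slice(b"STACK CFI INIT ..."));
    }

    match ProcessState::from_minidump(buffer, Some(&frame_infos)) {
        // requesting_thread() is -1 when no dump thread is available.
        Ok(state) => println!("requesting thread: {}", state.requesting_thread()),
        // ProcessMinidumpError wraps and displays the ProcessResult.
        Err(err) => eprintln!("{}", err),
    }
}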
processor.rs
*mut c_char; fn system_info_os_version(info: *const SystemInfo) -> *mut c_char; fn system_info_cpu_family(info: *const SystemInfo) -> *mut c_char; fn system_info_cpu_info(info: *const SystemInfo) -> *mut c_char; fn system_info_cpu_count(info: *const SystemInfo) -> u32; fn process_minidump( buffer: *const c_char, buffer_size: usize, symbols: *const SymbolEntry, symbol_count: usize, result: *mut ProcessResult, ) -> *mut IProcessState; fn process_state_delete(state: *mut IProcessState); fn process_state_threads( state: *const IProcessState, size_out: *mut usize, ) -> *const *const CallStack; fn process_state_requesting_thread(state: *const IProcessState) -> i32; fn process_state_timestamp(state: *const IProcessState) -> u64; fn process_state_crashed(state: *const IProcessState) -> bool; fn process_state_crash_address(state: *const IProcessState) -> u64; fn process_state_crash_reason(state: *const IProcessState) -> *mut c_char; fn process_state_assertion(state: *const IProcessState) -> *mut c_char; fn process_state_system_info(state: *const IProcessState) -> *mut SystemInfo; } /// An error returned when parsing invalid `CodeModuleId`s. pub type ParseCodeModuleIdError = ParseDebugIdError; /// Breakpad code module IDs. /// /// **Example:** /// /// ``` /// # extern crate symbolic_common; /// # extern crate symbolic_minidump; /// use std::str::FromStr; /// use symbolic_minidump::processor::CodeModuleId; /// # use symbolic_minidump::processor::ParseCodeModuleIdError; /// /// # fn foo() -> Result<(), ParseCodeModuleIdError> { /// let id = CodeModuleId::from_str("DFB8E43AF2423D73A453AEB6A777EF75a")?; /// assert_eq!("DFB8E43AF2423D73A453AEB6A777EF75a".to_string(), id.to_string()); /// # Ok(()) /// # } /// /// # fn main() { foo().unwrap() } /// ``` #[derive(Debug, Default, Eq, PartialEq, Ord, PartialOrd, Hash, Clone, Copy)] pub struct CodeModuleId { inner: DebugId, } impl CodeModuleId { /// Constructs a `CodeModuleId` from its `uuid` and `age` parts. pub fn from_parts(uuid: Uuid, age: u32) -> CodeModuleId { CodeModuleId { inner: DebugId::from_parts(uuid, age), } } /// Returns the UUID part of the code module id. pub fn uuid(&self) -> Uuid { self.inner.uuid() } /// Returns the appendix part of the code module id. /// /// On Windows, this is an incrementing counter to identify the build. /// On all other platforms, this value will always be zero. pub fn age(&self) -> u32 { self.inner.appendix() } /// Converts this code module id into a debug identifier. pub fn as_object_id(&self) -> DebugId { self.inner } } impl From<DebugId> for CodeModuleId { fn from(inner: DebugId) -> Self { CodeModuleId { inner } } } impl Into<DebugId> for CodeModuleId { fn into(self) -> DebugId { self.inner } } impl fmt::Display for CodeModuleId { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.inner.breakpad().fmt(f) } } impl str::FromStr for CodeModuleId { type Err = ParseCodeModuleIdError; fn from_str(string: &str) -> Result<CodeModuleId, ParseCodeModuleIdError> { Ok(CodeModuleId { inner: DebugId::from_breakpad(string)?, }) } } #[cfg(feature = "with_serde")] derive_deserialize_from_str!(CodeModuleId, "CodeModuleId"); #[cfg(feature = "with_serde")] derive_serialize_from_display!(CodeModuleId); /// Carries information about a code module loaded into the process during the /// crash. The `debug_identifier` uniquely identifies this module. #[repr(C)] pub struct CodeModule(c_void); impl CodeModule { /// Returns the unique identifier of this `CodeModule`. 
pub fn id(&self) -> Option<CodeModuleId> { CodeModuleId::from_str(&self.debug_identifier()).ok() } /// Returns the base address of this code module as it was loaded by the /// process. (uint64_t)-1 on error. pub fn base_address(&self) -> u64 { unsafe { code_module_base_address(self) } } /// The size of the code module. 0 on error. pub fn size(&self) -> u64 { unsafe { code_module_size(self) } } /// Returns the path or file name that the code module was loaded from. pub fn code_file(&self) -> String { unsafe { let ptr = code_module_code_file(self); utils::ptr_to_string(ptr) } } /// An identifying string used to discriminate between multiple versions and builds of the same /// code module. /// /// This may contain a UUID, timestamp, version number, or any combination of this or other /// information, in an implementation-defined format. pub fn code_identifier(&self) -> String { unsafe { let ptr = code_module_code_identifier(self); utils::ptr_to_string(ptr) } } /// Returns the filename containing debugging information of this code module. /// /// If debugging information is stored in a file separate from the code module itself (as is the /// case when .pdb or .dSYM files are used), this will be different from `code_file`. If /// debugging information is stored in the code module itself (possibly prior to stripping), /// this will be the same as `code_file`. pub fn debug_file(&self) -> String { unsafe { let ptr = code_module_debug_file(self); utils::ptr_to_string(ptr) } } /// Returns a string identifying the specific version and build of the associated debug file. /// /// This may be the same as `code_identifier` when the `debug_file` and `code_file` are /// identical or when the same identifier is used to identify distinct debug and code files. /// /// It usually comprises the library's UUID and an age field. On Windows, the age field is a /// generation counter, on all other platforms it is mostly zero. pub fn debug_identifier(&self) -> String { unsafe { let ptr = code_module_debug_identifier(self); utils::ptr_to_string(ptr) } } } impl Eq for CodeModule {} impl PartialEq for CodeModule { fn eq(&self, other: &Self) -> bool { self.id() == other.id() } } impl Hash for CodeModule { fn hash<H: Hasher>(&self, state: &mut H) { self.id().hash(state) } } impl Ord for CodeModule { fn cmp(&self, other: &Self) -> Ordering { self.id().cmp(&other.id()) } } impl PartialOrd for CodeModule { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) } } impl fmt::Debug for CodeModule { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("CodeModule") .field("id", &self.id()) .field("base_address", &self.base_address()) .field("size", &self.size()) .field("code_file", &self.code_file()) .field("code_identifier", &self.code_identifier()) .field("debug_file", &self.debug_file()) .field("debug_identifier", &self.debug_identifier()) .finish() } } /// Indicates how well the instruction pointer derived during /// stack walking is trusted. Since the stack walker can resort to /// stack scanning, it can wind up with dubious frames. /// /// In rough order of "trust metric". #[repr(u32)] #[derive(Debug)] pub enum FrameTrust { /// Unknown trust. None, /// Scanned the stack, found this (lowest precision). Scan, /// Found while scanning stack using call frame info. CFIScan, /// Derived from frame pointer. FP, /// Derived from call frame info. CFI, /// Explicitly provided by some external stack walker. Prewalked, /// Given as instruction pointer in a context (highest precision).
Context, } impl fmt::Display for FrameTrust { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let string = match *self { FrameTrust::None => "none", FrameTrust::Scan => "stack scanning", FrameTrust::CFIScan => "call frame info with scanning", FrameTrust::FP => "previous frame's frame pointer", FrameTrust::CFI => "call frame info", FrameTrust::Prewalked => "recovered by external stack walker", FrameTrust::Context => "given as instruction pointer in context", }; write!(f, "{}", string) } } /// Contains information from the memory dump, especially the frame's instruction /// pointer. Also references an optional `CodeModule` that contains the /// instruction of this stack frame. #[repr(C)] pub struct StackFrame(c_void); impl StackFrame { /// Returns the program counter location as an absolute virtual address. /// /// - For the innermost called frame in a stack, this will be an exact /// program counter or instruction pointer value. /// /// - For all other frames, this address is within the instruction that /// caused execution to branch to this frame's callee (although it may /// not point to the exact beginning of that instruction). This ensures /// that, when we look up the source code location for this frame, we /// get the source location of the call, not of the point at which /// control will resume when the call returns, which may be on the next /// line. (If the compiler knows the callee never returns, it may even /// place the call instruction at the very end of the caller's machine /// code, such that the "return address" (which will never be used) /// immediately after the call instruction is in an entirely different /// function, perhaps even from a different source file.) /// /// On some architectures, the return address as saved on the stack or in /// a register is fine for looking up the point of the call. On others, it /// requires adjustment. ReturnAddress returns the address as saved by the /// machine. /// /// Use `trust` to obtain how trustworthy this instruction is. pub fn instruction(&self) -> u64 { unsafe { stack_frame_instruction(self) } } /// Returns the actual return address, as saved on the stack or in a /// register. See the comments for `StackFrame::instruction` for /// details. pub fn return_address(&self, arch: Arch) -> u64 { let address = unsafe { stack_frame_return_address(self) }; // The return address reported for ARM* frames is actually the // instruction with heuristics from Breakpad applied already. // To resolve the original return address value, compensate // by adding the offsets applied in `StackwalkerARM::GetCallerFrame` // and `StackwalkerARM64::GetCallerFrame`. match arch.cpu_family() { CpuFamily::Arm32 => address + 2, CpuFamily::Arm64 => address + 4, _ => address, } } /// Returns the `CodeModule` that contains this frame's instruction. pub fn module(&self) -> Option<&CodeModule> { unsafe { stack_frame_module(self).as_ref() } } /// Returns how well the instruction pointer is trusted. pub fn trust(&self) -> FrameTrust { unsafe { stack_frame_trust(self) } } } impl fmt::Debug for StackFrame { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("StackFrame") .field("return_address", &self.return_address(Arch::Unknown)) .field("instruction", &self.instruction()) .field("trust", &self.trust()) .field("module", &self.module()) .finish() } } /// Represents a thread of the `ProcessState` which holds a list of `StackFrame`s. #[repr(C)] pub struct CallStack(c_void); impl CallStack { /// Returns the thread identifier of this callstack.
pub fn thread_id(&self) -> u32 { unsafe { call_stack_thread_id(self) } } /// Returns the list of `StackFrame`s in the call stack. pub fn frames(&self) -> &[&StackFrame] { unsafe { let mut size = 0 as usize; let data = call_stack_frames(self, &mut size); let slice = slice::from_raw_parts(data, size); mem::transmute(slice) } } } impl fmt::Debug for CallStack { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("CallStack") .field("thread_id", &self.thread_id()) .field("frames", &self.frames()) .finish() } } /// Information about the CPU and OS on which a minidump was generated. #[repr(C)] pub struct SystemInfo(c_void); impl SystemInfo { /// A string identifying the operating system, such as "Windows NT", "Mac OS X", or "Linux". /// /// If the information is present in the dump but its value is unknown, this field will contain /// a numeric value. If the information is not present in the dump, this field will be empty. pub fn os_name(&self) -> String { unsafe { let ptr = system_info_os_name(self); utils::ptr_to_string(ptr) } } /// Strings identifying the version and build number of the operating system. /// /// If the dump does not contain either piece of information, the component will be empty. Tries to parse /// the version number from the build if it is not apparent from the version string. pub fn os_parts(&self) -> (String, String) { let string = unsafe { let ptr = system_info_os_version(self); utils::ptr_to_string(ptr) }; let mut parts = string.splitn(2, ' '); let version = parts.next().unwrap_or("0.0.0"); let build = parts.next().unwrap_or(""); if version == "0.0.0" { // Try to parse the Linux build string. Breakpad and Crashpad run // `uname -srvmo` to generate it. This roughly resembles: // "Linux [version] [build...] [arch] Linux/GNU" if let Some(captures) = LINUX_BUILD_RE.captures(&build) { let version = captures.get(1).unwrap(); // uname -r portion let build = captures.get(2).unwrap(); // uname -v portion return (version.as_str().into(), build.as_str().into()); } } (version.into(), build.into()) } /// A string identifying the version of the operating system. /// /// The version will be formatted as a three-component semantic version, such as "5.1.2600" or /// "10.4.8". If the dump does not contain this information, this field will contain "0.0.0". pub fn os_version(&self) -> String { self.os_parts().0 } /// A string identifying the build of the operating system. /// /// This build version is platform dependent, such as "Service Pack 2" or "8L2127". If the dump /// does not contain this information, this field will be empty. pub fn os_build(&self) -> String { self.os_parts().1 } /// A string identifying the basic CPU family, such as "x86" or "ppc". /// /// If this information is present in the dump but its value is unknown, /// this field will contain a numeric value. If the information is not /// present in the dump, this field will be empty. pub fn cpu_family(&self) -> String { unsafe { let ptr = system_info_cpu_family(self); utils::ptr_to_string(ptr) } } /// The architecture of the CPU parsed from `ProcessState::cpu_family`. /// /// If this information is present in the dump but its value is unknown /// or if the value is missing, this field will contain `Arch::Unknown`. pub fn cpu_arch(&self) -> Arch { Arch::from_breakpad(&self.cpu_family()).unwrap_or_default() } /// A string further identifying the specific CPU. /// /// This information depends on the CPU vendor, such as "GenuineIntel level 6 model 13 stepping /// 8".
If the information is not present in the dump, or additional identifying information is /// not defined for the CPU family, this field will be empty. pub fn cpu_info(&self) -> String { unsafe { let ptr = system_info_cpu_info(self); utils::ptr_to_string(ptr) } } /// The number of processors in the system. /// /// Will be greater than one for multi-core systems. pub fn cpu_count(&self) -> u32 { unsafe { system_info_cpu_count(self) } } } impl fmt::Debug for SystemInfo { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("SystemInfo") .field("os_name", &self.os_name()) .field("os_version", &self.os_version()) .field("cpu_family", &self.cpu_family()) .field("cpu_info", &self.cpu_info()) .field("cpu_count", &self.cpu_count()) .finish() } } /// Result of processing a Minidump or Microdump file. /// /// Usually included in `ProcessError` when the file cannot be processed. #[repr(u32)] #[derive(Debug, Eq, PartialEq, Copy, Clone)] pub enum ProcessResult { /// The dump was processed successfully. Ok, /// The minidump file was not found or the buffer was empty. MinidumpNotFound,
NoMinidumpHeader, /// The minidump file has no thread list. NoThreadList, /// There was an error getting one thread's data from the dump. InvalidThreadIndex, /// There was an error getting a thread id from the thread's data. InvalidThreadId, /// There was more than one requesting thread. DuplicateRequestingThreads, /// The dump processing was interrupted (not fatal). SymbolSupplierInterrupted, } impl fmt::Display for ProcessResult { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let formatted = match self { &ProcessResult::Ok => "dump processed successfully", &ProcessResult::MinidumpNotFound => "file could not be opened", &ProcessResult::NoMinidumpHeader => "minidump header missing", &ProcessResult::NoThreadList => "minidump has no thread list", &ProcessResult::InvalidThreadIndex => "could not get thread data", &ProcessResult::InvalidThreadId => "could not get a thread by id", &ProcessResult::DuplicateRequestingThreads => "multiple requesting threads", &ProcessResult::SymbolSupplierInterrupted => "processing was interrupted (not fatal)", }; write!(f, "{}", formatted) } } /// An error generated when trying to process a minidump. #[derive(Debug, Fail, Copy, Clone)] #[fail(display = "minidump processing failed: {}", _0)] pub struct ProcessMinidumpError(ProcessResult); impl ProcessMinidumpError { /// Returns the kind of this error. pub fn kind(&self) -> ProcessResult { self.0 } } /// Internal type used to transfer Breakpad symbols over FFI. #[repr(C)] struct SymbolEntry { debug_identifier: *const c_char, symbol_size: usize, symbol_data: *const u8, } /// Container for call frame information (CFI) of `CodeModules`. /// /// This information is required by the stackwalker in case framepointers are /// missing in the raw stacktraces. Frame information is given as plain ASCII /// text as specified in the Breakpad symbol file specification. pub type FrameInfoMap<'a> = BTreeMap<CodeModuleId, ByteView<'a>>; type IProcessState = c_void; /// Snapshot of the state of a process during its crash. The object can be /// obtained by processing Minidump or Microdump files. pub struct ProcessState<'a> { internal: *mut IProcessState, _ty: PhantomData<ByteView<'a>>, } impl<'a> ProcessState<'a> { /// Processes a minidump supplied via raw binary data. /// /// Returns a `ProcessState` that contains information about the crashed /// process. The parameter `frame_infos` expects a map of Breakpad symbols /// containing STACK CFI and STACK WIN records to allow stackwalking with /// omitted frame pointers. pub fn from_minidump( buffer: &ByteView<'a>, frame_infos: Option<&FrameInfoMap>, ) -> Result<ProcessState<'a>, ProcessMinidumpError> { let cfi_count = frame_infos.map_or(0, |s| s.len()); let mut result: ProcessResult = ProcessResult::Ok; // Keep a reference to all CStrings to extend their lifetime. let cfi_vec: Vec<_> = frame_infos.map_or(Vec::new(), |s| { s.iter() .map(|(k, v)| (CString::new(k.to_string()), v.len(), v.as_ptr())) .collect() }); // Keep a reference to all symbol entries to extend their lifetime.
let cfi_entries: Vec<_> = cfi_vec .iter() .map(|&(ref id, size, data)| SymbolEntry { debug_identifier: id.as_ref().map(|i| i.as_ptr()).unwrap_or(ptr::null()), symbol_size: size, symbol_data: data, }) .collect(); let internal = unsafe { process_minidump( buffer.as_ptr() as *const c_char, buffer.len(), cfi_entries.as_ptr(), cfi_count, &mut result, ) }; if result == ProcessResult::Ok && !internal.is_null() { Ok(ProcessState { internal, _ty: PhantomData, }) } else { Err(ProcessMinidumpError(result)) } } /// The index of the thread that requested a dump be written in the threads vector. /// /// If a dump was produced as a result of a crash, this will point to the thread that crashed. /// If the dump was produced by user code without crashing, and the dump contains extended /// Breakpad information, this will point to the thread that requested the dump. If the dump was /// not produced as a result of an exception and no extended Breakpad information is present, /// this field will be set to -1, indicating that the dump thread is not available. pub fn requesting_thread(&self) -> i32 { unsafe
/// The minidump file had no header.
random_line_split
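A sketch of walking the call stacks of a processed dump, tying together the types from the record above. The `threads()` accessor is an assumption — the excerpt only declares the underlying `process_state_threads` FFI call, not the safe wrapper — while `CallStack` and `StackFrame` are used exactly as defined.

use symbolic_common::Arch; // assumed import path for Arch

fn print_stacks(state: &ProcessState, arch: Arch) {
    for stack in state.threads() { // assumed accessor over process_state_threads
        println!("thread {}", stack.thread_id());
        for frame in stack.frames() {
            // instruction() already has Breakpad's call-site heuristics
            // applied; return_address(arch) re-adds the ARM/ARM64 offsets
            // to recover the value as saved by the machine.
            println!(
                "  {:#018x} [{}] return address {:#018x}",
                frame.instruction(),
                frame.trust(),
                frame.return_address(arch),
            );
        }
    }
}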
builders.rs
//! Builder types used for patches and other complex data structures. //! //! These types do not usually need to be imported, but the methods available //! on them are very relevant to where they are used. use serde_json::Value; use chrono::offset::FixedOffset; use chrono::DateTime; use model::*; use Object; macro_rules! builder { ($(#[$attr:meta] $name:ident($inner:ty);)*) => { $( #[$attr] #[derive(Serialize, Deserialize)] pub struct $name($inner); impl $name { #[doc(hidden)] #[inline(always)] pub fn __build<F: FnOnce($name) -> $name>(f: F) -> $inner where $inner: Default { Self::__apply(f, Default::default()) } #[doc(hidden)] pub fn __apply<F: FnOnce($name) -> $name>(f: F, inp: $inner) -> $inner { f($name(inp)).0 } /// Merge this builder's contents with another of the same type. /// Keys in `other` will override those in `self`. /// /// This method is intended to be used with deserialized /// instances. Note that deserialization *does not* check that /// the keys are valid for the relevant API call. /// /// ```ignore /// discord.edit_server(|b| b /// .merge(serde_json::from_str(r#"{"name":"My Server"}"#)?)) /// ``` pub fn merge(mut self, other: $name) -> $name { self.0.extend(other.0); self } } )* } } builder! { /// Patch content for the `edit_server` call. EditServer(Object); /// Patch content for the `edit_channel` call. EditChannel(Object); /// Patch content for the `edit_member` call. EditMember(Object); /// Patch content for the `edit_profile` call. EditProfile(Object); /// Patch content for the `edit_user_profile` call. EditUserProfile(Object); /// Patch content for the `edit_role` call. EditRole(Object); /// Content for the `send_message` call. SendMessage(Object); /// `allowed_mentions` object for use within `send_message`. AllowedMentions(Object); /// Patch content for the `send_embed` call. EmbedBuilder(Object); /// Inner patch content for the `send_embed` call. EmbedFooterBuilder(Object); /// Inner patch content for the `send_embed` call. EmbedAuthorBuilder(Object); /// Inner patch content for the `send_embed` call. EmbedFieldsBuilder(Vec<Value>); } macro_rules! set { ($self:ident, $key:expr, $($rest:tt)*) => {{ {let mut s = $self; s.0.insert($key.into(), json!($($rest)*)); s} }} } impl EditServer { /// Edit the server's name. pub fn name(self, name: &str) -> Self { set!(self, "name", name) } /// Edit the server's voice region. pub fn region(self, region: &str) -> Self { set!(self, "region", region) } /// Edit the server's icon. Use `None` to remove the icon. pub fn icon(self, icon: Option<&str>) -> Self { set!(self, "icon", icon) } /// Edit the server's AFK channel. Use `None` to select no AFK channel. pub fn afk_channel(self, channel: Option<ChannelId>) -> Self { set!(self, "afk_channel_id", channel) } /// Edit the server's AFK timeout. pub fn afk_timeout(self, timeout: u64) -> Self { set!(self, "afk_timeout", timeout) } /// Transfer ownership of the server to a new owner. pub fn owner(self, owner: UserId) -> Self { set!(self, "owner_id", owner.0) } /// Edit the verification level of the server. pub fn verification_level(self, verification_level: VerificationLevel) -> Self { set!(self, "verification_level", verification_level) } /// Edit the server's splash. Use `None` to remove the splash. pub fn splash(self, splash: Option<&str>) -> Self { set!(self, "splash", splash) } } impl EditChannel { /// Edit the channel's name. pub fn name(self, name: &str) -> Self { set!(self, "name", name) } /// Edit the text channel's topic. 
pub fn topic(self, topic: &str) -> Self { set!(self, "topic", topic) } /// Edit the channel's position in the list. pub fn position(self, position: u64) -> Self { set!(self, "position", position) } /// Edit the voice channel's bitrate. pub fn
(self, bitrate: u64) -> Self { set!(self, "bitrate", bitrate) } /// Edit the voice channel's user limit. Zero (`0`) means unlimited. pub fn user_limit(self, user_limit: u64) -> Self { set!(self, "user_limit", user_limit) } } impl EditMember { /// Edit the member's nickname. Supply the empty string to remove a nickname. pub fn nickname(self, nick: &str) -> Self { set!(self, "nick", nick) } /// Edit whether the member is server-muted. pub fn mute(self, mute: bool) -> Self { set!(self, "mute", mute) } /// Edit whether the member is server-deafened. pub fn deaf(self, deafen: bool) -> Self { set!(self, "deaf", deafen) } /// Edit the member's assigned roles. pub fn roles(self, roles: &[RoleId]) -> Self { set!(self, "roles", roles) } /// Move the member to another voice channel. pub fn channel(self, channel: ChannelId) -> Self { set!(self, "channel_id", channel.0) } } impl EditProfile { /// Edit the user's username. Must be between 2 and 32 characters long. pub fn username(self, username: &str) -> Self { set!(self, "username", username) } /// Edit the user's avatar. Use `None` to remove the avatar. pub fn avatar(self, icon: Option<&str>) -> Self { set!(self, "avatar", icon) } } impl EditUserProfile { /// Provide the user's current password for authentication. Required if /// the email or password is being changed. pub fn password(self, password: &str) -> Self { set!(self, "password", password) } /// Edit the user's email address. pub fn email(self, email: &str) -> Self { set!(self, "email", email) } /// Edit the user's password. pub fn new_password(self, password: &str) -> Self { set!(self, "new_password", password) } /// Edit the user's username. Must be between 2 and 32 characters long. pub fn username(self, username: &str) -> Self { set!(self, "username", username) } /// Edit the user's avatar. Use `None` to remove the avatar. pub fn avatar(self, icon: Option<&str>) -> Self { set!(self, "avatar", icon) } } impl EditRole { /// Edit the role's name. Supply the empty string to remove a name. pub fn name(self, name: &str) -> Self { set!(self, "name", name) } /// Edit the role's permissions. pub fn permissions(self, permissions: Permissions) -> Self { set!(self, "permissions", permissions) } /// Edit the role's color. Set to zero for default. pub fn color(self, color: u64) -> Self { set!(self, "color", color) } /// Edit the role's hoist status (whether the role should be displayed separately in the sidebar). pub fn hoist(self, hoist: bool) -> Self { set!(self, "hoist", hoist) } /// Edit the role's mentionability, if the role can be mentioned. pub fn mentionable(self, mentionable: bool) -> Self { set!(self, "mentionable", mentionable) } } impl SendMessage { /// Set the text content of the message. pub fn content(self, content: &str) -> Self { set!(self, "content", content) } /// Set a nonce that can be used for optimistic message sending. pub fn nonce(self, nonce: &str) -> Self { set!(self, "nonce", nonce) } /// Set to true to use text-to-speech. pub fn tts(self, tts: bool) -> Self { set!(self, "tts", tts) } /// Embed rich content. pub fn embed<F: FnOnce(EmbedBuilder) -> EmbedBuilder>(self, f: F) -> Self { set!(self, "embed", EmbedBuilder::__build(f)) } /// Restrict allowed mentions for this message. pub fn allowed_mentions<F: FnOnce(AllowedMentions) -> AllowedMentions>(self, f: F) -> Self { set!(self, "allowed_mentions", AllowedMentions::__build(f)) } /// Reply to the given message, optionally mentioning the sender. 
/// /// The given `message_id` must be in the same channel that this message is /// being sent to. pub fn reply(self, message_id: MessageId, mention: bool) -> Self { set!(self, "message_reference", json! {{ "message_id": message_id, }}).allowed_mentions(|b| b.replied_user(mention)) } /// Change the message's flags. /// /// Can only be set while editing. Only `SUPPRESS_EMBEDS` can be edited on /// request. pub fn flags(self, flags: MessageFlags) -> Self { set!(self, "flags", flags) } // TODO: file, payload_json, message_reference } impl AllowedMentions { // TODO: parse, roles, users /// Set to `false` to disable mentioning a replied-to user. pub fn replied_user(self, replied_user: bool) -> Self { set!(self, "replied_user", replied_user) } } impl EmbedBuilder { /// Add the "title of embed". pub fn title(self, title: &str) -> Self { set!(self, "title", title) } /// Add the "description of embed". pub fn description(self, description: &str) -> Self { set!(self, "description", description) } /// Add the "url of embed". pub fn url(self, url: &str) -> Self { set!(self, "url", url) } /// Add the "timestamp of embed content". pub fn timestamp(self, timestamp: DateTime<FixedOffset>) -> Self { set!(self, "timestamp", timestamp.to_rfc3339()) } /// Add the "color code of the embed". pub fn color(self, color: u64) -> Self { set!(self, "color", color) } /// Add "footer information". See the `EmbedFooterBuilder` struct for the editable fields. pub fn footer<F: FnOnce(EmbedFooterBuilder) -> EmbedFooterBuilder>(self, f: F) -> Self { set!(self, "footer", EmbedFooterBuilder::__build(f)) } /// Add "source url of image". Only supports http(s). pub fn image(self, url: &str) -> Self { set!(self, "image", { "url": url }) } /// Add "source url of thumbnail". Only supports http(s). pub fn thumbnail(self, url: &str) -> Self { set!(self, "thumbnail", { "url": url }) } /// Add "author information". See the `EmbedAuthorBuilder` struct for the editable fields. pub fn author<F: FnOnce(EmbedAuthorBuilder) -> EmbedAuthorBuilder>(self, f: F) -> Self { set!(self, "author", EmbedAuthorBuilder::__build(f)) } /// Add "fields information". See the `EmbedFieldsBuilder` struct for the editable fields. pub fn fields<F: FnOnce(EmbedFieldsBuilder) -> EmbedFieldsBuilder>(self, f: F) -> Self { set!(self, "fields", EmbedFieldsBuilder::__build(f)) } } impl EmbedFooterBuilder { /// Add the "footer text". pub fn text(self, text: &str) -> Self { set!(self, "text", text) } /// Add the "url of footer icon". Only the http(s) protocols are supported. pub fn icon_url(self, icon_url: &str) -> Self { set!(self, "icon_url", icon_url) } } impl EmbedAuthorBuilder { /// Add the "name of author". pub fn name(self, name: &str) -> Self { set!(self, "name", name) } /// Add the "url of author". pub fn url(self, url: &str) -> Self { set!(self, "url", url) } /// Add the "url of author icon". Only the http(s) protocols are supported. pub fn icon_url(self, icon_url: &str) -> Self { set!(self, "icon_url", icon_url) } } impl EmbedFieldsBuilder { /// Add an entire field structure, representing a mapping from `name` to `value`. /// /// `inline` determines "whether or not this field should display inline". pub fn field(mut self, name: &str, value: &str, inline: bool) -> Self { self.0.push(json! {{ "name": name, "value": value, "inline": inline, }}); self } }
bitrate
identifier_name
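To make the chaining mechanics concrete, here is a sketch that drives `EditChannel` through the doc-hidden `__build` helper generated by the `builder!` macro. Real callers go through client methods (in the style of the `edit_server` example in the `merge` docs); calling `__build` directly is for illustration only, and it assumes `Object` is the crate's JSON map alias with serde_json's `json!` macro in scope.

fn edit_channel_sketch() {
    let patch: Object = EditChannel::__build(|b| b
        .name("voice-lounge") // hypothetical values throughout
        .bitrate(64_000)
        .user_limit(0)); // zero means unlimited

    // Each chained call inserted exactly one key into the JSON map.
    assert_eq!(patch["bitrate"], json!(64_000));
}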
builders.rs
//! Builder types used for patches and other complex data structures. //! //! These types do not usually need to be imported, but the methods available //! on them are very relevant to where they are used. use serde_json::Value; use chrono::offset::FixedOffset; use chrono::DateTime; use model::*; use Object; macro_rules! builder { ($(#[$attr:meta] $name:ident($inner:ty);)*) => { $( #[$attr] #[derive(Serialize, Deserialize)] pub struct $name($inner);
#[inline(always)] pub fn __build<F: FnOnce($name) -> $name>(f: F) -> $inner where $inner: Default { Self::__apply(f, Default::default()) } #[doc(hidden)] pub fn __apply<F: FnOnce($name) -> $name>(f: F, inp: $inner) -> $inner { f($name(inp)).0 } /// Merge this builder's contents with another of the same type. /// Keys in `other` will override those in `self`. /// /// This method is intended to be used with deserialized /// instances. Note that deserialization *does not* check that /// the keys are valid for the relevant API call. /// /// ```ignore /// discord.edit_server(|b| b /// .merge(serde_json::from_str(r#"{"name":"My Server"}"#)?)) /// ``` pub fn merge(mut self, other: $name) -> $name { self.0.extend(other.0); self } } )* } } builder! { /// Patch content for the `edit_server` call. EditServer(Object); /// Patch content for the `edit_channel` call. EditChannel(Object); /// Patch content for the `edit_member` call. EditMember(Object); /// Patch content for the `edit_profile` call. EditProfile(Object); /// Patch content for the `edit_user_profile` call. EditUserProfile(Object); /// Patch content for the `edit_role` call. EditRole(Object); /// Content for the `send_message` call. SendMessage(Object); /// `allowed_mentions` object for use within `send_message`. AllowedMentions(Object); /// Patch content for the `send_embed` call. EmbedBuilder(Object); /// Inner patch content for the `send_embed` call. EmbedFooterBuilder(Object); /// Inner patch content for the `send_embed` call. EmbedAuthorBuilder(Object); /// Inner patch content for the `send_embed` call. EmbedFieldsBuilder(Vec<Value>); } macro_rules! set { ($self:ident, $key:expr, $($rest:tt)*) => {{ {let mut s = $self; s.0.insert($key.into(), json!($($rest)*)); s} }} } impl EditServer { /// Edit the server's name. pub fn name(self, name: &str) -> Self { set!(self, "name", name) } /// Edit the server's voice region. pub fn region(self, region: &str) -> Self { set!(self, "region", region) } /// Edit the server's icon. Use `None` to remove the icon. pub fn icon(self, icon: Option<&str>) -> Self { set!(self, "icon", icon) } /// Edit the server's AFK channel. Use `None` to select no AFK channel. pub fn afk_channel(self, channel: Option<ChannelId>) -> Self { set!(self, "afk_channel_id", channel) } /// Edit the server's AFK timeout. pub fn afk_timeout(self, timeout: u64) -> Self { set!(self, "afk_timeout", timeout) } /// Transfer ownership of the server to a new owner. pub fn owner(self, owner: UserId) -> Self { set!(self, "owner_id", owner.0) } /// Edit the verification level of the server. pub fn verification_level(self, verification_level: VerificationLevel) -> Self { set!(self, "verification_level", verification_level) } /// Edit the server's splash. Use `None` to remove the splash. pub fn splash(self, splash: Option<&str>) -> Self { set!(self, "splash", splash) } } impl EditChannel { /// Edit the channel's name. pub fn name(self, name: &str) -> Self { set!(self, "name", name) } /// Edit the text channel's topic. pub fn topic(self, topic: &str) -> Self { set!(self, "topic", topic) } /// Edit the channel's position in the list. pub fn position(self, position: u64) -> Self { set!(self, "position", position) } /// Edit the voice channel's bitrate. pub fn bitrate(self, bitrate: u64) -> Self { set!(self, "bitrate", bitrate) } /// Edit the voice channel's user limit. Zero (`0`) means unlimited. pub fn user_limit(self, user_limit: u64) -> Self { set!(self, "user_limit", user_limit) } } impl EditMember { /// Edit the member's nickname. 
Supply the empty string to remove a nickname. pub fn nickname(self, nick: &str) -> Self { set!(self, "nick", nick) } /// Edit whether the member is server-muted. pub fn mute(self, mute: bool) -> Self { set!(self, "mute", mute) } /// Edit whether the member is server-deafened. pub fn deaf(self, deafen: bool) -> Self { set!(self, "deaf", deafen) } /// Edit the member's assigned roles. pub fn roles(self, roles: &[RoleId]) -> Self { set!(self, "roles", roles) } /// Move the member to another voice channel. pub fn channel(self, channel: ChannelId) -> Self { set!(self, "channel_id", channel.0) } } impl EditProfile { /// Edit the user's username. Must be between 2 and 32 characters long. pub fn username(self, username: &str) -> Self { set!(self, "username", username) } /// Edit the user's avatar. Use `None` to remove the avatar. pub fn avatar(self, icon: Option<&str>) -> Self { set!(self, "avatar", icon) } } impl EditUserProfile { /// Provide the user's current password for authentication. Required if /// the email or password is being changed. pub fn password(self, password: &str) -> Self { set!(self, "password", password) } /// Edit the user's email address. pub fn email(self, email: &str) -> Self { set!(self, "email", email) } /// Edit the user's password. pub fn new_password(self, password: &str) -> Self { set!(self, "new_password", password) } /// Edit the user's username. Must be between 2 and 32 characters long. pub fn username(self, username: &str) -> Self { set!(self, "username", username) } /// Edit the user's avatar. Use `None` to remove the avatar. pub fn avatar(self, icon: Option<&str>) -> Self { set!(self, "avatar", icon) } } impl EditRole { /// Edit the role's name. Supply the empty string to remove a name. pub fn name(self, name: &str) -> Self { set!(self, "name", name) } /// Edit the role's permissions. pub fn permissions(self, permissions: Permissions) -> Self { set!(self, "permissions", permissions) } /// Edit the role's color. Set to zero for default. pub fn color(self, color: u64) -> Self { set!(self, "color", color) } /// Edit the role's hoist status (whether the role should be displayed separately in the sidebar). pub fn hoist(self, hoist: bool) -> Self { set!(self, "hoist", hoist) } /// Edit the role's mentionability, if the role can be mentioned. pub fn mentionable(self, mentionable: bool) -> Self { set!(self, "mentionable", mentionable) } } impl SendMessage { /// Set the text content of the message. pub fn content(self, content: &str) -> Self { set!(self, "content", content) } /// Set a nonce that can be used for optimistic message sending. pub fn nonce(self, nonce: &str) -> Self { set!(self, "nonce", nonce) } /// Set to true to use text-to-speech. pub fn tts(self, tts: bool) -> Self { set!(self, "tts", tts) } /// Embed rich content. pub fn embed<F: FnOnce(EmbedBuilder) -> EmbedBuilder>(self, f: F) -> Self { set!(self, "embed", EmbedBuilder::__build(f)) } /// Restrict allowed mentions for this message. pub fn allowed_mentions<F: FnOnce(AllowedMentions) -> AllowedMentions>(self, f: F) -> Self { set!(self, "allowed_mentions", AllowedMentions::__build(f)) } /// Reply to the given message, optionally mentioning the sender. /// /// The given `message_id` must be in the same channel that this message is /// being sent to. pub fn reply(self, message_id: MessageId, mention: bool) -> Self { set!(self, "message_reference", json! {{ "message_id": message_id, }}).allowed_mentions(|b| b.replied_user(mention)) } /// Change the message's flags. /// /// Can only be set while editing. 
Only `SUPPRESS_EMBEDS` can be edited on /// request. pub fn flags(self, flags: MessageFlags) -> Self { set!(self, "flags", flags) } // TODO: file, payload_json, message_reference } impl AllowedMentions { // TODO: parse, roles, users /// Set to `false` to disable mentioning a replied-to user. pub fn replied_user(self, replied_user: bool) -> Self { set!(self, "replied_user", replied_user) } } impl EmbedBuilder { /// Add the "title of embed". pub fn title(self, title: &str) -> Self { set!(self, "title", title) } /// Add the "description of embed". pub fn description(self, description: &str) -> Self { set!(self, "description", description) } /// Add the "url of embed". pub fn url(self, url: &str) -> Self { set!(self, "url", url) } /// Add the "timestamp of embed content". pub fn timestamp(self, timestamp: DateTime<FixedOffset>) -> Self { set!(self, "timestamp", timestamp.to_rfc3339()) } /// Add the "color code of the embed". pub fn color(self, color: u64) -> Self { set!(self, "color", color) } /// Add "footer information". See the `EmbedFooterBuilder` struct for the editable fields. pub fn footer<F: FnOnce(EmbedFooterBuilder) -> EmbedFooterBuilder>(self, f: F) -> Self { set!(self, "footer", EmbedFooterBuilder::__build(f)) } /// Add "source url of image". Only supports http(s). pub fn image(self, url: &str) -> Self { set!(self, "image", { "url": url }) } /// Add "source url of thumbnail". Only supports http(s). pub fn thumbnail(self, url: &str) -> Self { set!(self, "thumbnail", { "url": url }) } /// Add "author information". See the `EmbedAuthorBuilder` struct for the editable fields. pub fn author<F: FnOnce(EmbedAuthorBuilder) -> EmbedAuthorBuilder>(self, f: F) -> Self { set!(self, "author", EmbedAuthorBuilder::__build(f)) } /// Add "fields information". See the `EmbedFieldsBuilder` struct for the editable fields. pub fn fields<F: FnOnce(EmbedFieldsBuilder) -> EmbedFieldsBuilder>(self, f: F) -> Self { set!(self, "fields", EmbedFieldsBuilder::__build(f)) } } impl EmbedFooterBuilder { /// Add the "footer text". pub fn text(self, text: &str) -> Self { set!(self, "text", text) } /// Add the "url of footer icon". Only the http(s) protocols are supported. pub fn icon_url(self, icon_url: &str) -> Self { set!(self, "icon_url", icon_url) } } impl EmbedAuthorBuilder { /// Add the "name of author". pub fn name(self, name: &str) -> Self { set!(self, "name", name) } /// Add the "url of author". pub fn url(self, url: &str) -> Self { set!(self, "url", url) } /// Add the "url of author icon". Only the http(s) protocols are supported. pub fn icon_url(self, icon_url: &str) -> Self { set!(self, "icon_url", icon_url) } } impl EmbedFieldsBuilder { /// Add an entire field structure, representing a mapping from `name` to `value`. /// /// `inline` determines "whether or not this field should display inline". pub fn field(mut self, name: &str, value: &str, inline: bool) -> Self { self.0.push(json! {{ "name": name, "value": value, "inline": inline, }}); self } }
impl $name { #[doc(hidden)]
random_line_split
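A sketch of the `merge` semantics under the same caveats (doc-hidden helpers called directly, `Object` as the crate's JSON map alias): keys deserialized from a raw JSON patch override keys already set on the builder, and unrelated keys survive.

fn merge_sketch() {
    // Start from a builder-produced map...
    let base: Object = EditServer::__build(|b| b.name("Old Name").afk_timeout(300));

    // ...deserialize a raw patch (possible because builder! derives Deserialize)...
    let patch: EditServer = serde_json::from_str(r#"{"name":"My Server"}"#).unwrap();

    // ...and merge: keys in `patch` win on conflict.
    let merged: Object = EditServer::__apply(|b| b.merge(patch), base);
    assert_eq!(merged["name"], json!("My Server"));
    assert_eq!(merged["afk_timeout"], json!(300));
}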
builders.rs
//! Builder types used for patches and other complex data structures. //! //! These types do not usually need to be imported, but the methods available //! on them are very relevant to where they are used. use serde_json::Value; use chrono::offset::FixedOffset; use chrono::DateTime; use model::*; use Object; macro_rules! builder { ($(#[$attr:meta] $name:ident($inner:ty);)*) => { $( #[$attr] #[derive(Serialize, Deserialize)] pub struct $name($inner); impl $name { #[doc(hidden)] #[inline(always)] pub fn __build<F: FnOnce($name) -> $name>(f: F) -> $inner where $inner: Default { Self::__apply(f, Default::default()) } #[doc(hidden)] pub fn __apply<F: FnOnce($name) -> $name>(f: F, inp: $inner) -> $inner { f($name(inp)).0 } /// Merge this builder's contents with another of the same type. /// Keys in `other` will override those in `self`. /// /// This method is intended to be used with deserialized /// instances. Note that deserialization *does not* check that /// the keys are valid for the relevant API call. /// /// ```ignore /// discord.edit_server(|b| b /// .merge(serde_json::from_str(r#"{"name":"My Server"}"#)?)) /// ``` pub fn merge(mut self, other: $name) -> $name { self.0.extend(other.0); self } } )* } } builder! { /// Patch content for the `edit_server` call. EditServer(Object); /// Patch content for the `edit_channel` call. EditChannel(Object); /// Patch content for the `edit_member` call. EditMember(Object); /// Patch content for the `edit_profile` call. EditProfile(Object); /// Patch content for the `edit_user_profile` call. EditUserProfile(Object); /// Patch content for the `edit_role` call. EditRole(Object); /// Content for the `send_message` call. SendMessage(Object); /// `allowed_mentions` object for use within `send_message`. AllowedMentions(Object); /// Patch content for the `send_embed` call. EmbedBuilder(Object); /// Inner patch content for the `send_embed` call. EmbedFooterBuilder(Object); /// Inner patch content for the `send_embed` call. EmbedAuthorBuilder(Object); /// Inner patch content for the `send_embed` call. EmbedFieldsBuilder(Vec<Value>); } macro_rules! set { ($self:ident, $key:expr, $($rest:tt)*) => {{ {let mut s = $self; s.0.insert($key.into(), json!($($rest)*)); s} }} } impl EditServer { /// Edit the server's name. pub fn name(self, name: &str) -> Self { set!(self, "name", name) } /// Edit the server's voice region. pub fn region(self, region: &str) -> Self { set!(self, "region", region) } /// Edit the server's icon. Use `None` to remove the icon. pub fn icon(self, icon: Option<&str>) -> Self { set!(self, "icon", icon) } /// Edit the server's AFK channel. Use `None` to select no AFK channel. pub fn afk_channel(self, channel: Option<ChannelId>) -> Self { set!(self, "afk_channel_id", channel) } /// Edit the server's AFK timeout. pub fn afk_timeout(self, timeout: u64) -> Self
/// Transfer ownership of the server to a new owner. pub fn owner(self, owner: UserId) -> Self { set!(self, "owner_id", owner.0) } /// Edit the verification level of the server. pub fn verification_level(self, verification_level: VerificationLevel) -> Self { set!(self, "verification_level", verification_level) } /// Edit the server's splash. Use `None` to remove the splash. pub fn splash(self, splash: Option<&str>) -> Self { set!(self, "splash", splash) } } impl EditChannel { /// Edit the channel's name. pub fn name(self, name: &str) -> Self { set!(self, "name", name) } /// Edit the text channel's topic. pub fn topic(self, topic: &str) -> Self { set!(self, "topic", topic) } /// Edit the channel's position in the list. pub fn position(self, position: u64) -> Self { set!(self, "position", position) } /// Edit the voice channel's bitrate. pub fn bitrate(self, bitrate: u64) -> Self { set!(self, "bitrate", bitrate) } /// Edit the voice channel's user limit. Zero (`0`) means unlimited. pub fn user_limit(self, user_limit: u64) -> Self { set!(self, "user_limit", user_limit) } } impl EditMember { /// Edit the member's nickname. Supply the empty string to remove a nickname. pub fn nickname(self, nick: &str) -> Self { set!(self, "nick", nick) } /// Edit whether the member is server-muted. pub fn mute(self, mute: bool) -> Self { set!(self, "mute", mute) } /// Edit whether the member is server-deafened. pub fn deaf(self, deafen: bool) -> Self { set!(self, "deaf", deafen) } /// Edit the member's assigned roles. pub fn roles(self, roles: &[RoleId]) -> Self { set!(self, "roles", roles) } /// Move the member to another voice channel. pub fn channel(self, channel: ChannelId) -> Self { set!(self, "channel_id", channel.0) } } impl EditProfile { /// Edit the user's username. Must be between 2 and 32 characters long. pub fn username(self, username: &str) -> Self { set!(self, "username", username) } /// Edit the user's avatar. Use `None` to remove the avatar. pub fn avatar(self, icon: Option<&str>) -> Self { set!(self, "avatar", icon) } } impl EditUserProfile { /// Provide the user's current password for authentication. Required if /// the email or password is being changed. pub fn password(self, password: &str) -> Self { set!(self, "password", password) } /// Edit the user's email address. pub fn email(self, email: &str) -> Self { set!(self, "email", email) } /// Edit the user's password. pub fn new_password(self, password: &str) -> Self { set!(self, "new_password", password) } /// Edit the user's username. Must be between 2 and 32 characters long. pub fn username(self, username: &str) -> Self { set!(self, "username", username) } /// Edit the user's avatar. Use `None` to remove the avatar. pub fn avatar(self, icon: Option<&str>) -> Self { set!(self, "avatar", icon) } } impl EditRole { /// Edit the role's name. Supply the empty string to remove a name. pub fn name(self, name: &str) -> Self { set!(self, "name", name) } /// Edit the role's permissions. pub fn permissions(self, permissions: Permissions) -> Self { set!(self, "permissions", permissions) } /// Edit the role's color. Set to zero for default. pub fn color(self, color: u64) -> Self { set!(self, "color", color) } /// Edit the role's hoist status (whether the role should be displayed separately in the sidebar). pub fn hoist(self, hoist: bool) -> Self { set!(self, "hoist", hoist) } /// Edit the role's mentionability, if the role can be mentioned. 
pub fn mentionable(self, mentionable: bool) -> Self { set!(self, "mentionable", mentionable) } } impl SendMessage { /// Set the text content of the message. pub fn content(self, content: &str) -> Self { set!(self, "content", content) } /// Set a nonce that can be used for optimistic message sending. pub fn nonce(self, nonce: &str) -> Self { set!(self, "nonce", nonce) } /// Set to true to use text-to-speech. pub fn tts(self, tts: bool) -> Self { set!(self, "tts", tts) } /// Embed rich content. pub fn embed<F: FnOnce(EmbedBuilder) -> EmbedBuilder>(self, f: F) -> Self { set!(self, "embed", EmbedBuilder::__build(f)) } /// Restrict allowed mentions for this message. pub fn allowed_mentions<F: FnOnce(AllowedMentions) -> AllowedMentions>(self, f: F) -> Self { set!(self, "allowed_mentions", AllowedMentions::__build(f)) } /// Reply to the given message, optionally mentioning the sender. /// /// The given `message_id` must be in the same channel that this message is /// being sent to. pub fn reply(self, message_id: MessageId, mention: bool) -> Self { set!(self, "message_reference", json! {{ "message_id": message_id, }}).allowed_mentions(|b| b.replied_user(mention)) } /// Change the message's flags. /// /// Can only be set while editing. Only `SUPPRESS_EMBEDS` can be edited on /// request. pub fn flags(self, flags: MessageFlags) -> Self { set!(self, "flags", flags) } // TODO: file, payload_json, message_reference } impl AllowedMentions { // TODO: parse, roles, users /// Set to `false` to disable mentioning a replied-to user. pub fn replied_user(self, replied_user: bool) -> Self { set!(self, "replied_user", replied_user) } } impl EmbedBuilder { /// Add the "title of embed". pub fn title(self, title: &str) -> Self { set!(self, "title", title) } /// Add the "description of embed". pub fn description(self, description: &str) -> Self { set!(self, "description", description) } /// Add the "url of embed". pub fn url(self, url: &str) -> Self { set!(self, "url", url) } /// Add the "timestamp of embed content". pub fn timestamp(self, timestamp: DateTime<FixedOffset>) -> Self { set!(self, "timestamp", timestamp.to_rfc3339()) } /// Add the "color code of the embed". pub fn color(self, color: u64) -> Self { set!(self, "color", color) } /// Add "footer information". See the `EmbedFooterBuilder` struct for the editable fields. pub fn footer<F: FnOnce(EmbedFooterBuilder) -> EmbedFooterBuilder>(self, f: F) -> Self { set!(self, "footer", EmbedFooterBuilder::__build(f)) } /// Add "source url of image". Only supports http(s). pub fn image(self, url: &str) -> Self { set!(self, "image", { "url": url }) } /// Add "source url of thumbnail". Only supports http(s). pub fn thumbnail(self, url: &str) -> Self { set!(self, "thumbnail", { "url": url }) } /// Add "author information". See the `EmbedAuthorBuilder` struct for the editable fields. pub fn author<F: FnOnce(EmbedAuthorBuilder) -> EmbedAuthorBuilder>(self, f: F) -> Self { set!(self, "author", EmbedAuthorBuilder::__build(f)) } /// Add "fields information". See the `EmbedFieldsBuilder` struct for the editable fields. pub fn fields<F: FnOnce(EmbedFieldsBuilder) -> EmbedFieldsBuilder>(self, f: F) -> Self { set!(self, "fields", EmbedFieldsBuilder::__build(f)) } } impl EmbedFooterBuilder { /// Add the "footer text". pub fn text(self, text: &str) -> Self { set!(self, "text", text) } /// Add the "url of footer icon". Only the http(s) protocols are supported. 
pub fn icon_url(self, icon_url: &str) -> Self { set!(self, "icon_url", icon_url) } } impl EmbedAuthorBuilder { /// Add the "name of author". pub fn name(self, name: &str) -> Self { set!(self, "name", name) } /// Add the "url of author". pub fn url(self, url: &str) -> Self { set!(self, "url", url) } /// Add the "url of author icon". Only the http(s) protocols are supported. pub fn icon_url(self, icon_url: &str) -> Self { set!(self, "icon_url", icon_url) } } impl EmbedFieldsBuilder { /// Add an entire field structure, representing a mapping from `name` to `value`. /// /// `inline` determines "whether or not this field should display inline". pub fn field(mut self, name: &str, value: &str, inline: bool) -> Self { self.0.push(json! {{ "name": name, "value": value, "inline": inline, }}); self } }
{ set!(self, "afk_timeout", timeout) }
identifier_body
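A sketch composing `SendMessage` with the nested `EmbedBuilder` closures defined above; the message and embed values are made up, and `__build` is again called directly only to show the JSON payload the closures produce.

fn send_message_sketch() {
    let msg: Object = SendMessage::__build(|b| b
        .content("Deployment finished") // hypothetical message text
        .tts(false)
        .embed(|e| e
            .title("Build #42")
            .description("All tests passed.")
            .color(0x00FF00)
            .footer(|f| f.text("CI bot"))));

    // The nested closure produced a nested JSON object under "embed".
    assert_eq!(msg["embed"]["title"], json!("Build #42"));
}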
verify_cert.rs
// Copyright 2015 Brian Smith. // // Permission to use, copy, modify, and/or distribute this software for any // purpose with or without fee is hereby granted, provided that the above // copyright notice and this permission notice appear in all copies. // // THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHORS DISCLAIM ALL WARRANTIES // WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF // MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR // ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES // WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN // ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF // OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. use crate::{ cert::{self, Cert, EndEntityOrCa}, der, name, signed_data, time, Error, SignatureAlgorithm, TrustAnchor, }; pub fn build_chain( required_eku_if_present: KeyPurposeId, supported_sig_algs: &[&SignatureAlgorithm], trust_anchors: &[TrustAnchor], intermediate_certs: &[&[u8]], cert: &Cert, time: time::Time, sub_ca_count: usize, ) -> Result<(), Error> { let used_as_ca = used_as_ca(&cert.ee_or_ca); check_issuer_independent_properties( cert, time, used_as_ca, sub_ca_count, required_eku_if_present, )?; // TODO: HPKP checks. match used_as_ca { UsedAsCa::Yes => { const MAX_SUB_CA_COUNT: usize = 6; if sub_ca_count >= MAX_SUB_CA_COUNT { return Err(Error::UnknownIssuer); } } UsedAsCa::No => { assert_eq!(0, sub_ca_count); } } // TODO: revocation. match loop_while_non_fatal_error(trust_anchors, |trust_anchor: &TrustAnchor| { let trust_anchor_subject = untrusted::Input::from(trust_anchor.subject); if cert.issuer != trust_anchor_subject { return Err(Error::UnknownIssuer); } let name_constraints = trust_anchor.name_constraints.map(untrusted::Input::from); untrusted::read_all_optional(name_constraints, Error::BadDer, |value| { name::check_name_constraints(value, &cert) })?; let trust_anchor_spki = untrusted::Input::from(trust_anchor.spki); // TODO: check_distrust(trust_anchor_subject, trust_anchor_spki)?; check_signatures(supported_sig_algs, cert, trust_anchor_spki)?; Ok(()) }) { Ok(()) => { return Ok(()); } Err(..) => { // If the error is not fatal, then keep going. } } loop_while_non_fatal_error(intermediate_certs, |cert_der| { let potential_issuer = cert::parse_cert(untrusted::Input::from(*cert_der), EndEntityOrCa::Ca(&cert))?; if potential_issuer.subject != cert.issuer { return Err(Error::UnknownIssuer); } // Prevent loops; see RFC 4158 section 5.2.
let mut prev = cert; loop { if potential_issuer.spki.value() == prev.spki.value() && potential_issuer.subject == prev.subject { return Err(Error::UnknownIssuer); } match &prev.ee_or_ca { EndEntityOrCa::EndEntity => { break; } EndEntityOrCa::Ca(child_cert) => { prev = child_cert; } } } untrusted::read_all_optional(potential_issuer.name_constraints, Error::BadDer, |value| { name::check_name_constraints(value, &cert) })?; let next_sub_ca_count = match used_as_ca { UsedAsCa::No => sub_ca_count, UsedAsCa::Yes => sub_ca_count + 1, }; build_chain( required_eku_if_present, supported_sig_algs, trust_anchors, intermediate_certs, &potential_issuer, time, next_sub_ca_count, ) }) } fn check_signatures( supported_sig_algs: &[&SignatureAlgorithm], cert_chain: &Cert, trust_anchor_key: untrusted::Input, ) -> Result<(), Error> { let mut spki_value = trust_anchor_key; let mut cert = cert_chain; loop { signed_data::verify_signed_data(supported_sig_algs, spki_value, &cert.signed_data)?; // TODO: check revocation match &cert.ee_or_ca { EndEntityOrCa::Ca(child_cert) => { spki_value = cert.spki.value(); cert = child_cert; } EndEntityOrCa::EndEntity => { break; } } } Ok(()) } fn check_issuer_independent_properties( cert: &Cert, time: time::Time, used_as_ca: UsedAsCa, sub_ca_count: usize, required_eku_if_present: KeyPurposeId, ) -> Result<(), Error> { // TODO: check_distrust(trust_anchor_subject, trust_anchor_spki)?; // TODO: Check signature algorithm like mozilla::pkix. // TODO: Check SPKI like mozilla::pkix. // TODO: check for active distrust like mozilla::pkix. // See the comment in `remember_extension` for why we don't check the // KeyUsage extension. cert.validity .read_all(Error::BadDer, |value| check_validity(value, time))?; untrusted::read_all_optional(cert.basic_constraints, Error::BadDer, |value| { check_basic_constraints(value, used_as_ca, sub_ca_count) })?; untrusted::read_all_optional(cert.eku, Error::BadDer, |value| { check_eku(value, required_eku_if_present) })?; Ok(()) } // https://tools.ietf.org/html/rfc5280#section-4.1.2.5 fn check_validity(input: &mut untrusted::Reader, time: time::Time) -> Result<(), Error> { let not_before = der::time_choice(input)?; let not_after = der::time_choice(input)?; if not_before > not_after { return Err(Error::InvalidCertValidity); } if time < not_before { return Err(Error::CertNotValidYet); } if time > not_after { return Err(Error::CertExpired); } // TODO: mozilla::pkix allows the TrustDomain to check not_before and // not_after, to enforce things like a maximum validity period. We should // do something similar. Ok(()) } #[derive(Clone, Copy)] enum UsedAsCa { Yes, No, } fn used_as_ca(ee_or_ca: &EndEntityOrCa) -> UsedAsCa { match ee_or_ca { EndEntityOrCa::EndEntity => UsedAsCa::No, EndEntityOrCa::Ca(..) => UsedAsCa::Yes, } } // https://tools.ietf.org/html/rfc5280#section-4.2.1.9 fn
( input: Option<&mut untrusted::Reader>, used_as_ca: UsedAsCa, sub_ca_count: usize, ) -> Result<(), Error> { let (is_ca, path_len_constraint) = match input { Some(input) => { let is_ca = der::optional_boolean(input)?; // https://bugzilla.mozilla.org/show_bug.cgi?id=985025: RFC 5280 // says that a certificate must not have pathLenConstraint unless // it is a CA certificate, but some real-world end-entity // certificates have pathLenConstraint. let path_len_constraint = if !input.at_end() { let value = der::small_nonnegative_integer(input)?; Some(usize::from(value)) } else { None }; (is_ca, path_len_constraint) } None => (false, None), }; match (used_as_ca, is_ca, path_len_constraint) { (UsedAsCa::No, true, _) => Err(Error::CaUsedAsEndEntity), (UsedAsCa::Yes, false, _) => Err(Error::EndEntityUsedAsCa), (UsedAsCa::Yes, true, Some(len)) if sub_ca_count > len => { Err(Error::PathLenConstraintViolated) } _ => Ok(()), } } #[derive(Clone, Copy)] pub struct KeyPurposeId { oid_value: untrusted::Input<'static>, } // id-pkix OBJECT IDENTIFIER ::= { 1 3 6 1 5 5 7 } // id-kp OBJECT IDENTIFIER ::= { id-pkix 3 } // id-kp-serverAuth OBJECT IDENTIFIER ::= { id-kp 1 } #[allow(clippy::identity_op)] // TODO: Make this clearer pub static EKU_SERVER_AUTH: KeyPurposeId = KeyPurposeId { oid_value: untrusted::Input::from(&[(40 * 1) + 3, 6, 1, 5, 5, 7, 3, 1]), }; // id-kp-clientAuth OBJECT IDENTIFIER ::= { id-kp 2 } #[allow(clippy::identity_op)] // TODO: Make this clearer pub static EKU_CLIENT_AUTH: KeyPurposeId = KeyPurposeId { oid_value: untrusted::Input::from(&[(40 * 1) + 3, 6, 1, 5, 5, 7, 3, 2]), }; // id-kp-OCSPSigning OBJECT IDENTIFIER ::= { id-kp 9 } #[allow(clippy::identity_op)] // TODO: Make this clearer pub static EKU_OCSP_SIGNING: KeyPurposeId = KeyPurposeId { oid_value: untrusted::Input::from(&[(40 * 1) + 3, 6, 1, 5, 5, 7, 3, 9]), }; // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 // // Notable Differences from RFC 5280: // // * We follow the convention established by Microsoft's implementation and // mozilla::pkix of treating the EKU extension in a CA certificate as a // restriction on the allowable EKUs for certificates issued by that CA. RFC // 5280 doesn't prescribe any meaning to the EKU extension when a certificate // is being used as a CA certificate. // // * We do not recognize anyExtendedKeyUsage. NSS and mozilla::pkix do not // recognize it either. // // * We treat id-Netscape-stepUp as being equivalent to id-kp-serverAuth in CA // certificates (only). Comodo has issued certificates that require this // behavior that don't expire until June 2020. See https://bugzilla.mozilla.org/show_bug.cgi?id=982292. fn check_eku( input: Option<&mut untrusted::Reader>, required_eku_if_present: KeyPurposeId, ) -> Result<(), Error> { match input { Some(input) => { loop { let value = der::expect_tag_and_get_value(input, der::Tag::OID)?; if value == required_eku_if_present.oid_value { input.skip_to_end(); break; } if input.at_end() { return Err(Error::RequiredEkuNotFound); } } Ok(()) } None => { // http://tools.ietf.org/html/rfc6960#section-4.2.2.2: // "OCSP signing delegation SHALL be designated by the inclusion of // id-kp-OCSPSigning in an extended key usage certificate extension // included in the OCSP response signer's certificate." // // A missing EKU extension generally means "any EKU", but it is // important that id-kp-OCSPSigning is explicit so that a normal // end-entity certificate isn't able to sign trusted OCSP responses // for itself or for other certificates issued by its issuing CA.
if required_eku_if_present.oid_value == EKU_OCSP_SIGNING.oid_value { return Err(Error::RequiredEkuNotFound); } Ok(()) } } } fn loop_while_non_fatal_error<V>( values: V, f: impl Fn(V::Item) -> Result<(), Error>, ) -> Result<(), Error> where V: IntoIterator, { for v in values { match f(v) { Ok(()) => { return Ok(()); } Err(..) => { // If the error is not fatal, then keep going. } } } Err(Error::UnknownIssuer) }
check_basic_constraints
identifier_name
verify_cert.rs
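// A minimal, self-contained sketch of the pathLenConstraint rule that
// `check_basic_constraints` enforces in the row above. All names here
// (`BasicConstraintsError`, `path_len_ok`, the bool flags standing in for
// `UsedAsCa`) are hypothetical illustrations, not webpki's API: a CA
// certificate carrying pathLenConstraint `len` may have at most `len`
// subordinate CAs beneath it, so the check fails once `sub_ca_count`
// exceeds `len`.

#[derive(Debug, PartialEq)]
enum BasicConstraintsError {
    CaUsedAsEndEntity,
    EndEntityUsedAsCa,
    PathLenConstraintViolated,
}

fn path_len_ok(
    used_as_ca: bool,
    is_ca: bool,
    path_len_constraint: Option<usize>,
    sub_ca_count: usize,
) -> Result<(), BasicConstraintsError> {
    match (used_as_ca, is_ca, path_len_constraint) {
        // An end-entity position must not be filled by a CA certificate...
        (false, true, _) => Err(BasicConstraintsError::CaUsedAsEndEntity),
        // ...and a CA position must not be filled by an end-entity certificate.
        (true, false, _) => Err(BasicConstraintsError::EndEntityUsedAsCa),
        // Too many subordinate CAs below a constrained CA.
        (true, true, Some(len)) if sub_ca_count > len => {
            Err(BasicConstraintsError::PathLenConstraintViolated)
        }
        _ => Ok(()),
    }
}

#[test]
fn path_len_constraint_examples() {
    // pathLenConstraint=0: the CA may issue end-entity certificates only, so
    // zero subordinate CAs below it is fine but one is a violation.
    assert_eq!(path_len_ok(true, true, Some(0), 0), Ok(()));
    assert_eq!(
        path_len_ok(true, true, Some(0), 1),
        Err(BasicConstraintsError::PathLenConstraintViolated)
    );
}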
// Copyright 2015 Brian Smith. // // Permission to use, copy, modify, and/or distribute this software for any // purpose with or without fee is hereby granted, provided that the above // copyright notice and this permission notice appear in all copies. // // THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHORS DISCLAIM ALL WARRANTIES // WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF // MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR // ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES // WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN // ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF // OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. use crate::{ cert::{self, Cert, EndEntityOrCa}, der, name, signed_data, time, Error, SignatureAlgorithm, TrustAnchor, }; pub fn build_chain( required_eku_if_present: KeyPurposeId, supported_sig_algs: &[&SignatureAlgorithm], trust_anchors: &[TrustAnchor], intermediate_certs: &[&[u8]], cert: &Cert, time: time::Time, sub_ca_count: usize, ) -> Result<(), Error> { let used_as_ca = used_as_ca(&cert.ee_or_ca); check_issuer_independent_properties( cert, time, used_as_ca, sub_ca_count, required_eku_if_present, )?; // TODO: HPKP checks. match used_as_ca { UsedAsCa::Yes => { const MAX_SUB_CA_COUNT: usize = 6; if sub_ca_count >= MAX_SUB_CA_COUNT { return Err(Error::UnknownIssuer); } } UsedAsCa::No => { assert_eq!(0, sub_ca_count); } } // TODO: revocation. match loop_while_non_fatal_error(trust_anchors, |trust_anchor: &TrustAnchor| { let trust_anchor_subject = untrusted::Input::from(trust_anchor.subject); if cert.issuer != trust_anchor_subject { return Err(Error::UnknownIssuer); } let name_constraints = trust_anchor.name_constraints.map(untrusted::Input::from); untrusted::read_all_optional(name_constraints, Error::BadDer, |value| { name::check_name_constraints(value, &cert) })?; let trust_anchor_spki = untrusted::Input::from(trust_anchor.spki); // TODO: check_distrust(trust_anchor_subject, trust_anchor_spki)?; check_signatures(supported_sig_algs, cert, trust_anchor_spki)?; Ok(()) }) { Ok(()) => { return Ok(()); } Err(..) => { // If the error is not fatal, then keep going. } } loop_while_non_fatal_error(intermediate_certs, |cert_der| { let potential_issuer = cert::parse_cert(untrusted::Input::from(*cert_der), EndEntityOrCa::Ca(&cert))?; if potential_issuer.subject != cert.issuer { return Err(Error::UnknownIssuer); } // Prevent loops; see RFC 4158 section 5.2.
let mut prev = cert; loop { if potential_issuer.spki.value() == prev.spki.value() && potential_issuer.subject == prev.subject { return Err(Error::UnknownIssuer); } match &prev.ee_or_ca { EndEntityOrCa::EndEntity => { break; } EndEntityOrCa::Ca(child_cert) => { prev = child_cert; } } } untrusted::read_all_optional(potential_issuer.name_constraints, Error::BadDer, |value| { name::check_name_constraints(value, &cert) })?; let next_sub_ca_count = match used_as_ca { UsedAsCa::No => sub_ca_count, UsedAsCa::Yes => sub_ca_count + 1, }; build_chain( required_eku_if_present, supported_sig_algs, trust_anchors, intermediate_certs, &potential_issuer, time, next_sub_ca_count, ) }) } fn check_signatures( supported_sig_algs: &[&SignatureAlgorithm], cert_chain: &Cert, trust_anchor_key: untrusted::Input, ) -> Result<(), Error> { let mut spki_value = trust_anchor_key; let mut cert = cert_chain; loop { signed_data::verify_signed_data(supported_sig_algs, spki_value, &cert.signed_data)?; // TODO: check revocation match &cert.ee_or_ca { EndEntityOrCa::Ca(child_cert) => { spki_value = cert.spki.value(); cert = child_cert; } EndEntityOrCa::EndEntity => { break; } } } Ok(()) } fn check_issuer_independent_properties( cert: &Cert, time: time::Time, used_as_ca: UsedAsCa, sub_ca_count: usize, required_eku_if_present: KeyPurposeId, ) -> Result<(), Error>
// https://tools.ietf.org/html/rfc5280#section-4.1.2.5 fn check_validity(input: &mut untrusted::Reader, time: time::Time) -> Result<(), Error> { let not_before = der::time_choice(input)?; let not_after = der::time_choice(input)?; if not_before > not_after { return Err(Error::InvalidCertValidity); } if time < not_before { return Err(Error::CertNotValidYet); } if time > not_after { return Err(Error::CertExpired); } // TODO: mozilla::pkix allows the TrustDomain to check not_before and // not_after, to enforce things like a maximum validity period. We should // do something similar. Ok(()) } #[derive(Clone, Copy)] enum UsedAsCa { Yes, No, } fn used_as_ca(ee_or_ca: &EndEntityOrCa) -> UsedAsCa { match ee_or_ca { EndEntityOrCa::EndEntity => UsedAsCa::No, EndEntityOrCa::Ca(..) => UsedAsCa::Yes, } } // https://tools.ietf.org/html/rfc5280#section-4.2.1.9 fn check_basic_constraints( input: Option<&mut untrusted::Reader>, used_as_ca: UsedAsCa, sub_ca_count: usize, ) -> Result<(), Error> { let (is_ca, path_len_constraint) = match input { Some(input) => { let is_ca = der::optional_boolean(input)?; // https://bugzilla.mozilla.org/show_bug.cgi?id=985025: RFC 5280 // says that a certificate must not have pathLenConstraint unless // it is a CA certificate, but some real-world end-entity // certificates have pathLenConstraint. let path_len_constraint = if !input.at_end() { let value = der::small_nonnegative_integer(input)?; Some(usize::from(value)) } else { None }; (is_ca, path_len_constraint) } None => (false, None), }; match (used_as_ca, is_ca, path_len_constraint) { (UsedAsCa::No, true, _) => Err(Error::CaUsedAsEndEntity), (UsedAsCa::Yes, false, _) => Err(Error::EndEntityUsedAsCa), (UsedAsCa::Yes, true, Some(len)) if sub_ca_count > len => { Err(Error::PathLenConstraintViolated) } _ => Ok(()), } } #[derive(Clone, Copy)] pub struct KeyPurposeId { oid_value: untrusted::Input<'static>, } // id-pkix OBJECT IDENTIFIER ::= { 1 3 6 1 5 5 7 } // id-kp OBJECT IDENTIFIER ::= { id-pkix 3 } // id-kp-serverAuth OBJECT IDENTIFIER ::= { id-kp 1 } #[allow(clippy::identity_op)] // TODO: Make this clearer pub static EKU_SERVER_AUTH: KeyPurposeId = KeyPurposeId { oid_value: untrusted::Input::from(&[(40 * 1) + 3, 6, 1, 5, 5, 7, 3, 1]), }; // id-kp-clientAuth OBJECT IDENTIFIER ::= { id-kp 2 } #[allow(clippy::identity_op)] // TODO: Make this clearer pub static EKU_CLIENT_AUTH: KeyPurposeId = KeyPurposeId { oid_value: untrusted::Input::from(&[(40 * 1) + 3, 6, 1, 5, 5, 7, 3, 2]), }; // id-kp-OCSPSigning OBJECT IDENTIFIER ::= { id-kp 9 } #[allow(clippy::identity_op)] // TODO: Make this clearer pub static EKU_OCSP_SIGNING: KeyPurposeId = KeyPurposeId { oid_value: untrusted::Input::from(&[(40 * 1) + 3, 6, 1, 5, 5, 7, 3, 9]), }; // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 // // Notable Differences from RFC 5280: // // * We follow the convention established by Microsoft's implementation and // mozilla::pkix of treating the EKU extension in a CA certificate as a // restriction on the allowable EKUs for certificates issued by that CA. RFC // 5280 doesn't prescribe any meaning to the EKU extension when a certificate // is being used as a CA certificate. // // * We do not recognize anyExtendedKeyUsage. NSS and mozilla::pkix do not // recognize it either. // // * We treat id-Netscape-stepUp as being equivalent to id-kp-serverAuth in CA // certificates (only). Comodo has issued certificates that require this // behavior that don't expire until June 2020. See https://bugzilla.mozilla.org/show_bug.cgi?id=982292.
fn check_eku( input: Option<&mut untrusted::Reader>, required_eku_if_present: KeyPurposeId, ) -> Result<(), Error> { match input { Some(input) => { loop { let value = der::expect_tag_and_get_value(input, der::Tag::OID)?; if value == required_eku_if_present.oid_value { input.skip_to_end(); break; } if input.at_end() { return Err(Error::RequiredEkuNotFound); } } Ok(()) } None => { // http://tools.ietf.org/html/rfc6960#section-4.2.2.2: // "OCSP signing delegation SHALL be designated by the inclusion of // id-kp-OCSPSigning in an extended key usage certificate extension // included in the OCSP response signer's certificate." // // A missing EKU extension generally means "any EKU", but it is // important that id-kp-OCSPSigning is explicit so that a normal // end-entity certificate isn't able to sign trusted OCSP responses // for itself or for other certificates issued by its issuing CA. if required_eku_if_present.oid_value == EKU_OCSP_SIGNING.oid_value { return Err(Error::RequiredEkuNotFound); } Ok(()) } } } fn loop_while_non_fatal_error<V>( values: V, f: impl Fn(V::Item) -> Result<(), Error>, ) -> Result<(), Error> where V: IntoIterator, { for v in values { match f(v) { Ok(()) => { return Ok(()); } Err(..) => { // If the error is not fatal, then keep going. } } } Err(Error::UnknownIssuer) }
{ // TODO: check_distrust(trust_anchor_subject, trust_anchor_spki)?; // TODO: Check signature algorithm like mozilla::pkix. // TODO: Check SPKI like mozilla::pkix. // TODO: check for active distrust like mozilla::pkix. // See the comment in `remember_extension` for why we don't check the // KeyUsage extension. cert.validity .read_all(Error::BadDer, |value| check_validity(value, time))?; untrusted::read_all_optional(cert.basic_constraints, Error::BadDer, |value| { check_basic_constraints(value, used_as_ca, sub_ca_count) })?; untrusted::read_all_optional(cert.eku, Error::BadDer, |value| { check_eku(value, required_eku_if_present) })?; Ok(()) }
identifier_body
verify_cert.rs
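// A minimal sketch of the validity-window ordering that `check_validity`
// implements in the row above, with plain u64 UNIX seconds standing in for
// webpki's `time::Time` and the DER parsing elided; `ValidityError` and
// `check_validity_window` are hypothetical names, not webpki's API: an
// inverted window is rejected first, then "not yet valid", then "expired".

#[derive(Debug, PartialEq)]
enum ValidityError {
    InvalidCertValidity,
    CertNotValidYet,
    CertExpired,
}

fn check_validity_window(not_before: u64, not_after: u64, now: u64) -> Result<(), ValidityError> {
    // notBefore must not come after notAfter.
    if not_before > not_after {
        return Err(ValidityError::InvalidCertValidity);
    }
    // The verification time must fall inside [notBefore, notAfter].
    if now < not_before {
        return Err(ValidityError::CertNotValidYet);
    }
    if now > not_after {
        return Err(ValidityError::CertExpired);
    }
    Ok(())
}

#[test]
fn validity_window_examples() {
    assert_eq!(check_validity_window(100, 200, 150), Ok(()));
    assert_eq!(check_validity_window(100, 200, 50), Err(ValidityError::CertNotValidYet));
    assert_eq!(check_validity_window(100, 200, 250), Err(ValidityError::CertExpired));
    assert_eq!(check_validity_window(200, 100, 150), Err(ValidityError::InvalidCertValidity));
}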
// Copyright 2015 Brian Smith. // // Permission to use, copy, modify, and/or distribute this software for any // purpose with or without fee is hereby granted, provided that the above // copyright notice and this permission notice appear in all copies. // // THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHORS DISCLAIM ALL WARRANTIES // WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF // MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR // ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES // WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN // ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF // OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. use crate::{ cert::{self, Cert, EndEntityOrCa}, der, name, signed_data, time, Error, SignatureAlgorithm, TrustAnchor, }; pub fn build_chain( required_eku_if_present: KeyPurposeId, supported_sig_algs: &[&SignatureAlgorithm], trust_anchors: &[TrustAnchor], intermediate_certs: &[&[u8]], cert: &Cert, time: time::Time, sub_ca_count: usize, ) -> Result<(), Error> { let used_as_ca = used_as_ca(&cert.ee_or_ca); check_issuer_independent_properties( cert, time, used_as_ca, sub_ca_count, required_eku_if_present, )?; // TODO: HPKP checks. match used_as_ca { UsedAsCa::Yes => { const MAX_SUB_CA_COUNT: usize = 6; if sub_ca_count >= MAX_SUB_CA_COUNT { return Err(Error::UnknownIssuer); } } UsedAsCa::No => { assert_eq!(0, sub_ca_count); } } // TODO: revocation. match loop_while_non_fatal_error(trust_anchors, |trust_anchor: &TrustAnchor| { let trust_anchor_subject = untrusted::Input::from(trust_anchor.subject); if cert.issuer != trust_anchor_subject { return Err(Error::UnknownIssuer); } let name_constraints = trust_anchor.name_constraints.map(untrusted::Input::from); untrusted::read_all_optional(name_constraints, Error::BadDer, |value| { name::check_name_constraints(value, &cert) })?; let trust_anchor_spki = untrusted::Input::from(trust_anchor.spki); // TODO: check_distrust(trust_anchor_subject, trust_anchor_spki)?; check_signatures(supported_sig_algs, cert, trust_anchor_spki)?; Ok(()) }) { Ok(()) => { return Ok(()); } Err(..) => { // If the error is not fatal, then keep going. } } loop_while_non_fatal_error(intermediate_certs, |cert_der| { let potential_issuer = cert::parse_cert(untrusted::Input::from(*cert_der), EndEntityOrCa::Ca(&cert))?; if potential_issuer.subject != cert.issuer { return Err(Error::UnknownIssuer); } // Prevent loops; see RFC 4158 section 5.2.
let mut prev = cert; loop { if potential_issuer.spki.value() == prev.spki.value() && potential_issuer.subject == prev.subject { return Err(Error::UnknownIssuer); } match &prev.ee_or_ca { EndEntityOrCa::EndEntity => { break; } EndEntityOrCa::Ca(child_cert) => { prev = child_cert; } } } untrusted::read_all_optional(potential_issuer.name_constraints, Error::BadDer, |value| { name::check_name_constraints(value, &cert) })?; let next_sub_ca_count = match used_as_ca { UsedAsCa::No => sub_ca_count, UsedAsCa::Yes => sub_ca_count + 1, }; build_chain( required_eku_if_present, supported_sig_algs, trust_anchors, intermediate_certs, &potential_issuer, time, next_sub_ca_count, ) }) } fn check_signatures( supported_sig_algs: &[&SignatureAlgorithm], cert_chain: &Cert, trust_anchor_key: untrusted::Input, ) -> Result<(), Error> { let mut spki_value = trust_anchor_key; let mut cert = cert_chain; loop { signed_data::verify_signed_data(supported_sig_algs, spki_value, &cert.signed_data)?; // TODO: check revocation match &cert.ee_or_ca { EndEntityOrCa::Ca(child_cert) => { spki_value = cert.spki.value(); cert = child_cert; } EndEntityOrCa::EndEntity => { break; } } } Ok(()) } fn check_issuer_independent_properties( cert: &Cert, time: time::Time, used_as_ca: UsedAsCa, sub_ca_count: usize, required_eku_if_present: KeyPurposeId, ) -> Result<(), Error> { // TODO: check_distrust(trust_anchor_subject, trust_anchor_spki)?; // TODO: Check signature algorithm like mozilla::pkix. // TODO: Check SPKI like mozilla::pkix. // TODO: check for active distrust like mozilla::pkix. // See the comment in `remember_extension` for why we don't check the // KeyUsage extension. cert.validity .read_all(Error::BadDer, |value| check_validity(value, time))?; untrusted::read_all_optional(cert.basic_constraints, Error::BadDer, |value| { check_basic_constraints(value, used_as_ca, sub_ca_count) })?; untrusted::read_all_optional(cert.eku, Error::BadDer, |value| { check_eku(value, required_eku_if_present) })?; Ok(()) } // https://tools.ietf.org/html/rfc5280#section-4.1.2.5 fn check_validity(input: &mut untrusted::Reader, time: time::Time) -> Result<(), Error> { let not_before = der::time_choice(input)?; let not_after = der::time_choice(input)?; if not_before > not_after { return Err(Error::InvalidCertValidity); } if time < not_before { return Err(Error::CertNotValidYet); } if time > not_after { return Err(Error::CertExpired); } // TODO: mozilla::pkix allows the TrustDomain to check not_before and // not_after, to enforce things like a maximum validity period. We should // do something similar. Ok(()) } #[derive(Clone, Copy)] enum UsedAsCa { Yes, No, } fn used_as_ca(ee_or_ca: &EndEntityOrCa) -> UsedAsCa { match ee_or_ca { EndEntityOrCa::EndEntity => UsedAsCa::No, EndEntityOrCa::Ca(..) => UsedAsCa::Yes, } } // https://tools.ietf.org/html/rfc5280#section-4.2.1.9 fn check_basic_constraints( input: Option<&mut untrusted::Reader>, used_as_ca: UsedAsCa, sub_ca_count: usize, ) -> Result<(), Error> { let (is_ca, path_len_constraint) = match input { Some(input) => { let is_ca = der::optional_boolean(input)?; // https://bugzilla.mozilla.org/show_bug.cgi?id=985025: RFC 5280 // says that a certificate must not have pathLenConstraint unless // it is a CA certificate, but some real-world end-entity // certificates have pathLenConstraint. 
let path_len_constraint = if !input.at_end() { let value = der::small_nonnegative_integer(input)?; Some(usize::from(value)) } else { None }; (is_ca, path_len_constraint) } None => (false, None), }; match (used_as_ca, is_ca, path_len_constraint) { (UsedAsCa::No, true, _) => Err(Error::CaUsedAsEndEntity), (UsedAsCa::Yes, false, _) => Err(Error::EndEntityUsedAsCa), (UsedAsCa::Yes, true, Some(len)) if sub_ca_count > len => { Err(Error::PathLenConstraintViolated) } _ => Ok(()), } } #[derive(Clone, Copy)] pub struct KeyPurposeId { oid_value: untrusted::Input<'static>, } // id-pkix OBJECT IDENTIFIER ::= { 1 3 6 1 5 5 7 } // id-kp OBJECT IDENTIFIER ::= { id-pkix 3 } // id-kp-serverAuth OBJECT IDENTIFIER ::= { id-kp 1 } #[allow(clippy::identity_op)] // TODO: Make this clearer pub static EKU_SERVER_AUTH: KeyPurposeId = KeyPurposeId { oid_value: untrusted::Input::from(&[(40 * 1) + 3, 6, 1, 5, 5, 7, 3, 1]), }; // id-kp-clientAuth OBJECT IDENTIFIER ::= { id-kp 2 } #[allow(clippy::identity_op)] // TODO: Make this clearer pub static EKU_CLIENT_AUTH: KeyPurposeId = KeyPurposeId { oid_value: untrusted::Input::from(&[(40 * 1) + 3, 6, 1, 5, 5, 7, 3, 2]), }; // id-kp-OCSPSigning OBJECT IDENTIFIER ::= { id-kp 9 } #[allow(clippy::identity_op)] // TODO: Make this clearer pub static EKU_OCSP_SIGNING: KeyPurposeId = KeyPurposeId { oid_value: untrusted::Input::from(&[(40 * 1) + 3, 6, 1, 5, 5, 7, 3, 9]), }; // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 //
// * We follow the convention established by Microsoft's implementation and // mozilla::pkix of treating the EKU extension in a CA certificate as a // restriction on the allowable EKUs for certificates issued by that CA. RFC // 5280 doesn't prescribe any meaning to the EKU extension when a certificate // is being used as a CA certificate. // // * We do not recognize anyExtendedKeyUsage. NSS and mozilla::pkix do not // recognize it either. // // * We treat id-Netscape-stepUp as being equivalent to id-kp-serverAuth in CA // certificates (only). Comodo has issued certificates that require this // behavior that don't expire until June 2020. See https://bugzilla.mozilla.org/show_bug.cgi?id=982292. fn check_eku( input: Option<&mut untrusted::Reader>, required_eku_if_present: KeyPurposeId, ) -> Result<(), Error> { match input { Some(input) => { loop { let value = der::expect_tag_and_get_value(input, der::Tag::OID)?; if value == required_eku_if_present.oid_value { input.skip_to_end(); break; } if input.at_end() { return Err(Error::RequiredEkuNotFound); } } Ok(()) } None => { // http://tools.ietf.org/html/rfc6960#section-4.2.2.2: // "OCSP signing delegation SHALL be designated by the inclusion of // id-kp-OCSPSigning in an extended key usage certificate extension // included in the OCSP response signer's certificate." // // A missing EKU extension generally means "any EKU", but it is // important that id-kp-OCSPSigning is explicit so that a normal // end-entity certificate isn't able to sign trusted OCSP responses // for itself or for other certificates issued by its issuing CA. if required_eku_if_present.oid_value == EKU_OCSP_SIGNING.oid_value { return Err(Error::RequiredEkuNotFound); } Ok(()) } } } fn loop_while_non_fatal_error<V>( values: V, f: impl Fn(V::Item) -> Result<(), Error>, ) -> Result<(), Error> where V: IntoIterator, { for v in values { match f(v) { Ok(()) => { return Ok(()); } Err(..) => { // If the error is not fatal, then keep going. } } } Err(Error::UnknownIssuer) }
// Notable Differences from RFC 5280: //
random_line_split
cc.rs
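// A short worked example of the `(40 * 1) + 3` in the EKU constants above:
// DER encodes the first two arcs X.Y of an OBJECT IDENTIFIER as the single
// byte 40*X + Y, so id-kp-serverAuth (1.3.6.1.5.5.7.3.1) begins with
// (40 * 1) + 3 = 43. The helper name below is a hypothetical illustration.

fn der_oid_first_byte(x: u8, y: u8) -> u8 {
    // Valid for the common case x < 2 and y < 40, which covers 1.3.x OIDs.
    40 * x + y
}

#[test]
fn id_kp_server_auth_first_byte() {
    let encoded = [der_oid_first_byte(1, 3), 6, 1, 5, 5, 7, 3, 1];
    assert_eq!(encoded, [43, 6, 1, 5, 5, 7, 3, 1]);
}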
t)] pub(crate) name: String, value: UnsafeCell<ManuallyDrop<T>>, } /// The real layout if `T` is tracked by the collector. The main APIs still use /// the `CcBox` type. This type is only used for allocation and deallocation. /// /// This is a private type. #[repr(C)] pub struct RawCcBoxWithGcHeader<T: ?Sized, O: AbstractObjectSpace> { header: O::Header, cc_box: RawCcBox<T, O>, } /// A single-threaded reference-counting pointer that integrates /// with cyclic garbage collection. /// /// See [module level documentation](index.html) for more details. /// /// [`Cc`](type.Cc.html) is not thread-safe. It does not implement `Send` /// or `Sync`: /// /// ```compile_fail /// use std::ops::Deref; /// use gcmodule::Cc; /// let cc = Cc::new(5); /// std::thread::spawn(move || { /// println!("{}", cc.deref()); /// }); /// ``` pub type Cc<T> = RawCc<T, ObjectSpace>; /// Weak reference of [`Cc`](type.Cc.html). pub type Weak<T> = RawWeak<T, ObjectSpace>; /// Low-level type for [`Cc<T>`](type.Cc.html). pub struct RawCc<T: ?Sized, O: AbstractObjectSpace>(NonNull<RawCcBox<T, O>>); /// Low-level type for [`Weak<T>`](type.Weak.html). pub struct RawWeak<T: ?Sized, O: AbstractObjectSpace>(NonNull<RawCcBox<T, O>>); // `ManuallyDrop<T>` does not implement `UnwindSafe`. But `CcBox::drop` does // make sure `T` is dropped. If `T` is unwind-safe, so is `CcBox<T>`. impl<T: UnwindSafe + ?Sized> UnwindSafe for RawCcBox<T, ObjectSpace> {} // `NonNull` does not implement `UnwindSafe`. But `Cc` and `Weak` only use it // as a "const" pointer. If `T` is unwind-safe, so is `Cc<T>`. impl<T: UnwindSafe + ?Sized, O: AbstractObjectSpace> UnwindSafe for RawCc<T, O> {} impl<T: UnwindSafe + ?Sized, O: AbstractObjectSpace> UnwindSafe for RawWeak<T, O> {} /// Type-erased `Cc<T>` with interfaces needed by GC. /// /// This is a private type. pub trait CcDyn { /// Returns the reference count for cycle detection. fn gc_ref_count(&self) -> usize; /// Visit referents for cycle detection. fn gc_traverse(&self, tracer: &mut Tracer); /// Get a cloned `Cc<dyn Trace>`. This has 2 purposes: /// - Keep a reference so `CcBox<T>` is not released in the next step. /// So metadata like `ref_count` can still be read. /// - Operate on the object. fn gc_clone(&self) -> Box<dyn GcClone>; #[cfg(feature = "debug")] /// Name used in collect.rs. fn gc_debug_name(&self) -> String { "?".to_string() } } /// Type-erased gc_clone result. /// /// This is a private type. pub trait GcClone { /// Force drop the value T. fn gc_drop_t(&self); /// Returns the reference count. This is useful for verification. fn gc_ref_count(&self) -> usize; } /// A dummy implementation without drop side-effects. pub(crate) struct CcDummy; impl CcDummy { pub(crate) fn ccdyn_vptr() -> *mut () { let mut dummy = CcDummy; // safety: To access vtable pointer. Stable API cannot do it. let fat_ptr: [*mut (); 2] = unsafe { mem::transmute(&mut dummy as &mut dyn CcDyn) }; fat_ptr[1] } } impl CcDyn for CcDummy { fn gc_ref_count(&self) -> usize { 1 } fn gc_traverse(&self, _tracer: &mut Tracer) {} fn gc_clone(&self) -> Box<dyn GcClone> { panic!("bug: CcDummy::gc_clone should never be called"); } } impl<T: Trace> Cc<T> { /// Constructs a new [`Cc<T>`](type.Cc.html) in a thread-local storage. /// /// To collect cycles, use [`collect_thread_cycles`](fn.collect_thread_cycles.html).
pub fn new(value: T) -> Cc<T> { collect::THREAD_OBJECT_SPACE.with(|space| Self::new_in_space(value, space)) } } impl<T: Trace, O: AbstractObjectSpace> RawCc<T, O> { /// Constructs a new [`Cc<T>`](type.Cc.html) in the given /// [`ObjectSpace`](struct.ObjectSpace.html). /// /// To collect cycles, call `ObjectSpace::collect_cycles()`. pub(crate) fn new_in_space(value: T, space: &O) -> Self { let is_tracked = T::is_type_tracked(); let cc_box = RawCcBox { ref_count: space.new_ref_count(is_tracked), value: UnsafeCell::new(ManuallyDrop::new(value)), #[cfg(test)] name: debug::NEXT_DEBUG_NAME.with(|n| n.get().to_string()), }; let ccbox_ptr: *mut RawCcBox<T, O> = if is_tracked { // Create a GcHeader before the CcBox. This is similar to cpython. let header = space.empty_header(); let cc_box_with_header = RawCcBoxWithGcHeader { header, cc_box }; let mut boxed = Box::new(cc_box_with_header); // Fix-up fields in GcHeader. This is done after the creation of the // Box so the memory addresses are stable. space.insert(&mut boxed.header, &boxed.cc_box); debug_assert_eq!( mem::size_of::<O::Header>() + mem::size_of::<RawCcBox<T, O>>(), mem::size_of::<RawCcBoxWithGcHeader<T, O>>() ); let ptr: *mut RawCcBox<T, O> = &mut boxed.cc_box; Box::leak(boxed); ptr } else
; // safety: ccbox_ptr cannot be null from the above code. let non_null = unsafe { NonNull::new_unchecked(ccbox_ptr) }; let result = Self(non_null); if is_tracked { debug::log(|| (result.debug_name(), "new (CcBoxWithGcHeader)")); } else { debug::log(|| (result.debug_name(), "new (CcBox)")); } debug_assert_eq!(result.ref_count(), 1); result } /// Convert to `RawCc<dyn Trace>`. pub fn into_dyn(self) -> RawCc<dyn Trace, O> { #[cfg(feature = "nightly")] { // Requires CoerceUnsized, which is currently unstable. self } // safety: Trait object magic. Test by test_dyn_downcast. #[cfg(not(feature = "nightly"))] unsafe { // XXX: This depends on rust internals. But it works on stable. // Replace this with CoerceUnsized once that becomes stable. // Cc<dyn Trace> has 2 usize values: The first one is the same // as Cc<T>. The second one is the vtable. The vtable pointer // is the same as the second pointer of `&dyn Trace`. let mut fat_ptr: [usize; 2] = mem::transmute(self.inner().deref() as &dyn Trace); let self_ptr: usize = mem::transmute(self); fat_ptr[0] = self_ptr; mem::transmute(fat_ptr) } } } impl<T: Trace + Clone> Cc<T> { /// Update the value `T` in a copy-on-write way. /// /// If the ref count is 1, the value is updated in-place. /// Otherwise a new `Cc<T>` will be created. pub fn update_with(&mut self, mut update_func: impl FnMut(&mut T)) { let need_clone = self.ref_count() > 1; if need_clone { let mut value = <Cc<T>>::deref(self).clone(); update_func(&mut value); *self = Cc::new(value); } else { let value_ptr: *mut ManuallyDrop<T> = self.inner().value.get(); let value_mut: &mut T = unsafe { &mut *value_ptr }.deref_mut(); update_func(value_mut); } } } impl<T: ?Sized, O: AbstractObjectSpace> RawCcBox<T, O> { #[inline] fn header_ptr(&self) -> *const () { self.header() as *const _ as _ } #[inline] fn header(&self) -> &O::Header { debug_assert!(self.is_tracked()); // safety: See `Cc::new`. GcHeader is before CcBox for tracked objects. unsafe { cast_ref(self, -(mem::size_of::<O::Header>() as isize)) } } #[inline] fn is_tracked(&self) -> bool { self.ref_count.is_tracked() } #[inline] fn is_dropped(&self) -> bool { self.ref_count.is_dropped() } #[inline] fn inc_ref(&self) -> usize { self.ref_count.inc_ref() } #[inline] fn dec_ref(&self) -> usize { self.ref_count.dec_ref() } #[inline] fn ref_count(&self) -> usize { self.ref_count.ref_count() } #[inline] fn weak_count(&self) -> usize { self.ref_count.weak_count() } #[inline] fn set_dropped(&self) -> bool { self.ref_count.set_dropped() } #[inline] pub(crate) fn drop_t(&self) { let already_dropped = self.set_dropped(); if !already_dropped { debug::log(|| (self.debug_name(), "drop (T)")); // safety: is_dropped() check ensures T is only dropped once. Other // places (ex. gc collector) ensure that T is no longer accessed. unsafe { ManuallyDrop::drop(&mut *(self.value.get())) }; } } pub(crate) fn trace_t(&self, tracer: &mut Tracer) { if !self.is_tracked() { return; } debug::log(|| (self.debug_name(), "trace")); // For other non-`Cc<T>` container types, `trace` visits referents, // is recursive, and does not call `tracer` directly. For `Cc<T>`, // `trace` stops here, is non-recursive, and does apply `tracer` // to the actual `GcHeader`. It's expected that the upper layer // calls `gc_traverse` on everything (not just roots).
tracer(self.header_ptr()); } pub(crate) fn debug_name(&self) -> String { #[cfg(test)] { self.name.clone() } #[cfg(not(test))] { #[allow(unused_mut)] let mut result = format!("{} at {:p}", std::any::type_name::<T>(), &self.value); #[cfg(all(feature = "debug", feature = "nightly"))] { if !self.is_dropped() && crate::debug::GC_DROPPING.with(|t| !t.get()) { let debug = self.deref().optional_debug(); if !debug.is_empty() { result += &format!(" {}", debug); } } } return result; } } } #[cfg(all(feature = "debug", feature = "nightly"))] pub(crate) trait OptionalDebug { fn optional_debug(&self) -> String; } #[cfg(all(feature = "debug", feature = "nightly"))] impl<T: ?Sized> OptionalDebug for T { default fn optional_debug(&self) -> String { "".to_string() } } #[cfg(all(feature = "debug", feature = "nightly"))] impl<T: std::fmt::Debug + ?Sized> OptionalDebug for T { fn optional_debug(&self) -> String { format!("{:?}", self) } } impl<T: ?Sized, O: AbstractObjectSpace> RawCc<T, O> { /// Obtains a "weak reference", a non-owning pointer. pub fn downgrade(&self) -> RawWeak<T, O> { let inner = self.inner(); inner.ref_count.inc_weak(); debug::log(|| { ( inner.debug_name(), format!("new-weak ({})", inner.ref_count.weak_count()), ) }); RawWeak(self.0) } /// Gets the reference count not considering weak references. #[inline] pub fn strong_count(&self) -> usize { self.ref_count() } } impl<T: ?Sized, O: AbstractObjectSpace> RawWeak<T, O> { /// Attempts to obtain a "strong reference". /// /// Returns `None` if the value has already been dropped. pub fn upgrade(&self) -> Option<RawCc<T, O>> { let inner = self.inner(); // Make the below operation "atomic". let _locked = inner.ref_count.locked(); if inner.is_dropped() { None } else { inner.inc_ref(); debug::log(|| { ( inner.debug_name(), format!("new-strong ({})", inner.ref_count.ref_count()), ) }); Some(RawCc(self.0)) } } /// Gets the reference count not considering weak references. #[inline] pub fn strong_count(&self) -> usize { self.inner().ref_count() } /// Get the weak (non-owning) reference count. #[inline] pub fn weak_count(&self) -> usize { self.inner().weak_count() } } impl<T: ?Sized, O: AbstractObjectSpace> RawCc<T, O> { #[inline] pub(crate) fn inner(&self) -> &RawCcBox<T, O> { // safety: CcBox lifetime maintained by ref count. Pointer is valid. unsafe { self.0.as_ref() } } /// `trace` without `T: Trace` bound. /// /// Useful for structures with `Cc<T>` fields where `T` does not implement /// `Trace`. For example, `struct S(Cc<Box<dyn MyTrait>>)`. To implement /// `Trace` for `S`, it can use `Cc::trace(&self.0, tracer)`. #[inline] pub fn trace(&self, tracer: &mut Tracer) { self.inner().trace_t(tracer); } #[inline] fn inc_ref(&self) -> usize { self.inner().inc_ref() } #[inline] fn dec_ref(&self) -> usize { self.inner().dec_ref() } #[inline] pub(crate) fn ref_count(&self) -> usize { self.inner().ref_count() } /// Get the weak (non-owning) reference count. #[inline] pub fn weak_count(&self) -> usize { self.inner().weak_count() } pub(crate) fn debug_name(&self) -> String { self.inner().debug_name() } } impl<T: ?Sized, O: AbstractObjectSpace> RawWeak<T, O> { #[inline] fn inner(&self) -> &RawCcBox<T, O> { // safety: CcBox lifetime maintained by ref count. Pointer is valid. unsafe { self.0.as_ref() } } } impl<T: ?Sized, O: AbstractObjectSpace> Clone for RawCc<T, O> { #[inline] fn clone(&self) -> Self { // In theory self.inner().ref_count.locked() is needed. // Practically this is an atomic operation that cannot be split so locking // becomes optional.
// let _locked = self.inner().ref_count.locked(); self.inc_ref(); debug::log(|| (self.debug_name(), format!("clone ({})", self.ref_count()))); Self(self.0) } } impl<T: ?Sized, O: AbstractObjectSpace> Clone for RawWeak<T, O> { #[inline] fn clone(&self) -> Self { let inner = self.inner(); let ref_count = &inner.ref_count; ref_count.inc_weak(); debug::log(|| { ( inner.debug_name(), format!("clone-weak ({})", ref_count.weak_count()), ) }); Self(self.0) } } impl<T: ?Sized> Deref for Cc<T> { type Target = T; #[inline] fn deref(&self) -> &Self::Target { self.inner().deref() } } impl<T: ?Sized, O: AbstractObjectSpace> Deref for RawCcBox<T, O> { type Target = T; #[inline] fn deref(&self) -> &Self::Target { debug_assert!( !self.is_dropped(), concat!( "bug: accessing a dropped CcBox detected\n", "This usually happens after ignoring another panic triggered by the collector." ) ); // safety: CcBox (and its value) lifetime maintained by ref count. // If `Trace` is implemented correctly then the GC won't drop_t() // incorrectly and this pointer is valid. Otherwise the above // assertion can prevent UBs on debug build. unsafe { &*self.value.get() } } } fn drop_ccbox<T: ?Sized, O: AbstractObjectSpace>(cc_box: *mut RawCcBox<T, O>) { // safety: See Cc::new. The pointer was created by Box::into_raw. let cc_box: Box<RawCcBox<T, O>> = unsafe { Box::from_raw(cc_box) }; let is_tracked = cc_box.is_tracked(); if is_tracked { // The real object is CcBoxWithGcHeader. Drop that instead. // safety: See Cc::new for CcBoxWithGcHeader. let gc_box: Box<RawCcBoxWithGcHeader<T, O>> = unsafe { cast_box(cc_box) }; O::remove(&gc_box.header); // Drop T if it hasn't been dropped yet. // This needs to be after O::remove so the collector won't have a // chance to read dropped content. gc_box.cc_box.drop_t(); debug::log(|| (gc_box.cc_box.debug_name(), "drop (CcBoxWithGcHeader)")); drop(gc_box); } else { // Drop T if it hasn't been dropped yet. cc_box.drop_t(); debug::log(|| (cc_box.debug_name(), "drop (CcBox)")); drop(cc_box); } } impl<T: ?Sized, O: AbstractObjectSpace> Drop for RawCc<T, O> { fn drop(&mut self) { let ptr: *mut RawCcBox<T, O> = self.0.as_ptr(); let inner = self.inner(); // Block threaded collector. This is needed because "drop()" is a // complex operation. The whole operation needs to be "atomic". let _locked = inner.ref_count.locked(); let old_ref_count = self.dec_ref(); debug::log(|| (self.debug_name(), format!("drop ({})", self.ref_count()))); debug_assert!(old_ref_count >= 1); if old_ref_count == 1 { if self.weak_count() == 0 { // safety: CcBox lifetime maintained by ref count. drop_ccbox(ptr); } else { inner.drop_t(); } } } } impl<T: ?Sized, O: AbstractObjectSpace> Drop for RawWeak<T, O> { fn drop(&mut self) { let ptr: *mut RawCcBox<T, O> = self.0.as_ptr(); let inner = self.inner(); let ref_count = &inner.ref_count; // Block threaded collector to "freeze" the ref count, for safety. let _locked = ref_count.locked(); let old_ref_count = ref_count.ref_count(); let old_weak_count = ref_count.dec_weak(); debug::log(|| { ( inner.debug_name(), format!("drop-weak ({})", ref_count.weak_count()), ) }); debug_assert!(old_weak_count >= 1); if old_ref_count == 0 && old_weak_count == 1 { // safety: CcBox lifetime maintained by ref count.
drop_ccbox(ptr); } } } impl<T: Trace + ?Sized, O: AbstractObjectSpace> CcDyn for RawCcBox<T, O> { fn gc_ref_count(&self) -> usize { self.ref_count() } fn gc_traverse(&self, tracer: &mut Tracer) { debug::log(|| (self.debug_name(), "gc_traverse")); T::trace(self.deref(), tracer) } fn gc_clone(&self) -> Box<dyn GcClone> { self.ref_count.inc_ref(); debug::log(|| { let msg = format!("gc_clone ({})", self.ref_count()); (self.debug_name(), msg) }); // safety: The pointer is compatible. The mutability is different only // to satisfy NonNull (NonNull::new requires &mut). The returned value // is still "immutable". &self can also never be null. let ptr: NonNull<RawCcBox<T, O>> = unsafe { NonNull::new_unchecked(self as *const _ as *mut _) }; let cc = RawCc::<T, O>(ptr);
{ Box::into_raw(Box::new(cc_box)) }
conditional_block
cc.rs
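// A minimal sketch of the layout trick used by the tracked branch above:
// with #[repr(C)], the header is placed before the box in one allocation, so
// a reference to the payload can be rewound by size_of::<Header>() bytes to
// recover the header, which is the invariant `RawCcBox::header()` depends on.
// `WithHeader` and `header_from_payload` (and the concrete u64/u32 field
// types) are hypothetical stand-ins, not gcmodule's types.

use std::mem;

#[repr(C)]
struct WithHeader {
    header: u64,  // stand-in for O::Header
    payload: u32, // stand-in for RawCcBox<T, O>
}

fn header_from_payload(payload: &u32) -> &u64 {
    // safety: only sound when `payload` really lives inside a WithHeader,
    // mirroring the invariant that Cc::new establishes for tracked objects.
    unsafe {
        let p = (payload as *const u32 as *const u8).offset(-(mem::size_of::<u64>() as isize));
        &*(p as *const u64)
    }
}

#[test]
fn recovers_header() {
    let v = WithHeader { header: 7, payload: 42 };
    assert_eq!(v.payload, 42);
    assert_eq!(*header_from_payload(&v.payload), 7);
}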
T` is unwind-safe, so does `CcBox<T>`. impl<T: UnwindSafe +?Sized> UnwindSafe for RawCcBox<T, ObjectSpace> {} // `NonNull` does not implement `UnwindSafe`. But `Cc` and `Weak` only use it // as a "const" pointer. If `T` is unwind-safe, so does `Cc<T>`. impl<T: UnwindSafe +?Sized, O: AbstractObjectSpace> UnwindSafe for RawCc<T, O> {} impl<T: UnwindSafe +?Sized, O: AbstractObjectSpace> UnwindSafe for RawWeak<T, O> {} /// Type-erased `Cc<T>` with interfaces needed by GC. /// /// This is a private type. pub trait CcDyn { /// Returns the reference count for cycle detection. fn gc_ref_count(&self) -> usize; /// Visit referents for cycle detection. fn gc_traverse(&self, tracer: &mut Tracer); /// Get an cloned `Cc<dyn Trace>`. This has 2 purposes: /// - Keep a reference so `CcBox<T>` is not released in the next step. /// So metadata like `ref_count` can still be read. /// - Operate on the object. fn gc_clone(&self) -> Box<dyn GcClone>; #[cfg(feature = "debug")] /// Name used in collect.rs. fn gc_debug_name(&self) -> String { "?".to_string() } } /// Type-erased gc_clone result. /// /// This is a private type. pub trait GcClone { /// Force drop the value T. fn gc_drop_t(&self); /// Returns the reference count. This is useful for verification. fn gc_ref_count(&self) -> usize; } /// A dummy implementation without drop side-effects. pub(crate) struct CcDummy; impl CcDummy { pub(crate) fn ccdyn_vptr() -> *mut () { let mut dummy = CcDummy; // safety: To access vtable pointer. Stable API cannot do it. let fat_ptr: [*mut (); 2] = unsafe { mem::transmute(&mut dummy as &mut dyn CcDyn) }; fat_ptr[1] } } impl CcDyn for CcDummy { fn gc_ref_count(&self) -> usize { 1 } fn gc_traverse(&self, _tracer: &mut Tracer) {} fn gc_clone(&self) -> Box<dyn GcClone> { panic!("bug: CcDummy::gc_clone should never be called"); } } impl<T: Trace> Cc<T> { /// Constructs a new [`Cc<T>`](type.Cc.html) in a thread-local storage. /// /// To collect cycles, use [`collect_thread_cycles`](fn.collect_thread_cycles.html). pub fn new(value: T) -> Cc<T> { collect::THREAD_OBJECT_SPACE.with(|space| Self::new_in_space(value, space)) } } impl<T: Trace, O: AbstractObjectSpace> RawCc<T, O> { /// Constructs a new [`Cc<T>`](type.Cc.html) in the given /// [`ObjectSpace`](struct.ObjectSpace.html). /// /// To collect cycles, call `ObjectSpace::collect_cycles()`. pub(crate) fn new_in_space(value: T, space: &O) -> Self { let is_tracked = T::is_type_tracked(); let cc_box = RawCcBox { ref_count: space.new_ref_count(is_tracked), value: UnsafeCell::new(ManuallyDrop::new(value)), #[cfg(test)] name: debug::NEXT_DEBUG_NAME.with(|n| n.get().to_string()), }; let ccbox_ptr: *mut RawCcBox<T, O> = if is_tracked { // Create a GcHeader before the CcBox. This is similar to cpython. let header = space.empty_header(); let cc_box_with_header = RawCcBoxWithGcHeader { header, cc_box }; let mut boxed = Box::new(cc_box_with_header); // Fix-up fields in GcHeader. This is done after the creation of the // Box so the memory addresses are stable. space.insert(&mut boxed.header, &boxed.cc_box); debug_assert_eq!( mem::size_of::<O::Header>() + mem::size_of::<RawCcBox<T, O>>(), mem::size_of::<RawCcBoxWithGcHeader<T, O>>() ); let ptr: *mut RawCcBox<T, O> = &mut boxed.cc_box; Box::leak(boxed); ptr } else { Box::into_raw(Box::new(cc_box)) }; // safety: ccbox_ptr cannot be null from the above code. 
let non_null = unsafe { NonNull::new_unchecked(ccbox_ptr) }; let result = Self(non_null); if is_tracked { debug::log(|| (result.debug_name(), "new (CcBoxWithGcHeader)")); } else { debug::log(|| (result.debug_name(), "new (CcBox)")); } debug_assert_eq!(result.ref_count(), 1); result } /// Convert to `RawCc<dyn Trace>`. pub fn into_dyn(self) -> RawCc<dyn Trace, O> { #[cfg(feature = "nightly")] { // Requires CoerceUnsized, which is currently unstable. self } // safety: Trait object magic. Test by test_dyn_downcast. #[cfg(not(feature = "nightly"))] unsafe { // XXX: This depends on rust internals. But it works on stable. // Replace this with CoerceUnsized once that becomes stable. // Cc<dyn Trace> has 2 usize values: The first one is the same // as Cc<T>. The second one is the vtable. The vtable pointer // is the same as the second pointer of `&dyn Trace`. let mut fat_ptr: [usize; 2] = mem::transmute(self.inner().deref() as &dyn Trace); let self_ptr: usize = mem::transmute(self); fat_ptr[0] = self_ptr; mem::transmute(fat_ptr) } } } impl<T: Trace + Clone> Cc<T> { /// Update the value `T` in a copy-on-write way. /// /// If the ref count is 1, the value is updated in-place. /// Otherwise a new `Cc<T>` will be created. pub fn update_with(&mut self, mut update_func: impl FnMut(&mut T)) { let need_clone = self.ref_count() > 1; if need_clone { let mut value = <Cc<T>>::deref(self).clone(); update_func(&mut value); *self = Cc::new(value); } else { let value_ptr: *mut ManuallyDrop<T> = self.inner().value.get(); let value_mut: &mut T = unsafe { &mut *value_ptr }.deref_mut(); update_func(value_mut); } } } impl<T:?Sized, O: AbstractObjectSpace> RawCcBox<T, O> { #[inline] fn header_ptr(&self) -> *const () { self.header() as *const _ as _ } #[inline] fn header(&self) -> &O::Header { debug_assert!(self.is_tracked()); // safety: See `Cc::new`. GcHeader is before CcBox for tracked objects. unsafe { cast_ref(self, -(mem::size_of::<O::Header>() as isize)) } } #[inline] fn is_tracked(&self) -> bool { self.ref_count.is_tracked() } #[inline] fn is_dropped(&self) -> bool { self.ref_count.is_dropped() } #[inline] fn inc_ref(&self) -> usize { self.ref_count.inc_ref() } #[inline] fn dec_ref(&self) -> usize { self.ref_count.dec_ref() } #[inline] fn ref_count(&self) -> usize { self.ref_count.ref_count() } #[inline] fn weak_count(&self) -> usize { self.ref_count.weak_count() } #[inline] fn set_dropped(&self) -> bool { self.ref_count.set_dropped() } #[inline] pub(crate) fn drop_t(&self) { let already_dropped = self.set_dropped(); if!already_dropped { debug::log(|| (self.debug_name(), "drop (T)")); // safety: is_dropped() check ensures T is only dropped once. Other // places (ex. gc collector) ensure that T is no longer accessed. unsafe { ManuallyDrop::drop(&mut *(self.value.get())) }; } } pub(crate) fn trace_t(&self, tracer: &mut Tracer) { if!self.is_tracked() { return; } debug::log(|| (self.debug_name(), "trace")); // For other non-`Cc<T>` container types, `trace` visit referents, // is recursive, and does not call `tracer` directly. For `Cc<T>`, // `trace` stops here, is non-recursive, and does apply `tracer` // to the actual `GcHeader`. It's expected that the upper layer // calls `gc_traverse` on everything (not just roots). 
tracer(self.header_ptr()); } pub(crate) fn debug_name(&self) -> String { #[cfg(test)] { self.name.clone() } #[cfg(not(test))] { #[allow(unused_mut)] let mut result = format!("{} at {:p}", std::any::type_name::<T>(), &self.value); #[cfg(all(feature = "debug", feature = "nightly"))] { if!self.is_dropped() && crate::debug::GC_DROPPING.with(|t|!t.get()) { let debug = self.deref().optional_debug(); if!debug.is_empty() { result += &format!(" {}", debug); } } } return result; } } } #[cfg(all(feature = "debug", feature = "nightly"))] pub(crate) trait OptionalDebug { fn optional_debug(&self) -> String; } #[cfg(all(feature = "debug", feature = "nightly"))] impl<T:?Sized> OptionalDebug for T { default fn optional_debug(&self) -> String { "".to_string() } } #[cfg(all(feature = "debug", feature = "nightly"))] impl<T: std::fmt::Debug +?Sized> OptionalDebug for T { fn optional_debug(&self) -> String { format!("{:?}", self) } } impl<T:?Sized, O: AbstractObjectSpace> RawCc<T, O> { /// Obtains a "weak reference", a non-owning pointer. pub fn downgrade(&self) -> RawWeak<T, O> { let inner = self.inner(); inner.ref_count.inc_weak(); debug::log(|| { ( inner.debug_name(), format!("new-weak ({})", inner.ref_count.weak_count()), ) }); RawWeak(self.0) } /// Gets the reference count not considering weak references. #[inline] pub fn strong_count(&self) -> usize { self.ref_count() } } impl<T:?Sized, O: AbstractObjectSpace> RawWeak<T, O> { /// Attempts to obtain a "strong reference". /// /// Returns `None` if the value has already been dropped. pub fn upgrade(&self) -> Option<RawCc<T, O>> { let inner = self.inner(); // Make the below operation "atomic". let _locked = inner.ref_count.locked(); if inner.is_dropped() { None } else { inner.inc_ref(); debug::log(|| { ( inner.debug_name(), format!("new-strong ({})", inner.ref_count.ref_count()), ) }); Some(RawCc(self.0)) } } /// Gets the reference count not considering weak references. #[inline] pub fn strong_count(&self) -> usize { self.inner().ref_count() } /// Get the weak (non-owning) reference count. #[inline] pub fn weak_count(&self) -> usize { self.inner().weak_count() } } impl<T:?Sized, O: AbstractObjectSpace> RawCc<T, O> { #[inline] pub(crate) fn inner(&self) -> &RawCcBox<T, O> { // safety: CcBox lifetime maintained by ref count. Pointer is valid. unsafe { self.0.as_ref() } } /// `trace` without `T: Trace` bound. /// /// Useful for structures with `Cc<T>` fields where `T` does not implement /// `Trace`. For example, `struct S(Cc<Box<dyn MyTrait>>)`. To implement /// `Trace` for `S`, it can use `Cc::trace(&self.0, tracer)`. #[inline] pub fn trace(&self, tracer: &mut Tracer) { self.inner().trace_t(tracer); } #[inline] fn inc_ref(&self) -> usize { self.inner().inc_ref() } #[inline] fn dec_ref(&self) -> usize { self.inner().dec_ref() } #[inline] pub(crate) fn ref_count(&self) -> usize { self.inner().ref_count() } /// Get the weak (non-owning) reference count. #[inline] pub fn weak_count(&self) -> usize { self.inner().weak_count() } pub(crate) fn debug_name(&self) -> String { self.inner().debug_name() } } impl<T:?Sized, O: AbstractObjectSpace> RawWeak<T, O> { #[inline] fn inner(&self) -> &RawCcBox<T, O> { // safety: CcBox lifetime maintained by ref count. Pointer is valid. unsafe { self.0.as_ref() } } } impl<T:?Sized, O: AbstractObjectSpace> Clone for RawCc<T, O> { #[inline] fn clone(&self) -> Self { // In theory self.inner().ref_count.locked() is needed. // Practically this is an atomic operation that cannot be split so locking // becomes optional. 
// let _locked = self.inner().ref_count.locked(); self.inc_ref(); debug::log(|| (self.debug_name(), format!("clone ({})", self.ref_count()))); Self(self.0) } } impl<T:?Sized, O: AbstractObjectSpace> Clone for RawWeak<T, O> { #[inline] fn clone(&self) -> Self { let inner = self.inner(); let ref_count = &inner.ref_count; ref_count.inc_weak(); debug::log(|| { ( inner.debug_name(), format!("clone-weak ({})", ref_count.weak_count()), ) }); Self(self.0) } } impl<T:?Sized> Deref for Cc<T> { type Target = T; #[inline] fn deref(&self) -> &Self::Target { self.inner().deref() } } impl<T:?Sized, O: AbstractObjectSpace> Deref for RawCcBox<T, O> { type Target = T; #[inline] fn deref(&self) -> &Self::Target { debug_assert!( !self.is_dropped(), concat!( "bug: accessing a dropped CcBox detected\n", "This usually happens after ignoring another panic triggered by the collector." ) ); // safety: CcBox (and its value) lifetime maintained by ref count. // If `Trace` is implemented correctly then the GC won't drop_t() // incorrectly and this pointer is valid. Otherwise the above // assertion can prevent UBs on debug build. unsafe { &*self.value.get() } } } fn drop_ccbox<T:?Sized, O: AbstractObjectSpace>(cc_box: *mut RawCcBox<T, O>) { // safety: See Cc::new. The pointer was created by Box::into_raw. let cc_box: Box<RawCcBox<T, O>> = unsafe { Box::from_raw(cc_box) }; let is_tracked = cc_box.is_tracked(); if is_tracked { // The real object is CcBoxWithGcHeader. Drop that instead. // safety: See Cc::new for CcBoxWithGcHeader. let gc_box: Box<RawCcBoxWithGcHeader<T, O>> = unsafe { cast_box(cc_box) }; O::remove(&gc_box.header); // Drop T if it hasn't been dropped yet. // This needs to be after O::remove so the collector won't have a // chance to read dropped content. gc_box.cc_box.drop_t(); debug::log(|| (gc_box.cc_box.debug_name(), "drop (CcBoxWithGcHeader)")); drop(gc_box); } else { // Drop T if it hasn't been dropped yet. cc_box.drop_t(); debug::log(|| (cc_box.debug_name(), "drop (CcBox)")); drop(cc_box); } } impl<T:?Sized, O: AbstractObjectSpace> Drop for RawCc<T, O> { fn drop(&mut self) { let ptr: *mut RawCcBox<T, O> = self.0.as_ptr(); let inner = self.inner(); // Block threaded collector. This is needed because "drop()" is a // complex operation. The whole operation needs to be "atomic". let _locked = inner.ref_count.locked(); let old_ref_count = self.dec_ref(); debug::log(|| (self.debug_name(), format!("drop ({})", self.ref_count()))); debug_assert!(old_ref_count >= 1); if old_ref_count == 1 { if self.weak_count() == 0 { // safety: CcBox lifetime maintained by ref count. drop_ccbox(ptr); } else { inner.drop_t(); } } } } impl<T:?Sized, O: AbstractObjectSpace> Drop for RawWeak<T, O> { fn drop(&mut self) { let ptr: *mut RawCcBox<T, O> = self.0.as_ptr(); let inner = self.inner(); let ref_count = &inner.ref_count; // Block threaded collector to "freeze" the ref count, for safety. let _locked = ref_count.locked(); let old_ref_count = ref_count.ref_count(); let old_weak_count = ref_count.dec_weak(); debug::log(|| { ( inner.debug_name(), format!("drop-weak ({})", ref_count.weak_count()), ) }); debug_assert!(old_weak_count >= 1); if old_ref_count == 0 && old_weak_count == 1 { // safety: CcBox lifetime maintained by ref count. 
drop_ccbox(ptr); } } } impl<T: Trace +?Sized, O: AbstractObjectSpace> CcDyn for RawCcBox<T, O> { fn gc_ref_count(&self) -> usize { self.ref_count() } fn gc_traverse(&self, tracer: &mut Tracer) { debug::log(|| (self.debug_name(), "gc_traverse")); T::trace(self.deref(), tracer) } fn gc_clone(&self) -> Box<dyn GcClone> { self.ref_count.inc_ref(); debug::log(|| { let msg = format!("gc_clone ({})", self.ref_count()); (self.debug_name(), msg) }); // safety: The pointer is compatible. The mutability is different only // to satisfy NonNull (NonNull::new requires &mut). The returned value // is still "immutable". &self can also never be nonnull. let ptr: NonNull<RawCcBox<T, O>> = unsafe { NonNull::new_unchecked(self as *const _ as *mut _) }; let cc = RawCc::<T, O>(ptr); Box::new(cc) } #[cfg(feature = "debug")] fn gc_debug_name(&self) -> String { self.debug_name() } } impl<T: Trace +?Sized, O: AbstractObjectSpace> GcClone for RawCc<T, O> { fn gc_ref_count(&self) -> usize { self.ref_count() } fn gc_drop_t(&self) { self.inner().drop_t() } } impl<T: Trace> Trace for Cc<T> { fn trace(&self, tracer: &mut Tracer) { Cc::<T>::trace(self, tracer) } #[inline] fn is_type_tracked() -> bool { T::is_type_tracked() } } impl Trace for Cc<dyn Trace> { fn trace(&self, tracer: &mut Tracer) { Cc::<dyn Trace>::trace(self, tracer) } #[inline] fn is_type_tracked() -> bool { // Trait objects can be anything. true } } #[cfg(feature = "nightly")] impl<T:?Sized + std::marker::Unsize<U>, U:?Sized, O: AbstractObjectSpace> std::ops::CoerceUnsized<RawCc<U, O>> for RawCc<T, O> { } #[inline] unsafe fn
cast_ref
identifier_name
cc.rs
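// The FIM target above elides the body of `cast_ref`. One plausible shape,
// inferred only from its call site in `RawCcBox::header()` (offset a
// reference by a signed number of bytes and reinterpret it), is sketched
// below as a guess; it is not necessarily the crate's actual implementation.

unsafe fn cast_ref<T: ?Sized, R>(value: &T, offset: isize) -> &R {
    // Casting to *const u8 discards any fat-pointer metadata, leaving just
    // the address, which is then moved by `offset` bytes and reinterpreted.
    let addr = value as *const T as *const u8;
    &*(addr.offset(offset) as *const R)
}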
st)] pub(crate) name: String, value: UnsafeCell<ManuallyDrop<T>>, } /// The real layout if `T` is tracked by the collector. The main APIs still use /// the `CcBox` type. This type is only used for allocation and deallocation. /// /// This is a private type. #[repr(C)] pub struct RawCcBoxWithGcHeader<T:?Sized, O: AbstractObjectSpace> { header: O::Header, cc_box: RawCcBox<T, O>, } /// A single-threaded reference-counting pointer that integrates /// with cyclic garbage collection. /// /// See [module level documentation](index.html) for more details. /// /// [`Cc`](type.Cc.html) is not thread-safe. It does not implement `Send` /// or `Sync`: /// /// ```compile_fail /// use std::ops::Deref; /// use gcmodule::Cc; /// let cc = Cc::new(5); /// std::thread::spawn(move || { /// println!("{}", cc.deref()); /// }); /// ``` pub type Cc<T> = RawCc<T, ObjectSpace>; /// Weak reference of [`Cc`](type.Cc.html). pub type Weak<T> = RawWeak<T, ObjectSpace>; /// Low-level type for [`Cc<T>`](type.Cc.html). pub struct RawCc<T:?Sized, O: AbstractObjectSpace>(NonNull<RawCcBox<T, O>>); /// Low-level type for [`Weak<T>`](type.Weak.html). pub struct RawWeak<T:?Sized, O: AbstractObjectSpace>(NonNull<RawCcBox<T, O>>); // `ManuallyDrop<T>` does not implement `UnwindSafe`. But `CcBox::drop` does // make sure `T` is dropped. If `T` is unwind-safe, so does `CcBox<T>`. impl<T: UnwindSafe +?Sized> UnwindSafe for RawCcBox<T, ObjectSpace> {} // `NonNull` does not implement `UnwindSafe`. But `Cc` and `Weak` only use it // as a "const" pointer. If `T` is unwind-safe, so does `Cc<T>`. impl<T: UnwindSafe +?Sized, O: AbstractObjectSpace> UnwindSafe for RawCc<T, O> {} impl<T: UnwindSafe +?Sized, O: AbstractObjectSpace> UnwindSafe for RawWeak<T, O> {} /// Type-erased `Cc<T>` with interfaces needed by GC. /// /// This is a private type. pub trait CcDyn { /// Returns the reference count for cycle detection. fn gc_ref_count(&self) -> usize; /// Visit referents for cycle detection. fn gc_traverse(&self, tracer: &mut Tracer); /// Get an cloned `Cc<dyn Trace>`. This has 2 purposes: /// - Keep a reference so `CcBox<T>` is not released in the next step. /// So metadata like `ref_count` can still be read. /// - Operate on the object. fn gc_clone(&self) -> Box<dyn GcClone>; #[cfg(feature = "debug")] /// Name used in collect.rs. fn gc_debug_name(&self) -> String { "?".to_string() } } /// Type-erased gc_clone result. /// /// This is a private type. pub trait GcClone { /// Force drop the value T. fn gc_drop_t(&self); /// Returns the reference count. This is useful for verification. fn gc_ref_count(&self) -> usize; } /// A dummy implementation without drop side-effects. pub(crate) struct CcDummy; impl CcDummy { pub(crate) fn ccdyn_vptr() -> *mut () { let mut dummy = CcDummy; // safety: To access vtable pointer. Stable API cannot do it. let fat_ptr: [*mut (); 2] = unsafe { mem::transmute(&mut dummy as &mut dyn CcDyn) }; fat_ptr[1] } } impl CcDyn for CcDummy { fn gc_ref_count(&self) -> usize { 1 } fn gc_traverse(&self, _tracer: &mut Tracer) {} fn gc_clone(&self) -> Box<dyn GcClone> { panic!("bug: CcDummy::gc_clone should never be called"); } } impl<T: Trace> Cc<T> { /// Constructs a new [`Cc<T>`](type.Cc.html) in a thread-local storage. /// /// To collect cycles, use [`collect_thread_cycles`](fn.collect_thread_cycles.html). pub fn new(value: T) -> Cc<T>
cc.rs
/// The real layout if `T` is not tracked by the collector.
///
/// This is a private type.
pub struct RawCcBox<T: ?Sized, O: AbstractObjectSpace> {
    ref_count: O::RefCount,
    #[cfg(test)]
    pub(crate) name: String,
    value: UnsafeCell<ManuallyDrop<T>>,
}

/// The real layout if `T` is tracked by the collector. The main APIs still use
/// the `CcBox` type. This type is only used for allocation and deallocation.
///
/// This is a private type.
#[repr(C)]
pub struct RawCcBoxWithGcHeader<T: ?Sized, O: AbstractObjectSpace> {
    header: O::Header,
    cc_box: RawCcBox<T, O>,
}

/// A single-threaded reference-counting pointer that integrates
/// with cyclic garbage collection.
///
/// See [module level documentation](index.html) for more details.
///
/// [`Cc`](type.Cc.html) is not thread-safe. It does not implement `Send`
/// or `Sync`:
///
/// ```compile_fail
/// use std::ops::Deref;
/// use gcmodule::Cc;
/// let cc = Cc::new(5);
/// std::thread::spawn(move || {
///     println!("{}", cc.deref());
/// });
/// ```
pub type Cc<T> = RawCc<T, ObjectSpace>;

/// Weak reference of [`Cc`](type.Cc.html).
pub type Weak<T> = RawWeak<T, ObjectSpace>;

/// Low-level type for [`Cc<T>`](type.Cc.html).
pub struct RawCc<T: ?Sized, O: AbstractObjectSpace>(NonNull<RawCcBox<T, O>>);

/// Low-level type for [`Weak<T>`](type.Weak.html).
pub struct RawWeak<T: ?Sized, O: AbstractObjectSpace>(NonNull<RawCcBox<T, O>>);

// `ManuallyDrop<T>` does not implement `UnwindSafe`. But `CcBox::drop` does
// make sure `T` is dropped. If `T` is unwind-safe, so is `CcBox<T>`.
impl<T: UnwindSafe + ?Sized> UnwindSafe for RawCcBox<T, ObjectSpace> {}

// `NonNull` does not implement `UnwindSafe`. But `Cc` and `Weak` only use it
// as a "const" pointer. If `T` is unwind-safe, so are `Cc<T>` and `Weak<T>`.
impl<T: UnwindSafe + ?Sized, O: AbstractObjectSpace> UnwindSafe for RawCc<T, O> {}
impl<T: UnwindSafe + ?Sized, O: AbstractObjectSpace> UnwindSafe for RawWeak<T, O> {}

/// Type-erased `Cc<T>` with interfaces needed by GC.
///
/// This is a private type.
pub trait CcDyn {
    /// Returns the reference count for cycle detection.
    fn gc_ref_count(&self) -> usize;

    /// Visit referents for cycle detection.
    fn gc_traverse(&self, tracer: &mut Tracer);

    /// Get a cloned `Cc<dyn Trace>`. This has 2 purposes:
    /// - Keep a reference so `CcBox<T>` is not released in the next step,
    ///   so metadata like `ref_count` can still be read.
    /// - Operate on the object.
    fn gc_clone(&self) -> Box<dyn GcClone>;

    #[cfg(feature = "debug")]
    /// Name used in collect.rs.
    fn gc_debug_name(&self) -> String {
        "?".to_string()
    }
}

/// Type-erased `gc_clone` result.
///
/// This is a private type.
pub trait GcClone {
    /// Force-drop the value `T`.
    fn gc_drop_t(&self);

    /// Returns the reference count. This is useful for verification.
    fn gc_ref_count(&self) -> usize;
}

/// A dummy implementation without drop side-effects.
pub(crate) struct CcDummy;

impl CcDummy {
    pub(crate) fn ccdyn_vptr() -> *mut () {
        let mut dummy = CcDummy;
        // safety: To access the vtable pointer. Stable APIs cannot do it.
        let fat_ptr: [*mut (); 2] = unsafe { mem::transmute(&mut dummy as &mut dyn CcDyn) };
        fat_ptr[1]
    }
}

impl CcDyn for CcDummy {
    fn gc_ref_count(&self) -> usize {
        1
    }
    fn gc_traverse(&self, _tracer: &mut Tracer) {}
    fn gc_clone(&self) -> Box<dyn GcClone> {
        panic!("bug: CcDummy::gc_clone should never be called");
    }
}

impl<T: Trace> Cc<T> {
    /// Constructs a new [`Cc<T>`](type.Cc.html) in a thread-local storage.
    ///
    /// To collect cycles, use [`collect_thread_cycles`](fn.collect_thread_cycles.html).
    pub fn new(value: T) -> Cc<T> {
        collect::THREAD_OBJECT_SPACE.with(|space| Self::new_in_space(value, space))
    }
}

impl<T: Trace, O: AbstractObjectSpace> RawCc<T, O> {
    /// Constructs a new [`Cc<T>`](type.Cc.html) in the given
    /// [`ObjectSpace`](struct.ObjectSpace.html).
    ///
    /// To collect cycles, call `ObjectSpace::collect_cycles()`.
    pub(crate) fn new_in_space(value: T, space: &O) -> Self {
        let is_tracked = T::is_type_tracked();
        let cc_box = RawCcBox {
            ref_count: space.new_ref_count(is_tracked),
            value: UnsafeCell::new(ManuallyDrop::new(value)),
            #[cfg(test)]
            name: debug::NEXT_DEBUG_NAME.with(|n| n.get().to_string()),
        };
        let ccbox_ptr: *mut RawCcBox<T, O> = if is_tracked {
            // Create a GcHeader before the CcBox. This is similar to CPython.
            let header = space.empty_header();
            let cc_box_with_header = RawCcBoxWithGcHeader { header, cc_box };
            let mut boxed = Box::new(cc_box_with_header);
            // Fix up fields in GcHeader. This is done after the creation of the
            // Box so the memory addresses are stable.
            space.insert(&mut boxed.header, &boxed.cc_box);
            debug_assert_eq!(
                mem::size_of::<O::Header>() + mem::size_of::<RawCcBox<T, O>>(),
                mem::size_of::<RawCcBoxWithGcHeader<T, O>>()
            );
            let ptr: *mut RawCcBox<T, O> = &mut boxed.cc_box;
            Box::leak(boxed);
            ptr
        } else {
            Box::into_raw(Box::new(cc_box))
        };
        // safety: ccbox_ptr cannot be null from the above code.
        let non_null = unsafe { NonNull::new_unchecked(ccbox_ptr) };
        let result = Self(non_null);
        if is_tracked {
            debug::log(|| (result.debug_name(), "new (CcBoxWithGcHeader)"));
        } else {
            debug::log(|| (result.debug_name(), "new (CcBox)"));
        }
        debug_assert_eq!(result.ref_count(), 1);
        result
    }

    /// Convert to `RawCc<dyn Trace>`.
    pub fn into_dyn(self) -> RawCc<dyn Trace, O> {
        #[cfg(feature = "nightly")]
        {
            // Requires CoerceUnsized, which is currently unstable.
            self
        }

        // safety: Trait object magic. Tested by test_dyn_downcast.
        #[cfg(not(feature = "nightly"))]
        unsafe {
            // XXX: This depends on Rust internals. But it works on stable.
            // Replace this with CoerceUnsized once that becomes stable.
            // Cc<dyn Trace> has 2 usize values: The first one is the same
            // as Cc<T>. The second one is the vtable. The vtable pointer
            // is the same as the second pointer of `&dyn Trace`.
            let mut fat_ptr: [usize; 2] = mem::transmute(self.inner().deref() as &dyn Trace);
            let self_ptr: usize = mem::transmute(self);
            fat_ptr[0] = self_ptr;
            mem::transmute(fat_ptr)
        }
    }
}

impl<T: Trace + Clone> Cc<T> {
    /// Update the value `T` in a copy-on-write way.
    ///
    /// If the ref count is 1, the value is updated in-place.
    /// Otherwise a new `Cc<T>` will be created.
    pub fn update_with(&mut self, mut update_func: impl FnMut(&mut T)) {
        let need_clone = self.ref_count() > 1;
        if need_clone {
            let mut value = <Cc<T>>::deref(self).clone();
            update_func(&mut value);
            *self = Cc::new(value);
        } else {
            let value_ptr: *mut ManuallyDrop<T> = self.inner().value.get();
            let value_mut: &mut T = unsafe { &mut *value_ptr }.deref_mut();
            update_func(value_mut);
        }
    }
}

impl<T: ?Sized, O: AbstractObjectSpace> RawCcBox<T, O> {
    #[inline]
    fn header_ptr(&self) -> *const () {
        self.header() as *const _ as _
    }

    #[inline]
    fn header(&self) -> &O::Header {
        debug_assert!(self.is_tracked());
        // safety: See `Cc::new`. GcHeader is before CcBox for tracked objects.
        unsafe { cast_ref(self, -(mem::size_of::<O::Header>() as isize)) }
    }

    #[inline]
    fn is_tracked(&self) -> bool {
        self.ref_count.is_tracked()
    }

    #[inline]
    fn is_dropped(&self) -> bool {
        self.ref_count.is_dropped()
    }

    #[inline]
    fn inc_ref(&self) -> usize {
        self.ref_count.inc_ref()
    }

    #[inline]
    fn dec_ref(&self) -> usize {
        self.ref_count.dec_ref()
    }

    #[inline]
    fn ref_count(&self) -> usize {
        self.ref_count.ref_count()
    }

    #[inline]
    fn weak_count(&self) -> usize {
        self.ref_count.weak_count()
    }

    #[inline]
    fn set_dropped(&self) -> bool {
        self.ref_count.set_dropped()
    }

    #[inline]
    pub(crate) fn drop_t(&self) {
        let already_dropped = self.set_dropped();
        if !already_dropped {
            debug::log(|| (self.debug_name(), "drop (T)"));
            // safety: The set_dropped() check ensures T is only dropped once.
            // Other places (e.g. the gc collector) ensure that T is no longer
            // accessed.
            unsafe { ManuallyDrop::drop(&mut *(self.value.get())) };
        }
    }

    pub(crate) fn trace_t(&self, tracer: &mut Tracer) {
        if !self.is_tracked() {
            return;
        }
        debug::log(|| (self.debug_name(), "trace"));
        // For other non-`Cc<T>` container types, `trace` visits referents,
        // is recursive, and does not call `tracer` directly. For `Cc<T>`,
        // `trace` stops here, is non-recursive, and applies `tracer` to the
        // actual `GcHeader`. It's expected that the upper layer calls
        // `gc_traverse` on everything (not just roots).
        tracer(self.header_ptr());
    }

    pub(crate) fn debug_name(&self) -> String {
        #[cfg(test)]
        {
            self.name.clone()
        }
        #[cfg(not(test))]
        {
            #[allow(unused_mut)]
            let mut result = format!("{} at {:p}", std::any::type_name::<T>(), &self.value);
            #[cfg(all(feature = "debug", feature = "nightly"))]
            {
                if !self.is_dropped() && crate::debug::GC_DROPPING.with(|t| !t.get()) {
                    let debug = self.deref().optional_debug();
                    if !debug.is_empty() {
                        result += &format!(" {}", debug);
                    }
                }
            }
            return result;
        }
    }
}

#[cfg(all(feature = "debug", feature = "nightly"))]
pub(crate) trait OptionalDebug {
    fn optional_debug(&self) -> String;
}

#[cfg(all(feature = "debug", feature = "nightly"))]
impl<T: ?Sized> OptionalDebug for T {
    default fn optional_debug(&self) -> String {
        "".to_string()
    }
}

#[cfg(all(feature = "debug", feature = "nightly"))]
impl<T: std::fmt::Debug + ?Sized> OptionalDebug for T {
    fn optional_debug(&self) -> String {
        format!("{:?}", self)
    }
}

impl<T: ?Sized, O: AbstractObjectSpace> RawCc<T, O> {
    /// Obtains a "weak reference", a non-owning pointer.
    pub fn downgrade(&self) -> RawWeak<T, O> {
        let inner = self.inner();
        inner.ref_count.inc_weak();
        debug::log(|| {
            (
                inner.debug_name(),
                format!("new-weak ({})", inner.ref_count.weak_count()),
            )
        });
        RawWeak(self.0)
    }

    /// Gets the reference count, not considering weak references.
    #[inline]
    pub fn strong_count(&self) -> usize {
        self.ref_count()
    }
}

impl<T: ?Sized, O: AbstractObjectSpace> RawWeak<T, O> {
    /// Attempts to obtain a "strong reference".
    ///
    /// Returns `None` if the value has already been dropped.
    pub fn upgrade(&self) -> Option<RawCc<T, O>> {
        let inner = self.inner();
        // Make the operation below "atomic".
        let _locked = inner.ref_count.locked();
        if inner.is_dropped() {
            None
        } else {
            inner.inc_ref();
            debug::log(|| {
                (
                    inner.debug_name(),
                    format!("new-strong ({})", inner.ref_count.ref_count()),
                )
            });
            Some(RawCc(self.0))
        }
    }

    /// Gets the reference count, not considering weak references.
    #[inline]
    pub fn strong_count(&self) -> usize {
        self.inner().ref_count()
    }

    /// Gets the weak (non-owning) reference count.
    #[inline]
    pub fn weak_count(&self) -> usize {
        self.inner().weak_count()
    }
}

impl<T: ?Sized, O: AbstractObjectSpace> RawCc<T, O> {
    #[inline]
    pub(crate) fn inner(&self) -> &RawCcBox<T, O> {
        // safety: The CcBox lifetime is maintained by the ref count. The
        // pointer is valid.
        unsafe { self.0.as_ref() }
    }

    /// `trace` without the `T: Trace` bound.
    ///
    /// Useful for structures with `Cc<T>` fields where `T` does not implement
    /// `Trace`. For example, `struct S(Cc<Box<dyn MyTrait>>)`. To implement
    /// `Trace` for `S`, it can use `Cc::trace(&self.0, tracer)`.
    #[inline]
    pub fn trace(&self, tracer: &mut Tracer) {
        self.inner().trace_t(tracer);
    }

    #[inline]
    fn inc_ref(&self) -> usize {
        self.inner().inc_ref()
    }

    #[inline]
    fn dec_ref(&self) -> usize {
        self.inner().dec_ref()
    }

    #[inline]
    pub(crate) fn ref_count(&self) -> usize {
        self.inner().ref_count()
    }

    /// Gets the weak (non-owning) reference count.
    #[inline]
    pub fn weak_count(&self) -> usize {
        self.inner().weak_count()
    }

    pub(crate) fn debug_name(&self) -> String {
        self.inner().debug_name()
    }
}

impl<T: ?Sized, O: AbstractObjectSpace> RawWeak<T, O> {
    #[inline]
    fn inner(&self) -> &RawCcBox<T, O> {
        // safety: The CcBox lifetime is maintained by the ref count. The
        // pointer is valid.
        unsafe { self.0.as_ref() }
    }
}

impl<T: ?Sized, O: AbstractObjectSpace> Clone for RawCc<T, O> {
    #[inline]
    fn clone(&self) -> Self {
        // In theory, self.inner().ref_count.locked() is needed. In practice,
        // incrementing the ref count is a single atomic operation that cannot
        // be split, so locking becomes optional.
        // let _locked = self.inner().ref_count.locked();
        self.inc_ref();
        debug::log(|| (self.debug_name(), format!("clone ({})", self.ref_count())));
        Self(self.0)
    }
}

impl<T: ?Sized, O: AbstractObjectSpace> Clone for RawWeak<T, O> {
    #[inline]
    fn clone(&self) -> Self {
        let inner = self.inner();
        let ref_count = &inner.ref_count;
        ref_count.inc_weak();
        debug::log(|| {
            (
                inner.debug_name(),
                format!("clone-weak ({})", ref_count.weak_count()),
            )
        });
        Self(self.0)
    }
}

impl<T: ?Sized> Deref for Cc<T> {
    type Target = T;

    #[inline]
    fn deref(&self) -> &Self::Target {
        self.inner().deref()
    }
}

impl<T: ?Sized, O: AbstractObjectSpace> Deref for RawCcBox<T, O> {
    type Target = T;

    #[inline]
    fn deref(&self) -> &Self::Target {
        debug_assert!(
            !self.is_dropped(),
            concat!(
                "bug: accessing a dropped CcBox detected\n",
                "This usually happens after ignoring another panic triggered by the collector."
            )
        );
        // safety: The CcBox (and its value) lifetime is maintained by the ref
        // count. If `Trace` is implemented correctly, then the GC won't call
        // drop_t() incorrectly and this pointer is valid. Otherwise the above
        // assertion can prevent UB in debug builds.
        unsafe { &*self.value.get() }
    }
}

fn drop_ccbox<T: ?Sized, O: AbstractObjectSpace>(cc_box: *mut RawCcBox<T, O>) {
    // safety: See Cc::new. The pointer was created by Box::into_raw.
    let cc_box: Box<RawCcBox<T, O>> = unsafe { Box::from_raw(cc_box) };
    let is_tracked = cc_box.is_tracked();
    if is_tracked {
        // The real object is CcBoxWithGcHeader. Drop that instead.
        // safety: See Cc::new for CcBoxWithGcHeader.
        let gc_box: Box<RawCcBoxWithGcHeader<T, O>> = unsafe { cast_box(cc_box) };
        O::remove(&gc_box.header);
        // Drop T if it hasn't been dropped yet.
        // This needs to happen after O::remove so the collector won't have a
        // chance to read dropped content.
        gc_box.cc_box.drop_t();
        debug::log(|| (gc_box.cc_box.debug_name(), "drop (CcBoxWithGcHeader)"));
        drop(gc_box);
    } else {
        // Drop T if it hasn't been dropped yet.
        cc_box.drop_t();
        debug::log(|| (cc_box.debug_name(), "drop (CcBox)"));
        drop(cc_box);
    }
}

impl<T: ?Sized, O: AbstractObjectSpace> Drop for RawCc<T, O> {
    fn drop(&mut self) {
        let ptr: *mut RawCcBox<T, O> = self.0.as_ptr();
        let inner = self.inner();
        // Block the threaded collector. This is needed because "drop()" is a
        // complex operation. The whole operation needs to be "atomic".
        let _locked = inner.ref_count.locked();
        let old_ref_count = self.dec_ref();
        debug::log(|| (self.debug_name(), format!("drop ({})", self.ref_count())));
        debug_assert!(old_ref_count >= 1);
        if old_ref_count == 1 {
            if self.weak_count() == 0 {
                // safety: The CcBox lifetime is maintained by the ref count.
                drop_ccbox(ptr);
            } else {
                inner.drop_t();
            }
        }
    }
}

impl<T: ?Sized, O: AbstractObjectSpace> Drop for RawWeak<T, O> {
    fn drop(&mut self) {
        let ptr: *mut RawCcBox<T, O> = self.0.as_ptr();
        let inner = self.inner();
        let ref_count = &inner.ref_count;
        // Block the threaded collector to "freeze" the ref count, for safety.
        let _locked = ref_count.locked();
        let old_ref_count = ref_count.ref_count();
        let old_weak_count = ref_count.dec_weak();
        debug::log(|| {
            (
                inner.debug_name(),
                format!("drop-weak ({})", ref_count.weak_count()),
            )
        });
        debug_assert!(old_weak_count >= 1);
        if old_ref_count == 0 && old_weak_count == 1 {
            // safety: The CcBox lifetime is maintained by the ref count.
            drop_ccbox(ptr);
        }
    }
}

impl<T: Trace + ?Sized, O: AbstractObjectSpace> CcDyn for RawCcBox<T, O> {
    fn gc_ref_count(&self) -> usize {
        self.ref_count()
    }

    fn gc_traverse(&self, tracer: &mut Tracer) {
        debug::log(|| (self.debug_name(), "gc_traverse"));
        T::trace(self.deref(), tracer)
    }

    fn gc_clone(&self) -> Box<dyn GcClone> {
        self.ref_count.inc_ref();
        debug::log(|| {
            let msg = format!("gc_clone ({})", self.ref_count());
            (self.debug_name(), msg)
        });
        // safety: The pointer is compatible. The mutability is different only
        // to satisfy NonNull (NonNull::new requires &mut). The returned value
        // is still "immutable". `&self` can never be null.
        let ptr: NonNull<RawCcBox<T, O>> =
            unsafe { NonNull::new_unchecked(self as *const _ as *mut _) };
        let cc = RawCc::<T, O>(ptr);
        Box::new(cc)
    }
}
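// The module below is a minimal usage sketch of the API above, not part of
// the crate. It assumes the `Trace` trait has the `trace`/`is_type_tracked`
// shape this file relies on, that `Trace` is implemented for primitives like
// `i32`, and that `collect_thread_cycles` (referenced by the `Cc::new` docs)
// returns the number of objects freed.
#[cfg(test)]
mod usage_sketch {
    use super::*;
    use std::cell::RefCell;

    // A self-referential node. Plain reference counting alone would leak a
    // cycle of these; the collector can reclaim it because `trace` reports
    // the inner `Cc` edge.
    struct Node {
        next: RefCell<Option<Cc<Node>>>,
    }

    impl Trace for Node {
        fn trace(&self, tracer: &mut Tracer) {
            if let Some(next) = self.next.borrow().as_ref() {
                next.trace(tracer); // visit the referent edge
            }
        }
        fn is_type_tracked() -> bool {
            true // opt in to cycle collection
        }
    }

    #[test]
    fn weak_and_copy_on_write() {
        let a = Cc::new(Node { next: RefCell::new(None) });
        assert_eq!(a.strong_count(), 1);

        // A weak reference does not keep the value alive by itself.
        let w = a.downgrade();
        assert!(w.upgrade().is_some());
        drop(a);
        assert!(w.upgrade().is_none()); // value dropped; upgrade fails

        // Copy-on-write update: the ref count is 1, so this mutates in place.
        let mut n = Cc::new(5i32);
        n.update_with(|v| *v += 1);
        assert_eq!(*n, 6);
    }

    #[test]
    fn collects_a_cycle() {
        let a = Cc::new(Node { next: RefCell::new(None) });
        let b = Cc::new(Node { next: RefCell::new(Some(a.clone())) });
        *a.next.borrow_mut() = Some(b.clone()); // a -> b -> a
        drop(a);
        drop(b);
        // Ref counting alone cannot free the pair; the cycle collector can.
        let freed = crate::collect_thread_cycles();
        assert!(freed >= 2);
    }
}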
main.rs
// Copyright (c) Microsoft. All rights reserved.

//! This binary is the process entrypoint for aziot-certd, -identityd, -keyd
//! and -tpmd. Rather than being four separate binaries, all four services are
//! symlinks to this one aziotd binary. The aziotd binary looks at its
//! command-line args to figure out which service it's being invoked as, and
//! runs the code of that service accordingly.

#![deny(rust_2018_idioms)]
#![warn(clippy::all, clippy::pedantic)]
#![allow(clippy::default_trait_access, clippy::let_unit_value)]

mod error;
mod logging;

use error::{Error, ErrorKind};

#[tokio::main]
async fn main() {
    logging::init();

    if let Err(err) = main_inner().await {
        log::error!("{}", err.0);

        let mut source = std::error::Error::source(&err.0);
        while let Some(err) = source {
            log::error!("caused by: {}", err);
            source = std::error::Error::source(err);
        }

        log::error!("{:?}", err.1);

        std::process::exit(1);
    }
}

async fn main_inner() -> Result<(), Error> {
    let mut args = std::env::args_os();
    let process_name = process_name_from_args(&mut args)?;

    match process_name {
        ProcessName::Certd => {
            run(
                aziot_certd::main,
                "AZIOT_CERTD_CONFIG",
                "/etc/aziot/certd/config.toml",
                "AZIOT_CERTD_CONFIG_DIR",
                "/etc/aziot/certd/config.d",
            )
            .await?
        }
        ProcessName::Identityd => {
            run(
                aziot_identityd::main,
                "AZIOT_IDENTITYD_CONFIG",
                "/etc/aziot/identityd/config.toml",
                "AZIOT_IDENTITYD_CONFIG_DIR",
                "/etc/aziot/identityd/config.d",
            )
            .await?
        }
        ProcessName::Keyd => {
            run(
                aziot_keyd::main,
                "AZIOT_KEYD_CONFIG",
                "/etc/aziot/keyd/config.toml",
                "AZIOT_KEYD_CONFIG_DIR",
                "/etc/aziot/keyd/config.d",
            )
            .await?
        }
        ProcessName::Tpmd => {
            run(
                aziot_tpmd::main,
                "AZIOT_TPMD_CONFIG",
                "/etc/aziot/tpmd/config.toml",
                "AZIOT_TPMD_CONFIG_DIR",
                "/etc/aziot/tpmd/config.d",
            )
            .await?
        }
    }

    Ok(())
}

#[derive(Clone, Copy, Debug, PartialEq)]
enum ProcessName {
    Certd,
    Identityd,
    Keyd,
    Tpmd,
}

/// If the symlink is being used to invoke this binary, the process name can be
/// determined from the first arg, i.e. `argv[0]` in C terms.
///
/// An alternative is supported where the binary is invoked as aziotd itself,
/// and the process name is instead the next arg, i.e. `argv[1]` in C terms.
/// This is primarily useful for local development, so it's only allowed in
/// debug builds.
fn process_name_from_args<I>(args: &mut I) -> Result<ProcessName, Error>
where
    I: Iterator,
    <I as Iterator>::Item: AsRef<std::ffi::OsStr>,
{
    let arg = args.next().ok_or_else(|| {
        ErrorKind::GetProcessName("could not extract process name from args".into())
    })?;

    // arg could be a single component like "aziot-certd", or a path that ends
    // with "aziot-certd", so parse it as a Path and get the last component.
    // This does the right thing in either case.
    let arg = std::path::Path::new(&arg);
    let process_name = arg.file_name().ok_or_else(|| {
        ErrorKind::GetProcessName(
            format!(
                "could not extract process name from arg {:?}",
                arg.display(),
            )
            .into(),
        )
    })?;

    match process_name.to_str() {
        Some("aziot-certd") => Ok(ProcessName::Certd),
        Some("aziot-identityd") => Ok(ProcessName::Identityd),
        Some("aziot-keyd") => Ok(ProcessName::Keyd),
        Some("aziot-tpmd") => Ok(ProcessName::Tpmd),

        // The next arg is the process name.
        #[cfg(debug_assertions)]
        Some("aziotd") => process_name_from_args(args),

        _ => Err(ErrorKind::GetProcessName(
            format!("unrecognized process name {:?}", process_name).into(),
        )
        .into()),
    }
}

async fn run<TMain, TConfig, TFuture, TServer>(
    main: TMain,
    config_env_var: &str,
    config_file_default: &str,
    config_directory_env_var: &str,
    config_directory_default: &str,
) -> Result<(), Error>
where
    TMain: FnOnce(TConfig) -> TFuture,
    TConfig: serde::de::DeserializeOwned,
    TFuture: std::future::Future<
        Output = Result<(http_common::Connector, TServer), Box<dyn std::error::Error>>,
    >,
    TServer: hyper::service::Service<
            hyper::Request<hyper::Body>,
            Response = hyper::Response<hyper::Body>,
            Error = std::convert::Infallible,
        > + Clone
        + Send
        + 'static,
    <TServer as hyper::service::Service<hyper::Request<hyper::Body>>>::Future: Send,
{
    log::info!("Starting service...");
    log::info!(
        "Version - {}",
        option_env!("PACKAGE_VERSION").unwrap_or("dev build"),
    );

    let config_path: std::path::PathBuf =
        std::env::var_os(config_env_var).map_or_else(|| config_file_default.into(), Into::into);

    let config = std::fs::read(&config_path)
        .map_err(|err| ErrorKind::ReadConfig(Some(config_path.clone()), Box::new(err)))?;
    let mut config: toml::Value = toml::from_slice(&config)
        .map_err(|err| ErrorKind::ReadConfig(Some(config_path), Box::new(err)))?;

    let config_directory_path: std::path::PathBuf = std::env::var_os(config_directory_env_var)
        .map_or_else(|| config_directory_default.into(), Into::into);

    match std::fs::read_dir(&config_directory_path) {
        Ok(entries) => {
            let mut patch_paths = vec![];

            for entry in entries {
                let entry = entry.map_err(|err| {
                    ErrorKind::ReadConfig(Some(config_directory_path.clone()), Box::new(err))
                })?;

                let entry_file_type = entry.file_type().map_err(|err| {
                    ErrorKind::ReadConfig(Some(config_directory_path.clone()), Box::new(err))
                })?;
                if !entry_file_type.is_file() {
                    continue;
                }

                let patch_path = entry.path();
                if patch_path.extension().and_then(std::ffi::OsStr::to_str) != Some("toml") {
                    continue;
                }

                patch_paths.push(patch_path);
            }

            patch_paths.sort();

            for patch_path in patch_paths {
                let patch = std::fs::read(&patch_path).map_err(|err| {
                    ErrorKind::ReadConfig(Some(patch_path.clone()), Box::new(err))
                })?;
                let patch: toml::Value = toml::from_slice(&patch)
                    .map_err(|err| ErrorKind::ReadConfig(Some(patch_path), Box::new(err)))?;
                merge_toml(&mut config, patch);
            }
        }

        Err(err) if err.kind() == std::io::ErrorKind::NotFound => (),

        Err(err) => {
            return Err(ErrorKind::ReadConfig(Some(config_directory_path), Box::new(err)).into())
        }
    }

    let config: TConfig = serde::Deserialize::deserialize(config)
        .map_err(|err| ErrorKind::ReadConfig(None, Box::new(err)))?;

    let (connector, server) = main(config).await.map_err(ErrorKind::Service)?;

    log::info!("Starting server...");

    let mut incoming = connector
        .incoming()
        .await
        .map_err(|err| ErrorKind::Service(Box::new(err)))?;
    let () = incoming
        .serve(server)
        .await
        .map_err(|err| ErrorKind::Service(Box::new(err)))?;

    log::info!("Stopped server.");

    Ok(())
}

fn merge_toml(base: &mut toml::Value, patch: toml::Value) {
    // Similar to JSON patch, except that:
    //
    // - Maps are called tables.
    // - There is no equivalent of null that can be used to remove keys from
    //   an object.
    // - Arrays are merged by concatenating the patch to the base, rather than
    //   replacing the base with the patch.

    if let toml::Value::Table(base) = base {
        if let toml::Value::Table(patch) = patch {
            for (key, value) in patch {
                // Insert a dummy `false` if the original key didn't exist at
                // all. It'll be overwritten by `value` in that case.
                let original_value = base.entry(key).or_insert(toml::Value::Boolean(false));
                merge_toml(original_value, value);
            }

            return;
        }
    }

    if let toml::Value::Array(base) = base {
        if let toml::Value::Array(patch) = patch {
            base.extend(patch);
            return;
        }
    }

    *base = patch;
}

#[cfg(test)]
mod tests {
    #[test]
    fn process_name_from_args() {
        // Success test cases
        let mut test_cases = vec![
            (&["aziot-certd"][..], super::ProcessName::Certd),
            (&["aziot-identityd"][..], super::ProcessName::Identityd),
            (&["aziot-keyd"][..], super::ProcessName::Keyd),
            (&["aziot-tpmd"][..], super::ProcessName::Tpmd),
            (
                &["/usr/libexec/aziot/aziot-certd"][..],
                super::ProcessName::Certd,
            ),
            (
                &["/usr/libexec/aziot/aziot-identityd"][..],
                super::ProcessName::Identityd,
            ),
            (
                &["/usr/libexec/aziot/aziot-keyd"][..],
                super::ProcessName::Keyd,
            ),
            (
                &["/usr/libexec/aziot/aziot-tpmd"][..],
                super::ProcessName::Tpmd,
            ),
        ];

        // The argv[1] fallback is only enabled in debug builds.
        if cfg!(debug_assertions) {
            test_cases.extend_from_slice(&[
                (&["aziotd", "aziot-certd"][..], super::ProcessName::Certd),
                (
                    &["aziotd", "aziot-identityd"][..],
                    super::ProcessName::Identityd,
                ),
                (&["aziotd", "aziot-keyd"][..], super::ProcessName::Keyd),
                (
                    &["/usr/libexec/aziot/aziotd", "aziot-certd"][..],
                    super::ProcessName::Certd,
                ),
                (
                    &["/usr/libexec/aziot/aziotd", "aziot-identityd"][..],
                    super::ProcessName::Identityd,
                ),
                (
                    &["/usr/libexec/aziot/aziotd", "aziot-keyd"][..],
                    super::ProcessName::Keyd,
                ),
                (
                    &["/usr/libexec/aziot/aziotd", "aziot-tpmd"][..],
                    super::ProcessName::Tpmd,
                ),
            ]);
        }

        for (input, expected) in test_cases {
            let mut input = input.iter().copied().map(std::ffi::OsStr::new);
            let actual = super::process_name_from_args(&mut input).unwrap();
            assert_eq!(None, input.next());
            assert_eq!(expected, actual);
        }

        // Failure test cases
        for &input in &[
            // Unrecognized process name in argv[0]
            &["foo"][..],
            &["/usr/libexec/aziot/foo"][..],
            &["/usr/libexec/aziot/foo", "aziot-certd"][..],
            // Either fails because it's a release build so the argv[1]
            // fallback is disabled, or fails because it's a debug build where
            // the argv[1] fallback is enabled but the process name in argv[1]
            // is unrecognized anyway.
&["aziotd", "foo"][..], &["/usr/libexec/aziot/aziotd", "foo"][..], ] { let mut input = input.iter().copied().map(std::ffi::OsStr::new); let _ = super::process_name_from_args(&mut input).unwrap_err(); } } #[test] fn merge_toml() { let base = r#" foo_key = "A" foo_parent_key = { foo_sub_key = "B" } [bar_table] bar_table_key = "C" bar_table_parent_key = { bar_table_sub_key = "D" } [[baz_table_array]] baz_table_array_key = "E" baz_table_array_parent_key = { baz_table_sub_key = "F" } "#; let mut base: toml::Value = toml::from_str(base).unwrap(); let patch = r#" foo_key = "A2" foo_key_new = "A3" foo_parent_key = { foo_sub_key = "B2", foo_sub_key2 = "B3" } foo_parent_key_new = { foo_sub_key = "B4", foo_sub_key2 = "B5" } [bar_table] bar_table_key = "C2" bar_table_key_new = "C3" bar_table_parent_key = { bar_table_sub_key = "D2", bar_table_sub_key2 = "D3" } bar_table_parent_key_new = { bar_table_sub_key = "D4", bar_table_sub_key2 = "D5" } [[baz_table_array]] baz_table_array_key = "G" baz_table_array_parent_key = { baz_table_sub_key = "H" } "#; let patch: toml::Value = toml::from_str(patch).unwrap(); super::merge_toml(&mut base, patch); let expected = r#" foo_key = "A2" foo_key_new = "A3" foo_parent_key = { foo_sub_key = "B2", foo_sub_key2 = "B3" } foo_parent_key_new = { foo_sub_key = "B4", foo_sub_key2 = "B5" } [bar_table] bar_table_key = "C2" bar_table_key_new = "C3" bar_table_parent_key = { bar_table_sub_key = "D2", bar_table_sub_key2 = "D3" } bar_table_parent_key_new = { bar_table_sub_key = "D4", bar_table_sub_key2 = "D5" } [[baz_table_array]] baz_table_array_key = "E" baz_table_array_parent_key = { baz_table_sub_key = "F" } [[baz_table_array]] baz_table_array_key = "G" baz_table_array_parent_key = { baz_table_sub_key = "H" } "#; let expected: toml::Value = toml::from_str(expected).unwrap(); assert_eq!(expected, base); } }
Output = Result<(http_common::Connector, TServer), Box<dyn std::error::Error>>, >, TServer: hyper::service::Service<
random_line_split
main.rs
// Copyright (c) Microsoft. All rights reserved. //! This binary is the process entrypoint for aziot-certd, -identityd and -keyd. //! Rather than be three separate binaries, all three services are symlinks to //! this one aziotd binary. The aziotd binary looks at its command-line args to figure out //! which service it's being invoked as, and runs the code of that service accordingly. #![deny(rust_2018_idioms)] #![warn(clippy::all, clippy::pedantic)] #![allow(clippy::default_trait_access, clippy::let_unit_value)] mod error; mod logging; use error::{Error, ErrorKind}; #[tokio::main] async fn main() { logging::init(); if let Err(err) = main_inner().await { log::error!("{}", err.0); let mut source = std::error::Error::source(&err.0); while let Some(err) = source { log::error!("caused by: {}", err); source = std::error::Error::source(err); } log::error!("{:?}", err.1); std::process::exit(1); } } async fn main_inner() -> Result<(), Error> { let mut args = std::env::args_os(); let process_name = process_name_from_args(&mut args)?; match process_name { ProcessName::Certd => { run( aziot_certd::main, "AZIOT_CERTD_CONFIG", "/etc/aziot/certd/config.toml", "AZIOT_CERTD_CONFIG_DIR", "/etc/aziot/certd/config.d", ) .await? } ProcessName::Identityd => { run( aziot_identityd::main, "AZIOT_IDENTITYD_CONFIG", "/etc/aziot/identityd/config.toml", "AZIOT_IDENTITYD_CONFIG_DIR", "/etc/aziot/identityd/config.d", ) .await? } ProcessName::Keyd => { run( aziot_keyd::main, "AZIOT_KEYD_CONFIG", "/etc/aziot/keyd/config.toml", "AZIOT_KEYD_CONFIG_DIR", "/etc/aziot/keyd/config.d", ) .await? } ProcessName::Tpmd =>
} Ok(()) } #[derive(Clone, Copy, Debug, PartialEq)] enum ProcessName { Certd, Identityd, Keyd, Tpmd, } /// If the symlink is being used to invoke this binary, the process name can be determined /// from the first arg, ie `argv[0]` in C terms. /// /// An alternative is supported where the binary is invoked as aziotd itself, /// and the process name is instead the next arg, ie `argv[1]` in C terms. /// This is primary useful for local development, so it's only allowed in debug builds. fn process_name_from_args<I>(args: &mut I) -> Result<ProcessName, Error> where I: Iterator, <I as Iterator>::Item: AsRef<std::ffi::OsStr>, { let arg = args.next().ok_or_else(|| { ErrorKind::GetProcessName("could not extract process name from args".into()) })?; // arg could be a single component like "aziot-certd", or a path that ends with "aziot-certd", // so parse it as a Path and get the last component. This does the right thing in either case. let arg = std::path::Path::new(&arg); let process_name = arg.file_name().ok_or_else(|| { ErrorKind::GetProcessName( format!( "could not extract process name from arg {:?}", arg.display(), ) .into(), ) })?; match process_name.to_str() { Some("aziot-certd") => Ok(ProcessName::Certd), Some("aziot-identityd") => Ok(ProcessName::Identityd), Some("aziot-keyd") => Ok(ProcessName::Keyd), Some("aziot-tpmd") => Ok(ProcessName::Tpmd), // The next arg is the process name #[cfg(debug_assertions)] Some("aziotd") => process_name_from_args(args), _ => Err(ErrorKind::GetProcessName( format!("unrecognized process name {:?}", process_name).into(), ) .into()), } } async fn run<TMain, TConfig, TFuture, TServer>( main: TMain, config_env_var: &str, config_file_default: &str, config_directory_env_var: &str, config_directory_default: &str, ) -> Result<(), Error> where TMain: FnOnce(TConfig) -> TFuture, TConfig: serde::de::DeserializeOwned, TFuture: std::future::Future< Output = Result<(http_common::Connector, TServer), Box<dyn std::error::Error>>, >, TServer: hyper::service::Service< hyper::Request<hyper::Body>, Response = hyper::Response<hyper::Body>, Error = std::convert::Infallible, > + Clone + Send +'static, <TServer as hyper::service::Service<hyper::Request<hyper::Body>>>::Future: Send, { log::info!("Starting service..."); log::info!( "Version - {}", option_env!("PACKAGE_VERSION").unwrap_or("dev build"), ); let config_path: std::path::PathBuf = std::env::var_os(config_env_var).map_or_else(|| config_file_default.into(), Into::into); let config = std::fs::read(&config_path) .map_err(|err| ErrorKind::ReadConfig(Some(config_path.clone()), Box::new(err)))?; let mut config: toml::Value = toml::from_slice(&config) .map_err(|err| ErrorKind::ReadConfig(Some(config_path), Box::new(err)))?; let config_directory_path: std::path::PathBuf = std::env::var_os(config_directory_env_var) .map_or_else(|| config_directory_default.into(), Into::into); match std::fs::read_dir(&config_directory_path) { Ok(entries) => { let mut patch_paths = vec![]; for entry in entries { let entry = entry.map_err(|err| { ErrorKind::ReadConfig(Some(config_directory_path.clone()), Box::new(err)) })?; let entry_file_type = entry.file_type().map_err(|err| { ErrorKind::ReadConfig(Some(config_directory_path.clone()), Box::new(err)) })?; if!entry_file_type.is_file() { continue; } let patch_path = entry.path(); if patch_path.extension().and_then(std::ffi::OsStr::to_str)!= Some("toml") { continue; } patch_paths.push(patch_path); } patch_paths.sort(); for patch_path in patch_paths { let patch = std::fs::read(&patch_path).map_err(|err| { 
ErrorKind::ReadConfig(Some(patch_path.clone()), Box::new(err)) })?; let patch: toml::Value = toml::from_slice(&patch) .map_err(|err| ErrorKind::ReadConfig(Some(patch_path), Box::new(err)))?; merge_toml(&mut config, patch); } } Err(err) if err.kind() == std::io::ErrorKind::NotFound => (), Err(err) => { return Err(ErrorKind::ReadConfig(Some(config_directory_path), Box::new(err)).into()) } } let config: TConfig = serde::Deserialize::deserialize(config) .map_err(|err| ErrorKind::ReadConfig(None, Box::new(err)))?; let (connector, server) = main(config).await.map_err(ErrorKind::Service)?; log::info!("Starting server..."); let mut incoming = connector .incoming() .await .map_err(|err| ErrorKind::Service(Box::new(err)))?; let () = incoming .serve(server) .await .map_err(|err| ErrorKind::Service(Box::new(err)))?; log::info!("Stopped server."); Ok(()) } fn merge_toml(base: &mut toml::Value, patch: toml::Value) { // Similar to JSON patch, except that: // // - Maps are called tables. // - There is no equivalent of null that can be used to remove keys from an object. // - Arrays are merged via concatenating the patch to the base, rather than replacing the base with the patch. if let toml::Value::Table(base) = base { if let toml::Value::Table(patch) = patch { for (key, value) in patch { // Insert a dummy `false` if the original key didn't exist at all. It'll be overwritten by `value` in that case. let original_value = base.entry(key).or_insert(toml::Value::Boolean(false)); merge_toml(original_value, value); } return; } } if let toml::Value::Array(base) = base { if let toml::Value::Array(patch) = patch { base.extend(patch); return; } } *base = patch; } #[cfg(test)] mod tests { #[test] fn process_name_from_args() { // Success test cases let mut test_cases = vec![ (&["aziot-certd"][..], super::ProcessName::Certd), (&["aziot-identityd"][..], super::ProcessName::Identityd), (&["aziot-keyd"][..], super::ProcessName::Keyd), (&["aziot-tpmd"][..], super::ProcessName::Tpmd), ( &["/usr/libexec/aziot/aziot-certd"][..], super::ProcessName::Certd, ), ( &["/usr/libexec/aziot/aziot-identityd"][..], super::ProcessName::Identityd, ), ( &["/usr/libexec/aziot/aziot-keyd"][..], super::ProcessName::Keyd, ), ( &["/usr/libexec/aziot/aziot-tpmd"][..], super::ProcessName::Tpmd, ), ]; // argv[1] fallback is only in release builds. if cfg!(debug_assertions) { test_cases.extend_from_slice(&[ (&["aziotd", "aziot-certd"][..], super::ProcessName::Certd), ( &["aziotd", "aziot-identityd"][..], super::ProcessName::Identityd, ), (&["aziotd", "aziot-keyd"][..], super::ProcessName::Keyd), ( &["/usr/libexec/aziot/aziotd", "aziot-certd"][..], super::ProcessName::Certd, ), ( &["/usr/libexec/aziot/aziotd", "aziot-identityd"][..], super::ProcessName::Identityd, ), ( &["/usr/libexec/aziot/aziotd", "aziot-keyd"][..], super::ProcessName::Keyd, ), ( &["/usr/libexec/aziot/aziotd", "aziot-tpmd"][..], super::ProcessName::Tpmd, ), ]); } for (input, expected) in test_cases { let mut input = input.iter().copied().map(std::ffi::OsStr::new); let actual = super::process_name_from_args(&mut input).unwrap(); assert_eq!(None, input.next()); assert_eq!(expected, actual); } // Failure test cases for &input in &[ // Unrecognized process name in argv[0] &["foo"][..], &["/usr/libexec/aziot/foo"][..], &["/usr/libexec/aziot/foo", "aziot-certd"][..], // Either fails because it's a release build so argv[1] fallback is disabled, // or fails because it's a debug build where argv[1] fallback is enabled // but the process name in argv[1] is unrecognized anyway. 
&["aziotd", "foo"][..], &["/usr/libexec/aziot/aziotd", "foo"][..], ] { let mut input = input.iter().copied().map(std::ffi::OsStr::new); let _ = super::process_name_from_args(&mut input).unwrap_err(); } } #[test] fn merge_toml() { let base = r#" foo_key = "A" foo_parent_key = { foo_sub_key = "B" } [bar_table] bar_table_key = "C" bar_table_parent_key = { bar_table_sub_key = "D" } [[baz_table_array]] baz_table_array_key = "E" baz_table_array_parent_key = { baz_table_sub_key = "F" } "#; let mut base: toml::Value = toml::from_str(base).unwrap(); let patch = r#" foo_key = "A2" foo_key_new = "A3" foo_parent_key = { foo_sub_key = "B2", foo_sub_key2 = "B3" } foo_parent_key_new = { foo_sub_key = "B4", foo_sub_key2 = "B5" } [bar_table] bar_table_key = "C2" bar_table_key_new = "C3" bar_table_parent_key = { bar_table_sub_key = "D2", bar_table_sub_key2 = "D3" } bar_table_parent_key_new = { bar_table_sub_key = "D4", bar_table_sub_key2 = "D5" } [[baz_table_array]] baz_table_array_key = "G" baz_table_array_parent_key = { baz_table_sub_key = "H" } "#; let patch: toml::Value = toml::from_str(patch).unwrap(); super::merge_toml(&mut base, patch); let expected = r#" foo_key = "A2" foo_key_new = "A3" foo_parent_key = { foo_sub_key = "B2", foo_sub_key2 = "B3" } foo_parent_key_new = { foo_sub_key = "B4", foo_sub_key2 = "B5" } [bar_table] bar_table_key = "C2" bar_table_key_new = "C3" bar_table_parent_key = { bar_table_sub_key = "D2", bar_table_sub_key2 = "D3" } bar_table_parent_key_new = { bar_table_sub_key = "D4", bar_table_sub_key2 = "D5" } [[baz_table_array]] baz_table_array_key = "E" baz_table_array_parent_key = { baz_table_sub_key = "F" } [[baz_table_array]] baz_table_array_key = "G" baz_table_array_parent_key = { baz_table_sub_key = "H" } "#; let expected: toml::Value = toml::from_str(expected).unwrap(); assert_eq!(expected, base); } }
{ run( aziot_tpmd::main, "AZIOT_TPMD_CONFIG", "/etc/aziot/tpmd/config.toml", "AZIOT_TPMD_CONFIG_DIR", "/etc/aziot/tpmd/config.d", ) .await? }
conditional_block
main.rs
// Copyright (c) Microsoft. All rights reserved. //! This binary is the process entrypoint for aziot-certd, -identityd and -keyd. //! Rather than be three separate binaries, all three services are symlinks to //! this one aziotd binary. The aziotd binary looks at its command-line args to figure out //! which service it's being invoked as, and runs the code of that service accordingly. #![deny(rust_2018_idioms)] #![warn(clippy::all, clippy::pedantic)] #![allow(clippy::default_trait_access, clippy::let_unit_value)] mod error; mod logging; use error::{Error, ErrorKind}; #[tokio::main] async fn main() { logging::init(); if let Err(err) = main_inner().await { log::error!("{}", err.0); let mut source = std::error::Error::source(&err.0); while let Some(err) = source { log::error!("caused by: {}", err); source = std::error::Error::source(err); } log::error!("{:?}", err.1); std::process::exit(1); } } async fn main_inner() -> Result<(), Error> { let mut args = std::env::args_os(); let process_name = process_name_from_args(&mut args)?; match process_name { ProcessName::Certd => { run( aziot_certd::main, "AZIOT_CERTD_CONFIG", "/etc/aziot/certd/config.toml", "AZIOT_CERTD_CONFIG_DIR", "/etc/aziot/certd/config.d", ) .await? } ProcessName::Identityd => { run( aziot_identityd::main, "AZIOT_IDENTITYD_CONFIG", "/etc/aziot/identityd/config.toml", "AZIOT_IDENTITYD_CONFIG_DIR", "/etc/aziot/identityd/config.d", ) .await? } ProcessName::Keyd => { run( aziot_keyd::main, "AZIOT_KEYD_CONFIG", "/etc/aziot/keyd/config.toml", "AZIOT_KEYD_CONFIG_DIR", "/etc/aziot/keyd/config.d", ) .await? } ProcessName::Tpmd => { run( aziot_tpmd::main, "AZIOT_TPMD_CONFIG", "/etc/aziot/tpmd/config.toml", "AZIOT_TPMD_CONFIG_DIR", "/etc/aziot/tpmd/config.d", ) .await? } } Ok(()) } #[derive(Clone, Copy, Debug, PartialEq)] enum ProcessName { Certd, Identityd, Keyd, Tpmd, } /// If the symlink is being used to invoke this binary, the process name can be determined /// from the first arg, ie `argv[0]` in C terms. /// /// An alternative is supported where the binary is invoked as aziotd itself, /// and the process name is instead the next arg, ie `argv[1]` in C terms. /// This is primary useful for local development, so it's only allowed in debug builds. fn process_name_from_args<I>(args: &mut I) -> Result<ProcessName, Error> where I: Iterator, <I as Iterator>::Item: AsRef<std::ffi::OsStr>, { let arg = args.next().ok_or_else(|| { ErrorKind::GetProcessName("could not extract process name from args".into()) })?; // arg could be a single component like "aziot-certd", or a path that ends with "aziot-certd", // so parse it as a Path and get the last component. This does the right thing in either case. 
let arg = std::path::Path::new(&arg); let process_name = arg.file_name().ok_or_else(|| { ErrorKind::GetProcessName( format!( "could not extract process name from arg {:?}", arg.display(), ) .into(), ) })?; match process_name.to_str() { Some("aziot-certd") => Ok(ProcessName::Certd), Some("aziot-identityd") => Ok(ProcessName::Identityd), Some("aziot-keyd") => Ok(ProcessName::Keyd), Some("aziot-tpmd") => Ok(ProcessName::Tpmd), // The next arg is the process name #[cfg(debug_assertions)] Some("aziotd") => process_name_from_args(args), _ => Err(ErrorKind::GetProcessName( format!("unrecognized process name {:?}", process_name).into(), ) .into()), } } async fn run<TMain, TConfig, TFuture, TServer>( main: TMain, config_env_var: &str, config_file_default: &str, config_directory_env_var: &str, config_directory_default: &str, ) -> Result<(), Error> where TMain: FnOnce(TConfig) -> TFuture, TConfig: serde::de::DeserializeOwned, TFuture: std::future::Future< Output = Result<(http_common::Connector, TServer), Box<dyn std::error::Error>>, >, TServer: hyper::service::Service< hyper::Request<hyper::Body>, Response = hyper::Response<hyper::Body>, Error = std::convert::Infallible, > + Clone + Send +'static, <TServer as hyper::service::Service<hyper::Request<hyper::Body>>>::Future: Send, { log::info!("Starting service..."); log::info!( "Version - {}", option_env!("PACKAGE_VERSION").unwrap_or("dev build"), ); let config_path: std::path::PathBuf = std::env::var_os(config_env_var).map_or_else(|| config_file_default.into(), Into::into); let config = std::fs::read(&config_path) .map_err(|err| ErrorKind::ReadConfig(Some(config_path.clone()), Box::new(err)))?; let mut config: toml::Value = toml::from_slice(&config) .map_err(|err| ErrorKind::ReadConfig(Some(config_path), Box::new(err)))?; let config_directory_path: std::path::PathBuf = std::env::var_os(config_directory_env_var) .map_or_else(|| config_directory_default.into(), Into::into); match std::fs::read_dir(&config_directory_path) { Ok(entries) => { let mut patch_paths = vec![]; for entry in entries { let entry = entry.map_err(|err| { ErrorKind::ReadConfig(Some(config_directory_path.clone()), Box::new(err)) })?; let entry_file_type = entry.file_type().map_err(|err| { ErrorKind::ReadConfig(Some(config_directory_path.clone()), Box::new(err)) })?; if!entry_file_type.is_file() { continue; } let patch_path = entry.path(); if patch_path.extension().and_then(std::ffi::OsStr::to_str)!= Some("toml") { continue; } patch_paths.push(patch_path); } patch_paths.sort(); for patch_path in patch_paths { let patch = std::fs::read(&patch_path).map_err(|err| { ErrorKind::ReadConfig(Some(patch_path.clone()), Box::new(err)) })?; let patch: toml::Value = toml::from_slice(&patch) .map_err(|err| ErrorKind::ReadConfig(Some(patch_path), Box::new(err)))?; merge_toml(&mut config, patch); } } Err(err) if err.kind() == std::io::ErrorKind::NotFound => (), Err(err) => { return Err(ErrorKind::ReadConfig(Some(config_directory_path), Box::new(err)).into()) } } let config: TConfig = serde::Deserialize::deserialize(config) .map_err(|err| ErrorKind::ReadConfig(None, Box::new(err)))?; let (connector, server) = main(config).await.map_err(ErrorKind::Service)?; log::info!("Starting server..."); let mut incoming = connector .incoming() .await .map_err(|err| ErrorKind::Service(Box::new(err)))?; let () = incoming .serve(server) .await .map_err(|err| ErrorKind::Service(Box::new(err)))?; log::info!("Stopped server."); Ok(()) } fn merge_toml(base: &mut toml::Value, patch: toml::Value) { // Similar to JSON 
patch, except that: // // - Maps are called tables. // - There is no equivalent of null that can be used to remove keys from an object. // - Arrays are merged via concatenating the patch to the base, rather than replacing the base with the patch. if let toml::Value::Table(base) = base { if let toml::Value::Table(patch) = patch { for (key, value) in patch { // Insert a dummy `false` if the original key didn't exist at all. It'll be overwritten by `value` in that case. let original_value = base.entry(key).or_insert(toml::Value::Boolean(false)); merge_toml(original_value, value); } return; } } if let toml::Value::Array(base) = base { if let toml::Value::Array(patch) = patch { base.extend(patch); return; } } *base = patch; } #[cfg(test)] mod tests { #[test] fn process_name_from_args() { // Success test cases let mut test_cases = vec![ (&["aziot-certd"][..], super::ProcessName::Certd), (&["aziot-identityd"][..], super::ProcessName::Identityd), (&["aziot-keyd"][..], super::ProcessName::Keyd), (&["aziot-tpmd"][..], super::ProcessName::Tpmd), ( &["/usr/libexec/aziot/aziot-certd"][..], super::ProcessName::Certd, ), ( &["/usr/libexec/aziot/aziot-identityd"][..], super::ProcessName::Identityd, ), ( &["/usr/libexec/aziot/aziot-keyd"][..], super::ProcessName::Keyd, ), ( &["/usr/libexec/aziot/aziot-tpmd"][..], super::ProcessName::Tpmd, ), ]; // argv[1] fallback is only in release builds. if cfg!(debug_assertions) { test_cases.extend_from_slice(&[ (&["aziotd", "aziot-certd"][..], super::ProcessName::Certd), ( &["aziotd", "aziot-identityd"][..], super::ProcessName::Identityd, ), (&["aziotd", "aziot-keyd"][..], super::ProcessName::Keyd), ( &["/usr/libexec/aziot/aziotd", "aziot-certd"][..], super::ProcessName::Certd, ), ( &["/usr/libexec/aziot/aziotd", "aziot-identityd"][..], super::ProcessName::Identityd, ), ( &["/usr/libexec/aziot/aziotd", "aziot-keyd"][..], super::ProcessName::Keyd, ), ( &["/usr/libexec/aziot/aziotd", "aziot-tpmd"][..], super::ProcessName::Tpmd, ), ]); } for (input, expected) in test_cases { let mut input = input.iter().copied().map(std::ffi::OsStr::new); let actual = super::process_name_from_args(&mut input).unwrap(); assert_eq!(None, input.next()); assert_eq!(expected, actual); } // Failure test cases for &input in &[ // Unrecognized process name in argv[0] &["foo"][..], &["/usr/libexec/aziot/foo"][..], &["/usr/libexec/aziot/foo", "aziot-certd"][..], // Either fails because it's a release build so argv[1] fallback is disabled, // or fails because it's a debug build where argv[1] fallback is enabled // but the process name in argv[1] is unrecognized anyway. &["aziotd", "foo"][..], &["/usr/libexec/aziot/aziotd", "foo"][..], ] { let mut input = input.iter().copied().map(std::ffi::OsStr::new); let _ = super::process_name_from_args(&mut input).unwrap_err(); } } #[test] fn
() { let base = r#" foo_key = "A" foo_parent_key = { foo_sub_key = "B" } [bar_table] bar_table_key = "C" bar_table_parent_key = { bar_table_sub_key = "D" } [[baz_table_array]] baz_table_array_key = "E" baz_table_array_parent_key = { baz_table_sub_key = "F" } "#; let mut base: toml::Value = toml::from_str(base).unwrap(); let patch = r#" foo_key = "A2" foo_key_new = "A3" foo_parent_key = { foo_sub_key = "B2", foo_sub_key2 = "B3" } foo_parent_key_new = { foo_sub_key = "B4", foo_sub_key2 = "B5" } [bar_table] bar_table_key = "C2" bar_table_key_new = "C3" bar_table_parent_key = { bar_table_sub_key = "D2", bar_table_sub_key2 = "D3" } bar_table_parent_key_new = { bar_table_sub_key = "D4", bar_table_sub_key2 = "D5" } [[baz_table_array]] baz_table_array_key = "G" baz_table_array_parent_key = { baz_table_sub_key = "H" } "#; let patch: toml::Value = toml::from_str(patch).unwrap(); super::merge_toml(&mut base, patch); let expected = r#" foo_key = "A2" foo_key_new = "A3" foo_parent_key = { foo_sub_key = "B2", foo_sub_key2 = "B3" } foo_parent_key_new = { foo_sub_key = "B4", foo_sub_key2 = "B5" } [bar_table] bar_table_key = "C2" bar_table_key_new = "C3" bar_table_parent_key = { bar_table_sub_key = "D2", bar_table_sub_key2 = "D3" } bar_table_parent_key_new = { bar_table_sub_key = "D4", bar_table_sub_key2 = "D5" } [[baz_table_array]] baz_table_array_key = "E" baz_table_array_parent_key = { baz_table_sub_key = "F" } [[baz_table_array]] baz_table_array_key = "G" baz_table_array_parent_key = { baz_table_sub_key = "H" } "#; let expected: toml::Value = toml::from_str(expected).unwrap(); assert_eq!(expected, base); } }
merge_toml
identifier_name
main.rs
// Copyright (c) Microsoft. All rights reserved. //! This binary is the process entrypoint for aziot-certd, -identityd and -keyd. //! Rather than be three separate binaries, all three services are symlinks to //! this one aziotd binary. The aziotd binary looks at its command-line args to figure out //! which service it's being invoked as, and runs the code of that service accordingly. #![deny(rust_2018_idioms)] #![warn(clippy::all, clippy::pedantic)] #![allow(clippy::default_trait_access, clippy::let_unit_value)] mod error; mod logging; use error::{Error, ErrorKind}; #[tokio::main] async fn main() { logging::init(); if let Err(err) = main_inner().await { log::error!("{}", err.0); let mut source = std::error::Error::source(&err.0); while let Some(err) = source { log::error!("caused by: {}", err); source = std::error::Error::source(err); } log::error!("{:?}", err.1); std::process::exit(1); } } async fn main_inner() -> Result<(), Error>
"/etc/aziot/identityd/config.toml", "AZIOT_IDENTITYD_CONFIG_DIR", "/etc/aziot/identityd/config.d", ) .await? } ProcessName::Keyd => { run( aziot_keyd::main, "AZIOT_KEYD_CONFIG", "/etc/aziot/keyd/config.toml", "AZIOT_KEYD_CONFIG_DIR", "/etc/aziot/keyd/config.d", ) .await? } ProcessName::Tpmd => { run( aziot_tpmd::main, "AZIOT_TPMD_CONFIG", "/etc/aziot/tpmd/config.toml", "AZIOT_TPMD_CONFIG_DIR", "/etc/aziot/tpmd/config.d", ) .await? } } Ok(()) } #[derive(Clone, Copy, Debug, PartialEq)] enum ProcessName { Certd, Identityd, Keyd, Tpmd, } /// If the symlink is being used to invoke this binary, the process name can be determined /// from the first arg, ie `argv[0]` in C terms. /// /// An alternative is supported where the binary is invoked as aziotd itself, /// and the process name is instead the next arg, ie `argv[1]` in C terms. /// This is primary useful for local development, so it's only allowed in debug builds. fn process_name_from_args<I>(args: &mut I) -> Result<ProcessName, Error> where I: Iterator, <I as Iterator>::Item: AsRef<std::ffi::OsStr>, { let arg = args.next().ok_or_else(|| { ErrorKind::GetProcessName("could not extract process name from args".into()) })?; // arg could be a single component like "aziot-certd", or a path that ends with "aziot-certd", // so parse it as a Path and get the last component. This does the right thing in either case. let arg = std::path::Path::new(&arg); let process_name = arg.file_name().ok_or_else(|| { ErrorKind::GetProcessName( format!( "could not extract process name from arg {:?}", arg.display(), ) .into(), ) })?; match process_name.to_str() { Some("aziot-certd") => Ok(ProcessName::Certd), Some("aziot-identityd") => Ok(ProcessName::Identityd), Some("aziot-keyd") => Ok(ProcessName::Keyd), Some("aziot-tpmd") => Ok(ProcessName::Tpmd), // The next arg is the process name #[cfg(debug_assertions)] Some("aziotd") => process_name_from_args(args), _ => Err(ErrorKind::GetProcessName( format!("unrecognized process name {:?}", process_name).into(), ) .into()), } } async fn run<TMain, TConfig, TFuture, TServer>( main: TMain, config_env_var: &str, config_file_default: &str, config_directory_env_var: &str, config_directory_default: &str, ) -> Result<(), Error> where TMain: FnOnce(TConfig) -> TFuture, TConfig: serde::de::DeserializeOwned, TFuture: std::future::Future< Output = Result<(http_common::Connector, TServer), Box<dyn std::error::Error>>, >, TServer: hyper::service::Service< hyper::Request<hyper::Body>, Response = hyper::Response<hyper::Body>, Error = std::convert::Infallible, > + Clone + Send +'static, <TServer as hyper::service::Service<hyper::Request<hyper::Body>>>::Future: Send, { log::info!("Starting service..."); log::info!( "Version - {}", option_env!("PACKAGE_VERSION").unwrap_or("dev build"), ); let config_path: std::path::PathBuf = std::env::var_os(config_env_var).map_or_else(|| config_file_default.into(), Into::into); let config = std::fs::read(&config_path) .map_err(|err| ErrorKind::ReadConfig(Some(config_path.clone()), Box::new(err)))?; let mut config: toml::Value = toml::from_slice(&config) .map_err(|err| ErrorKind::ReadConfig(Some(config_path), Box::new(err)))?; let config_directory_path: std::path::PathBuf = std::env::var_os(config_directory_env_var) .map_or_else(|| config_directory_default.into(), Into::into); match std::fs::read_dir(&config_directory_path) { Ok(entries) => { let mut patch_paths = vec![]; for entry in entries { let entry = entry.map_err(|err| { ErrorKind::ReadConfig(Some(config_directory_path.clone()), Box::new(err)) })?; 
let entry_file_type = entry.file_type().map_err(|err| { ErrorKind::ReadConfig(Some(config_directory_path.clone()), Box::new(err)) })?; if !entry_file_type.is_file() { continue; } let patch_path = entry.path(); if patch_path.extension().and_then(std::ffi::OsStr::to_str) != Some("toml") { continue; } patch_paths.push(patch_path); } patch_paths.sort(); for patch_path in patch_paths { let patch = std::fs::read(&patch_path).map_err(|err| { ErrorKind::ReadConfig(Some(patch_path.clone()), Box::new(err)) })?; let patch: toml::Value = toml::from_slice(&patch) .map_err(|err| ErrorKind::ReadConfig(Some(patch_path), Box::new(err)))?; merge_toml(&mut config, patch); } } Err(err) if err.kind() == std::io::ErrorKind::NotFound => (), Err(err) => { return Err(ErrorKind::ReadConfig(Some(config_directory_path), Box::new(err)).into()) } } let config: TConfig = serde::Deserialize::deserialize(config) .map_err(|err| ErrorKind::ReadConfig(None, Box::new(err)))?; let (connector, server) = main(config).await.map_err(ErrorKind::Service)?; log::info!("Starting server..."); let mut incoming = connector .incoming() .await .map_err(|err| ErrorKind::Service(Box::new(err)))?; let () = incoming .serve(server) .await .map_err(|err| ErrorKind::Service(Box::new(err)))?; log::info!("Stopped server."); Ok(()) } fn merge_toml(base: &mut toml::Value, patch: toml::Value) { // Similar to JSON patch, except that: // // - Maps are called tables. // - There is no equivalent of null that can be used to remove keys from an object. // - Arrays are merged via concatenating the patch to the base, rather than replacing the base with the patch. if let toml::Value::Table(base) = base { if let toml::Value::Table(patch) = patch { for (key, value) in patch { // Insert a dummy `false` if the original key didn't exist at all. It'll be overwritten by `value` in that case. let original_value = base.entry(key).or_insert(toml::Value::Boolean(false)); merge_toml(original_value, value); } return; } } if let toml::Value::Array(base) = base { if let toml::Value::Array(patch) = patch { base.extend(patch); return; } } *base = patch; } #[cfg(test)] mod tests { #[test] fn process_name_from_args() { // Success test cases let mut test_cases = vec![ (&["aziot-certd"][..], super::ProcessName::Certd), (&["aziot-identityd"][..], super::ProcessName::Identityd), (&["aziot-keyd"][..], super::ProcessName::Keyd), (&["aziot-tpmd"][..], super::ProcessName::Tpmd), ( &["/usr/libexec/aziot/aziot-certd"][..], super::ProcessName::Certd, ), ( &["/usr/libexec/aziot/aziot-identityd"][..], super::ProcessName::Identityd, ), ( &["/usr/libexec/aziot/aziot-keyd"][..], super::ProcessName::Keyd, ), ( &["/usr/libexec/aziot/aziot-tpmd"][..], super::ProcessName::Tpmd, ), ]; // argv[1] fallback is only in debug builds.
if cfg!(debug_assertions) { test_cases.extend_from_slice(&[ (&["aziotd", "aziot-certd"][..], super::ProcessName::Certd), ( &["aziotd", "aziot-identityd"][..], super::ProcessName::Identityd, ), (&["aziotd", "aziot-keyd"][..], super::ProcessName::Keyd), ( &["/usr/libexec/aziot/aziotd", "aziot-certd"][..], super::ProcessName::Certd, ), ( &["/usr/libexec/aziot/aziotd", "aziot-identityd"][..], super::ProcessName::Identityd, ), ( &["/usr/libexec/aziot/aziotd", "aziot-keyd"][..], super::ProcessName::Keyd, ), ( &["/usr/libexec/aziot/aziotd", "aziot-tpmd"][..], super::ProcessName::Tpmd, ), ]); } for (input, expected) in test_cases { let mut input = input.iter().copied().map(std::ffi::OsStr::new); let actual = super::process_name_from_args(&mut input).unwrap(); assert_eq!(None, input.next()); assert_eq!(expected, actual); } // Failure test cases for &input in &[ // Unrecognized process name in argv[0] &["foo"][..], &["/usr/libexec/aziot/foo"][..], &["/usr/libexec/aziot/foo", "aziot-certd"][..], // Either fails because it's a release build so argv[1] fallback is disabled, // or fails because it's a debug build where argv[1] fallback is enabled // but the process name in argv[1] is unrecognized anyway. &["aziotd", "foo"][..], &["/usr/libexec/aziot/aziotd", "foo"][..], ] { let mut input = input.iter().copied().map(std::ffi::OsStr::new); let _ = super::process_name_from_args(&mut input).unwrap_err(); } } #[test] fn merge_toml() { let base = r#" foo_key = "A" foo_parent_key = { foo_sub_key = "B" } [bar_table] bar_table_key = "C" bar_table_parent_key = { bar_table_sub_key = "D" } [[baz_table_array]] baz_table_array_key = "E" baz_table_array_parent_key = { baz_table_sub_key = "F" } "#; let mut base: toml::Value = toml::from_str(base).unwrap(); let patch = r#" foo_key = "A2" foo_key_new = "A3" foo_parent_key = { foo_sub_key = "B2", foo_sub_key2 = "B3" } foo_parent_key_new = { foo_sub_key = "B4", foo_sub_key2 = "B5" } [bar_table] bar_table_key = "C2" bar_table_key_new = "C3" bar_table_parent_key = { bar_table_sub_key = "D2", bar_table_sub_key2 = "D3" } bar_table_parent_key_new = { bar_table_sub_key = "D4", bar_table_sub_key2 = "D5" } [[baz_table_array]] baz_table_array_key = "G" baz_table_array_parent_key = { baz_table_sub_key = "H" } "#; let patch: toml::Value = toml::from_str(patch).unwrap(); super::merge_toml(&mut base, patch); let expected = r#" foo_key = "A2" foo_key_new = "A3" foo_parent_key = { foo_sub_key = "B2", foo_sub_key2 = "B3" } foo_parent_key_new = { foo_sub_key = "B4", foo_sub_key2 = "B5" } [bar_table] bar_table_key = "C2" bar_table_key_new = "C3" bar_table_parent_key = { bar_table_sub_key = "D2", bar_table_sub_key2 = "D3" } bar_table_parent_key_new = { bar_table_sub_key = "D4", bar_table_sub_key2 = "D5" } [[baz_table_array]] baz_table_array_key = "E" baz_table_array_parent_key = { baz_table_sub_key = "F" } [[baz_table_array]] baz_table_array_key = "G" baz_table_array_parent_key = { baz_table_sub_key = "H" } "#; let expected: toml::Value = toml::from_str(expected).unwrap(); assert_eq!(expected, base); } }
{ let mut args = std::env::args_os(); let process_name = process_name_from_args(&mut args)?; match process_name { ProcessName::Certd => { run( aziot_certd::main, "AZIOT_CERTD_CONFIG", "/etc/aziot/certd/config.toml", "AZIOT_CERTD_CONFIG_DIR", "/etc/aziot/certd/config.d", ) .await? } ProcessName::Identityd => { run( aziot_identityd::main, "AZIOT_IDENTITYD_CONFIG",
identifier_body
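// A minimal, hedged sketch (not part of the original record above) of the
// env-var-or-default config path resolution that `run` performs: an override
// environment variable wins, otherwise the packaged default path is used.
// `config_path` is a hypothetical helper name; the env var names are ones the
// record actually uses, and the second assertion assumes AZIOT_CERTD_CONFIG
// is not set in the environment running this sketch.
fn config_path(env_var: &str, default: &str) -> std::path::PathBuf {
    // Fall back to the packaged default when the override variable is absent.
    std::env::var_os(env_var).map_or_else(|| default.into(), Into::into)
}

fn main() {
    std::env::set_var("AZIOT_KEYD_CONFIG", "/tmp/keyd.toml");
    assert_eq!(
        config_path("AZIOT_KEYD_CONFIG", "/etc/aziot/keyd/config.toml"),
        std::path::PathBuf::from("/tmp/keyd.toml")
    );
    assert_eq!(
        config_path("AZIOT_CERTD_CONFIG", "/etc/aziot/certd/config.toml"),
        std::path::PathBuf::from("/etc/aziot/certd/config.toml")
    );
}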
mod.rs
mod strata; mod assigners; mod samplers; pub mod serial_storage; use std::cmp::max; use std::ops::Range; use std::sync::Arc; use std::sync::RwLock; use std::thread::spawn; use std::thread::sleep; use std::time::Duration; use evmap; use rand; use commons::bins::Bins; use commons::channel; use commons::channel::Receiver; use commons::channel::Sender; use commons::get_sign; use commons::ExampleWithScore; use commons::Model; use commons::Signal; use labeled_data::LabeledData; use super::Example; use super::TFeature; use self::assigners::Assigners; use self::samplers::Samplers; use self::serial_storage::SerialStorage; use self::strata::Strata; pub const SPEED_TEST: bool = false; pub struct F64 { pub val: f64 } impl PartialEq for F64 { fn eq(&self, other: &F64) -> bool { get_sign(self.val - other.val) == 0 } } impl Eq for F64 {} type WeightTableRead = evmap::ReadHandle<i8, Box<F64>>; pub struct StratifiedStorage { // num_examples: usize, // feature_size: usize, // num_examples_per_block: usize, // disk_buffer_filename: String, // strata: Arc<RwLock<Strata>>, // stats_update_s: Sender<(i8, (i32, f64))>, #[allow(dead_code)] counts_table_r: evmap::ReadHandle<i8, i32>, #[allow(dead_code)] weights_table_r: WeightTableRead, // num_assigners: usize, // num_samplers: usize, // updated_examples_r: Receiver<ExampleWithScore>, updated_examples_s: Sender<ExampleWithScore>, // sampled_examples_s: Sender<(ExampleWithScore, u32)>, // sampling_signal: Receiver<Signal>, // models: Receiver<Model>, positive: String, } impl StratifiedStorage { /// Create the stratified storage structure. /// /// * `num_examples`: the total number of examples in the training data set /// * `feature_size`: the number of features of the training examples /// * `num_examples_per_block`: the number of examples to write back to disk in batch (explained below) /// * `disk_buffer_filename`: the name of the binary file for saving the examples in strata on disk. /// If such a file does not exist, it will be created /// * `num_assigners`: the number of threads that run the `Assigner`s (explained below) /// * `num_samplers`: the number of threads that run the `Sampler`s (explained below) /// * `sampled_examples`: the channel that the stratified storage sends the sampled examples to /// the buffer loader /// * `sampling_signal`: the channel that the buffer loader sends sampling signals to /// start and stop the samplers as needed /// * `models`: the channel that the booster sends the latest models in /// /// Stratified storage organizes training examples according to their weights /// given the current learning model. /// The examples are assigned to different strata so that the weight ratio of the examples /// within the same stratum does not exceed 2. /// Most examples in a stratum are stored on disk, while a small number of examples remain /// in memory, either waiting to be written to disk or already read from disk and ready to be sent out to the sampler. /// /// The overall structure of the stratified storage is as follows: /// /// ![](https://www.lucidchart.com/publicSegments/view/c87b7a50-5442-4a41-a601-3dfb49b16511/image.png) /// /// The `Assigner`s read examples with updated scores from the `Sampler` and write them back to /// the corresponding strata based on their new weights. The examples are first put into the /// `In Queue`s until enough examples belonging to the same stratum have accumulated, /// at which point they are written to disk in batch.
/// /// Meanwhile, a certain number of examples from each stratum are loaded into memory /// from disk and kept in `Out Queue`s. /// The `Sampler`s iteratively select a stratum with a probability that is proportional to /// the sum of the weights of all examples in that stratum, send its next sampled example to the memory /// buffer, and remove that example from the strata. /// /// A `Shared Weight Table` maintains the sum of the weights of all examples in each stratum. /// The `Assigner`s increase the value in the `Shared Weight Table` when a new example is inserted into /// a stratum. /// The `Sampler`s use the weights in the `Shared Weight Table` to decide which stratum to read next and /// send its next sampled example to the memory buffer. After an example is processed, the `Sampler` also /// updates its weight, sends it to the right stratum, and updates the `Shared Weight Table` accordingly. pub fn new( num_examples: usize, feature_size: usize, positive: String, num_examples_per_block: usize, disk_buffer_filename: &str, num_assigners: usize, num_samplers: usize, sampled_examples: Sender<(ExampleWithScore, u32)>, sampling_signal: Receiver<Signal>, models: Receiver<Model>, channel_size: usize, debug_mode: bool, ) -> StratifiedStorage { let strata = Strata::new(num_examples, feature_size, num_examples_per_block, disk_buffer_filename); let strata = Arc::new(RwLock::new(strata)); let (counts_table_r, mut counts_table_w) = evmap::new(); let (weights_table_r, mut weights_table_w) = evmap::new(); let (updated_examples_s, updated_examples_r) = channel::bounded(channel_size, "updated-examples"); // The messages in the stats channel are very small, so its capacity can be larger. let (stats_update_s, stats_update_r) = channel::bounded(5000000, "stats"); // Update shared weights table (non-blocking) { let counts_table_r = counts_table_r.clone(); let weights_table_r = weights_table_r.clone(); spawn(move || { while let Some((index, (count, weight))) = stats_update_r.recv() { let val = counts_table_r.get_and(&index, |vs| vs[0]); counts_table_w.update(index, val.unwrap_or(0) + count); let cur = weights_table_r.get_and(&index, |vs: &[Box<F64>]| vs[0].val) .unwrap_or(0.0); weights_table_w.update(index, Box::new(F64 { val: cur + weight })); { counts_table_w.refresh(); weights_table_w.refresh(); } } }); } // Monitor the distribution of strata if debug_mode { let counts_table_r = counts_table_r.clone(); let weights_table_r = weights_table_r.clone(); spawn(move || { loop { sleep(Duration::from_millis(5000)); let mut p: Vec<(i8, f64)> = weights_table_r.map_into(|a: &i8, b: &[Box<F64>]| (a.clone(), b[0].val)); p.sort_by(|a, b| (a.0).cmp(&b.0)); let mut c: Vec<(i8, i32)> = counts_table_r.map_into(|a, b| (a.clone(), b[0])); c.sort_by(|a, b| (a.0).cmp(&b.0)); let mut sump: f64 = p.iter().map(|t| t.1).sum(); if get_sign(sump) == 0 { sump = 1.0; } let ps: Vec<String> = p.into_iter() .map(|(idx, w)| (idx, 100.0 * w / sump)) .map(|(idx, w)| format!("({}, {:.2})", idx, w)) .collect(); debug!("strata weights distr, {}, {}", ps.join(", "), sump); let sumc: i32 = max(c.iter().map(|t| t.1).sum(), 1); let cs: Vec<String> = c.into_iter() .map(|(idx, c)| (idx, 100.0 * c as f32 / (sumc as f32))) .map(|(idx, c)| format!("({}, {:.2})", idx, c)) .collect(); debug!("strata counts distr, {}, {}", cs.join(", "), sumc); } }); } let assigners = Assigners::new( updated_examples_r, strata.clone(), stats_update_s.clone(), num_assigners, ); let samplers = Samplers::new( strata.clone(), sampled_examples.clone(), updated_examples_s.clone(),
models.clone(), stats_update_s.clone(), weights_table_r.clone(), sampling_signal.clone(), num_samplers, ); assigners.run(); samplers.run(); StratifiedStorage { // num_examples: num_examples, // feature_size: feature_size, // num_examples_per_block: num_examples_per_block, // disk_buffer_filename: String::from(disk_buffer_filename), // strata: strata, // stats_update_s: stats_update_s, counts_table_r: counts_table_r, weights_table_r: weights_table_r, // num_assigners: num_assigners, // num_samplers: num_samplers, // updated_examples_r: updated_examples_r, updated_examples_s: updated_examples_s, // sampled_examples_s: sampled_examples, // sampling_signal: sampling_signal, // models: models, positive: positive, } } pub fn init_stratified_from_file( &self, filename: String, size: usize, batch_size: usize, feature_size: usize, range: Range<usize>, bins: Vec<Bins>, ) { let mut reader = SerialStorage::new( filename.clone(), size, feature_size, true, self.positive.clone(), None, range.clone(), ); let updated_examples_s = self.updated_examples_s.clone(); spawn(move || { let mut index = 0; while index < size { reader.read_raw(batch_size).into_iter().for_each(|data| { let features: Vec<TFeature> = data.feature.iter().enumerate() .map(|(idx, val)| { if range.start <= idx && idx < range.end { bins[idx - range.start].get_split_index(*val) } else { 0 } }).collect(); let mapped_data = LabeledData::new(features, data.label); updated_examples_s.send((mapped_data, (0.0, 0))); }); index += batch_size; } debug!("Raw data on disk has been loaded into the stratified storage, \ filename {}, capacity {}, feature size {}", filename, size, feature_size); }); } } fn sample_weights_table(weights_table_r: &WeightTableRead) -> Option<i8> { let p: Vec<(i8, f64)> = weights_table_r.map_into(|a, b| (a.clone(), b[0].val)); let sum_of_weights: f64 = p.iter().map(|t| t.1).sum(); if get_sign(sum_of_weights) == 0 { None } else { let mut frac = rand::random::<f64>() * sum_of_weights; let mut iter = p.iter(); let mut key_val = &(0, 0.0); while get_sign(frac) >= 0 { key_val = iter.next().expect("get_sample_from: input p is empty"); frac -= key_val.1; } Some(key_val.0) } } #[cfg(test)] mod tests { extern crate env_logger; use std::fs::remove_file; use commons::channel; use std::thread::spawn; use labeled_data::LabeledData; use commons::ExampleWithScore; use commons::Signal; use commons::performance_monitor::PerformanceMonitor; use super::StratifiedStorage; use ::TFeature; #[test] fn test_mean() { let _ = env_logger::try_init(); let filename = "unittest-stratified3.bin"; let batch = 100000; let num_read = 1000000; let (sampled_examples_send, sampled_examples_recv) = channel::bounded(1000, "sampled-examples"); let (_, models_recv) = channel::bounded(10, "updated-models"); let (signal_s, signal_r) = channel::bounded(10, "sampling-signal"); signal_s.send(Signal::START); let stratified_storage = StratifiedStorage::new( batch * 10, 1, "1".to_string(), 10000, filename, 4, 4, sampled_examples_send, signal_r, models_recv, 10, false, ); let updated_examples_send = stratified_storage.updated_examples_s.clone(); let mut pm_load = PerformanceMonitor::new(); pm_load.start(); let loading = spawn(move || { for _ in 0..batch {
updated_examples_send.send(t.clone()); } } println!("Loading speed: {}", (batch * 10) as f32 / pm_load.get_duration()); }); let mut pm_sample = PerformanceMonitor::new(); pm_sample.start(); let mut average = 0.0; for _ in 0..num_read { let recv = sampled_examples_recv.recv().unwrap(); average += (((recv.0).0).feature[0] as f32) * (recv.1 as f32) / (num_read as f32); pm_sample.update(recv.1 as usize); } spawn(move || { println!("Sampling speed: {}", num_read as f32 / pm_sample.get_duration()); }); let answer = (1..11).map(|a| a as f32).map(|a| a * a).sum::<f32>() / ((1..11).sum::<i32>() as f32); loading.join().unwrap(); if (average - answer).abs() > 0.05 { spawn(move || { println!("Average: {}. Expect: {}.", average, answer); }).join().unwrap(); assert!(false); } remove_file(filename).unwrap(); } fn get_example(feature: Vec<TFeature>, weight: f32) -> ExampleWithScore { let label: i8 = 1; let example = LabeledData::new(feature, label); let score = -weight.ln(); (example, (score, 0)) } }
for i in 1..11 { let t = get_example(vec![i as TFeature], i as f32);
random_line_split
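// A hedged sketch of the roulette-wheel selection that `sample_weights_table`
// implements in the record above, rewritten over a plain slice so it runs
// standalone. The stratum keys and weights below are made up for illustration,
// and the fixed fraction stands in for `rand::random::<f64>() * sum_of_weights`.
fn pick_stratum(strata: &[(i8, f64)], mut frac: f64) -> Option<i8> {
    for &(key, weight) in strata {
        // Walk the cumulative weights until the random fraction is used up.
        frac -= weight;
        if frac < 0.0 {
            return Some(key);
        }
    }
    None
}

fn main() {
    let strata = [(-1_i8, 1.0), (0, 2.0), (1, 4.0)];
    let total: f64 = strata.iter().map(|t| t.1).sum();
    // 0.3 * total = 2.1 lands in the second stratum's cumulative range (1.0..3.0).
    assert_eq!(pick_stratum(&strata, 0.3 * total), Some(0));
}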
mod.rs
mod strata; mod assigners; mod samplers; pub mod serial_storage; use std::cmp::max; use std::ops::Range; use std::sync::Arc; use std::sync::RwLock; use std::thread::spawn; use std::thread::sleep; use std::time::Duration; use evmap; use rand; use commons::bins::Bins; use commons::channel; use commons::channel::Receiver; use commons::channel::Sender; use commons::get_sign; use commons::ExampleWithScore; use commons::Model; use commons::Signal; use labeled_data::LabeledData; use super::Example; use super::TFeature; use self::assigners::Assigners; use self::samplers::Samplers; use self::serial_storage::SerialStorage; use self::strata::Strata; pub const SPEED_TEST: bool = false; pub struct F64 { pub val: f64 } impl PartialEq for F64 { fn eq(&self, other: &F64) -> bool { get_sign(self.val - other.val) == 0 } } impl Eq for F64 {} type WeightTableRead = evmap::ReadHandle<i8, Box<F64>>; pub struct StratifiedStorage { // num_examples: usize, // feature_size: usize, // num_examples_per_block: usize, // disk_buffer_filename: String, // strata: Arc<RwLock<Strata>>, // stats_update_s: Sender<(i8, (i32, f64))>, #[allow(dead_code)] counts_table_r: evmap::ReadHandle<i8, i32>, #[allow(dead_code)] weights_table_r: WeightTableRead, // num_assigners: usize, // num_samplers: usize, // updated_examples_r: Receiver<ExampleWithScore>, updated_examples_s: Sender<ExampleWithScore>, // sampled_examples_s: Sender<(ExampleWithScore, u32)>, // sampling_signal: Receiver<Signal>, // models: Receiver<Model>, positive: String, } impl StratifiedStorage { /// Create the stratified storage structure. /// /// * `num_examples`: the total number of examples in the training data set /// * `feature_size`: the number of features of the training examples /// * `num_examples_per_block`: the number of examples to write back to disk in batch (explained below) /// * `disk_buffer_filename`: the name of the binary file for saving the examples in strata on disk. /// If such a file does not exist, it will be created /// * `num_assigners`: the number of threads that run the `Assigner`s (explained below) /// * `num_samplers`: the number of threads that run the `Sampler`s (explained below) /// * `sampled_examples`: the channel that the stratified storage sends the sampled examples to /// the buffer loader /// * `sampling_signal`: the channel that the buffer loader sends sampling signals to /// start and stop the samplers as needed /// * `models`: the channel that the booster sends the latest models in /// /// Stratified storage organizes training examples according to their weights /// given the current learning model. /// The examples are assigned to different strata so that the weight ratio of the examples /// within the same stratum does not exceed 2. /// Most examples in a stratum are stored on disk, while a small number of examples remain /// in memory, either waiting to be written to disk or already read from disk and ready to be sent out to the sampler. /// /// The overall structure of the stratified storage is as follows: /// /// ![](https://www.lucidchart.com/publicSegments/view/c87b7a50-5442-4a41-a601-3dfb49b16511/image.png) /// /// The `Assigner`s read examples with updated scores from the `Sampler` and write them back to /// the corresponding strata based on their new weights. The examples are first put into the /// `In Queue`s until enough examples belonging to the same stratum have accumulated, /// at which point they are written to disk in batch.
/// /// Meanwhile, a certain number of examples from each stratum are loaded into memory /// from disk and kept in `Out Queue`s. /// The `Sampler`s iteratively select a stratum with a probability that is proportional to /// the sum of the weights of all examples in that stratum, send its next sampled example to the memory /// buffer, and remove that example from the strata. /// /// A `Shared Weight Table` maintains the sum of the weights of all examples in each stratum. /// The `Assigner`s increase the value in the `Shared Weight Table` when a new example is inserted into /// a stratum. /// The `Sampler`s use the weights in the `Shared Weight Table` to decide which stratum to read next and /// send its next sampled example to the memory buffer. After an example is processed, the `Sampler` also /// updates its weight, sends it to the right stratum, and updates the `Shared Weight Table` accordingly. pub fn new( num_examples: usize, feature_size: usize, positive: String, num_examples_per_block: usize, disk_buffer_filename: &str, num_assigners: usize, num_samplers: usize, sampled_examples: Sender<(ExampleWithScore, u32)>, sampling_signal: Receiver<Signal>, models: Receiver<Model>, channel_size: usize, debug_mode: bool, ) -> StratifiedStorage { let strata = Strata::new(num_examples, feature_size, num_examples_per_block, disk_buffer_filename); let strata = Arc::new(RwLock::new(strata)); let (counts_table_r, mut counts_table_w) = evmap::new(); let (weights_table_r, mut weights_table_w) = evmap::new(); let (updated_examples_s, updated_examples_r) = channel::bounded(channel_size, "updated-examples"); // The messages in the stats channel are very small, so its capacity can be larger. let (stats_update_s, stats_update_r) = channel::bounded(5000000, "stats"); // Update shared weights table (non-blocking) { let counts_table_r = counts_table_r.clone(); let weights_table_r = weights_table_r.clone(); spawn(move || { while let Some((index, (count, weight))) = stats_update_r.recv() { let val = counts_table_r.get_and(&index, |vs| vs[0]); counts_table_w.update(index, val.unwrap_or(0) + count); let cur = weights_table_r.get_and(&index, |vs: &[Box<F64>]| vs[0].val) .unwrap_or(0.0); weights_table_w.update(index, Box::new(F64 { val: cur + weight })); { counts_table_w.refresh(); weights_table_w.refresh(); } } }); } // Monitor the distribution of strata if debug_mode { let counts_table_r = counts_table_r.clone(); let weights_table_r = weights_table_r.clone(); spawn(move || { loop { sleep(Duration::from_millis(5000)); let mut p: Vec<(i8, f64)> = weights_table_r.map_into(|a: &i8, b: &[Box<F64>]| (a.clone(), b[0].val)); p.sort_by(|a, b| (a.0).cmp(&b.0)); let mut c: Vec<(i8, i32)> = counts_table_r.map_into(|a, b| (a.clone(), b[0])); c.sort_by(|a, b| (a.0).cmp(&b.0)); let mut sump: f64 = p.iter().map(|t| t.1).sum(); if get_sign(sump) == 0 { sump = 1.0; } let ps: Vec<String> = p.into_iter() .map(|(idx, w)| (idx, 100.0 * w / sump)) .map(|(idx, w)| format!("({}, {:.2})", idx, w)) .collect(); debug!("strata weights distr, {}, {}", ps.join(", "), sump); let sumc: i32 = max(c.iter().map(|t| t.1).sum(), 1); let cs: Vec<String> = c.into_iter() .map(|(idx, c)| (idx, 100.0 * c as f32 / (sumc as f32))) .map(|(idx, c)| format!("({}, {:.2})", idx, c)) .collect(); debug!("strata counts distr, {}, {}", cs.join(", "), sumc); } }); } let assigners = Assigners::new( updated_examples_r, strata.clone(), stats_update_s.clone(), num_assigners, ); let samplers = Samplers::new( strata.clone(), sampled_examples.clone(), updated_examples_s.clone(),
models.clone(), stats_update_s.clone(), weights_table_r.clone(), sampling_signal.clone(), num_samplers, ); assigners.run(); samplers.run(); StratifiedStorage { // num_examples: num_examples, // feature_size: feature_size, // num_examples_per_block: num_examples_per_block, // disk_buffer_filename: String::from(disk_buffer_filename), // strata: strata, // stats_update_s: stats_update_s, counts_table_r: counts_table_r, weights_table_r: weights_table_r, // num_assigners: num_assigners, // num_samplers: num_samplers, // updated_examples_r: updated_examples_r, updated_examples_s: updated_examples_s, // sampled_examples_s: sampled_examples, // sampling_signal: sampling_signal, // models: models, positive: positive, } } pub fn init_stratified_from_file( &self, filename: String, size: usize, batch_size: usize, feature_size: usize, range: Range<usize>, bins: Vec<Bins>, ) { let mut reader = SerialStorage::new( filename.clone(), size, feature_size, true, self.positive.clone(), None, range.clone(), ); let updated_examples_s = self.updated_examples_s.clone(); spawn(move || { let mut index = 0; while index < size { reader.read_raw(batch_size).into_iter().for_each(|data| { let features: Vec<TFeature> = data.feature.iter().enumerate() .map(|(idx, val)| { if range.start <= idx && idx < range.end { bins[idx - range.start].get_split_index(*val) } else { 0 } }).collect(); let mapped_data = LabeledData::new(features, data.label); updated_examples_s.send((mapped_data, (0.0, 0))); }); index += batch_size; } debug!("Raw data on disk has been loaded into the stratified storage, \ filename {}, capacity {}, feature size {}", filename, size, feature_size); }); } } fn sample_weights_table(weights_table_r: &WeightTableRead) -> Option<i8> { let p: Vec<(i8, f64)> = weights_table_r.map_into(|a, b| (a.clone(), b[0].val)); let sum_of_weights: f64 = p.iter().map(|t| t.1).sum(); if get_sign(sum_of_weights) == 0 { None } else { let mut frac = rand::random::<f64>() * sum_of_weights; let mut iter = p.iter(); let mut key_val = &(0, 0.0); while get_sign(frac) >= 0 { key_val = iter.next().expect("get_sample_from: input p is empty"); frac -= key_val.1; } Some(key_val.0) } } #[cfg(test)] mod tests { extern crate env_logger; use std::fs::remove_file; use commons::channel; use std::thread::spawn; use labeled_data::LabeledData; use commons::ExampleWithScore; use commons::Signal; use commons::performance_monitor::PerformanceMonitor; use super::StratifiedStorage; use ::TFeature; #[test] fn
() { let _ = env_logger::try_init(); let filename = "unittest-stratified3.bin"; let batch = 100000; let num_read = 1000000; let (sampled_examples_send, sampled_examples_recv) = channel::bounded(1000, "sampled-examples"); let (_, models_recv) = channel::bounded(10, "updated-models"); let (signal_s, signal_r) = channel::bounded(10, "sampling-signal"); signal_s.send(Signal::START); let stratified_storage = StratifiedStorage::new( batch * 10, 1, "1".to_string(), 10000, filename, 4, 4, sampled_examples_send, signal_r, models_recv, 10, false, ); let updated_examples_send = stratified_storage.updated_examples_s.clone(); let mut pm_load = PerformanceMonitor::new(); pm_load.start(); let loading = spawn(move || { for _ in 0..batch { for i in 1..11 { let t = get_example(vec![i as TFeature], i as f32); updated_examples_send.send(t.clone()); } } println!("Loading speed: {}", (batch * 10) as f32 / pm_load.get_duration()); }); let mut pm_sample = PerformanceMonitor::new(); pm_sample.start(); let mut average = 0.0; for _ in 0..num_read { let recv = sampled_examples_recv.recv().unwrap(); average += (((recv.0).0).feature[0] as f32) * (recv.1 as f32) / (num_read as f32); pm_sample.update(recv.1 as usize); } spawn(move || { println!("Sampling speed: {}", num_read as f32 / pm_sample.get_duration()); }); let answer = (1..11).map(|a| a as f32).map(|a| a * a).sum::<f32>() / ((1..11).sum::<i32>() as f32); loading.join().unwrap(); if (average - answer).abs() > 0.05 { spawn(move || { println!("Average: {}. Expect: {}.", average, answer); }).join().unwrap(); assert!(false); } remove_file(filename).unwrap(); } fn get_example(feature: Vec<TFeature>, weight: f32) -> ExampleWithScore { let label: i8 = 1; let example = LabeledData::new(feature, label); let score = -weight.ln(); (example, (score, 0)) } }
test_mean
identifier_name
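// A hedged sketch of a stratum assignment consistent with the invariant stated
// in the doc comment above (the weight ratio within one stratum stays below 2):
// bucket each example by the binary exponent of its weight. The actual `Strata`
// implementation is not shown in these records, so this is an assumption about
// the scheme, not a copy of it.
fn stratum_index(weight: f64) -> i8 {
    weight.log2().floor() as i8
}

fn main() {
    // 1.0 and 1.9 share stratum 0, and 1.9 / 1.0 < 2 as required;
    // 2.0 starts stratum 1, and sub-unit weights get negative indices.
    for &w in &[0.3, 0.9, 1.0, 1.9, 2.0, 7.5] {
        println!("weight {}: stratum {}", w, stratum_index(w));
    }
}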
mod.rs
mod strata; mod assigners; mod samplers; pub mod serial_storage; use std::cmp::max; use std::ops::Range; use std::sync::Arc; use std::sync::RwLock; use std::thread::spawn; use std::thread::sleep; use std::time::Duration; use evmap; use rand; use commons::bins::Bins; use commons::channel; use commons::channel::Receiver; use commons::channel::Sender; use commons::get_sign; use commons::ExampleWithScore; use commons::Model; use commons::Signal; use labeled_data::LabeledData; use super::Example; use super::TFeature; use self::assigners::Assigners; use self::samplers::Samplers; use self::serial_storage::SerialStorage; use self::strata::Strata; pub const SPEED_TEST: bool = false; pub struct F64 { pub val: f64 } impl PartialEq for F64 { fn eq(&self, other: &F64) -> bool { get_sign(self.val - other.val) == 0 } } impl Eq for F64 {} type WeightTableRead = evmap::ReadHandle<i8, Box<F64>>; pub struct StratifiedStorage { // num_examples: usize, // feature_size: usize, // num_examples_per_block: usize, // disk_buffer_filename: String, // strata: Arc<RwLock<Strata>>, // stats_update_s: Sender<(i8, (i32, f64))>, #[allow(dead_code)] counts_table_r: evmap::ReadHandle<i8, i32>, #[allow(dead_code)] weights_table_r: WeightTableRead, // num_assigners: usize, // num_samplers: usize, // updated_examples_r: Receiver<ExampleWithScore>, updated_examples_s: Sender<ExampleWithScore>, // sampled_examples_s: Sender<(ExampleWithScore, u32)>, // sampling_signal: Receiver<Signal>, // models: Receiver<Model>, positive: String, } impl StratifiedStorage { /// Create the stratified storage structure. /// /// * `num_examples`: the total number of examples in the training data set /// * `feature_size`: the number of features of the training examples /// * `num_examples_per_block`: the number of examples to write back to disk in batch (explained below) /// * `disk_buffer_filename`: the name of the binary file for saving the examples in strata on disk. /// If such a file does not exist, it will be created /// * `num_assigners`: the number of threads that run the `Assigner`s (explained below) /// * `num_samplers`: the number of threads that run the `Sampler`s (explained below) /// * `sampled_examples`: the channel that the stratified storage sends the sampled examples to /// the buffer loader /// * `sampling_signal`: the channel that the buffer loader sends sampling signals to /// start and stop the samplers as needed /// * `models`: the channel that the booster sends the latest models in /// /// Stratified storage organizes training examples according to their weights /// given the current learning model. /// The examples are assigned to different strata so that the weight ratio of the examples /// within the same stratum does not exceed 2. /// Most examples in a stratum are stored on disk, while a small number of examples remain /// in memory, either waiting to be written to disk or already read from disk and ready to be sent out to the sampler. /// /// The overall structure of the stratified storage is as follows: /// /// ![](https://www.lucidchart.com/publicSegments/view/c87b7a50-5442-4a41-a601-3dfb49b16511/image.png) /// /// The `Assigner`s read examples with updated scores from the `Sampler` and write them back to /// the corresponding strata based on their new weights. The examples are first put into the /// `In Queue`s until enough examples belonging to the same stratum have accumulated, /// at which point they are written to disk in batch.
/// /// Meanwhile, a certain number of examples from each stratum are loaded into memory /// from disk and kept in `Out Queue`s. /// The `Sampler`s iteratively select a stratum with a probability that is proportional to /// the sum of the weights of all examples in that stratum, send its next sampled example to the memory /// buffer, and remove that example from the strata. /// /// A `Shared Weight Table` maintains the sum of the weights of all examples in each stratum. /// The `Assigner`s increase the value in the `Shared Weight Table` when a new example is inserted into /// a stratum. /// The `Sampler`s use the weights in the `Shared Weight Table` to decide which stratum to read next and /// send its next sampled example to the memory buffer. After an example is processed, the `Sampler` also /// updates its weight, sends it to the right stratum, and updates the `Shared Weight Table` accordingly. pub fn new( num_examples: usize, feature_size: usize, positive: String, num_examples_per_block: usize, disk_buffer_filename: &str, num_assigners: usize, num_samplers: usize, sampled_examples: Sender<(ExampleWithScore, u32)>, sampling_signal: Receiver<Signal>, models: Receiver<Model>, channel_size: usize, debug_mode: bool, ) -> StratifiedStorage
weights_table_w.update(index, Box::new(F64 { val: cur + weight })); { counts_table_w.refresh(); weights_table_w.refresh(); } } }); } // Monitor the distribution of strata if debug_mode { let counts_table_r = counts_table_r.clone(); let weights_table_r = weights_table_r.clone(); spawn(move || { loop { sleep(Duration::from_millis(5000)); let mut p: Vec<(i8, f64)> = weights_table_r.map_into(|a: &i8, b: &[Box<F64>]| (a.clone(), b[0].val)); p.sort_by(|a, b| (a.0).cmp(&b.0)); let mut c: Vec<(i8, i32)> = counts_table_r.map_into(|a, b| (a.clone(), b[0])); c.sort_by(|a, b| (a.0).cmp(&b.0)); let mut sump: f64 = p.iter().map(|t| t.1).sum(); if get_sign(sump) == 0 { sump = 1.0; } let ps: Vec<String> = p.into_iter() .map(|(idx, w)| (idx, 100.0 * w / sump)) .map(|(idx, w)| format!("({}, {:.2})", idx, w)) .collect(); debug!("strata weights distr, {}, {}", ps.join(", "), sump); let sumc: i32 = max(c.iter().map(|t| t.1).sum(), 1); let cs: Vec<String> = c.into_iter() .map(|(idx, c)| (idx, 100.0 * c as f32 / (sumc as f32))) .map(|(idx, c)| format!("({}, {:.2})", idx, c)) .collect(); debug!("strata counts distr, {}, {}", cs.join(", "), sumc); } }); } let assigners = Assigners::new( updated_examples_r, strata.clone(), stats_update_s.clone(), num_assigners, ); let samplers = Samplers::new( strata.clone(), sampled_examples.clone(), updated_examples_s.clone(), models.clone(), stats_update_s.clone(), weights_table_r.clone(), sampling_signal.clone(), num_samplers, ); assigners.run(); samplers.run(); StratifiedStorage { // num_examples: num_examples, // feature_size: feature_size, // num_examples_per_block: num_examples_per_block, // disk_buffer_filename: String::from(disk_buffer_filename), // strata: strata, // stats_update_s: stats_update_s, counts_table_r: counts_table_r, weights_table_r: weights_table_r, // num_assigners: num_assigners, // num_samplers: num_samplers, // updated_examples_r: updated_examples_r, updated_examples_s: updated_examples_s, // sampled_examples_s: sampled_examples, // sampling_signal: sampling_signal, // models: models, positive: positive, } } pub fn init_stratified_from_file( &self, filename: String, size: usize, batch_size: usize, feature_size: usize, range: Range<usize>, bins: Vec<Bins>, ) { let mut reader = SerialStorage::new( filename.clone(), size, feature_size, true, self.positive.clone(), None, range.clone(), ); let updated_examples_s = self.updated_examples_s.clone(); spawn(move || { let mut index = 0; while index < size { reader.read_raw(batch_size).into_iter().for_each(|data| { let features: Vec<TFeature> = data.feature.iter().enumerate() .map(|(idx, val)| { if range.start <= idx && idx < range.end { bins[idx - range.start].get_split_index(*val) } else { 0 } }).collect(); let mapped_data = LabeledData::new(features, data.label); updated_examples_s.send((mapped_data, (0.0, 0))); }); index += batch_size; } debug!("Raw data on disk has been loaded into the stratified storage, \ filename {}, capacity {}, feature size {}", filename, size, feature_size); }); } } fn sample_weights_table(weights_table_r: &WeightTableRead) -> Option<i8> { let p: Vec<(i8, f64)> = weights_table_r.map_into(|a, b| (a.clone(), b[0].val)); let sum_of_weights: f64 = p.iter().map(|t| t.1).sum(); if get_sign(sum_of_weights) == 0 { None } else { let mut frac = rand::random::<f64>() * sum_of_weights; let mut iter = p.iter(); let mut key_val = &(0, 0.0); while get_sign(frac) >= 0 { key_val = iter.next().expect("get_sample_from: input p is empty"); frac -= key_val.1; } Some(key_val.0) } } #[cfg(test)] mod tests { 
extern crate env_logger; use std::fs::remove_file; use commons::channel; use std::thread::spawn; use labeled_data::LabeledData; use commons::ExampleWithScore; use commons::Signal; use commons::performance_monitor::PerformanceMonitor; use super::StratifiedStorage; use ::TFeature; #[test] fn test_mean() { let _ = env_logger::try_init(); let filename = "unittest-stratified3.bin"; let batch = 100000; let num_read = 1000000; let (sampled_examples_send, sampled_examples_recv) = channel::bounded(1000, "sampled-examples"); let (_, models_recv) = channel::bounded(10, "updated-models"); let (signal_s, signal_r) = channel::bounded(10, "sampling-signal"); signal_s.send(Signal::START); let stratified_storage = StratifiedStorage::new( batch * 10, 1, "1".to_string(), 10000, filename, 4, 4, sampled_examples_send, signal_r, models_recv, 10, false, ); let updated_examples_send = stratified_storage.updated_examples_s.clone(); let mut pm_load = PerformanceMonitor::new(); pm_load.start(); let loading = spawn(move || { for _ in 0..batch { for i in 1..11 { let t = get_example(vec![i as TFeature], i as f32); updated_examples_send.send(t.clone()); } } println!("Loading speed: {}", (batch * 10) as f32 / pm_load.get_duration()); }); let mut pm_sample = PerformanceMonitor::new(); pm_sample.start(); let mut average = 0.0; for _ in 0..num_read { let recv = sampled_examples_recv.recv().unwrap(); average += (((recv.0).0).feature[0] as f32) * (recv.1 as f32) / (num_read as f32); pm_sample.update(recv.1 as usize); } spawn(move || { println!("Sampling speed: {}", num_read as f32 / pm_sample.get_duration()); }); let answer = (1..11).map(|a| a as f32).map(|a| a * a).sum::<f32>() / ((1..11).sum::<i32>() as f32); loading.join().unwrap(); if (average - answer).abs() > 0.05 { spawn(move || { println!("Average: {}. Expect: {}.", average, answer); }).join().unwrap(); assert!(false); } remove_file(filename).unwrap(); } fn get_example(feature: Vec<TFeature>, weight: f32) -> ExampleWithScore { let label: i8 = 1; let example = LabeledData::new(feature, label); let score = -weight.ln(); (example, (score, 0)) } }
{ let strata = Strata::new(num_examples, feature_size, num_examples_per_block, disk_buffer_filename); let strata = Arc::new(RwLock::new(strata)); let (counts_table_r, mut counts_table_w) = evmap::new(); let (weights_table_r, mut weights_table_w) = evmap::new(); let (updated_examples_s, updated_examples_r) = channel::bounded(channel_size, "updated-examples"); // The messages in the stats channel are very small, so its capacity can be larger. let (stats_update_s, stats_update_r) = channel::bounded(5000000, "stats"); // Update shared weights table (non-blocking) { let counts_table_r = counts_table_r.clone(); let weights_table_r = weights_table_r.clone(); spawn(move || { while let Some((index, (count, weight))) = stats_update_r.recv() { let val = counts_table_r.get_and(&index, |vs| vs[0]); counts_table_w.update(index, val.unwrap_or(0) + count); let cur = weights_table_r.get_and(&index, |vs: &[Box<F64>]| vs[0].val) .unwrap_or(0.0);
identifier_body
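// A hedged sketch of the single-writer/multi-reader pattern the stats thread in
// the record above relies on: with evmap, writes become visible to read handles
// only after `refresh()`. This assumes an evmap version exposing the same
// `update`/`refresh`/`get_and` calls that the record itself uses.
fn main() {
    let (r, mut w) = evmap::new::<i8, i32>();
    w.update(0, 10);
    // Not yet refreshed: readers still see nothing for this key.
    assert_eq!(r.get_and(&0, |vs| vs[0]), None);
    w.refresh();
    // After refresh, the value is published to readers.
    assert_eq!(r.get_and(&0, |vs| vs[0]), Some(10));
}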
mod.rs
mod strata; mod assigners; mod samplers; pub mod serial_storage; use std::cmp::max; use std::ops::Range; use std::sync::Arc; use std::sync::RwLock; use std::thread::spawn; use std::thread::sleep; use std::time::Duration; use evmap; use rand; use commons::bins::Bins; use commons::channel; use commons::channel::Receiver; use commons::channel::Sender; use commons::get_sign; use commons::ExampleWithScore; use commons::Model; use commons::Signal; use labeled_data::LabeledData; use super::Example; use super::TFeature; use self::assigners::Assigners; use self::samplers::Samplers; use self::serial_storage::SerialStorage; use self::strata::Strata; pub const SPEED_TEST: bool = false; pub struct F64 { pub val: f64 } impl PartialEq for F64 { fn eq(&self, other: &F64) -> bool { get_sign(self.val - other.val) == 0 } } impl Eq for F64 {} type WeightTableRead = evmap::ReadHandle<i8, Box<F64>>; pub struct StratifiedStorage { // num_examples: usize, // feature_size: usize, // num_examples_per_block: usize, // disk_buffer_filename: String, // strata: Arc<RwLock<Strata>>, // stats_update_s: Sender<(i8, (i32, f64))>, #[allow(dead_code)] counts_table_r: evmap::ReadHandle<i8, i32>, #[allow(dead_code)] weights_table_r: WeightTableRead, // num_assigners: usize, // num_samplers: usize, // updated_examples_r: Receiver<ExampleWithScore>, updated_examples_s: Sender<ExampleWithScore>, // sampled_examples_s: Sender<(ExampleWithScore, u32)>, // sampling_signal: Receiver<Signal>, // models: Receiver<Model>, positive: String, } impl StratifiedStorage { /// Create the stratified storage structure. /// /// * `num_examples`: the total number of examples in the training data set /// * `feature_size`: the number of features of the training examples /// * `num_examples_per_block`: the number of examples to write back to disk in batch (explained below) /// * `disk_buffer_filename`: the name of the binary file for saving the examples in strata on disk. /// If such a file does not exist, it will be created /// * `num_assigners`: the number of threads that run the `Assigner`s (explained below) /// * `num_samplers`: the number of threads that run the `Sampler`s (explained below) /// * `sampled_examples`: the channel that the stratified storage sends the sampled examples to /// the buffer loader /// * `sampling_signal`: the channel that the buffer loader sends sampling signals to /// start and stop the samplers as needed /// * `models`: the channel that the booster sends the latest models in /// /// Stratified storage organizes training examples according to their weights /// given the current learning model. /// The examples are assigned to different strata so that the weight ratio of the examples /// within the same stratum does not exceed 2. /// Most examples in a stratum are stored on disk, while a small number of examples remain /// in memory, either waiting to be written to disk or already read from disk and ready to be sent out to the sampler. /// /// The overall structure of the stratified storage is as follows: /// /// ![](https://www.lucidchart.com/publicSegments/view/c87b7a50-5442-4a41-a601-3dfb49b16511/image.png) /// /// The `Assigner`s read examples with updated scores from the `Sampler` and write them back to /// the corresponding strata based on their new weights. The examples are first put into the /// `In Queue`s until enough examples belonging to the same stratum have accumulated, /// at which point they are written to disk in batch.
/// /// Meanwhile, a certain number of examples from each stratum are loaded into memory /// from disk and kept in `Out Queue`s. /// The `Sampler`s iteratively select a stratum with a probability that is proportional to /// the sum of the weights of all examples in that stratum, send its next sampled example to the memory /// buffer, and remove that example from the strata. /// /// A `Shared Weight Table` maintains the sum of the weights of all examples in each stratum. /// The `Assigner`s increase the value in the `Shared Weight Table` when a new example is inserted into /// a stratum. /// The `Sampler`s use the weights in the `Shared Weight Table` to decide which stratum to read next and /// send its next sampled example to the memory buffer. After an example is processed, the `Sampler` also /// updates its weight, sends it to the right stratum, and updates the `Shared Weight Table` accordingly. pub fn new( num_examples: usize, feature_size: usize, positive: String, num_examples_per_block: usize, disk_buffer_filename: &str, num_assigners: usize, num_samplers: usize, sampled_examples: Sender<(ExampleWithScore, u32)>, sampling_signal: Receiver<Signal>, models: Receiver<Model>, channel_size: usize, debug_mode: bool, ) -> StratifiedStorage { let strata = Strata::new(num_examples, feature_size, num_examples_per_block, disk_buffer_filename); let strata = Arc::new(RwLock::new(strata)); let (counts_table_r, mut counts_table_w) = evmap::new(); let (weights_table_r, mut weights_table_w) = evmap::new(); let (updated_examples_s, updated_examples_r) = channel::bounded(channel_size, "updated-examples"); // The messages in the stats channel are very small, so its capacity can be larger. let (stats_update_s, stats_update_r) = channel::bounded(5000000, "stats"); // Update shared weights table (non-blocking) { let counts_table_r = counts_table_r.clone(); let weights_table_r = weights_table_r.clone(); spawn(move || { while let Some((index, (count, weight))) = stats_update_r.recv() { let val = counts_table_r.get_and(&index, |vs| vs[0]); counts_table_w.update(index, val.unwrap_or(0) + count); let cur = weights_table_r.get_and(&index, |vs: &[Box<F64>]| vs[0].val) .unwrap_or(0.0); weights_table_w.update(index, Box::new(F64 { val: cur + weight })); { counts_table_w.refresh(); weights_table_w.refresh(); } } }); } // Monitor the distribution of strata if debug_mode { let counts_table_r = counts_table_r.clone(); let weights_table_r = weights_table_r.clone(); spawn(move || { loop { sleep(Duration::from_millis(5000)); let mut p: Vec<(i8, f64)> = weights_table_r.map_into(|a: &i8, b: &[Box<F64>]| (a.clone(), b[0].val)); p.sort_by(|a, b| (a.0).cmp(&b.0)); let mut c: Vec<(i8, i32)> = counts_table_r.map_into(|a, b| (a.clone(), b[0])); c.sort_by(|a, b| (a.0).cmp(&b.0)); let mut sump: f64 = p.iter().map(|t| t.1).sum(); if get_sign(sump) == 0 { sump = 1.0; } let ps: Vec<String> = p.into_iter() .map(|(idx, w)| (idx, 100.0 * w / sump)) .map(|(idx, w)| format!("({}, {:.2})", idx, w)) .collect(); debug!("strata weights distr, {}, {}", ps.join(", "), sump); let sumc: i32 = max(c.iter().map(|t| t.1).sum(), 1); let cs: Vec<String> = c.into_iter() .map(|(idx, c)| (idx, 100.0 * c as f32 / (sumc as f32))) .map(|(idx, c)| format!("({}, {:.2})", idx, c)) .collect(); debug!("strata counts distr, {}, {}", cs.join(", "), sumc); } }); } let assigners = Assigners::new( updated_examples_r, strata.clone(), stats_update_s.clone(), num_assigners, ); let samplers = Samplers::new( strata.clone(), sampled_examples.clone(), updated_examples_s.clone(),
models.clone(), stats_update_s.clone(), weights_table_r.clone(), sampling_signal.clone(), num_samplers, ); assigners.run(); samplers.run(); StratifiedStorage { // num_examples: num_examples, // feature_size: feature_size, // num_examples_per_block: num_examples_per_block, // disk_buffer_filename: String::from(disk_buffer_filename), // strata: strata, // stats_update_s: stats_update_s, counts_table_r: counts_table_r, weights_table_r: weights_table_r, // num_assigners: num_assigners, // num_samplers: num_samplers, // updated_examples_r: updated_examples_r, updated_examples_s: updated_examples_s, // sampled_examples_s: sampled_examples, // sampling_signal: sampling_signal, // models: models, positive: positive, } } pub fn init_stratified_from_file( &self, filename: String, size: usize, batch_size: usize, feature_size: usize, range: Range<usize>, bins: Vec<Bins>, ) { let mut reader = SerialStorage::new( filename.clone(), size, feature_size, true, self.positive.clone(), None, range.clone(), ); let updated_examples_s = self.updated_examples_s.clone(); spawn(move || { let mut index = 0; while index < size { reader.read_raw(batch_size).into_iter().for_each(|data| { let features: Vec<TFeature> = data.feature.iter().enumerate() .map(|(idx, val)| { if range.start <= idx && idx < range.end
else { 0 } }).collect(); let mapped_data = LabeledData::new(features, data.label); updated_examples_s.send((mapped_data, (0.0, 0))); }); index += batch_size; } debug!("Raw data on disk has been loaded into the stratified storage, \ filename {}, capacity {}, feature size {}", filename, size, feature_size); }); } } fn sample_weights_table(weights_table_r: &WeightTableRead) -> Option<i8> { let p: Vec<(i8, f64)> = weights_table_r.map_into(|a, b| (a.clone(), b[0].val)); let sum_of_weights: f64 = p.iter().map(|t| t.1).sum(); if get_sign(sum_of_weights) == 0 { None } else { let mut frac = rand::random::<f64>() * sum_of_weights; let mut iter = p.iter(); let mut key_val = &(0, 0.0); while get_sign(frac) >= 0 { key_val = iter.next().expect("get_sample_from: input p is empty"); frac -= key_val.1; } Some(key_val.0) } } #[cfg(test)] mod tests { extern crate env_logger; use std::fs::remove_file; use commons::channel; use std::thread::spawn; use labeled_data::LabeledData; use commons::ExampleWithScore; use commons::Signal; use commons::performance_monitor::PerformanceMonitor; use super::StratifiedStorage; use ::TFeature; #[test] fn test_mean() { let _ = env_logger::try_init(); let filename = "unittest-stratified3.bin"; let batch = 100000; let num_read = 1000000; let (sampled_examples_send, sampled_examples_recv) = channel::bounded(1000, "sampled-examples"); let (_, models_recv) = channel::bounded(10, "updated-models"); let (signal_s, signal_r) = channel::bounded(10, "sampling-signal"); signal_s.send(Signal::START); let stratified_storage = StratifiedStorage::new( batch * 10, 1, "1".to_string(), 10000, filename, 4, 4, sampled_examples_send, signal_r, models_recv, 10, false, ); let updated_examples_send = stratified_storage.updated_examples_s.clone(); let mut pm_load = PerformanceMonitor::new(); pm_load.start(); let loading = spawn(move || { for _ in 0..batch { for i in 1..11 { let t = get_example(vec![i as TFeature], i as f32); updated_examples_send.send(t.clone()); } } println!("Loading speed: {}", (batch * 10) as f32 / pm_load.get_duration()); }); let mut pm_sample = PerformanceMonitor::new(); pm_sample.start(); let mut average = 0.0; for _ in 0..num_read { let recv = sampled_examples_recv.recv().unwrap(); average += (((recv.0).0).feature[0] as f32) * (recv.1 as f32) / (num_read as f32); pm_sample.update(recv.1 as usize); } spawn(move || { println!("Sampling speed: {}", num_read as f32 / pm_sample.get_duration()); }); let answer = (1..11).map(|a| a as f32).map(|a| a * a).sum::<f32>() / ((1..11).sum::<i32>() as f32); loading.join().unwrap(); if (average - answer).abs() > 0.05 { spawn(move || { println!("Average: {}. Expect: {}.", average, answer); }).join().unwrap(); assert!(false); } remove_file(filename).unwrap(); } fn get_example(feature: Vec<TFeature>, weight: f32) -> ExampleWithScore { let label: i8 = 1; let example = LabeledData::new(feature, label); let score = -weight.ln(); (example, (score, 0)) } }
{ bins[idx - range.start].get_split_index(*val) }
conditional_block
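// A hedged sketch of the feature binning step in the record above.
// `Bins::get_split_index` is not shown in these records; a common
// implementation maps a raw value to the index of the first split point it
// does not exceed, so the version below is an assumption for illustration.
fn get_split_index(splits: &[f32], val: f32) -> usize {
    // Values beyond every split fall into the last (overflow) bin.
    splits.iter().position(|&s| val <= s).unwrap_or(splits.len())
}

fn main() {
    let splits = [0.5, 1.5, 2.5];
    assert_eq!(get_split_index(&splits, 0.1), 0);
    assert_eq!(get_split_index(&splits, 0.7), 1);
    assert_eq!(get_split_index(&splits, 2.0), 2);
    assert_eq!(get_split_index(&splits, 9.0), 3);
}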
main.rs
#![allow(clippy::float_cmp)] #![allow(clippy::inline_always)] #![allow(clippy::many_single_char_names)] #![allow(clippy::needless_lifetimes)] #![allow(clippy::needless_return)] #![allow(clippy::or_fun_call)] #![allow(clippy::too_many_arguments)] #![allow(clippy::redundant_field_names)] #![allow(clippy::enum_variant_names)] #![allow(clippy::cast_lossless)] #![allow(clippy::needless_range_loop)] #![allow(clippy::excessive_precision)] #![allow(clippy::transmute_ptr_to_ptr)] extern crate lazy_static; mod accel; mod algorithm; mod bbox; mod bbox4; mod boundable; mod camera; mod color; mod fp_utils; mod hash; mod hilbert; mod image; mod lerp; mod light; mod math; mod mis; mod parse; mod ray; mod renderer; mod sampling; mod scene; mod shading; mod surface; mod timer; mod tracer; mod transform_stack; use std::{fs::File, io, io::Read, mem, path::Path, str::FromStr}; use clap::{App, Arg}; use nom::bytes::complete::take_until; use kioku::Arena; use crate::{ accel::BVH4Node, bbox::BBox, parse::{parse_scene, DataTree}, renderer::LightPath, surface::SurfaceIntersection, timer::Timer, }; const VERSION: &str = env!("CARGO_PKG_VERSION"); #[allow(clippy::cognitive_complexity)] fn main() { let mut t = Timer::new(); // Parse command line arguments. let args = App::new("Psychopath") .version(VERSION) .about("A slightly psychotic path tracer") .arg( Arg::with_name("input") .short("i") .long("input") .value_name("FILE") .help("Input .psy file") .takes_value(true) .required_unless_one(&["dev", "use_stdin"]), ) .arg( Arg::with_name("spp") .short("s") .long("spp") .value_name("N") .help("Number of samples per pixel") .takes_value(true) .validator(|s| { usize::from_str(&s) .and(Ok(())) .or(Err("must be an integer".to_string())) }), ) .arg( Arg::with_name("max_bucket_samples") .short("b") .long("spb") .value_name("N") .help("Target number of samples per bucket (determines bucket size)") .takes_value(true) .validator(|s| { usize::from_str(&s) .and(Ok(())) .or(Err("must be an integer".to_string())) }), ) .arg( Arg::with_name("crop") .long("crop") .value_name("X1 Y1 X2 Y2") .help( "Only render the image between pixel coordinates (X1, Y1) \ and (X2, Y2). Coordinates are zero-indexed and inclusive.", ) .takes_value(true) .number_of_values(4) .validator(|s| { usize::from_str(&s) .and(Ok(())) .or(Err("must be four integers".to_string())) }), ) .arg( Arg::with_name("threads") .short("t") .long("threads") .value_name("N") .help( "Number of threads to render with. Defaults to the number of logical \ cores on the system.", ) .takes_value(true) .validator(|s| { usize::from_str(&s) .and(Ok(())) .or(Err("must be an integer".to_string())) }), ) .arg( Arg::with_name("stats") .long("stats") .help("Print additional statistics about rendering"), ) .arg( Arg::with_name("dev") .long("dev") .help("Show useful dev/debug info."), ) .arg( Arg::with_name("serialized_output") .long("serialized_output") .help("Serialize and send render output to standard output.") .hidden(true), ) .arg( Arg::with_name("use_stdin") .long("use_stdin") .help("Take scene file in from stdin instead of a file path.") .hidden(true), ) .get_matches(); // Print some misc useful dev info.
if args.is_present("dev") { println!( "SurfaceIntersection size: {} bytes", mem::size_of::<SurfaceIntersection>() ); println!("LightPath size: {} bytes", mem::size_of::<LightPath>()); println!("BBox size: {} bytes", mem::size_of::<BBox>()); // println!("BVHNode size: {} bytes", mem::size_of::<BVHNode>()); println!("BVH4Node size: {} bytes", mem::size_of::<BVH4Node>()); return; } let crop = args.values_of("crop").map(|mut vals| { let coords = ( u32::from_str(vals.next().unwrap()).unwrap(), u32::from_str(vals.next().unwrap()).unwrap(), u32::from_str(vals.next().unwrap()).unwrap(), u32::from_str(vals.next().unwrap()).unwrap(), ); if coords.0 > coords.2 { panic!("Argument '--crop': X1 must be less than or equal to X2"); } if coords.1 > coords.3 { panic!("Argument '--crop': Y1 must be less than or equal to Y2"); } coords }); // Parse data tree of scene file if!args.is_present("serialized_output") { println!("Parsing scene file...",); } t.tick(); let psy_contents = if args.is_present("use_stdin") { // Read from stdin let mut input = Vec::new(); let tmp = std::io::stdin(); let mut stdin = tmp.lock(); let mut buf = vec![0u8; 4096]; loop { let count = stdin .read(&mut buf) .expect("Unexpected end of scene input."); let start = if input.len() < 11 { 0 } else { input.len() - 11 }; let end = input.len() + count; input.extend(&buf[..count]); let mut done = false; let mut trunc_len = 0; if let nom::IResult::Ok((remaining, _)) = take_until::<&str, &[u8], ()>("__PSY_EOF__")(&input[start..end]) { done = true; trunc_len = input.len() - remaining.len(); } if done { input.truncate(trunc_len); break; } } String::from_utf8(input).unwrap() } else { // Read from file let mut input = String::new(); let fp = args.value_of("input").unwrap(); let mut f = io::BufReader::new(File::open(fp).unwrap()); let _ = f.read_to_string(&mut input); input }; let dt = DataTree::from_str(&psy_contents).unwrap(); if!args.is_present("serialized_output") { println!("\tParsed scene file in {:.3}s", t.tick()); } // Iterate through scenes and render them if let DataTree::Internal { ref children,.. } = dt { for child in children { t.tick(); if child.type_name() == "Scene" { if!args.is_present("serialized_output") { println!("Building scene..."); } let arena = Arena::new().with_block_size((1 << 20) * 4); let mut r = parse_scene(&arena, child).unwrap_or_else(|e| { e.print(&psy_contents); panic!("Parse error."); }); if let Some(spp) = args.value_of("spp") { if!args.is_present("serialized_output") { println!("\tOverriding scene spp: {}", spp); } r.spp = usize::from_str(spp).unwrap(); } let max_samples_per_bucket = if let Some(max_samples_per_bucket) = args.value_of("max_bucket_samples") { u32::from_str(max_samples_per_bucket).unwrap() } else { 4096 }; let thread_count = if let Some(threads) = args.value_of("threads") { u32::from_str(threads).unwrap() } else { num_cpus::get() as u32 }; if!args.is_present("serialized_output")
if!args.is_present("serialized_output") { println!("Rendering scene with {} threads...", thread_count); } let (mut image, rstats) = r.render( max_samples_per_bucket, crop, thread_count, args.is_present("serialized_output"), ); // Print render stats if!args.is_present("serialized_output") { let rtime = t.tick(); let ntime = rtime as f64 / rstats.total_time; println!("\tRendered scene in {:.3}s", rtime); println!( "\t\tTrace: {:.3}s", ntime * rstats.trace_time ); println!("\t\t\tRays traced: {}", rstats.ray_count); println!( "\t\t\tRays/sec: {}", (rstats.ray_count as f64 / (ntime * rstats.trace_time) as f64) as u64 ); println!("\t\t\tRay/node tests: {}", rstats.accel_node_visits); println!( "\t\tInitial ray generation: {:.3}s", ntime * rstats.initial_ray_generation_time ); println!( "\t\tRay generation: {:.3}s", ntime * rstats.ray_generation_time ); println!( "\t\tSample writing: {:.3}s", ntime * rstats.sample_writing_time ); } // Write to disk if!args.is_present("serialized_output") { println!("Writing image to disk into '{}'...", r.output_file); if r.output_file.ends_with(".png") { image .write_png(Path::new(&r.output_file)) .expect("Failed to write png..."); } else if r.output_file.ends_with(".exr") { image.write_exr(Path::new(&r.output_file)); } else { panic!("Unknown output file extension."); } println!("\tWrote image in {:.3}s", t.tick()); } // Print memory stats if stats are wanted. if args.is_present("stats") { // let arena_stats = arena.stats(); // let mib_occupied = arena_stats.0 as f64 / 1_048_576.0; // let mib_allocated = arena_stats.1 as f64 / 1_048_576.0; // println!("MemArena stats:"); // if mib_occupied >= 1.0 { // println!("\tOccupied: {:.1} MiB", mib_occupied); // } else { // println!("\tOccupied: {:.4} MiB", mib_occupied); // } // if mib_allocated >= 1.0 { // println!("\tUsed: {:.1} MiB", mib_allocated); // } else { // println!("\tUsed: {:.4} MiB", mib_allocated); // } // println!("\tTotal blocks: {}", arena_stats.2); } } } } // End with blank line println!(); }
{ println!("\tBuilt scene in {:.3}s", t.tick()); }
conditional_block
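// A hedged sketch of the stdin sentinel scan in the record above: read chunks,
// and stop once "__PSY_EOF__" is seen, re-scanning only a tail window wide
// enough to catch a marker split across two reads (the marker is 11 bytes,
// hence the record's `len - 11`). Plain slice search stands in for nom's
// `take_until`; the input chunks below are made up for illustration.
fn find_marker(haystack: &[u8], needle: &[u8]) -> Option<usize> {
    haystack.windows(needle.len()).position(|w| w == needle)
}

fn main() {
    let mut input: Vec<u8> = Vec::new();
    // Simulate two reads that split the marker across the boundary.
    for chunk in &[&b"scene data __PSY"[..], &b"_EOF__ trailing"[..]] {
        let start = input.len().saturating_sub(11);
        input.extend_from_slice(chunk);
        if let Some(pos) = find_marker(&input[start..], b"__PSY_EOF__") {
            // Keep only the bytes that precede the marker.
            input.truncate(start + pos);
            break;
        }
    }
    assert_eq!(&input[..], &b"scene data "[..]);
}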