Dataset schema (per-row fields):

  file_name   large_string   lengths 4 to 69
  prefix      large_string   lengths 0 to 26.7k
  suffix      large_string   lengths 0 to 24.8k
  middle      large_string   lengths 0 to 2.12k
  fim_type    large_string   4 classes
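The three string columns partition each source file around a masked span: concatenating prefix, middle and suffix reproduces the original file text, while fim_type records how the mask boundary was chosen. A minimal sketch of that invariant (the FimRow type and its field names are illustrative, not part of the dataset's API):

// Hypothetical row type mirroring the schema above.
struct FimRow {
    file_name: String,
    prefix: String,
    middle: String,
    suffix: String,
    fim_type: String,
}

impl FimRow {
    // Reassemble the original file contents from the three spans.
    fn reconstruct(&self) -> String {
        format!("{}{}{}", self.prefix, self.middle, self.suffix)
    }
}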
nvg.rs
//! NanoVG is a small antialiased vector graphics rendering library with a lean
//! API modeled after the HTML5 Canvas API. It can be used to draw gauge
//! instruments in MSFS. See `Gauge::create_nanovg`.

use crate::sys;

type Result = std::result::Result<(), Box<dyn std::error::Error>>;

/// A NanoVG render context.
pub struct Context {
    ctx: *mut sys::NVGcontext,
}

impl Context {
    /// Create a NanoVG render context from an `FsContext`.
    pub fn create(fs_ctx: sys::FsContext) -> Option<Self> {
        let uninit = std::mem::MaybeUninit::<sys::NVGparams>::zeroed();
        let mut params = unsafe { uninit.assume_init() };
        params.userPtr = fs_ctx;
        params.edgeAntiAlias = 1;
        let ctx = unsafe { sys::nvgCreateInternal(&mut params) };
        if ctx.is_null() {
            None
        } else {
            Some(Self { ctx })
        }
    }

    /// Draw a frame.
    pub fn draw_frame<F: Fn(&Frame) -> Result>(&self, width: usize, height: usize, f: F) {
        unsafe {
            sys::nvgBeginFrame(self.ctx, width as f32, height as f32, 1.0);
        }
        let frame = Frame { ctx: self.ctx };
        match f(&frame) {
            Ok(()) => unsafe {
                sys::nvgEndFrame(self.ctx);
            },
            Err(_) => unsafe {
                sys::nvgCancelFrame(self.ctx);
            },
        }
    }

    /// NanoVG allows you to load .ttf files and use the font to render text.
    ///
    /// The appearance of the text can be defined by setting the current text style
    /// and by specifying the fill color. Common text and font settings such as
    /// font size, letter spacing and text align are supported. Font blur allows you
    /// to create simple text effects such as drop shadows.
    ///
    /// At render time the font face can be set based on the font handle or name.
    ///
    /// Font measure functions return values in local space; the calculations are
    /// carried out in the same resolution as the final rendering. This is done because
    /// the text glyph positions are snapped to the nearest pixel for sharp rendering.
    ///
    /// Local space means that values are not rotated or scaled as per the current
    /// transformation. For example, if you set the font size to 12, which would mean
    /// that the line height is 16, then regardless of the current scaling and rotation
    /// the returned line height is always 16. Some measures may vary because of the
    /// aforementioned pixel snapping when scaling is applied.
    ///
    /// While this may sound a little odd, the setup allows you to always render the
    /// same way regardless of scaling.
    ///
    /// Note: currently only solid color fill is supported for text.
    pub fn create_font(
        &self,
        name: &str,
        filename: &str,
    ) -> std::result::Result<Font, Box<dyn std::error::Error>> {
        let name = std::ffi::CString::new(name).unwrap();
        let filename = std::ffi::CString::new(filename).unwrap();
        let handle = unsafe { sys::nvgCreateFont(self.ctx, name.as_ptr(), filename.as_ptr()) };
        match handle {
            -1 => Err(Box::new(std::io::Error::new(
                std::io::ErrorKind::Other,
                "unable to load font",
            ))),
            _ => Ok(Font { handle }),
        }
    }

    /// NanoVG allows you to load jpg, png, psd, tga, pic and gif files to be used for
    /// rendering. In addition you can upload your own image. The image loading is
    /// provided by stb_image.
    pub fn create_image(
        &self,
        filename: &str,
    ) -> std::result::Result<Image, Box<dyn std::error::Error>> {
        let filename = std::ffi::CString::new(filename).unwrap();
        let handle = unsafe { sys::nvgCreateImage(self.ctx, filename.as_ptr(), 0) };
        match handle {
            -1 => Err(Box::new(std::io::Error::new(
                std::io::ErrorKind::Other,
                "unable to load image",
            ))),
            _ => Ok(Image {
                ctx: self.ctx,
                handle,
            }),
        }
    }
}

impl Drop for Context {
    fn drop(&mut self) {
        unsafe {
            sys::nvgDeleteInternal(self.ctx);
        }
    }
}

/// Methods to draw on a frame.
/// See `Context::draw_frame`.
pub struct Frame {
    ctx: *mut sys::NVGcontext,
}

impl Frame {
    /// Draw a path.
    pub fn draw_path<F: Fn(&Path) -> Result>(&self, style: &Style, f: F) -> Result {
        unsafe {
            // sys::nvgSave(self.ctx);
            // sys::nvgReset(self.ctx);
            sys::nvgBeginPath(self.ctx);
        }
        if let Some(stroke) = &style.stroke {
            match stroke {
                PaintOrColor::Paint(p) => unsafe {
                    sys::nvgStrokePaint(self.ctx, &p.0);
                },
                PaintOrColor::Color(c) => unsafe {
                    sys::nvgStrokeColor(self.ctx, &c.0);
                },
            }
        }
        if let Some(fill) = &style.fill {
            match fill {
                PaintOrColor::Paint(p) => unsafe {
                    sys::nvgFillPaint(self.ctx, &p.0);
                },
                PaintOrColor::Color(c) => unsafe {
                    sys::nvgFillColor(self.ctx, &c.0);
                },
            }
        }
        let path = Path { ctx: self.ctx };
        let r = f(&path);
        if style.stroke.is_some() {
            unsafe {
                sys::nvgStroke(self.ctx);
            }
        }
        if style.fill.is_some() {
            unsafe {
                sys::nvgFill(self.ctx);
            }
        }
        /* unsafe { sys::nvgRestore(self.ctx); } */
        r
    }
}

/// A path.
pub struct Path {
    ctx: *mut sys::NVGcontext,
}

impl Path {
    /// Starts a new sub-path with the specified point as its first point.
    pub fn move_to(&self, x: f32, y: f32) {
        unsafe {
            sys::nvgMoveTo(self.ctx, x, y);
        }
    }

    /// Adds a line segment from the last point in the path to the specified point.
    pub fn line_to(&self, x: f32, y: f32) {
        unsafe {
            sys::nvgLineTo(self.ctx, x, y);
        }
    }

    /// Adds a cubic bezier segment from the last point in the path via two control
    /// points to the specified point.
    pub fn bezier_to(&self, c1x: f32, c1y: f32, c2x: f32, c2y: f32, x: f32, y: f32) {
        unsafe {
            sys::nvgBezierTo(self.ctx, c1x, c1y, c2x, c2y, x, y);
        }
    }

    /// Adds a quadratic bezier segment from the last point in the path via a control
    /// point to the specified point.
    pub fn quad_to(&self, cx: f32, cy: f32, x: f32, y: f32) {
        unsafe {
            sys::nvgQuadTo(self.ctx, cx, cy, x, y);
        }
    }

    /// Adds an arc segment at the corner defined by the last path point and two
    /// specified points.
    pub fn arc_to(&self, x1: f32, y1: f32, x2: f32, y2: f32, radius: f32) {
        unsafe {
            sys::nvgArcTo(self.ctx, x1, y1, x2, y2, radius);
        }
    }

    /// Closes the current sub-path with a line segment.
    pub fn close_path(&self) {
        unsafe {
            sys::nvgClosePath(self.ctx);
        }
    }

    /// Creates a new circle arc shaped sub-path. The arc center is at (`cx`, `cy`),
    /// the arc radius is `r`, and the arc is drawn from angle `a0` to `a1` and swept
    /// in direction `dir`. Angles are in radians.
    pub fn arc(&self, cx: f32, cy: f32, r: f32, a0: f32, a1: f32, dir: Direction) {
        unsafe {
            sys::nvgArc(self.ctx, cx, cy, r, a0, a1, dir.to_sys() as _);
        }
    }

    /// Creates a new oval arc shaped sub-path. The arc center is at (`cx`, `cy`),
    /// the arc radii are (`rx`, `ry`), and the arc is drawn from angle `a0` to `a1`
    /// and swept in direction `dir`.
    #[allow(clippy::too_many_arguments)]
    pub fn elliptical_arc(
        &self,
        cx: f32,
        cy: f32,
        rx: f32,
        ry: f32,
        a0: f32,
        a1: f32,
        dir: Direction,
    ) {
        unsafe {
            sys::nvgEllipticalArc(self.ctx, cx, cy, rx, ry, a0, a1, dir.to_sys() as _);
        }
    }

    /// Creates a new rectangle shaped sub-path.
    pub fn rect(&self, x: f32, y: f32, w: f32, h: f32) {
        unsafe {
            sys::nvgRect(self.ctx, x, y, w, h);
        }
    }

    /// Creates a new rectangle shaped sub-path with rounded corners.
    #[allow(clippy::many_single_char_names)]
    pub fn rounded_rect(&self, x: f32, y: f32, w: f32, h: f32, r: f32) {
        unsafe {
            sys::nvgRoundedRect(self.ctx, x, y, w, h, r);
        }
    }

    /// Creates a new rounded rectangle shaped sub-path with varying radii for each
    /// corner.
    #[allow(clippy::too_many_arguments)]
    #[allow(clippy::many_single_char_names)]
    pub fn rounded_rect_varying(
        &self,
        x: f32,
        y: f32,
        w: f32,
        h: f32,
        rad_top_left: f32,
        rad_top_right: f32,
        rad_bottom_right: f32,
        rad_bottom_left: f32,
    ) {
        unsafe {
            sys::nvgRoundedRectVarying(
                self.ctx,
                x,
                y,
                w,
                h,
                rad_top_left,
                rad_top_right,
                rad_bottom_right,
                rad_bottom_left,
            );
        }
    }

    /// Creates a new ellipse shaped sub-path.
    pub fn ellipse(&self, cx: f32, cy: f32, rx: f32, ry: f32) {
        unsafe {
            sys::nvgEllipse(self.ctx, cx, cy, rx, ry);
        }
    }

    /// Creates a new circle shaped sub-path.
    pub fn circle(&self, cx: f32, cy: f32, r: f32) {
        unsafe {
            sys::nvgCircle(self.ctx, cx, cy, r);
        }
    }

    // TODO: fill
}

/// Winding direction.
#[derive(Debug, Clone, Copy)]
pub enum Direction {
    /// Winding for holes.
    Clockwise,
    /// Winding for solid shapes.
    CounterClockwise,
}

impl Direction {
    fn to_sys(self) -> sys::NVGwinding {
        match self {
            Direction::Clockwise => sys::NVGwinding_NVG_CW,
            Direction::CounterClockwise => sys::NVGwinding_NVG_CCW,
        }
    }
}

#[derive(Debug)]
#[doc(hidden)]
pub enum PaintOrColor {
    Paint(Paint),
    Color(Color),
}

impl From<Paint> for PaintOrColor {
    fn from(p: Paint) -> PaintOrColor {
        PaintOrColor::Paint(p)
    }
}

impl From<Color> for PaintOrColor {
    fn from(c: Color) -> PaintOrColor {
        PaintOrColor::Color(c)
    }
}

/// The stroke and/or fill which will be applied to a path.
#[derive(Debug, Default)]
pub struct Style {
    stroke: Option<PaintOrColor>,
    fill: Option<PaintOrColor>,
}

impl Style {
    /// Set the stroke of this style.
    pub fn stroke<T: Into<PaintOrColor>>(mut self, stroke: T) -> Self {
        self.stroke = Some(stroke.into());
        self
    }

    /// Set the fill of this style.
    pub fn fill<T: Into<PaintOrColor>>(mut self, fill: T) -> Self {
        self.fill = Some(fill.into());
        self
    }
}

/// Colors in NanoVG are stored as unsigned ints in ABGR format.
#[derive(Debug)]
pub struct Color(sys::NVGcolor);

impl Color {
    /// Returns a color value from red, green and blue values. Alpha will be set to 255 (1.0).
    pub fn from_rgb(r: u8, g: u8, b: u8) -> Self {
        Self(unsafe { sys::nvgRGB(r, g, b) })
    }

    /// Returns a color value from red, green and blue values. Alpha will be set to 1.0.
    pub fn from_rgbf(r: f32, g: f32, b: f32) -> Self {
        Self(unsafe { sys::nvgRGBf(r, g, b) })
    }

    /// Returns a color value from red, green, blue and alpha values.
    pub fn from_rgba(r: u8, g: u8, b: u8, a: u8) -> Self {
        Self(unsafe { sys::nvgRGBA(r, g, b, a) })
    }

    /// Returns a color value from red, green, blue and alpha values.
    pub fn from_rgbaf(r: f32, g: f32, b: f32, a: f32) -> Self {
        Self(unsafe { sys::nvgRGBAf(r, g, b, a) })
    }

    /// Returns a color value specified by hue, saturation and lightness.
    /// HSL values are all in the range [0..1]; alpha will be set to 255.
    pub fn from_hsv(h: f32, s: f32, l: f32) -> Self {
        Self(unsafe { sys::nvgHSL(h, s, l) })
    }

    /// Returns a color value specified by hue, saturation, lightness and alpha.
    /// HSL values are all in the range [0..1]; alpha is in the range [0..255].
    pub fn from_hsva(h: f32, s: f32, l: f32, a: u8) -> Self {
        Self(unsafe { sys::nvgHSLA(h, s, l, a) })
    }
}

/// NanoVG supports four types of paints: linear gradient, box gradient, radial
/// gradient and image pattern. These can be used as paints for strokes and fills.
#[derive(Debug)]
pub struct Paint(sys::NVGpaint);

impl Paint {
    /// Creates and returns an image pattern. Parameters (`x`, `y`) specify the top-left
    /// location of the image pattern, (`w`, `h`) is the size of the image, `angle` is
    /// the rotation around the top-left corner, `image` is the image to render, and
    /// `alpha` is the transparency applied to the image.
    pub fn from_image(
        image: &Image,
        x: f32,
        y: f32,
        w: f32,
        h: f32,
        angle: f32,
        alpha: f32,
    ) -> Paint {
        Paint(unsafe { sys::nvgImagePattern(image.ctx, x, y, w, h, angle, image.handle, alpha) })
    }
}

/// A font handle.
pub struct Font {
    handle: std::os::raw::c_int,
}

/// An image handle.
pub struct Image {
    ctx: *mut sys::NVGcontext,
    handle: std::os::raw::c_int,
}

impl Image {
    /// Returns the dimensions of a created image.
    pub fn size(&self) -> (usize, usize) {
        let mut w = 0;
        let mut h = 0;
        unsafe {
            sys::nvgImageSize(self.ctx, self.handle, &mut w, &mut h);
        }
        (w as usize, h as usize)
    }
}

impl Drop for Image {
    fn drop(&mut self) {
        unsafe {
            sys::nvgDeleteImage(self.ctx, self.handle);
        }
    }
}
random_line_split
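The nvg.rs row above describes a small immediate-mode drawing API. The following is a minimal usage sketch, not taken from the source: the `render` function, the frame size, and the assumption that a valid `sys::FsContext` is supplied by the surrounding gauge code are all illustrative.

// Hypothetical caller; `fs_ctx` must come from the surrounding gauge code.
fn render(fs_ctx: sys::FsContext) {
    if let Some(ctx) = Context::create(fs_ctx) {
        ctx.draw_frame(640, 480, |frame| {
            // Stroke a red circle using the builder methods defined above.
            let style = Style::default().stroke(Color::from_rgb(255, 0, 0));
            frame.draw_path(&style, |path| {
                path.circle(320.0, 240.0, 100.0);
                Ok(())
            })
        });
    }
}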
actix.rs
// See the License for the specific language governing permissions and
// limitations under the License.

//! Actix-web API backend.
//!
//! [Actix-web](https://github.com/actix/actix-web) is an asynchronous backend
//! for HTTP API, based on the [Actix](https://github.com/actix/actix) framework.

pub use actix_web::middleware::cors::Cors;

use actix::{Actor, System};
use actix_web::{
    error::ResponseError, http::header, AsyncResponder, FromRequest, HttpMessage, HttpResponse,
    Query,
};
use failure::{bail, ensure, format_err, Error};
use futures::{future::Either, sync::mpsc, Future, IntoFuture, Stream};
use log::trace;
use serde::{
    de::{self, DeserializeOwned},
    ser, Serialize,
};

use std::{
    fmt,
    net::SocketAddr,
    result,
    str::FromStr,
    sync::Arc,
    thread::{self, JoinHandle},
};

use crate::api::{
    self,
    manager::{ApiManager, UpdateEndpoints},
    Actuality, ApiAccess, ApiAggregator, ApiBackend, ApiScope, EndpointMutability,
    ExtendApiBackend, FutureResult, NamedWith,
};

/// Type alias for the concrete `actix-web` HTTP response.
pub type FutureResponse = actix_web::FutureResponse<HttpResponse, actix_web::Error>;
/// Type alias for the concrete `actix-web` HTTP request.
pub type HttpRequest = actix_web::HttpRequest<()>;
/// Type alias for the inner `actix-web` HTTP requests handler.
pub type RawHandler = dyn Fn(HttpRequest) -> FutureResponse + 'static + Send + Sync;
/// Type alias for the `actix-web::App`.
pub type App = actix_web::App<()>;
/// Type alias for the `actix-web::App` configuration.
pub type AppConfig = Arc<dyn Fn(App) -> App + 'static + Send + Sync>;

/// Raw `actix-web` backend requests handler.
#[derive(Clone)]
pub struct RequestHandler {
    /// Endpoint name.
    pub name: String,
    /// Endpoint HTTP method.
    pub method: actix_web::http::Method,
    /// Inner handler.
    pub inner: Arc<RawHandler>,
}

impl fmt::Debug for RequestHandler {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("RequestHandler")
            .field("name", &self.name)
            .field("method", &self.method)
            .finish()
    }
}

/// API builder for the `actix-web` backend.
#[derive(Debug, Clone, Default)]
pub struct ApiBuilder {
    handlers: Vec<RequestHandler>,
}

impl ApiBuilder {
    /// Constructs a new backend builder instance.
    pub fn new() -> Self {
        Self::default()
    }
}

impl ApiBackend for ApiBuilder {
    type Handler = RequestHandler;
    type Backend = actix_web::Scope<()>;

    fn raw_handler(&mut self, handler: Self::Handler) -> &mut Self {
        self.handlers.push(handler);
        self
    }

    fn wire(&self, mut output: Self::Backend) -> Self::Backend {
        for handler in self.handlers.clone() {
            let inner = handler.inner;
            output = output.route(&handler.name, handler.method.clone(), move |request| {
                inner(request)
            });
        }
        output
    }
}

impl ExtendApiBackend for actix_web::Scope<()> {
    fn extend<'a, I>(mut self, items: I) -> Self
    where
        I: IntoIterator<Item = (&'a str, &'a ApiScope)>,
    {
        for item in items {
            self = self.nested(&item.0, move |scope| item.1.actix_backend.wire(scope))
        }
        self
    }
}

impl ResponseError for api::Error {
    fn error_response(&self) -> HttpResponse {
        match self {
            api::Error::BadRequest(err) => HttpResponse::BadRequest().body(err.to_string()),
            api::Error::InternalError(err) => {
                HttpResponse::InternalServerError().body(err.to_string())
            }
            api::Error::Io(err) => HttpResponse::InternalServerError().body(err.to_string()),
            api::Error::Storage(err) => HttpResponse::InternalServerError().body(err.to_string()),
            api::Error::Gone => HttpResponse::Gone().finish(),
            api::Error::MovedPermanently(new_location) => HttpResponse::MovedPermanently()
                .header(header::LOCATION, new_location.clone())
                .finish(),
            api::Error::NotFound(err) => HttpResponse::NotFound().body(err.to_string()),
            api::Error::Unauthorized => HttpResponse::Unauthorized().finish(),
        }
    }
}

/// Creates an `HttpResponse` object from the provided JSON value.
/// Depending on the `actuality` parameter value, a warning about the endpoint
/// being deprecated can be added.
fn json_response<T: Serialize>(actuality: Actuality, json_value: T) -> HttpResponse {
    let mut response = HttpResponse::Ok();

    if let Actuality::Deprecated {
        ref discontinued_on,
        ref description,
    } = actuality
    {
        // There is a proposal for creating a special deprecation header within HTTP,
        // but currently it's only a draft. So the conventional way to notify an API
        // user about endpoint deprecation is setting the `Warning` header.
        let expiration_note = match discontinued_on {
            // Date is formatted according to the HTTP-date format.
            Some(date) => format!(
                "The old API is maintained until {}.",
                date.format("%a, %d %b %Y %T GMT")
            ),
            None => "Currently there is no specific date for disabling this endpoint.".into(),
        };

        let mut warning_text = format!(
            "Deprecated API: This endpoint is deprecated, \
             see the service documentation to find an alternative. \
             {}",
            expiration_note
        );

        if let Some(description) = description {
            warning_text = format!("{} Additional information: {}.", warning_text, description);
        }

        let warning_string = create_warning_header(&warning_text);

        response.header(header::WARNING, warning_string);
    }

    response.json(json_value)
}

/// Formats the warning string according to the following format:
/// "<warn-code> <warn-agent> \"<warn-text>\" [<warn-date>]".
/// <warn-code> in our case is 299, which means a miscellaneous persistent warning.
/// <warn-agent> is optional, so we set it to "-".
/// <warn-text> is a warning description, which is taken as the only argument.
/// <warn-date> is not required.
/// For details see RFC 7234, section 5.5: Warning.
fn create_warning_header(warning_text: &str) -> String {
    format!("299 - \"{}\"", warning_text)
}

impl From<EndpointMutability> for actix_web::http::Method {
    fn from(mutability: EndpointMutability) -> Self {
        match mutability {
            EndpointMutability::Immutable => actix_web::http::Method::GET,
            EndpointMutability::Mutable => actix_web::http::Method::POST,
        }
    }
}

impl<Q, I, F> From<NamedWith<Q, I, api::Result<I>, F>> for RequestHandler
where
    F: Fn(Q) -> api::Result<I> + 'static + Send + Sync + Clone,
    Q: DeserializeOwned + 'static,
    I: Serialize + 'static,
{
    fn from(f: NamedWith<Q, I, api::Result<I>, F>) -> Self {
        // Convert a handler that returns a `Result` into a handler that returns
        // a `FutureResult`.
        let handler = f.inner.handler;
        let future_endpoint = move |query| -> Box<dyn Future<Item = I, Error = api::Error>> {
            let future = handler(query).into_future();
            Box::new(future)
        };
        let named_with_future = NamedWith::new(f.name, future_endpoint, f.mutability);

        // Then we can create a `RequestHandler` with the `From` specialization
        // for the future result.
        RequestHandler::from(named_with_future)
    }
}

/// Takes an `HttpRequest` as a parameter and extracts the query:
/// - If the request is immutable, the query is parsed from the query string,
/// - If the request is mutable, the query is parsed from the request body as JSON.
fn extract_query<Q>(
    request: HttpRequest,
    mutability: EndpointMutability,
) -> impl Future<Item = Q, Error = actix_web::error::Error>
where
    Q: DeserializeOwned + 'static,
{
    match mutability {
        EndpointMutability::Immutable => {
            let future = Query::from_request(&request, &Default::default())
                .map(Query::into_inner)
                .map_err(From::from)
                .into_future();
            Either::A(future)
        }
        EndpointMutability::Mutable => {
            let future = request.json().from_err();
            Either::B(future)
        }
    }
}

impl<Q, I, F> From<NamedWith<Q, I, FutureResult<I>, F>> for RequestHandler
where
    F: Fn(Q) -> FutureResult<I> + 'static + Clone + Send + Sync,
    Q: DeserializeOwned + 'static,
    I: Serialize + 'static,
{
    fn from(f: NamedWith<Q, I, FutureResult<I>, F>) -> Self {
        let handler = f.inner.handler;
        let actuality = f.inner.actuality;
        let mutability = f.mutability;
        let index = move |request: HttpRequest| -> FutureResponse {
            let handler = handler.clone();
            let actuality = actuality.clone();
            extract_query(request, mutability)
                .and_then(move |query| {
                    handler(query)
                        .map(|value| json_response(actuality, value))
                        .map_err(From::from)
                })
                .responder()
        };

        Self {
            name: f.name,
            method: f.mutability.into(),
            inner: Arc::from(index) as Arc<RawHandler>,
        }
    }
}

/// Creates an `actix_web::App` for the given aggregator and runtime configuration.
pub(crate) fn create_app(aggregator: &ApiAggregator, runtime_config: ApiRuntimeConfig) -> App {
    let app_config = runtime_config.app_config;
    let access = runtime_config.access;
    let mut app = App::new();
    app = app.scope("api", |scope| aggregator.extend_backend(access, scope));
    if let Some(app_config) = app_config {
        app = app_config(app);
    }
    app
}

/// Configuration parameters for the `App` runtime.
#[derive(Clone)]
pub struct ApiRuntimeConfig {
    /// The socket address to bind.
    pub listen_address: SocketAddr,
    /// API access level.
    pub access: ApiAccess,
    /// Optional App configuration.
    pub app_config: Option<AppConfig>,
}

impl ApiRuntimeConfig {
    /// Creates an API runtime configuration for the given address and access level.
    pub fn new(listen_address: SocketAddr, access: ApiAccess) -> Self {
        Self {
            listen_address,
            access,
            app_config: Default::default(),
        }
    }
}

impl fmt::Debug for ApiRuntimeConfig {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("ApiRuntimeConfig")
            .field("listen_address", &self.listen_address)
            .field("access", &self.access)
            .field("app_config", &self.app_config.as_ref().map(drop))
            .finish()
    }
}

/// Configuration parameters for the actix system runtime.
#[derive(Debug, Clone)]
pub struct SystemRuntimeConfig {
    /// Active API runtimes.
    pub api_runtimes: Vec<ApiRuntimeConfig>,
    /// API aggregator.
    pub api_aggregator: ApiAggregator,
    /// The interval in milliseconds between attempts to restart the HTTP-server
    /// if a restart fails.
    pub server_restart_retry_timeout: u64,
    /// The maximum number of attempts to restart the HTTP-server if a restart fails.
    pub server_restart_max_retries: u16,
}

/// Actix system runtime handle.
pub struct SystemRuntime {
    system_thread: JoinHandle<result::Result<(), Error>>,
    system: System,
}

impl SystemRuntimeConfig {
    /// Starts the actix system runtime along with all web runtimes.
    pub fn start(
        self,
        endpoints_rx: mpsc::Receiver<UpdateEndpoints>,
    ) -> result::Result<SystemRuntime, Error> {
        // Creates a system thread.
        let (system_tx, system_rx) = mpsc::unbounded();
        let system_thread = thread::spawn(move || -> result::Result<(), Error> {
            let system = System::new("http-server");
            system_tx.unbounded_send(System::current())?;
            ApiManager::new(self, endpoints_rx).start();

            // Starts the actix-web runtime.
            let code = system.run();
            trace!("Actix runtime finished with code {}", code);
            ensure!(
                code == 0,
                "Actix runtime finished with the non-zero error code: {}",
                code
            );
            Ok(())
        });

        // Receives addresses of runtime items.
        let system = system_rx
            .wait()
            .next()
            .ok_or_else(|| format_err!("Unable to receive actix system handle"))?
            .map_err(|()| format_err!("Unable to receive actix system handle"))?;

        Ok(SystemRuntime {
            system_thread,
            system,
        })
    }
}

impl SystemRuntime {
    /// Stops the actix system runtime along with all web runtimes.
    pub fn stop(self) -> result::Result<(), Error> {
        // Stop the actix system runtime.
        self.system.stop();
        self.system_thread.join().map_err(|e| {
            format_err!(
                "Unable to join actix web api thread, an error occurred: {:?}",
                e
            )
        })?
    }
}

impl fmt::Debug for SystemRuntime {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("SystemRuntime").finish()
    }
}

/// CORS header specification.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum AllowOrigin {
    /// Allows access from any host.
    Any,
    /// Allows access only from the specified hosts.
    Whitelist(Vec<String>),
}

impl ser::Serialize for AllowOrigin {
    fn serialize<S>(&self, serializer: S) -> result::Result<S::Ok, S::Error>
    where
        S: ser::Serializer,
    {
        match *self {
            AllowOrigin::Any => "*".serialize(serializer),
            AllowOrigin::Whitelist(ref hosts) => {
                if hosts.len() == 1 {
                    hosts[0].serialize(serializer)
                } else {
                    hosts.serialize(serializer)
                }
            }
        }
    }
}

impl<'de> de::Deserialize<'de> for AllowOrigin {
    fn deserialize<D>(d: D) -> result::Result<Self, D::Error>
    where
        D: de::Deserializer<'de>,
    {
        struct Visitor;

        impl<'de> de::Visitor<'de> for Visitor {
            type Value = AllowOrigin;

            fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
                formatter.write_str("a list of hosts or \"*\"")
            }

            fn visit_str<E>(self, value: &str) -> result::Result<AllowOrigin, E>
            where
                E: de::Error,
            {
                match value {
                    "*" => Ok(AllowOrigin::Any),
                    _ => Ok(AllowOrigin::Whitelist(vec![value.to_string()])),
                }
            }

            fn visit_seq<A>(self, seq: A) -> result::Result<AllowOrigin, A::Error>
            where
                A: de::SeqAccess<'de>,
            {
                let hosts =
                    de::Deserialize::deserialize(de::value::SeqAccessDeserializer::new(seq))?;
                Ok(AllowOrigin::Whitelist(hosts))
            }
        }

        d.deserialize_any(Visitor)
    }
}

impl FromStr for AllowOrigin {
    type Err = Error;

    fn from_str(s: &str) -> result::Result<Self, Self::Err> {
        if s == "*" {
            return Ok(AllowOrigin::Any);
        }

        let v: Vec<_> = s
            .split(',')
            .map(|s| s.trim().to_string())
            .filter(|s| !s.is_empty())
            .collect();
        if v.is_empty() {
            bail!("Invalid AllowOrigin::Whitelist value");
        }

        Ok(AllowOrigin::Whitelist(v))
    }
}

impl<'a> From<&'a AllowOrigin> for Cors {
    fn from(origin: &'a AllowOrigin) -> Self {
        match *origin {
            AllowOrigin::Any => Self::build().finish(),
            AllowOrigin::Whitelist(ref hosts) => {
                let mut builder = Self::build();
                for host in hosts {
                    builder.allowed_origin(host);
                }
                builder.finish()
            }
        }
    }
}

impl From<AllowOrigin> for Cors {
    fn from(origin: AllowOrigin) -> Self {
        Self::from(&origin)
    }
}

#[cfg(test)]
mod tests {
    use pretty_assertions::assert_eq;

    use super::*;

    #[test]
    fn allow_origin_from_str() {
        fn check(text: &str, expected: AllowOrigin) {
            let from_str = AllowOrigin::from_str(text).unwrap();
            assert_eq!(from_str, expected);
        }

        check(r#"*"#, AllowOrigin::Any);
        check(
            r#"http://example.com"#,
            AllowOrigin::Whitelist(vec!["http://example.com".to_string()]),
        );
        check(
            r#"http://a.org, http://b.org"#,
            AllowOrigin::Whitelist(vec!["http://a.org".to_string(), "http://b.org".to_string()]),
        );
        check(
            r#"http://a.org, http://b.org, "#,
            AllowOrigin::Whitelist(vec!["http://a.org".to_string(), "http://b.org".to_string()]),
        );
        check(
            r#"http://a.org,http://b.org"#,
            AllowOrigin::Whitelist(vec!["http://a.org".to_string(), "http://b.org".to_string()]),
        );
    }

    fn assert_responses_eq(left: HttpResponse, right: HttpResponse) {
        assert_eq!(left.status(), right.status());
        assert_eq!(left.headers(), right.headers());
        assert_eq!(left.body(), right.body());
    }

    #[test]
    fn test_create_warning_header() {
        assert_eq!(
            &create_warning_header("Description"),
            "299 - \"Description\""
        );
    }

    #[test]
    fn json_responses() {
        use chrono::TimeZone;

        let actual_response = json_response(Actuality::Actual, 123);
        assert_responses_eq(actual_response, HttpResponse::Ok().json(123));

        let deprecated_response_no_deadline = json_response(
            Actuality::Deprecated {
                discontinued_on: None,
                description: None,
            },
            123,
        );
        let expected_warning_text = "Deprecated API: This endpoint is deprecated, \
             see the service documentation to find an alternative. \
             Currently there is no specific date for disabling this endpoint.";
        let expected_warning = create_warning_header(expected_warning_text);
        assert_responses_eq(
            deprecated_response_no_deadline,
            HttpResponse::Ok()
                .header(header::WARNING, expected_warning)
                .json(123),
        );

        let description = "Docs can be found on docs.rs".to_owned();
        let deprecated_response_with_description = json_response(
            Actuality::Deprecated {
                discontinued_on: None,
                description: Some(description),
            },
            123,
        );
        let expected_warning_text = "Deprecated API: This endpoint is deprecated, \
             see the service documentation to find an alternative. \
             Currently there is no specific date for disabling this endpoint. \
             Additional information: Docs can be found on docs.rs.";
        let expected_warning = create_warning_header(expected_warning_text);
        assert_responses_eq(
            deprecated_response_with_description,
            HttpResponse::Ok()
                .header(header::WARNING, expected_warning)
                .json(123),
        );

        let deadline = chrono::Utc.ymd(2020, 12, 31).and_hms(23, 59, 59);

        let deprecated_response_deadline = json_response(
            Actuality::Deprecated {
                discontinued_on: Some(deadline),
identifier_body
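The `FromStr` impl above accepts either "*" or a comma-separated host list, and the `From` impls turn the parsed value into CORS middleware. A small sketch of how a configuration string might be wired through (the `cors_from_config` function and the `raw` parameter are illustrative; the snippet relies on the module's existing imports):

// Parse a raw config value into CORS middleware.
fn cors_from_config(raw: &str) -> Result<Cors, Error> {
    // "*" yields AllowOrigin::Any; anything else becomes a whitelist.
    let origin = AllowOrigin::from_str(raw)?;
    Ok(Cors::from(&origin))
}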
actix.rs
. // See the License for the specific language governing permissions and // limitations under the License. //! Actix-web API backend. //! //! [Actix-web](https://github.com/actix/actix-web) is an asynchronous backend //! for HTTP API, based on the [Actix](https://github.com/actix/actix) framework. pub use actix_web::middleware::cors::Cors; use actix::{Actor, System}; use actix_web::{ error::ResponseError, http::header, AsyncResponder, FromRequest, HttpMessage, HttpResponse, Query, }; use failure::{bail, ensure, format_err, Error}; use futures::{future::Either, sync::mpsc, Future, IntoFuture, Stream}; use log::trace; use serde::{ de::{self, DeserializeOwned}, ser, Serialize, }; use std::{ fmt, net::SocketAddr, result, str::FromStr, sync::Arc, thread::{self, JoinHandle}, }; use crate::api::{ self, manager::{ApiManager, UpdateEndpoints}, Actuality, ApiAccess, ApiAggregator, ApiBackend, ApiScope, EndpointMutability, ExtendApiBackend, FutureResult, NamedWith, }; /// Type alias for the concrete `actix-web` HTTP response. pub type FutureResponse = actix_web::FutureResponse<HttpResponse, actix_web::Error>; /// Type alias for the concrete `actix-web` HTTP request. pub type HttpRequest = actix_web::HttpRequest<()>; /// Type alias for the inner `actix-web` HTTP requests handler. pub type RawHandler = dyn Fn(HttpRequest) -> FutureResponse +'static + Send + Sync; /// Type alias for the `actix-web::App`. pub type App = actix_web::App<()>; /// Type alias for the `actix-web::App` configuration. pub type AppConfig = Arc<dyn Fn(App) -> App +'static + Send + Sync>; /// Raw `actix-web` backend requests handler. #[derive(Clone)] pub struct RequestHandler { /// Endpoint name. pub name: String, /// Endpoint HTTP method. pub method: actix_web::http::Method, /// Inner handler. pub inner: Arc<RawHandler>, } impl fmt::Debug for RequestHandler { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("RequestHandler") .field("name", &self.name) .field("method", &self.method) .finish() } } /// API builder for the `actix-web` backend. #[derive(Debug, Clone, Default)] pub struct ApiBuilder { handlers: Vec<RequestHandler>, } impl ApiBuilder { /// Constructs a new backend builder instance. 
pub fn new() -> Self { Self::default() } } impl ApiBackend for ApiBuilder { type Handler = RequestHandler; type Backend = actix_web::Scope<()>; fn raw_handler(&mut self, handler: Self::Handler) -> &mut Self { self.handlers.push(handler); self } fn wire(&self, mut output: Self::Backend) -> Self::Backend { for handler in self.handlers.clone() { let inner = handler.inner; output = output.route(&handler.name, handler.method.clone(), move |request| { inner(request) }); } output } } impl ExtendApiBackend for actix_web::Scope<()> { fn extend<'a, I>(mut self, items: I) -> Self where I: IntoIterator<Item = (&'a str, &'a ApiScope)>, { for item in items { self = self.nested(&item.0, move |scope| item.1.actix_backend.wire(scope)) } self } } impl ResponseError for api::Error { fn error_response(&self) -> HttpResponse { match self { api::Error::BadRequest(err) => HttpResponse::BadRequest().body(err.to_string()), api::Error::InternalError(err) => { HttpResponse::InternalServerError().body(err.to_string()) } api::Error::Io(err) => HttpResponse::InternalServerError().body(err.to_string()), api::Error::Storage(err) => HttpResponse::InternalServerError().body(err.to_string()), api::Error::Gone => HttpResponse::Gone().finish(), api::Error::MovedPermanently(new_location) => HttpResponse::MovedPermanently() .header(header::LOCATION, new_location.clone()) .finish(), api::Error::NotFound(err) => HttpResponse::NotFound().body(err.to_string()), api::Error::Unauthorized => HttpResponse::Unauthorized().finish(), } } } /// Creates a `HttpResponse` object from the provided JSON value. /// Depending on the `actuality` parameter value, the warning about endpoint /// being deprecated can be added. fn json_response<T: Serialize>(actuality: Actuality, json_value: T) -> HttpResponse { let mut response = HttpResponse::Ok(); if let Actuality::Deprecated { ref discontinued_on, ref description, } = actuality { // There is a proposal for creating special deprecation header within HTTP, // but currently it's only a draft. So the conventional way to notify API user // about endpoint deprecation is setting the `Warning` header. let expiration_note = match discontinued_on { // Date is formatted according to HTTP-date format. Some(date) => format!( "The old API is maintained until {}.", date.format("%a, %d %b %Y %T GMT") ), None => "Currently there is no specific date for disabling this endpoint.".into(), }; let mut warning_text = format!( "Deprecated API: This endpoint is deprecated, \ see the service documentation to find an alternative. \ {}", expiration_note ); if let Some(description) = description { warning_text = format!("{} Additional information: {}.", warning_text, description); } let warning_string = create_warning_header(&warning_text); response.header(header::WARNING, warning_string); } response.json(json_value) } /// Formats warning string according to the following format: /// "<warn-code> <warn-agent> \"<warn-text>\" [<warn-date>]" /// <warn-code> in our case is 299, which means a miscellaneous persistent warning. /// <warn-agent> is optional, so we set it to "-". /// <warn-text> is a warning description, which is taken as an only argument. /// <warn-date> is not required. /// For details you can see RFC 7234, section 5.5: Warning. 
fn create_warning_header(warning_text: &str) -> String { format!("299 - \"{}\"", warning_text) } impl From<EndpointMutability> for actix_web::http::Method { fn from(mutability: EndpointMutability) -> Self { match mutability { EndpointMutability::Immutable => actix_web::http::Method::GET, EndpointMutability::Mutable => actix_web::http::Method::POST, } } } impl<Q, I, F> From<NamedWith<Q, I, api::Result<I>, F>> for RequestHandler where F: Fn(Q) -> api::Result<I> +'static + Send + Sync + Clone, Q: DeserializeOwned +'static, I: Serialize +'static, { fn from(f: NamedWith<Q, I, api::Result<I>, F>) -> Self { // Convert handler that returns a `Result` into handler that will return `FutureResult`. let handler = f.inner.handler; let future_endpoint = move |query| -> Box<dyn Future<Item = I, Error = api::Error>> { let future = handler(query).into_future(); Box::new(future) }; let named_with_future = NamedWith::new(f.name, future_endpoint, f.mutability); // Then we can create a `RequestHandler` with the `From` specialization for future result. RequestHandler::from(named_with_future) } } /// Takes `HttpRequest` as a parameter and extracts query: /// - If request is immutable, the query is parsed from query string, /// - If request is mutable, the query is parsed from the request body as JSON. fn extract_query<Q>( request: HttpRequest, mutability: EndpointMutability, ) -> impl Future<Item = Q, Error = actix_web::error::Error> where Q: DeserializeOwned +'static, { match mutability { EndpointMutability::Immutable => { let future = Query::from_request(&request, &Default::default()) .map(Query::into_inner) .map_err(From::from) .into_future(); Either::A(future) } EndpointMutability::Mutable => { let future = request.json().from_err(); Either::B(future) } } } impl<Q, I, F> From<NamedWith<Q, I, FutureResult<I>, F>> for RequestHandler where F: Fn(Q) -> FutureResult<I> +'static + Clone + Send + Sync, Q: DeserializeOwned +'static, I: Serialize +'static, { fn from(f: NamedWith<Q, I, FutureResult<I>, F>) -> Self { let handler = f.inner.handler; let actuality = f.inner.actuality; let mutability = f.mutability; let index = move |request: HttpRequest| -> FutureResponse { let handler = handler.clone(); let actuality = actuality.clone(); extract_query(request, mutability) .and_then(move |query| { handler(query) .map(|value| json_response(actuality, value)) .map_err(From::from) }) .responder() }; Self { name: f.name, method: f.mutability.into(), inner: Arc::from(index) as Arc<RawHandler>, } } } /// Creates `actix_web::App` for the given aggregator and runtime configuration. pub(crate) fn create_app(aggregator: &ApiAggregator, runtime_config: ApiRuntimeConfig) -> App { let app_config = runtime_config.app_config; let access = runtime_config.access; let mut app = App::new(); app = app.scope("api", |scope| aggregator.extend_backend(access, scope)); if let Some(app_config) = app_config { app = app_config(app); } app } /// Configuration parameters for the `App` runtime. #[derive(Clone)] pub struct ApiRuntimeConfig { /// The socket address to bind. pub listen_address: SocketAddr, /// API access level. pub access: ApiAccess, /// Optional App configuration. pub app_config: Option<AppConfig>, } impl ApiRuntimeConfig { /// Creates API runtime configuration for the given address and access level. 
pub fn new(listen_address: SocketAddr, access: ApiAccess) -> Self { Self { listen_address, access, app_config: Default::default(), } } } impl fmt::Debug for ApiRuntimeConfig { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("ApiRuntimeConfig") .field("listen_address", &self.listen_address) .field("access", &self.access) .field("app_config", &self.app_config.as_ref().map(drop)) .finish() } } /// Configuration parameters for the actix system runtime. #[derive(Debug, Clone)] pub struct SystemRuntimeConfig { /// Active API runtimes. pub api_runtimes: Vec<ApiRuntimeConfig>, /// API aggregator. pub api_aggregator: ApiAggregator, /// The interval in milliseconds between attempts of restarting HTTP-server in case /// the server failed to restart pub server_restart_retry_timeout: u64, /// The attempts counts of restarting HTTP-server in case the server failed to restart pub server_restart_max_retries: u16, } /// Actix system runtime handle. pub struct SystemRuntime { system_thread: JoinHandle<result::Result<(), Error>>, system: System, } impl SystemRuntimeConfig { /// Starts actix system runtime along with all web runtimes. pub fn start( self, endpoints_rx: mpsc::Receiver<UpdateEndpoints>, ) -> result::Result<SystemRuntime, Error> { // Creates a system thread. let (system_tx, system_rx) = mpsc::unbounded(); let system_thread = thread::spawn(move || -> result::Result<(), Error> { let system = System::new("http-server"); system_tx.unbounded_send(System::current())?; ApiManager::new(self, endpoints_rx).start(); // Starts actix-web runtime. let code = system.run(); trace!("Actix runtime finished with code {}", code); ensure!( code == 0, "Actix runtime finished with the non zero error code: {}", code ); Ok(()) }); // Receives addresses of runtime items. let system = system_rx .wait() .next() .ok_or_else(|| format_err!("Unable to receive actix system handle"))? .map_err(|()| format_err!("Unable to receive actix system handle"))?; Ok(SystemRuntime { system_thread, system, }) } } impl SystemRuntime { /// Stops the actix system runtime along with all web runtimes. pub fn stop(self) -> result::Result<(), Error> { // Stop actix system runtime. self.system.stop(); self.system_thread.join().map_err(|e| { format_err!( "Unable to join actix web api thread, an error occurred: {:?}", e ) })? } } impl fmt::Debug for SystemRuntime { fn
(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("SystemRuntime").finish() } } /// CORS header specification. #[derive(Debug, Clone, PartialEq, Eq)] pub enum AllowOrigin { /// Allows access from any host. Any, /// Allows access only from the specified hosts. Whitelist(Vec<String>), } impl ser::Serialize for AllowOrigin { fn serialize<S>(&self, serializer: S) -> result::Result<S::Ok, S::Error> where S: ser::Serializer, { match *self { AllowOrigin::Any => "*".serialize(serializer), AllowOrigin::Whitelist(ref hosts) => { if hosts.len() == 1 { hosts[0].serialize(serializer) } else { hosts.serialize(serializer) } } } } } impl<'de> de::Deserialize<'de> for AllowOrigin { fn deserialize<D>(d: D) -> result::Result<Self, D::Error> where D: de::Deserializer<'de>, { struct Visitor; impl<'de> de::Visitor<'de> for Visitor { type Value = AllowOrigin; fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter.write_str("a list of hosts or \"*\"") } fn visit_str<E>(self, value: &str) -> result::Result<AllowOrigin, E> where E: de::Error, { match value { "*" => Ok(AllowOrigin::Any), _ => Ok(AllowOrigin::Whitelist(vec![value.to_string()])), } } fn visit_seq<A>(self, seq: A) -> result::Result<AllowOrigin, A::Error> where A: de::SeqAccess<'de>, { let hosts = de::Deserialize::deserialize(de::value::SeqAccessDeserializer::new(seq))?; Ok(AllowOrigin::Whitelist(hosts)) } } d.deserialize_any(Visitor) } } impl FromStr for AllowOrigin { type Err = Error; fn from_str(s: &str) -> result::Result<Self, Self::Err> { if s == "*" { return Ok(AllowOrigin::Any); } let v: Vec<_> = s .split(',') .map(|s| s.trim().to_string()) .filter(|s|!s.is_empty()) .collect(); if v.is_empty() { bail!("Invalid AllowOrigin::Whitelist value"); } Ok(AllowOrigin::Whitelist(v)) } } impl<'a> From<&'a AllowOrigin> for Cors { fn from(origin: &'a AllowOrigin) -> Self { match *origin { AllowOrigin::Any => Self::build().finish(), AllowOrigin::Whitelist(ref hosts) => { let mut builder = Self::build(); for host in hosts { builder.allowed_origin(host); } builder.finish() } } } } impl From<AllowOrigin> for Cors { fn from(origin: AllowOrigin) -> Self { Self::from(&origin) } } #[cfg(test)] mod tests { use pretty_assertions::assert_eq; use super::*; #[test] fn allow_origin_from_str() { fn check(text: &str, expected: AllowOrigin) { let from_str = AllowOrigin::from_str(text).unwrap(); assert_eq!(from_str, expected); } check(r#"*"#, AllowOrigin::Any); check( r#"http://example.com"#, AllowOrigin::Whitelist(vec!["http://example.com".to_string()]), ); check( r#"http://a.org, http://b.org"#, AllowOrigin::Whitelist(vec!["http://a.org".to_string(), "http://b.org".to_string()]), ); check( r#"http://a.org, http://b.org, "#, AllowOrigin::Whitelist(vec!["http://a.org".to_string(), "http://b.org".to_string()]), ); check( r#"http://a.org,http://b.org"#, AllowOrigin::Whitelist(vec!["http://a.org".to_string(), "http://b.org".to_string()]), ); } fn assert_responses_eq(left: HttpResponse, right: HttpResponse) { assert_eq!(left.status(), right.status()); assert_eq!(left.headers(), right.headers()); assert_eq!(left.body(), right.body()); } #[test] fn test_create_warning_header() { assert_eq!( &create_warning_header("Description"), "299 - \"Description\"" ); } #[test] fn json_responses() { use chrono::TimeZone; let actual_response = json_response(Actuality::Actual, 123); assert_responses_eq(actual_response, HttpResponse::Ok().json(123)); let deprecated_response_no_deadline = json_response( Actuality::Deprecated { discontinued_on: None, 
description: None, }, 123, ); let expected_warning_text = "Deprecated API: This endpoint is deprecated, \ see the service documentation to find an alternative. \ Currently there is no specific date for disabling this endpoint."; let expected_warning = create_warning_header(expected_warning_text); assert_responses_eq( deprecated_response_no_deadline, HttpResponse::Ok() .header(header::WARNING, expected_warning) .json(123), ); let description = "Docs can be found on docs.rs".to_owned(); let deprecated_response_with_description = json_response( Actuality::Deprecated { discontinued_on: None, description: Some(description), }, 123, ); let expected_warning_text = "Deprecated API: This endpoint is deprecated, \ see the service documentation to find an alternative. \ Currently there is no specific date for disabling this endpoint. \ Additional information: Docs can be found on docs.rs."; let expected_warning = create_warning_header(expected_warning_text); assert_responses_eq( deprecated_response_with_description, HttpResponse::Ok() .header(header::WARNING, expected_warning) .json(123), ); let deadline = chrono::Utc.ymd(2020, 12, 31).and_hms(23, 59, 59); let deprecated_response_deadline = json_response( Actuality::Deprecated { discontinued_on: Some(deadline),
fmt
identifier_name
actix.rs
implied. // See the License for the specific language governing permissions and // limitations under the License. //! Actix-web API backend. //! //! [Actix-web](https://github.com/actix/actix-web) is an asynchronous backend //! for HTTP API, based on the [Actix](https://github.com/actix/actix) framework. pub use actix_web::middleware::cors::Cors; use actix::{Actor, System}; use actix_web::{ error::ResponseError, http::header, AsyncResponder, FromRequest, HttpMessage, HttpResponse, Query, }; use failure::{bail, ensure, format_err, Error}; use futures::{future::Either, sync::mpsc, Future, IntoFuture, Stream}; use log::trace; use serde::{ de::{self, DeserializeOwned}, ser, Serialize, }; use std::{ fmt, net::SocketAddr, result, str::FromStr, sync::Arc, thread::{self, JoinHandle}, }; use crate::api::{ self, manager::{ApiManager, UpdateEndpoints}, Actuality, ApiAccess, ApiAggregator, ApiBackend, ApiScope, EndpointMutability, ExtendApiBackend, FutureResult, NamedWith, }; /// Type alias for the concrete `actix-web` HTTP response. pub type FutureResponse = actix_web::FutureResponse<HttpResponse, actix_web::Error>; /// Type alias for the concrete `actix-web` HTTP request. pub type HttpRequest = actix_web::HttpRequest<()>; /// Type alias for the inner `actix-web` HTTP requests handler. pub type RawHandler = dyn Fn(HttpRequest) -> FutureResponse +'static + Send + Sync; /// Type alias for the `actix-web::App`. pub type App = actix_web::App<()>; /// Type alias for the `actix-web::App` configuration. pub type AppConfig = Arc<dyn Fn(App) -> App +'static + Send + Sync>; /// Raw `actix-web` backend requests handler. #[derive(Clone)] pub struct RequestHandler { /// Endpoint name. pub name: String, /// Endpoint HTTP method. pub method: actix_web::http::Method, /// Inner handler. pub inner: Arc<RawHandler>, } impl fmt::Debug for RequestHandler { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("RequestHandler") .field("name", &self.name) .field("method", &self.method) .finish() } } /// API builder for the `actix-web` backend. #[derive(Debug, Clone, Default)] pub struct ApiBuilder { handlers: Vec<RequestHandler>, } impl ApiBuilder { /// Constructs a new backend builder instance. 
pub fn new() -> Self { Self::default() } } impl ApiBackend for ApiBuilder { type Handler = RequestHandler; type Backend = actix_web::Scope<()>; fn raw_handler(&mut self, handler: Self::Handler) -> &mut Self { self.handlers.push(handler); self } fn wire(&self, mut output: Self::Backend) -> Self::Backend { for handler in self.handlers.clone() { let inner = handler.inner; output = output.route(&handler.name, handler.method.clone(), move |request| { inner(request) }); } output } } impl ExtendApiBackend for actix_web::Scope<()> { fn extend<'a, I>(mut self, items: I) -> Self where I: IntoIterator<Item = (&'a str, &'a ApiScope)>, { for item in items { self = self.nested(&item.0, move |scope| item.1.actix_backend.wire(scope)) } self } } impl ResponseError for api::Error { fn error_response(&self) -> HttpResponse { match self { api::Error::BadRequest(err) => HttpResponse::BadRequest().body(err.to_string()), api::Error::InternalError(err) => { HttpResponse::InternalServerError().body(err.to_string()) } api::Error::Io(err) => HttpResponse::InternalServerError().body(err.to_string()), api::Error::Storage(err) => HttpResponse::InternalServerError().body(err.to_string()), api::Error::Gone => HttpResponse::Gone().finish(), api::Error::MovedPermanently(new_location) => HttpResponse::MovedPermanently() .header(header::LOCATION, new_location.clone()) .finish(), api::Error::NotFound(err) => HttpResponse::NotFound().body(err.to_string()), api::Error::Unauthorized => HttpResponse::Unauthorized().finish(), } } } /// Creates a `HttpResponse` object from the provided JSON value. /// Depending on the `actuality` parameter value, the warning about endpoint /// being deprecated can be added. fn json_response<T: Serialize>(actuality: Actuality, json_value: T) -> HttpResponse { let mut response = HttpResponse::Ok(); if let Actuality::Deprecated { ref discontinued_on, ref description, } = actuality { // There is a proposal for creating special deprecation header within HTTP, // but currently it's only a draft. So the conventional way to notify API user // about endpoint deprecation is setting the `Warning` header. let expiration_note = match discontinued_on { // Date is formatted according to HTTP-date format. Some(date) => format!( "The old API is maintained until {}.", date.format("%a, %d %b %Y %T GMT") ), None => "Currently there is no specific date for disabling this endpoint.".into(), }; let mut warning_text = format!( "Deprecated API: This endpoint is deprecated, \ see the service documentation to find an alternative. \ {}", expiration_note ); if let Some(description) = description { warning_text = format!("{} Additional information: {}.", warning_text, description); } let warning_string = create_warning_header(&warning_text); response.header(header::WARNING, warning_string); } response.json(json_value) } /// Formats warning string according to the following format: /// "<warn-code> <warn-agent> \"<warn-text>\" [<warn-date>]" /// <warn-code> in our case is 299, which means a miscellaneous persistent warning. /// <warn-agent> is optional, so we set it to "-". /// <warn-text> is a warning description, which is taken as an only argument. /// <warn-date> is not required. /// For details you can see RFC 7234, section 5.5: Warning. 
fn create_warning_header(warning_text: &str) -> String { format!("299 - \"{}\"", warning_text) } impl From<EndpointMutability> for actix_web::http::Method { fn from(mutability: EndpointMutability) -> Self { match mutability { EndpointMutability::Immutable => actix_web::http::Method::GET, EndpointMutability::Mutable => actix_web::http::Method::POST, } } } impl<Q, I, F> From<NamedWith<Q, I, api::Result<I>, F>> for RequestHandler where F: Fn(Q) -> api::Result<I> +'static + Send + Sync + Clone, Q: DeserializeOwned +'static, I: Serialize +'static, { fn from(f: NamedWith<Q, I, api::Result<I>, F>) -> Self { // Convert handler that returns a `Result` into handler that will return `FutureResult`. let handler = f.inner.handler; let future_endpoint = move |query| -> Box<dyn Future<Item = I, Error = api::Error>> { let future = handler(query).into_future(); Box::new(future) }; let named_with_future = NamedWith::new(f.name, future_endpoint, f.mutability); // Then we can create a `RequestHandler` with the `From` specialization for future result. RequestHandler::from(named_with_future) } } /// Takes `HttpRequest` as a parameter and extracts query: /// - If request is immutable, the query is parsed from query string, /// - If request is mutable, the query is parsed from the request body as JSON. fn extract_query<Q>( request: HttpRequest, mutability: EndpointMutability, ) -> impl Future<Item = Q, Error = actix_web::error::Error> where Q: DeserializeOwned +'static, { match mutability { EndpointMutability::Immutable => { let future = Query::from_request(&request, &Default::default()) .map(Query::into_inner) .map_err(From::from) .into_future(); Either::A(future) } EndpointMutability::Mutable => { let future = request.json().from_err(); Either::B(future) } } } impl<Q, I, F> From<NamedWith<Q, I, FutureResult<I>, F>> for RequestHandler where F: Fn(Q) -> FutureResult<I> +'static + Clone + Send + Sync, Q: DeserializeOwned +'static, I: Serialize +'static, { fn from(f: NamedWith<Q, I, FutureResult<I>, F>) -> Self { let handler = f.inner.handler; let actuality = f.inner.actuality; let mutability = f.mutability; let index = move |request: HttpRequest| -> FutureResponse { let handler = handler.clone(); let actuality = actuality.clone(); extract_query(request, mutability) .and_then(move |query| { handler(query) .map(|value| json_response(actuality, value)) .map_err(From::from) }) .responder() }; Self { name: f.name, method: f.mutability.into(), inner: Arc::from(index) as Arc<RawHandler>, } } } /// Creates `actix_web::App` for the given aggregator and runtime configuration. pub(crate) fn create_app(aggregator: &ApiAggregator, runtime_config: ApiRuntimeConfig) -> App { let app_config = runtime_config.app_config; let access = runtime_config.access; let mut app = App::new(); app = app.scope("api", |scope| aggregator.extend_backend(access, scope)); if let Some(app_config) = app_config { app = app_config(app); } app } /// Configuration parameters for the `App` runtime. #[derive(Clone)] pub struct ApiRuntimeConfig { /// The socket address to bind. pub listen_address: SocketAddr, /// API access level. pub access: ApiAccess, /// Optional App configuration. pub app_config: Option<AppConfig>, } impl ApiRuntimeConfig { /// Creates API runtime configuration for the given address and access level. 
pub fn new(listen_address: SocketAddr, access: ApiAccess) -> Self { Self { listen_address, access, app_config: Default::default(), } } } impl fmt::Debug for ApiRuntimeConfig { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("ApiRuntimeConfig") .field("listen_address", &self.listen_address) .field("access", &self.access) .field("app_config", &self.app_config.as_ref().map(drop)) .finish() } } /// Configuration parameters for the actix system runtime. #[derive(Debug, Clone)] pub struct SystemRuntimeConfig { /// Active API runtimes. pub api_runtimes: Vec<ApiRuntimeConfig>, /// API aggregator. pub api_aggregator: ApiAggregator, /// The interval in milliseconds between attempts of restarting HTTP-server in case /// the server failed to restart pub server_restart_retry_timeout: u64, /// The attempts counts of restarting HTTP-server in case the server failed to restart pub server_restart_max_retries: u16, } /// Actix system runtime handle. pub struct SystemRuntime { system_thread: JoinHandle<result::Result<(), Error>>, system: System, } impl SystemRuntimeConfig { /// Starts actix system runtime along with all web runtimes. pub fn start( self, endpoints_rx: mpsc::Receiver<UpdateEndpoints>, ) -> result::Result<SystemRuntime, Error> { // Creates a system thread. let (system_tx, system_rx) = mpsc::unbounded(); let system_thread = thread::spawn(move || -> result::Result<(), Error> { let system = System::new("http-server"); system_tx.unbounded_send(System::current())?; ApiManager::new(self, endpoints_rx).start(); // Starts actix-web runtime. let code = system.run(); trace!("Actix runtime finished with code {}", code); ensure!( code == 0, "Actix runtime finished with the non zero error code: {}", code ); Ok(()) }); // Receives addresses of runtime items. let system = system_rx .wait() .next() .ok_or_else(|| format_err!("Unable to receive actix system handle"))? .map_err(|()| format_err!("Unable to receive actix system handle"))?; Ok(SystemRuntime { system_thread, system, }) } } impl SystemRuntime { /// Stops the actix system runtime along with all web runtimes. pub fn stop(self) -> result::Result<(), Error> { // Stop actix system runtime. self.system.stop(); self.system_thread.join().map_err(|e| { format_err!( "Unable to join actix web api thread, an error occurred: {:?}", e ) })? } } impl fmt::Debug for SystemRuntime { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("SystemRuntime").finish() } } /// CORS header specification. #[derive(Debug, Clone, PartialEq, Eq)] pub enum AllowOrigin { /// Allows access from any host. Any, /// Allows access only from the specified hosts. Whitelist(Vec<String>), } impl ser::Serialize for AllowOrigin { fn serialize<S>(&self, serializer: S) -> result::Result<S::Ok, S::Error> where S: ser::Serializer, { match *self { AllowOrigin::Any => "*".serialize(serializer), AllowOrigin::Whitelist(ref hosts) => { if hosts.len() == 1 { hosts[0].serialize(serializer) } else { hosts.serialize(serializer) } } } } } impl<'de> de::Deserialize<'de> for AllowOrigin { fn deserialize<D>(d: D) -> result::Result<Self, D::Error> where D: de::Deserializer<'de>, { struct Visitor; impl<'de> de::Visitor<'de> for Visitor { type Value = AllowOrigin; fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter.write_str("a list of hosts or \"*\"") } fn visit_str<E>(self, value: &str) -> result::Result<AllowOrigin, E> where E: de::Error, {
"*" => Ok(AllowOrigin::Any), _ => Ok(AllowOrigin::Whitelist(vec![value.to_string()])), } } fn visit_seq<A>(self, seq: A) -> result::Result<AllowOrigin, A::Error> where A: de::SeqAccess<'de>, { let hosts = de::Deserialize::deserialize(de::value::SeqAccessDeserializer::new(seq))?; Ok(AllowOrigin::Whitelist(hosts)) } } d.deserialize_any(Visitor) } } impl FromStr for AllowOrigin { type Err = Error; fn from_str(s: &str) -> result::Result<Self, Self::Err> { if s == "*" { return Ok(AllowOrigin::Any); } let v: Vec<_> = s .split(',') .map(|s| s.trim().to_string()) .filter(|s|!s.is_empty()) .collect(); if v.is_empty() { bail!("Invalid AllowOrigin::Whitelist value"); } Ok(AllowOrigin::Whitelist(v)) } } impl<'a> From<&'a AllowOrigin> for Cors { fn from(origin: &'a AllowOrigin) -> Self { match *origin { AllowOrigin::Any => Self::build().finish(), AllowOrigin::Whitelist(ref hosts) => { let mut builder = Self::build(); for host in hosts { builder.allowed_origin(host); } builder.finish() } } } } impl From<AllowOrigin> for Cors { fn from(origin: AllowOrigin) -> Self { Self::from(&origin) } } #[cfg(test)] mod tests { use pretty_assertions::assert_eq; use super::*; #[test] fn allow_origin_from_str() { fn check(text: &str, expected: AllowOrigin) { let from_str = AllowOrigin::from_str(text).unwrap(); assert_eq!(from_str, expected); } check(r#"*"#, AllowOrigin::Any); check( r#"http://example.com"#, AllowOrigin::Whitelist(vec!["http://example.com".to_string()]), ); check( r#"http://a.org, http://b.org"#, AllowOrigin::Whitelist(vec!["http://a.org".to_string(), "http://b.org".to_string()]), ); check( r#"http://a.org, http://b.org, "#, AllowOrigin::Whitelist(vec!["http://a.org".to_string(), "http://b.org".to_string()]), ); check( r#"http://a.org,http://b.org"#, AllowOrigin::Whitelist(vec!["http://a.org".to_string(), "http://b.org".to_string()]), ); } fn assert_responses_eq(left: HttpResponse, right: HttpResponse) { assert_eq!(left.status(), right.status()); assert_eq!(left.headers(), right.headers()); assert_eq!(left.body(), right.body()); } #[test] fn test_create_warning_header() { assert_eq!( &create_warning_header("Description"), "299 - \"Description\"" ); } #[test] fn json_responses() { use chrono::TimeZone; let actual_response = json_response(Actuality::Actual, 123); assert_responses_eq(actual_response, HttpResponse::Ok().json(123)); let deprecated_response_no_deadline = json_response( Actuality::Deprecated { discontinued_on: None, description: None, }, 123, ); let expected_warning_text = "Deprecated API: This endpoint is deprecated, \ see the service documentation to find an alternative. \ Currently there is no specific date for disabling this endpoint."; let expected_warning = create_warning_header(expected_warning_text); assert_responses_eq( deprecated_response_no_deadline, HttpResponse::Ok() .header(header::WARNING, expected_warning) .json(123), ); let description = "Docs can be found on docs.rs".to_owned(); let deprecated_response_with_description = json_response( Actuality::Deprecated { discontinued_on: None, description: Some(description), }, 123, ); let expected_warning_text = "Deprecated API: This endpoint is deprecated, \ see the service documentation to find an alternative. \ Currently there is no specific date for disabling this endpoint. 
\ Additional information: Docs can be found on docs.rs."; let expected_warning = create_warning_header(expected_warning_text); assert_responses_eq( deprecated_response_with_description, HttpResponse::Ok() .header(header::WARNING, expected_warning) .json(123), ); let deadline = chrono::Utc.ymd(2020, 12, 31).and_hms(23, 59, 59); let deprecated_response_deadline = json_response( Actuality::Deprecated { discontinued_on: Some(deadline),
match value {
random_line_split
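A hedged aside on the row above: the `random_line_split` middle (`match value {`) opens the `visit_str` body, and the same "*"-versus-whitelist rule reappears in `FromStr`. Below is a self-contained sketch of that parsing rule using a local stand-in enum, not the crate's own type:

#[derive(Debug, PartialEq)]
enum AllowOrigin {
    Any,
    Whitelist(Vec<String>),
}

// Mirrors the visit_str/from_str logic above: "*" is the wildcard, anything
// else is treated as a comma-separated whitelist with blanks trimmed away.
fn parse_allow_origin(s: &str) -> Result<AllowOrigin, String> {
    if s == "*" {
        return Ok(AllowOrigin::Any);
    }
    let hosts: Vec<String> = s
        .split(',')
        .map(|h| h.trim().to_string())
        .filter(|h| !h.is_empty())
        .collect();
    if hosts.is_empty() {
        return Err("Invalid AllowOrigin::Whitelist value".to_string());
    }
    Ok(AllowOrigin::Whitelist(hosts))
}

fn main() {
    assert_eq!(parse_allow_origin("*").unwrap(), AllowOrigin::Any);
    // Trailing commas and padding are tolerated, as the tests above expect.
    assert_eq!(
        parse_allow_origin("http://a.org, http://b.org, ").unwrap(),
        AllowOrigin::Whitelist(vec!["http://a.org".into(), "http://b.org".into()])
    );
}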
mock.rs
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // Creating mock Runtime here use crate::{AssetConfig, Config, NetworkConfig}; use codec::{Codec, Decode, Encode}; use common::mock::ExistentialDeposits; use common::prelude::Balance; use common::{ Amount, AssetId32, AssetName, AssetSymbol, PredefinedAssetId, DEFAULT_BALANCE_PRECISION, VAL, }; use currencies::BasicCurrencyAdapter; use frame_support::dispatch::{DispatchInfo, GetDispatchInfo}; use frame_support::sp_io::TestExternalities; use frame_support::sp_runtime::app_crypto::sp_core; use frame_support::sp_runtime::app_crypto::sp_core::crypto::AccountId32; use frame_support::sp_runtime::app_crypto::sp_core::offchain::{OffchainExt, TransactionPoolExt}; use frame_support::sp_runtime::app_crypto::sp_core::{ecdsa, sr25519, Pair, Public}; use frame_support::sp_runtime::offchain::testing::{ OffchainState, PoolState, TestOffchainExt, TestTransactionPoolExt, }; use frame_support::sp_runtime::serde::{Serialize, Serializer}; use frame_support::sp_runtime::testing::Header; use frame_support::sp_runtime::traits::{ self, Applyable, BlakeTwo256, Checkable, DispatchInfoOf, Dispatchable, IdentifyAccount, IdentityLookup, PostDispatchInfoOf, SignedExtension, ValidateUnsigned, Verify, }; use frame_support::sp_runtime::transaction_validity::{ TransactionSource, TransactionValidity, TransactionValidityError, }; use frame_support::sp_runtime::{ self, ApplyExtrinsicResultWithInfo, MultiSignature, MultiSigner, Perbill, Percent, }; use frame_support::traits::GenesisBuild; use frame_support::weights::{Pays, Weight}; use frame_support::{construct_runtime, parameter_types}; use frame_system::offchain::{Account, SigningTypes}; use parking_lot::RwLock; use sp_core::H256; use sp_keystore::testing::KeyStore; use sp_keystore::KeystoreExt; use sp_std::collections::btree_set::BTreeSet; use sp_std::fmt::Debug; use sp_std::str::FromStr; use sp_std::sync::Arc; use std::collections::HashMap; use {crate as eth_bridge, frame_system}; pub const PSWAP: PredefinedAssetId = PredefinedAssetId::PSWAP; pub const XOR: PredefinedAssetId = PredefinedAssetId::XOR; /// An index to a block. pub type BlockNumber = u64; pub type Signature = MultiSignature; /// Some way of identifying an account on the chain. We intentionally make it equivalent /// to the public key of our transaction signing scheme. pub type AccountId = <<Signature as Verify>::Signer as IdentifyAccount>::AccountId; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Runtime>; type Block = frame_system::mocking::MockBlock<Runtime>; parameter_types! { pub const GetBaseAssetId: AssetId32<PredefinedAssetId> = AssetId32::from_asset_id(XOR); pub const DepositBase: u64 = 1; pub const DepositFactor: u64 = 1; pub const MaxSignatories: u16 = 4; pub const UnsignedPriority: u64 = 100; pub const EthNetworkId: <Runtime as Config>::NetworkId = 0; } #[derive(PartialEq, Eq, Clone, Encode, Decode, Debug)] pub struct MyTestXt<Call, Extra> { /// Signature of the extrinsic. pub signature: Option<(AccountId, Extra)>, /// Call of the extrinsic. 
pub call: Call, } parity_util_mem::malloc_size_of_is_0!(any: MyTestXt<Call, Extra>); impl<Call: Codec + Sync + Send, Context, Extra> Checkable<Context> for MyTestXt<Call, Extra> { type Checked = Self; fn check(self, _c: &Context) -> Result<Self::Checked, TransactionValidityError> { Ok(self) } } impl<Call: Codec + Sync + Send, Extra> traits::Extrinsic for MyTestXt<Call, Extra> { type Call = Call; type SignaturePayload = (AccountId, Extra); fn is_signed(&self) -> Option<bool> { Some(self.signature.is_some()) } fn new(c: Call, sig: Option<Self::SignaturePayload>) -> Option<Self> { Some(MyTestXt { signature: sig, call: c, }) } } impl SignedExtension for MyExtra { type AccountId = AccountId; type Call = Call; type AdditionalSigned = (); type Pre = (); const IDENTIFIER: &'static str = "testextension"; fn additional_signed(&self) -> Result<Self::AdditionalSigned, TransactionValidityError> { Ok(()) } } impl<Origin, Call, Extra> Applyable for MyTestXt<Call, Extra> where Call: 'static + Sized + Send + Sync + Clone + Eq + Codec + Debug + Dispatchable<Origin = Origin>, Extra: SignedExtension<AccountId = AccountId, Call = Call>, Origin: From<Option<AccountId32>>, { type Call = Call; /// Checks to see if this is a valid *transaction*. It returns information on it if so. fn validate<U: ValidateUnsigned<Call = Self::Call>>( &self, _source: TransactionSource, _info: &DispatchInfoOf<Self::Call>, _len: usize, ) -> TransactionValidity { Ok(Default::default()) } /// Executes all necessary logic needed prior to dispatch and deconstructs into function call, /// index and sender. fn apply<U: ValidateUnsigned<Call = Self::Call>>( self, info: &DispatchInfoOf<Self::Call>, len: usize, ) -> ApplyExtrinsicResultWithInfo<PostDispatchInfoOf<Self::Call>> { let maybe_who = if let Some((who, extra)) = self.signature
else { Extra::pre_dispatch_unsigned(&self.call, info, len)?; None }; Ok(self.call.dispatch(maybe_who.into())) } } impl<Call, Extra> Serialize for MyTestXt<Call, Extra> where MyTestXt<Call, Extra>: Encode, { fn serialize<S>(&self, seq: S) -> Result<S::Ok, S::Error> where S: Serializer, { self.using_encoded(|bytes| seq.serialize_bytes(bytes)) } } impl<Call: Encode, Extra: Encode> GetDispatchInfo for MyTestXt<Call, Extra> { fn get_dispatch_info(&self) -> DispatchInfo { // for testing: weight == size. DispatchInfo { weight: self.encode().len() as _, pays_fee: Pays::No, ..Default::default() } } } #[derive(Debug, Clone, Copy, PartialEq, Eq, Encode, Decode)] pub struct MyExtra; pub type TestExtrinsic = MyTestXt<Call, MyExtra>; parameter_types! { pub const BlockHashCount: u64 = 250; pub const MaximumBlockWeight: Weight = 1024; pub const MaximumBlockLength: u32 = 2 * 1024; pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); pub const ExistentialDeposit: u128 = 0; } impl frame_system::Config for Runtime { type BaseCallFilter = (); type BlockWeights = (); type BlockLength = (); type Origin = Origin; type Call = Call; type Index = u64; type BlockNumber = u64; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = AccountId; type Lookup = IdentityLookup<Self::AccountId>; type Header = Header; type Event = Event; type BlockHashCount = BlockHashCount; type DbWeight = (); type Version = (); type AccountData = pallet_balances::AccountData<Balance>; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); type PalletInfo = PalletInfo; type SS58Prefix = (); } impl<T: SigningTypes> frame_system::offchain::SignMessage<T> for Runtime { type SignatureData = (); fn sign_message(&self, _message: &[u8]) -> Self::SignatureData { unimplemented!() } fn sign<TPayload, F>(&self, _f: F) -> Self::SignatureData where F: Fn(&Account<T>) -> TPayload, TPayload: frame_system::offchain::SignedPayload<T>, { unimplemented!() } } impl<LocalCall> frame_system::offchain::CreateSignedTransaction<LocalCall> for Runtime where Call: From<LocalCall>, { fn create_transaction<C: frame_system::offchain::AppCrypto<Self::Public, Self::Signature>>( call: Call, _public: <Signature as Verify>::Signer, account: <Runtime as frame_system::Config>::AccountId, _index: <Runtime as frame_system::Config>::Index, ) -> Option<( Call, <TestExtrinsic as sp_runtime::traits::Extrinsic>::SignaturePayload, )> { Some((call, (account, MyExtra {}))) } } impl frame_system::offchain::SigningTypes for Runtime { type Public = <Signature as Verify>::Signer; type Signature = Signature; } impl<C> frame_system::offchain::SendTransactionTypes<C> for Runtime where Call: From<C>, { type OverarchingCall = Call; type Extrinsic = TestExtrinsic; } impl pallet_balances::Config for Runtime { /// The type for recording an account's balance. type Balance = Balance; /// The ubiquitous event type. 
type Event = Event; type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type WeightInfo = (); type MaxLocks = (); } impl tokens::Config for Runtime { type Event = Event; type Balance = Balance; type Amount = Amount; type CurrencyId = <Runtime as assets::Config>::AssetId; type WeightInfo = (); type ExistentialDeposits = ExistentialDeposits; type OnDust = (); } impl currencies::Config for Runtime { type Event = Event; type MultiCurrency = Tokens; type NativeCurrency = BasicCurrencyAdapter<Runtime, Balances, Amount, BlockNumber>; type GetNativeCurrencyId = <Runtime as assets::Config>::GetBaseAssetId; type WeightInfo = (); } impl assets::Config for Runtime { type Event = Event; type ExtraAccountId = [u8; 32]; type ExtraAssetRecordArg = common::AssetIdExtraAssetRecordArg<common::DEXId, common::LiquiditySourceType, [u8; 32]>; type AssetId = common::AssetId32<PredefinedAssetId>; type GetBaseAssetId = GetBaseAssetId; type Currency = currencies::Module<Runtime>; type WeightInfo = (); } impl common::Config for Runtime { type DEXId = common::DEXId; type LstId = common::LiquiditySourceType; } impl permissions::Config for Runtime { type Event = Event; } impl bridge_multisig::Config for Runtime { type Call = Call; type Event = Event; type Currency = Balances; type DepositBase = DepositBase; type DepositFactor = DepositFactor; type MaxSignatories = MaxSignatories; type WeightInfo = (); } impl pallet_sudo::Config for Runtime { type Call = Call; type Event = Event; } impl crate::Config for Runtime { type PeerId = crate::crypto::TestAuthId; type Call = Call; type Event = Event; type NetworkId = u32; type GetEthNetworkId = EthNetworkId; type WeightInfo = (); } impl sp_runtime::traits::ExtrinsicMetadata for TestExtrinsic { const VERSION: u8 = 1; type SignedExtensions = (); } construct_runtime!( pub enum Runtime where Block = Block, NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic { System: frame_system::{Module, Call, Config, Storage, Event<T>}, Balances: pallet_balances::{Module, Call, Storage, Config<T>, Event<T>}, Multisig: bridge_multisig::{Module, Call, Storage, Config<T>, Event<T>}, Tokens: tokens::{Module, Call, Storage, Config<T>, Event<T>}, Currencies: currencies::{Module, Call, Storage, Event<T>}, Assets: assets::{Module, Call, Storage, Config<T>, Event<T>}, Permissions: permissions::{Module, Call, Storage, Config<T>, Event<T>}, Sudo: pallet_sudo::{Module, Call, Storage, Config<T>, Event<T>}, EthBridge: eth_bridge::{Module, Call, Storage, Config<T>, Event<T>}, } ); pub type SubstrateAccountId = <<Signature as Verify>::Signer as IdentifyAccount>::AccountId; pub struct State { pub networks: HashMap<u32, ExtendedNetworkConfig>, pub authority_account_id: AccountId32, pub pool_state: Arc<RwLock<PoolState>>, pub offchain_state: Arc<RwLock<OffchainState>>, } #[derive(Clone, Debug)] pub struct ExtendedNetworkConfig { pub ocw_keypairs: Vec<(MultiSigner, AccountId32, [u8; 32])>, pub config: NetworkConfig<Runtime>, } pub struct ExtBuilder { pub networks: HashMap<u32, ExtendedNetworkConfig>, last_network_id: u32, root_account_id: AccountId32, } impl Default for ExtBuilder { fn default() -> Self { let mut builder = Self { networks: Default::default(), last_network_id: Default::default(), root_account_id: get_account_id_from_seed::<sr25519::Public>("Alice"), }; builder.add_network( vec![ AssetConfig::Thischain { id: PSWAP.into() }, AssetConfig::Sidechain { id: XOR.into(), sidechain_id: sp_core::H160::from_str( "40fd72257597aa14c7231a7b1aaa29fce868f677", ) 
.unwrap(), owned: true, precision: DEFAULT_BALANCE_PRECISION, }, AssetConfig::Sidechain { id: VAL.into(), sidechain_id: sp_core::H160::from_str( "3f9feac97e5feb15d8bf98042a9a01b515da3dfb", ) .unwrap(), owned: true, precision: DEFAULT_BALANCE_PRECISION, }, ], Some(vec![ (XOR.into(), common::balance!(350000)), (VAL.into(), common::balance!(33900000)), ]), Some(4), ); builder } } impl ExtBuilder { pub fn new() -> Self { Self { networks: Default::default(), last_network_id: Default::default(), root_account_id: get_account_id_from_seed::<sr25519::Public>("Alice"), } } pub fn add_currency( &mut self, network_id: u32, currency: AssetConfig<AssetId32<PredefinedAssetId>>, ) { self.networks .get_mut(&network_id) .unwrap() .config .assets .push(currency); } pub fn add_network( &mut self, assets: Vec<AssetConfig<AssetId32<PredefinedAssetId>>>, reserves: Option<Vec<(AssetId32<PredefinedAssetId>, Balance)>>, peers_num: Option<usize>, ) -> u32 { let net_id = self.last_network_id; let multisig_account_id = bridge_multisig::Module::<Runtime>::multi_account_id( &self.root_account_id, 1, net_id as u64 + 10, ); let peers_keys = gen_peers_keys(&format!("OCW{}", net_id), peers_num.unwrap_or(4)); self.networks.insert( net_id, ExtendedNetworkConfig { config: NetworkConfig { initial_peers: peers_keys.iter().map(|(_, id, _)| id).cloned().collect(), bridge_account_id: multisig_account_id.clone(), assets, bridge_contract_address: Default::default(), reserves: reserves.unwrap_or_default(), }, ocw_keypairs: peers_keys, }, ); self.last_network_id += 1; net_id } pub fn build(self) -> (TestExternalities, State) { let (offchain, offchain_state) = TestOffchainExt::new(); let (pool, pool_state) = TestTransactionPoolExt::new(); let authority_account_id = bridge_multisig::Module::<Runtime>::multi_account_id(&self.root_account_id, 1, 0); let mut bridge_accounts = Vec::new(); let mut bridge_network_configs = Vec::new(); let mut endowed_accounts: Vec<(_, AssetId32<PredefinedAssetId>, _)> = Vec::new(); let mut networks: Vec<_> = self.networks.clone().into_iter().collect(); networks.sort_by(|(x, _), (y, _)| x.cmp(y)); for (_net_id, ext_network) in networks { bridge_network_configs.push(ext_network.config.clone()); endowed_accounts.extend(ext_network.config.assets.iter().cloned().map( |asset_config| { ( ext_network.config.bridge_account_id.clone(), asset_config.asset_id().clone(), 0, ) }, )); endowed_accounts.extend(ext_network.config.reserves.iter().cloned().map( |(asset_id, _balance)| (ext_network.config.bridge_account_id.clone(), asset_id, 0), )); bridge_accounts.push(( ext_network.config.bridge_account_id.clone(), bridge_multisig::MultisigAccount::new( ext_network .ocw_keypairs .iter() .map(|x| x.1.clone()) .collect(), Percent::from_parts(67), ), )); } // pallet_balances and orml_tokens no longer accept duplicate elements. 
let mut unique_endowed_accounts: Vec<(_, AssetId32<PredefinedAssetId>, _)> = Vec::new(); for acc in endowed_accounts { if let Some(unique_acc) = unique_endowed_accounts.iter_mut().find(|a| a.1 == acc.1) { unique_acc.2 += acc.2; } else { unique_endowed_accounts.push(acc); } } let endowed_accounts = unique_endowed_accounts; let endowed_assets: BTreeSet<_> = endowed_accounts .iter() .map(|x| { ( x.1, self.root_account_id.clone(), AssetSymbol(b"".to_vec()), AssetName(b"".to_vec()), 18, Balance::from(0u32), true, ) }) .collect(); let mut storage = frame_system::GenesisConfig::default() .build_storage::<Runtime>() .unwrap(); let mut balances: Vec<_> = endowed_accounts .iter() .map(|(acc, ..)| acc) .chain(vec![&self.root_account_id, &authority_account_id]) .map(|x| (x.clone(), Balance::from(0u32))) .collect(); balances.extend(bridge_accounts.iter().map(|(acc, _)| (acc.clone(), 0))); for (_net_id, ext_network) in &self.networks { balances.extend(ext_network.ocw_keypairs.iter().map(|x| (x.1.clone(), 0))); } balances.sort_by_key(|x| x.0.clone()); balances.dedup_by_key(|x| x.0.clone()); BalancesConfig { balances } .assimilate_storage(&mut storage) .unwrap(); if !endowed_accounts.is_empty() { SudoConfig { key: endowed_accounts[0].0.clone(), } .assimilate_storage(&mut storage) .unwrap(); } MultisigConfig { accounts: bridge_accounts, } .assimilate_storage(&mut storage) .unwrap(); PermissionsConfig { initial_permission_owners: vec![], initial_permissions: Vec::new(), } .assimilate_storage(&mut storage) .unwrap(); TokensConfig { endowed_accounts: endowed_accounts.clone(), } .assimilate_storage(&mut storage) .unwrap(); AssetsConfig { endowed_assets: endowed_assets.into_iter().collect(), } .assimilate_storage(&mut storage) .unwrap(); EthBridgeConfig { networks: bridge_network_configs,
{ Extra::pre_dispatch(extra, &who, &self.call, info, len)?; Some(who) }
conditional_block
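The `conditional_block` middle above is the signed branch of `Applyable::apply` in `mock.rs`. Here is a dependency-free sketch of that signed/unsigned split; the `Extension` trait is a hypothetical stand-in for `SignedExtension`, not the Substrate trait itself:

// Local stand-in for a signed extension's pre-dispatch hooks.
trait Extension {
    fn pre_dispatch(&self, who: &str) -> Result<(), String>;
    fn pre_dispatch_unsigned() -> Result<(), String>;
}

struct NoopExtra;

impl Extension for NoopExtra {
    fn pre_dispatch(&self, _who: &str) -> Result<(), String> { Ok(()) }
    fn pre_dispatch_unsigned() -> Result<(), String> { Ok(()) }
}

// Signed extrinsics run the extension's pre-dispatch check with the signer
// and carry the signer as the origin; unsigned ones run the unsigned variant
// and carry no origin, exactly the shape of the `if let` above.
fn apply<E: Extension>(signature: Option<(String, E)>) -> Result<Option<String>, String> {
    let maybe_who = if let Some((who, extra)) = signature {
        extra.pre_dispatch(&who)?;
        Some(who)
    } else {
        E::pre_dispatch_unsigned()?;
        None
    };
    Ok(maybe_who)
}

fn main() {
    assert_eq!(
        apply(Some(("alice".to_string(), NoopExtra))).unwrap(),
        Some("alice".to_string())
    );
    assert_eq!(apply::<NoopExtra>(None).unwrap(), None);
}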
mock.rs
THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // Creating mock Runtime here use crate::{AssetConfig, Config, NetworkConfig}; use codec::{Codec, Decode, Encode}; use common::mock::ExistentialDeposits; use common::prelude::Balance; use common::{ Amount, AssetId32, AssetName, AssetSymbol, PredefinedAssetId, DEFAULT_BALANCE_PRECISION, VAL, }; use currencies::BasicCurrencyAdapter; use frame_support::dispatch::{DispatchInfo, GetDispatchInfo}; use frame_support::sp_io::TestExternalities; use frame_support::sp_runtime::app_crypto::sp_core; use frame_support::sp_runtime::app_crypto::sp_core::crypto::AccountId32; use frame_support::sp_runtime::app_crypto::sp_core::offchain::{OffchainExt, TransactionPoolExt}; use frame_support::sp_runtime::app_crypto::sp_core::{ecdsa, sr25519, Pair, Public}; use frame_support::sp_runtime::offchain::testing::{ OffchainState, PoolState, TestOffchainExt, TestTransactionPoolExt, }; use frame_support::sp_runtime::serde::{Serialize, Serializer}; use frame_support::sp_runtime::testing::Header; use frame_support::sp_runtime::traits::{ self, Applyable, BlakeTwo256, Checkable, DispatchInfoOf, Dispatchable, IdentifyAccount, IdentityLookup, PostDispatchInfoOf, SignedExtension, ValidateUnsigned, Verify, }; use frame_support::sp_runtime::transaction_validity::{ TransactionSource, TransactionValidity, TransactionValidityError, }; use frame_support::sp_runtime::{ self, ApplyExtrinsicResultWithInfo, MultiSignature, MultiSigner, Perbill, Percent, }; use frame_support::traits::GenesisBuild; use frame_support::weights::{Pays, Weight}; use frame_support::{construct_runtime, parameter_types}; use frame_system::offchain::{Account, SigningTypes}; use parking_lot::RwLock; use sp_core::H256; use sp_keystore::testing::KeyStore; use sp_keystore::KeystoreExt; use sp_std::collections::btree_set::BTreeSet; use sp_std::fmt::Debug; use sp_std::str::FromStr; use sp_std::sync::Arc; use std::collections::HashMap; use {crate as eth_bridge, frame_system}; pub const PSWAP: PredefinedAssetId = PredefinedAssetId::PSWAP; pub const XOR: PredefinedAssetId = PredefinedAssetId::XOR; /// An index to a block. pub type BlockNumber = u64; pub type Signature = MultiSignature; /// Some way of identifying an account on the chain. We intentionally make it equivalent /// to the public key of our transaction signing scheme. pub type AccountId = <<Signature as Verify>::Signer as IdentifyAccount>::AccountId; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Runtime>; type Block = frame_system::mocking::MockBlock<Runtime>; parameter_types! { pub const GetBaseAssetId: AssetId32<PredefinedAssetId> = AssetId32::from_asset_id(XOR); pub const DepositBase: u64 = 1; pub const DepositFactor: u64 = 1; pub const MaxSignatories: u16 = 4; pub const UnsignedPriority: u64 = 100; pub const EthNetworkId: <Runtime as Config>::NetworkId = 0; } #[derive(PartialEq, Eq, Clone, Encode, Decode, Debug)] pub struct MyTestXt<Call, Extra> { /// Signature of the extrinsic. pub signature: Option<(AccountId, Extra)>, /// Call of the extrinsic. 
pub call: Call, } parity_util_mem::malloc_size_of_is_0!(any: MyTestXt<Call, Extra>); impl<Call: Codec + Sync + Send, Context, Extra> Checkable<Context> for MyTestXt<Call, Extra> { type Checked = Self; fn check(self, _c: &Context) -> Result<Self::Checked, TransactionValidityError> { Ok(self) } } impl<Call: Codec + Sync + Send, Extra> traits::Extrinsic for MyTestXt<Call, Extra> { type Call = Call; type SignaturePayload = (AccountId, Extra); fn is_signed(&self) -> Option<bool> { Some(self.signature.is_some()) } fn new(c: Call, sig: Option<Self::SignaturePayload>) -> Option<Self> { Some(MyTestXt { signature: sig, call: c, }) } } impl SignedExtension for MyExtra { type AccountId = AccountId; type Call = Call; type AdditionalSigned = (); type Pre = (); const IDENTIFIER: &'static str = "testextension"; fn additional_signed(&self) -> Result<Self::AdditionalSigned, TransactionValidityError> { Ok(()) } } impl<Origin, Call, Extra> Applyable for MyTestXt<Call, Extra> where Call: 'static + Sized + Send + Sync + Clone + Eq + Codec + Debug + Dispatchable<Origin = Origin>, Extra: SignedExtension<AccountId = AccountId, Call = Call>, Origin: From<Option<AccountId32>>, { type Call = Call; /// Checks to see if this is a valid *transaction*. It returns information on it if so. fn validate<U: ValidateUnsigned<Call = Self::Call>>( &self, _source: TransactionSource, _info: &DispatchInfoOf<Self::Call>, _len: usize, ) -> TransactionValidity { Ok(Default::default()) } /// Executes all necessary logic needed prior to dispatch and deconstructs into function call, /// index and sender. fn apply<U: ValidateUnsigned<Call = Self::Call>>( self, info: &DispatchInfoOf<Self::Call>, len: usize, ) -> ApplyExtrinsicResultWithInfo<PostDispatchInfoOf<Self::Call>> { let maybe_who = if let Some((who, extra)) = self.signature { Extra::pre_dispatch(extra, &who, &self.call, info, len)?; Some(who) } else { Extra::pre_dispatch_unsigned(&self.call, info, len)?; None }; Ok(self.call.dispatch(maybe_who.into())) } } impl<Call, Extra> Serialize for MyTestXt<Call, Extra> where MyTestXt<Call, Extra>: Encode, { fn serialize<S>(&self, seq: S) -> Result<S::Ok, S::Error> where S: Serializer, { self.using_encoded(|bytes| seq.serialize_bytes(bytes)) } } impl<Call: Encode, Extra: Encode> GetDispatchInfo for MyTestXt<Call, Extra> { fn get_dispatch_info(&self) -> DispatchInfo { // for testing: weight == size. DispatchInfo { weight: self.encode().len() as _, pays_fee: Pays::No, ..Default::default() } } } #[derive(Debug, Clone, Copy, PartialEq, Eq, Encode, Decode)] pub struct MyExtra; pub type TestExtrinsic = MyTestXt<Call, MyExtra>; parameter_types! 
{ pub const BlockHashCount: u64 = 250; pub const MaximumBlockWeight: Weight = 1024; pub const MaximumBlockLength: u32 = 2 * 1024; pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); pub const ExistentialDeposit: u128 = 0; } impl frame_system::Config for Runtime { type BaseCallFilter = (); type BlockWeights = (); type BlockLength = (); type Origin = Origin; type Call = Call; type Index = u64; type BlockNumber = u64; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = AccountId; type Lookup = IdentityLookup<Self::AccountId>; type Header = Header; type Event = Event; type BlockHashCount = BlockHashCount; type DbWeight = (); type Version = (); type AccountData = pallet_balances::AccountData<Balance>; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); type PalletInfo = PalletInfo; type SS58Prefix = (); } impl<T: SigningTypes> frame_system::offchain::SignMessage<T> for Runtime { type SignatureData = (); fn sign_message(&self, _message: &[u8]) -> Self::SignatureData { unimplemented!() } fn sign<TPayload, F>(&self, _f: F) -> Self::SignatureData where F: Fn(&Account<T>) -> TPayload, TPayload: frame_system::offchain::SignedPayload<T>, { unimplemented!() } } impl<LocalCall> frame_system::offchain::CreateSignedTransaction<LocalCall> for Runtime where Call: From<LocalCall>, { fn create_transaction<C: frame_system::offchain::AppCrypto<Self::Public, Self::Signature>>( call: Call, _public: <Signature as Verify>::Signer, account: <Runtime as frame_system::Config>::AccountId, _index: <Runtime as frame_system::Config>::Index, ) -> Option<( Call, <TestExtrinsic as sp_runtime::traits::Extrinsic>::SignaturePayload, )> { Some((call, (account, MyExtra {}))) } } impl frame_system::offchain::SigningTypes for Runtime { type Public = <Signature as Verify>::Signer; type Signature = Signature; } impl<C> frame_system::offchain::SendTransactionTypes<C> for Runtime where Call: From<C>, { type OverarchingCall = Call; type Extrinsic = TestExtrinsic; } impl pallet_balances::Config for Runtime { /// The type for recording an account's balance. type Balance = Balance; /// The ubiquitous event type. 
type Event = Event; type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type WeightInfo = (); type MaxLocks = (); } impl tokens::Config for Runtime { type Event = Event; type Balance = Balance; type Amount = Amount; type CurrencyId = <Runtime as assets::Config>::AssetId; type WeightInfo = (); type ExistentialDeposits = ExistentialDeposits; type OnDust = (); } impl currencies::Config for Runtime { type Event = Event; type MultiCurrency = Tokens; type NativeCurrency = BasicCurrencyAdapter<Runtime, Balances, Amount, BlockNumber>; type GetNativeCurrencyId = <Runtime as assets::Config>::GetBaseAssetId; type WeightInfo = (); } impl assets::Config for Runtime { type Event = Event; type ExtraAccountId = [u8; 32]; type ExtraAssetRecordArg = common::AssetIdExtraAssetRecordArg<common::DEXId, common::LiquiditySourceType, [u8; 32]>; type AssetId = common::AssetId32<PredefinedAssetId>; type GetBaseAssetId = GetBaseAssetId; type Currency = currencies::Module<Runtime>; type WeightInfo = (); } impl common::Config for Runtime { type DEXId = common::DEXId; type LstId = common::LiquiditySourceType; } impl permissions::Config for Runtime { type Event = Event; } impl bridge_multisig::Config for Runtime { type Call = Call; type Event = Event; type Currency = Balances; type DepositBase = DepositBase; type DepositFactor = DepositFactor; type MaxSignatories = MaxSignatories; type WeightInfo = (); } impl pallet_sudo::Config for Runtime { type Call = Call; type Event = Event; } impl crate::Config for Runtime { type PeerId = crate::crypto::TestAuthId; type Call = Call; type Event = Event; type NetworkId = u32;
impl sp_runtime::traits::ExtrinsicMetadata for TestExtrinsic { const VERSION: u8 = 1; type SignedExtensions = (); } construct_runtime!( pub enum Runtime where Block = Block, NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic { System: frame_system::{Module, Call, Config, Storage, Event<T>}, Balances: pallet_balances::{Module, Call, Storage, Config<T>, Event<T>}, Multisig: bridge_multisig::{Module, Call, Storage, Config<T>, Event<T>}, Tokens: tokens::{Module, Call, Storage, Config<T>, Event<T>}, Currencies: currencies::{Module, Call, Storage, Event<T>}, Assets: assets::{Module, Call, Storage, Config<T>, Event<T>}, Permissions: permissions::{Module, Call, Storage, Config<T>, Event<T>}, Sudo: pallet_sudo::{Module, Call, Storage, Config<T>, Event<T>}, EthBridge: eth_bridge::{Module, Call, Storage, Config<T>, Event<T>}, } ); pub type SubstrateAccountId = <<Signature as Verify>::Signer as IdentifyAccount>::AccountId; pub struct State { pub networks: HashMap<u32, ExtendedNetworkConfig>, pub authority_account_id: AccountId32, pub pool_state: Arc<RwLock<PoolState>>, pub offchain_state: Arc<RwLock<OffchainState>>, } #[derive(Clone, Debug)] pub struct ExtendedNetworkConfig { pub ocw_keypairs: Vec<(MultiSigner, AccountId32, [u8; 32])>, pub config: NetworkConfig<Runtime>, } pub struct ExtBuilder { pub networks: HashMap<u32, ExtendedNetworkConfig>, last_network_id: u32, root_account_id: AccountId32, } impl Default for ExtBuilder { fn default() -> Self { let mut builder = Self { networks: Default::default(), last_network_id: Default::default(), root_account_id: get_account_id_from_seed::<sr25519::Public>("Alice"), }; builder.add_network( vec![ AssetConfig::Thischain { id: PSWAP.into() }, AssetConfig::Sidechain { id: XOR.into(), sidechain_id: sp_core::H160::from_str( "40fd72257597aa14c7231a7b1aaa29fce868f677", ) .unwrap(), owned: true, precision: DEFAULT_BALANCE_PRECISION, }, AssetConfig::Sidechain { id: VAL.into(), sidechain_id: sp_core::H160::from_str( "3f9feac97e5feb15d8bf98042a9a01b515da3dfb", ) .unwrap(), owned: true, precision: DEFAULT_BALANCE_PRECISION, }, ], Some(vec![ (XOR.into(), common::balance!(350000)), (VAL.into(), common::balance!(33900000)), ]), Some(4), ); builder } } impl ExtBuilder { pub fn new() -> Self { Self { networks: Default::default(), last_network_id: Default::default(), root_account_id: get_account_id_from_seed::<sr25519::Public>("Alice"), } } pub fn add_currency( &mut self, network_id: u32, currency: AssetConfig<AssetId32<PredefinedAssetId>>, ) { self.networks .get_mut(&network_id) .unwrap() .config .assets .push(currency); } pub fn add_network( &mut self, assets: Vec<AssetConfig<AssetId32<PredefinedAssetId>>>, reserves: Option<Vec<(AssetId32<PredefinedAssetId>, Balance)>>, peers_num: Option<usize>, ) -> u32 { let net_id = self.last_network_id; let multisig_account_id = bridge_multisig::Module::<Runtime>::multi_account_id( &self.root_account_id, 1, net_id as u64 + 10, ); let peers_keys = gen_peers_keys(&format!("OCW{}", net_id), peers_num.unwrap_or(4)); self.networks.insert( net_id, ExtendedNetworkConfig { config: NetworkConfig { initial_peers: peers_keys.iter().map(|(_, id, _)| id).cloned().collect(), bridge_account_id: multisig_account_id.clone(), assets, bridge_contract_address: Default::default(), reserves: reserves.unwrap_or_default(), }, ocw_keypairs: peers_keys, }, ); self.last_network_id += 1; net_id } pub fn build(self) -> (TestExternalities, State) { let (offchain, offchain_state) = TestOffchainExt::new(); let (pool, pool_state) = 
TestTransactionPoolExt::new(); let authority_account_id = bridge_multisig::Module::<Runtime>::multi_account_id(&self.root_account_id, 1, 0); let mut bridge_accounts = Vec::new(); let mut bridge_network_configs = Vec::new(); let mut endowed_accounts: Vec<(_, AssetId32<PredefinedAssetId>, _)> = Vec::new(); let mut networks: Vec<_> = self.networks.clone().into_iter().collect(); networks.sort_by(|(x, _), (y, _)| x.cmp(y)); for (_net_id, ext_network) in networks { bridge_network_configs.push(ext_network.config.clone()); endowed_accounts.extend(ext_network.config.assets.iter().cloned().map( |asset_config| { ( ext_network.config.bridge_account_id.clone(), asset_config.asset_id().clone(), 0, ) }, )); endowed_accounts.extend(ext_network.config.reserves.iter().cloned().map( |(asset_id, _balance)| (ext_network.config.bridge_account_id.clone(), asset_id, 0), )); bridge_accounts.push(( ext_network.config.bridge_account_id.clone(), bridge_multisig::MultisigAccount::new( ext_network .ocw_keypairs .iter() .map(|x| x.1.clone()) .collect(), Percent::from_parts(67), ), )); } // pallet_balances and orml_tokens no longer accept duplicate elements. let mut unique_endowed_accounts: Vec<(_, AssetId32<PredefinedAssetId>, _)> = Vec::new(); for acc in endowed_accounts { if let Some(unique_acc) = unique_endowed_accounts.iter_mut().find(|a| a.1 == acc.1) { unique_acc.2 += acc.2; } else { unique_endowed_accounts.push(acc); } } let endowed_accounts = unique_endowed_accounts; let endowed_assets: BTreeSet<_> = endowed_accounts .iter() .map(|x| { ( x.1, self.root_account_id.clone(), AssetSymbol(b"".to_vec()), AssetName(b"".to_vec()), 18, Balance::from(0u32), true, ) }) .collect(); let mut storage = frame_system::GenesisConfig::default() .build_storage::<Runtime>() .unwrap(); let mut balances: Vec<_> = endowed_accounts .iter() .map(|(acc, ..)| acc) .chain(vec![&self.root_account_id, &authority_account_id]) .map(|x| (x.clone(), Balance::from(0u32))) .collect(); balances.extend(bridge_accounts.iter().map(|(acc, _)| (acc.clone(), 0))); for (_net_id, ext_network) in &self.networks { balances.extend(ext_network.ocw_keypairs.iter().map(|x| (x.1.clone(), 0))); } balances.sort_by_key(|x| x.0.clone()); balances.dedup_by_key(|x| x.0.clone()); BalancesConfig { balances } .assimilate_storage(&mut storage) .unwrap(); if !endowed_accounts.is_empty() { SudoConfig { key: endowed_accounts[0].0.clone(), } .assimilate_storage(&mut storage) .unwrap(); } MultisigConfig { accounts: bridge_accounts, } .assimilate_storage(&mut storage) .unwrap(); PermissionsConfig { initial_permission_owners: vec![], initial_permissions: Vec::new(), } .assimilate_storage(&mut storage) .unwrap(); TokensConfig { endowed_accounts: endowed_accounts.clone(), } .assimilate_storage(&mut storage) .unwrap(); AssetsConfig { endowed_assets: endowed_assets.into_iter().collect(), } .assimilate_storage(&mut storage) .unwrap(); EthBridgeConfig { networks: bridge_network_configs,
type GetEthNetworkId = EthNetworkId; type WeightInfo = (); }
random_line_split
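Elsewhere in this row, `ExtBuilder::add_network` hands out sequential network ids and stores each network's config under its id. A minimal sketch of that builder pattern follows, with simplified stand-in types (the real `NetworkConfig` carries peers, assets, reserves, and a bridge account):

use std::collections::HashMap;

// Simplified stand-in; only the peer count is kept here.
#[derive(Debug, Clone)]
struct NetworkConfig {
    peers: usize,
}

#[derive(Default)]
struct ExtBuilder {
    networks: HashMap<u32, NetworkConfig>,
    last_network_id: u32,
}

impl ExtBuilder {
    // Each call registers a config under the next sequential id and
    // returns that id, mirroring the mock's add_network.
    fn add_network(&mut self, peers: usize) -> u32 {
        let net_id = self.last_network_id;
        self.networks.insert(net_id, NetworkConfig { peers });
        self.last_network_id += 1;
        net_id
    }
}

fn main() {
    let mut builder = ExtBuilder::default();
    assert_eq!(builder.add_network(4), 0);
    assert_eq!(builder.add_network(3), 1);
    assert_eq!(builder.networks.len(), 2);
}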
mock.rs
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // Creating mock Runtime here use crate::{AssetConfig, Config, NetworkConfig}; use codec::{Codec, Decode, Encode}; use common::mock::ExistentialDeposits; use common::prelude::Balance; use common::{ Amount, AssetId32, AssetName, AssetSymbol, PredefinedAssetId, DEFAULT_BALANCE_PRECISION, VAL, }; use currencies::BasicCurrencyAdapter; use frame_support::dispatch::{DispatchInfo, GetDispatchInfo}; use frame_support::sp_io::TestExternalities; use frame_support::sp_runtime::app_crypto::sp_core; use frame_support::sp_runtime::app_crypto::sp_core::crypto::AccountId32; use frame_support::sp_runtime::app_crypto::sp_core::offchain::{OffchainExt, TransactionPoolExt}; use frame_support::sp_runtime::app_crypto::sp_core::{ecdsa, sr25519, Pair, Public}; use frame_support::sp_runtime::offchain::testing::{ OffchainState, PoolState, TestOffchainExt, TestTransactionPoolExt, }; use frame_support::sp_runtime::serde::{Serialize, Serializer}; use frame_support::sp_runtime::testing::Header; use frame_support::sp_runtime::traits::{ self, Applyable, BlakeTwo256, Checkable, DispatchInfoOf, Dispatchable, IdentifyAccount, IdentityLookup, PostDispatchInfoOf, SignedExtension, ValidateUnsigned, Verify, }; use frame_support::sp_runtime::transaction_validity::{ TransactionSource, TransactionValidity, TransactionValidityError, }; use frame_support::sp_runtime::{ self, ApplyExtrinsicResultWithInfo, MultiSignature, MultiSigner, Perbill, Percent, }; use frame_support::traits::GenesisBuild; use frame_support::weights::{Pays, Weight}; use frame_support::{construct_runtime, parameter_types}; use frame_system::offchain::{Account, SigningTypes}; use parking_lot::RwLock; use sp_core::H256; use sp_keystore::testing::KeyStore; use sp_keystore::KeystoreExt; use sp_std::collections::btree_set::BTreeSet; use sp_std::fmt::Debug; use sp_std::str::FromStr; use sp_std::sync::Arc; use std::collections::HashMap; use {crate as eth_bridge, frame_system}; pub const PSWAP: PredefinedAssetId = PredefinedAssetId::PSWAP; pub const XOR: PredefinedAssetId = PredefinedAssetId::XOR; /// An index to a block. pub type BlockNumber = u64; pub type Signature = MultiSignature; /// Some way of identifying an account on the chain. We intentionally make it equivalent /// to the public key of our transaction signing scheme. pub type AccountId = <<Signature as Verify>::Signer as IdentifyAccount>::AccountId; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Runtime>; type Block = frame_system::mocking::MockBlock<Runtime>; parameter_types! { pub const GetBaseAssetId: AssetId32<PredefinedAssetId> = AssetId32::from_asset_id(XOR); pub const DepositBase: u64 = 1; pub const DepositFactor: u64 = 1; pub const MaxSignatories: u16 = 4; pub const UnsignedPriority: u64 = 100; pub const EthNetworkId: <Runtime as Config>::NetworkId = 0; } #[derive(PartialEq, Eq, Clone, Encode, Decode, Debug)] pub struct MyTestXt<Call, Extra> { /// Signature of the extrinsic. pub signature: Option<(AccountId, Extra)>, /// Call of the extrinsic. 
pub call: Call, } parity_util_mem::malloc_size_of_is_0!(any: MyTestXt<Call, Extra>); impl<Call: Codec + Sync + Send, Context, Extra> Checkable<Context> for MyTestXt<Call, Extra> { type Checked = Self; fn check(self, _c: &Context) -> Result<Self::Checked, TransactionValidityError> { Ok(self) } } impl<Call: Codec + Sync + Send, Extra> traits::Extrinsic for MyTestXt<Call, Extra> { type Call = Call; type SignaturePayload = (AccountId, Extra); fn is_signed(&self) -> Option<bool> { Some(self.signature.is_some()) } fn new(c: Call, sig: Option<Self::SignaturePayload>) -> Option<Self> { Some(MyTestXt { signature: sig, call: c, }) } } impl SignedExtension for MyExtra { type AccountId = AccountId; type Call = Call; type AdditionalSigned = (); type Pre = (); const IDENTIFIER: &'static str = "testextension"; fn additional_signed(&self) -> Result<Self::AdditionalSigned, TransactionValidityError> { Ok(()) } } impl<Origin, Call, Extra> Applyable for MyTestXt<Call, Extra> where Call: 'static + Sized + Send + Sync + Clone + Eq + Codec + Debug + Dispatchable<Origin = Origin>, Extra: SignedExtension<AccountId = AccountId, Call = Call>, Origin: From<Option<AccountId32>>, { type Call = Call; /// Checks to see if this is a valid *transaction*. It returns information on it if so. fn validate<U: ValidateUnsigned<Call = Self::Call>>( &self, _source: TransactionSource, _info: &DispatchInfoOf<Self::Call>, _len: usize, ) -> TransactionValidity { Ok(Default::default()) } /// Executes all necessary logic needed prior to dispatch and deconstructs into function call, /// index and sender. fn apply<U: ValidateUnsigned<Call = Self::Call>>( self, info: &DispatchInfoOf<Self::Call>, len: usize, ) -> ApplyExtrinsicResultWithInfo<PostDispatchInfoOf<Self::Call>> { let maybe_who = if let Some((who, extra)) = self.signature { Extra::pre_dispatch(extra, &who, &self.call, info, len)?; Some(who) } else { Extra::pre_dispatch_unsigned(&self.call, info, len)?; None }; Ok(self.call.dispatch(maybe_who.into())) } } impl<Call, Extra> Serialize for MyTestXt<Call, Extra> where MyTestXt<Call, Extra>: Encode, { fn serialize<S>(&self, seq: S) -> Result<S::Ok, S::Error> where S: Serializer, { self.using_encoded(|bytes| seq.serialize_bytes(bytes)) } } impl<Call: Encode, Extra: Encode> GetDispatchInfo for MyTestXt<Call, Extra> { fn get_dispatch_info(&self) -> DispatchInfo { // for testing: weight == size. DispatchInfo { weight: self.encode().len() as _, pays_fee: Pays::No, ..Default::default() } } } #[derive(Debug, Clone, Copy, PartialEq, Eq, Encode, Decode)] pub struct
; pub type TestExtrinsic = MyTestXt<Call, MyExtra>; parameter_types! { pub const BlockHashCount: u64 = 250; pub const MaximumBlockWeight: Weight = 1024; pub const MaximumBlockLength: u32 = 2 * 1024; pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); pub const ExistentialDeposit: u128 = 0; } impl frame_system::Config for Runtime { type BaseCallFilter = (); type BlockWeights = (); type BlockLength = (); type Origin = Origin; type Call = Call; type Index = u64; type BlockNumber = u64; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = AccountId; type Lookup = IdentityLookup<Self::AccountId>; type Header = Header; type Event = Event; type BlockHashCount = BlockHashCount; type DbWeight = (); type Version = (); type AccountData = pallet_balances::AccountData<Balance>; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); type PalletInfo = PalletInfo; type SS58Prefix = (); } impl<T: SigningTypes> frame_system::offchain::SignMessage<T> for Runtime { type SignatureData = (); fn sign_message(&self, _message: &[u8]) -> Self::SignatureData { unimplemented!() } fn sign<TPayload, F>(&self, _f: F) -> Self::SignatureData where F: Fn(&Account<T>) -> TPayload, TPayload: frame_system::offchain::SignedPayload<T>, { unimplemented!() } } impl<LocalCall> frame_system::offchain::CreateSignedTransaction<LocalCall> for Runtime where Call: From<LocalCall>, { fn create_transaction<C: frame_system::offchain::AppCrypto<Self::Public, Self::Signature>>( call: Call, _public: <Signature as Verify>::Signer, account: <Runtime as frame_system::Config>::AccountId, _index: <Runtime as frame_system::Config>::Index, ) -> Option<( Call, <TestExtrinsic as sp_runtime::traits::Extrinsic>::SignaturePayload, )> { Some((call, (account, MyExtra {}))) } } impl frame_system::offchain::SigningTypes for Runtime { type Public = <Signature as Verify>::Signer; type Signature = Signature; } impl<C> frame_system::offchain::SendTransactionTypes<C> for Runtime where Call: From<C>, { type OverarchingCall = Call; type Extrinsic = TestExtrinsic; } impl pallet_balances::Config for Runtime { /// The type for recording an account's balance. type Balance = Balance; /// The ubiquitous event type. 
type Event = Event; type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type WeightInfo = (); type MaxLocks = (); } impl tokens::Config for Runtime { type Event = Event; type Balance = Balance; type Amount = Amount; type CurrencyId = <Runtime as assets::Config>::AssetId; type WeightInfo = (); type ExistentialDeposits = ExistentialDeposits; type OnDust = (); } impl currencies::Config for Runtime { type Event = Event; type MultiCurrency = Tokens; type NativeCurrency = BasicCurrencyAdapter<Runtime, Balances, Amount, BlockNumber>; type GetNativeCurrencyId = <Runtime as assets::Config>::GetBaseAssetId; type WeightInfo = (); } impl assets::Config for Runtime { type Event = Event; type ExtraAccountId = [u8; 32]; type ExtraAssetRecordArg = common::AssetIdExtraAssetRecordArg<common::DEXId, common::LiquiditySourceType, [u8; 32]>; type AssetId = common::AssetId32<PredefinedAssetId>; type GetBaseAssetId = GetBaseAssetId; type Currency = currencies::Module<Runtime>; type WeightInfo = (); } impl common::Config for Runtime { type DEXId = common::DEXId; type LstId = common::LiquiditySourceType; } impl permissions::Config for Runtime { type Event = Event; } impl bridge_multisig::Config for Runtime { type Call = Call; type Event = Event; type Currency = Balances; type DepositBase = DepositBase; type DepositFactor = DepositFactor; type MaxSignatories = MaxSignatories; type WeightInfo = (); } impl pallet_sudo::Config for Runtime { type Call = Call; type Event = Event; } impl crate::Config for Runtime { type PeerId = crate::crypto::TestAuthId; type Call = Call; type Event = Event; type NetworkId = u32; type GetEthNetworkId = EthNetworkId; type WeightInfo = (); } impl sp_runtime::traits::ExtrinsicMetadata for TestExtrinsic { const VERSION: u8 = 1; type SignedExtensions = (); } construct_runtime!( pub enum Runtime where Block = Block, NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic { System: frame_system::{Module, Call, Config, Storage, Event<T>}, Balances: pallet_balances::{Module, Call, Storage, Config<T>, Event<T>}, Multisig: bridge_multisig::{Module, Call, Storage, Config<T>, Event<T>}, Tokens: tokens::{Module, Call, Storage, Config<T>, Event<T>}, Currencies: currencies::{Module, Call, Storage, Event<T>}, Assets: assets::{Module, Call, Storage, Config<T>, Event<T>}, Permissions: permissions::{Module, Call, Storage, Config<T>, Event<T>}, Sudo: pallet_sudo::{Module, Call, Storage, Config<T>, Event<T>}, EthBridge: eth_bridge::{Module, Call, Storage, Config<T>, Event<T>}, } ); pub type SubstrateAccountId = <<Signature as Verify>::Signer as IdentifyAccount>::AccountId; pub struct State { pub networks: HashMap<u32, ExtendedNetworkConfig>, pub authority_account_id: AccountId32, pub pool_state: Arc<RwLock<PoolState>>, pub offchain_state: Arc<RwLock<OffchainState>>, } #[derive(Clone, Debug)] pub struct ExtendedNetworkConfig { pub ocw_keypairs: Vec<(MultiSigner, AccountId32, [u8; 32])>, pub config: NetworkConfig<Runtime>, } pub struct ExtBuilder { pub networks: HashMap<u32, ExtendedNetworkConfig>, last_network_id: u32, root_account_id: AccountId32, } impl Default for ExtBuilder { fn default() -> Self { let mut builder = Self { networks: Default::default(), last_network_id: Default::default(), root_account_id: get_account_id_from_seed::<sr25519::Public>("Alice"), }; builder.add_network( vec![ AssetConfig::Thischain { id: PSWAP.into() }, AssetConfig::Sidechain { id: XOR.into(), sidechain_id: sp_core::H160::from_str( "40fd72257597aa14c7231a7b1aaa29fce868f677", ) 
.unwrap(), owned: true, precision: DEFAULT_BALANCE_PRECISION, }, AssetConfig::Sidechain { id: VAL.into(), sidechain_id: sp_core::H160::from_str( "3f9feac97e5feb15d8bf98042a9a01b515da3dfb", ) .unwrap(), owned: true, precision: DEFAULT_BALANCE_PRECISION, }, ], Some(vec![ (XOR.into(), common::balance!(350000)), (VAL.into(), common::balance!(33900000)), ]), Some(4), ); builder } } impl ExtBuilder { pub fn new() -> Self { Self { networks: Default::default(), last_network_id: Default::default(), root_account_id: get_account_id_from_seed::<sr25519::Public>("Alice"), } } pub fn add_currency( &mut self, network_id: u32, currency: AssetConfig<AssetId32<PredefinedAssetId>>, ) { self.networks .get_mut(&network_id) .unwrap() .config .assets .push(currency); } pub fn add_network( &mut self, assets: Vec<AssetConfig<AssetId32<PredefinedAssetId>>>, reserves: Option<Vec<(AssetId32<PredefinedAssetId>, Balance)>>, peers_num: Option<usize>, ) -> u32 { let net_id = self.last_network_id; let multisig_account_id = bridge_multisig::Module::<Runtime>::multi_account_id( &self.root_account_id, 1, net_id as u64 + 10, ); let peers_keys = gen_peers_keys(&format!("OCW{}", net_id), peers_num.unwrap_or(4)); self.networks.insert( net_id, ExtendedNetworkConfig { config: NetworkConfig { initial_peers: peers_keys.iter().map(|(_, id, _)| id).cloned().collect(), bridge_account_id: multisig_account_id.clone(), assets, bridge_contract_address: Default::default(), reserves: reserves.unwrap_or_default(), }, ocw_keypairs: peers_keys, }, ); self.last_network_id += 1; net_id } pub fn build(self) -> (TestExternalities, State) { let (offchain, offchain_state) = TestOffchainExt::new(); let (pool, pool_state) = TestTransactionPoolExt::new(); let authority_account_id = bridge_multisig::Module::<Runtime>::multi_account_id(&self.root_account_id, 1, 0); let mut bridge_accounts = Vec::new(); let mut bridge_network_configs = Vec::new(); let mut endowed_accounts: Vec<(_, AssetId32<PredefinedAssetId>, _)> = Vec::new(); let mut networks: Vec<_> = self.networks.clone().into_iter().collect(); networks.sort_by(|(x, _), (y, _)| x.cmp(y)); for (_net_id, ext_network) in networks { bridge_network_configs.push(ext_network.config.clone()); endowed_accounts.extend(ext_network.config.assets.iter().cloned().map( |asset_config| { ( ext_network.config.bridge_account_id.clone(), asset_config.asset_id().clone(), 0, ) }, )); endowed_accounts.extend(ext_network.config.reserves.iter().cloned().map( |(asset_id, _balance)| (ext_network.config.bridge_account_id.clone(), asset_id, 0), )); bridge_accounts.push(( ext_network.config.bridge_account_id.clone(), bridge_multisig::MultisigAccount::new( ext_network .ocw_keypairs .iter() .map(|x| x.1.clone()) .collect(), Percent::from_parts(67), ), )); } // pallet_balances and orml_tokens no longer accept duplicate elements. 
let mut unique_endowed_accounts: Vec<(_, AssetId32<PredefinedAssetId>, _)> = Vec::new(); for acc in endowed_accounts { if let Some(unique_acc) = unique_endowed_accounts.iter_mut().find(|a| a.1 == acc.1) { unique_acc.2 += acc.2; } else { unique_endowed_accounts.push(acc); } } let endowed_accounts = unique_endowed_accounts; let endowed_assets: BTreeSet<_> = endowed_accounts .iter() .map(|x| { ( x.1, self.root_account_id.clone(), AssetSymbol(b"".to_vec()), AssetName(b"".to_vec()), 18, Balance::from(0u32), true, ) }) .collect(); let mut storage = frame_system::GenesisConfig::default() .build_storage::<Runtime>() .unwrap(); let mut balances: Vec<_> = endowed_accounts .iter() .map(|(acc, ..)| acc) .chain(vec![&self.root_account_id, &authority_account_id]) .map(|x| (x.clone(), Balance::from(0u32))) .collect(); balances.extend(bridge_accounts.iter().map(|(acc, _)| (acc.clone(), 0))); for (_net_id, ext_network) in &self.networks { balances.extend(ext_network.ocw_keypairs.iter().map(|x| (x.1.clone(), 0))); } balances.sort_by_key(|x| x.0.clone()); balances.dedup_by_key(|x| x.0.clone()); BalancesConfig { balances } .assimilate_storage(&mut storage) .unwrap(); if !endowed_accounts.is_empty() { SudoConfig { key: endowed_accounts[0].0.clone(), } .assimilate_storage(&mut storage) .unwrap(); } MultisigConfig { accounts: bridge_accounts, } .assimilate_storage(&mut storage) .unwrap(); PermissionsConfig { initial_permission_owners: vec![], initial_permissions: Vec::new(), } .assimilate_storage(&mut storage) .unwrap(); TokensConfig { endowed_accounts: endowed_accounts.clone(), } .assimilate_storage(&mut storage) .unwrap(); AssetsConfig { endowed_assets: endowed_assets.into_iter().collect(), } .assimilate_storage(&mut storage) .unwrap(); EthBridgeConfig { networks: bridge_network_configs,
MyExtra
identifier_name
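The `identifier_name` middle above is just the elided struct name. More substantively, the `build()` body these rows share dedupes endowed accounts because pallet_balances and orml_tokens reject duplicate genesis entries. A self-contained sketch of that merge step, with simplified (account, asset id, balance) tuples and the mock's behavior of keying on asset id alone:

// Entries with the same asset id are merged by summing balances; the first
// occurrence keeps its account, matching the find(|a| a.1 == acc.1) above.
fn dedup_endowed(accounts: Vec<(&'static str, u32, u128)>) -> Vec<(&'static str, u32, u128)> {
    let mut unique: Vec<(&'static str, u32, u128)> = Vec::new();
    for acc in accounts {
        if let Some(existing) = unique.iter_mut().find(|a| a.1 == acc.1) {
            existing.2 += acc.2; // merge the duplicate asset entry
        } else {
            unique.push(acc);
        }
    }
    unique
}

fn main() {
    let deduped = dedup_endowed(vec![("bridge", 1, 10), ("bridge", 1, 5), ("bridge", 2, 7)]);
    assert_eq!(deduped, vec![("bridge", 1, 15), ("bridge", 2, 7)]);
}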
mock.rs
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // Creating mock Runtime here use crate::{AssetConfig, Config, NetworkConfig}; use codec::{Codec, Decode, Encode}; use common::mock::ExistentialDeposits; use common::prelude::Balance; use common::{ Amount, AssetId32, AssetName, AssetSymbol, PredefinedAssetId, DEFAULT_BALANCE_PRECISION, VAL, }; use currencies::BasicCurrencyAdapter; use frame_support::dispatch::{DispatchInfo, GetDispatchInfo}; use frame_support::sp_io::TestExternalities; use frame_support::sp_runtime::app_crypto::sp_core; use frame_support::sp_runtime::app_crypto::sp_core::crypto::AccountId32; use frame_support::sp_runtime::app_crypto::sp_core::offchain::{OffchainExt, TransactionPoolExt}; use frame_support::sp_runtime::app_crypto::sp_core::{ecdsa, sr25519, Pair, Public}; use frame_support::sp_runtime::offchain::testing::{ OffchainState, PoolState, TestOffchainExt, TestTransactionPoolExt, }; use frame_support::sp_runtime::serde::{Serialize, Serializer}; use frame_support::sp_runtime::testing::Header; use frame_support::sp_runtime::traits::{ self, Applyable, BlakeTwo256, Checkable, DispatchInfoOf, Dispatchable, IdentifyAccount, IdentityLookup, PostDispatchInfoOf, SignedExtension, ValidateUnsigned, Verify, }; use frame_support::sp_runtime::transaction_validity::{ TransactionSource, TransactionValidity, TransactionValidityError, }; use frame_support::sp_runtime::{ self, ApplyExtrinsicResultWithInfo, MultiSignature, MultiSigner, Perbill, Percent, }; use frame_support::traits::GenesisBuild; use frame_support::weights::{Pays, Weight}; use frame_support::{construct_runtime, parameter_types}; use frame_system::offchain::{Account, SigningTypes}; use parking_lot::RwLock; use sp_core::H256; use sp_keystore::testing::KeyStore; use sp_keystore::KeystoreExt; use sp_std::collections::btree_set::BTreeSet; use sp_std::fmt::Debug; use sp_std::str::FromStr; use sp_std::sync::Arc; use std::collections::HashMap; use {crate as eth_bridge, frame_system}; pub const PSWAP: PredefinedAssetId = PredefinedAssetId::PSWAP; pub const XOR: PredefinedAssetId = PredefinedAssetId::XOR; /// An index to a block. pub type BlockNumber = u64; pub type Signature = MultiSignature; /// Some way of identifying an account on the chain. We intentionally make it equivalent /// to the public key of our transaction signing scheme. pub type AccountId = <<Signature as Verify>::Signer as IdentifyAccount>::AccountId; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Runtime>; type Block = frame_system::mocking::MockBlock<Runtime>; parameter_types! { pub const GetBaseAssetId: AssetId32<PredefinedAssetId> = AssetId32::from_asset_id(XOR); pub const DepositBase: u64 = 1; pub const DepositFactor: u64 = 1; pub const MaxSignatories: u16 = 4; pub const UnsignedPriority: u64 = 100; pub const EthNetworkId: <Runtime as Config>::NetworkId = 0; } #[derive(PartialEq, Eq, Clone, Encode, Decode, Debug)] pub struct MyTestXt<Call, Extra> { /// Signature of the extrinsic. pub signature: Option<(AccountId, Extra)>, /// Call of the extrinsic. 
pub call: Call, } parity_util_mem::malloc_size_of_is_0!(any: MyTestXt<Call, Extra>); impl<Call: Codec + Sync + Send, Context, Extra> Checkable<Context> for MyTestXt<Call, Extra> { type Checked = Self; fn check(self, _c: &Context) -> Result<Self::Checked, TransactionValidityError> { Ok(self) } } impl<Call: Codec + Sync + Send, Extra> traits::Extrinsic for MyTestXt<Call, Extra> { type Call = Call; type SignaturePayload = (AccountId, Extra); fn is_signed(&self) -> Option<bool> { Some(self.signature.is_some()) } fn new(c: Call, sig: Option<Self::SignaturePayload>) -> Option<Self> { Some(MyTestXt { signature: sig, call: c, }) } } impl SignedExtension for MyExtra { type AccountId = AccountId; type Call = Call; type AdditionalSigned = (); type Pre = (); const IDENTIFIER: &'static str = "testextension"; fn additional_signed(&self) -> Result<Self::AdditionalSigned, TransactionValidityError> { Ok(()) } } impl<Origin, Call, Extra> Applyable for MyTestXt<Call, Extra> where Call: 'static + Sized + Send + Sync + Clone + Eq + Codec + Debug + Dispatchable<Origin = Origin>, Extra: SignedExtension<AccountId = AccountId, Call = Call>, Origin: From<Option<AccountId32>>, { type Call = Call; /// Checks to see if this is a valid *transaction*. It returns information on it if so. fn validate<U: ValidateUnsigned<Call = Self::Call>>( &self, _source: TransactionSource, _info: &DispatchInfoOf<Self::Call>, _len: usize, ) -> TransactionValidity { Ok(Default::default()) } /// Executes all necessary logic needed prior to dispatch and deconstructs into function call, /// index and sender. fn apply<U: ValidateUnsigned<Call = Self::Call>>( self, info: &DispatchInfoOf<Self::Call>, len: usize, ) -> ApplyExtrinsicResultWithInfo<PostDispatchInfoOf<Self::Call>> { let maybe_who = if let Some((who, extra)) = self.signature { Extra::pre_dispatch(extra, &who, &self.call, info, len)?; Some(who) } else { Extra::pre_dispatch_unsigned(&self.call, info, len)?; None }; Ok(self.call.dispatch(maybe_who.into())) } } impl<Call, Extra> Serialize for MyTestXt<Call, Extra> where MyTestXt<Call, Extra>: Encode, { fn serialize<S>(&self, seq: S) -> Result<S::Ok, S::Error> where S: Serializer, { self.using_encoded(|bytes| seq.serialize_bytes(bytes)) } } impl<Call: Encode, Extra: Encode> GetDispatchInfo for MyTestXt<Call, Extra> { fn get_dispatch_info(&self) -> DispatchInfo { // for testing: weight == size. DispatchInfo { weight: self.encode().len() as _, pays_fee: Pays::No, ..Default::default() } } } #[derive(Debug, Clone, Copy, PartialEq, Eq, Encode, Decode)] pub struct MyExtra; pub type TestExtrinsic = MyTestXt<Call, MyExtra>; parameter_types! 
{ pub const BlockHashCount: u64 = 250; pub const MaximumBlockWeight: Weight = 1024; pub const MaximumBlockLength: u32 = 2 * 1024; pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); pub const ExistentialDeposit: u128 = 0; } impl frame_system::Config for Runtime { type BaseCallFilter = (); type BlockWeights = (); type BlockLength = (); type Origin = Origin; type Call = Call; type Index = u64; type BlockNumber = u64; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = AccountId; type Lookup = IdentityLookup<Self::AccountId>; type Header = Header; type Event = Event; type BlockHashCount = BlockHashCount; type DbWeight = (); type Version = (); type AccountData = pallet_balances::AccountData<Balance>; type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); type PalletInfo = PalletInfo; type SS58Prefix = (); } impl<T: SigningTypes> frame_system::offchain::SignMessage<T> for Runtime { type SignatureData = (); fn sign_message(&self, _message: &[u8]) -> Self::SignatureData { unimplemented!() } fn sign<TPayload, F>(&self, _f: F) -> Self::SignatureData where F: Fn(&Account<T>) -> TPayload, TPayload: frame_system::offchain::SignedPayload<T>, { unimplemented!() } } impl<LocalCall> frame_system::offchain::CreateSignedTransaction<LocalCall> for Runtime where Call: From<LocalCall>, { fn create_transaction<C: frame_system::offchain::AppCrypto<Self::Public, Self::Signature>>( call: Call, _public: <Signature as Verify>::Signer, account: <Runtime as frame_system::Config>::AccountId, _index: <Runtime as frame_system::Config>::Index, ) -> Option<( Call, <TestExtrinsic as sp_runtime::traits::Extrinsic>::SignaturePayload, )> { Some((call, (account, MyExtra {}))) } } impl frame_system::offchain::SigningTypes for Runtime { type Public = <Signature as Verify>::Signer; type Signature = Signature; } impl<C> frame_system::offchain::SendTransactionTypes<C> for Runtime where Call: From<C>, { type OverarchingCall = Call; type Extrinsic = TestExtrinsic; } impl pallet_balances::Config for Runtime { /// The type for recording an account's balance. type Balance = Balance; /// The ubiquitous event type. 
type Event = Event; type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type WeightInfo = (); type MaxLocks = (); } impl tokens::Config for Runtime { type Event = Event; type Balance = Balance; type Amount = Amount; type CurrencyId = <Runtime as assets::Config>::AssetId; type WeightInfo = (); type ExistentialDeposits = ExistentialDeposits; type OnDust = (); } impl currencies::Config for Runtime { type Event = Event; type MultiCurrency = Tokens; type NativeCurrency = BasicCurrencyAdapter<Runtime, Balances, Amount, BlockNumber>; type GetNativeCurrencyId = <Runtime as assets::Config>::GetBaseAssetId; type WeightInfo = (); } impl assets::Config for Runtime { type Event = Event; type ExtraAccountId = [u8; 32]; type ExtraAssetRecordArg = common::AssetIdExtraAssetRecordArg<common::DEXId, common::LiquiditySourceType, [u8; 32]>; type AssetId = common::AssetId32<PredefinedAssetId>; type GetBaseAssetId = GetBaseAssetId; type Currency = currencies::Module<Runtime>; type WeightInfo = (); } impl common::Config for Runtime { type DEXId = common::DEXId; type LstId = common::LiquiditySourceType; } impl permissions::Config for Runtime { type Event = Event; } impl bridge_multisig::Config for Runtime { type Call = Call; type Event = Event; type Currency = Balances; type DepositBase = DepositBase; type DepositFactor = DepositFactor; type MaxSignatories = MaxSignatories; type WeightInfo = (); } impl pallet_sudo::Config for Runtime { type Call = Call; type Event = Event; } impl crate::Config for Runtime { type PeerId = crate::crypto::TestAuthId; type Call = Call; type Event = Event; type NetworkId = u32; type GetEthNetworkId = EthNetworkId; type WeightInfo = (); } impl sp_runtime::traits::ExtrinsicMetadata for TestExtrinsic { const VERSION: u8 = 1; type SignedExtensions = (); } construct_runtime!( pub enum Runtime where Block = Block, NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic { System: frame_system::{Module, Call, Config, Storage, Event<T>}, Balances: pallet_balances::{Module, Call, Storage, Config<T>, Event<T>}, Multisig: bridge_multisig::{Module, Call, Storage, Config<T>, Event<T>}, Tokens: tokens::{Module, Call, Storage, Config<T>, Event<T>}, Currencies: currencies::{Module, Call, Storage, Event<T>}, Assets: assets::{Module, Call, Storage, Config<T>, Event<T>}, Permissions: permissions::{Module, Call, Storage, Config<T>, Event<T>}, Sudo: pallet_sudo::{Module, Call, Storage, Config<T>, Event<T>}, EthBridge: eth_bridge::{Module, Call, Storage, Config<T>, Event<T>}, } ); pub type SubstrateAccountId = <<Signature as Verify>::Signer as IdentifyAccount>::AccountId; pub struct State { pub networks: HashMap<u32, ExtendedNetworkConfig>, pub authority_account_id: AccountId32, pub pool_state: Arc<RwLock<PoolState>>, pub offchain_state: Arc<RwLock<OffchainState>>, } #[derive(Clone, Debug)] pub struct ExtendedNetworkConfig { pub ocw_keypairs: Vec<(MultiSigner, AccountId32, [u8; 32])>, pub config: NetworkConfig<Runtime>, } pub struct ExtBuilder { pub networks: HashMap<u32, ExtendedNetworkConfig>, last_network_id: u32, root_account_id: AccountId32, } impl Default for ExtBuilder { fn default() -> Self { let mut builder = Self { networks: Default::default(), last_network_id: Default::default(), root_account_id: get_account_id_from_seed::<sr25519::Public>("Alice"), }; builder.add_network( vec![ AssetConfig::Thischain { id: PSWAP.into() }, AssetConfig::Sidechain { id: XOR.into(), sidechain_id: sp_core::H160::from_str( "40fd72257597aa14c7231a7b1aaa29fce868f677", ) 
.unwrap(), owned: true, precision: DEFAULT_BALANCE_PRECISION, }, AssetConfig::Sidechain { id: VAL.into(), sidechain_id: sp_core::H160::from_str( "3f9feac97e5feb15d8bf98042a9a01b515da3dfb", ) .unwrap(), owned: true, precision: DEFAULT_BALANCE_PRECISION, }, ], Some(vec![ (XOR.into(), common::balance!(350000)), (VAL.into(), common::balance!(33900000)), ]), Some(4), ); builder } } impl ExtBuilder { pub fn new() -> Self { Self { networks: Default::default(), last_network_id: Default::default(), root_account_id: get_account_id_from_seed::<sr25519::Public>("Alice"), } } pub fn add_currency( &mut self, network_id: u32, currency: AssetConfig<AssetId32<PredefinedAssetId>>, ) { self.networks .get_mut(&network_id) .unwrap() .config .assets .push(currency); } pub fn add_network( &mut self, assets: Vec<AssetConfig<AssetId32<PredefinedAssetId>>>, reserves: Option<Vec<(AssetId32<PredefinedAssetId>, Balance)>>, peers_num: Option<usize>, ) -> u32
); self.last_network_id += 1; net_id } pub fn build(self) -> (TestExternalities, State) { let (offchain, offchain_state) = TestOffchainExt::new(); let (pool, pool_state) = TestTransactionPoolExt::new(); let authority_account_id = bridge_multisig::Module::<Runtime>::multi_account_id(&self.root_account_id, 1, 0); let mut bridge_accounts = Vec::new(); let mut bridge_network_configs = Vec::new(); let mut endowed_accounts: Vec<(_, AssetId32<PredefinedAssetId>, _)> = Vec::new(); let mut networks: Vec<_> = self.networks.clone().into_iter().collect(); networks.sort_by(|(x, _), (y, _)| x.cmp(y)); for (_net_id, ext_network) in networks { bridge_network_configs.push(ext_network.config.clone()); endowed_accounts.extend(ext_network.config.assets.iter().cloned().map( |asset_config| { ( ext_network.config.bridge_account_id.clone(), asset_config.asset_id().clone(), 0, ) }, )); endowed_accounts.extend(ext_network.config.reserves.iter().cloned().map( |(asset_id, _balance)| (ext_network.config.bridge_account_id.clone(), asset_id, 0), )); bridge_accounts.push(( ext_network.config.bridge_account_id.clone(), bridge_multisig::MultisigAccount::new( ext_network .ocw_keypairs .iter() .map(|x| x.1.clone()) .collect(), Percent::from_parts(67), ), )); } // pallet_balances and orml_tokens no longer accept duplicate elements. let mut unique_endowed_accounts: Vec<(_, AssetId32<PredefinedAssetId>, _)> = Vec::new(); for acc in endowed_accounts { if let Some(unique_acc) = unique_endowed_accounts.iter_mut().find(|a| a.1 == acc.1) { unique_acc.2 += acc.2; } else { unique_endowed_accounts.push(acc); } } let endowed_accounts = unique_endowed_accounts; let endowed_assets: BTreeSet<_> = endowed_accounts .iter() .map(|x| { ( x.1, self.root_account_id.clone(), AssetSymbol(b"".to_vec()), AssetName(b"".to_vec()), 18, Balance::from(0u32), true, ) }) .collect(); let mut storage = frame_system::GenesisConfig::default() .build_storage::<Runtime>() .unwrap(); let mut balances: Vec<_> = endowed_accounts .iter() .map(|(acc, ..)| acc) .chain(vec![&self.root_account_id, &authority_account_id]) .map(|x| (x.clone(), Balance::from(0u32))) .collect(); balances.extend(bridge_accounts.iter().map(|(acc, _)| (acc.clone(), 0))); for (_net_id, ext_network) in &self.networks { balances.extend(ext_network.ocw_keypairs.iter().map(|x| (x.1.clone(), 0))); } balances.sort_by_key(|x| x.0.clone()); balances.dedup_by_key(|x| x.0.clone()); BalancesConfig { balances } .assimilate_storage(&mut storage) .unwrap(); if !endowed_accounts.is_empty() { SudoConfig { key: endowed_accounts[0].0.clone(), } .assimilate_storage(&mut storage) .unwrap(); } MultisigConfig { accounts: bridge_accounts, } .assimilate_storage(&mut storage) .unwrap(); PermissionsConfig { initial_permission_owners: vec![], initial_permissions: Vec::new(), } .assimilate_storage(&mut storage) .unwrap(); TokensConfig { endowed_accounts: endowed_accounts.clone(), } .assimilate_storage(&mut storage) .unwrap(); AssetsConfig { endowed_assets: endowed_assets.into_iter().collect(), } .assimilate_storage(&mut storage) .unwrap(); EthBridgeConfig { networks: bridge_network_configs,
{ let net_id = self.last_network_id; let multisig_account_id = bridge_multisig::Module::<Runtime>::multi_account_id( &self.root_account_id, 1, net_id as u64 + 10, ); let peers_keys = gen_peers_keys(&format!("OCW{}", net_id), peers_num.unwrap_or(4)); self.networks.insert( net_id, ExtendedNetworkConfig { config: NetworkConfig { initial_peers: peers_keys.iter().map(|(_, id, _)| id).cloned().collect(), bridge_account_id: multisig_account_id.clone(), assets, bridge_contract_address: Default::default(), reserves: reserves.unwrap_or_default(), }, ocw_keypairs: peers_keys, },
identifier_body
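As a side note on the row above: the genesis-balance merge inside `build` can be exercised in isolation. Below is a minimal sketch with hypothetical stand-in types (plain integers in place of AccountId32 and AssetId32<PredefinedAssetId>); unlike the loop above, which matches duplicates on asset id alone (`a.1 == acc.1`), this keys on the (account, asset) pair.

use std::collections::BTreeMap;

// Hypothetical stand-ins for the runtime types used in the test above.
type AccountId = u8;
type AssetId = u32;
type Balance = u128;

// Merge duplicate endowment rows by summing balances, since pallet_balances
// and orml_tokens no longer accept duplicate genesis elements.
fn merge_endowed(rows: Vec<(AccountId, AssetId, Balance)>) -> Vec<(AccountId, AssetId, Balance)> {
    let mut merged: BTreeMap<(AccountId, AssetId), Balance> = BTreeMap::new();
    for (acc, asset, amount) in rows {
        *merged.entry((acc, asset)).or_insert(0) += amount;
    }
    // BTreeMap iteration gives a deterministic order, in the same spirit as
    // the networks.sort_by call in build().
    merged.into_iter().map(|((acc, asset), b)| (acc, asset, b)).collect()
}

fn main() {
    let rows = vec![(1, 10, 5), (1, 10, 7), (2, 10, 1)];
    assert_eq!(merge_endowed(rows), vec![(1, 10, 12), (2, 10, 1)]);
}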
js_lua_state.rs
use std::sync::Arc; use std::{fs, thread}; use crate::js_traits::{FromJs, ToJs}; use crate::lua_execution; use crate::value::Value; use mlua::{Lua, StdLib}; use neon::context::Context; use neon::handle::Handle; use neon::prelude::*; use neon::declare_types; fn lua_version() -> &'static str { if cfg!(feature = "lua54") { "lua54" } else if cfg!(feature = "lua53") { "lua53" } else if cfg!(feature = "lua52") { "lua52" } else if cfg!(feature = "lua51") { "lua51" } else if cfg!(feature = "luajit") { "luajit" } else { panic!("No version specified") } } /// LuaState Class wrapper. Holds on to the lua context reference, /// as well as the set of active lua libraries, and (eventually) the registered functions pub struct LuaState { libraries: StdLib, lua: Arc<Lua>, } impl LuaState { fn reset(&mut self) -> () { // By creating a new lua state, we remove all references allowing the js runtime // to exit if we've attached any event emitters. Without this, the program won't // close. Is there a more explicit way to close event listeners, or is relying on // the GC a normal/reasonable approach? let lua = unsafe { Lua::unsafe_new_with(self.libraries) }; self.lua = Arc::new(lua) } } impl Default for LuaState { fn default() -> Self
} fn flag_into_std_lib(flag: u32) -> Option<StdLib> { const ALL_SAFE: u32 = u32::MAX - 1; match flag { #[cfg(any(feature = "lua54", feature = "lua53", feature = "lua52"))] 0x1 => Some(StdLib::COROUTINE), 0x2 => Some(StdLib::TABLE), 0x4 => Some(StdLib::IO), 0x8 => Some(StdLib::OS), 0x10 => Some(StdLib::STRING), #[cfg(any(feature = "lua54", feature = "lua53"))] 0x20 => Some(StdLib::UTF8), #[cfg(any(feature = "lua52", feature = "luajit"))] 0x40 => Some(StdLib::BIT), 0x80 => Some(StdLib::MATH), 0x100 => Some(StdLib::PACKAGE), #[cfg(any(feature = "luajit"))] 0x200 => Some(StdLib::JIT), #[cfg(any(feature = "luajit"))] 0x4000_0000 => Some(StdLib::FFI), 0x8000_0000 => Some(StdLib::DEBUG), u32::MAX => Some(StdLib::ALL), ALL_SAFE => Some(StdLib::ALL_SAFE), _ => None, } } /// These correspond to our JS Enum. Used for a clearer error notification when including them in /// incompatible versions. fn flag_to_string(flag: u32) -> String { const ALL_SAFE: u32 = u32::MAX - 1; match flag { 0x1 => String::from("Coroutine"), 0x2 => String::from("Table"), 0x4 => String::from("Io"), 0x8 => String::from("Os"), 0x10 => String::from("String"), 0x20 => String::from("Utf8"), 0x40 => String::from("Bit"), 0x80 => String::from("Math"), 0x100 => String::from("Package"), 0x200 => String::from("Jit"), 0x4000_0000 => String::from("Ffi"), 0x8000_0000 => String::from("Debug"), u32::MAX => String::from("All"), ALL_SAFE => String::from("AllSafe"), _ => flag.to_string(), } } fn build_libraries_option( mut cx: CallContext<JsUndefined>, libs: Handle<JsValue>, ) -> NeonResult<StdLib> { if libs.is_a::<JsArray>() { let libflags: Vec<Handle<JsValue>> = libs .downcast_or_throw::<JsArray, CallContext<JsUndefined>>(&mut cx)? .to_vec(&mut cx)?; // Hack to get a StdLib(0) let mut libset = StdLib::TABLE ^ StdLib::TABLE; for value in libflags.into_iter() { let flag = value .downcast_or_throw::<JsNumber, CallContext<JsUndefined>>(&mut cx)? .value() as u32; if let Some(lib) = flag_into_std_lib(flag) { libset |= lib; } else { return cx.throw_error(format!( "unrecognized Library flag \"{}\" for {}", flag_to_string(flag), lua_version() )); } } Ok(libset) } else if libs.is_a::<JsUndefined>() { Ok(StdLib::ALL_SAFE) } else { cx.throw_error("Expected 'libraries' to be an array") } } fn init(mut cx: CallContext<JsUndefined>) -> NeonResult<LuaState> { let opt_options = cx.argument_opt(0); if let None = opt_options { return Ok(LuaState::default()); }; let options: Handle<JsObject> = opt_options.unwrap().downcast_or_throw(&mut cx)?; let libraries_key = cx.string("libraries"); let libs = options.get(&mut cx, libraries_key)?; let libraries = build_libraries_option(cx, libs)?; // Because we're allowing the end user to dynamically choose their libraries, // we're using the unsafe call in case they include `debug`. We need to notify // the end user in the documentation about the caveats of `debug`.
let lua = unsafe { let lua = Lua::unsafe_new_with(libraries); Arc::new(lua) }; Ok(LuaState { lua, libraries }) } fn do_string_sync( mut cx: MethodContext<JsLuaState>, code: String, name: Option<String>, ) -> JsResult<JsValue> { let this = cx.this(); let lua: &Lua = { let guard = cx.lock(); let state = this.borrow(&guard); &state.lua.clone() }; match lua_execution::do_string_sync(lua, code, name) { Ok(v) => v.to_js(&mut cx), Err(e) => cx.throw_error(e.to_string()), } } fn do_file_sync( mut cx: MethodContext<JsLuaState>, filename: String, chunk_name: Option<String>, ) -> JsResult<JsValue> { match fs::read_to_string(filename) { Ok(contents) => do_string_sync(cx, contents, chunk_name), Err(e) => cx.throw_error(e.to_string()), } } fn call_chunk<'a>( mut cx: MethodContext<'a, JsLuaState>, code: String, chunk_name: Option<String>, js_args: Handle<'a, JsArray>, ) -> JsResult<'a, JsValue> { let this = cx.this(); let mut args: Vec<Value> = vec![]; let js_args = js_args.to_vec(&mut cx)?; for arg in js_args.iter() { let value = Value::from_js(*arg, &mut cx)?; args.push(value); } let lua: &Lua = { let guard = cx.lock(); let state = this.borrow(&guard); &state.lua.clone() }; match lua_execution::call_chunk(&lua, code, chunk_name, args) { Ok(v) => v.to_js(&mut cx), Err(e) => cx.throw_error(e.to_string()), } } fn register_function<'a>( mut cx: MethodContext<'a, JsLuaState>, name: String, cb: Handle<JsFunction>, ) -> JsResult<'a, JsValue> { let this = cx.this(); let handler = EventHandler::new(&cx, this, cb); let lua: &Lua = { let guard = cx.lock(); let state = this.borrow(&guard); &state.lua.clone() }; let callback = move |values: Vec<Value>| { let handler = handler.clone(); thread::spawn(move || { handler.schedule_with(move |event_ctx, this, callback| { let arr = JsArray::new(event_ctx, values.len() as u32); // TODO remove unwraps, handle errors, and pass to callback if needed. for (i, value) in values.into_iter().enumerate() { let js_val = value.to_js(event_ctx).unwrap(); arr.set(event_ctx, i as u32, js_val).unwrap(); } // TODO How to pass an error via on('error') vs the current setup? let args: Vec<Handle<JsValue>> = vec![arr.upcast()]; let _result = callback.call(event_ctx, this, args); }); }); }; match lua_execution::register_function(lua, name, callback) { Ok(_) => Ok(cx.undefined().upcast()), Err(e) => cx.throw_error(e.to_string()), } } fn set_global<'a>( mut cx: MethodContext<'a, JsLuaState>, name: String, handle: Handle<'a, JsValue>, ) -> JsResult<'a, JsValue> { let this: Handle<JsLuaState> = cx.this(); let lua: &Lua = { let guard = cx.lock(); let state = this.borrow(&guard); &state.lua.clone() }; let set_value = Value::from_js(handle, &mut cx)?; match lua_execution::set_global(lua, name, set_value) { Ok(v) => v.to_js(&mut cx), Err(e) => cx.throw_error(e.to_string()), } } fn get_global(mut cx: MethodContext<JsLuaState>, name: String) -> JsResult<JsValue> { let this: Handle<JsLuaState> = cx.this(); let lua: &Lua = { let guard = cx.lock(); let state = this.borrow(&guard); &state.lua.clone() }; match lua_execution::get_global(lua, name) { Ok(v) => v.to_js(&mut cx), Err(e) => cx.throw_error(e.to_string()), } } declare_types! 
{ pub class JsLuaState for LuaState { init(cx) { init(cx) } method registerFunction(mut cx) { let name = cx.argument::<JsString>(0)?.value(); let cb = cx.argument::<JsFunction>(1)?; register_function(cx, name, cb) } method reset(mut cx) { let mut this = cx.this(); { let guard = cx.lock(); let mut state = this.borrow_mut(&guard); state.reset(); } Ok(cx.undefined().upcast()) } method close(mut cx) { let mut this = cx.this(); { let guard = cx.lock(); let mut state = this.borrow_mut(&guard); state.reset(); } Ok(cx.undefined().upcast()) } method doStringSync(mut cx) { let code = cx.argument::<JsString>(0)?.value(); let chunk_name = match cx.argument_opt(1) { Some(arg) => Some(arg.downcast::<JsString>().or_throw(&mut cx)?.value()), None => None }; do_string_sync(cx, code, chunk_name) } method doFileSync(mut cx) { let filename = cx.argument::<JsString>(0)?.value(); // TODO chop the filename on error a bit so it's legible. // currently the `root/stuff/...` is at the end vs `.../stuff/things.lua` let chunk_name = match cx.argument_opt(1) { Some(arg) => Some(arg.downcast::<JsString>().or_throw(&mut cx)?.value()), None => Some(String::from(filename.clone())) }; do_file_sync(cx, filename, chunk_name) } method callChunk(mut cx) { let code = cx.argument::<JsString>(0)?.value(); let (chunk_name, args) = match cx.len() { 2 => { let args = cx.argument::<JsArray>(1)?; Ok((None, args)) }, 3 => { let chunk_name = cx.argument::<JsString>(1)?.value(); let args = cx.argument::<JsArray>(2)?; Ok((Some(chunk_name), args)) }, _ => { let e = cx.string(format!("expected 2 or 3 arguments. Found: {}", cx.len())); cx.throw(e) } }?; call_chunk(cx, code, chunk_name, args) } method setGlobal(mut cx) { let name = cx.argument::<JsString>(0)?.value(); let value = cx.argument::<JsValue>(1)?; set_global(cx, name, value) } method getGlobal(mut cx) { let name = cx.argument::<JsString>(0)?.value(); get_global(cx, name) } } }
{ LuaState { libraries: StdLib::ALL_SAFE, lua: Arc::new(Lua::new_with(StdLib::ALL_SAFE).unwrap()), } }
identifier_body
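The flag mapping in `flag_into_std_lib` above is plain bitmask accumulation. Here is a self-contained sketch of the same accumulation loop using bare u32 masks instead of mlua::StdLib; the constant names below are illustrative, not mlua's.

// Illustrative masks only; the real values come from the JS enum and map to
// mlua::StdLib as in flag_into_std_lib above.
const TABLE: u32 = 0x2;
const STRING: u32 = 0x10;
const MATH: u32 = 0x80;

fn accumulate(flags: &[u32]) -> Result<u32, String> {
    // Start from an empty set; the code above builds its zero value with the
    // `StdLib::TABLE ^ StdLib::TABLE` trick because StdLib hides its raw bits.
    let mut set = 0u32;
    for &flag in flags {
        match flag {
            TABLE | STRING | MATH => set |= flag,
            unknown => return Err(format!("unrecognized Library flag {:#x}", unknown)),
        }
    }
    Ok(set)
}

fn main() {
    assert_eq!(accumulate(&[TABLE, MATH]), Ok(0x82));
    assert!(accumulate(&[0x4000]).is_err());
}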
js_lua_state.rs
use std::sync::Arc; use std::{fs, thread}; use crate::js_traits::{FromJs, ToJs}; use crate::lua_execution; use crate::value::Value; use mlua::{Lua, StdLib}; use neon::context::Context; use neon::handle::Handle; use neon::prelude::*; use neon::declare_types; fn lua_version() -> &'static str { if cfg!(feature = "lua54") { "lua54" } else if cfg!(feature = "lua53") { "lua53" } else if cfg!(feature = "lua52") { "lua52" } else if cfg!(feature = "lua51") { "lua51" } else if cfg!(feature = "luajit") { "luajit" } else { panic!("No version specified") } } /// LuaState Class wrapper. Holds on to the lua context reference, /// as well as the set of active lua libraries, and (eventually) the registered functions pub struct LuaState { libraries: StdLib, lua: Arc<Lua>, } impl LuaState { fn reset(&mut self) -> () { // By creating a new lua state, we remove all references allowing the js runtime // to exit if we've attached any event emitters. Without this, the program won't // close. Is there a more explicit way to close event listeners, or is relying on // the GC a normal/reasonable approach? let lua = unsafe { Lua::unsafe_new_with(self.libraries) }; self.lua = Arc::new(lua) } } impl Default for LuaState { fn default() -> Self { LuaState { libraries: StdLib::ALL_SAFE, lua: Arc::new(Lua::new_with(StdLib::ALL_SAFE).unwrap()), } } } fn flag_into_std_lib(flag: u32) -> Option<StdLib> { const ALL_SAFE: u32 = u32::MAX - 1; match flag { #[cfg(any(feature = "lua54", feature = "lua53", feature = "lua52"))] 0x1 => Some(StdLib::COROUTINE), 0x2 => Some(StdLib::TABLE), 0x4 => Some(StdLib::IO), 0x8 => Some(StdLib::OS), 0x10 => Some(StdLib::STRING), #[cfg(any(feature = "lua54", feature = "lua53"))] 0x20 => Some(StdLib::UTF8), #[cfg(any(feature = "lua52", feature = "luajit"))] 0x40 => Some(StdLib::BIT), 0x80 => Some(StdLib::MATH), 0x100 => Some(StdLib::PACKAGE), #[cfg(any(feature = "luajit"))] 0x200 => Some(StdLib::JIT), #[cfg(any(feature = "luajit"))] 0x4000_0000 => Some(StdLib::FFI), 0x8000_0000 => Some(StdLib::DEBUG), u32::MAX => Some(StdLib::ALL), ALL_SAFE => Some(StdLib::ALL_SAFE), _ => None, } } /// These correspond to our JS Enum. Used for a clearer error notification when including them in /// incompatible versions. fn flag_to_string(flag: u32) -> String { const ALL_SAFE: u32 = u32::MAX - 1; match flag { 0x1 => String::from("Coroutine"), 0x2 => String::from("Table"), 0x4 => String::from("Io"), 0x8 => String::from("Os"), 0x10 => String::from("String"), 0x20 => String::from("Utf8"), 0x40 => String::from("Bit"), 0x80 => String::from("Math"), 0x100 => String::from("Package"), 0x200 => String::from("Jit"), 0x4000_0000 => String::from("Ffi"), 0x8000_0000 => String::from("Debug"), u32::MAX => String::from("All"), ALL_SAFE => String::from("AllSafe"), _ => flag.to_string(), } } fn build_libraries_option( mut cx: CallContext<JsUndefined>, libs: Handle<JsValue>, ) -> NeonResult<StdLib> { if libs.is_a::<JsArray>() { let libflags: Vec<Handle<JsValue>> = libs .downcast_or_throw::<JsArray, CallContext<JsUndefined>>(&mut cx)? .to_vec(&mut cx)?; // Hack to get a StdLib(0) let mut libset = StdLib::TABLE ^ StdLib::TABLE; for value in libflags.into_iter() { let flag = value .downcast_or_throw::<JsNumber, CallContext<JsUndefined>>(&mut cx)? 
.value() as u32; if let Some(lib) = flag_into_std_lib(flag) { libset |= lib; } else { return cx.throw_error(format!( "unrecognized Library flag \"{}\" for {}", flag_to_string(flag), lua_version() )); } } Ok(libset) } else if libs.is_a::<JsUndefined>() { Ok(StdLib::ALL_SAFE) } else { cx.throw_error("Expected 'libraries' to be an array") } } fn init(mut cx: CallContext<JsUndefined>) -> NeonResult<LuaState> { let opt_options = cx.argument_opt(0); if let None = opt_options { return Ok(LuaState::default()); }; let options: Handle<JsObject> = opt_options.unwrap().downcast_or_throw(&mut cx)?; let libraries_key = cx.string("libraries");
let libraries = build_libraries_option(cx, libs)?; // Because we're allowing the end user to dynamically choose their libraries, // we're using the unsafe call in case they include `debug`. We need to notify // the end user in the documentation about the caveats of `debug`. let lua = unsafe { let lua = Lua::unsafe_new_with(libraries); Arc::new(lua) }; Ok(LuaState { lua, libraries }) } fn do_string_sync( mut cx: MethodContext<JsLuaState>, code: String, name: Option<String>, ) -> JsResult<JsValue> { let this = cx.this(); let lua: &Lua = { let guard = cx.lock(); let state = this.borrow(&guard); &state.lua.clone() }; match lua_execution::do_string_sync(lua, code, name) { Ok(v) => v.to_js(&mut cx), Err(e) => cx.throw_error(e.to_string()), } } fn do_file_sync( mut cx: MethodContext<JsLuaState>, filename: String, chunk_name: Option<String>, ) -> JsResult<JsValue> { match fs::read_to_string(filename) { Ok(contents) => do_string_sync(cx, contents, chunk_name), Err(e) => cx.throw_error(e.to_string()), } } fn call_chunk<'a>( mut cx: MethodContext<'a, JsLuaState>, code: String, chunk_name: Option<String>, js_args: Handle<'a, JsArray>, ) -> JsResult<'a, JsValue> { let this = cx.this(); let mut args: Vec<Value> = vec![]; let js_args = js_args.to_vec(&mut cx)?; for arg in js_args.iter() { let value = Value::from_js(*arg, &mut cx)?; args.push(value); } let lua: &Lua = { let guard = cx.lock(); let state = this.borrow(&guard); &state.lua.clone() }; match lua_execution::call_chunk(&lua, code, chunk_name, args) { Ok(v) => v.to_js(&mut cx), Err(e) => cx.throw_error(e.to_string()), } } fn register_function<'a>( mut cx: MethodContext<'a, JsLuaState>, name: String, cb: Handle<JsFunction>, ) -> JsResult<'a, JsValue> { let this = cx.this(); let handler = EventHandler::new(&cx, this, cb); let lua: &Lua = { let guard = cx.lock(); let state = this.borrow(&guard); &state.lua.clone() }; let callback = move |values: Vec<Value>| { let handler = handler.clone(); thread::spawn(move || { handler.schedule_with(move |event_ctx, this, callback| { let arr = JsArray::new(event_ctx, values.len() as u32); // TODO remove unwraps, handle errors, and pass to callback if needed. for (i, value) in values.into_iter().enumerate() { let js_val = value.to_js(event_ctx).unwrap(); arr.set(event_ctx, i as u32, js_val).unwrap(); } // TODO How to pass an error via on('error') vs the current setup? let args: Vec<Handle<JsValue>> = vec![arr.upcast()]; let _result = callback.call(event_ctx, this, args); }); }); }; match lua_execution::register_function(lua, name, callback) { Ok(_) => Ok(cx.undefined().upcast()), Err(e) => cx.throw_error(e.to_string()), } } fn set_global<'a>( mut cx: MethodContext<'a, JsLuaState>, name: String, handle: Handle<'a, JsValue>, ) -> JsResult<'a, JsValue> { let this: Handle<JsLuaState> = cx.this(); let lua: &Lua = { let guard = cx.lock(); let state = this.borrow(&guard); &state.lua.clone() }; let set_value = Value::from_js(handle, &mut cx)?; match lua_execution::set_global(lua, name, set_value) { Ok(v) => v.to_js(&mut cx), Err(e) => cx.throw_error(e.to_string()), } } fn get_global(mut cx: MethodContext<JsLuaState>, name: String) -> JsResult<JsValue> { let this: Handle<JsLuaState> = cx.this(); let lua: &Lua = { let guard = cx.lock(); let state = this.borrow(&guard); &state.lua.clone() }; match lua_execution::get_global(lua, name) { Ok(v) => v.to_js(&mut cx), Err(e) => cx.throw_error(e.to_string()), } } declare_types! 
{ pub class JsLuaState for LuaState { init(cx) { init(cx) } method registerFunction(mut cx) { let name = cx.argument::<JsString>(0)?.value(); let cb = cx.argument::<JsFunction>(1)?; register_function(cx, name, cb) } method reset(mut cx) { let mut this = cx.this(); { let guard = cx.lock(); let mut state = this.borrow_mut(&guard); state.reset(); } Ok(cx.undefined().upcast()) } method close(mut cx) { let mut this = cx.this(); { let guard = cx.lock(); let mut state = this.borrow_mut(&guard); state.reset(); } Ok(cx.undefined().upcast()) } method doStringSync(mut cx) { let code = cx.argument::<JsString>(0)?.value(); let chunk_name = match cx.argument_opt(1) { Some(arg) => Some(arg.downcast::<JsString>().or_throw(&mut cx)?.value()), None => None }; do_string_sync(cx, code, chunk_name) } method doFileSync(mut cx) { let filename = cx.argument::<JsString>(0)?.value(); // TODO chop the filename on error a bit so it's legible. // currently the `root/stuff/...` is at the end vs `.../stuff/things.lua` let chunk_name = match cx.argument_opt(1) { Some(arg) => Some(arg.downcast::<JsString>().or_throw(&mut cx)?.value()), None => Some(String::from(filename.clone())) }; do_file_sync(cx, filename, chunk_name) } method callChunk(mut cx) { let code = cx.argument::<JsString>(0)?.value(); let (chunk_name, args) = match cx.len() { 2 => { let args = cx.argument::<JsArray>(1)?; Ok((None, args)) }, 3 => { let chunk_name = cx.argument::<JsString>(1)?.value(); let args = cx.argument::<JsArray>(2)?; Ok((Some(chunk_name), args)) }, _ => { let e = cx.string(format!("expected 2 or 3 arguments. Found: {}", cx.len())); cx.throw(e) } }?; call_chunk(cx, code, chunk_name, args) } method setGlobal(mut cx) { let name = cx.argument::<JsString>(0)?.value(); let value = cx.argument::<JsValue>(1)?; set_global(cx, name, value) } method getGlobal(mut cx) { let name = cx.argument::<JsString>(0)?.value(); get_global(cx, name) } } }
let libs = options.get(&mut cx, libraries_key)?;
random_line_split
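The `reset` comment in the row above relies on Arc semantics: swapping in a fresh Arc lets the old state drop once the last clone (for example, one held by a registered callback) goes away. A minimal std-only sketch of that behaviour, with Arc<String> standing in for Arc<Lua>:

use std::sync::Arc;

struct State {
    data: Arc<String>, // stands in for Arc<Lua>
}

impl State {
    // Like LuaState::reset above: replace the Arc rather than mutate through it.
    fn reset(&mut self) {
        self.data = Arc::new(String::from("fresh"));
    }
}

fn main() {
    let mut s = State { data: Arc::new(String::from("old")) };
    let held_by_callback = Arc::clone(&s.data); // e.g. a registered event emitter
    s.reset();
    assert_eq!(*s.data, "fresh");
    assert_eq!(*held_by_callback, "old");
    // Only when the last clone drops is the old state freed, which is what
    // lets the JS runtime exit once listeners are gone.
    drop(held_by_callback);
}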
js_lua_state.rs
use std::sync::Arc; use std::{fs, thread}; use crate::js_traits::{FromJs, ToJs}; use crate::lua_execution; use crate::value::Value; use mlua::{Lua, StdLib}; use neon::context::Context; use neon::handle::Handle; use neon::prelude::*; use neon::declare_types; fn lua_version() -> &'static str { if cfg!(feature = "lua54") { "lua54" } else if cfg!(feature = "lua53") { "lua53" } else if cfg!(feature = "lua52") { "lua52" } else if cfg!(feature = "lua51") { "lua51" } else if cfg!(feature = "luajit") { "luajit" } else { panic!("No version specified") } } /// LuaState Class wrapper. Holds on to the lua context reference, /// as well as the set of active lua libraries, and (eventually) the registered functions pub struct LuaState { libraries: StdLib, lua: Arc<Lua>, } impl LuaState { fn reset(&mut self) -> () { // By creating a new lua state, we remove all references allowing the js runtime // to exit if we've attached any event emitters. Without this, the program won't // close. Is there a more explicit way to close event listeners, or is relying on // the GC a normal/reasonable approach? let lua = unsafe { Lua::unsafe_new_with(self.libraries) }; self.lua = Arc::new(lua) } } impl Default for LuaState { fn default() -> Self { LuaState { libraries: StdLib::ALL_SAFE, lua: Arc::new(Lua::new_with(StdLib::ALL_SAFE).unwrap()), } } } fn flag_into_std_lib(flag: u32) -> Option<StdLib> { const ALL_SAFE: u32 = u32::MAX - 1; match flag { #[cfg(any(feature = "lua54", feature = "lua53", feature = "lua52"))] 0x1 => Some(StdLib::COROUTINE), 0x2 => Some(StdLib::TABLE), 0x4 => Some(StdLib::IO), 0x8 => Some(StdLib::OS), 0x10 => Some(StdLib::STRING), #[cfg(any(feature = "lua54", feature = "lua53"))] 0x20 => Some(StdLib::UTF8), #[cfg(any(feature = "lua52", feature = "luajit"))] 0x40 => Some(StdLib::BIT), 0x80 => Some(StdLib::MATH), 0x100 => Some(StdLib::PACKAGE), #[cfg(any(feature = "luajit"))] 0x200 => Some(StdLib::JIT), #[cfg(any(feature = "luajit"))] 0x4000_0000 => Some(StdLib::FFI), 0x8000_0000 => Some(StdLib::DEBUG), u32::MAX => Some(StdLib::ALL), ALL_SAFE => Some(StdLib::ALL_SAFE), _ => None, } } /// These correspond to our JS Enum. Used for a clearer error notification when including them in /// incompatible versions. fn flag_to_string(flag: u32) -> String { const ALL_SAFE: u32 = u32::MAX - 1; match flag { 0x1 => String::from("Coroutine"), 0x2 => String::from("Table"), 0x4 => String::from("Io"), 0x8 => String::from("Os"), 0x10 => String::from("String"), 0x20 => String::from("Utf8"), 0x40 => String::from("Bit"), 0x80 => String::from("Math"), 0x100 => String::from("Package"), 0x200 => String::from("Jit"), 0x4000_0000 => String::from("Ffi"), 0x8000_0000 => String::from("Debug"), u32::MAX => String::from("All"), ALL_SAFE => String::from("AllSafe"), _ => flag.to_string(), } } fn build_libraries_option( mut cx: CallContext<JsUndefined>, libs: Handle<JsValue>, ) -> NeonResult<StdLib> { if libs.is_a::<JsArray>() { let libflags: Vec<Handle<JsValue>> = libs .downcast_or_throw::<JsArray, CallContext<JsUndefined>>(&mut cx)? .to_vec(&mut cx)?; // Hack to get a StdLib(0) let mut libset = StdLib::TABLE ^ StdLib::TABLE; for value in libflags.into_iter() { let flag = value .downcast_or_throw::<JsNumber, CallContext<JsUndefined>>(&mut cx)? 
.value() as u32; if let Some(lib) = flag_into_std_lib(flag) { libset |= lib; } else { return cx.throw_error(format!( "unrecognized Library flag \"{}\" for {}", flag_to_string(flag), lua_version() )); } } Ok(libset) } else if libs.is_a::<JsUndefined>() { Ok(StdLib::ALL_SAFE) } else { cx.throw_error("Expected 'libraries' to be an array") } } fn init(mut cx: CallContext<JsUndefined>) -> NeonResult<LuaState> { let opt_options = cx.argument_opt(0); if let None = opt_options { return Ok(LuaState::default()); }; let options: Handle<JsObject> = opt_options.unwrap().downcast_or_throw(&mut cx)?; let libraries_key = cx.string("libraries"); let libs = options.get(&mut cx, libraries_key)?; let libraries = build_libraries_option(cx, libs)?; // Because we're allowing the end user to dynamically choose their libraries, // we're using the unsafe call in case they include `debug`. We need to notify // the end user in the documentation about the caveats of `debug`. let lua = unsafe { let lua = Lua::unsafe_new_with(libraries); Arc::new(lua) }; Ok(LuaState { lua, libraries }) } fn
( mut cx: MethodContext<JsLuaState>, code: String, name: Option<String>, ) -> JsResult<JsValue> { let this = cx.this(); let lua: &Lua = { let guard = cx.lock(); let state = this.borrow(&guard); &state.lua.clone() }; match lua_execution::do_string_sync(lua, code, name) { Ok(v) => v.to_js(&mut cx), Err(e) => cx.throw_error(e.to_string()), } } fn do_file_sync( mut cx: MethodContext<JsLuaState>, filename: String, chunk_name: Option<String>, ) -> JsResult<JsValue> { match fs::read_to_string(filename) { Ok(contents) => do_string_sync(cx, contents, chunk_name), Err(e) => cx.throw_error(e.to_string()), } } fn call_chunk<'a>( mut cx: MethodContext<'a, JsLuaState>, code: String, chunk_name: Option<String>, js_args: Handle<'a, JsArray>, ) -> JsResult<'a, JsValue> { let this = cx.this(); let mut args: Vec<Value> = vec![]; let js_args = js_args.to_vec(&mut cx)?; for arg in js_args.iter() { let value = Value::from_js(*arg, &mut cx)?; args.push(value); } let lua: &Lua = { let guard = cx.lock(); let state = this.borrow(&guard); &state.lua.clone() }; match lua_execution::call_chunk(&lua, code, chunk_name, args) { Ok(v) => v.to_js(&mut cx), Err(e) => cx.throw_error(e.to_string()), } } fn register_function<'a>( mut cx: MethodContext<'a, JsLuaState>, name: String, cb: Handle<JsFunction>, ) -> JsResult<'a, JsValue> { let this = cx.this(); let handler = EventHandler::new(&cx, this, cb); let lua: &Lua = { let guard = cx.lock(); let state = this.borrow(&guard); &state.lua.clone() }; let callback = move |values: Vec<Value>| { let handler = handler.clone(); thread::spawn(move || { handler.schedule_with(move |event_ctx, this, callback| { let arr = JsArray::new(event_ctx, values.len() as u32); // TODO remove unwraps, handle errors, and pass to callback if needed. for (i, value) in values.into_iter().enumerate() { let js_val = value.to_js(event_ctx).unwrap(); arr.set(event_ctx, i as u32, js_val).unwrap(); } // TODO How to pass an error via on('error') vs the current setup? let args: Vec<Handle<JsValue>> = vec![arr.upcast()]; let _result = callback.call(event_ctx, this, args); }); }); }; match lua_execution::register_function(lua, name, callback) { Ok(_) => Ok(cx.undefined().upcast()), Err(e) => cx.throw_error(e.to_string()), } } fn set_global<'a>( mut cx: MethodContext<'a, JsLuaState>, name: String, handle: Handle<'a, JsValue>, ) -> JsResult<'a, JsValue> { let this: Handle<JsLuaState> = cx.this(); let lua: &Lua = { let guard = cx.lock(); let state = this.borrow(&guard); &state.lua.clone() }; let set_value = Value::from_js(handle, &mut cx)?; match lua_execution::set_global(lua, name, set_value) { Ok(v) => v.to_js(&mut cx), Err(e) => cx.throw_error(e.to_string()), } } fn get_global(mut cx: MethodContext<JsLuaState>, name: String) -> JsResult<JsValue> { let this: Handle<JsLuaState> = cx.this(); let lua: &Lua = { let guard = cx.lock(); let state = this.borrow(&guard); &state.lua.clone() }; match lua_execution::get_global(lua, name) { Ok(v) => v.to_js(&mut cx), Err(e) => cx.throw_error(e.to_string()), } } declare_types! 
{ pub class JsLuaState for LuaState { init(cx) { init(cx) } method registerFunction(mut cx) { let name = cx.argument::<JsString>(0)?.value(); let cb = cx.argument::<JsFunction>(1)?; register_function(cx, name, cb) } method reset(mut cx) { let mut this = cx.this(); { let guard = cx.lock(); let mut state = this.borrow_mut(&guard); state.reset(); } Ok(cx.undefined().upcast()) } method close(mut cx) { let mut this = cx.this(); { let guard = cx.lock(); let mut state = this.borrow_mut(&guard); state.reset(); } Ok(cx.undefined().upcast()) } method doStringSync(mut cx) { let code = cx.argument::<JsString>(0)?.value(); let chunk_name = match cx.argument_opt(1) { Some(arg) => Some(arg.downcast::<JsString>().or_throw(&mut cx)?.value()), None => None }; do_string_sync(cx, code, chunk_name) } method doFileSync(mut cx) { let filename = cx.argument::<JsString>(0)?.value(); // TODO chop the filename on error a bit so it's legible. // currently the `root/stuff/...` is at the end vs `.../stuff/things.lua` let chunk_name = match cx.argument_opt(1) { Some(arg) => Some(arg.downcast::<JsString>().or_throw(&mut cx)?.value()), None => Some(String::from(filename.clone())) }; do_file_sync(cx, filename, chunk_name) } method callChunk(mut cx) { let code = cx.argument::<JsString>(0)?.value(); let (chunk_name, args) = match cx.len() { 2 => { let args = cx.argument::<JsArray>(1)?; Ok((None, args)) }, 3 => { let chunk_name = cx.argument::<JsString>(1)?.value(); let args = cx.argument::<JsArray>(2)?; Ok((Some(chunk_name), args)) }, _ => { let e = cx.string(format!("expected 2 or 3 arguments. Found: {}", cx.len())); cx.throw(e) } }?; call_chunk(cx, code, chunk_name, args) } method setGlobal(mut cx) { let name = cx.argument::<JsString>(0)?.value(); let value = cx.argument::<JsValue>(1)?; set_global(cx, name, value) } method getGlobal(mut cx) { let name = cx.argument::<JsString>(0)?.value(); get_global(cx, name) } } }
do_string_sync
identifier_name
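The `register_function` body in the row above bridges Lua calls onto the JS thread through neon's EventHandler. A rough std-only analogue follows, with an mpsc channel and a worker thread standing in for EventHandler::schedule_with; the real neon scheduling API differs, so treat this only as a model of the data flow.

use std::sync::mpsc;
use std::thread;

fn main() {
    // The channel plays the role of EventHandler: the Lua-facing closure only
    // enqueues values, and a separate thread acts as the JS callback side.
    let (tx, rx) = mpsc::channel::<Vec<i64>>();
    let js_side = thread::spawn(move || {
        for values in rx {
            // The real code builds a JsArray from `values` and calls back here.
            println!("callback received {:?}", values);
        }
    });
    let lua_callback = move |values: Vec<i64>| {
        tx.send(values).expect("receiver hung up");
    };
    lua_callback(vec![1, 2, 3]);
    drop(lua_callback); // dropping the last sender ends the receiving loop
    js_side.join().unwrap();
}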
main.rs
use std::{fs, net::ToSocketAddrs, path::PathBuf, sync::Arc}; use structopt::StructOpt; use url::Url; use tracing::{Level, info}; use bevy::{ input::{ keyboard::ElementState as PressState, mouse::{MouseButtonInput, MouseScrollUnit, MouseWheel}, }, prelude::*, render::mesh::{Mesh, VertexAttribute} }; use bounded_planet::{ camera::*, networking::{events::*, packets::*, systems::*} }; // The thresholds for window edge. const CURSOR_H_THRESHOLD: f32 = 0.55; const CURSOR_V_THRESHOLD: f32 = 0.42; /// The stage at which the [`CameraBP`] cache is either updated or used to fill /// in the action cache now. const CAM_CACHE_UPDATE: &str = "push_cam_update"; #[derive(Default)] struct MoveCam { right: Option<f32>, forward: Option<f32>, } #[derive(StructOpt, Debug)] #[structopt(name = "client")] struct Opt { /// Address to connect to #[structopt(long="url", default_value="quic://localhost:4433")] url: Url, /// TLS certificate in PEM format #[structopt(parse(from_os_str), short="c", long="cert", default_value="./certs/cert.pem")] cert: PathBuf, /// Accept any TLS certificate from the server even if it is invalid #[structopt(short="a", long="accept_any")] accept_any_cert: bool } fn main() -> Result<(), Box<dyn std::error::Error>>
#[tokio::main] async fn run(options: Opt) -> Result<(), Box<dyn std::error::Error>> { let path = std::env::current_dir().unwrap(); println!("The current directory is {}", path.display()); tracing::subscriber::set_global_default( tracing_subscriber::FmtSubscriber::builder() .with_max_level(Level::INFO) .finish(), ) .expect("Failed to configure logging"); // Resolve URL from options let url = options.url; let remote = (url.host_str().expect("Failed to get host string from URL"), url.port().unwrap_or(4433)) .to_socket_addrs()? .next() .expect("couldn't resolve to an address"); // Create a Bevy app let mut app = App::build(); let cert = get_cert(&options.cert)?; app.add_plugin(bounded_planet::networking::client::plugin::Network { addr: remote, url, cert, accept_any_cert: options.accept_any_cert }); app.init_resource::<PingResponderState>(); app.add_system(respond_to_pings.system()); app.init_resource::<NetEventLoggerState>(); app.add_system(log_net_events.system()); app.init_resource::<MoveCam>(); app.add_resource(Msaa { samples: 4 }); app.add_default_plugins(); app.add_plugin(CameraBPPlugin::default()); app.add_startup_system(setup_scene.system()); app.add_system_to_stage(stage::EVENT_UPDATE, act_camera_on_window_edge.system()); app.add_system_to_stage(stage::EVENT_UPDATE, act_on_scroll_wheel.system()); app.add_stage_after(stage::EVENT_UPDATE, CAM_CACHE_UPDATE); app.add_system_to_stage(CAM_CACHE_UPDATE, use_or_update_action_cache.system()); app.add_system(play_every_sound_on_mb1.system()); app.init_resource::<TileReceivedState>(); app.add_system(handle_tile_received.system()); app.init_resource::<RequestTileOnConnectedState>(); app.add_system(request_tile_on_connected.system()); // Run it forever app.run(); Ok(()) } /// Fetch certificates to use fn get_cert(cert_path: &PathBuf) -> Result<quinn::Certificate, Box<dyn std::error::Error>> { info!("Loading Cert: {:?}", cert_path); Ok(quinn::Certificate::from_der(&fs::read(cert_path)?)?) } #[derive(Default)] pub struct PingResponderState { pub event_reader: EventReader<ReceiveEvent>, } fn respond_to_pings( mut state: ResMut<PingResponderState>, receiver: ResMut<Events<ReceiveEvent>>, mut sender: ResMut<Events<SendEvent>>, ) { for evt in state.event_reader.iter(&receiver) { if let ReceiveEvent::ReceivedPacket { ref connection, data } = evt { if let Packet::Ping(Ping { timestamp }) = **data { sender.send(SendEvent::SendPacket { connection: *connection, stream: StreamType::PingPong, data: Arc::new(Packet::Pong(Pong { timestamp })) }); info!("Received Ping, sending pong. 
{:?}", connection); } } } } #[derive(Default)] pub struct TileReceivedState { pub event_reader: EventReader<ReceiveEvent>, } /// When a tile is received from the server, we load it into the scene fn handle_tile_received( mut commands: Commands, asset_server: Res<AssetServer>, mut state: ResMut<TileReceivedState>, receiver: ResMut<Events<ReceiveEvent>>, mut meshes: ResMut<Assets<Mesh>>, mut textures: ResMut<Assets<Texture>>, mut materials: ResMut<Assets<StandardMaterial>> ) { for evt in state.event_reader.iter(&receiver) { if let ReceiveEvent::ReceivedPacket { connection: ref _connection, data } = evt { if let Packet::WorldTileData(WorldTileData { mesh_data }) = (**data).clone() { info!("Loading tile received from server."); let land_texture_top_handle = asset_server .load_sync(&mut textures, "content/textures/CoveWorldTop.png") .expect("Failed to load CoveWorldTop.png"); commands.spawn(PbrComponents { mesh: meshes.add(Mesh { primitive_topology: bevy::render::pipeline::PrimitiveTopology::TriangleList, attributes: vec![ VertexAttribute::position(mesh_data.vertices), VertexAttribute::normal(mesh_data.normals), VertexAttribute::uv(mesh_data.uvs), ], indices: Some(mesh_data.indices), }), material: materials.add(StandardMaterial { albedo_texture: Some(land_texture_top_handle), shaded: true, ..Default::default() }), ..Default::default() }); info!("Finished loading tile."); } } } } #[derive(Default)] struct RequestTileOnConnectedState { pub event_reader: EventReader<ReceiveEvent>, } /// When the client connects to the server, request a tile fn request_tile_on_connected( mut state: ResMut<RequestTileOnConnectedState>, mut sender: ResMut<Events<SendEvent>>, receiver: ResMut<Events<ReceiveEvent>> ) { for evt in state.event_reader.iter(&receiver) { if let ReceiveEvent::Connected(connection, _) = evt { info!("Requesting tile because connected to server..."); sender.send(SendEvent::SendPacket { connection: *connection, stream: StreamType::WorldTileData, data: Arc::new(Packet::WorldTileDataRequest(WorldTileDataRequest { //todo(#46): Respect request coordinates (x, y lod) x: 0, y: 0, lod: 0 })) }); } } } /// set up a simple 3D scene with landscape? fn setup_scene( mut commands: Commands, asset_server: Res<AssetServer>, mut meshes: ResMut<Assets<Mesh>>, // mut textures: ResMut<Assets<Texture>>, mut materials: ResMut<Assets<StandardMaterial>>, mut sounds: ResMut<Assets<AudioSource>>, ) { asset_server .load_sync(&mut sounds, "content/textures/test_sound.mp3") .expect("Failed to load test_sound.mp3"); // add entities to the world commands // cube .spawn(PbrComponents { mesh: meshes.add(Mesh::from(shape::Cube { size: 1.0 })), material: materials.add(Color::rgb(0.5, 0.4, 0.3).into()), transform: Transform::from_translation(Vec3::new(-20.0, 1.0, -20.0)), ..Default::default() }) // light .spawn(LightComponents { transform: Transform::from_translation(Vec3::new(4.0, 8.0, 4.0)), light: Light { color: Color::WHITE, fov: 90f32, depth: 0f32..100.0 }, ..Default::default() }) // camera .spawn(Camera3dComponents { transform: Transform::from_translation_rotation( Vec3::new(20.0, 20.0, 20.0), Quat::from_rotation_ypr(2.7, -0.75, 0.0) ), ..Default::default() }) .with(CameraBPConfig { forward_weight: -0.01, back_weight: 0.01, left_weight: -0.01, right_weight: 0.01, ..Default::default() }); } /// Pushes camera actions based upon mouse movements near the window edge. 
fn act_camera_on_window_edge( wins: Res<Windows>, pos: Res<Events<CursorMoved>>, mut mcam: ResMut<MoveCam>, ) { if let Some(e) = pos.get_reader().find_latest(&pos, |e| e.id.is_primary()) { let (mut mouse_x, mut mouse_y) = (e.position.x(), e.position.y()); let window = wins.get(e.id).expect("Couldn't get primary window."); let (window_x, window_y) = (window.width as f32, window.height as f32); // map (mouse_x, mouse_y) into [-1, 1]^2 mouse_x /= window_x / 2.0; mouse_y /= window_y / 2.0; mouse_x -= 1.0; mouse_y -= 1.0; let angle = mouse_x.atan2(mouse_y); let (ax, ay) = (angle.sin(), angle.cos()); let in_rect = (-CURSOR_H_THRESHOLD <= mouse_x && mouse_x <= CURSOR_H_THRESHOLD) && (-CURSOR_V_THRESHOLD <= mouse_y && mouse_y <= CURSOR_V_THRESHOLD); if !in_rect && ax.is_finite() && ay.is_finite() { mcam.right = Some(ax); mcam.forward = Some(ay); } else { mcam.right = None; mcam.forward = None; } } } /// Pushes camera actions based upon scroll wheel movement. fn act_on_scroll_wheel( mouse_wheel: Res<Events<MouseWheel>>, mut acts: ResMut<Events<CameraBPAction>>, ) { for mw in mouse_wheel.get_reader().iter(&mouse_wheel) { /// If scrolling units are reported in lines rather than pixels, /// multiply the returned horizontal scrolling amount by this. const LINE_SIZE: f32 = 14.0; let w = mw.y.abs() * if let MouseScrollUnit::Line = mw.unit { LINE_SIZE } else { 1.0 }; if mw.y > 0.0 { acts.send(CameraBPAction::ZoomIn(Some(w))) } else if mw.y < 0.0 { acts.send(CameraBPAction::ZoomOut(Some(w))) } } } /// Depending on `dirty`, either update the local `cache` or fill the event /// queue for [`CameraBPAction`] with the locally cached copy. fn use_or_update_action_cache(mcam: Res<MoveCam>, mut acts: ResMut<Events<CameraBPAction>>) { if let Some(w) = mcam.right { acts.send(CameraBPAction::MoveRight(Some(w))) } if let Some(w) = mcam.forward { acts.send(CameraBPAction::MoveForward(Some(w))) } } fn play_every_sound_on_mb1( mev: Res<Events<MouseButtonInput>>, fxs: Res<Assets<AudioSource>>, output: Res<AudioOutput>, ) { for mev in mev.get_reader().iter(&mev) { if mev.button == MouseButton::Left && mev.state == PressState::Pressed { for (fx, _) in fxs.iter() { output.play(fx); } } } }
{ let opt = Opt::from_args(); run(opt) }
identifier_body
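The address resolution in `run` above is ordinary std networking. A minimal sketch of just that step, with plain host/port values in place of the url crate (the quic://localhost:4433 default maps to these):

use std::net::ToSocketAddrs;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Host and port as would be pulled out of the parsed URL above.
    let (host, port) = ("localhost", 4433u16);
    let remote = (host, port)
        .to_socket_addrs()?
        .next()
        .expect("couldn't resolve to an address");
    println!("resolved {}:{} -> {}", host, port, remote);
    Ok(())
}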
main.rs
use std::{fs, net::ToSocketAddrs, path::PathBuf, sync::Arc}; use structopt::StructOpt; use url::Url; use tracing::{Level, info}; use bevy::{ input::{ keyboard::ElementState as PressState, mouse::{MouseButtonInput, MouseScrollUnit, MouseWheel}, }, prelude::*, render::mesh::{Mesh, VertexAttribute} }; use bounded_planet::{ camera::*, networking::{events::*, packets::*, systems::*} }; // The thresholds for window edge. const CURSOR_H_THRESHOLD: f32 = 0.55; const CURSOR_V_THRESHOLD: f32 = 0.42; /// The stage at which the [`CameraBP`] cache is either updated or used to fill /// in the action cache now. const CAM_CACHE_UPDATE: &str = "push_cam_update"; #[derive(Default)] struct MoveCam { right: Option<f32>, forward: Option<f32>, } #[derive(StructOpt, Debug)] #[structopt(name = "client")] struct Opt { /// Address to connect to #[structopt(long="url", default_value="quic://localhost:4433")] url: Url, /// TLS certificate in PEM format #[structopt(parse(from_os_str), short="c", long="cert", default_value="./certs/cert.pem")] cert: PathBuf, /// Accept any TLS certificate from the server even if it is invalid #[structopt(short="a", long="accept_any")] accept_any_cert: bool } fn main() -> Result<(), Box<dyn std::error::Error>> { let opt = Opt::from_args(); run(opt) } #[tokio::main] async fn run(options: Opt) -> Result<(), Box<dyn std::error::Error>> { let path = std::env::current_dir().unwrap(); println!("The current directory is {}", path.display()); tracing::subscriber::set_global_default( tracing_subscriber::FmtSubscriber::builder() .with_max_level(Level::INFO) .finish(), ) .expect("Failed to configure logging"); // Resolve URL from options let url = options.url; let remote = (url.host_str().expect("Failed to get host string from URL"), url.port().unwrap_or(4433)) .to_socket_addrs()? .next() .expect("couldn't resolve to an address"); // Create a Bevy app let mut app = App::build(); let cert = get_cert(&options.cert)?; app.add_plugin(bounded_planet::networking::client::plugin::Network { addr: remote, url, cert, accept_any_cert: options.accept_any_cert }); app.init_resource::<PingResponderState>(); app.add_system(respond_to_pings.system()); app.init_resource::<NetEventLoggerState>(); app.add_system(log_net_events.system()); app.init_resource::<MoveCam>(); app.add_resource(Msaa { samples: 4 }); app.add_default_plugins(); app.add_plugin(CameraBPPlugin::default()); app.add_startup_system(setup_scene.system()); app.add_system_to_stage(stage::EVENT_UPDATE, act_camera_on_window_edge.system()); app.add_system_to_stage(stage::EVENT_UPDATE, act_on_scroll_wheel.system()); app.add_stage_after(stage::EVENT_UPDATE, CAM_CACHE_UPDATE); app.add_system_to_stage(CAM_CACHE_UPDATE, use_or_update_action_cache.system()); app.add_system(play_every_sound_on_mb1.system()); app.init_resource::<TileReceivedState>(); app.add_system(handle_tile_received.system()); app.init_resource::<RequestTileOnConnectedState>(); app.add_system(request_tile_on_connected.system()); // Run it forever app.run(); Ok(()) } /// Fetch certificates to use fn get_cert(cert_path: &PathBuf) -> Result<quinn::Certificate, Box<dyn std::error::Error>> { info!("Loading Cert: {:?}", cert_path); Ok(quinn::Certificate::from_der(&fs::read(cert_path)?)?) 
} #[derive(Default)] pub struct PingResponderState { pub event_reader: EventReader<ReceiveEvent>, } fn respond_to_pings( mut state: ResMut<PingResponderState>, receiver: ResMut<Events<ReceiveEvent>>, mut sender: ResMut<Events<SendEvent>>, ) { for evt in state.event_reader.iter(&receiver) { if let ReceiveEvent::ReceivedPacket { ref connection, data } = evt { if let Packet::Ping(Ping { timestamp }) = **data { sender.send(SendEvent::SendPacket { connection: *connection, stream: StreamType::PingPong, data: Arc::new(Packet::Pong(Pong { timestamp })) }); info!("Received Ping, sending pong. {:?}", connection); } } } } #[derive(Default)] pub struct TileReceivedState { pub event_reader: EventReader<ReceiveEvent>, } /// When a tile is received from the server, we load it into the scene fn handle_tile_received( mut commands: Commands, asset_server: Res<AssetServer>, mut state: ResMut<TileReceivedState>, receiver: ResMut<Events<ReceiveEvent>>, mut meshes: ResMut<Assets<Mesh>>, mut textures: ResMut<Assets<Texture>>, mut materials: ResMut<Assets<StandardMaterial>> ) { for evt in state.event_reader.iter(&receiver) { if let ReceiveEvent::ReceivedPacket { connection: ref _connection, data } = evt { if let Packet::WorldTileData(WorldTileData { mesh_data }) = (**data).clone() { info!("Loading tile received from server."); let land_texture_top_handle = asset_server .load_sync(&mut textures, "content/textures/CoveWorldTop.png") .expect("Failed to load CoveWorldTop.png"); commands.spawn(PbrComponents { mesh: meshes.add(Mesh { primitive_topology: bevy::render::pipeline::PrimitiveTopology::TriangleList, attributes: vec![ VertexAttribute::position(mesh_data.vertices), VertexAttribute::normal(mesh_data.normals), VertexAttribute::uv(mesh_data.uvs), ], indices: Some(mesh_data.indices), }), material: materials.add(StandardMaterial { albedo_texture: Some(land_texture_top_handle), shaded: true, ..Default::default() }), ..Default::default() }); info!("Finished loading tile."); } } } } #[derive(Default)] struct RequestTileOnConnectedState { pub event_reader: EventReader<ReceiveEvent>, } /// When the client connects to the server, request a tile fn
( mut state: ResMut<RequestTileOnConnectedState>, mut sender: ResMut<Events<SendEvent>>, receiver: ResMut<Events<ReceiveEvent>> ) { for evt in state.event_reader.iter(&receiver) { if let ReceiveEvent::Connected(connection, _) = evt { info!("Requesting tile because connected to server..."); sender.send(SendEvent::SendPacket { connection: *connection, stream: StreamType::WorldTileData, data: Arc::new(Packet::WorldTileDataRequest(WorldTileDataRequest { //todo(#46): Respect request coordinates (x, y lod) x: 0, y: 0, lod: 0 })) }); } } } /// set up a simple 3D scene with landscape? fn setup_scene( mut commands: Commands, asset_server: Res<AssetServer>, mut meshes: ResMut<Assets<Mesh>>, // mut textures: ResMut<Assets<Texture>>, mut materials: ResMut<Assets<StandardMaterial>>, mut sounds: ResMut<Assets<AudioSource>>, ) { asset_server .load_sync(&mut sounds, "content/textures/test_sound.mp3") .expect("Failed to load test_sound.mp3"); // add entities to the world commands // cube .spawn(PbrComponents { mesh: meshes.add(Mesh::from(shape::Cube { size: 1.0 })), material: materials.add(Color::rgb(0.5, 0.4, 0.3).into()), transform: Transform::from_translation(Vec3::new(-20.0, 1.0, -20.0)), ..Default::default() }) // light .spawn(LightComponents { transform: Transform::from_translation(Vec3::new(4.0, 8.0, 4.0)), light: Light { color: Color::WHITE, fov: 90f32, depth: 0f32..100.0 }, ..Default::default() }) // camera .spawn(Camera3dComponents { transform: Transform::from_translation_rotation( Vec3::new(20.0, 20.0, 20.0), Quat::from_rotation_ypr(2.7, -0.75, 0.0) ), ..Default::default() }) .with(CameraBPConfig { forward_weight: -0.01, back_weight: 0.01, left_weight: -0.01, right_weight: 0.01, ..Default::default() }); } /// Pushes camera actions based upon mouse movements near the window edge. fn act_camera_on_window_edge( wins: Res<Windows>, pos: Res<Events<CursorMoved>>, mut mcam: ResMut<MoveCam>, ) { if let Some(e) = pos.get_reader().find_latest(&pos, |e| e.id.is_primary()) { let (mut mouse_x, mut mouse_y) = (e.position.x(), e.position.y()); let window = wins.get(e.id).expect("Couldn't get primary window."); let (window_x, window_y) = (window.width as f32, window.height as f32); // map (mouse_x, mouse_y) into [-1, 1]^2 mouse_x /= window_x / 2.0; mouse_y /= window_y / 2.0; mouse_x -= 1.0; mouse_y -= 1.0; let angle = mouse_x.atan2(mouse_y); let (ax, ay) = (angle.sin(), angle.cos()); let in_rect = (-CURSOR_H_THRESHOLD <= mouse_x && mouse_x <= CURSOR_H_THRESHOLD) && (-CURSOR_V_THRESHOLD <= mouse_y && mouse_y <= CURSOR_V_THRESHOLD); if !in_rect && ax.is_finite() && ay.is_finite() { mcam.right = Some(ax); mcam.forward = Some(ay); } else { mcam.right = None; mcam.forward = None; } } } /// Pushes camera actions based upon scroll wheel movement. fn act_on_scroll_wheel( mouse_wheel: Res<Events<MouseWheel>>, mut acts: ResMut<Events<CameraBPAction>>, ) { for mw in mouse_wheel.get_reader().iter(&mouse_wheel) { /// If scrolling units are reported in lines rather than pixels, /// multiply the returned horizontal scrolling amount by this. const LINE_SIZE: f32 = 14.0; let w = mw.y.abs() * if let MouseScrollUnit::Line = mw.unit { LINE_SIZE } else { 1.0 }; if mw.y > 0.0 { acts.send(CameraBPAction::ZoomIn(Some(w))) } else if mw.y < 0.0 { acts.send(CameraBPAction::ZoomOut(Some(w))) } } } /// Depending on `dirty`, either update the local `cache` or fill the event /// queue for [`CameraBPAction`] with the locally cached copy.
fn use_or_update_action_cache(mcam: Res<MoveCam>, mut acts: ResMut<Events<CameraBPAction>>) { if let Some(w) = mcam.right { acts.send(CameraBPAction::MoveRight(Some(w))) } if let Some(w) = mcam.forward { acts.send(CameraBPAction::MoveForward(Some(w))) } } fn play_every_sound_on_mb1( mev: Res<Events<MouseButtonInput>>, fxs: Res<Assets<AudioSource>>, output: Res<AudioOutput>, ) { for mev in mev.get_reader().iter(&mev) { if mev.button == MouseButton::Left && mev.state == PressState::Pressed { for (fx, _) in fxs.iter() { output.play(fx); } } } }
request_tile_on_connected
identifier_name
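Each system in the row above owns an EventReader so it can consume the shared ReceiveEvent log at its own pace. Below is a toy model of that cursor-per-reader idea; the types are simplified stand-ins, not Bevy's actual API.

struct Events<T> {
    log: Vec<T>,
}

#[derive(Default)]
struct EventReader {
    cursor: usize,
}

impl EventReader {
    // Return everything appended since this reader last looked.
    fn drain<'a, T>(&mut self, events: &'a Events<T>) -> &'a [T] {
        let start = self.cursor.min(events.log.len());
        self.cursor = events.log.len();
        &events.log[start..]
    }
}

fn main() {
    let events = Events { log: vec!["Connected", "ReceivedPacket"] };
    let mut ping_reader = EventReader::default();
    let mut tile_reader = EventReader::default();
    assert_eq!(ping_reader.drain(&events), ["Connected", "ReceivedPacket"]);
    assert_eq!(tile_reader.drain(&events).len(), 2); // cursors are independent
    assert!(ping_reader.drain(&events).is_empty()); // nothing new since last call
}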
main.rs
use std::{fs, net::ToSocketAddrs, path::PathBuf, sync::Arc}; use structopt::StructOpt; use url::Url; use tracing::{Level, info}; use bevy::{ input::{ keyboard::ElementState as PressState, mouse::{MouseButtonInput, MouseScrollUnit, MouseWheel}, }, prelude::*, render::mesh::{Mesh, VertexAttribute} }; use bounded_planet::{ camera::*, networking::{events::*, packets::*, systems::*} }; // The thresholds for window edge. const CURSOR_H_THRESHOLD: f32 = 0.55; const CURSOR_V_THRESHOLD: f32 = 0.42; /// The stage at which the [`CameraBP`] cache is either updated or used to fill /// in the action cache now. const CAM_CACHE_UPDATE: &str = "push_cam_update"; #[derive(Default)] struct MoveCam { right: Option<f32>, forward: Option<f32>, } #[derive(StructOpt, Debug)] #[structopt(name = "client")] struct Opt { /// Address to connect to #[structopt(long="url", default_value="quic://localhost:4433")] url: Url, /// TLS certificate in PEM format #[structopt(parse(from_os_str), short="c", long="cert", default_value="./certs/cert.pem")] cert: PathBuf, /// Accept any TLS certificate from the server even if it is invalid #[structopt(short="a", long="accept_any")] accept_any_cert: bool } fn main() -> Result<(), Box<dyn std::error::Error>> { let opt = Opt::from_args(); run(opt) } #[tokio::main] async fn run(options: Opt) -> Result<(), Box<dyn std::error::Error>> { let path = std::env::current_dir().unwrap(); println!("The current directory is {}", path.display()); tracing::subscriber::set_global_default( tracing_subscriber::FmtSubscriber::builder() .with_max_level(Level::INFO) .finish(), ) .expect("Failed to configure logging"); // Resolve URL from options let url = options.url; let remote = (url.host_str().expect("Failed to get host string from URL"), url.port().unwrap_or(4433)) .to_socket_addrs()? .next() .expect("couldn't resolve to an address"); // Create a Bevy app let mut app = App::build(); let cert = get_cert(&options.cert)?; app.add_plugin(bounded_planet::networking::client::plugin::Network { addr: remote, url, cert, accept_any_cert: options.accept_any_cert }); app.init_resource::<PingResponderState>(); app.add_system(respond_to_pings.system()); app.init_resource::<NetEventLoggerState>(); app.add_system(log_net_events.system()); app.init_resource::<MoveCam>(); app.add_resource(Msaa { samples: 4 }); app.add_default_plugins(); app.add_plugin(CameraBPPlugin::default()); app.add_startup_system(setup_scene.system()); app.add_system_to_stage(stage::EVENT_UPDATE, act_camera_on_window_edge.system()); app.add_system_to_stage(stage::EVENT_UPDATE, act_on_scroll_wheel.system()); app.add_stage_after(stage::EVENT_UPDATE, CAM_CACHE_UPDATE); app.add_system_to_stage(CAM_CACHE_UPDATE, use_or_update_action_cache.system()); app.add_system(play_every_sound_on_mb1.system()); app.init_resource::<TileReceivedState>(); app.add_system(handle_tile_received.system()); app.init_resource::<RequestTileOnConnectedState>(); app.add_system(request_tile_on_connected.system()); // Run it forever app.run(); Ok(()) } /// Fetch certificates to use fn get_cert(cert_path: &PathBuf) -> Result<quinn::Certificate, Box<dyn std::error::Error>> { info!("Loading Cert: {:?}", cert_path); Ok(quinn::Certificate::from_der(&fs::read(cert_path)?)?) 
} #[derive(Default)] pub struct PingResponderState { pub event_reader: EventReader<ReceiveEvent>, } fn respond_to_pings( mut state: ResMut<PingResponderState>, receiver: ResMut<Events<ReceiveEvent>>, mut sender: ResMut<Events<SendEvent>>, ) { for evt in state.event_reader.iter(&receiver) { if let ReceiveEvent::ReceivedPacket { ref connection, data } = evt { if let Packet::Ping(Ping { timestamp }) = **data { sender.send(SendEvent::SendPacket { connection: *connection, stream: StreamType::PingPong, data: Arc::new(Packet::Pong(Pong { timestamp })) }); info!("Received Ping, sending pong. {:?}", connection); } } } } #[derive(Default)] pub struct TileReceivedState { pub event_reader: EventReader<ReceiveEvent>, } /// When a tile is received from the server, we load it into the scene fn handle_tile_received( mut commands: Commands, asset_server: Res<AssetServer>, mut state: ResMut<TileReceivedState>, receiver: ResMut<Events<ReceiveEvent>>, mut meshes: ResMut<Assets<Mesh>>, mut textures: ResMut<Assets<Texture>>, mut materials: ResMut<Assets<StandardMaterial>> ) { for evt in state.event_reader.iter(&receiver) { if let ReceiveEvent::ReceivedPacket { connection: ref _connection, data } = evt { if let Packet::WorldTileData(WorldTileData { mesh_data }) = (**data).clone() { info!("Loading tile received from server."); let land_texture_top_handle = asset_server .load_sync(&mut textures, "content/textures/CoveWorldTop.png") .expect("Failed to load CoveWorldTop.png"); commands.spawn(PbrComponents { mesh: meshes.add(Mesh { primitive_topology: bevy::render::pipeline::PrimitiveTopology::TriangleList, attributes: vec![ VertexAttribute::position(mesh_data.vertices), VertexAttribute::normal(mesh_data.normals), VertexAttribute::uv(mesh_data.uvs), ], indices: Some(mesh_data.indices), }), material: materials.add(StandardMaterial { albedo_texture: Some(land_texture_top_handle), shaded: true, ..Default::default() }), ..Default::default() }); info!("Finished loading tile."); } } } } #[derive(Default)] struct RequestTileOnConnectedState { pub event_reader: EventReader<ReceiveEvent>, } /// When the client connects to the server, request a tile fn request_tile_on_connected( mut state: ResMut<RequestTileOnConnectedState>, mut sender: ResMut<Events<SendEvent>>, receiver: ResMut<Events<ReceiveEvent>> ) { for evt in state.event_reader.iter(&receiver) { if let ReceiveEvent::Connected(connection, _) = evt { info!("Requesting tile because connected to server..."); sender.send(SendEvent::SendPacket { connection: *connection, stream: StreamType::WorldTileData, data: Arc::new(Packet::WorldTileDataRequest(WorldTileDataRequest { //todo(#46): Respect request coordinates (x, y lod) x: 0, y: 0, lod: 0 })) }); } } } /// set up a simple 3D scene with landscape? 
fn setup_scene( mut commands: Commands, asset_server: Res<AssetServer>, mut meshes: ResMut<Assets<Mesh>>, // mut textures: ResMut<Assets<Texture>>, mut materials: ResMut<Assets<StandardMaterial>>, mut sounds: ResMut<Assets<AudioSource>>, ) { asset_server .load_sync(&mut sounds, "content/textures/test_sound.mp3") .expect("Failed to load test_sound.mp3"); // add entities to the world commands // cube .spawn(PbrComponents { mesh: meshes.add(Mesh::from(shape::Cube { size: 1.0 })), material: materials.add(Color::rgb(0.5, 0.4, 0.3).into()), transform: Transform::from_translation(Vec3::new(-20.0, 1.0, -20.0)), ..Default::default() }) // light .spawn(LightComponents { transform: Transform::from_translation(Vec3::new(4.0, 8.0, 4.0)), light: Light { color: Color::WHITE, fov: 90f32, depth: 0f32..100.0 }, ..Default::default() }) // camera .spawn(Camera3dComponents { transform: Transform::from_translation_rotation( Vec3::new(20.0, 20.0, 20.0), Quat::from_rotation_ypr(2.7, -0.75, 0.0) ), ..Default::default() }) .with(CameraBPConfig { forward_weight: -0.01, back_weight: 0.01, left_weight: -0.01, right_weight: 0.01, ..Default::default() }); } /// Pushes camera actions based upon mouse movements near the window edge. fn act_camera_on_window_edge( wins: Res<Windows>, pos: Res<Events<CursorMoved>>, mut mcam: ResMut<MoveCam>, ) { if let Some(e) = pos.get_reader().find_latest(&pos, |e| e.id.is_primary()) { let (mut mouse_x, mut mouse_y) = (e.position.x(), e.position.y()); let window = wins.get(e.id).expect("Couldn't get primary window."); let (window_x, window_y) = (window.width as f32, window.height as f32); // map (mouse_x, mouse_y) into [-1, 1]^2 mouse_x /= window_x / 2.0; mouse_y /= window_y / 2.0; mouse_x -= 1.0; mouse_y -= 1.0; let angle = mouse_x.atan2(mouse_y); let (ax, ay) = (angle.sin(), angle.cos()); let in_rect = (-CURSOR_H_THRESHOLD <= mouse_x && mouse_x <= CURSOR_H_THRESHOLD) && (-CURSOR_V_THRESHOLD <= mouse_y && mouse_y <= CURSOR_V_THRESHOLD);
mcam.right = None; mcam.forward = None; } } } /// Pushes camera actions based upon scroll wheel movement. fn act_on_scroll_wheel( mouse_wheel: Res<Events<MouseWheel>>, mut acts: ResMut<Events<CameraBPAction>>, ) { for mw in mouse_wheel.get_reader().iter(&mouse_wheel) { /// If scrolling units are reported in lines rather than pixels, /// multiply the returned vertical scroll amount by this. const LINE_SIZE: f32 = 14.0; let w = mw.y.abs() * if let MouseScrollUnit::Line = mw.unit { LINE_SIZE } else { 1.0 }; if mw.y > 0.0 { acts.send(CameraBPAction::ZoomIn(Some(w))) } else if mw.y < 0.0 { acts.send(CameraBPAction::ZoomOut(Some(w))) } } } /// Fills the [`CameraBPAction`] event queue with the locally cached [`MoveCam`] actions. fn use_or_update_action_cache(mcam: Res<MoveCam>, mut acts: ResMut<Events<CameraBPAction>>) { if let Some(w) = mcam.right { acts.send(CameraBPAction::MoveRight(Some(w))) } if let Some(w) = mcam.forward { acts.send(CameraBPAction::MoveForward(Some(w))) } } fn play_every_sound_on_mb1( mev: Res<Events<MouseButtonInput>>, fxs: Res<Assets<AudioSource>>, output: Res<AudioOutput>, ) { for mev in mev.get_reader().iter(&mev) { if mev.button == MouseButton::Left && mev.state == PressState::Pressed { for (fx, _) in fxs.iter() { output.play(fx); } } } }
if !in_rect && ax.is_finite() && ay.is_finite() { mcam.right = Some(ax); mcam.forward = Some(ay); } else {
random_line_split
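The edge-scroll logic in the sample above maps the cursor into [-1, 1]^2 around the window center and converts the offset angle into a pan direction, with a central dead zone defined by the two threshold constants. As a worked illustration, here is a minimal self-contained sketch of that math; the `edge_pan` name, its signature, and the `main` checks are hypothetical, not part of the bounded_planet crate.

const H_THRESHOLD: f32 = 0.55;
const V_THRESHOLD: f32 = 0.42;

/// Maps a cursor position in pixels to a (right, forward) pan direction,
/// or `None` when the cursor sits inside the central dead zone.
fn edge_pan(cursor: (f32, f32), window: (f32, f32)) -> Option<(f32, f32)> {
    // Normalize the cursor into [-1, 1]^2, with (0, 0) at the window center.
    let x = cursor.0 / (window.0 / 2.0) - 1.0;
    let y = cursor.1 / (window.1 / 2.0) - 1.0;

    // Direction from the center toward the cursor, as a unit vector.
    let angle = x.atan2(y);
    let (ax, ay) = (angle.sin(), angle.cos());

    let in_dead_zone = x.abs() <= H_THRESHOLD && y.abs() <= V_THRESHOLD;
    if !in_dead_zone && ax.is_finite() && ay.is_finite() {
        Some((ax, ay))
    } else {
        None
    }
}

fn main() {
    // Cursor near the right edge of an 800x600 window pans right.
    assert!(edge_pan((790.0, 300.0), (800.0, 600.0)).is_some());
    // Cursor at the center does nothing.
    assert!(edge_pan((400.0, 300.0), (800.0, 600.0)).is_none());
}

The atan2/sin/cos round trip normalizes the offset into a unit direction, so diagonal cursor positions pan at the same speed as axis-aligned ones.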
main.rs
use std::{fs, net::ToSocketAddrs, path::PathBuf, sync::Arc}; use structopt::StructOpt; use url::Url; use tracing::{Level, info}; use bevy::{ input::{ keyboard::ElementState as PressState, mouse::{MouseButtonInput, MouseScrollUnit, MouseWheel}, }, prelude::*, render::mesh::{Mesh, VertexAttribute} }; use bounded_planet::{ camera::*, networking::{events::*, packets::*, systems::*} }; // The thresholds for window edge. const CURSOR_H_THRESHOLD: f32 = 0.55; const CURSOR_V_THRESHOLD: f32 = 0.42; /// The stage at which the [`CameraBP`] cache is either updated or used to fill /// in the action cache now. const CAM_CACHE_UPDATE: &str = "push_cam_update"; #[derive(Default)] struct MoveCam { right: Option<f32>, forward: Option<f32>, } #[derive(StructOpt, Debug)] #[structopt(name = "client")] struct Opt { /// Address to connect to #[structopt(long="url", default_value="quic://localhost:4433")] url: Url, /// TLS certificate in PEM format #[structopt(parse(from_os_str), short="c", long="cert", default_value="./certs/cert.pem")] cert: PathBuf, /// Accept any TLS certificate from the server even if it is invalid #[structopt(short="a", long="accept_any")] accept_any_cert: bool } fn main() -> Result<(), Box<dyn std::error::Error>> { let opt = Opt::from_args(); run(opt) } #[tokio::main] async fn run(options: Opt) -> Result<(), Box<dyn std::error::Error>> { let path = std::env::current_dir().unwrap(); println!("The current directory is {}", path.display()); tracing::subscriber::set_global_default( tracing_subscriber::FmtSubscriber::builder() .with_max_level(Level::INFO) .finish(), ) .expect("Failed to configure logging"); // Resolve URL from options let url = options.url; let remote = (url.host_str().expect("Failed to get host string from URL"), url.port().unwrap_or(4433)) .to_socket_addrs()? .next() .expect("couldn't resolve to an address"); // Create a Bevy app let mut app = App::build(); let cert = get_cert(&options.cert)?; app.add_plugin(bounded_planet::networking::client::plugin::Network { addr: remote, url, cert, accept_any_cert: options.accept_any_cert }); app.init_resource::<PingResponderState>(); app.add_system(respond_to_pings.system()); app.init_resource::<NetEventLoggerState>(); app.add_system(log_net_events.system()); app.init_resource::<MoveCam>(); app.add_resource(Msaa { samples: 4 }); app.add_default_plugins(); app.add_plugin(CameraBPPlugin::default()); app.add_startup_system(setup_scene.system()); app.add_system_to_stage(stage::EVENT_UPDATE, act_camera_on_window_edge.system()); app.add_system_to_stage(stage::EVENT_UPDATE, act_on_scroll_wheel.system()); app.add_stage_after(stage::EVENT_UPDATE, CAM_CACHE_UPDATE); app.add_system_to_stage(CAM_CACHE_UPDATE, use_or_update_action_cache.system()); app.add_system(play_every_sound_on_mb1.system()); app.init_resource::<TileReceivedState>(); app.add_system(handle_tile_received.system()); app.init_resource::<RequestTileOnConnectedState>(); app.add_system(request_tile_on_connected.system()); // Run it forever app.run(); Ok(()) } /// Fetch certificates to use fn get_cert(cert_path: &PathBuf) -> Result<quinn::Certificate, Box<dyn std::error::Error>> { info!("Loading Cert: {:?}", cert_path); Ok(quinn::Certificate::from_der(&fs::read(cert_path)?)?) 
} #[derive(Default)] pub struct PingResponderState { pub event_reader: EventReader<ReceiveEvent>, } fn respond_to_pings( mut state: ResMut<PingResponderState>, receiver: ResMut<Events<ReceiveEvent>>, mut sender: ResMut<Events<SendEvent>>, ) { for evt in state.event_reader.iter(&receiver) { if let ReceiveEvent::ReceivedPacket { ref connection, data } = evt { if let Packet::Ping(Ping { timestamp }) = **data { sender.send(SendEvent::SendPacket { connection: *connection, stream: StreamType::PingPong, data: Arc::new(Packet::Pong(Pong { timestamp })) }); info!("Received Ping, sending pong. {:?}", connection); } } } } #[derive(Default)] pub struct TileReceivedState { pub event_reader: EventReader<ReceiveEvent>, } /// When a tile is received from the server, we load it into the scene fn handle_tile_received( mut commands: Commands, asset_server: Res<AssetServer>, mut state: ResMut<TileReceivedState>, receiver: ResMut<Events<ReceiveEvent>>, mut meshes: ResMut<Assets<Mesh>>, mut textures: ResMut<Assets<Texture>>, mut materials: ResMut<Assets<StandardMaterial>> ) { for evt in state.event_reader.iter(&receiver) { if let ReceiveEvent::ReceivedPacket { connection: ref _connection, data } = evt { if let Packet::WorldTileData(WorldTileData { mesh_data }) = (**data).clone() { info!("Loading tile received from server."); let land_texture_top_handle = asset_server .load_sync(&mut textures, "content/textures/CoveWorldTop.png") .expect("Failed to load CoveWorldTop.png"); commands.spawn(PbrComponents { mesh: meshes.add(Mesh { primitive_topology: bevy::render::pipeline::PrimitiveTopology::TriangleList, attributes: vec![ VertexAttribute::position(mesh_data.vertices), VertexAttribute::normal(mesh_data.normals), VertexAttribute::uv(mesh_data.uvs), ], indices: Some(mesh_data.indices), }), material: materials.add(StandardMaterial { albedo_texture: Some(land_texture_top_handle), shaded: true, ..Default::default() }), ..Default::default() }); info!("Finished loading tile."); } } } } #[derive(Default)] struct RequestTileOnConnectedState { pub event_reader: EventReader<ReceiveEvent>, } /// When the client connects to the server, request a tile fn request_tile_on_connected( mut state: ResMut<RequestTileOnConnectedState>, mut sender: ResMut<Events<SendEvent>>, receiver: ResMut<Events<ReceiveEvent>> ) { for evt in state.event_reader.iter(&receiver) { if let ReceiveEvent::Connected(connection, _) = evt { info!("Requesting tile because connected to server..."); sender.send(SendEvent::SendPacket { connection: *connection, stream: StreamType::WorldTileData, data: Arc::new(Packet::WorldTileDataRequest(WorldTileDataRequest { //todo(#46): Respect request coordinates (x, y lod) x: 0, y: 0, lod: 0 })) }); } } } /// set up a simple 3D scene with landscape? 
fn setup_scene( mut commands: Commands, asset_server: Res<AssetServer>, mut meshes: ResMut<Assets<Mesh>>, // mut textures: ResMut<Assets<Texture>>, mut materials: ResMut<Assets<StandardMaterial>>, mut sounds: ResMut<Assets<AudioSource>>, ) { asset_server .load_sync(&mut sounds, "content/textures/test_sound.mp3") .expect("Failed to load test_sound.mp3"); // add entities to the world commands // cube .spawn(PbrComponents { mesh: meshes.add(Mesh::from(shape::Cube { size: 1.0 })), material: materials.add(Color::rgb(0.5, 0.4, 0.3).into()), transform: Transform::from_translation(Vec3::new(-20.0, 1.0, -20.0)), ..Default::default() }) // light .spawn(LightComponents { transform: Transform::from_translation(Vec3::new(4.0, 8.0, 4.0)), light: Light { color: Color::WHITE, fov: 90f32, depth: 0f32..100.0 }, ..Default::default() }) // camera .spawn(Camera3dComponents { transform: Transform::from_translation_rotation( Vec3::new(20.0, 20.0, 20.0), Quat::from_rotation_ypr(2.7, -0.75, 0.0) ), ..Default::default() }) .with(CameraBPConfig { forward_weight: -0.01, back_weight: 0.01, left_weight: -0.01, right_weight: 0.01, ..Default::default() }); } /// Pushes camera actions based upon mouse movements near the window edge. fn act_camera_on_window_edge( wins: Res<Windows>, pos: Res<Events<CursorMoved>>, mut mcam: ResMut<MoveCam>, ) { if let Some(e) = pos.get_reader().find_latest(&pos, |e| e.id.is_primary()) { let (mut mouse_x, mut mouse_y) = (e.position.x(), e.position.y()); let window = wins.get(e.id).expect("Couldn't get primary window."); let (window_x, window_y) = (window.width as f32, window.height as f32); // map (mouse_x, mouse_y) into [-1, 1]^2 mouse_x /= window_x / 2.0; mouse_y /= window_y / 2.0; mouse_x -= 1.0; mouse_y -= 1.0; let angle = mouse_x.atan2(mouse_y); let (ax, ay) = (angle.sin(), angle.cos()); let in_rect = (-CURSOR_H_THRESHOLD <= mouse_x && mouse_x <= CURSOR_H_THRESHOLD) && (-CURSOR_V_THRESHOLD <= mouse_y && mouse_y <= CURSOR_V_THRESHOLD); if !in_rect && ax.is_finite() && ay.is_finite() { mcam.right = Some(ax); mcam.forward = Some(ay); } else { mcam.right = None; mcam.forward = None; } } } /// Pushes camera actions based upon scroll wheel movement. fn act_on_scroll_wheel( mouse_wheel: Res<Events<MouseWheel>>, mut acts: ResMut<Events<CameraBPAction>>, ) { for mw in mouse_wheel.get_reader().iter(&mouse_wheel) { /// If scrolling units are reported in lines rather than pixels, /// multiply the returned vertical scroll amount by this. const LINE_SIZE: f32 = 14.0; let w = mw.y.abs() * if let MouseScrollUnit::Line = mw.unit { LINE_SIZE } else { 1.0 }; if mw.y > 0.0 { acts.send(CameraBPAction::ZoomIn(Some(w))) } else if mw.y < 0.0 { acts.send(CameraBPAction::ZoomOut(Some(w))) } } } /// Fills the [`CameraBPAction`] event queue with the locally cached [`MoveCam`] actions. fn use_or_update_action_cache(mcam: Res<MoveCam>, mut acts: ResMut<Events<CameraBPAction>>) { if let Some(w) = mcam.right
if let Some(w) = mcam.forward { acts.send(CameraBPAction::MoveForward(Some(w))) } } fn play_every_sound_on_mb1( mev: Res<Events<MouseButtonInput>>, fxs: Res<Assets<AudioSource>>, output: Res<AudioOutput>, ) { for mev in mev.get_reader().iter(&mev) { if mev.button == MouseButton::Left && mev.state == PressState::Pressed { for (fx, _) in fxs.iter() { output.play(fx); } } } }
{ acts.send(CameraBPAction::MoveRight(Some(w))) }
conditional_block
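`act_on_scroll_wheel` in the sample above normalizes line-based wheel deltas to an approximate pixel amount before emitting zoom actions, so mice that report lines and touchpads that report pixels produce comparable weights. A rough std-only sketch of that normalization, with `ScrollUnit` standing in for Bevy's `MouseScrollUnit` (names here are illustrative):

enum ScrollUnit { Line, Pixel }

/// Roughly how many pixels one "line" of scrolling corresponds to.
const LINE_SIZE: f32 = 14.0;

/// Returns a signed zoom weight in pixel-like units, or `None` for no scroll.
fn zoom_weight(delta: f32, unit: ScrollUnit) -> Option<f32> {
    let scale = match unit {
        ScrollUnit::Line => LINE_SIZE, // convert lines to approximate pixels
        ScrollUnit::Pixel => 1.0,
    };
    if delta == 0.0 { None } else { Some(delta * scale) }
}

fn main() {
    assert_eq!(zoom_weight(2.0, ScrollUnit::Line), Some(28.0));
    assert_eq!(zoom_weight(-3.0, ScrollUnit::Pixel), Some(-3.0));
    assert_eq!(zoom_weight(0.0, ScrollUnit::Line), None);
}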
mod.rs
mod searcher; use self::searcher::{SearchEngine, SearchWorker}; use crate::find_usages::{CtagsSearcher, GtagsSearcher, QueryType, Usage, UsageMatcher, Usages}; use crate::stdio_server::handler::CachedPreviewImpl; use crate::stdio_server::job; use crate::stdio_server::provider::{BaseArgs, ClapProvider, Context}; use crate::tools::ctags::{get_language, TagsGenerator, CTAGS_EXISTS}; use crate::tools::gtags::GTAGS_EXISTS; use anyhow::Result; use filter::Query; use futures::Future; use itertools::Itertools; use paths::AbsPathBuf; use rayon::prelude::*; use serde_json::json; use std::path::PathBuf; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use tracing::Instrument; /// Internal representation of user input. #[derive(Debug, Clone, Default)] struct QueryInfo { /// Keyword for the tag or regex searching. keyword: String, /// Query type for `keyword`. query_type: QueryType, /// Search terms for further filtering. usage_matcher: UsageMatcher, } impl QueryInfo { /// Return `true` if the result of query info is a superset of the result of another, /// i.e., `self` contains all the search results of `other`. /// /// The rule is as follows: /// /// - the keyword is the same. /// - the new query is a subset of the last query. fn is_superset(&self, other: &Self) -> bool { self.keyword == other.keyword && self.query_type == other.query_type && self.usage_matcher.is_superset(&other.usage_matcher) } } /// Parses the raw user input and returns the final keyword as well as the constraint terms. /// Currently, only one keyword is supported. /// /// `hel 'fn` => `keyword ++ exact_term/inverse_term`. /// /// # Argument /// /// - `query`: Initial query typed in the input window. fn parse_query_info(query: &str) -> QueryInfo { let Query { word_terms: _, // TODO: add word_terms to UsageMatcher exact_terms, fuzzy_terms, inverse_terms, } = Query::from(query); // If there is no fuzzy term, use the full query as the keyword, // otherwise restore the fuzzy query as the keyword we are going to search. let (keyword, query_type, usage_matcher) = if fuzzy_terms.is_empty() { if exact_terms.is_empty() { (query.into(), QueryType::StartWith, UsageMatcher::default()) } else { ( exact_terms[0].text.clone(), QueryType::Exact, UsageMatcher::new(exact_terms, inverse_terms), ) } } else { ( fuzzy_terms.iter().map(|term| &term.text).join(" "), QueryType::StartWith, UsageMatcher::new(exact_terms, inverse_terms), ) }; // TODO: Search syntax: // - 'foo // - foo* // - foo // // if let Some(stripped) = query.strip_suffix('*') { // (stripped, QueryType::Contain) // } else if let Some(stripped) = query.strip_prefix('\'') { // (stripped, QueryType::Exact) // } else { // (query, QueryType::StartWith) // }; QueryInfo { keyword, query_type, usage_matcher, } } #[derive(Debug, Clone, Default)] struct SearchResults { /// Results of the last search. /// /// When passing the line content from Vim to Rust, the performance /// of Vim can become very bad because some lines are extremely long, /// so we cache the last results on the Rust side to allow passing only the line number /// from Vim later instead. usages: Usages, /// Last parsed query info. query_info: QueryInfo, } #[derive(Debug, Clone)] pub struct DumbJumpProvider { args: BaseArgs, /// Results from the last search. /// This might be a superset of the search results for the last query. cached_results: SearchResults, /// Current results from refiltering on `cached_results`. current_usages: Option<Usages>, /// Whether the tags file has been (re)-created.
ctags_regenerated: Arc<AtomicBool>, /// Whether the GTAGS file has been (re)-created. gtags_regenerated: Arc<AtomicBool>, } async fn init_gtags(cwd: PathBuf, gtags_regenerated: Arc<AtomicBool>) { let gtags_searcher = GtagsSearcher::new(cwd); match gtags_searcher.create_or_update_tags() { Ok(()) => gtags_regenerated.store(true, Ordering::SeqCst), Err(e) => { tracing::error!(error =?e, "[dumb_jump] 💔 Error at initializing GTAGS, attempting to recreate..."); // TODO: creating gtags may take 20s+ for large project match tokio::task::spawn_blocking({ let gtags_searcher = gtags_searcher.clone(); move || gtags_searcher.force_recreate() }) .await { Ok(_) => { gtags_regenerated.store(true, Ordering::SeqCst); tracing::debug!("[dumb_jump] Recreating gtags db successfully"); } Err(e) => { tracing::error!(error =?e, "[dumb_jump] 💔 Failed to recreate gtags db"); } } } } } impl DumbJumpProvider { pub async fn new(ctx: &Context) -> Result<Self> { let args = ctx.parse_provider_args().await?; Ok(Self { args, cached_results: Default::default(), current_usages: None, ctags_regenerated: Arc::new(false.into()), gtags_regenerated: Arc::new(false.into()), }) } async fn initialize_tags(&self, extension: String, cwd: AbsPathBuf) -> Result<()> { let job_id = utils::calculate_hash(&(&cwd, "dumb_jump")); if job::reserve(job_id) { let ctags_future = { let cwd = cwd.clone(); let mut tags_generator = TagsGenerator::with_dir(cwd.clone()); if let Some(language) = get_language(&extension) { tags_generator.set_languages(language.into()); } let ctags_regenerated = self.ctags_regenerated.clone(); // Ctags initialization is usually pretty fast. async move { let now = std::time::Instant::now(); let ctags_searcher = CtagsSearcher::new(tags_generator); match ctags_searcher.generate_tags() { Ok(()) => ctags_regenerated.store(true, Ordering::SeqCst), Err(e) => { tracing::error!(error =?e, "[dumb_jump] 💔 Error at initializing ctags") } } tracing::debug!(?cwd, "[dumb_jump] ⏱️ Ctags elapsed: {:?}", now.elapsed()); } }; let gtags_future = { let cwd: PathBuf = cwd.into(); let gtags_regenerated = self.gtags_regenerated.clone(); let span = tracing::span!(tracing::Level::INFO, "gtags"); async move { let _ = tokio::task::spawn(init_gtags(cwd, gtags_regenerated)).await; } .instrument(span) }; fn run(job_future: impl Send + Sync +'static + Future<Output = ()>, job_id: u64) { tokio::task::spawn({ async move { let now = std::time::Instant::now(); job_future.await; tracing::debug!("[dumb_jump] ⏱️ Total elapsed: {:?}", now.elapsed()); job::unreserve(job_id); } }); } match (*CTAGS_EXISTS, *GTAGS_EXISTS) { (true, true) => run( async move { futures::future::join(ctags_future, gtags_future).await; }, job_id, ), (false, false) => {} (true, false) => run(ctags_future, job_id), (false, true) => run(gtags_future, job_id), } } Ok(()) } /// Starts a new searching task. async fn start_search( &self, search_worker: SearchWorker, query: &str, query_info: QueryInfo, ) -> Result<SearchResults> { if query.is_empty() { return Ok(Default::default()); } let search_engine = match ( self.ctags_regenerated.load(Ordering::Relaxed), self.gtags_regenerated.load(Ordering::Relaxed), ) { (true, true) => SearchEngine::All, (true, false) => SearchEngine::CtagsAndRegex, _ => SearchEngine::Regex, }; let usages = search_engine.run(search_worker).await?; Ok(SearchResults { usages, query_info }) } fn on_new_search_results( &mut self, search_results: SearchResults, ctx: &Context, ) -> Result<()> { let matched = search_results.usages.len(); // Only show the top 200 items. 
let (lines, indices): (Vec<_>, Vec<_>) = search_results .usages .iter() .take(200) .map(|usage| (usage.line.as_str(), usage.indices.as_slice())) .unzip(); let response = json!({ "lines": lines, "indices": indices, "matched": matched }); ctx.vim .exec("clap#state#process_response_on_typed", response)?; self.cached_results = search_results; self.current_usages.take(); Ok(()) } } #[async_trait::async_trait] impl ClapProvider for DumbJumpProvider { async fn on_initialize(&mut self, ctx: &mut Context) -> Result<()> { let cwd = ctx.vim.working_dir().await?; let source_file_extension = ctx.start_buffer_extension()?.to_string(); tokio::task::spawn({ let cwd = cwd.clone(); let extension = source_file_extension.clone(); let dumb_jump = self.clone(); async move { if let Err(err) = dumb_jump.initialize_tags(extension, cwd).await { tracing::error!(error =?err, "Failed to initialize dumb_jump provider"); } } }); if let Some(query) = &self.args.query { let query_info = parse_query_info(query); let search_worker = SearchWorker { cwd, query_info: query_info.clone(), source_file_extension, }; let search_results = self.start_search(search_worker, query, query_info).await?; self.on_new_search_results(search_results, ctx)?; } Ok(()) } async fn on_move(&mut self, ctx: &mut Context) -> Result<()> { let cur
.get_preview() .await?; let current_input = ctx.vim.input_get().await?; let current_lnum = ctx.vim.display_getcurlnum().await?; // Only send back the result if the request is not out-dated. if input == current_input && lnum == current_lnum { ctx.preview_manager.reset_scroll(); ctx.render_preview(preview)?; ctx.preview_manager.set_preview_target(preview_target); } Ok(()) } async fn on_typed(&mut self, ctx: &mut Context) -> Result<()> { let query = ctx.vim.input_get().await?; let query_info = parse_query_info(&query); // Try to refilter the cached results. if self.cached_results.query_info.is_superset(&query_info) { let refiltered = self .cached_results .usages .par_iter() .filter_map(|Usage { line, indices }| { query_info .usage_matcher .match_jump_line((line.clone(), indices.clone())) .map(|(line, indices)| Usage::new(line, indices)) }) .collect::<Vec<_>>(); let matched = refiltered.len(); let (lines, indices): (Vec<&str>, Vec<&[usize]>) = refiltered .iter() .take(200) .map(|Usage { line, indices }| (line.as_str(), indices.as_slice())) .unzip(); let response = json!({ "lines": lines, "indices": indices, "matched": matched }); ctx.vim .exec("clap#state#process_response_on_typed", response)?; self.current_usages.replace(refiltered.into()); return Ok(()); } let cwd: AbsPathBuf = ctx.vim.working_dir().await?; let search_worker = SearchWorker { cwd, query_info: query_info.clone(), source_file_extension: ctx.start_buffer_extension()?.to_string(), }; let search_results = self.start_search(search_worker, &query, query_info).await?; self.on_new_search_results(search_results, ctx)?; Ok(()) } } #[cfg(test)] mod tests { use super::*; #[test] fn test_parse_search_info() { let query_info = parse_query_info("'foo"); println!("{query_info:?}"); } }
rent_lines = self .current_usages .as_ref() .unwrap_or(&self.cached_results.usages); if current_lines.is_empty() { return Ok(()); } let input = ctx.vim.input_get().await?; let lnum = ctx.vim.display_getcurlnum().await?; // lnum is 1-indexed let curline = current_lines .get_line(lnum - 1) .ok_or_else(|| anyhow::anyhow!("Can not find curline on Rust end for lnum: {lnum}"))?; let preview_height = ctx.preview_height().await?; let (preview_target, preview) = CachedPreviewImpl::new(curline.to_string(), preview_height, ctx)?
identifier_body
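The `is_superset` check in this file is what makes the cached-results path in `on_typed` sound: a query that only adds constraint terms to the same keyword can only narrow the previous result set, so the provider may refilter in memory instead of launching a new search. A simplified model of that decision, with hypothetical stand-in types (`CachedSearch`, `refilter`):

/// Simplified model of the cache-or-research decision in DumbJumpProvider.
struct CachedSearch {
    keyword: String,    // the keyword that produced `lines`
    lines: Vec<String>, // previous search results
}

impl CachedSearch {
    /// A new query can reuse the cache only if its results are guaranteed to
    /// be a subset of the cached ones; the (simplified) rule here is
    /// "same keyword, extra filter terms only".
    fn refilter(&self, keyword: &str, extra_term: &str) -> Option<Vec<String>> {
        if keyword != self.keyword {
            return None; // keyword changed: a fresh search is required
        }
        Some(self.lines.iter().filter(|l| l.contains(extra_term)).cloned().collect())
    }
}

fn main() {
    let cache = CachedSearch {
        keyword: "parse".into(),
        lines: vec!["fn parse_query".into(), "parse error".into()],
    };
    // `parse 'fn` narrows `parse`, so the cache can be refiltered in memory.
    assert_eq!(cache.refilter("parse", "fn").unwrap().len(), 1);
    // A different keyword cannot reuse the cache.
    assert!(cache.refilter("tokenize", "").is_none());
}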
mod.rs
mod searcher; use self::searcher::{SearchEngine, SearchWorker}; use crate::find_usages::{CtagsSearcher, GtagsSearcher, QueryType, Usage, UsageMatcher, Usages}; use crate::stdio_server::handler::CachedPreviewImpl; use crate::stdio_server::job; use crate::stdio_server::provider::{BaseArgs, ClapProvider, Context}; use crate::tools::ctags::{get_language, TagsGenerator, CTAGS_EXISTS}; use crate::tools::gtags::GTAGS_EXISTS; use anyhow::Result; use filter::Query; use futures::Future; use itertools::Itertools; use paths::AbsPathBuf; use rayon::prelude::*; use serde_json::json; use std::path::PathBuf; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use tracing::Instrument; /// Internal representation of user input. #[derive(Debug, Clone, Default)] struct QueryInfo { /// Keyword for the tag or regex searching. keyword: String, /// Query type for `keyword`. query_type: QueryType, /// Search terms for further filtering. usage_matcher: UsageMatcher, } impl QueryInfo { /// Return `true` if the result of query info is a superset of the result of another, /// i.e., `self` contains all the search results of `other`. /// /// The rule is as follows: /// /// - the keyword is the same. /// - the new query is a subset of the last query. fn is_superset(&self, other: &Self) -> bool { self.keyword == other.keyword && self.query_type == other.query_type && self.usage_matcher.is_superset(&other.usage_matcher) } } /// Parses the raw user input and returns the final keyword as well as the constraint terms. /// Currently, only one keyword is supported. /// /// `hel 'fn` => `keyword ++ exact_term/inverse_term`. /// /// # Argument /// /// - `query`: Initial query typed in the input window. fn parse_query_info(query: &str) -> QueryInfo { let Query { word_terms: _, // TODO: add word_terms to UsageMatcher exact_terms, fuzzy_terms, inverse_terms, } = Query::from(query); // If there is no fuzzy term, use the full query as the keyword,
// otherwise restore the fuzzy query as the keyword we are going to search. let (keyword, query_type, usage_matcher) = if fuzzy_terms.is_empty() { if exact_terms.is_empty() { (query.into(), QueryType::StartWith, UsageMatcher::default()) } else { ( exact_terms[0].text.clone(), QueryType::Exact, UsageMatcher::new(exact_terms, inverse_terms), ) } } else { ( fuzzy_terms.iter().map(|term| &term.text).join(" "), QueryType::StartWith, UsageMatcher::new(exact_terms, inverse_terms), ) }; // TODO: Search syntax: // - 'foo // - foo* // - foo // // if let Some(stripped) = query.strip_suffix('*') { // (stripped, QueryType::Contain) // } else if let Some(stripped) = query.strip_prefix('\'') { // (stripped, QueryType::Exact) // } else { // (query, QueryType::StartWith) // }; QueryInfo { keyword, query_type, usage_matcher, } } #[derive(Debug, Clone, Default)] struct SearchResults { /// Last searching results. /// /// When passing the line content from Vim to Rust, the performance /// of Vim can become very bad because some lines are extremely long, /// we cache the last results on Rust to allow passing the line number /// from Vim later instead. usages: Usages, /// Last parsed query info. query_info: QueryInfo, } #[derive(Debug, Clone)] pub struct DumbJumpProvider { args: BaseArgs, /// Results from last searching. /// This might be a superset of searching results for the last query. cached_results: SearchResults, /// Current results from refiltering on `cached_results`. current_usages: Option<Usages>, /// Whether the tags file has been (re)-created. ctags_regenerated: Arc<AtomicBool>, /// Whether the GTAGS file has been (re)-created. gtags_regenerated: Arc<AtomicBool>, } async fn init_gtags(cwd: PathBuf, gtags_regenerated: Arc<AtomicBool>) { let gtags_searcher = GtagsSearcher::new(cwd); match gtags_searcher.create_or_update_tags() { Ok(()) => gtags_regenerated.store(true, Ordering::SeqCst), Err(e) => { tracing::error!(error =?e, "[dumb_jump] 💔 Error at initializing GTAGS, attempting to recreate..."); // TODO: creating gtags may take 20s+ for large project match tokio::task::spawn_blocking({ let gtags_searcher = gtags_searcher.clone(); move || gtags_searcher.force_recreate() }) .await { Ok(_) => { gtags_regenerated.store(true, Ordering::SeqCst); tracing::debug!("[dumb_jump] Recreating gtags db successfully"); } Err(e) => { tracing::error!(error =?e, "[dumb_jump] 💔 Failed to recreate gtags db"); } } } } } impl DumbJumpProvider { pub async fn new(ctx: &Context) -> Result<Self> { let args = ctx.parse_provider_args().await?; Ok(Self { args, cached_results: Default::default(), current_usages: None, ctags_regenerated: Arc::new(false.into()), gtags_regenerated: Arc::new(false.into()), }) } async fn initialize_tags(&self, extension: String, cwd: AbsPathBuf) -> Result<()> { let job_id = utils::calculate_hash(&(&cwd, "dumb_jump")); if job::reserve(job_id) { let ctags_future = { let cwd = cwd.clone(); let mut tags_generator = TagsGenerator::with_dir(cwd.clone()); if let Some(language) = get_language(&extension) { tags_generator.set_languages(language.into()); } let ctags_regenerated = self.ctags_regenerated.clone(); // Ctags initialization is usually pretty fast. 
async move { let now = std::time::Instant::now(); let ctags_searcher = CtagsSearcher::new(tags_generator); match ctags_searcher.generate_tags() { Ok(()) => ctags_regenerated.store(true, Ordering::SeqCst), Err(e) => { tracing::error!(error =?e, "[dumb_jump] 💔 Error at initializing ctags") } } tracing::debug!(?cwd, "[dumb_jump] ⏱️ Ctags elapsed: {:?}", now.elapsed()); } }; let gtags_future = { let cwd: PathBuf = cwd.into(); let gtags_regenerated = self.gtags_regenerated.clone(); let span = tracing::span!(tracing::Level::INFO, "gtags"); async move { let _ = tokio::task::spawn(init_gtags(cwd, gtags_regenerated)).await; } .instrument(span) }; fn run(job_future: impl Send + Sync +'static + Future<Output = ()>, job_id: u64) { tokio::task::spawn({ async move { let now = std::time::Instant::now(); job_future.await; tracing::debug!("[dumb_jump] ⏱️ Total elapsed: {:?}", now.elapsed()); job::unreserve(job_id); } }); } match (*CTAGS_EXISTS, *GTAGS_EXISTS) { (true, true) => run( async move { futures::future::join(ctags_future, gtags_future).await; }, job_id, ), (false, false) => {} (true, false) => run(ctags_future, job_id), (false, true) => run(gtags_future, job_id), } } Ok(()) } /// Starts a new searching task. async fn start_search( &self, search_worker: SearchWorker, query: &str, query_info: QueryInfo, ) -> Result<SearchResults> { if query.is_empty() { return Ok(Default::default()); } let search_engine = match ( self.ctags_regenerated.load(Ordering::Relaxed), self.gtags_regenerated.load(Ordering::Relaxed), ) { (true, true) => SearchEngine::All, (true, false) => SearchEngine::CtagsAndRegex, _ => SearchEngine::Regex, }; let usages = search_engine.run(search_worker).await?; Ok(SearchResults { usages, query_info }) } fn on_new_search_results( &mut self, search_results: SearchResults, ctx: &Context, ) -> Result<()> { let matched = search_results.usages.len(); // Only show the top 200 items. 
let (lines, indices): (Vec<_>, Vec<_>) = search_results .usages .iter() .take(200) .map(|usage| (usage.line.as_str(), usage.indices.as_slice())) .unzip(); let response = json!({ "lines": lines, "indices": indices, "matched": matched }); ctx.vim .exec("clap#state#process_response_on_typed", response)?; self.cached_results = search_results; self.current_usages.take(); Ok(()) } } #[async_trait::async_trait] impl ClapProvider for DumbJumpProvider { async fn on_initialize(&mut self, ctx: &mut Context) -> Result<()> { let cwd = ctx.vim.working_dir().await?; let source_file_extension = ctx.start_buffer_extension()?.to_string(); tokio::task::spawn({ let cwd = cwd.clone(); let extension = source_file_extension.clone(); let dumb_jump = self.clone(); async move { if let Err(err) = dumb_jump.initialize_tags(extension, cwd).await { tracing::error!(error =?err, "Failed to initialize dumb_jump provider"); } } }); if let Some(query) = &self.args.query { let query_info = parse_query_info(query); let search_worker = SearchWorker { cwd, query_info: query_info.clone(), source_file_extension, }; let search_results = self.start_search(search_worker, query, query_info).await?; self.on_new_search_results(search_results, ctx)?; } Ok(()) } async fn on_move(&mut self, ctx: &mut Context) -> Result<()> { let current_lines = self .current_usages .as_ref() .unwrap_or(&self.cached_results.usages); if current_lines.is_empty() { return Ok(()); } let input = ctx.vim.input_get().await?; let lnum = ctx.vim.display_getcurlnum().await?; // lnum is 1-indexed let curline = current_lines .get_line(lnum - 1) .ok_or_else(|| anyhow::anyhow!("Can not find curline on Rust end for lnum: {lnum}"))?; let preview_height = ctx.preview_height().await?; let (preview_target, preview) = CachedPreviewImpl::new(curline.to_string(), preview_height, ctx)? .get_preview() .await?; let current_input = ctx.vim.input_get().await?; let current_lnum = ctx.vim.display_getcurlnum().await?; // Only send back the result if the request is not out-dated. if input == current_input && lnum == current_lnum { ctx.preview_manager.reset_scroll(); ctx.render_preview(preview)?; ctx.preview_manager.set_preview_target(preview_target); } Ok(()) } async fn on_typed(&mut self, ctx: &mut Context) -> Result<()> { let query = ctx.vim.input_get().await?; let query_info = parse_query_info(&query); // Try to refilter the cached results. 
if self.cached_results.query_info.is_superset(&query_info) { let refiltered = self .cached_results .usages .par_iter() .filter_map(|Usage { line, indices }| { query_info .usage_matcher .match_jump_line((line.clone(), indices.clone())) .map(|(line, indices)| Usage::new(line, indices)) }) .collect::<Vec<_>>(); let matched = refiltered.len(); let (lines, indices): (Vec<&str>, Vec<&[usize]>) = refiltered .iter() .take(200) .map(|Usage { line, indices }| (line.as_str(), indices.as_slice())) .unzip(); let response = json!({ "lines": lines, "indices": indices, "matched": matched }); ctx.vim .exec("clap#state#process_response_on_typed", response)?; self.current_usages.replace(refiltered.into()); return Ok(()); } let cwd: AbsPathBuf = ctx.vim.working_dir().await?; let search_worker = SearchWorker { cwd, query_info: query_info.clone(), source_file_extension: ctx.start_buffer_extension()?.to_string(), }; let search_results = self.start_search(search_worker, &query, query_info).await?; self.on_new_search_results(search_results, ctx)?; Ok(()) } } #[cfg(test)] mod tests { use super::*; #[test] fn test_parse_search_info() { let query_info = parse_query_info("'foo"); println!("{query_info:?}"); } }
random_line_split
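`on_new_search_results` and the refilter branch of `on_typed` both cap the payload sent to Vim at 200 items and split each `Usage` into parallel `lines`/`indices` arrays before serializing. A small sketch of that response shaping, using a simplified `Usage` stand-in:

use serde_json::json;

struct Usage { line: String, indices: Vec<usize> }

fn main() {
    let usages = vec![
        Usage { line: "fn parse_query_info".into(), indices: vec![3, 4, 5] },
        Usage { line: "parse_query_info(query)".into(), indices: vec![0, 1] },
    ];
    let matched = usages.len();

    // Borrow the top-200 slice as two parallel arrays, like on_new_search_results.
    let (lines, indices): (Vec<&str>, Vec<&[usize]>) = usages
        .iter()
        .take(200)
        .map(|u| (u.line.as_str(), u.indices.as_slice()))
        .unzip();

    let response = json!({ "lines": lines, "indices": indices, "matched": matched });
    println!("{response}");
}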
mod.rs
mod searcher; use self::searcher::{SearchEngine, SearchWorker}; use crate::find_usages::{CtagsSearcher, GtagsSearcher, QueryType, Usage, UsageMatcher, Usages}; use crate::stdio_server::handler::CachedPreviewImpl; use crate::stdio_server::job; use crate::stdio_server::provider::{BaseArgs, ClapProvider, Context}; use crate::tools::ctags::{get_language, TagsGenerator, CTAGS_EXISTS}; use crate::tools::gtags::GTAGS_EXISTS; use anyhow::Result; use filter::Query; use futures::Future; use itertools::Itertools; use paths::AbsPathBuf; use rayon::prelude::*; use serde_json::json; use std::path::PathBuf; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use tracing::Instrument; /// Internal representation of user input. #[derive(Debug, Clone, Default)] struct QueryInfo { /// Keyword for the tag or regex searching. keyword: String, /// Query type for `keyword`. query_type: QueryType, /// Search terms for further filtering. usage_matcher: UsageMatcher, } impl QueryInfo { /// Return `true` if the result of query info is a superset of the result of another, /// i.e., `self` contains all the search results of `other`. /// /// The rule is as follows: /// /// - the keyword is the same. /// - the new query is a subset of the last query. fn is_superset(&self, other: &Self) -> bool { self.keyword == other.keyword && self.query_type == other.query_type && self.usage_matcher.is_superset(&other.usage_matcher) } } /// Parses the raw user input and returns the final keyword as well as the constraint terms. /// Currently, only one keyword is supported. /// /// `hel 'fn` => `keyword ++ exact_term/inverse_term`. /// /// # Argument /// /// - `query`: Initial query typed in the input window. fn parse_query_info(query: &str) -> QueryInfo { let Query { word_terms: _, // TODO: add word_terms to UsageMatcher exact_terms, fuzzy_terms, inverse_terms, } = Query::from(query); // If there is no fuzzy term, use the full query as the keyword, // otherwise restore the fuzzy query as the keyword we are going to search. let (keyword, query_type, usage_matcher) = if fuzzy_terms.is_empty() { if exact_terms.is_empty()
else { ( exact_terms[0].text.clone(), QueryType::Exact, UsageMatcher::new(exact_terms, inverse_terms), ) } } else { ( fuzzy_terms.iter().map(|term| &term.text).join(" "), QueryType::StartWith, UsageMatcher::new(exact_terms, inverse_terms), ) }; // TODO: Search syntax: // - 'foo // - foo* // - foo // // if let Some(stripped) = query.strip_suffix('*') { // (stripped, QueryType::Contain) // } else if let Some(stripped) = query.strip_prefix('\'') { // (stripped, QueryType::Exact) // } else { // (query, QueryType::StartWith) // }; QueryInfo { keyword, query_type, usage_matcher, } } #[derive(Debug, Clone, Default)] struct SearchResults { /// Last searching results. /// /// When passing the line content from Vim to Rust, the performance /// of Vim can become very bad because some lines are extremely long, /// we cache the last results on Rust to allow passing the line number /// from Vim later instead. usages: Usages, /// Last parsed query info. query_info: QueryInfo, } #[derive(Debug, Clone)] pub struct DumbJumpProvider { args: BaseArgs, /// Results from last searching. /// This might be a superset of searching results for the last query. cached_results: SearchResults, /// Current results from refiltering on `cached_results`. current_usages: Option<Usages>, /// Whether the tags file has been (re)-created. ctags_regenerated: Arc<AtomicBool>, /// Whether the GTAGS file has been (re)-created. gtags_regenerated: Arc<AtomicBool>, } async fn init_gtags(cwd: PathBuf, gtags_regenerated: Arc<AtomicBool>) { let gtags_searcher = GtagsSearcher::new(cwd); match gtags_searcher.create_or_update_tags() { Ok(()) => gtags_regenerated.store(true, Ordering::SeqCst), Err(e) => { tracing::error!(error =?e, "[dumb_jump] 💔 Error at initializing GTAGS, attempting to recreate..."); // TODO: creating gtags may take 20s+ for large project match tokio::task::spawn_blocking({ let gtags_searcher = gtags_searcher.clone(); move || gtags_searcher.force_recreate() }) .await { Ok(_) => { gtags_regenerated.store(true, Ordering::SeqCst); tracing::debug!("[dumb_jump] Recreating gtags db successfully"); } Err(e) => { tracing::error!(error =?e, "[dumb_jump] 💔 Failed to recreate gtags db"); } } } } } impl DumbJumpProvider { pub async fn new(ctx: &Context) -> Result<Self> { let args = ctx.parse_provider_args().await?; Ok(Self { args, cached_results: Default::default(), current_usages: None, ctags_regenerated: Arc::new(false.into()), gtags_regenerated: Arc::new(false.into()), }) } async fn initialize_tags(&self, extension: String, cwd: AbsPathBuf) -> Result<()> { let job_id = utils::calculate_hash(&(&cwd, "dumb_jump")); if job::reserve(job_id) { let ctags_future = { let cwd = cwd.clone(); let mut tags_generator = TagsGenerator::with_dir(cwd.clone()); if let Some(language) = get_language(&extension) { tags_generator.set_languages(language.into()); } let ctags_regenerated = self.ctags_regenerated.clone(); // Ctags initialization is usually pretty fast. 
async move { let now = std::time::Instant::now(); let ctags_searcher = CtagsSearcher::new(tags_generator); match ctags_searcher.generate_tags() { Ok(()) => ctags_regenerated.store(true, Ordering::SeqCst), Err(e) => { tracing::error!(error =?e, "[dumb_jump] 💔 Error at initializing ctags") } } tracing::debug!(?cwd, "[dumb_jump] ⏱️ Ctags elapsed: {:?}", now.elapsed()); } }; let gtags_future = { let cwd: PathBuf = cwd.into(); let gtags_regenerated = self.gtags_regenerated.clone(); let span = tracing::span!(tracing::Level::INFO, "gtags"); async move { let _ = tokio::task::spawn(init_gtags(cwd, gtags_regenerated)).await; } .instrument(span) }; fn run(job_future: impl Send + Sync +'static + Future<Output = ()>, job_id: u64) { tokio::task::spawn({ async move { let now = std::time::Instant::now(); job_future.await; tracing::debug!("[dumb_jump] ⏱️ Total elapsed: {:?}", now.elapsed()); job::unreserve(job_id); } }); } match (*CTAGS_EXISTS, *GTAGS_EXISTS) { (true, true) => run( async move { futures::future::join(ctags_future, gtags_future).await; }, job_id, ), (false, false) => {} (true, false) => run(ctags_future, job_id), (false, true) => run(gtags_future, job_id), } } Ok(()) } /// Starts a new searching task. async fn start_search( &self, search_worker: SearchWorker, query: &str, query_info: QueryInfo, ) -> Result<SearchResults> { if query.is_empty() { return Ok(Default::default()); } let search_engine = match ( self.ctags_regenerated.load(Ordering::Relaxed), self.gtags_regenerated.load(Ordering::Relaxed), ) { (true, true) => SearchEngine::All, (true, false) => SearchEngine::CtagsAndRegex, _ => SearchEngine::Regex, }; let usages = search_engine.run(search_worker).await?; Ok(SearchResults { usages, query_info }) } fn on_new_search_results( &mut self, search_results: SearchResults, ctx: &Context, ) -> Result<()> { let matched = search_results.usages.len(); // Only show the top 200 items. 
let (lines, indices): (Vec<_>, Vec<_>) = search_results .usages .iter() .take(200) .map(|usage| (usage.line.as_str(), usage.indices.as_slice())) .unzip(); let response = json!({ "lines": lines, "indices": indices, "matched": matched }); ctx.vim .exec("clap#state#process_response_on_typed", response)?; self.cached_results = search_results; self.current_usages.take(); Ok(()) } } #[async_trait::async_trait] impl ClapProvider for DumbJumpProvider { async fn on_initialize(&mut self, ctx: &mut Context) -> Result<()> { let cwd = ctx.vim.working_dir().await?; let source_file_extension = ctx.start_buffer_extension()?.to_string(); tokio::task::spawn({ let cwd = cwd.clone(); let extension = source_file_extension.clone(); let dumb_jump = self.clone(); async move { if let Err(err) = dumb_jump.initialize_tags(extension, cwd).await { tracing::error!(error =?err, "Failed to initialize dumb_jump provider"); } } }); if let Some(query) = &self.args.query { let query_info = parse_query_info(query); let search_worker = SearchWorker { cwd, query_info: query_info.clone(), source_file_extension, }; let search_results = self.start_search(search_worker, query, query_info).await?; self.on_new_search_results(search_results, ctx)?; } Ok(()) } async fn on_move(&mut self, ctx: &mut Context) -> Result<()> { let current_lines = self .current_usages .as_ref() .unwrap_or(&self.cached_results.usages); if current_lines.is_empty() { return Ok(()); } let input = ctx.vim.input_get().await?; let lnum = ctx.vim.display_getcurlnum().await?; // lnum is 1-indexed let curline = current_lines .get_line(lnum - 1) .ok_or_else(|| anyhow::anyhow!("Can not find curline on Rust end for lnum: {lnum}"))?; let preview_height = ctx.preview_height().await?; let (preview_target, preview) = CachedPreviewImpl::new(curline.to_string(), preview_height, ctx)? .get_preview() .await?; let current_input = ctx.vim.input_get().await?; let current_lnum = ctx.vim.display_getcurlnum().await?; // Only send back the result if the request is not out-dated. if input == current_input && lnum == current_lnum { ctx.preview_manager.reset_scroll(); ctx.render_preview(preview)?; ctx.preview_manager.set_preview_target(preview_target); } Ok(()) } async fn on_typed(&mut self, ctx: &mut Context) -> Result<()> { let query = ctx.vim.input_get().await?; let query_info = parse_query_info(&query); // Try to refilter the cached results. 
if self.cached_results.query_info.is_superset(&query_info) { let refiltered = self .cached_results .usages .par_iter() .filter_map(|Usage { line, indices }| { query_info .usage_matcher .match_jump_line((line.clone(), indices.clone())) .map(|(line, indices)| Usage::new(line, indices)) }) .collect::<Vec<_>>(); let matched = refiltered.len(); let (lines, indices): (Vec<&str>, Vec<&[usize]>) = refiltered .iter() .take(200) .map(|Usage { line, indices }| (line.as_str(), indices.as_slice())) .unzip(); let response = json!({ "lines": lines, "indices": indices, "matched": matched }); ctx.vim .exec("clap#state#process_response_on_typed", response)?; self.current_usages.replace(refiltered.into()); return Ok(()); } let cwd: AbsPathBuf = ctx.vim.working_dir().await?; let search_worker = SearchWorker { cwd, query_info: query_info.clone(), source_file_extension: ctx.start_buffer_extension()?.to_string(), }; let search_results = self.start_search(search_worker, &query, query_info).await?; self.on_new_search_results(search_results, ctx)?; Ok(()) } } #[cfg(test)] mod tests { use super::*; #[test] fn test_parse_search_info() { let query_info = parse_query_info("'foo"); println!("{query_info:?}"); } }
{ (query.into(), QueryType::StartWith, UsageMatcher::default()) }
conditional_block
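The pair of `Arc<AtomicBool>` flags declared in this file lets slow tag generation run in the background while searches stay available: each search loads the flags and picks the strongest engine that is ready, falling back to plain regex until then. A minimal std-only sketch of the pattern (the `Engine` and `pick_engine` names are illustrative):

use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread;

#[derive(Debug, PartialEq)]
enum Engine { Regex, CtagsAndRegex, All }

fn pick_engine(ctags_ready: &AtomicBool, gtags_ready: &AtomicBool) -> Engine {
    match (ctags_ready.load(Ordering::Relaxed), gtags_ready.load(Ordering::Relaxed)) {
        (true, true) => Engine::All,
        (true, false) => Engine::CtagsAndRegex,
        _ => Engine::Regex, // fall back to plain regex until tags exist
    }
}

fn main() {
    let ctags = Arc::new(AtomicBool::new(false));
    let gtags = Arc::new(AtomicBool::new(false));

    // Searches issued before generation finishes degrade gracefully.
    assert_eq!(pick_engine(&ctags, &gtags), Engine::Regex);

    // Background "generation" flips the flag when done, like generate_tags().
    let c = ctags.clone();
    thread::spawn(move || c.store(true, Ordering::SeqCst)).join().unwrap();
    assert_eq!(pick_engine(&ctags, &gtags), Engine::CtagsAndRegex);
}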
mod.rs
mod searcher; use self::searcher::{SearchEngine, SearchWorker}; use crate::find_usages::{CtagsSearcher, GtagsSearcher, QueryType, Usage, UsageMatcher, Usages}; use crate::stdio_server::handler::CachedPreviewImpl; use crate::stdio_server::job; use crate::stdio_server::provider::{BaseArgs, ClapProvider, Context}; use crate::tools::ctags::{get_language, TagsGenerator, CTAGS_EXISTS}; use crate::tools::gtags::GTAGS_EXISTS; use anyhow::Result; use filter::Query; use futures::Future; use itertools::Itertools; use paths::AbsPathBuf; use rayon::prelude::*; use serde_json::json; use std::path::PathBuf; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use tracing::Instrument; /// Internal representation of user input. #[derive(Debug, Clone, Default)] struct QueryInfo { /// Keyword for the tag or regex searching. keyword: String, /// Query type for `keyword`. query_type: QueryType, /// Search terms for further filtering. usage_matcher: UsageMatcher, } impl QueryInfo { /// Return `true` if the result of query info is a superset of the result of another, /// i.e., `self` contains all the search results of `other`. /// /// The rule is as follows: /// /// - the keyword is the same. /// - the new query is a subset of the last query. fn is_superset(&self, other: &Self) -> bool { self.keyword == other.keyword && self.query_type == other.query_type && self.usage_matcher.is_superset(&other.usage_matcher) } } /// Parses the raw user input and returns the final keyword as well as the constraint terms. /// Currently, only one keyword is supported. /// /// `hel 'fn` => `keyword ++ exact_term/inverse_term`. /// /// # Argument /// /// - `query`: Initial query typed in the input window. fn parse_query_info(query: &str) -> QueryInfo { let Query { word_terms: _, // TODO: add word_terms to UsageMatcher exact_terms, fuzzy_terms, inverse_terms, } = Query::from(query); // If there is no fuzzy term, use the full query as the keyword, // otherwise restore the fuzzy query as the keyword we are going to search. let (keyword, query_type, usage_matcher) = if fuzzy_terms.is_empty() { if exact_terms.is_empty() { (query.into(), QueryType::StartWith, UsageMatcher::default()) } else { ( exact_terms[0].text.clone(), QueryType::Exact, UsageMatcher::new(exact_terms, inverse_terms), ) } } else { ( fuzzy_terms.iter().map(|term| &term.text).join(" "), QueryType::StartWith, UsageMatcher::new(exact_terms, inverse_terms), ) }; // TODO: Search syntax: // - 'foo // - foo* // - foo // // if let Some(stripped) = query.strip_suffix('*') { // (stripped, QueryType::Contain) // } else if let Some(stripped) = query.strip_prefix('\'') { // (stripped, QueryType::Exact) // } else { // (query, QueryType::StartWith) // }; QueryInfo { keyword, query_type, usage_matcher, } } #[derive(Debug, Clone, Default)] struct SearchResults { /// Results of the last search. /// /// When passing the line content from Vim to Rust, the performance /// of Vim can become very bad because some lines are extremely long, /// so we cache the last results on the Rust side to allow passing only the line number /// from Vim later instead. usages: Usages, /// Last parsed query info. query_info: QueryInfo, } #[derive(Debug, Clone)] pub struct DumbJumpProvider { args: BaseArgs, /// Results from the last search. /// This might be a superset of the search results for the last query. cached_results: SearchResults, /// Current results from refiltering on `cached_results`. current_usages: Option<Usages>, /// Whether the tags file has been (re)-created.
ctags_regenerated: Arc<AtomicBool>, /// Whether the GTAGS file has been (re)-created. gtags_regenerated: Arc<AtomicBool>, } async fn init_gtags(cwd: PathBuf, gtags_regenerated: Arc<AtomicBool>) { let gtags_searcher = GtagsSearcher::new(cwd); match gtags_searcher.create_or_update_tags() { Ok(()) => gtags_regenerated.store(true, Ordering::SeqCst), Err(e) => { tracing::error!(error =?e, "[dumb_jump] 💔 Error at initializing GTAGS, attempting to recreate..."); // TODO: creating gtags may take 20s+ for large project match tokio::task::spawn_blocking({ let gtags_searcher = gtags_searcher.clone(); move || gtags_searcher.force_recreate() }) .await { Ok(_) => { gtags_regenerated.store(true, Ordering::SeqCst); tracing::debug!("[dumb_jump] Recreating gtags db successfully"); } Err(e) => { tracing::error!(error =?e, "[dumb_jump] 💔 Failed to recreate gtags db"); } } } } } impl DumbJumpProvider { pub async fn new(ctx: &Context) -> Result<Self> { let args = ctx.parse_provider_args().await?; Ok(Self { args, cached_results: Default::default(), current_usages: None, ctags_regenerated: Arc::new(false.into()), gtags_regenerated: Arc::new(false.into()), }) } async fn initialize_tags(&self, extension: String, cwd: AbsPathBuf) -> Result<()> { let job_id = utils::calculate_hash(&(&cwd, "dumb_jump")); if job::reserve(job_id) { let ctags_future = { let cwd = cwd.clone(); let mut tags_generator = TagsGenerator::with_dir(cwd.clone()); if let Some(language) = get_language(&extension) { tags_generator.set_languages(language.into()); } let ctags_regenerated = self.ctags_regenerated.clone(); // Ctags initialization is usually pretty fast. async move { let now = std::time::Instant::now(); let ctags_searcher = CtagsSearcher::new(tags_generator); match ctags_searcher.generate_tags() { Ok(()) => ctags_regenerated.store(true, Ordering::SeqCst), Err(e) => { tracing::error!(error =?e, "[dumb_jump] 💔 Error at initializing ctags") } } tracing::debug!(?cwd, "[dumb_jump] ⏱️ Ctags elapsed: {:?}", now.elapsed()); } }; let gtags_future = { let cwd: PathBuf = cwd.into(); let gtags_regenerated = self.gtags_regenerated.clone(); let span = tracing::span!(tracing::Level::INFO, "gtags"); async move { let _ = tokio::task::spawn(init_gtags(cwd, gtags_regenerated)).await; } .instrument(span) }; fn run(job_future: impl Send + Sync +'static + Future<Output = ()>, job_id: u64) { tokio::task::spawn({ async move { let now = std::time::Instant::now(); job_future.await; tracing::debug!("[dumb_jump] ⏱️ Total elapsed: {:?}", now.elapsed()); job::unreserve(job_id); } }); } match (*CTAGS_EXISTS, *GTAGS_EXISTS) { (true, true) => run( async move { futures::future::join(ctags_future, gtags_future).await; }, job_id, ), (false, false) => {} (true, false) => run(ctags_future, job_id), (false, true) => run(gtags_future, job_id), } } Ok(()) } /// Starts a new searching task. async fn start_search( &self, search_worker: SearchWorker, query: &str, query_info: QueryInfo, ) -> Result<SearchResults> { if query.is_empty() { return Ok(Default::default()); } let search_engine = match ( self.ctags_regenerated.load(Ordering::Relaxed), self.gtags_regenerated.load(Ordering::Relaxed), ) { (true, true) => SearchEngine::All, (true, false) => SearchEngine::CtagsAndRegex, _ => SearchEngine::Regex, }; let usages = search_engine.run(search_worker).await?; Ok(SearchResults { usages, query_info }) } fn on_new_search_results( &mut self, search_results: SearchResults, ctx: &Context, ) -> Result<()> { let matched = search_results.usages.len(); // Only show the top 200 items. 
let (lines, indices): (Vec<_>, Vec<_>) = search_results .usages .iter() .take(200) .map(|usage| (usage.line.as_str(), usage.indices.as_slice())) .unzip(); let response = json!({ "lines": lines, "indices": indices, "matched": matched }); ctx.vim .exec("clap#state#process_response_on_typed", response)?; self.cached_results = search_results; self.current_usages.take(); Ok(()) } } #[async_trait::async_trait] impl ClapProvider for DumbJumpProvider { async fn on_initialize(&mut self, ctx: &mut Context) -> Result<()> { let cwd = ctx.vim.working_dir().await?; let source_file_extension = ctx.start_buffer_extension()?.to_string(); tokio::task::spawn({ let cwd = cwd.clone(); let extension = source_file_extension.clone(); let dumb_jump = self.clone(); async move { if let Err(err) = dumb_jump.initialize_tags(extension, cwd).await { tracing::error!(error =?err, "Failed to initialize dumb_jump provider"); } } }); if let Some(query) = &self.args.query { let query_info = parse_query_info(query); let search_worker = SearchWorker { cwd, query_info: query_info.clone(), source_file_extension, }; let search_results = self.start_search(search_worker, query, query_info).await?; self.on_new_search_results(search_results, ctx)?; } Ok(()) } async fn on_move(&mut self
&mut Context) -> Result<()> { let current_lines = self .current_usages .as_ref() .unwrap_or(&self.cached_results.usages); if current_lines.is_empty() { return Ok(()); } let input = ctx.vim.input_get().await?; let lnum = ctx.vim.display_getcurlnum().await?; // lnum is 1-indexed let curline = current_lines .get_line(lnum - 1) .ok_or_else(|| anyhow::anyhow!("Can not find curline on Rust end for lnum: {lnum}"))?; let preview_height = ctx.preview_height().await?; let (preview_target, preview) = CachedPreviewImpl::new(curline.to_string(), preview_height, ctx)? .get_preview() .await?; let current_input = ctx.vim.input_get().await?; let current_lnum = ctx.vim.display_getcurlnum().await?; // Only send back the result if the request is not out-dated. if input == current_input && lnum == current_lnum { ctx.preview_manager.reset_scroll(); ctx.render_preview(preview)?; ctx.preview_manager.set_preview_target(preview_target); } Ok(()) } async fn on_typed(&mut self, ctx: &mut Context) -> Result<()> { let query = ctx.vim.input_get().await?; let query_info = parse_query_info(&query); // Try to refilter the cached results. if self.cached_results.query_info.is_superset(&query_info) { let refiltered = self .cached_results .usages .par_iter() .filter_map(|Usage { line, indices }| { query_info .usage_matcher .match_jump_line((line.clone(), indices.clone())) .map(|(line, indices)| Usage::new(line, indices)) }) .collect::<Vec<_>>(); let matched = refiltered.len(); let (lines, indices): (Vec<&str>, Vec<&[usize]>) = refiltered .iter() .take(200) .map(|Usage { line, indices }| (line.as_str(), indices.as_slice())) .unzip(); let response = json!({ "lines": lines, "indices": indices, "matched": matched }); ctx.vim .exec("clap#state#process_response_on_typed", response)?; self.current_usages.replace(refiltered.into()); return Ok(()); } let cwd: AbsPathBuf = ctx.vim.working_dir().await?; let search_worker = SearchWorker { cwd, query_info: query_info.clone(), source_file_extension: ctx.start_buffer_extension()?.to_string(), }; let search_results = self.start_search(search_worker, &query, query_info).await?; self.on_new_search_results(search_results, ctx)?; Ok(()) } } #[cfg(test)] mod tests { use super::*; #[test] fn test_parse_search_info() { let query_info = parse_query_info("'foo"); println!("{query_info:?}"); } }
, ctx:
identifier_name
command_server.rs
//! Internal server that accepts raw commands, queues them up, and transmits //! them to the Tick Processor asynchronously. Commands are re-transmitted //! if a response isn't received in a timeout period. //! //! Responses from the Tick Processor are sent back over the commands channel //! and are sent to worker processes that register interest in them over channels. //! Workers register interest after sending a command so that they can be notified //! of the successful reception of the command. //! //! TODO: Ensure that commands aren't processed twice by storing Uuids or most //! recent 200 commands or something and checking that list before executing (?) //! //! TODO: Use a different channel for responses than for commands extern crate test; use std::collections::VecDeque; use std::thread::{self, Thread}; use std::time::Duration; use std::sync::{Arc, Mutex}; use std::str::FromStr; use futures::{Stream, Canceled}; use futures::sync::mpsc::{unbounded, UnboundedSender, UnboundedReceiver}; use futures::Future; use futures::sync::oneshot::{channel as oneshot, Sender, Receiver}; use uuid::Uuid; use redis; use transport::redis::{get_client, sub_channel}; use transport::commands::*; use conf::CONF; /// A command waiting to be sent plus a Sender to send the Response/Error String /// through and the channel on which to broadcast the Command. struct CommandRequest { cmd: Command, future: Sender<Result<Response, String>>, channel: String, } /// Contains a `CommandRequest` for a worker and a Sender that resolves when the worker /// becomes idle. type WorkerTask = (CommandRequest, Sender<()>); /// Threadsafe queue containing handles to idle command-sender threads in the form of `UnboundedSender`s type UnboundedSenderQueue = Arc<Mutex<VecDeque<UnboundedSender<WorkerTask>>>>; /// Threadsafe queue containing commands waiting to be sent type CommandQueue = Arc<Mutex<VecDeque<CommandRequest>>>; /// A `Vec` containing a `Uuid` of a `Response` that's expected and a `UnboundedSender` to send the /// response through once it arrives type RegisteredList = Vec<(Uuid, UnboundedSender<Result<Response, ()>>)>; /// A message to be sent to the timeout thread containing how long to time out for, /// a oneshot that resolves to a handle to the Timeout's thread as soon as the timeout begins, /// and a oneshot that resolves to `Err(())` if the timeout completes. /// /// The thread handle can be used to end the timeout early to make the timeout thread /// usable again. struct TimeoutRequest { dur: Duration, thread_future: Sender<Thread>, timeout_future: Sender<Result<Response, ()>>, } /// A list of `UnboundedSender`s over which Results from the Tick Processor will be sent if they /// match the ID of the request the command `UnboundedSender` thread sent.
struct AlertList {
    // Vec to hold the ids of responses we're waiting for and `Sender`s
    // to send the result back to the worker thread.
    // Wrapped in Arc<Mutex<>> so that it can be accessed from within futures.
    pub list: RegisteredList,
}

/// Sends the Response out to the worker that has registered interest in its Uuid
fn send_messages(res: WrappedResponse, al: &Mutex<AlertList>) {
    let mut al_inner = al.lock().expect("Unable to lock al in send_messages");
    let pos_opt: Option<&mut (_, UnboundedSender<Result<Response, ()>>)> =
        al_inner.list.iter_mut().find(|x| x.0 == res.uuid);
    if let Some(pos) = pos_opt {
        pos.1.send(Ok(res.res)).expect("Unable to send through subscribed future");
    }
}

/// Keeps track of the UUIDs of Responses that workers are interested in and
/// holds Completes to let them know when those Responses are received.
impl AlertList {
    pub fn new() -> AlertList {
        AlertList {
            list: Vec::new(),
        }
    }

    /// Register interest in Results with a specified Uuid and send
    /// the Result over the specified Oneshot when it's received
    pub fn register(&mut self, response_uuid: &Uuid, c: UnboundedSender<Result<Response, ()>>) {
        self.list.push((*response_uuid, c));
    }

    /// Deregisters a listener in the case of a timeout occurring
    pub fn deregister(&mut self, uuid: &Uuid) {
        let pos_opt = self.list.iter().position(|x| &x.0 == uuid);
        match pos_opt {
            Some(pos) => { self.list.remove(pos); },
            None => println!("Error deregistering element from interest list; it's not in it"),
        }
    }
}

#[derive(Clone)]
pub struct CommandServer {
    al: Arc<Mutex<AlertList>>,
    command_queue: CommandQueue, // internal command queue
    conn_queue: UnboundedSenderQueue, // UnboundedSenders for idle command-sender threads
    client: redis::Client,
    instance: Instance, // The instance that owns this CommandServer
}

/// Locks the `CommandQueue` and returns a queued command, if there are any.
fn try_get_new_command(command_queue: CommandQueue) -> Option<CommandRequest> {
    let mut qq_inner = command_queue.lock()
        .expect("Unable to lock qq_inner in try_get_new_command");
    qq_inner.pop_front()
}

fn send_command_outer(
    al: &Mutex<AlertList>, command: &Command, client: &mut redis::Client,
    mut sleeper_tx: &mut UnboundedSender<TimeoutRequest>, res_c: Sender<Result<Response, String>>,
    command_queue: CommandQueue, mut attempts: usize, commands_channel: String
) {
    let wr_cmd = command.wrap();
    let _ = send_command(&wr_cmd, client, commands_channel.as_str());

    let (sleepy_c, sleepy_o) = oneshot::<Thread>();
    let (awake_c, awake_o) = oneshot::<Result<Response, ()>>();
    // start the timeout timer on a separate thread
    let dur = Duration::from_millis(CONF.cs_timeout as u64);
    let timeout_msg = TimeoutRequest {
        dur: dur,
        thread_future: sleepy_c,
        timeout_future: awake_c,
    };

    sleeper_tx.send(timeout_msg).unwrap();
    // sleepy_o fulfills immediately with a handle to the sleeper thread
    let sleepy_handle = sleepy_o.wait();
    // UnboundedSender for giving to the AlertList and sending the response back
    let (res_recvd_c, res_recvd_o) = unbounded::<Result<Response, ()>>();
    // register interest in new Responses coming in with our Command's Uuid
    {
        al.lock().expect("Unable to lock al in send_command_outer #1")
            .register(&wr_cmd.uuid, res_recvd_c);
    }

    res_recvd_o.into_future().map(|(item_opt, _)| {
        item_opt.expect("item_opt was None")
    }).map_err(|_| Canceled).select(awake_o).and_then(move |res| {
        let (status, _) = res;
        match status {
            Ok(wrapped_res) => { // command received
                {
                    // deregister since we're only waiting on one message
                    al.lock().expect("Unable to lock al in send_command_outer #2")
                        .deregister(&wr_cmd.uuid);
                }
                // end the timeout now so that we can re-use the sleeper thread
                sleepy_handle.expect("Couldn't unwrap handle to sleeper thread").unpark();
                // resolve the Response future
                res_c.complete(Ok(wrapped_res));
                return Ok(sleeper_tx)
            },
            Err(_) => { // timed out
                {
                    al.lock().expect("Couldn't lock al in Err(_)")
                        .deregister(&wr_cmd.uuid);
                }
                attempts += 1;
                if attempts >= CONF.cs_max_retries {
                    // Let the main thread know it's safe to use the UnboundedSender again.
                    // This essentially indicates that the worker thread is idle.
                    let err_msg = String::from("Timed out too many times!");
                    res_c.complete(Err(err_msg));
                    return Ok(sleeper_tx)
                } else { // re-send the command
                    // we can do this recursively since it's only a few retries
                    send_command_outer(al, &wr_cmd.cmd, client, sleeper_tx, res_c,
                        command_queue, attempts, commands_channel)
                }
            },
        }

        Ok(sleeper_tx)
    }).wait().ok().unwrap(); // block until a response is received or the command times out
}

/// Manually loops over the converted Stream of commands
fn dispatch_worker(
    work: WorkerTask, al: &Mutex<AlertList>, mut client: &mut redis::Client,
    mut sleeper_tx: &mut UnboundedSender<TimeoutRequest>, command_queue: CommandQueue
) -> Option<()> {
    let (cr, idle_c) = work;

    // completes the initial command and internally iterates until the queue is empty
    send_command_outer(al, &cr.cmd, &mut client, sleeper_tx, cr.future,
        command_queue.clone(), 0, cr.channel);
    // keep trying to get queued commands to execute until the queue is empty
    while let Some(cr) = try_get_new_command(command_queue.clone()) {
        send_command_outer(al, &cr.cmd, client, &mut sleeper_tx, cr.future,
            command_queue.clone(), 0, cr.channel);
    }
    idle_c.complete(());

    Some(())
}

/// Blocks the current thread until a Duration+Complete is received.
/// Then it sleeps for that Duration and Completes the oneshot upon awakening.
/// Returns a Complete upon starting that can be used to end the timeout early
fn init_sleeper(rx: UnboundedReceiver<TimeoutRequest>) {
    for res in rx.wait() {
        match res.unwrap() {
            TimeoutRequest{dur, thread_future, timeout_future} => {
                // send a Complete with a handle to the thread
                thread_future.complete(thread::current());
                thread::park_timeout(dur);
                timeout_future.complete(Err(()));
            }
        }
    }
}

/// Creates a command processor that awaits requests
fn init_command_processor(
    cmd_rx: UnboundedReceiver<WorkerTask>, command_queue: CommandQueue, al: &Mutex<AlertList>
) {
    let mut client = get_client(CONF.redis_host);
    // channel for communicating with the sleeper thread
    let (mut sleeper_tx, sleeper_rx) = unbounded::<TimeoutRequest>();
    thread::spawn(move || init_sleeper(sleeper_rx));

    for task in cmd_rx.wait() {
        let res = dispatch_worker(
            task.unwrap(), al, &mut client, &mut sleeper_tx, command_queue.clone()
        );

        // exit if we're in the process of shutting down
        if res.is_none() {
            break;
        }
    }
}

impl CommandServer {
    pub fn new(instance_uuid: Uuid, instance_type: &str) -> CommandServer {
        let mut conn_queue = VecDeque::with_capacity(CONF.conn_senders);
        let command_queue = Arc::new(Mutex::new(VecDeque::new()));

        let al = Arc::new(Mutex::new(AlertList::new()));
        let al_clone = al.clone();

        // Handle newly received Responses
        let rx = sub_channel(CONF.redis_host, CONF.redis_responses_channel);
        thread::spawn(move || {
            for raw_res_res in rx.wait() {
                let raw_res = raw_res_res.expect("Res was error in CommandServer response UnboundedReceiver thread.");
                let parsed_res = parse_wrapped_response(raw_res);
                send_messages(parsed_res, &*al_clone);
            }
        });

        for _ in 0..CONF.conn_senders {
            let al_clone = al.clone();
            let qq_copy = command_queue.clone();

            // channel for getting the UnboundedSender back from the worker thread
            let (tx, rx) = unbounded::<WorkerTask>();

            thread::spawn(move || init_command_processor(rx, qq_copy, &*al_clone));
            // store the UnboundedSender, which can be used to send queries
            // to the worker, in the connection queue
            conn_queue.push_back(tx);
        }

        let client = get_client(CONF.redis_host);
        CommandServer {
            al: al,
            command_queue: command_queue,
            conn_queue: Arc::new(Mutex::new(conn_queue)),
            client: client,
            instance: Instance {
                uuid: instance_uuid,
                instance_type: String::from(instance_type),
            },
        }
    }

    /// Queues up a command to be sent. Returns a future that resolves to
    /// the returned response.
    pub fn
(
        &mut self, command: Command, commands_channel: String
    ) -> Receiver<Result<Response, String>> {
        let temp_lock_res = self.conn_queue.lock().unwrap().is_empty();
        // Force the guard locking conn_queue to go out of scope;
        // this prevents the lock from being held through the entire if/else
        let copy_res = temp_lock_res;
        // future for handing back to the caller that resolves to Response/Error
        let (res_c, res_o) = oneshot::<Result<Response, String>>();
        // future for notifying the main thread when the command is done and the worker is idle
        let (idle_c, idle_o) = oneshot::<()>();
        let cr = CommandRequest {
            cmd: command,
            future: res_c,
            channel: commands_channel,
        };

        if copy_res {
            self.command_queue.lock().unwrap().push_back(cr);
        } else {
            // type WorkerTask
            let req = (cr, idle_c);
            let tx;
            {
                tx = self.conn_queue.lock().unwrap().pop_front().unwrap();
                tx.send(req).unwrap();
            }
            let cq_clone = self.conn_queue.clone();
            thread::spawn(move || {
                // Wait until the worker thread signals that it is idle
                let _ = idle_o.wait();
                // Put the UnboundedSender for the newly idle worker into the connection queue
                cq_clone.lock().unwrap().push_back(tx);
            });
        }

        res_o
    }

    pub fn broadcast(
        &mut self, command: Command, commands_channel: String
    ) -> Receiver<Vec<Response>> {
        // spawn a new timeout thread just for this request
        let (sleeper_tx, sleeper_rx) = unbounded::<TimeoutRequest>();
        let dur = Duration::from_millis(CONF.cs_timeout as u64);
        let (sleepy_c, _) = oneshot::<Thread>();
        // awake_o fulfills when the timeout expires
        let (awake_c, awake_o) = oneshot::<Result<Response, ()>>();
        let wr_cmd = command.wrap();
        // Oneshot for sending the collected responses back to the caller.
        let (all_responses_c, all_responses_o) = oneshot::<Vec<Response>>();

        let alc = self.al.clone();
        let (res_recvd_c, res_recvd_o) = unbounded::<Result<Response, ()>>();
        {
            // triggered each time a matching message is received
            let mut al_inner = alc.lock().expect("Unable to lock al in broadcast");
            al_inner.register(&wr_cmd.uuid, res_recvd_c);
        }

        let responses_container = Arc::new(Mutex::new(Vec::new()));
        let responses_container_clone = responses_container.clone();
        thread::spawn(move || {
            for response in res_recvd_o.wait() {
                match response {
                    Ok(res) => {
                        let mut responses = responses_container_clone.lock().unwrap();
                        responses.push(res.expect("Inner error in responses iterator"))
                    },
                    Err(err) => println!("Got error from response iterator: {:?}", err),
                }
            }
        });

        let wr_cmd_c = wr_cmd.clone();
        thread::spawn(move || { // timer waiter thread
            // when a timeout happens, poll all the pending interest listeners and send results back
            let _ = awake_o.wait();

            // deregister interest
            {
                let mut al_inner = alc.lock().expect("Unable to lock al in broadcast");
                al_inner.deregister(&wr_cmd_c.uuid);
            }

            let responses;
            {
                responses = responses_container.lock().unwrap().clone();
            }
            all_responses_c.complete(responses);
        });

        thread::spawn(move || init_sleeper(sleeper_rx)); // timer thread

        // actually send the Command
        let _ = send_command(&wr_cmd, &self.client, commands_channel.as_str());

        let timeout_msg = TimeoutRequest {
            dur: dur,
            thread_future: sleepy_c,
            timeout_future: awake_c,
        };
        // initiate timeout
        sleeper_tx.send(timeout_msg).unwrap();

        all_responses_o
    }

    /// Sends a command asynchronously without bothering to wait for responses.
pub fn send_forget(&self, cmd: &Command, channel: &str) {
        let _ = send_command(&cmd.wrap(), &self.client, channel);
    }

    /// Sends a message to the logger with the specified severity
    pub fn log(&mut self, message_type_opt: Option<&str>, message: &str, level: LogLevel) {
        let message_type = match message_type_opt {
            Some(t) => t,
            None => "General",
        };
        let line = LogMessage {
            level: level,
            message_type: String::from(message_type),
            message: String::from(message),
            sender: self.instance.clone(),
        };
        self.send_forget(&Command::Log{msg: line}, CONF.redis_log_channel);
    }

    /// Shortcut method for logging a debug-level message.
    pub fn debug(&mut self, message_type: Option<&str>, message: &str) {
        self.log(message_type, message, LogLevel::Debug);
    }

    /// Shortcut method for logging a notice-level message.
    pub fn notice(&mut self, message_type: Option<&str>, message: &str) {
        self.log(message_type, message, LogLevel::Notice);
    }

    /// Shortcut method for logging a warning-level message.
    pub fn warning(&mut self, message_type: Option<&str>, message: &str) {
        self.log(message_type, message, LogLevel::Warning);
    }

    /// Shortcut method for logging an error-level message.
    pub fn error(&mut self, message_type: Option<&str>, message: &str) {
        self.log(message_type, message, LogLevel::Error);
    }

    /// Shortcut method for logging a critical-level message.
    pub fn critical(&mut self, message_type: Option<&str>, message: &str) {
        self.log(message_type, message, LogLevel::Critical);
    }
}

#[bench]
fn thread_spawn(b: &mut test::Bencher) {
    b.iter(|| thread::spawn(|| {}))
}
execute
identifier_name
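To make the `execute`/`log` flow above concrete, here is a hedged usage sketch. It assumes a hypothetical `Command::Ping` variant (the real `Command` enum lives in `transport::commands` and is not shown here), an invented `"commands"` channel name and `"backtester"` instance type, and the same futures 0.1-era blocking `Receiver::wait()` used throughout the module.

// Hypothetical usage from within the same crate as the module above;
// `Command::Ping`, the channel name, and the instance type are illustrative
// assumptions, not part of the original file.
use futures::Future;
use uuid::Uuid;

fn ping_tick_processor(cs: &mut CommandServer) {
    // `execute` either queues the command or hands it to an idle worker and
    // returns a oneshot Receiver resolving to the Response or an error String.
    let receiver = cs.execute(Command::Ping, String::from("commands"));

    // Block until the command is acknowledged or retried to exhaustion.
    match receiver.wait() {
        Ok(Ok(_response)) => println!("tick processor acknowledged the ping"),
        Ok(Err(err_msg)) => println!("command failed: {}", err_msg),
        Err(_) => println!("response future was cancelled"),
    }
}

fn main() {
    let mut cs = CommandServer::new(Uuid::new_v4(), "backtester");
    ping_tick_processor(&mut cs);
    cs.notice(Some("Ping"), "pinged the tick processor");
}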
lib.rs
//! Securely zero memory with a simple trait ([Zeroize]) built on stable Rust //! primitives which guarantee the operation will not be 'optimized away'. //! //! ## Usage //! //! ``` //! use zeroize::Zeroize; //! //! fn main() { //! // Protip: don't embed secrets in your source code. //! // This is just an example. //! let mut secret = b"Air shield password: 1,2,3,4,5".to_vec(); //! // [... ] open the air shield here //! //! // Now that we're done using the secret, zero it out. //! secret.zeroize(); //! } //! ``` //! //! The [Zeroize] trait is impl'd on all of Rust's core scalar types including //! integers, floats, `bool`, and `char`. //! //! Additionally, it's implemented on slices and `IterMut`s of the above types. //! //! When the `std` feature is enabled (which it is by default), it's also impl'd //! for `Vec`s of the above types as well as `String`, where it provides //! [Vec::clear()] / [String::clear()]-like behavior (truncating to zero-length) //! but ensures the backing memory is securely zeroed. //! //! The [DefaultIsZeroes] marker trait can be impl'd on types which also //! impl [Default], which implements [Zeroize] by overwriting a value with //! the default value. //! //! ## Custom Derive Support //! //! This crate has custom derive support for the `Zeroize` trait, which //! automatically calls `zeroize()` on all members of a struct or tuple struct: //! //! ``` //! // Ensure you import the crate with `macro_use`: //! // #[macro_use] //! // extern crate zeroize; //! //! use zeroize::Zeroize; //! //! #[derive(Zeroize)] //! struct MyStruct([u8; 64]); //! ``` //! //! Additionally, you can derive `ZeroizeOnDrop`, which will automatically //! derive a `Drop` handler that calls `zeroize()`: //! //! ``` //! use zeroize::{Zeroize, ZeroizeOnDrop}; //! //! // This struct will be zeroized on drop //! #[derive(Zeroize, ZeroizeOnDrop)] //! struct MyStruct([u8; 64]); //! ``` //! //! ## About //! //! [Zeroing memory securely is hard] - compilers optimize for performance, and //! in doing so they love to "optimize away" unnecessary zeroing calls. There are //! many documented "tricks" to attempt to avoid these optimizations and ensure //! that a zeroing routine is performed reliably. //! //! This crate isn't about tricks: it uses [core::ptr::write_volatile] //! and [core::sync::atomic] memory fences to provide easy-to-use, portable //! zeroing behavior which works on all of Rust's core number types and slices //! thereof, implemented in pure Rust with no usage of FFI or assembly. //! //! - **No insecure fallbacks!** //! - **No dependencies!** //! - **No FFI or inline assembly!** //! - `#![no_std]` **i.e. embedded-friendly**! //! - **No functionality besides securely zeroing memory!** //!
//! 2. Ensure all subsequent reads to the memory following the zeroing operation //! will always see zeroes. //! //! This crate guarantees #1 is true: LLVM's volatile semantics ensure it. //! //! The story around #2 is much more complicated. In brief, it should be true that //! LLVM's current implementation does not attempt to perform optimizations which //! would allow a subsequent (non-volatile) read to see the original value prior //! to zeroization. However, this is not a guarantee, but rather an LLVM //! implementation detail. //! //! For more background, we can look to the [core::ptr::write_volatile] //! documentation: //! //! > Volatile operations are intended to act on I/O memory, and are guaranteed //! > to not be elided or reordered by the compiler across other volatile //! > operations. //! > //! > Memory accessed with `read_volatile` or `write_volatile` should not be //! > accessed with non-volatile operations. //! //! Uhoh! This crate does not guarantee all reads to the memory it operates on //! are volatile, and the documentation for [core::ptr::write_volatile] //! explicitly warns against mixing volatile and non-volatile operations. //! Perhaps we'd be better off with something like a `VolatileCell` //! type which owns the associated data and ensures all reads and writes are //! volatile so we don't have to worry about the semantics of mixing volatile and //! non-volatile accesses. //! //! While that's a strategy worth pursuing (and something we may investigate //! separately from this crate), it comes with some onerous API requirements: //! it means any data that we might ever desire to zero is owned by a //! `VolatileCell`. However, this does not make it possible for this crate //! to act on references, which severely limits its applicability. In fact //! a `VolatileCell` can only act on values, i.e. to read a value from it, //! we'd need to make a copy of it, and that's literally the opposite of //! what we want. //! //! It's worth asking what the precise semantics of mixing volatile and //! non-volatile reads actually are, and whether a less obtrusive API which //! can act entirely on mutable references is possible, safe, and provides the //! desired behavior. //! //! Unfortunately, that's a tricky question, because //! [Rust does not have a formally defined memory model][memory-model], //! and the behavior of mixing volatile and non-volatile memory accesses is //! therefore not rigorously specified and winds up being an LLVM //! implementation detail. The semantics were discussed extensively in this //! thread, specifically in the context of zeroing secrets from memory: //! //! <https://internals.rust-lang.org/t/volatile-and-sensitive-memory/3188/24> //! //! Some notable details from this thread: //! //! - Rust/LLVM's notion of "volatile" is centered around data *accesses*, not //! the data itself. Specifically it maps to flags in LLVM IR which control //! the behavior of the optimizer, and is therefore a bit different from the //! typical C notion of "volatile". //! - As mentioned earlier, LLVM does not presently contain optimizations which //! would reorder a non-volatile read to occur before a volatile write. //! However, there is nothing precluding such optimizations from being added. //! LLVM presently appears to exhibit the desired behavior for both points //! #1 and #2 above, but there is nothing preventing future versions of Rust //! and/or LLVM from changing that. //! //! To help mitigate concerns about reordering potentially exposing secrets //! 
after they have been zeroed, this crate leverages the [core::sync::atomic] //! memory fence functions including [compiler_fence] and [fence] (which uses //! the CPU's native fence instructions). These fences are leveraged with the //! strictest ordering guarantees, [Ordering::SeqCst], which ensures no //! accesses are reordered. Without a formally defined memory model we can't //! guarantee these will be effective, but we hope they will cover most cases. //! //! Concretely the threat of leaking "zeroized" secrets (via reordering by //! LLVM and/or the CPU via out-of-order or speculative execution) would //! require a non-volatile access to be reordered ahead of the following: //! //! 1. before an [Ordering::SeqCst] compiler fence //! 2. before an [Ordering::SeqCst] runtime fence //! 3. before a volatile write //! //! This seems unlikely, but our usage of mixed non-volatile and volatile //! accesses is technically undefined behavior, at least until guarantees //! about this particular mixture of operations is formally defined in a //! Rust memory model. //! //! Furthermore, given the recent history of microarchitectural attacks //! (Spectre, Meltdown, etc), there is also potential for "zeroized" secrets //! to be leaked through covert channels (e.g. memory fences have been used //! as a covert channel), so we are wary to make guarantees unless they can //! be made firmly in terms of both a formal Rust memory model and the //! generated code for a particular CPU architecture. //! //! In conclusion, this crate guarantees the zeroize operation will not be //! elided or "optimized away", makes a "best effort" to ensure that //! memory accesses will not be reordered ahead of the "zeroize" operation, //! but **cannot** yet guarantee that such reordering will not occur. //! //! ## Stack/Heap Zeroing Notes //! //! This crate can be used to zero values from either the stack or the heap. //! //! However, be aware that Rust's current memory semantics (e.g. `Copy` types) //! can leave copies of data in memory, and there isn't presently a good solution //! for ensuring all copies of data on the stack are properly cleared. //! //! The [`Pin` RFC][pin] proposes a method for avoiding this. //! //! ## What about: clearing registers, mlock, mprotect, etc? //! //! This crate is laser-focused on being a simple, unobtrusive crate for zeroing //! memory in as reliable a manner as is possible on stable Rust. //! //! Clearing registers is a difficult problem that can't easily be solved by //! something like a crate, and requires either inline ASM or rustc support. //! See <https://github.com/rust-lang/rust/issues/17046> for background on //! this particular problem. //! //! Other memory protection mechanisms are interesting and useful, but often //! overkill (e.g. defending against RAM scraping or attackers with swap access). //! In as much as there may be merit to these approaches, there are also many //! other crates that already implement more sophisticated memory protections. //! Such protections are explicitly out-of-scope for this crate. //! //! Zeroing memory is [good cryptographic hygiene] and this crate seeks to promote //! it in the most unobtrusive manner possible. This includes omitting complex //! `unsafe` memory protection systems and just trying to make the best memory //! zeroing crate available. //! //! [Zeroize]: https://docs.rs/zeroize/latest/zeroize/trait.Zeroize.html //! [Zeroing memory securely is hard]: http://www.daemonology.net/blog/2014-09-04-how-to-zero-a-buffer.html //! 
[Vec::clear()]: https://doc.rust-lang.org/std/vec/struct.Vec.html#method.clear //! [String::clear()]: https://doc.rust-lang.org/std/string/struct.String.html#method.clear //! [DefaultIsZeroes]: https://docs.rs/zeroize/latest/zeroize/trait.DefaultIsZeroes.html //! [Default]: https://doc.rust-lang.org/std/default/trait.Default.html //! [core::ptr::write_volatile]: https://doc.rust-lang.org/core/ptr/fn.write_volatile.html //! [core::sync::atomic]: https://doc.rust-lang.org/stable/core/sync/atomic/index.html //! [Ordering::SeqCst]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html#variant.SeqCst //! [compiler_fence]: https://doc.rust-lang.org/stable/core/sync/atomic/fn.compiler_fence.html //! [fence]: https://doc.rust-lang.org/stable/core/sync/atomic/fn.fence.html //! [memory-model]: https://github.com/nikomatsakis/rust-memory-model //! [pin]: https://github.com/rust-lang/rfcs/blob/master/text/2349-pin.md //! [good cryptographic hygiene]: https://cryptocoding.net/index.php/Coding_rules#Clean_memory_of_secret_data #![no_std] #![deny(warnings, missing_docs, unused_import_braces, unused_qualifications)] #![cfg_attr(all(feature = "nightly", not(feature = "std")), feature(alloc))] #![cfg_attr(feature = "nightly", feature(core_intrinsics))] #![doc(html_root_url = "https://docs.rs/zeroize/0.6.0")] #[cfg(any(feature = "std", test))] #[cfg_attr(test, macro_use)] extern crate std; #[cfg(feature = "zeroize_derive")] #[allow(unused_imports)] #[macro_use] extern crate zeroize_derive; #[cfg(feature = "zeroize_derive")] #[doc(hidden)] pub use zeroize_derive::*; use core::{ptr, slice::IterMut, sync::atomic}; #[cfg(all(feature = "alloc", not(feature = "std")))] use alloc::prelude::*; #[cfg(feature = "std")] use std::prelude::v1::*; /// Trait for securely erasing types from memory pub trait Zeroize { /// Zero out this object from memory (using Rust or OS intrinsics which /// ensure the zeroization operation is not "optimized away") fn zeroize(&mut self); } /// Marker trait for types whose `Default` is the desired zeroization result pub trait DefaultIsZeroes: Copy + Default + Sized {} /// Marker trait intended for use with `zeroize_derive` which indicates that /// a type should have a drop handler which calls Zeroize. /// /// Use `#[derive(ZeroizeOnDrop)]` to automatically impl this trait and an /// associated drop handler. pub trait ZeroizeOnDrop: Zeroize + Drop {} impl<Z> Zeroize for Z where Z: DefaultIsZeroes, { fn zeroize(&mut self) { volatile_set(self, Z::default()); atomic_fence(); } } macro_rules! impl_zeroize_with_default { ($($type:ty),+) => { $(impl DefaultIsZeroes for $type {})+ }; } impl_zeroize_with_default!(i8, i16, i32, i64, i128, isize); impl_zeroize_with_default!(u16, u32, u64, u128, usize); impl_zeroize_with_default!(f32, f64, char, bool); /// On non-nightly targets, avoid special-casing u8 #[cfg(not(feature = "nightly"))] impl_zeroize_with_default!(u8); /// On nightly targets, don't implement `DefaultIsZeroes` so we can special /// case using batch set operations. #[cfg(feature = "nightly")] impl Zeroize for u8 { fn zeroize(&mut self) { volatile_set(self, 0); atomic_fence(); } } impl<'a, Z> Zeroize for IterMut<'a, Z> where Z: DefaultIsZeroes, { fn zeroize(&mut self) { let default = Z::default(); for elem in self { volatile_set(elem, default); } atomic_fence(); } } /// Implement zeroize on all types that can be zeroized with the zero value impl<Z> Zeroize for [Z] where Z: DefaultIsZeroes, { fn zeroize(&mut self) { // TODO: batch volatile set operation? 
self.iter_mut().zeroize(); } } /// On `nightly` Rust, `volatile_set_memory` provides fast byte slice zeroing #[cfg(feature = "nightly")] impl Zeroize for [u8] { fn zeroize(&mut self) { volatile_zero_bytes(self); atomic_fence(); } } #[cfg(feature = "alloc")] impl<Z> Zeroize for Vec<Z> where Z: DefaultIsZeroes, { fn zeroize(&mut self) { self.resize(self.capacity(), Default::default()); self.as_mut_slice().zeroize(); self.clear(); } } #[cfg(feature = "alloc")] impl Zeroize for String { fn zeroize(&mut self) { unsafe { self.as_bytes_mut() }.zeroize(); debug_assert!(self.as_bytes().iter().all(|b| *b == 0)); self.clear(); } } /// On `nightly` Rust, `volatile_set_memory` provides fast byte array zeroing #[cfg(feature = "nightly")] macro_rules! impl_zeroize_for_byte_array { ($($size:expr),+) => { $( impl Zeroize for [u8; $size] { fn zeroize(&mut self) { volatile_zero_bytes(self.as_mut()); atomic_fence(); } } )+ }; } #[cfg(feature = "nightly")] impl_zeroize_for_byte_array!( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64 ); /// Use fences to prevent accesses from being reordered before this /// point, which should hopefully help ensure that all accessors /// see zeroes after this point. #[inline] fn atomic_fence() { atomic::fence(atomic::Ordering::SeqCst); atomic::compiler_fence(atomic::Ordering::SeqCst); } /// Set a mutable reference to a value to the given replacement #[inline] fn volatile_set<T: Copy + Sized>(dst: &mut T, src: T) { unsafe { ptr::write_volatile(dst, src) } } #[cfg(feature = "nightly")] #[inline] fn volatile_zero_bytes(dst: &mut [u8]) { unsafe { core::intrinsics::volatile_set_memory(dst.as_mut_ptr(), 0, dst.len()) } } #[cfg(test)] mod tests { use super::Zeroize; use std::prelude::v1::*; #[test] fn zeroize_byte_arrays() { let mut arr = [42u8; 64]; arr.zeroize(); assert_eq!(arr.as_ref(), [0u8; 64].as_ref()); } #[test] fn zeroize_vec() { let mut vec = vec![42; 3]; vec.zeroize(); assert!(vec.is_empty()); } #[test] fn zeroize_vec_past_len() { let mut vec = Vec::with_capacity(5); for i in 0..4 { vec.push(10 + i); } vec.clear(); // safe if: new_len <= capacity AND elements "were initialised" unsafe { vec.set_len(1); } assert_eq!(10, vec[0], "clear() hasn't erased our push()es"); vec.clear(); vec.zeroize(); unsafe { vec.set_len(4); } for i in 0..4 { assert_eq!(0, vec[i], "it's been zero'd"); } } #[test] fn zeroize_string() { let mut string = String::from("Hello, world!"); string.zeroize(); assert!(string.is_empty()); } #[test] fn zeroize_box() { let mut boxed_arr = Box::new([42u8; 3]); boxed_arr.zeroize(); assert_eq!(boxed_arr.as_ref(), &[0u8; 3]); } #[cfg(feature = "zeroize_derive")] mod derive { use super::*; #[derive(Zeroize)] struct ZeroizableTupleStruct([u8; 3]); #[test] fn derive_tuple_struct_test() { let mut value = ZeroizableTupleStruct([1, 2, 3]); value.zeroize(); assert_eq!(&value.0, &[0, 0, 0]) } #[derive(Zeroize)] struct ZeroizableStruct { string: String, vec: Vec<u8>, bytearray: [u8; 3], number: usize, boolean: bool, } #[test] fn derive_struct_test() { let mut value = ZeroizableStruct { string: "Hello, world!".to_owned(), vec: vec![1, 2, 3], bytearray: [4, 5, 6], number: 42, boolean: true, }; value.zeroize(); assert!(value.string.is_empty()); assert!(value.vec.is_empty()); assert_eq!(&value.bytearray, &[0, 0, 0]); assert_eq!(value.number, 0); assert!(!value.boolean); } } }
//! ## What guarantees does this crate provide? //! //! Ideally a secure memory-zeroing function would guarantee the following: //! //! 1. Ensure the zeroing operation can't be "optimized away" by the compiler.
random_line_split
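The suffix above describes the crate's core mechanism in prose: a volatile write that LLVM will not elide, followed by SeqCst runtime and compiler fences to discourage reordering. Below is a minimal, self-contained sketch of that pattern using only `core` APIs; the buffer and its contents are illustrative, not taken from the crate:

use core::ptr;
use core::sync::atomic::{compiler_fence, fence, Ordering};

/// Zero a byte buffer with the volatile-write-plus-fence pattern.
fn zero_bytes(buf: &mut [u8]) {
    for byte in buf.iter_mut() {
        // Volatile write: guaranteed not to be optimized away.
        unsafe { ptr::write_volatile(byte, 0) };
    }
    // Strictest-ordering fences: a best effort against reordering of
    // subsequent non-volatile accesses, not a formal guarantee.
    fence(Ordering::SeqCst);
    compiler_fence(Ordering::SeqCst);
}

fn main() {
    let mut secret = *b"hunter2";
    zero_bytes(&mut secret);
    assert_eq!(secret, [0u8; 7]);
}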
lib.rs
//! Securely zero memory with a simple trait ([Zeroize]) built on stable Rust //! primitives which guarantee the operation will not be 'optimized away'. //! //! ## Usage //! //! ``` //! use zeroize::Zeroize; //! //! fn main() { //! // Protip: don't embed secrets in your source code. //! // This is just an example. //! let mut secret = b"Air shield password: 1,2,3,4,5".to_vec(); //! // [... ] open the air shield here //! //! // Now that we're done using the secret, zero it out. //! secret.zeroize(); //! } //! ``` //! //! The [Zeroize] trait is impl'd on all of Rust's core scalar types including //! integers, floats, `bool`, and `char`. //! //! Additionally, it's implemented on slices and `IterMut`s of the above types. //! //! When the `std` feature is enabled (which it is by default), it's also impl'd //! for `Vec`s of the above types as well as `String`, where it provides //! [Vec::clear()] / [String::clear()]-like behavior (truncating to zero-length) //! but ensures the backing memory is securely zeroed. //! //! The [DefaultIsZeroes] marker trait can be impl'd on types which also //! impl [Default], which implements [Zeroize] by overwriting a value with //! the default value. //! //! ## Custom Derive Support //! //! This crate has custom derive support for the `Zeroize` trait, which //! automatically calls `zeroize()` on all members of a struct or tuple struct: //! //! ``` //! // Ensure you import the crate with `macro_use`: //! // #[macro_use] //! // extern crate zeroize; //! //! use zeroize::Zeroize; //! //! #[derive(Zeroize)] //! struct MyStruct([u8; 64]); //! ``` //! //! Additionally, you can derive `ZeroizeOnDrop`, which will automatically //! derive a `Drop` handler that calls `zeroize()`: //! //! ``` //! use zeroize::{Zeroize, ZeroizeOnDrop}; //! //! // This struct will be zeroized on drop //! #[derive(Zeroize, ZeroizeOnDrop)] //! struct MyStruct([u8; 64]); //! ``` //! //! ## About //! //! [Zeroing memory securely is hard] - compilers optimize for performance, and //! in doing so they love to "optimize away" unnecessary zeroing calls. There are //! many documented "tricks" to attempt to avoid these optimizations and ensure //! that a zeroing routine is performed reliably. //! //! This crate isn't about tricks: it uses [core::ptr::write_volatile] //! and [core::sync::atomic] memory fences to provide easy-to-use, portable //! zeroing behavior which works on all of Rust's core number types and slices //! thereof, implemented in pure Rust with no usage of FFI or assembly. //! //! - **No insecure fallbacks!** //! - **No dependencies!** //! - **No FFI or inline assembly!** //! - `#![no_std]` **i.e. embedded-friendly**! //! - **No functionality besides securely zeroing memory!** //! //! ## What guarantees does this crate provide? //! //! Ideally a secure memory-zeroing function would guarantee the following: //! //! 1. Ensure the zeroing operation can't be "optimized away" by the compiler. //! 2. Ensure all subsequent reads to the memory following the zeroing operation //! will always see zeroes. //! //! This crate guarantees #1 is true: LLVM's volatile semantics ensure it. //! //! The story around #2 is much more complicated. In brief, it should be true that //! LLVM's current implementation does not attempt to perform optimizations which //! would allow a subsequent (non-volatile) read to see the original value prior //! to zeroization. However, this is not a guarantee, but rather an LLVM //! implementation detail. //! //! 
For more background, we can look to the [core::ptr::write_volatile] //! documentation: //! //! > Volatile operations are intended to act on I/O memory, and are guaranteed //! > to not be elided or reordered by the compiler across other volatile //! > operations. //! > //! > Memory accessed with `read_volatile` or `write_volatile` should not be //! > accessed with non-volatile operations. //! //! Uhoh! This crate does not guarantee all reads to the memory it operates on //! are volatile, and the documentation for [core::ptr::write_volatile] //! explicitly warns against mixing volatile and non-volatile operations. //! Perhaps we'd be better off with something like a `VolatileCell` //! type which owns the associated data and ensures all reads and writes are //! volatile so we don't have to worry about the semantics of mixing volatile and //! non-volatile accesses. //! //! While that's a strategy worth pursuing (and something we may investigate //! separately from this crate), it comes with some onerous API requirements: //! it means any data that we might ever desire to zero is owned by a //! `VolatileCell`. However, this does not make it possible for this crate //! to act on references, which severely limits its applicability. In fact //! a `VolatileCell` can only act on values, i.e. to read a value from it, //! we'd need to make a copy of it, and that's literally the opposite of //! what we want. //! //! It's worth asking what the precise semantics of mixing volatile and //! non-volatile reads actually are, and whether a less obtrusive API which //! can act entirely on mutable references is possible, safe, and provides the //! desired behavior. //! //! Unfortunately, that's a tricky question, because //! [Rust does not have a formally defined memory model][memory-model], //! and the behavior of mixing volatile and non-volatile memory accesses is //! therefore not rigorously specified and winds up being an LLVM //! implementation detail. The semantics were discussed extensively in this //! thread, specifically in the context of zeroing secrets from memory: //! //! <https://internals.rust-lang.org/t/volatile-and-sensitive-memory/3188/24> //! //! Some notable details from this thread: //! //! - Rust/LLVM's notion of "volatile" is centered around data *accesses*, not //! the data itself. Specifically it maps to flags in LLVM IR which control //! the behavior of the optimizer, and is therefore a bit different from the //! typical C notion of "volatile". //! - As mentioned earlier, LLVM does not presently contain optimizations which //! would reorder a non-volatile read to occur before a volatile write. //! However, there is nothing precluding such optimizations from being added. //! LLVM presently appears to exhibit the desired behavior for both points //! #1 and #2 above, but there is nothing preventing future versions of Rust //! and/or LLVM from changing that. //! //! To help mitigate concerns about reordering potentially exposing secrets //! after they have been zeroed, this crate leverages the [core::sync::atomic] //! memory fence functions including [compiler_fence] and [fence] (which uses //! the CPU's native fence instructions). These fences are leveraged with the //! strictest ordering guarantees, [Ordering::SeqCst], which ensures no //! accesses are reordered. Without a formally defined memory model we can't //! guarantee these will be effective, but we hope they will cover most cases. //! //! Concretely the threat of leaking "zeroized" secrets (via reordering by //! 
LLVM and/or the CPU via out-of-order or speculative execution) would //! require a non-volatile access to be reordered ahead of the following: //! //! 1. before an [Ordering::SeqCst] compiler fence //! 2. before an [Ordering::SeqCst] runtime fence //! 3. before a volatile write //! //! This seems unlikely, but our usage of mixed non-volatile and volatile //! accesses is technically undefined behavior, at least until guarantees //! about this particular mixture of operations is formally defined in a //! Rust memory model. //! //! Furthermore, given the recent history of microarchitectural attacks //! (Spectre, Meltdown, etc), there is also potential for "zeroized" secrets //! to be leaked through covert channels (e.g. memory fences have been used //! as a covert channel), so we are wary to make guarantees unless they can //! be made firmly in terms of both a formal Rust memory model and the //! generated code for a particular CPU architecture. //! //! In conclusion, this crate guarantees the zeroize operation will not be //! elided or "optimized away", makes a "best effort" to ensure that //! memory accesses will not be reordered ahead of the "zeroize" operation, //! but **cannot** yet guarantee that such reordering will not occur. //! //! ## Stack/Heap Zeroing Notes //! //! This crate can be used to zero values from either the stack or the heap. //! //! However, be aware that Rust's current memory semantics (e.g. `Copy` types) //! can leave copies of data in memory, and there isn't presently a good solution //! for ensuring all copies of data on the stack are properly cleared. //! //! The [`Pin` RFC][pin] proposes a method for avoiding this. //! //! ## What about: clearing registers, mlock, mprotect, etc? //! //! This crate is laser-focused on being a simple, unobtrusive crate for zeroing //! memory in as reliable a manner as is possible on stable Rust. //! //! Clearing registers is a difficult problem that can't easily be solved by //! something like a crate, and requires either inline ASM or rustc support. //! See <https://github.com/rust-lang/rust/issues/17046> for background on //! this particular problem. //! //! Other memory protection mechanisms are interesting and useful, but often //! overkill (e.g. defending against RAM scraping or attackers with swap access). //! In as much as there may be merit to these approaches, there are also many //! other crates that already implement more sophisticated memory protections. //! Such protections are explicitly out-of-scope for this crate. //! //! Zeroing memory is [good cryptographic hygiene] and this crate seeks to promote //! it in the most unobtrusive manner possible. This includes omitting complex //! `unsafe` memory protection systems and just trying to make the best memory //! zeroing crate available. //! //! [Zeroize]: https://docs.rs/zeroize/latest/zeroize/trait.Zeroize.html //! [Zeroing memory securely is hard]: http://www.daemonology.net/blog/2014-09-04-how-to-zero-a-buffer.html //! [Vec::clear()]: https://doc.rust-lang.org/std/vec/struct.Vec.html#method.clear //! [String::clear()]: https://doc.rust-lang.org/std/string/struct.String.html#method.clear //! [DefaultIsZeroes]: https://docs.rs/zeroize/latest/zeroize/trait.DefaultIsZeroes.html //! [Default]: https://doc.rust-lang.org/std/default/trait.Default.html //! [core::ptr::write_volatile]: https://doc.rust-lang.org/core/ptr/fn.write_volatile.html //! [core::sync::atomic]: https://doc.rust-lang.org/stable/core/sync/atomic/index.html //! 
[Ordering::SeqCst]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html#variant.SeqCst //! [compiler_fence]: https://doc.rust-lang.org/stable/core/sync/atomic/fn.compiler_fence.html //! [fence]: https://doc.rust-lang.org/stable/core/sync/atomic/fn.fence.html //! [memory-model]: https://github.com/nikomatsakis/rust-memory-model //! [pin]: https://github.com/rust-lang/rfcs/blob/master/text/2349-pin.md //! [good cryptographic hygiene]: https://cryptocoding.net/index.php/Coding_rules#Clean_memory_of_secret_data #![no_std] #![deny(warnings, missing_docs, unused_import_braces, unused_qualifications)] #![cfg_attr(all(feature = "nightly", not(feature = "std")), feature(alloc))] #![cfg_attr(feature = "nightly", feature(core_intrinsics))] #![doc(html_root_url = "https://docs.rs/zeroize/0.6.0")] #[cfg(any(feature = "std", test))] #[cfg_attr(test, macro_use)] extern crate std; #[cfg(feature = "zeroize_derive")] #[allow(unused_imports)] #[macro_use] extern crate zeroize_derive; #[cfg(feature = "zeroize_derive")] #[doc(hidden)] pub use zeroize_derive::*; use core::{ptr, slice::IterMut, sync::atomic}; #[cfg(all(feature = "alloc", not(feature = "std")))] use alloc::prelude::*; #[cfg(feature = "std")] use std::prelude::v1::*; /// Trait for securely erasing types from memory pub trait Zeroize { /// Zero out this object from memory (using Rust or OS intrinsics which /// ensure the zeroization operation is not "optimized away") fn zeroize(&mut self); } /// Marker trait for types whose `Default` is the desired zeroization result pub trait DefaultIsZeroes: Copy + Default + Sized {} /// Marker trait intended for use with `zeroize_derive` which indicates that /// a type should have a drop handler which calls Zeroize. /// /// Use `#[derive(ZeroizeOnDrop)]` to automatically impl this trait and an /// associated drop handler. pub trait ZeroizeOnDrop: Zeroize + Drop {} impl<Z> Zeroize for Z where Z: DefaultIsZeroes, { fn zeroize(&mut self) { volatile_set(self, Z::default()); atomic_fence(); } } macro_rules! impl_zeroize_with_default { ($($type:ty),+) => { $(impl DefaultIsZeroes for $type {})+ }; } impl_zeroize_with_default!(i8, i16, i32, i64, i128, isize); impl_zeroize_with_default!(u16, u32, u64, u128, usize); impl_zeroize_with_default!(f32, f64, char, bool); /// On non-nightly targets, avoid special-casing u8 #[cfg(not(feature = "nightly"))] impl_zeroize_with_default!(u8); /// On nightly targets, don't implement `DefaultIsZeroes` so we can special /// case using batch set operations. #[cfg(feature = "nightly")] impl Zeroize for u8 { fn zeroize(&mut self) { volatile_set(self, 0); atomic_fence(); } } impl<'a, Z> Zeroize for IterMut<'a, Z> where Z: DefaultIsZeroes, { fn zeroize(&mut self) { let default = Z::default(); for elem in self { volatile_set(elem, default); } atomic_fence(); } } /// Implement zeroize on all types that can be zeroized with the zero value impl<Z> Zeroize for [Z] where Z: DefaultIsZeroes, { fn zeroize(&mut self) { // TODO: batch volatile set operation? 
self.iter_mut().zeroize(); } } /// On `nightly` Rust, `volatile_set_memory` provides fast byte slice zeroing #[cfg(feature = "nightly")] impl Zeroize for [u8] { fn zeroize(&mut self) { volatile_zero_bytes(self); atomic_fence(); } } #[cfg(feature = "alloc")] impl<Z> Zeroize for Vec<Z> where Z: DefaultIsZeroes, { fn zeroize(&mut self) { self.resize(self.capacity(), Default::default()); self.as_mut_slice().zeroize(); self.clear(); } } #[cfg(feature = "alloc")] impl Zeroize for String { fn zeroize(&mut self) { unsafe { self.as_bytes_mut() }.zeroize(); debug_assert!(self.as_bytes().iter().all(|b| *b == 0)); self.clear(); } } /// On `nightly` Rust, `volatile_set_memory` provides fast byte array zeroing #[cfg(feature = "nightly")] macro_rules! impl_zeroize_for_byte_array { ($($size:expr),+) => { $( impl Zeroize for [u8; $size] { fn zeroize(&mut self) { volatile_zero_bytes(self.as_mut()); atomic_fence(); } } )+ }; } #[cfg(feature = "nightly")] impl_zeroize_for_byte_array!( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64 ); /// Use fences to prevent accesses from being reordered before this /// point, which should hopefully help ensure that all accessors /// see zeroes after this point. #[inline] fn
() { atomic::fence(atomic::Ordering::SeqCst); atomic::compiler_fence(atomic::Ordering::SeqCst); } /// Set a mutable reference to a value to the given replacement #[inline] fn volatile_set<T: Copy + Sized>(dst: &mut T, src: T) { unsafe { ptr::write_volatile(dst, src) } } #[cfg(feature = "nightly")] #[inline] fn volatile_zero_bytes(dst: &mut [u8]) { unsafe { core::intrinsics::volatile_set_memory(dst.as_mut_ptr(), 0, dst.len()) } } #[cfg(test)] mod tests { use super::Zeroize; use std::prelude::v1::*; #[test] fn zeroize_byte_arrays() { let mut arr = [42u8; 64]; arr.zeroize(); assert_eq!(arr.as_ref(), [0u8; 64].as_ref()); } #[test] fn zeroize_vec() { let mut vec = vec![42; 3]; vec.zeroize(); assert!(vec.is_empty()); } #[test] fn zeroize_vec_past_len() { let mut vec = Vec::with_capacity(5); for i in 0..4 { vec.push(10 + i); } vec.clear(); // safe if: new_len <= capacity AND elements "were initialised" unsafe { vec.set_len(1); } assert_eq!(10, vec[0], "clear() hasn't erased our push()es"); vec.clear(); vec.zeroize(); unsafe { vec.set_len(4); } for i in 0..4 { assert_eq!(0, vec[i], "it's been zero'd"); } } #[test] fn zeroize_string() { let mut string = String::from("Hello, world!"); string.zeroize(); assert!(string.is_empty()); } #[test] fn zeroize_box() { let mut boxed_arr = Box::new([42u8; 3]); boxed_arr.zeroize(); assert_eq!(boxed_arr.as_ref(), &[0u8; 3]); } #[cfg(feature = "zeroize_derive")] mod derive { use super::*; #[derive(Zeroize)] struct ZeroizableTupleStruct([u8; 3]); #[test] fn derive_tuple_struct_test() { let mut value = ZeroizableTupleStruct([1, 2, 3]); value.zeroize(); assert_eq!(&value.0, &[0, 0, 0]) } #[derive(Zeroize)] struct ZeroizableStruct { string: String, vec: Vec<u8>, bytearray: [u8; 3], number: usize, boolean: bool, } #[test] fn derive_struct_test() { let mut value = ZeroizableStruct { string: "Hello, world!".to_owned(), vec: vec![1, 2, 3], bytearray: [4, 5, 6], number: 42, boolean: true, }; value.zeroize(); assert!(value.string.is_empty()); assert!(value.vec.is_empty()); assert_eq!(&value.bytearray, &[0, 0, 0]); assert_eq!(value.number, 0); assert!(!value.boolean); } } }
atomic_fence
identifier_name
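The record above elides only the identifier `atomic_fence`, and its prefix shows the `DefaultIsZeroes` marker trait plus the blanket `impl<Z: DefaultIsZeroes> Zeroize for Z`. A hedged sketch of how that design is meant to be consumed; the `Key` type is invented for illustration and assumes the `zeroize` crate as captured here:

use zeroize::{DefaultIsZeroes, Zeroize};

// Any Copy + Default type can opt in via the marker trait; the blanket
// impl then supplies zeroize() as volatile_set(default) plus fences.
#[derive(Copy, Clone, Default)]
struct Key([u64; 4]);

impl DefaultIsZeroes for Key {}

fn main() {
    let mut key = Key([0xdead_beef; 4]);
    key.zeroize();
    assert!(key.0.iter().all(|&limb| limb == 0));
}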
lib.rs
//! Securely zero memory with a simple trait ([Zeroize]) built on stable Rust //! primitives which guarantee the operation will not be 'optimized away'. //! //! ## Usage //! //! ``` //! use zeroize::Zeroize; //! //! fn main() { //! // Protip: don't embed secrets in your source code. //! // This is just an example. //! let mut secret = b"Air shield password: 1,2,3,4,5".to_vec(); //! // [... ] open the air shield here //! //! // Now that we're done using the secret, zero it out. //! secret.zeroize(); //! } //! ``` //! //! The [Zeroize] trait is impl'd on all of Rust's core scalar types including //! integers, floats, `bool`, and `char`. //! //! Additionally, it's implemented on slices and `IterMut`s of the above types. //! //! When the `std` feature is enabled (which it is by default), it's also impl'd //! for `Vec`s of the above types as well as `String`, where it provides //! [Vec::clear()] / [String::clear()]-like behavior (truncating to zero-length) //! but ensures the backing memory is securely zeroed. //! //! The [DefaultIsZeroes] marker trait can be impl'd on types which also //! impl [Default], which implements [Zeroize] by overwriting a value with //! the default value. //! //! ## Custom Derive Support //! //! This crate has custom derive support for the `Zeroize` trait, which //! automatically calls `zeroize()` on all members of a struct or tuple struct: //! //! ``` //! // Ensure you import the crate with `macro_use`: //! // #[macro_use] //! // extern crate zeroize; //! //! use zeroize::Zeroize; //! //! #[derive(Zeroize)] //! struct MyStruct([u8; 64]); //! ``` //! //! Additionally, you can derive `ZeroizeOnDrop`, which will automatically //! derive a `Drop` handler that calls `zeroize()`: //! //! ``` //! use zeroize::{Zeroize, ZeroizeOnDrop}; //! //! // This struct will be zeroized on drop //! #[derive(Zeroize, ZeroizeOnDrop)] //! struct MyStruct([u8; 64]); //! ``` //! //! ## About //! //! [Zeroing memory securely is hard] - compilers optimize for performance, and //! in doing so they love to "optimize away" unnecessary zeroing calls. There are //! many documented "tricks" to attempt to avoid these optimizations and ensure //! that a zeroing routine is performed reliably. //! //! This crate isn't about tricks: it uses [core::ptr::write_volatile] //! and [core::sync::atomic] memory fences to provide easy-to-use, portable //! zeroing behavior which works on all of Rust's core number types and slices //! thereof, implemented in pure Rust with no usage of FFI or assembly. //! //! - **No insecure fallbacks!** //! - **No dependencies!** //! - **No FFI or inline assembly!** //! - `#![no_std]` **i.e. embedded-friendly**! //! - **No functionality besides securely zeroing memory!** //! //! ## What guarantees does this crate provide? //! //! Ideally a secure memory-zeroing function would guarantee the following: //! //! 1. Ensure the zeroing operation can't be "optimized away" by the compiler. //! 2. Ensure all subsequent reads to the memory following the zeroing operation //! will always see zeroes. //! //! This crate guarantees #1 is true: LLVM's volatile semantics ensure it. //! //! The story around #2 is much more complicated. In brief, it should be true that //! LLVM's current implementation does not attempt to perform optimizations which //! would allow a subsequent (non-volatile) read to see the original value prior //! to zeroization. However, this is not a guarantee, but rather an LLVM //! implementation detail. //! //! 
For more background, we can look to the [core::ptr::write_volatile] //! documentation: //! //! > Volatile operations are intended to act on I/O memory, and are guaranteed //! > to not be elided or reordered by the compiler across other volatile //! > operations. //! > //! > Memory accessed with `read_volatile` or `write_volatile` should not be //! > accessed with non-volatile operations. //! //! Uhoh! This crate does not guarantee all reads to the memory it operates on //! are volatile, and the documentation for [core::ptr::write_volatile] //! explicitly warns against mixing volatile and non-volatile operations. //! Perhaps we'd be better off with something like a `VolatileCell` //! type which owns the associated data and ensures all reads and writes are //! volatile so we don't have to worry about the semantics of mixing volatile and //! non-volatile accesses. //! //! While that's a strategy worth pursuing (and something we may investigate //! separately from this crate), it comes with some onerous API requirements: //! it means any data that we might ever desire to zero is owned by a //! `VolatileCell`. However, this does not make it possible for this crate //! to act on references, which severely limits its applicability. In fact //! a `VolatileCell` can only act on values, i.e. to read a value from it, //! we'd need to make a copy of it, and that's literally the opposite of //! what we want. //! //! It's worth asking what the precise semantics of mixing volatile and //! non-volatile reads actually are, and whether a less obtrusive API which //! can act entirely on mutable references is possible, safe, and provides the //! desired behavior. //! //! Unfortunately, that's a tricky question, because //! [Rust does not have a formally defined memory model][memory-model], //! and the behavior of mixing volatile and non-volatile memory accesses is //! therefore not rigorously specified and winds up being an LLVM //! implementation detail. The semantics were discussed extensively in this //! thread, specifically in the context of zeroing secrets from memory: //! //! <https://internals.rust-lang.org/t/volatile-and-sensitive-memory/3188/24> //! //! Some notable details from this thread: //! //! - Rust/LLVM's notion of "volatile" is centered around data *accesses*, not //! the data itself. Specifically it maps to flags in LLVM IR which control //! the behavior of the optimizer, and is therefore a bit different from the //! typical C notion of "volatile". //! - As mentioned earlier, LLVM does not presently contain optimizations which //! would reorder a non-volatile read to occur before a volatile write. //! However, there is nothing precluding such optimizations from being added. //! LLVM presently appears to exhibit the desired behavior for both points //! #1 and #2 above, but there is nothing preventing future versions of Rust //! and/or LLVM from changing that. //! //! To help mitigate concerns about reordering potentially exposing secrets //! after they have been zeroed, this crate leverages the [core::sync::atomic] //! memory fence functions including [compiler_fence] and [fence] (which uses //! the CPU's native fence instructions). These fences are leveraged with the //! strictest ordering guarantees, [Ordering::SeqCst], which ensures no //! accesses are reordered. Without a formally defined memory model we can't //! guarantee these will be effective, but we hope they will cover most cases. //! //! Concretely the threat of leaking "zeroized" secrets (via reordering by //! 
LLVM and/or the CPU via out-of-order or speculative execution) would //! require a non-volatile access to be reordered ahead of the following: //! //! 1. before an [Ordering::SeqCst] compiler fence //! 2. before an [Ordering::SeqCst] runtime fence //! 3. before a volatile write //! //! This seems unlikely, but our usage of mixed non-volatile and volatile //! accesses is technically undefined behavior, at least until guarantees //! about this particular mixture of operations is formally defined in a //! Rust memory model. //! //! Furthermore, given the recent history of microarchitectural attacks //! (Spectre, Meltdown, etc), there is also potential for "zeroized" secrets //! to be leaked through covert channels (e.g. memory fences have been used //! as a covert channel), so we are wary to make guarantees unless they can //! be made firmly in terms of both a formal Rust memory model and the //! generated code for a particular CPU architecture. //! //! In conclusion, this crate guarantees the zeroize operation will not be //! elided or "optimized away", makes a "best effort" to ensure that //! memory accesses will not be reordered ahead of the "zeroize" operation, //! but **cannot** yet guarantee that such reordering will not occur. //! //! ## Stack/Heap Zeroing Notes //! //! This crate can be used to zero values from either the stack or the heap. //! //! However, be aware that Rust's current memory semantics (e.g. `Copy` types) //! can leave copies of data in memory, and there isn't presently a good solution //! for ensuring all copies of data on the stack are properly cleared. //! //! The [`Pin` RFC][pin] proposes a method for avoiding this. //! //! ## What about: clearing registers, mlock, mprotect, etc? //! //! This crate is laser-focused on being a simple, unobtrusive crate for zeroing //! memory in as reliable a manner as is possible on stable Rust. //! //! Clearing registers is a difficult problem that can't easily be solved by //! something like a crate, and requires either inline ASM or rustc support. //! See <https://github.com/rust-lang/rust/issues/17046> for background on //! this particular problem. //! //! Other memory protection mechanisms are interesting and useful, but often //! overkill (e.g. defending against RAM scraping or attackers with swap access). //! In as much as there may be merit to these approaches, there are also many //! other crates that already implement more sophisticated memory protections. //! Such protections are explicitly out-of-scope for this crate. //! //! Zeroing memory is [good cryptographic hygiene] and this crate seeks to promote //! it in the most unobtrusive manner possible. This includes omitting complex //! `unsafe` memory protection systems and just trying to make the best memory //! zeroing crate available. //! //! [Zeroize]: https://docs.rs/zeroize/latest/zeroize/trait.Zeroize.html //! [Zeroing memory securely is hard]: http://www.daemonology.net/blog/2014-09-04-how-to-zero-a-buffer.html //! [Vec::clear()]: https://doc.rust-lang.org/std/vec/struct.Vec.html#method.clear //! [String::clear()]: https://doc.rust-lang.org/std/string/struct.String.html#method.clear //! [DefaultIsZeroes]: https://docs.rs/zeroize/latest/zeroize/trait.DefaultIsZeroes.html //! [Default]: https://doc.rust-lang.org/std/default/trait.Default.html //! [core::ptr::write_volatile]: https://doc.rust-lang.org/core/ptr/fn.write_volatile.html //! [core::sync::atomic]: https://doc.rust-lang.org/stable/core/sync/atomic/index.html //! 
[Ordering::SeqCst]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html#variant.SeqCst //! [compiler_fence]: https://doc.rust-lang.org/stable/core/sync/atomic/fn.compiler_fence.html //! [fence]: https://doc.rust-lang.org/stable/core/sync/atomic/fn.fence.html //! [memory-model]: https://github.com/nikomatsakis/rust-memory-model //! [pin]: https://github.com/rust-lang/rfcs/blob/master/text/2349-pin.md //! [good cryptographic hygiene]: https://cryptocoding.net/index.php/Coding_rules#Clean_memory_of_secret_data #![no_std] #![deny(warnings, missing_docs, unused_import_braces, unused_qualifications)] #![cfg_attr(all(feature = "nightly", not(feature = "std")), feature(alloc))] #![cfg_attr(feature = "nightly", feature(core_intrinsics))] #![doc(html_root_url = "https://docs.rs/zeroize/0.6.0")] #[cfg(any(feature = "std", test))] #[cfg_attr(test, macro_use)] extern crate std; #[cfg(feature = "zeroize_derive")] #[allow(unused_imports)] #[macro_use] extern crate zeroize_derive; #[cfg(feature = "zeroize_derive")] #[doc(hidden)] pub use zeroize_derive::*; use core::{ptr, slice::IterMut, sync::atomic}; #[cfg(all(feature = "alloc", not(feature = "std")))] use alloc::prelude::*; #[cfg(feature = "std")] use std::prelude::v1::*; /// Trait for securely erasing types from memory pub trait Zeroize { /// Zero out this object from memory (using Rust or OS intrinsics which /// ensure the zeroization operation is not "optimized away") fn zeroize(&mut self); } /// Marker trait for types whose `Default` is the desired zeroization result pub trait DefaultIsZeroes: Copy + Default + Sized {} /// Marker trait intended for use with `zeroize_derive` which indicates that /// a type should have a drop handler which calls Zeroize. /// /// Use `#[derive(ZeroizeOnDrop)]` to automatically impl this trait and an /// associated drop handler. pub trait ZeroizeOnDrop: Zeroize + Drop {} impl<Z> Zeroize for Z where Z: DefaultIsZeroes, { fn zeroize(&mut self) { volatile_set(self, Z::default()); atomic_fence(); } } macro_rules! impl_zeroize_with_default { ($($type:ty),+) => { $(impl DefaultIsZeroes for $type {})+ }; } impl_zeroize_with_default!(i8, i16, i32, i64, i128, isize); impl_zeroize_with_default!(u16, u32, u64, u128, usize); impl_zeroize_with_default!(f32, f64, char, bool); /// On non-nightly targets, avoid special-casing u8 #[cfg(not(feature = "nightly"))] impl_zeroize_with_default!(u8); /// On nightly targets, don't implement `DefaultIsZeroes` so we can special /// case using batch set operations. #[cfg(feature = "nightly")] impl Zeroize for u8 { fn zeroize(&mut self) { volatile_set(self, 0); atomic_fence(); } } impl<'a, Z> Zeroize for IterMut<'a, Z> where Z: DefaultIsZeroes, { fn zeroize(&mut self) { let default = Z::default(); for elem in self { volatile_set(elem, default); } atomic_fence(); } } /// Implement zeroize on all types that can be zeroized with the zero value impl<Z> Zeroize for [Z] where Z: DefaultIsZeroes, { fn zeroize(&mut self) { // TODO: batch volatile set operation? 
self.iter_mut().zeroize(); } } /// On `nightly` Rust, `volatile_set_memory` provides fast byte slice zeroing #[cfg(feature = "nightly")] impl Zeroize for [u8] { fn zeroize(&mut self) { volatile_zero_bytes(self); atomic_fence(); } } #[cfg(feature = "alloc")] impl<Z> Zeroize for Vec<Z> where Z: DefaultIsZeroes, { fn zeroize(&mut self) { self.resize(self.capacity(), Default::default()); self.as_mut_slice().zeroize(); self.clear(); } } #[cfg(feature = "alloc")] impl Zeroize for String { fn zeroize(&mut self) { unsafe { self.as_bytes_mut() }.zeroize(); debug_assert!(self.as_bytes().iter().all(|b| *b == 0)); self.clear(); } } /// On `nightly` Rust, `volatile_set_memory` provides fast byte array zeroing #[cfg(feature = "nightly")] macro_rules! impl_zeroize_for_byte_array { ($($size:expr),+) => { $( impl Zeroize for [u8; $size] { fn zeroize(&mut self) { volatile_zero_bytes(self.as_mut()); atomic_fence(); } } )+ }; } #[cfg(feature = "nightly")] impl_zeroize_for_byte_array!( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64 ); /// Use fences to prevent accesses from being reordered before this /// point, which should hopefully help ensure that all accessors /// see zeroes after this point. #[inline] fn atomic_fence()
/// Set a mutable reference to a value to the given replacement #[inline] fn volatile_set<T: Copy + Sized>(dst: &mut T, src: T) { unsafe { ptr::write_volatile(dst, src) } } #[cfg(feature = "nightly")] #[inline] fn volatile_zero_bytes(dst: &mut [u8]) { unsafe { core::intrinsics::volatile_set_memory(dst.as_mut_ptr(), 0, dst.len()) } } #[cfg(test)] mod tests { use super::Zeroize; use std::prelude::v1::*; #[test] fn zeroize_byte_arrays() { let mut arr = [42u8; 64]; arr.zeroize(); assert_eq!(arr.as_ref(), [0u8; 64].as_ref()); } #[test] fn zeroize_vec() { let mut vec = vec![42; 3]; vec.zeroize(); assert!(vec.is_empty()); } #[test] fn zeroize_vec_past_len() { let mut vec = Vec::with_capacity(5); for i in 0..4 { vec.push(10 + i); } vec.clear(); // safe if: new_len <= capacity AND elements "were initialised" unsafe { vec.set_len(1); } assert_eq!(10, vec[0], "clear() hasn't erased our push()es"); vec.clear(); vec.zeroize(); unsafe { vec.set_len(4); } for i in 0..4 { assert_eq!(0, vec[i], "it's been zero'd"); } } #[test] fn zeroize_string() { let mut string = String::from("Hello, world!"); string.zeroize(); assert!(string.is_empty()); } #[test] fn zeroize_box() { let mut boxed_arr = Box::new([42u8; 3]); boxed_arr.zeroize(); assert_eq!(boxed_arr.as_ref(), &[0u8; 3]); } #[cfg(feature = "zeroize_derive")] mod derive { use super::*; #[derive(Zeroize)] struct ZeroizableTupleStruct([u8; 3]); #[test] fn derive_tuple_struct_test() { let mut value = ZeroizableTupleStruct([1, 2, 3]); value.zeroize(); assert_eq!(&value.0, &[0, 0, 0]) } #[derive(Zeroize)] struct ZeroizableStruct { string: String, vec: Vec<u8>, bytearray: [u8; 3], number: usize, boolean: bool, } #[test] fn derive_struct_test() { let mut value = ZeroizableStruct { string: "Hello, world!".to_owned(), vec: vec![1, 2, 3], bytearray: [4, 5, 6], number: 42, boolean: true, }; value.zeroize(); assert!(value.string.is_empty()); assert!(value.vec.is_empty()); assert_eq!(&value.bytearray, &[0, 0, 0]); assert_eq!(value.number, 0); assert!(!value.boolean); } } }
{ atomic::fence(atomic::Ordering::SeqCst); atomic::compiler_fence(atomic::Ordering::SeqCst); }
identifier_body
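The two lib.rs records above target the same function: the `identifier_name` record elides its name (middle: `atomic_fence`) and the `identifier_body` record elides its body. Splicing each middle back between its prefix and suffix reconstructs the original:

/// Use fences to prevent accesses from being reordered before this
/// point, which should hopefully help ensure that all accessors
/// see zeroes after this point.
#[inline]
fn atomic_fence() {
    atomic::fence(atomic::Ordering::SeqCst);
    atomic::compiler_fence(atomic::Ordering::SeqCst);
}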
fakes.rs
// Copyright 2021 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #![cfg(test)] use { crate::{ client::{bss_selection::SignalData, scan, types as client_types}, config_management::{ Credential, NetworkConfig, NetworkConfigError, NetworkIdentifier, PastConnectionData, PastConnectionList, SavedNetworksManagerApi, ScanResultType, }, }, async_trait::async_trait, fidl_fuchsia_wlan_sme as fidl_sme, fuchsia_async as fasync, fuchsia_zircon as zx, futures::{channel::mpsc, lock::Mutex}, log::{info, warn}, rand::Rng, std::{ collections::{HashMap, VecDeque}, convert::TryInto, sync::Arc, }, wlan_common::hasher::WlanHasher, }; pub struct FakeSavedNetworksManager { saved_networks: Mutex<HashMap<NetworkIdentifier, Vec<NetworkConfig>>>, connections_recorded: Mutex<Vec<ConnectionRecord>>, connect_results_recorded: Mutex<Vec<ConnectResultRecord>>, lookup_compatible_response: Mutex<LookupCompatibleResponse>, pub fail_all_stores: bool, pub active_scan_result_recorded: Arc<Mutex<bool>>, pub passive_scan_result_recorded: Arc<Mutex<bool>>, pub past_connections_response: PastConnectionList, } #[derive(Debug, Clone, PartialEq)] pub struct ConnectionRecord { pub id: NetworkIdentifier, pub credential: Credential, pub data: PastConnectionData, } #[derive(Debug, Clone, PartialEq)] pub struct ConnectResultRecord { pub id: NetworkIdentifier, pub credential: Credential, pub bssid: client_types::Bssid, pub connect_result: fidl_sme::ConnectResult, pub scan_type: client_types::ScanObservation, } /// Use a struct so that the option can be updated from None to Some to allow the response to be /// set after FakeSavedNetworksManager is created. Use an optional response value rather than /// defaulting to an empty vector so that if the response is not set, lookup_compatible will panic /// for easier debugging. struct LookupCompatibleResponse { inner: Option<Vec<NetworkConfig>>, } impl LookupCompatibleResponse { fn new() -> Self { LookupCompatibleResponse { inner: None } } } impl FakeSavedNetworksManager { pub fn new() -> Self { Self { saved_networks: Mutex::new(HashMap::new()), connections_recorded: Mutex::new(vec![]), connect_results_recorded: Mutex::new(vec![]), fail_all_stores: false, lookup_compatible_response: Mutex::new(LookupCompatibleResponse::new()), active_scan_result_recorded: Arc::new(Mutex::new(false)), passive_scan_result_recorded: Arc::new(Mutex::new(false)), past_connections_response: PastConnectionList::new(), } } /// Create FakeSavedNetworksManager, saving network configs with the specified /// network identifiers and credentials at init. pub fn new_with_saved_networks(network_configs: Vec<(NetworkIdentifier, Credential)>) -> Self { let saved_networks = network_configs .into_iter() .filter_map(|(id, cred)| { NetworkConfig::new(id.clone(), cred, false).ok().map(|config| (id, vec![config])) }) .collect::<HashMap<NetworkIdentifier, Vec<NetworkConfig>>>(); Self { saved_networks: Mutex::new(saved_networks), connections_recorded: Mutex::new(vec![]), connect_results_recorded: Mutex::new(vec![]), fail_all_stores: false, lookup_compatible_response: Mutex::new(LookupCompatibleResponse::new()), active_scan_result_recorded: Arc::new(Mutex::new(false)), passive_scan_result_recorded: Arc::new(Mutex::new(false)), past_connections_response: PastConnectionList::new(), } } /// Returns the past connections as they were recorded, rather than how they would have been /// stored. 
pub fn get_recorded_past_connections(&self) -> Vec<ConnectionRecord>
pub fn get_recorded_connect_results(&self) -> Vec<ConnectResultRecord> {
        self.connect_results_recorded
            .try_lock()
            .expect("expect locking self.connect_results_recorded to succeed")
            .clone()
    }

    /// Manually change the hidden network probability of a saved network.
    pub async fn update_hidden_prob(&self, id: NetworkIdentifier, hidden_prob: f32) {
        let mut saved_networks = self.saved_networks.lock().await;
        let networks = match saved_networks.get_mut(&id) {
            Some(networks) => networks,
            None => {
                info!("Failed to find network to update");
                return;
            }
        };
        for network in networks.iter_mut() {
            network.hidden_probability = hidden_prob;
        }
    }

    pub fn set_lookup_compatible_response(&self, response: Vec<NetworkConfig>) {
        self.lookup_compatible_response.try_lock().expect("failed to get lock").inner =
            Some(response);
    }
}

#[async_trait]
impl SavedNetworksManagerApi for FakeSavedNetworksManager {
    async fn remove(
        &self,
        network_id: NetworkIdentifier,
        credential: Credential,
    ) -> Result<bool, NetworkConfigError> {
        let mut saved_networks = self.saved_networks.lock().await;
        if let Some(network_configs) = saved_networks.get_mut(&network_id) {
            let original_len = network_configs.len();
            network_configs.retain(|cfg| cfg.credential != credential);
            if original_len != network_configs.len() {
                return Ok(true);
            }
        }
        Ok(false)
    }

    async fn known_network_count(&self) -> usize {
        unimplemented!()
    }

    async fn lookup(&self, id: &NetworkIdentifier) -> Vec<NetworkConfig> {
        self.saved_networks.lock().await.get(id).cloned().unwrap_or_default()
    }

    async fn lookup_compatible(
        &self,
        ssid: &client_types::Ssid,
        _scan_security: client_types::SecurityTypeDetailed,
    ) -> Vec<NetworkConfig> {
        let predetermined_response = self.lookup_compatible_response.lock().await.inner.clone();
        match predetermined_response {
            Some(resp) => resp,
            None => {
                warn!("FakeSavedNetworksManager lookup_compatible response is not set, returning all networks with matching SSID");
                self.saved_networks
                    .lock()
                    .await
                    .iter()
                    .filter_map(
                        |(id, config)| if id.ssid == *ssid { Some(config.clone()) } else { None },
                    )
                    .flatten()
                    .collect()
            }
        }
    }

    /// Note that the configs-per-NetworkIdentifier limit is set to 1 in
    /// this mock struct. If a NetworkIdentifier is already stored, writing
    /// a config to it will evict the previously stored one.
async fn store( &self, network_id: NetworkIdentifier, credential: Credential, ) -> Result<Option<NetworkConfig>, NetworkConfigError> { if self.fail_all_stores { return Err(NetworkConfigError::StashWriteError); } let config = NetworkConfig::new(network_id.clone(), credential, false)?; return Ok(self .saved_networks .lock() .await .insert(network_id, vec![config]) .and_then(|mut v| v.pop())); } async fn record_connect_result( &self, id: NetworkIdentifier, credential: &Credential, bssid: client_types::Bssid, connect_result: fidl_sme::ConnectResult, scan_type: client_types::ScanObservation, ) { self.connect_results_recorded.try_lock().expect("failed to record connect result").push( ConnectResultRecord { id: id.clone(), credential: credential.clone(), bssid, connect_result, scan_type, }, ); } async fn record_disconnect( &self, id: &NetworkIdentifier, credential: &Credential, data: PastConnectionData, ) { let mut connections_recorded = self.connections_recorded.lock().await; connections_recorded.push(ConnectionRecord { id: id.clone(), credential: credential.clone(), data, }); } async fn record_periodic_metrics(&self) {} async fn record_scan_result( &self, scan_type: ScanResultType, _results: Vec<client_types::NetworkIdentifierDetailed>, ) { match scan_type { ScanResultType::Undirected => { let mut v = self.passive_scan_result_recorded.lock().await; *v = true; } ScanResultType::Directed(_) => { let mut v = self.active_scan_result_recorded.lock().await; *v = true } } } async fn get_networks(&self) -> Vec<NetworkConfig> { self.saved_networks .lock() .await .values() .into_iter() .flat_map(|cfgs| cfgs.clone()) .collect() } async fn get_past_connections( &self, _id: &NetworkIdentifier, _credential: &Credential, _bssid: &client_types::Bssid, ) -> PastConnectionList { self.past_connections_response.clone() } } pub fn create_wlan_hasher() -> WlanHasher { WlanHasher::new(rand::thread_rng().gen::<u64>().to_le_bytes()) } pub fn create_inspect_persistence_channel() -> (mpsc::Sender<String>, mpsc::Receiver<String>) { const DEFAULT_BUFFER_SIZE: usize = 100; // arbitrary value mpsc::channel(DEFAULT_BUFFER_SIZE) } /// Create past connection data with all random values. Tests can set the values they care about. 
pub fn random_connection_data() -> PastConnectionData { let mut rng = rand::thread_rng(); let connect_time = fasync::Time::from_nanos(rng.gen::<u16>().into()); let time_to_connect = zx::Duration::from_seconds(rng.gen_range::<i64, _>(5..10)); let uptime = zx::Duration::from_seconds(rng.gen_range::<i64, _>(5..1000)); let disconnect_time = connect_time + time_to_connect + uptime; PastConnectionData::new( client_types::Bssid( (0..6).map(|_| rng.gen::<u8>()).collect::<Vec<u8>>().try_into().unwrap(), ), connect_time, time_to_connect, disconnect_time, uptime, client_types::DisconnectReason::DisconnectDetectedFromSme, SignalData::new(rng.gen_range(-90..-20), rng.gen_range(10..50), 10), rng.gen::<u8>().into(), ) } #[derive(Clone)] pub struct FakeScanRequester { // A type alias for this complex type would be needless indirection, so allow the complex type #[allow(clippy::type_complexity)] pub scan_results: Arc<Mutex<VecDeque<Result<Vec<client_types::ScanResult>, client_types::ScanError>>>>, #[allow(clippy::type_complexity)] pub scan_requests: Arc<Mutex<Vec<(scan::ScanReason, Vec<client_types::Ssid>, Vec<client_types::WlanChan>)>>>, } impl FakeScanRequester { pub fn new() -> Self { FakeScanRequester { scan_results: Arc::new(Mutex::new(VecDeque::new())), scan_requests: Arc::new(Mutex::new(vec![])), } } pub async fn add_scan_result( &self, res: Result<Vec<client_types::ScanResult>, client_types::ScanError>, ) { self.scan_results.lock().await.push_back(res); } } #[async_trait] impl scan::ScanRequestApi for FakeScanRequester { async fn perform_scan( &self, scan_reason: scan::ScanReason, ssids: Vec<client_types::Ssid>, channels: Vec<client_types::WlanChan>, ) -> Result<Vec<client_types::ScanResult>, client_types::ScanError> { self.scan_requests.lock().await.push((scan_reason, ssids, channels)); self.scan_results.lock().await.pop_front().unwrap() } }
{ self.connections_recorded .try_lock() .expect("expect locking self.connections_recorded to succeed") .clone() }
identifier_body
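In this fakes.rs record the prefix ends at the accessor's signature and the middle supplies its body; joined, they restore:

/// Returns the past connections as they were recorded, rather than how
/// they would have been stored.
pub fn get_recorded_past_connections(&self) -> Vec<ConnectionRecord> {
    self.connections_recorded
        .try_lock()
        .expect("expect locking self.connections_recorded to succeed")
        .clone()
}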
fakes.rs
// Copyright 2021 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #![cfg(test)] use { crate::{ client::{bss_selection::SignalData, scan, types as client_types}, config_management::{ Credential, NetworkConfig, NetworkConfigError, NetworkIdentifier, PastConnectionData, PastConnectionList, SavedNetworksManagerApi, ScanResultType, }, }, async_trait::async_trait, fidl_fuchsia_wlan_sme as fidl_sme, fuchsia_async as fasync, fuchsia_zircon as zx, futures::{channel::mpsc, lock::Mutex}, log::{info, warn}, rand::Rng, std::{ collections::{HashMap, VecDeque}, convert::TryInto, sync::Arc, }, wlan_common::hasher::WlanHasher, }; pub struct FakeSavedNetworksManager { saved_networks: Mutex<HashMap<NetworkIdentifier, Vec<NetworkConfig>>>, connections_recorded: Mutex<Vec<ConnectionRecord>>, connect_results_recorded: Mutex<Vec<ConnectResultRecord>>, lookup_compatible_response: Mutex<LookupCompatibleResponse>, pub fail_all_stores: bool, pub active_scan_result_recorded: Arc<Mutex<bool>>, pub passive_scan_result_recorded: Arc<Mutex<bool>>, pub past_connections_response: PastConnectionList, } #[derive(Debug, Clone, PartialEq)] pub struct ConnectionRecord { pub id: NetworkIdentifier, pub credential: Credential, pub data: PastConnectionData, } #[derive(Debug, Clone, PartialEq)] pub struct ConnectResultRecord { pub id: NetworkIdentifier, pub credential: Credential, pub bssid: client_types::Bssid, pub connect_result: fidl_sme::ConnectResult, pub scan_type: client_types::ScanObservation, } /// Use a struct so that the option can be updated from None to Some to allow the response to be /// set after FakeSavedNetworksManager is created. Use an optional response value rather than /// defaulting to an empty vector so that if the response is not set, lookup_compatible will panic /// for easier debugging. struct LookupCompatibleResponse { inner: Option<Vec<NetworkConfig>>, } impl LookupCompatibleResponse { fn new() -> Self { LookupCompatibleResponse { inner: None } } } impl FakeSavedNetworksManager { pub fn new() -> Self { Self { saved_networks: Mutex::new(HashMap::new()), connections_recorded: Mutex::new(vec![]), connect_results_recorded: Mutex::new(vec![]), fail_all_stores: false, lookup_compatible_response: Mutex::new(LookupCompatibleResponse::new()), active_scan_result_recorded: Arc::new(Mutex::new(false)), passive_scan_result_recorded: Arc::new(Mutex::new(false)), past_connections_response: PastConnectionList::new(), } } /// Create FakeSavedNetworksManager, saving network configs with the specified /// network identifiers and credentials at init. pub fn new_with_saved_networks(network_configs: Vec<(NetworkIdentifier, Credential)>) -> Self { let saved_networks = network_configs .into_iter() .filter_map(|(id, cred)| { NetworkConfig::new(id.clone(), cred, false).ok().map(|config| (id, vec![config])) }) .collect::<HashMap<NetworkIdentifier, Vec<NetworkConfig>>>(); Self { saved_networks: Mutex::new(saved_networks), connections_recorded: Mutex::new(vec![]), connect_results_recorded: Mutex::new(vec![]), fail_all_stores: false, lookup_compatible_response: Mutex::new(LookupCompatibleResponse::new()), active_scan_result_recorded: Arc::new(Mutex::new(false)), passive_scan_result_recorded: Arc::new(Mutex::new(false)), past_connections_response: PastConnectionList::new(), } } /// Returns the past connections as they were recorded, rather than how they would have been /// stored. 
pub fn get_recorded_past_connections(&self) -> Vec<ConnectionRecord> {
        self.connections_recorded
            .try_lock()
            .expect("expect locking self.connections_recorded to succeed")
            .clone()
    }

    pub fn get_recorded_connect_results(&self) -> Vec<ConnectResultRecord> {
        self.connect_results_recorded
            .try_lock()
            .expect("expect locking self.connect_results_recorded to succeed")
            .clone()
    }

    /// Manually change the hidden network probability of a saved network.
    pub async fn update_hidden_prob(&self, id: NetworkIdentifier, hidden_prob: f32) {
        let mut saved_networks = self.saved_networks.lock().await;
        let networks = match saved_networks.get_mut(&id) {
            Some(networks) => networks,
            None => {
                info!("Failed to find network to update");
                return;
            }
        };
        for network in networks.iter_mut() {
            network.hidden_probability = hidden_prob;
        }
    }

    pub fn set_lookup_compatible_response(&self, response: Vec<NetworkConfig>) {
        self.lookup_compatible_response.try_lock().expect("failed to get lock").inner =
            Some(response);
    }
}

#[async_trait]
impl SavedNetworksManagerApi for FakeSavedNetworksManager {
    async fn remove(
        &self,
        network_id: NetworkIdentifier,
        credential: Credential,
    ) -> Result<bool, NetworkConfigError> {
        let mut saved_networks = self.saved_networks.lock().await;
        if let Some(network_configs) = saved_networks.get_mut(&network_id) {
            let original_len = network_configs.len();
            network_configs.retain(|cfg| cfg.credential != credential);
            if original_len != network_configs.len() {
                return Ok(true);
            }
        }
        Ok(false)
    }

    async fn known_network_count(&self) -> usize {
        unimplemented!()
    }

    async fn lookup(&self, id: &NetworkIdentifier) -> Vec<NetworkConfig> {
        self.saved_networks.lock().await.get(id).cloned().unwrap_or_default()
    }

    async fn lookup_compatible(
        &self,
        ssid: &client_types::Ssid,
        _scan_security: client_types::SecurityTypeDetailed,
    ) -> Vec<NetworkConfig> {
        let predetermined_response = self.lookup_compatible_response.lock().await.inner.clone();
        match predetermined_response {
            Some(resp) => resp,
            None => {
                warn!("FakeSavedNetworksManager lookup_compatible response is not set, returning all networks with matching SSID");
                self.saved_networks
                    .lock()
                    .await
                    .iter()
                    .filter_map(
                        |(id, config)| if id.ssid == *ssid { Some(config.clone()) } else { None },
                    )
                    .flatten()
                    .collect()
            }
        }
    }

    /// Note that the configs-per-NetworkIdentifier limit is set to 1 in
    /// this mock struct. If a NetworkIdentifier is already stored, writing
    /// a config to it will evict the previously stored one.
async fn store( &self, network_id: NetworkIdentifier, credential: Credential, ) -> Result<Option<NetworkConfig>, NetworkConfigError> { if self.fail_all_stores { return Err(NetworkConfigError::StashWriteError); } let config = NetworkConfig::new(network_id.clone(), credential, false)?; return Ok(self .saved_networks .lock() .await .insert(network_id, vec![config]) .and_then(|mut v| v.pop())); } async fn record_connect_result( &self, id: NetworkIdentifier, credential: &Credential, bssid: client_types::Bssid, connect_result: fidl_sme::ConnectResult, scan_type: client_types::ScanObservation, ) { self.connect_results_recorded.try_lock().expect("failed to record connect result").push( ConnectResultRecord { id: id.clone(), credential: credential.clone(), bssid, connect_result, scan_type, }, ); } async fn record_disconnect( &self, id: &NetworkIdentifier, credential: &Credential, data: PastConnectionData, ) { let mut connections_recorded = self.connections_recorded.lock().await; connections_recorded.push(ConnectionRecord { id: id.clone(), credential: credential.clone(), data, }); } async fn record_periodic_metrics(&self) {} async fn record_scan_result( &self, scan_type: ScanResultType, _results: Vec<client_types::NetworkIdentifierDetailed>, ) { match scan_type { ScanResultType::Undirected => { let mut v = self.passive_scan_result_recorded.lock().await; *v = true; } ScanResultType::Directed(_) => { let mut v = self.active_scan_result_recorded.lock().await; *v = true } } } async fn get_networks(&self) -> Vec<NetworkConfig> { self.saved_networks .lock() .await .values() .into_iter() .flat_map(|cfgs| cfgs.clone()) .collect() } async fn get_past_connections( &self, _id: &NetworkIdentifier, _credential: &Credential, _bssid: &client_types::Bssid, ) -> PastConnectionList { self.past_connections_response.clone() } } pub fn create_wlan_hasher() -> WlanHasher { WlanHasher::new(rand::thread_rng().gen::<u64>().to_le_bytes()) } pub fn create_inspect_persistence_channel() -> (mpsc::Sender<String>, mpsc::Receiver<String>) { const DEFAULT_BUFFER_SIZE: usize = 100; // arbitrary value mpsc::channel(DEFAULT_BUFFER_SIZE) } /// Create past connection data with all random values. Tests can set the values they care about. pub fn random_connection_data() -> PastConnectionData { let mut rng = rand::thread_rng(); let connect_time = fasync::Time::from_nanos(rng.gen::<u16>().into()); let time_to_connect = zx::Duration::from_seconds(rng.gen_range::<i64, _>(5..10)); let uptime = zx::Duration::from_seconds(rng.gen_range::<i64, _>(5..1000)); let disconnect_time = connect_time + time_to_connect + uptime; PastConnectionData::new( client_types::Bssid( (0..6).map(|_| rng.gen::<u8>()).collect::<Vec<u8>>().try_into().unwrap(), ), connect_time, time_to_connect, disconnect_time, uptime, client_types::DisconnectReason::DisconnectDetectedFromSme, SignalData::new(rng.gen_range(-90..-20), rng.gen_range(10..50), 10),
#[derive(Clone)] pub struct FakeScanRequester { // A type alias for this complex type would be needless indirection, so allow the complex type #[allow(clippy::type_complexity)] pub scan_results: Arc<Mutex<VecDeque<Result<Vec<client_types::ScanResult>, client_types::ScanError>>>>, #[allow(clippy::type_complexity)] pub scan_requests: Arc<Mutex<Vec<(scan::ScanReason, Vec<client_types::Ssid>, Vec<client_types::WlanChan>)>>>, } impl FakeScanRequester { pub fn new() -> Self { FakeScanRequester { scan_results: Arc::new(Mutex::new(VecDeque::new())), scan_requests: Arc::new(Mutex::new(vec![])), } } pub async fn add_scan_result( &self, res: Result<Vec<client_types::ScanResult>, client_types::ScanError>, ) { self.scan_results.lock().await.push_back(res); } } #[async_trait] impl scan::ScanRequestApi for FakeScanRequester { async fn perform_scan( &self, scan_reason: scan::ScanReason, ssids: Vec<client_types::Ssid>, channels: Vec<client_types::WlanChan>, ) -> Result<Vec<client_types::ScanResult>, client_types::ScanError> { self.scan_requests.lock().await.push((scan_reason, ssids, channels)); self.scan_results.lock().await.pop_front().unwrap() } }
rng.gen::<u8>().into(), ) }
random_line_split
fakes.rs
// Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#![cfg(test)]

use {
    crate::{
        client::{bss_selection::SignalData, scan, types as client_types},
        config_management::{
            Credential, NetworkConfig, NetworkConfigError, NetworkIdentifier, PastConnectionData,
            PastConnectionList, SavedNetworksManagerApi, ScanResultType,
        },
    },
    async_trait::async_trait,
    fidl_fuchsia_wlan_sme as fidl_sme, fuchsia_async as fasync, fuchsia_zircon as zx,
    futures::{channel::mpsc, lock::Mutex},
    log::{info, warn},
    rand::Rng,
    std::{
        collections::{HashMap, VecDeque},
        convert::TryInto,
        sync::Arc,
    },
    wlan_common::hasher::WlanHasher,
};

pub struct FakeSavedNetworksManager {
    saved_networks: Mutex<HashMap<NetworkIdentifier, Vec<NetworkConfig>>>,
    connections_recorded: Mutex<Vec<ConnectionRecord>>,
    connect_results_recorded: Mutex<Vec<ConnectResultRecord>>,
    lookup_compatible_response: Mutex<LookupCompatibleResponse>,
    pub fail_all_stores: bool,
    pub active_scan_result_recorded: Arc<Mutex<bool>>,
    pub passive_scan_result_recorded: Arc<Mutex<bool>>,
    pub past_connections_response: PastConnectionList,
}

#[derive(Debug, Clone, PartialEq)]
pub struct ConnectionRecord {
    pub id: NetworkIdentifier,
    pub credential: Credential,
    pub data: PastConnectionData,
}

#[derive(Debug, Clone, PartialEq)]
pub struct ConnectResultRecord {
    pub id: NetworkIdentifier,
    pub credential: Credential,
    pub bssid: client_types::Bssid,
    pub connect_result: fidl_sme::ConnectResult,
    pub scan_type: client_types::ScanObservation,
}

/// Use a struct so that the option can be updated from None to Some to allow the response to be
/// set after FakeSavedNetworksManager is created. Use an optional response value rather than
/// defaulting to an empty vector so that an unset response is detectable: if the response is not
/// set, lookup_compatible logs a warning and falls back to returning all saved configs with a
/// matching SSID.
struct LookupCompatibleResponse {
    inner: Option<Vec<NetworkConfig>>,
}

impl LookupCompatibleResponse {
    fn new() -> Self {
        LookupCompatibleResponse { inner: None }
    }
}

impl FakeSavedNetworksManager {
    pub fn new() -> Self {
        Self {
            saved_networks: Mutex::new(HashMap::new()),
            connections_recorded: Mutex::new(vec![]),
            connect_results_recorded: Mutex::new(vec![]),
            fail_all_stores: false,
            lookup_compatible_response: Mutex::new(LookupCompatibleResponse::new()),
            active_scan_result_recorded: Arc::new(Mutex::new(false)),
            passive_scan_result_recorded: Arc::new(Mutex::new(false)),
            past_connections_response: PastConnectionList::new(),
        }
    }

    /// Create a FakeSavedNetworksManager, saving network configs with the specified
    /// network identifiers and credentials at init.
    pub fn new_with_saved_networks(network_configs: Vec<(NetworkIdentifier, Credential)>) -> Self {
        let saved_networks = network_configs
            .into_iter()
            .filter_map(|(id, cred)| {
                NetworkConfig::new(id.clone(), cred, false).ok().map(|config| (id, vec![config]))
            })
            .collect::<HashMap<NetworkIdentifier, Vec<NetworkConfig>>>();
        Self {
            saved_networks: Mutex::new(saved_networks),
            connections_recorded: Mutex::new(vec![]),
            connect_results_recorded: Mutex::new(vec![]),
            fail_all_stores: false,
            lookup_compatible_response: Mutex::new(LookupCompatibleResponse::new()),
            active_scan_result_recorded: Arc::new(Mutex::new(false)),
            passive_scan_result_recorded: Arc::new(Mutex::new(false)),
            past_connections_response: PastConnectionList::new(),
        }
    }

    /// Returns the past connections as they were recorded, rather than how they would have been
    /// stored.
    pub fn get_recorded_past_connections(&self) -> Vec<ConnectionRecord> {
        self.connections_recorded
            .try_lock()
            .expect("expect locking self.connections_recorded to succeed")
            .clone()
    }

    pub fn get_recorded_connect_results(&self) -> Vec<ConnectResultRecord> {
        self.connect_results_recorded
            .try_lock()
            .expect("expect locking self.connect_results_recorded to succeed")
            .clone()
    }

    /// Manually change the hidden network probability of a saved network.
    pub async fn update_hidden_prob(&self, id: NetworkIdentifier, hidden_prob: f32) {
        let mut saved_networks = self.saved_networks.lock().await;
        let networks = match saved_networks.get_mut(&id) {
            Some(networks) => networks,
            None => {
                info!("Failed to find network to update");
                return;
            }
        };
        for network in networks.iter_mut() {
            network.hidden_probability = hidden_prob;
        }
    }

    pub fn set_lookup_compatible_response(&self, response: Vec<NetworkConfig>) {
        self.lookup_compatible_response.try_lock().expect("failed to get lock").inner =
            Some(response);
    }
}

#[async_trait]
impl SavedNetworksManagerApi for FakeSavedNetworksManager {
    async fn remove(
        &self,
        network_id: NetworkIdentifier,
        credential: Credential,
    ) -> Result<bool, NetworkConfigError> {
        let mut saved_networks = self.saved_networks.lock().await;
        if let Some(network_configs) = saved_networks.get_mut(&network_id) {
            let original_len = network_configs.len();
            network_configs.retain(|cfg| cfg.credential != credential);
            if original_len != network_configs.len() {
                return Ok(true);
            }
        }
        Ok(false)
    }

    async fn known_network_count(&self) -> usize {
        unimplemented!()
    }

    async fn lookup(&self, id: &NetworkIdentifier) -> Vec<NetworkConfig> {
        self.saved_networks.lock().await.get(id).cloned().unwrap_or_default()
    }

    async fn lookup_compatible(
        &self,
        ssid: &client_types::Ssid,
        _scan_security: client_types::SecurityTypeDetailed,
    ) -> Vec<NetworkConfig> {
        let predetermined_response = self.lookup_compatible_response.lock().await.inner.clone();
        match predetermined_response {
            Some(resp) => resp,
            None => {
                warn!("FakeSavedNetworksManager lookup_compatible response is not set, returning all networks with matching SSID");
                self.saved_networks
                    .lock()
                    .await
                    .iter()
                    .filter_map(
                        |(id, config)| if id.ssid == *ssid { Some(config.clone()) } else { None },
                    )
                    .flatten()
                    .collect()
            }
        }
    }

    /// Note that the configs-per-NetworkIdentifier limit is set to 1 in
    /// this mock struct. If a NetworkIdentifier is already stored, writing
    /// a config to it will evict the previously stored one.
    async fn store(
        &self,
        network_id: NetworkIdentifier,
        credential: Credential,
    ) -> Result<Option<NetworkConfig>, NetworkConfigError> {
        if self.fail_all_stores {
            return Err(NetworkConfigError::StashWriteError);
        }
        let config = NetworkConfig::new(network_id.clone(), credential, false)?;
        Ok(self
            .saved_networks
            .lock()
            .await
            .insert(network_id, vec![config])
            .and_then(|mut v| v.pop()))
    }

    async fn record_connect_result(
        &self,
        id: NetworkIdentifier,
        credential: &Credential,
        bssid: client_types::Bssid,
        connect_result: fidl_sme::ConnectResult,
        scan_type: client_types::ScanObservation,
    ) {
        self.connect_results_recorded.try_lock().expect("failed to record connect result").push(
            ConnectResultRecord {
                id: id.clone(),
                credential: credential.clone(),
                bssid,
                connect_result,
                scan_type,
            },
        );
    }

    async fn record_disconnect(
        &self,
        id: &NetworkIdentifier,
        credential: &Credential,
        data: PastConnectionData,
    ) {
        let mut connections_recorded = self.connections_recorded.lock().await;
        connections_recorded.push(ConnectionRecord {
            id: id.clone(),
            credential: credential.clone(),
            data,
        });
    }

    async fn record_periodic_metrics(&self) {}

    async fn record_scan_result(
        &self,
        scan_type: ScanResultType,
        _results: Vec<client_types::NetworkIdentifierDetailed>,
    ) {
        match scan_type {
            ScanResultType::Undirected => {
                let mut v = self.passive_scan_result_recorded.lock().await;
                *v = true;
            }
            ScanResultType::Directed(_) => {
                let mut v = self.active_scan_result_recorded.lock().await;
                *v = true;
            }
        }
    }

    async fn get_networks(&self) -> Vec<NetworkConfig> {
        self.saved_networks
            .lock()
            .await
            .values()
            .flat_map(|cfgs| cfgs.clone())
            .collect()
    }

    async fn get_past_connections(
        &self,
        _id: &NetworkIdentifier,
        _credential: &Credential,
        _bssid: &client_types::Bssid,
    ) -> PastConnectionList {
        self.past_connections_response.clone()
    }
}

pub fn create_wlan_hasher() -> WlanHasher {
    WlanHasher::new(rand::thread_rng().gen::<u64>().to_le_bytes())
}

pub fn create_inspect_persistence_channel() -> (mpsc::Sender<String>, mpsc::Receiver<String>) {
    const DEFAULT_BUFFER_SIZE: usize = 100; // arbitrary value
    mpsc::channel(DEFAULT_BUFFER_SIZE)
}

/// Create past connection data with all random values. Tests can set the values they care about.
pub fn random_connection_data() -> PastConnectionData {
    let mut rng = rand::thread_rng();
    let connect_time = fasync::Time::from_nanos(rng.gen::<u16>().into());
    let time_to_connect = zx::Duration::from_seconds(rng.gen_range::<i64, _>(5..10));
    let uptime = zx::Duration::from_seconds(rng.gen_range::<i64, _>(5..1000));
    let disconnect_time = connect_time + time_to_connect + uptime;
    PastConnectionData::new(
        client_types::Bssid(
            (0..6).map(|_| rng.gen::<u8>()).collect::<Vec<u8>>().try_into().unwrap(),
        ),
        connect_time,
        time_to_connect,
        disconnect_time,
        uptime,
        client_types::DisconnectReason::DisconnectDetectedFromSme,
        SignalData::new(rng.gen_range(-90..-20), rng.gen_range(10..50), 10),
        rng.gen::<u8>().into(),
    )
}

#[derive(Clone)]
pub struct FakeScanRequester {
    // A type alias for this complex type would be needless indirection, so allow the complex type
    #[allow(clippy::type_complexity)]
    pub scan_results:
        Arc<Mutex<VecDeque<Result<Vec<client_types::ScanResult>, client_types::ScanError>>>>,
    #[allow(clippy::type_complexity)]
    pub scan_requests:
        Arc<Mutex<Vec<(scan::ScanReason, Vec<client_types::Ssid>, Vec<client_types::WlanChan>)>>>,
}

impl FakeScanRequester {
    pub fn new() -> Self {
        FakeScanRequester {
            scan_results: Arc::new(Mutex::new(VecDeque::new())),
            scan_requests: Arc::new(Mutex::new(vec![])),
        }
    }

    pub async fn add_scan_result(
        &self,
        res: Result<Vec<client_types::ScanResult>, client_types::ScanError>,
    ) {
        self.scan_results.lock().await.push_back(res);
    }
}

#[async_trait]
impl scan::ScanRequestApi for FakeScanRequester {
    async fn perform_scan(
        &self,
        scan_reason: scan::ScanReason,
        ssids: Vec<client_types::Ssid>,
        channels: Vec<client_types::WlanChan>,
    ) -> Result<Vec<client_types::ScanResult>, client_types::ScanError> {
        self.scan_requests.lock().await.push((scan_reason, ssids, channels));
        self.scan_results.lock().await.pop_front().unwrap()
    }
}
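The store doc comment above calls out the one-config-per-NetworkIdentifier limit. Below is a minimal sketch of a test exercising the eviction; the id and credential arguments are left to the caller because their constructors live elsewhere in config_management, the SavedNetworksManagerApi trait is assumed to be in scope, and futures::executor::block_on stands in for whatever async test executor the suite uses.

use futures::executor::block_on;

// Sketch only: the second store under the same identifier evicts the first config.
fn store_evicts_previous_config(id: NetworkIdentifier, credential: Credential) {
    block_on(async {
        let manager = FakeSavedNetworksManager::new();
        // The first store succeeds and returns no evicted config.
        assert!(manager.store(id.clone(), credential.clone()).await.expect("store failed").is_none());
        // Storing under the same NetworkIdentifier again evicts the previously stored config.
        assert!(manager.store(id, credential).await.expect("store failed").is_some());
    });
}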
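FakeScanRequester pairs a FIFO of canned scan results with a log of the requests it receives, so a test can both drive and observe scanning. A sketch under the same executor assumption; the ScanReason value is taken as a parameter because its variants are defined in the scan module and not shown here.

use futures::executor::block_on;

// Sketch only: queue one result, perform one scan, then inspect the recorded request.
fn scan_requester_round_trip(reason: scan::ScanReason) {
    block_on(async {
        let requester = FakeScanRequester::new();
        requester.add_scan_result(Ok(vec![])).await;
        // perform_scan pops the queued result and records the request it was given.
        let result = scan::ScanRequestApi::perform_scan(&requester, reason, vec![], vec![]).await;
        assert!(result.expect("scan failed").is_empty());
        assert_eq!(requester.scan_requests.lock().await.len(), 1);
    });
}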
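Because lookup_compatible only falls back (with a warning) when no response has been queued, tests that depend on its output should pin the response first. A sketch, with the configs, ssid, and security arguments assumed to be built by the caller:

use futures::executor::block_on;

// Sketch only: pin the lookup_compatible response before exercising the code under test.
fn pin_lookup_compatible_response(
    manager: &FakeSavedNetworksManager,
    configs: Vec<NetworkConfig>,
    ssid: &client_types::Ssid,
    security: client_types::SecurityTypeDetailed,
) {
    manager.set_lookup_compatible_response(configs.clone());
    // The fake now returns exactly the pinned configs, regardless of saved networks.
    let returned = block_on(SavedNetworksManagerApi::lookup_compatible(manager, ssid, security));
    assert_eq!(returned.len(), configs.len());
}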
render_global.rs
use std::error;
use std::rc::Rc;
use std::cell::RefCell;
use std::ops::Deref;
use std::sync::Mutex;

use gl_bindings::gl;
use cgmath::{Matrix4, SquareMatrix, vec3, Point3, Rad};

use crate::demo;
use crate::utils::lazy_option::Lazy;
use crate::render::{Framebuffer, FramebufferAttachment, AttachmentPoint, ImageFormat, RenderSubsystem};
use crate::render::separable_sss::SeparableSSSSubsystem;
use crate::render::shader::managed::ManagedProgram;
use crate::asset::AssetPathBuf;

pub struct RenderGlobal {
    current_configuration: Rc<RefCell<GraphicsConfiguration>>,
    current_resolution: (u32, u32),
    separable_sss_system: SeparableSSSSubsystem,
    framebuffer_scene_hdr_ehaa: Option<Rc<RefCell<Framebuffer>>>,
    program_ehaa_scene: ManagedProgram,
    program_post_composite: ManagedProgram,
    frametime_query_object_gl: gl::uint,
    queued_shader_reload: bool,
}

impl RenderGlobal {
    pub fn new() -> RenderGlobal {
        RenderGlobal {
            current_configuration: Rc::new(RefCell::new(GraphicsConfiguration::new())),
            current_resolution: (0, 0),
            separable_sss_system: SeparableSSSSubsystem::new(),
            framebuffer_scene_hdr_ehaa: None,
            program_ehaa_scene: ManagedProgram::new(Some(AssetPathBuf::from("/shaders/legacy/main_scene_forward.program"))),
            program_post_composite: ManagedProgram::new(Some(AssetPathBuf::from("/shaders/post_composite.program"))),
            frametime_query_object_gl: 0,
            queued_shader_reload: false,
        }
    }

    pub fn initialize(&mut self, resolution: (u32, u32)) -> Result<(), Box<dyn error::Error>> {
        // Set initial resolution
        self.current_resolution = resolution;

        // Init subsystems
        self.separable_sss_system.initialize();

        // Do initial reconfiguration
        self.do_reconfigure_pipeline(self.current_resolution, false)?;

        Ok(())
    }

    pub fn do_reconfigure_pipeline(&mut self, new_resolution: (u32, u32), only_resize: bool) -> Result<(), Box<dyn error::Error>> {
        // Update state
        self.current_resolution = new_resolution;

        let config = RefCell::borrow(&self.current_configuration);
        let event = ReconfigureEvent {
            configuration: config.deref(),
            resolution: new_resolution,
            only_resize,
        };

        // Configure main fbo
        if let Some(t) = &mut self.framebuffer_scene_hdr_ehaa {
            let mut fbo = RefCell::borrow_mut(t);
            fbo.resize(event.resolution.0, event.resolution.1);
        } else {
            // Create fbo
            self.framebuffer_scene_hdr_ehaa = Some(Rc::new(RefCell::new({
                let mut fbo = Framebuffer::new(event.resolution.0, event.resolution.1);
                fbo.add_attachment(FramebufferAttachment::from_new_texture(AttachmentPoint::Depth, ImageFormat::get(gl::DEPTH_COMPONENT32F)));
                fbo.add_attachment(FramebufferAttachment::from_new_texture(AttachmentPoint::Color(0), ImageFormat::get(gl::R11F_G11F_B10F)));
                fbo.add_attachment(FramebufferAttachment::from_new_texture(AttachmentPoint::Color(1), ImageFormat::get(gl::RGB8)));
                fbo.add_attachment(FramebufferAttachment::from_new_texture(AttachmentPoint::Color(2), ImageFormat::get(gl::RGB8)));
                // fbo.add_attachment(FramebufferAttachment::from_new_texture(AttachmentPoint::Color(1), ImageFormat::get(gl::RGBA8)));
                // fbo.add_attachment(FramebufferAttachment::from_new_texture(AttachmentPoint::Color(1), ImageFormat::get(gl::RG16_SNORM)));
                fbo.allocate();
                fbo
            })));
        }

        // Reconfigure subsystems
        self.separable_sss_system.reconfigure(event);

        // Drop config for now
        drop(config);

        // Create query object
        if self.frametime_query_object_gl == 0 {
            self.frametime_query_object_gl = unsafe {
                let mut query: gl::uint = 0;
                gl::CreateQueries(gl::TIME_ELAPSED, 1, &mut query);
                query
            };
        }

        // Load shaders
        self.reload_shaders();

        Ok(())
    }

    fn reload_shaders(&mut self) {
        // let asset_folder = demo::demo_instance().asset_folder.as_mut().unwrap();
        // Log
        println!("Reloading shaders!");

        // Reload shaders from asset
        self.program_ehaa_scene.reload_from_asset().expect("Failed to reload scene shader from asset");
        self.program_post_composite.reload_from_asset().expect("Failed to reload post composite shader from asset");

        // // Delete old shaders
        // if let Some(program) = self.program_ehaa_scene.take() {
        //     let mut program = RefCell::borrow_mut(&program);
        //     program.delete();
        // }
        // if let Some(program) = self.program_post_resolve.take() {
        //     let mut program = RefCell::borrow_mut(&program);
        //     program.delete();
        // }

        // Reload shader from assets
        // // Load shaders
        // self.program_ehaa_scene = Some({
        //     let mut s = ShaderProgram::new_from_file(
        //         &asset_folder.join("shaders/scene_ehaa.vert.glsl"),
        //         &asset_folder.join("shaders/scene_ehaa.frag.glsl"),
        //         Some(&asset_folder.join("shaders/scene_ehaa.tesseval.glsl"))
        ////         None
        //     );
        //     s.compile();
        //     Rc::new(RefCell::new(s))
        // });
        // self.program_post_resolve = Some({
        //     let mut s = ShaderProgram::new_from_file(
        //         &asset_folder.join("shaders/post_resolve.vert.glsl"),
        //         &asset_folder.join("shaders/post_resolve.frag.glsl"),
        //         None
        //     );
        //     s.compile();
        //     Rc::new(RefCell::new(s))
        // });

        // Reload subsystem shaders
        self.separable_sss_system.reload_shaders();
    }

    pub fn do_render_frame(&mut self) {
        // Reload shaders if needed
        if self.queued_shader_reload {
            self.queued_shader_reload = false;
            self.reload_shaders();
        }

        // Update cam state
        // LATER: Do this when rendering a scene: Get active camera from scene, make CameraState, calc proj matrix, pass state along in functions
        let active_camera = demo::demo_instance().get_test_camera();
        let active_camera = if let Some(cam) = active_camera.upgrade() {
            cam
        } else {
            // No active camera, so don't render anything for now
            return;
        };

        let camera_fovy: Rad<f32>;
        let camera_near_z: f32;
        let camera_far_z: f32;
        let cam_state = {
            let cam = Mutex::lock(&active_camera).unwrap();
            let mut state = RenderCameraState::new();

            // Get camera fovy
            // let projection: &dyn Any = cam.projection.as_ref();
            // let projection: &PerspectiveProjection = projection.downcast_ref::<PerspectiveProjection>().unwrap();
            camera_fovy = cam.projection.camera_fovy();
            let (near_z, far_z) = cam.projection.test_depth_planes();
            camera_near_z = near_z;
            camera_far_z = far_z;

            // Base matrix for our coordinate system
            let base_matrix = Matrix4::look_at_dir(Point3 {x: 0.0, y: 0.0, z: 0.0}, vec3(0.0, 0.0, 1.0), vec3(0.0, 1.0, 0.0)); // For some reason look_at_dir inverts the dir vector

            state.view_matrix = base_matrix * Matrix4::from(cam.rotation) * Matrix4::from_translation(-cam.translation);
            state.projection_matrix = cam.projection.projection_matrix(cam.viewport_size);
            state
        };
        let viewprojection_matrix = cam_state.projection_matrix * cam_state.view_matrix;

        // Recompile shaders
        if self.program_ehaa_scene.needs_recompile() {
            self.program_ehaa_scene.do_recompile();
        }
        if self.program_post_composite.needs_recompile() {
            self.program_post_composite.do_recompile();
        }

        unsafe {
            gl::Disable(gl::FRAMEBUFFER_SRGB);
            gl::Disable(gl::BLEND);

            gl::Enable(gl::CULL_FACE);
            gl::FrontFace(gl::CCW);
            gl::CullFace(gl::FRONT); // For some reason we need to cull FRONT. This might be due to reverse-z flipping the winding order?
            gl::Enable(gl::DEPTH_TEST);

            // Setup NDC z axis for reverse float depth
            gl::DepthFunc(gl::GREATER);
            gl::ClearDepth(0.0); // 0.0 is far with reverse z
            gl::ClipControl(gl::LOWER_LEFT, gl::ZERO_TO_ONE);
            gl::DepthRange(0.0, 1.0); // Standard (non-inversed) depth range, we use a reverse-z projection matrix instead

            // Use scene shader
            let scene_shader = self.program_ehaa_scene.program().unwrap();
            let scene_shader_gl = scene_shader.program_gl().unwrap();
            gl::UseProgram(scene_shader_gl);

            // Bind scene framebuffer
            let scene_fbo = RefCell::borrow(self.framebuffer_scene_hdr_ehaa.need());
            gl::BindFramebuffer(gl::FRAMEBUFFER, scene_fbo.handle_gl());

            // Set the viewport
            gl::Viewport(0, 0, self.current_resolution.0 as gl::sizei, self.current_resolution.1 as gl::sizei);

            gl::ClearColor(0.0, 0.0, 0.0, 0.0);
            gl::Clear(gl::COLOR_BUFFER_BIT | gl::DEPTH_BUFFER_BIT);

            { // Upload matrices
                let model_matrix = Matrix4::from_scale(1.0);
                let model_matrix_arr: [[f32; 4]; 4] = model_matrix.into();
                gl::UniformMatrix4fv(gl::GetUniformLocation(scene_shader_gl, "uMatrixModel\0".as_ptr() as *const gl::char), 1, gl::FALSE, model_matrix_arr.as_ptr() as *const gl::float);

                let view_matrix_arr: [[f32; 4]; 4] = cam_state.view_matrix.into();
                gl::UniformMatrix4fv(gl::GetUniformLocation(scene_shader_gl, "uMatrixView\0".as_ptr() as *const gl::char), 1, gl::FALSE, view_matrix_arr.as_ptr() as *const gl::float);

                let viewprojection_matrix_arr: [[f32; 4]; 4] = viewprojection_matrix.into();
                gl::UniformMatrix4fv(gl::GetUniformLocation(scene_shader_gl, "uMatrixViewProjection\0".as_ptr() as *const gl::char), 1, gl::FALSE, viewprojection_matrix_arr.as_ptr() as *const gl::float);
            }

            let start_frametimer = { // Start frametime timer
                let mut elapsed_frametime: u64 = std::u64::MAX;
                gl::GetQueryObjectui64v(self.frametime_query_object_gl, gl::QUERY_RESULT_NO_WAIT, &mut elapsed_frametime);

                if elapsed_frametime != std::u64::MAX {
                    let _float_frametime = (elapsed_frametime as f64) / 1e6;
                    // let title = format!("EHAA Demo ~ Frametime {} ms", float_frametime);
                    // self.window.need_mut().set_title(title.as_str());

                    // Restart query
                    gl::BeginQuery(gl::TIME_ELAPSED, self.frametime_query_object_gl);
                    true
                } else {
                    false
                }
            };

            // Set tessellation state
            gl::PatchParameteri(gl::PATCH_VERTICES, 3);
            gl::PatchParameterfv(gl::PATCH_DEFAULT_OUTER_LEVEL, [1.0f32, 1.0f32, 1.0f32, 1.0f32].as_ptr());
            gl::PatchParameterfv(gl::PATCH_DEFAULT_INNER_LEVEL, [1.0f32, 1.0f32].as_ptr());

            gl::EnableVertexAttribArray(0);
            // gl::EnableVertexAttribArray(1);
            // gl::EnableVertexAttribArray(2);

            /*
            { // Draw teapot
                let test_teapot_vbo = demo::demo_instance().test_teapot_vbo.need();
                gl::BindBuffer(gl::ARRAY_BUFFER, test_teapot_vbo.vbo_gl);
                gl::VertexAttribPointer(0, 3, gl::FLOAT, gl::FALSE, 0, 0 as *const gl::void);
                gl::DrawArrays(gl::PATCHES, 0, (crate::render::teapot::TEAPOT_VERTEX_DATA.len() / 3) as gl::sizei);
            }
            */

            // /*
            { // Draw head model
                let test_head_model = demo::demo_instance().test_head_model.need();

                // Bind textures
                gl::BindTextureUnit(1, test_head_model.tex_albedo.texture_gl());
                gl::BindTextureUnit(2, test_head_model.tex_normal.texture_gl());
                gl::BindTextureUnit(4, test_head_model.tex_transmission.texture_gl());

                gl::BindBuffer(gl::ARRAY_BUFFER, test_head_model.vertex_buffer_gl);

                // let stride = 8*4;
                let stride = 12*4;
                gl::EnableVertexAttribArray(0);
                gl::EnableVertexAttribArray(1);
                gl::EnableVertexAttribArray(2);
                gl::EnableVertexAttribArray(3);
                gl::VertexAttribPointer(0, 3, gl::FLOAT, gl::FALSE, stride, 0 as *const gl::void); // vertex
                gl::VertexAttribPointer(1, 2, gl::FLOAT, gl::FALSE, stride, (3*4 + 3*4) as *const gl::void); // texcoord
                gl::VertexAttribPointer(2, 3, gl::FLOAT, gl::FALSE, stride, (3*4) as *const gl::void); // normal
                gl::VertexAttribPointer(3, 4, gl::FLOAT, gl::FALSE, stride, (3*4 + 3*4 + 2*4) as *const gl::void); // tangent

                gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, test_head_model.index_buffer_gl);
                gl::DrawElements(gl::PATCHES, test_head_model.num_indices as gl::sizei, gl::UNSIGNED_INT, 0 as *const gl::void);
                // gl::DrawElements(gl::TRIANGLES, self.test_head_model.need().num_indices as gl::GLsizei, gl::UNSIGNED_INT, 0 as *const std::ffi::c_void);

                gl::DisableVertexAttribArray(0);
                gl::DisableVertexAttribArray(1);
                gl::DisableVertexAttribArray(2);
                gl::DisableVertexAttribArray(3);
            }
            // */

            /*
            { // Draw debug triangles
                gl::Begin(gl::PATCHES);
                // gl::VertexAttrib3f(2, 1.0, 0.616, 0.984);
                // gl::VertexAttribI1ui(1, 0);
                gl::VertexAttrib3f(0, 0.0, 0.1, 0.0);
                // gl::VertexAttribI1ui(1, 1);
                gl::VertexAttrib3f(0, 0.5, 0.2, 0.0);
                let (mouse_x, mouse_y) = demo::demo_instance().window.need().get_cursor_pos();
                // gl::VertexAttribI1ui(1, 2);
                gl::VertexAttrib3f(0, (mouse_x / 1280.0) as f32 * 2.0 - 1.0, 1.0 - (mouse_y / 720.0) as f32 * 2.0, 0.0);
                // gl::Vertex3f(0.1, 0.6 + 0.2*(std::time::UNIX_EPOCH.elapsed().unwrap().as_secs_f32()).sin(), 0.0);
                // gl::Vertex3f(0.1, 0.6, 0.0);
                // gl::VertexAttrib3f(2, 0.153, 0.0, 1.0);
                // gl::VertexAttribI1ui(1, 0);
                gl::VertexAttrib3f(0, 0.0, 0.1, 0.0);
                // gl::VertexAttribI1ui(1, 1);
                gl::VertexAttrib3f(0, 0.2, 0.6, 0.0);
                // gl::VertexAttribI1ui(1, 2);
                // gl::VertexAttrib3f(0, (mouse_x / 1280.0) as f32 * 2.0 - 1.0, 1.0 - (mouse_y / 720.0) as f32 * 2.0, 0.0);
                gl::End();
            }
            */

            { // Resolve separable sss
                let main_fbo = RefCell::borrow(self.framebuffer_scene_hdr_ehaa.need());
                let scene_hdr_rt = RefCell::borrow(&main_fbo.get_attachment(AttachmentPoint::Color(0)).unwrap().texture);
                let scene_depth_rt = RefCell::borrow(&main_fbo.get_attachment(AttachmentPoint::Depth).unwrap().texture);

                // Render ssss
                self.separable_sss_system.do_resolve_sss(&scene_hdr_rt, &scene_depth_rt, camera_fovy, (camera_near_z, camera_far_z));
            }

            { // Do ehaa resolve pass
                let post_resolve_shader = self.program_post_composite.program().unwrap();

                // // DEBUG: Blit framebuffer
                // gl::BlitNamedFramebuffer(self.framebuffer_scene_hdr_ehaa.need().handle_gl(), 0, 0, 0, 1280, 720, 0, 0, 1280, 720, gl::COLOR_BUFFER_BIT, gl::NEAREST);

                gl::Disable(gl::DEPTH_TEST);

                // Bind resolve shader
                gl::UseProgram(post_resolve_shader.program_gl().unwrap());

                // Bind textures
                let main_fbo = RefCell::borrow(self.framebuffer_scene_hdr_ehaa.need());
                // gl::BindTextureUnit(0, RefCell::borrow(&main_fbo.get_attachment(AttachmentPoint::Color(0)).unwrap().texture).texture_gl());
                gl::BindTextureUnit(0, RefCell::borrow(&self.separable_sss_system.fbo_resolve_final.get_attachment(AttachmentPoint::Color(0)).unwrap().texture).texture_gl());
                gl::BindTextureUnit(1, RefCell::borrow(&main_fbo.get_attachment(AttachmentPoint::Color(1)).unwrap().texture).texture_gl());
                gl::BindTextureUnit(2, RefCell::borrow(&main_fbo.get_attachment(AttachmentPoint::Color(2)).unwrap().texture).texture_gl());

                // Bind back buffer
                gl::BindFramebuffer(gl::FRAMEBUFFER, 0);

                // Draw oversized fullscreen triangle
                gl::DisableVertexAttribArray(0);
                gl::DisableVertexAttribArray(1);
                gl::DisableVertexAttribArray(2);
                gl::DrawArrays(gl::TRIANGLES, 0, 3);
            }

            // End frametimer query
            if start_frametimer {
                gl::EndQuery(gl::TIME_ELAPSED);
            }
        }
    }

    pub fn queue_shader_reload(&mut self) {
        self.queued_shader_reload = true;
    }
}

pub struct GraphicsConfiguration {
}

impl GraphicsConfiguration {
    pub fn new() -> GraphicsConfiguration {
        GraphicsConfiguration {}
    }
}

pub struct ReconfigureEvent<'a> {
    pub configuration: &'a GraphicsConfiguration,
    pub resolution: (u32, u32),
    pub only_resize: bool,
}

pub struct RenderCameraState {
    pub projection_matrix: Matrix4<f32>,
    pub view_matrix: Matrix4<f32>,
}

impl RenderCameraState {
    pub fn new() -> RenderCameraState {
        RenderCameraState {
            projection_matrix: Matrix4::identity(),
            view_matrix: Matrix4::identity(),
        }
    }
}
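The depth setup in do_render_frame (DepthFunc(GREATER), ClearDepth(0.0), ClipControl with ZERO_TO_ONE) expects a projection matrix that maps the near plane to NDC depth 1.0 and the far distance toward 0.0. The demo's actual matrix comes from the camera's projection_matrix, which is not shown here; below is a minimal cgmath sketch of the common infinite-far-plane reverse-z perspective that satisfies those conventions.

use cgmath::{Matrix4, Rad};

// Sketch only: right-handed, infinite-far-plane reverse-z perspective for a
// ZERO_TO_ONE clip-space depth range. Near plane maps to depth 1.0, infinity to 0.0.
fn reverse_z_infinite_perspective(fovy: Rad<f32>, aspect: f32, near: f32) -> Matrix4<f32> {
    let f = 1.0 / (fovy.0 * 0.5).tan();
    // cgmath's Matrix4::new takes column-major arguments.
    Matrix4::new(
        f / aspect, 0.0, 0.0,  0.0, // column 0
        0.0,        f,   0.0,  0.0, // column 1
        0.0,        0.0, 0.0, -1.0, // column 2: w_clip = -z_eye
        0.0,        0.0, near, 0.0, // column 3: z_clip = near
    )
}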
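The attribute pointers in the head-model draw imply one interleaved 48-byte vertex. A sketch of that layout as a struct; only the offsets and component counts come from the code above, while the field names are guesses.

// Sketch only: layout implied by stride = 12*4 and the VertexAttribPointer offsets.
#[repr(C)]
struct HeadVertex {
    position: [f32; 3], // offset 0,  attribute 0
    normal: [f32; 3],   // offset 12, attribute 2
    texcoord: [f32; 2], // offset 24, attribute 1
    tangent: [f32; 4],  // offset 32, attribute 3
} // total 48 bytes == stride 12 * 4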
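The composite pass draws three vertices with no vertex attributes bound, i.e. a single oversized triangle that covers the whole screen. The vertex shader for this idiom usually derives positions from gl_VertexID; a sketch of that derivation follows (the demo's actual shader source is not shown here).

// Sketch only: NDC positions of the oversized fullscreen triangle for vertex ids 0..3.
// Yields (-1,-1), (3,-1), (-1,3), which covers the full [-1,1] square.
fn fullscreen_triangle_vertex(vertex_id: u32) -> [f32; 2] {
    let x = ((vertex_id << 1) & 2) as f32 * 2.0 - 1.0;
    let y = (vertex_id & 2) as f32 * 2.0 - 1.0;
    [x, y]
}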
stride, (3*4 + 3*4) as *const gl::void); // texcoord gl::VertexAttribPointer(2, 3, gl::FLOAT, gl::FALSE, stride, (3*4) as *const gl::void); // normal gl::VertexAttribPointer(3, 4, gl::FLOAT, gl::FALSE, stride, (3*4 + 3*4 + 2*4) as *const gl::void); // tangent gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, test_head_model.index_buffer_gl); gl::DrawElements(gl::PATCHES, test_head_model.num_indices as gl::sizei, gl::UNSIGNED_INT, 0 as *const gl::void); // gl::DrawElements(gl::TRIANGLES, self.test_head_model.need().num_indices as gl::GLsizei, gl::UNSIGNED_INT, 0 as *const std::ffi::c_void); gl::DisableVertexAttribArray(0); gl::DisableVertexAttribArray(1); gl::DisableVertexAttribArray(2); gl::DisableVertexAttribArray(3); } // */ /* {// Draw debug triangles gl::Begin(gl::PATCHES); // gl::VertexAttrib3f(2, 1.0, 0.616, 0.984); // gl::VertexAttribI1ui(1, 0); gl::VertexAttrib3f(0, 0.0, 0.1, 0.0); // gl::VertexAttribI1ui(1, 1); gl::VertexAttrib3f(0, 0.5, 0.2, 0.0); let (mouse_x, mouse_y) = demo::demo_instance().window.need().get_cursor_pos(); // gl::VertexAttribI1ui(1, 2); gl::VertexAttrib3f(0, (mouse_x / 1280.0) as f32 * 2.0 - 1.0, 1.0 - (mouse_y / 720.0) as f32 * 2.0, 0.0); // gl::Vertex3f(0.1, 0.6 + 0.2*(std::time::UNIX_EPOCH.elapsed().unwrap().as_secs_f32()).sin(), 0.0); // gl::Vertex3f(0.1, 0.6, 0.0); // gl::VertexAttrib3f(2, 0.153, 0.0, 1.0); // gl::VertexAttribI1ui(1, 0); gl::VertexAttrib3f(0, 0.0, 0.1, 0.0); // gl::VertexAttribI1ui(1, 1); gl::VertexAttrib3f(0, 0.2, 0.6, 0.0); // gl::VertexAttribI1ui(1, 2); // gl::VertexAttrib3f(0, (mouse_x / 1280.0) as f32 * 2.0 - 1.0, 1.0 - (mouse_y / 720.0) as f32 * 2.0, 0.0); gl::End(); } */ {// Resolve separable sss let main_fbo = RefCell::borrow(self.framebuffer_scene_hdr_ehaa.need()); let scene_hdr_rt = RefCell::borrow(&main_fbo.get_attachment(AttachmentPoint::Color(0)).unwrap().texture); let scene_depth_rt = RefCell::borrow(&main_fbo.get_attachment(AttachmentPoint::Depth).unwrap().texture); // Render ssss self.separable_sss_system.do_resolve_sss(&scene_hdr_rt, &scene_depth_rt, camera_fovy, (camera_near_z, camera_far_z)); } {// Do ehaa resolve pass let post_resolve_shader = self.program_post_composite.program().unwrap(); // // DEBUG: Blit framebuffer // gl::BlitNamedFramebuffer(self.framebuffer_scene_hdr_ehaa.need().handle_gl(), 0, 0, 0, 1280, 720, 0, 0, 1280, 720, gl::COLOR_BUFFER_BIT, gl::NEAREST); gl::Disable(gl::DEPTH_TEST); // Bind resolve shader gl::UseProgram(post_resolve_shader.program_gl().unwrap()); // Bind shaders let main_fbo = RefCell::borrow(self.framebuffer_scene_hdr_ehaa.need()); // gl::BindTextureUnit(0, RefCell::borrow(&main_fbo.get_attachment(AttachmentPoint::Color(0)).unwrap().texture).texture_gl()); gl::BindTextureUnit(0, RefCell::borrow(&self.separable_sss_system.fbo_resolve_final.get_attachment(AttachmentPoint::Color(0)).unwrap().texture).texture_gl()); gl::BindTextureUnit(1, RefCell::borrow(&main_fbo.get_attachment(AttachmentPoint::Color(1)).unwrap().texture).texture_gl()); gl::BindTextureUnit(2, RefCell::borrow(&main_fbo.get_attachment(AttachmentPoint::Color(2)).unwrap().texture).texture_gl()); // Bind back buffer gl::BindFramebuffer(gl::FRAMEBUFFER, 0); // Draw oversized fullscreen triangles gl::DisableVertexAttribArray(0); gl::DisableVertexAttribArray(1); gl::DisableVertexAttribArray(2); gl::DrawArrays(gl::TRIANGLES, 0, 3); } // End frametimer query if start_frametimer { gl::EndQuery(gl::TIME_ELAPSED); } } } pub fn queue_shader_reload(&mut self) { self.queued_shader_reload = true; } } pub struct GraphicsConfiguration { } impl 
GraphicsConfiguration { pub fn new() -> GraphicsConfiguration { GraphicsConfiguration {} } } pub struct ReconfigureEvent<'a> { pub configuration: &'a GraphicsConfiguration, pub resolution: (u32, u32), pub only_resize: bool, } pub struct RenderCameraState { pub projection_matrix: Matrix4<f32>, pub view_matrix: Matrix4<f32>, } impl RenderCameraState { pub fn new() -> RenderCameraState { RenderCameraState { projection_matrix: Matrix4::identity(), view_matrix: Matrix4::identity(), } } }
reload_shaders
identifier_name
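The `do_render_frame` pass above is built around reverse-z depth: it clears depth to 0.0, compares with `gl::GREATER`, and switches clip space to a [0, 1] range via `gl::ClipControl`. The matching projection matrix comes from `cam.projection.projection_matrix(...)`, which lives outside this file; the sketch below shows, assuming a right-handed view space, what such a matrix could look like with cgmath (`reverse_z_perspective` is an illustrative name, not part of this codebase).

use cgmath::{Matrix4, Rad};

fn reverse_z_perspective(fovy: Rad<f32>, aspect: f32, near: f32, far: f32) -> Matrix4<f32> {
    let f = 1.0 / (fovy.0 / 2.0).tan();
    // Chosen so view-space z = -near lands on depth 1.0 and z = -far on 0.0,
    // matching the gl::ClearDepth(0.0) / gl::DepthFunc(gl::GREATER) state above.
    let a = near / (far - near);
    let b = near * far / (far - near);
    // cgmath's Matrix4::new takes its 16 arguments in column-major order.
    Matrix4::new(
        f / aspect, 0.0, 0.0, 0.0,
        0.0, f, 0.0, 0.0,
        0.0, 0.0, a, -1.0,
        0.0, 0.0, b, 0.0,
    )
}

Mapping near to 1.0 and far to 0.0 concentrates floating-point precision where a conventional depth range wastes it, which is the usual motivation for a reverse-z setup.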
hslcolor.rs
//! This file implements what I refer to as HSL but which would precisely be called sHSL: a simple //! transformation of sRGB that creates a cylindrical space. HSL has the same problems with //! perceptual uniformity and general unsuitability for exact psychophysically-accurate //! representation as color as sRGB does, but it does have the advantage of being easy to display on //! a monitor and having some conception of common color attributes. HSL and HSV are very similar //! but have an important difference: *value* in HSV runs from black to fully saturated colors, //! whereas *lightness* or *luminosity* in HSL runs from black to fully saturated in the middle to //! white at the end. This makes the saturation component of HSL extremely inaccurate, because light //! colors can have a very high saturation even if they are extremely close to white. This space is //! mathematically cylindrical, but when you account for the actual differentiation of colors //! (saturation's actual importance varies with lightness) it forms a "bi-hexcone" model, where the //! hue component is actually a hexagon but simply stretched into a circle, and the area of a //! horizontal cross-section varies with lightness. A special note: some implementations of HSV and //! HSL are circular in nature, using polar coordinates explicitly. This implementation is instead //! hexagonal: first values are put on a hexagon, and then that hexagon is "squeezed" into a //! circle. This can cause small variations between Scarlet and other applications. //! Another small implementation note is that converting gray into HSL or HSV will give a hue of 0 //! degrees, although any hue could be used in its place. use std::f64; use std::f64::EPSILON; use std::str::FromStr; use bound::Bound; use color::{Color, RGBColor, XYZColor}; use coord::Coord; use csscolor::{parse_hsl_hsv_tuple, CSSParseError}; use illuminants::Illuminant; /// A color in the HSL color space, a direct transformation of the sRGB space. sHSL is used to /// distinguish this space from a similar transformation of a different RGB space, which can cause /// some confusion as other implementations of HSL (such as on the web) omit this distinction. /// # Example /// Shifting from red to yellow creates two colors of clearly different brightnesses. This is because /// HSL doesn't account for the perceptual difference in brightness of light and dark colors. /// /// ``` /// # use scarlet::prelude::*; /// # use scarlet::colors::HSLColor; /// let red = HSLColor{h: 20., s: 0.5, l: 0.5}; /// let yellow = HSLColor{h: 60., s: 0.5, l: 0.5}; /// println!("{} {}", red.convert::<RGBColor>().to_string(), yellow.convert::<RGBColor>().to_string()); /// // prints #BF6A40 #BFBF40 /// // note how the second one is strictly more light /// ``` #[derive(Debug, Copy, Clone, Serialize, Deserialize)] pub struct HSLColor { /// The hue component. Ranges from 0 to 360, as the angle in a cylindrical space. Exactly the same /// as the hue component of HSV. pub h: f64, /// The saturation component. Ranges between 0 and 1. Note that this is much less accurate to /// human perception than the chroma or saturation found in other, higher-fidelity color spaces. pub s: f64, /// The lightness component. Ranges from 0 to 1. Defined in HSL as the average of the largest and /// smallest color components in RGB, which sacrifices accuracy for convenience. 
pub l: f64, } impl Color for HSLColor { /// Converts from XYZ to HSL through RGB: thus, there is a limited precision because RGB colors /// are limited to integer values of R, G, and B. fn from_xyz(xyz: XYZColor) -> HSLColor { // first get RGB color let rgb = RGBColor::from_xyz(xyz); // this is sorta interesting: a hexagonal projection instead of the circular projection used // in CIEHCL. It turns out that, if you tilt the RGB cube and project it into a hexagon, the // equivalent of radius is simply the largest component minus the smallest component: adding // a constant to every component simply travels up and down vertically and doesn't change the // projection. // I call this chroma, but it's a very very rough estimate of the actual color attribute. // More info: https://en.wikipedia.org/wiki/HSL_and_HSV#Formal_derivation let components = [rgb.r, rgb.g, rgb.b]; let max_c = components.iter().cloned().fold(-1.0, f64::max); let min_c = components.iter().cloned().fold(2.0, f64::min); let chroma = max_c - min_c; // hue is crazy in a hexagon! no more trig functions for us! // it's technically the proportion of the length of the hexagon through the point, but it's // treated as degrees let mut hue = if chroma == 0.0 { // could be anything, undefined according to Wikipedia, in Scarlet just 0 for gray 0.0 } else if (max_c - rgb.r).abs() < EPSILON { // in red sector: find which part by comparing green and blue and scaling // adding green moves up on the hexagon, adding blue moves down: hence, linearity // the modulo makes sure it's in the range 0-360 (((rgb.g - rgb.b) / chroma) % 6.0) * 60.0 } else if (max_c - rgb.g).abs() < EPSILON { // similar to above, but you add an offset (((rgb.b - rgb.r) / chroma) % 6.0) * 60.0 + 120.0 } else { // same as above, different offset (((rgb.r - rgb.g) / chroma) % 6.0) * 60.0 + 240.0 }; // if hue still not in 0-360, add until it does: this can sometimes happen while hue < 0. { hue += 360.; } while hue >= 360. { hue -= 360.; } // saturation, scientifically speaking, is chroma adjusted for lightness. For HSL, it's // defined relative to the maximum chroma, which varies depending on the place on the // cone. Thus, I'll compute lightness first. // now we choose lightness as the average of the largest and smallest components. This // essentially translates to a double hex cone, quite the interesting structure! let lightness = (max_c + min_c) / 2.0; // now back to saturation let saturation = if (lightness - 1.0).abs() < EPSILON || lightness == 0.0 { // this would be a divide by 0 otherwise, just set it to 0 because it doesn't matter 0.0 } else { chroma / (1.0 - (2.0 * lightness - 1.0).abs()) }; HSLColor { h: hue, s: saturation, l: lightness, } } // Converts back to XYZ through RGB. 
fn to_xyz(&self, illuminant: Illuminant) -> XYZColor { // first get back chroma let chroma = (1.0 - (2.0 * self.l - 1.0).abs()) * self.s; // find the point with 0 lightness that matches ours in the other two components // intermediate value is the second-largest RGB value, where C is the largest because the // smallest is 0: call this x let x = chroma * (1.0 - ((self.h / 60.0) % 2.0 - 1.0).abs()); // now split based on which line of the hexagon we're on, i.e., which are the two largest // components let (r1, g1, b1) = if self.h <= 60.0 { (chroma, x, 0.0) } else if self.h <= 120.0 { (x, chroma, 0.0) } else if self.h <= 180.0 { (0.0, chroma, x) } else if self.h <= 240.0 { (0.0, x, chroma) } else if self.h <= 300.0 { (x, 0.0, chroma) } else { (chroma, 0.0, x) }; // now we add the right value to each component to get the correct lightness and scale back // to 0-255 let offset = self.l - chroma / 2.0; let r = r1 + offset; let g = g1 + offset; let b = b1 + offset; RGBColor { r, g, b }.to_xyz(illuminant) } } impl From<Coord> for HSLColor { fn from(c: Coord) -> HSLColor { HSLColor { h: c.x, s: c.y, l: c.z, } } } impl From<HSLColor> for Coord { fn from(val: HSLColor) -> Self { Coord { x: val.h, y: val.s, z: val.l, } } } impl Bound for HSLColor { fn bounds() -> [(f64, f64); 3] { [(0., 360.), (0., 1.), (0., 1.)] } } impl FromStr for HSLColor { type Err = CSSParseError; fn from_str(s: &str) -> Result<HSLColor, CSSParseError> { if!s.starts_with("hsl(") { return Err(CSSParseError::InvalidColorSyntax); } let tup: String = s.chars().skip(3).collect::<String>(); match parse_hsl_hsv_tuple(&tup) { Ok(res) => Ok(HSLColor { h: res.0, s: res.1, l: res.2, }), Err(_e) => Err(_e), } } } #[cfg(test)] mod tests { #[allow(unused_imports)] use super::*; use consts::TEST_PRECISION;
#[test] fn test_hsl_rgb_conversion() { let red_rgb = RGBColor { r: 1., g: 0., b: 0., }; let red_hsl: HSLColor = red_rgb.convert(); assert!(red_hsl.h.abs() <= 0.0001); assert!((red_hsl.s - 1.0).abs() <= 0.0001); assert!((red_hsl.l - 0.5).abs() <= 0.0001); assert!(red_hsl.distance(&red_rgb) < TEST_PRECISION); let lavender_hsl = HSLColor { h: 245.0, s: 0.5, l: 0.6, }; let lavender_rgb: RGBColor = lavender_hsl.convert(); assert_eq!(lavender_rgb.to_string(), "#6F66CC"); } #[test] fn test_hsl_string_parsing() { let red_hsl: HSLColor = "hsl(0, 120%, 50%)".parse().unwrap(); assert!(red_hsl.h.abs() <= 0.0001); assert!((red_hsl.s - 1.0).abs() <= 0.0001); assert!((red_hsl.l - 0.5).abs() <= 0.0001); let lavender_hsl: HSLColor = "hsl(-475, 50%, 60%)".parse().unwrap(); let lavender_rgb: RGBColor = lavender_hsl.convert(); assert_eq!(lavender_rgb.to_string(), "#6F66CC"); // test error assert!("hsl(254%, 0, 0)".parse::<HSLColor>().is_err()); } }
random_line_split
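Since `from_xyz` above interleaves the hexagonal hue math with the `Color` trait plumbing, here is a dependency-free restatement of just the RGB-to-HSL arithmetic for components already in 0.0..=1.0. It substitutes `rem_euclid` for the original's two normalization `while` loops; `rgb_to_hsl` is an illustrative name, not Scarlet API.

fn rgb_to_hsl(r: f64, g: f64, b: f64) -> (f64, f64, f64) {
    let max_c = r.max(g).max(b);
    let min_c = r.min(g).min(b);
    let chroma = max_c - min_c;
    // Hexagonal hue: the sector is picked by the largest component.
    let hue = if chroma == 0.0 {
        0.0 // gray: hue is arbitrary, 0 by convention
    } else if (max_c - r).abs() < f64::EPSILON {
        (((g - b) / chroma) % 6.0) * 60.0
    } else if (max_c - g).abs() < f64::EPSILON {
        (((b - r) / chroma) % 6.0) * 60.0 + 120.0
    } else {
        (((r - g) / chroma) % 6.0) * 60.0 + 240.0
    };
    // Normalize into 0..360 (the original uses two while loops for this).
    let hue = hue.rem_euclid(360.0);
    let lightness = (max_c + min_c) / 2.0;
    let saturation = if (lightness - 1.0).abs() < f64::EPSILON || lightness == 0.0 {
        0.0 // avoid dividing by zero at pure black and pure white
    } else {
        chroma / (1.0 - (2.0 * lightness - 1.0).abs())
    };
    (hue, saturation, lightness)
}

fn main() {
    // Pure red: hue 0, full saturation, half lightness.
    assert_eq!(rgb_to_hsl(1.0, 0.0, 0.0), (0.0, 1.0, 0.5));
    // A near-white pastel still reports full saturation: exactly the
    // inaccuracy the module docs warn about.
    let (_h, s, l) = rgb_to_hsl(1.0, 0.9, 0.9);
    assert!(s > 0.99 && l > 0.9);
}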
hslcolor.rs
//! This file implements what I refer to as HSL but which would precisely be called sHSL: a simple //! transformation of sRGB that creates a cylindrical space. HSL has the same problems with //! perceptual uniformity and general unsuitability for exact psychophysically-accurate //! representation as color as sRGB does, but it does have the advantage of being easy to display on //! a monitor and having some conception of common color attributes. HSL and HSV are very similar //! but have an important difference: *value* in HSV runs from black to fully saturated colors, //! whereas *lightness* or *luminosity* in HSL runs from black to fully saturated in the middle to //! white at the end. This makes the saturation component of HSL extremely inaccurate, because light //! colors can have a very high saturation even if they are extremely close to white. This space is //! mathematically cylindrical, but when you account for the actual differentiation of colors //! (saturation's actual importance varies with lightness) it forms a "bi-hexcone" model, where the //! hue component is actually a hexagon but simply stretched into a circle, and the area of a //! horizontal cross-section varies with lightness. A special note: some implementations of HSV and //! HSL are circular in nature, using polar coordinates explicitly. This implementation is instead //! hexagonal: first values are put on a hexagon, and then that hexagon is "squeezed" into a //! circle. This can cause small variations between Scarlet and other applications. //! Another small implementation note is that converting gray into HSL or HSV will give a hue of 0 //! degrees, although any hue could be used in its place. use std::f64; use std::f64::EPSILON; use std::str::FromStr; use bound::Bound; use color::{Color, RGBColor, XYZColor}; use coord::Coord; use csscolor::{parse_hsl_hsv_tuple, CSSParseError}; use illuminants::Illuminant; /// A color in the HSL color space, a direct transformation of the sRGB space. sHSL is used to /// distinguish this space from a similar transformation of a different RGB space, which can cause /// some confusion as other implementations of HSL (such as on the web) omit this distinction. /// # Example /// Shifting from red to yellow creates two colors of clearly different brightnesses. This is because /// HSL doesn't account for the perceptual difference in brightness of light and dark colors. /// /// ``` /// # use scarlet::prelude::*; /// # use scarlet::colors::HSLColor; /// let red = HSLColor{h: 20., s: 0.5, l: 0.5}; /// let yellow = HSLColor{h: 60., s: 0.5, l: 0.5}; /// println!("{} {}", red.convert::<RGBColor>().to_string(), yellow.convert::<RGBColor>().to_string()); /// // prints #BF6A40 #BFBF40 /// // note how the second one is strictly more light /// ``` #[derive(Debug, Copy, Clone, Serialize, Deserialize)] pub struct HSLColor { /// The hue component. Ranges from 0 to 360, as the angle in a cylindrical space. Exactly the same /// as the hue component of HSV. pub h: f64, /// The saturation component. Ranges between 0 and 1. Note that this is much less accurate to /// human perception than the chroma or saturation found in other, higher-fidelity color spaces. pub s: f64, /// The lightness component. Ranges from 0 to 1. Defined in HSL as the average of the largest and /// smallest color components in RGB, which sacrifices accuracy for convenience. 
pub l: f64, } impl Color for HSLColor { /// Converts from XYZ to HSL through RGB: thus, there is a limited precision because RGB colors /// are limited to integer values of R, G, and B. fn from_xyz(xyz: XYZColor) -> HSLColor { // first get RGB color let rgb = RGBColor::from_xyz(xyz); // this is sorta interesting: a hexagonal projection instead of the circular projection used // in CIEHCL. It turns out that, if you tilt the RGB cube and project it into a hexagon, the // equivalent of radius is simply the largest component minus the smallest component: adding // a constant to every component simply travels up and down vertically and doesn't change the // projection. // I call this chroma, but it's a very very rough estimate of the actual color attribute. // More info: https://en.wikipedia.org/wiki/HSL_and_HSV#Formal_derivation let components = [rgb.r, rgb.g, rgb.b]; let max_c = components.iter().cloned().fold(-1.0, f64::max); let min_c = components.iter().cloned().fold(2.0, f64::min); let chroma = max_c - min_c; // hue is crazy in a hexagon! no more trig functions for us! // it's technically the proportion of the length of the hexagon through the point, but it's // treated as degrees let mut hue = if chroma == 0.0 { // could be anything, undefined according to Wikipedia, in Scarlet just 0 for gray 0.0 } else if (max_c - rgb.r).abs() < EPSILON { // in red sector: find which part by comparing green and blue and scaling // adding green moves up on the hexagon, adding blue moves down: hence, linearity // the modulo makes sure it's in the range 0-360 (((rgb.g - rgb.b) / chroma) % 6.0) * 60.0 } else if (max_c - rgb.g).abs() < EPSILON { // similar to above, but you add an offset (((rgb.b - rgb.r) / chroma) % 6.0) * 60.0 + 120.0 } else { // same as above, different offset (((rgb.r - rgb.g) / chroma) % 6.0) * 60.0 + 240.0 }; // if hue still not in 0-360, add until it does: this can sometimes happen while hue < 0. { hue += 360.; } while hue >= 360. { hue -= 360.; } // saturation, scientifically speaking, is chroma adjusted for lightness. For HSL, it's // defined relative to the maximum chroma, which varies depending on the place on the // cone. Thus, I'll compute lightness first. // now we choose lightness as the average of the largest and smallest components. This // essentially translates to a double hex cone, quite the interesting structure! let lightness = (max_c + min_c) / 2.0; // now back to saturation let saturation = if (lightness - 1.0).abs() < EPSILON || lightness == 0.0 { // this would be a divide by 0 otherwise, just set it to 0 because it doesn't matter 0.0 } else { chroma / (1.0 - (2.0 * lightness - 1.0).abs()) }; HSLColor { h: hue, s: saturation, l: lightness, } } // Converts back to XYZ through RGB. 
fn to_xyz(&self, illuminant: Illuminant) -> XYZColor { // first get back chroma let chroma = (1.0 - (2.0 * self.l - 1.0).abs()) * self.s; // find the point with 0 lightness that matches ours in the other two components // intermediate value is the second-largest RGB value, where C is the largest because the // smallest is 0: call this x let x = chroma * (1.0 - ((self.h / 60.0) % 2.0 - 1.0).abs()); // now split based on which line of the hexagon we're on, i.e., which are the two largest // components let (r1, g1, b1) = if self.h <= 60.0 { (chroma, x, 0.0) } else if self.h <= 120.0 { (x, chroma, 0.0) } else if self.h <= 180.0 { (0.0, chroma, x) } else if self.h <= 240.0 { (0.0, x, chroma) } else if self.h <= 300.0 { (x, 0.0, chroma) } else { (chroma, 0.0, x) }; // now we add the right value to each component to get the correct lightness and scale back // to 0-255 let offset = self.l - chroma / 2.0; let r = r1 + offset; let g = g1 + offset; let b = b1 + offset; RGBColor { r, g, b }.to_xyz(illuminant) } } impl From<Coord> for HSLColor { fn from(c: Coord) -> HSLColor
} impl From<HSLColor> for Coord { fn from(val: HSLColor) -> Self { Coord { x: val.h, y: val.s, z: val.l, } } } impl Bound for HSLColor { fn bounds() -> [(f64, f64); 3] { [(0., 360.), (0., 1.), (0., 1.)] } } impl FromStr for HSLColor { type Err = CSSParseError; fn from_str(s: &str) -> Result<HSLColor, CSSParseError> { if!s.starts_with("hsl(") { return Err(CSSParseError::InvalidColorSyntax); } let tup: String = s.chars().skip(3).collect::<String>(); match parse_hsl_hsv_tuple(&tup) { Ok(res) => Ok(HSLColor { h: res.0, s: res.1, l: res.2, }), Err(_e) => Err(_e), } } } #[cfg(test)] mod tests { #[allow(unused_imports)] use super::*; use consts::TEST_PRECISION; #[test] fn test_hsl_rgb_conversion() { let red_rgb = RGBColor { r: 1., g: 0., b: 0., }; let red_hsl: HSLColor = red_rgb.convert(); assert!(red_hsl.h.abs() <= 0.0001); assert!((red_hsl.s - 1.0).abs() <= 0.0001); assert!((red_hsl.l - 0.5).abs() <= 0.0001); assert!(red_hsl.distance(&red_rgb) < TEST_PRECISION); let lavender_hsl = HSLColor { h: 245.0, s: 0.5, l: 0.6, }; let lavender_rgb: RGBColor = lavender_hsl.convert(); assert_eq!(lavender_rgb.to_string(), "#6F66CC"); } #[test] fn test_hsl_string_parsing() { let red_hsl: HSLColor = "hsl(0, 120%, 50%)".parse().unwrap(); assert!(red_hsl.h.abs() <= 0.0001); assert!((red_hsl.s - 1.0).abs() <= 0.0001); assert!((red_hsl.l - 0.5).abs() <= 0.0001); let lavender_hsl: HSLColor = "hsl(-475, 50%, 60%)".parse().unwrap(); let lavender_rgb: RGBColor = lavender_hsl.convert(); assert_eq!(lavender_rgb.to_string(), "#6F66CC"); // test error assert!("hsl(254%, 0, 0)".parse::<HSLColor>().is_err()); } }
{ HSLColor { h: c.x, s: c.y, l: c.z, } }
identifier_body
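For the opposite direction, the core of `to_xyz` inverts the hexagon walk before handing off to `RGBColor::to_xyz`. Again a standalone sketch with an illustrative name (`hsl_to_rgb` is not Scarlet API); h is in degrees, s and l in 0.0..=1.0.

fn hsl_to_rgb(h: f64, s: f64, l: f64) -> (f64, f64, f64) {
    let chroma = (1.0 - (2.0 * l - 1.0).abs()) * s;
    // Second-largest component, given that the smallest is 0 at this stage.
    let x = chroma * (1.0 - ((h / 60.0) % 2.0 - 1.0).abs());
    let (r1, g1, b1) = if h <= 60.0 {
        (chroma, x, 0.0)
    } else if h <= 120.0 {
        (x, chroma, 0.0)
    } else if h <= 180.0 {
        (0.0, chroma, x)
    } else if h <= 240.0 {
        (0.0, x, chroma)
    } else if h <= 300.0 {
        (x, 0.0, chroma)
    } else {
        (chroma, 0.0, x)
    };
    // Lift every component so the max/min average matches the target lightness.
    let offset = l - chroma / 2.0;
    (r1 + offset, g1 + offset, b1 + offset)
}

fn main() {
    // Hue 120 at full saturation and half lightness is pure green.
    let (r, g, b) = hsl_to_rgb(120.0, 1.0, 0.5);
    assert!(r.abs() < 1e-12 && (g - 1.0).abs() < 1e-12 && b.abs() < 1e-12);
}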
hslcolor.rs
//! This file implements what I refer to as HSL but which would precisely be called sHSL: a simple //! transformation of sRGB that creates a cylindrical space. HSL has the same problems with //! perceptual uniformity and general unsuitability for exact psychophysically-accurate //! representation as color as sRGB does, but it does have the advantage of being easy to display on //! a monitor and having some conception of common color attributes. HSL and HSV are very similar //! but have an important difference: *value* in HSV runs from black to fully saturated colors, //! whereas *lightness* or *luminosity* in HSL runs from black to fully saturated in the middle to //! white at the end. This makes the saturation component of HSL extremely inaccurate, because light //! colors can have a very high saturation even if they are extremely close to white. This space is //! mathematically cylindrical, but when you account for the actual differentiation of colors //! (saturation's actual importance varies with lightness) it forms a "bi-hexcone" model, where the //! hue component is actually a hexagon but simply stretched into a circle, and the area of a //! horizontal cross-section varies with lightness. A special note: some implementations of HSV and //! HSL are circular in nature, using polar coordinates explicitly. This implementation is instead //! hexagonal: first values are put on a hexagon, and then that hexagon is "squeezed" into a //! circle. This can cause small variations between Scarlet and other applications. //! Another small implementation note is that converting gray into HSL or HSV will give a hue of 0 //! degrees, although any hue could be used in its place. use std::f64; use std::f64::EPSILON; use std::str::FromStr; use bound::Bound; use color::{Color, RGBColor, XYZColor}; use coord::Coord; use csscolor::{parse_hsl_hsv_tuple, CSSParseError}; use illuminants::Illuminant; /// A color in the HSL color space, a direct transformation of the sRGB space. sHSL is used to /// distinguish this space from a similar transformation of a different RGB space, which can cause /// some confusion as other implementations of HSL (such as on the web) omit this distinction. /// # Example /// Shifting from red to yellow creates two colors of clearly different brightnesses. This is because /// HSL doesn't account for the perceptual difference in brightness of light and dark colors. /// /// ``` /// # use scarlet::prelude::*; /// # use scarlet::colors::HSLColor; /// let red = HSLColor{h: 20., s: 0.5, l: 0.5}; /// let yellow = HSLColor{h: 60., s: 0.5, l: 0.5}; /// println!("{} {}", red.convert::<RGBColor>().to_string(), yellow.convert::<RGBColor>().to_string()); /// // prints #BF6A40 #BFBF40 /// // note how the second one is strictly more light /// ``` #[derive(Debug, Copy, Clone, Serialize, Deserialize)] pub struct HSLColor { /// The hue component. Ranges from 0 to 360, as the angle in a cylindrical space. Exactly the same /// as the hue component of HSV. pub h: f64, /// The saturation component. Ranges between 0 and 1. Note that this is much less accurate to /// human perception than the chroma or saturation found in other, higher-fidelity color spaces. pub s: f64, /// The lightness component. Ranges from 0 to 1. Defined in HSL as the average of the largest and /// smallest color components in RGB, which sacrifices accuracy for convenience. 
pub l: f64, } impl Color for HSLColor { /// Converts from XYZ to HSL through RGB: thus, there is a limited precision because RGB colors /// are limited to integer values of R, G, and B. fn from_xyz(xyz: XYZColor) -> HSLColor { // first get RGB color let rgb = RGBColor::from_xyz(xyz); // this is sorta interesting: a hexagonal projection instead of the circular projection used // in CIEHCL. It turns out that, if you tilt the RGB cube and project it into a hexagon, the // equivalent of radius is simply the largest component minus the smallest component: adding // a constant to every component simply travels up and down vertically and doesn't change the // projection. // I call this chroma, but it's a very very rough estimate of the actual color attribute. // More info: https://en.wikipedia.org/wiki/HSL_and_HSV#Formal_derivation let components = [rgb.r, rgb.g, rgb.b]; let max_c = components.iter().cloned().fold(-1.0, f64::max); let min_c = components.iter().cloned().fold(2.0, f64::min); let chroma = max_c - min_c; // hue is crazy in a hexagon! no more trig functions for us! // it's technically the proportion of the length of the hexagon through the point, but it's // treated as degrees let mut hue = if chroma == 0.0 { // could be anything, undefined according to Wikipedia, in Scarlet just 0 for gray 0.0 } else if (max_c - rgb.r).abs() < EPSILON { // in red sector: find which part by comparing green and blue and scaling // adding green moves up on the hexagon, adding blue moves down: hence, linearity // the modulo makes sure it's in the range 0-360 (((rgb.g - rgb.b) / chroma) % 6.0) * 60.0 } else if (max_c - rgb.g).abs() < EPSILON { // similar to above, but you add an offset (((rgb.b - rgb.r) / chroma) % 6.0) * 60.0 + 120.0 } else { // same as above, different offset (((rgb.r - rgb.g) / chroma) % 6.0) * 60.0 + 240.0 }; // if hue still not in 0-360, add until it does: this can sometimes happen while hue < 0. { hue += 360.; } while hue >= 360. { hue -= 360.; } // saturation, scientifically speaking, is chroma adjusted for lightness. For HSL, it's // defined relative to the maximum chroma, which varies depending on the place on the // cone. Thus, I'll compute lightness first. // now we choose lightness as the average of the largest and smallest components. This // essentially translates to a double hex cone, quite the interesting structure! let lightness = (max_c + min_c) / 2.0; // now back to saturation let saturation = if (lightness - 1.0).abs() < EPSILON || lightness == 0.0 { // this would be a divide by 0 otherwise, just set it to 0 because it doesn't matter 0.0 } else { chroma / (1.0 - (2.0 * lightness - 1.0).abs()) }; HSLColor { h: hue, s: saturation, l: lightness, } } // Converts back to XYZ through RGB. fn to_xyz(&self, illuminant: Illuminant) -> XYZColor { // first get back chroma let chroma = (1.0 - (2.0 * self.l - 1.0).abs()) * self.s; // find the point with 0 lightness that matches ours in the other two components // intermediate value is the second-largest RGB value, where C is the largest because the // smallest is 0: call this x let x = chroma * (1.0 - ((self.h / 60.0) % 2.0 - 1.0).abs()); // now split based on which line of the hexagon we're on, i.e., which are the two largest // components let (r1, g1, b1) = if self.h <= 60.0 { (chroma, x, 0.0) } else if self.h <= 120.0
else if self.h <= 180.0 { (0.0, chroma, x) } else if self.h <= 240.0 { (0.0, x, chroma) } else if self.h <= 300.0 { (x, 0.0, chroma) } else { (chroma, 0.0, x) }; // now we add the right value to each component to get the correct lightness and scale back // to 0-255 let offset = self.l - chroma / 2.0; let r = r1 + offset; let g = g1 + offset; let b = b1 + offset; RGBColor { r, g, b }.to_xyz(illuminant) } } impl From<Coord> for HSLColor { fn from(c: Coord) -> HSLColor { HSLColor { h: c.x, s: c.y, l: c.z, } } } impl From<HSLColor> for Coord { fn from(val: HSLColor) -> Self { Coord { x: val.h, y: val.s, z: val.l, } } } impl Bound for HSLColor { fn bounds() -> [(f64, f64); 3] { [(0., 360.), (0., 1.), (0., 1.)] } } impl FromStr for HSLColor { type Err = CSSParseError; fn from_str(s: &str) -> Result<HSLColor, CSSParseError> { if!s.starts_with("hsl(") { return Err(CSSParseError::InvalidColorSyntax); } let tup: String = s.chars().skip(3).collect::<String>(); match parse_hsl_hsv_tuple(&tup) { Ok(res) => Ok(HSLColor { h: res.0, s: res.1, l: res.2, }), Err(_e) => Err(_e), } } } #[cfg(test)] mod tests { #[allow(unused_imports)] use super::*; use consts::TEST_PRECISION; #[test] fn test_hsl_rgb_conversion() { let red_rgb = RGBColor { r: 1., g: 0., b: 0., }; let red_hsl: HSLColor = red_rgb.convert(); assert!(red_hsl.h.abs() <= 0.0001); assert!((red_hsl.s - 1.0).abs() <= 0.0001); assert!((red_hsl.l - 0.5).abs() <= 0.0001); assert!(red_hsl.distance(&red_rgb) < TEST_PRECISION); let lavender_hsl = HSLColor { h: 245.0, s: 0.5, l: 0.6, }; let lavender_rgb: RGBColor = lavender_hsl.convert(); assert_eq!(lavender_rgb.to_string(), "#6F66CC"); } #[test] fn test_hsl_string_parsing() { let red_hsl: HSLColor = "hsl(0, 120%, 50%)".parse().unwrap(); assert!(red_hsl.h.abs() <= 0.0001); assert!((red_hsl.s - 1.0).abs() <= 0.0001); assert!((red_hsl.l - 0.5).abs() <= 0.0001); let lavender_hsl: HSLColor = "hsl(-475, 50%, 60%)".parse().unwrap(); let lavender_rgb: RGBColor = lavender_hsl.convert(); assert_eq!(lavender_rgb.to_string(), "#6F66CC"); // test error assert!("hsl(254%, 0, 0)".parse::<HSLColor>().is_err()); } }
{ (x, chroma, 0.0) }
conditional_block
hslcolor.rs
//! This file implements what I refer to as HSL but which would precisely be called sHSL: a simple //! transformation of sRGB that creates a cylindrical space. HSL has the same problems with //! perceptual uniformity and general unsuitability for exact psychophysically-accurate //! representation as color as sRGB does, but it does have the advantage of being easy to display on //! a monitor and having some conception of common color attributes. HSL and HSV are very similar //! but have an important difference: *value* in HSV runs from black to fully saturated colors, //! whereas *lightness* or *luminosity* in HSL runs from black to fully saturated in the middle to //! white at the end. This makes the saturation component of HSL extremely inaccurate, because light //! colors can have a very high saturation even if they are extremely close to white. This space is //! mathematically cylindrical, but when you account for the actual differentiation of colors //! (saturation's actual importance varies with lightness) it forms a "bi-hexcone" model, where the //! hue component is actually a hexagon but simply stretched into a circle, and the area of a //! horizontal cross-section varies with lightness. A special note: some implementations of HSV and //! HSL are circular in nature, using polar coordinates explicitly. This implementation is instead //! hexagonal: first values are put on a hexagon, and then that hexagon is "squeezed" into a //! circle. This can cause small variations between Scarlet and other applications. //! Another small implementation note is that converting gray into HSL or HSV will give a hue of 0 //! degrees, although any hue could be used in its place. use std::f64; use std::f64::EPSILON; use std::str::FromStr; use bound::Bound; use color::{Color, RGBColor, XYZColor}; use coord::Coord; use csscolor::{parse_hsl_hsv_tuple, CSSParseError}; use illuminants::Illuminant; /// A color in the HSL color space, a direct transformation of the sRGB space. sHSL is used to /// distinguish this space from a similar transformation of a different RGB space, which can cause /// some confusion as other implementations of HSL (such as on the web) omit this distinction. /// # Example /// Shifting from red to yellow creates two colors of clearly different brightnesses. This is because /// HSL doesn't account for the perceptual difference in brightness of light and dark colors. /// /// ``` /// # use scarlet::prelude::*; /// # use scarlet::colors::HSLColor; /// let red = HSLColor{h: 20., s: 0.5, l: 0.5}; /// let yellow = HSLColor{h: 60., s: 0.5, l: 0.5}; /// println!("{} {}", red.convert::<RGBColor>().to_string(), yellow.convert::<RGBColor>().to_string()); /// // prints #BF6A40 #BFBF40 /// // note how the second one is strictly more light /// ``` #[derive(Debug, Copy, Clone, Serialize, Deserialize)] pub struct HSLColor { /// The hue component. Ranges from 0 to 360, as the angle in a cylindrical space. Exactly the same /// as the hue component of HSV. pub h: f64, /// The saturation component. Ranges between 0 and 1. Note that this is much less accurate to /// human perception than the chroma or saturation found in other, higher-fidelity color spaces. pub s: f64, /// The lightness component. Ranges from 0 to 1. Defined in HSL as the average of the largest and /// smallest color components in RGB, which sacrifices accuracy for convenience. 
pub l: f64, } impl Color for HSLColor { /// Converts from XYZ to HSL through RGB: thus, there is a limited precision because RGB colors /// are limited to integer values of R, G, and B. fn from_xyz(xyz: XYZColor) -> HSLColor { // first get RGB color let rgb = RGBColor::from_xyz(xyz); // this is sorta interesting: a hexagonal projection instead of the circular projection used // in CIEHCL. It turns out that, if you tilt the RGB cube and project it into a hexagon, the // equivalent of radius is simply the largest component minus the smallest component: adding // a constant to every component simply travels up and down vertically and doesn't change the // projection. // I call this chroma, but it's a very very rough estimate of the actual color attribute. // More info: https://en.wikipedia.org/wiki/HSL_and_HSV#Formal_derivation let components = [rgb.r, rgb.g, rgb.b]; let max_c = components.iter().cloned().fold(-1.0, f64::max); let min_c = components.iter().cloned().fold(2.0, f64::min); let chroma = max_c - min_c; // hue is crazy in a hexagon! no more trig functions for us! // it's technically the proportion of the length of the hexagon through the point, but it's // treated as degrees let mut hue = if chroma == 0.0 { // could be anything, undefined according to Wikipedia, in Scarlet just 0 for gray 0.0 } else if (max_c - rgb.r).abs() < EPSILON { // in red sector: find which part by comparing green and blue and scaling // adding green moves up on the hexagon, adding blue moves down: hence, linearity // the modulo makes sure it's in the range 0-360 (((rgb.g - rgb.b) / chroma) % 6.0) * 60.0 } else if (max_c - rgb.g).abs() < EPSILON { // similar to above, but you add an offset (((rgb.b - rgb.r) / chroma) % 6.0) * 60.0 + 120.0 } else { // same as above, different offset (((rgb.r - rgb.g) / chroma) % 6.0) * 60.0 + 240.0 }; // if hue still not in 0-360, add until it does: this can sometimes happen while hue < 0. { hue += 360.; } while hue >= 360. { hue -= 360.; } // saturation, scientifically speaking, is chroma adjusted for lightness. For HSL, it's // defined relative to the maximum chroma, which varies depending on the place on the // cone. Thus, I'll compute lightness first. // now we choose lightness as the average of the largest and smallest components. This // essentially translates to a double hex cone, quite the interesting structure! let lightness = (max_c + min_c) / 2.0; // now back to saturation let saturation = if (lightness - 1.0).abs() < EPSILON || lightness == 0.0 { // this would be a divide by 0 otherwise, just set it to 0 because it doesn't matter 0.0 } else { chroma / (1.0 - (2.0 * lightness - 1.0).abs()) }; HSLColor { h: hue, s: saturation, l: lightness, } } // Converts back to XYZ through RGB. 
fn to_xyz(&self, illuminant: Illuminant) -> XYZColor { // first get back chroma let chroma = (1.0 - (2.0 * self.l - 1.0).abs()) * self.s; // find the point with 0 lightness that matches ours in the other two components // intermediate value is the second-largest RGB value, where C is the largest because the // smallest is 0: call this x let x = chroma * (1.0 - ((self.h / 60.0) % 2.0 - 1.0).abs()); // now split based on which line of the hexagon we're on, i.e., which are the two largest // components let (r1, g1, b1) = if self.h <= 60.0 { (chroma, x, 0.0) } else if self.h <= 120.0 { (x, chroma, 0.0) } else if self.h <= 180.0 { (0.0, chroma, x) } else if self.h <= 240.0 { (0.0, x, chroma) } else if self.h <= 300.0 { (x, 0.0, chroma) } else { (chroma, 0.0, x) }; // now we add the right value to each component to get the correct lightness and scale back // to 0-255 let offset = self.l - chroma / 2.0; let r = r1 + offset; let g = g1 + offset; let b = b1 + offset; RGBColor { r, g, b }.to_xyz(illuminant) } } impl From<Coord> for HSLColor { fn from(c: Coord) -> HSLColor { HSLColor { h: c.x, s: c.y, l: c.z, } } } impl From<HSLColor> for Coord { fn from(val: HSLColor) -> Self { Coord { x: val.h, y: val.s, z: val.l, } } } impl Bound for HSLColor { fn bounds() -> [(f64, f64); 3] { [(0., 360.), (0., 1.), (0., 1.)] } } impl FromStr for HSLColor { type Err = CSSParseError; fn from_str(s: &str) -> Result<HSLColor, CSSParseError> { if!s.starts_with("hsl(") { return Err(CSSParseError::InvalidColorSyntax); } let tup: String = s.chars().skip(3).collect::<String>(); match parse_hsl_hsv_tuple(&tup) { Ok(res) => Ok(HSLColor { h: res.0, s: res.1, l: res.2, }), Err(_e) => Err(_e), } } } #[cfg(test)] mod tests { #[allow(unused_imports)] use super::*; use consts::TEST_PRECISION; #[test] fn
() { let red_rgb = RGBColor { r: 1., g: 0., b: 0., }; let red_hsl: HSLColor = red_rgb.convert(); assert!(red_hsl.h.abs() <= 0.0001); assert!((red_hsl.s - 1.0).abs() <= 0.0001); assert!((red_hsl.l - 0.5).abs() <= 0.0001); assert!(red_hsl.distance(&red_rgb) < TEST_PRECISION); let lavender_hsl = HSLColor { h: 245.0, s: 0.5, l: 0.6, }; let lavender_rgb: RGBColor = lavender_hsl.convert(); assert_eq!(lavender_rgb.to_string(), "#6F66CC"); } #[test] fn test_hsl_string_parsing() { let red_hsl: HSLColor = "hsl(0, 120%, 50%)".parse().unwrap(); assert!(red_hsl.h.abs() <= 0.0001); assert!((red_hsl.s - 1.0).abs() <= 0.0001); assert!((red_hsl.l - 0.5).abs() <= 0.0001); let lavender_hsl: HSLColor = "hsl(-475, 50%, 60%)".parse().unwrap(); let lavender_rgb: RGBColor = lavender_hsl.convert(); assert_eq!(lavender_rgb.to_string(), "#6F66CC"); // test error assert!("hsl(254%, 0, 0)".parse::<HSLColor>().is_err()); } }
test_hsl_rgb_conversion
identifier_name
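One arithmetic note on the `hsl(-475, 50%, 60%)` test case above: hues outside 0-360 wrap around, and -475 degrees is the same angle as the 245.0 used for `lavender_hsl` in the conversion test, which is why both spellings produce #6F66CC.

fn main() {
    // -475 + 2 * 360 = 245, the same hue as the lavender test color.
    assert_eq!((-475.0f64).rem_euclid(360.0), 245.0);
}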
channel_layout.rs
use std::{array, ffi::CString, fmt, mem, ptr, slice}; use crate::{ ffi::{AVChannel, AVChannelOrder, *}, Error, }; // new channel layout since 5.1 #[derive(Copy, Clone, PartialEq, Eq, Debug)] #[repr(i32)] pub enum Channel { None = AVChannel::AV_CHAN_NONE.0, FrontLeft = AVChannel::AV_CHAN_FRONT_LEFT.0, FrontRight = AVChannel::AV_CHAN_FRONT_RIGHT.0, FrontCenter = AVChannel::AV_CHAN_FRONT_CENTER.0, LowFrequency = AVChannel::AV_CHAN_LOW_FREQUENCY.0, BackLeft = AVChannel::AV_CHAN_BACK_LEFT.0, BackRight = AVChannel::AV_CHAN_BACK_RIGHT.0, FrontLeftOfCenter = AVChannel::AV_CHAN_FRONT_LEFT_OF_CENTER.0, FrontRightOfCenter = AVChannel::AV_CHAN_FRONT_RIGHT_OF_CENTER.0, BackCenter = AVChannel::AV_CHAN_BACK_CENTER.0, SideLeft = AVChannel::AV_CHAN_SIDE_LEFT.0, SideRight = AVChannel::AV_CHAN_SIDE_RIGHT.0, TopCenter = AVChannel::AV_CHAN_TOP_CENTER.0, TopFrontLeft = AVChannel::AV_CHAN_TOP_FRONT_LEFT.0, TopFrontCenter = AVChannel::AV_CHAN_TOP_FRONT_CENTER.0, TopFrontRight = AVChannel::AV_CHAN_TOP_FRONT_RIGHT.0, TopBackLeft = AVChannel::AV_CHAN_TOP_BACK_LEFT.0, TopBackCenter = AVChannel::AV_CHAN_TOP_BACK_CENTER.0, TopBackRight = AVChannel::AV_CHAN_TOP_BACK_RIGHT.0, /// Stereo downmix. StereoLeft = AVChannel::AV_CHAN_STEREO_LEFT.0, /// Stereo downmix. StereoRight = AVChannel::AV_CHAN_STEREO_RIGHT.0, WideLeft = AVChannel::AV_CHAN_WIDE_LEFT.0, WideRight = AVChannel::AV_CHAN_WIDE_RIGHT.0, SurroundDirectLeft = AVChannel::AV_CHAN_SURROUND_DIRECT_LEFT.0, SurroundDirectRight = AVChannel::AV_CHAN_SURROUND_DIRECT_RIGHT.0, LowFrequency2 = AVChannel::AV_CHAN_LOW_FREQUENCY_2.0, TopSideLeft = AVChannel::AV_CHAN_TOP_SIDE_LEFT.0, TopSideRight = AVChannel::AV_CHAN_TOP_SIDE_RIGHT.0, BottomFrontCenter = AVChannel::AV_CHAN_BOTTOM_FRONT_CENTER.0, BottomFrontLeft = AVChannel::AV_CHAN_BOTTOM_FRONT_LEFT.0, BottomFrontRight = AVChannel::AV_CHAN_BOTTOM_FRONT_RIGHT.0, Unknown = AVChannel::AV_CHAN_UNKNOWN.0, } #[derive(Copy, Clone, PartialEq, Eq, Debug)] #[repr(u32)] pub enum ChannelOrder { Unspecified = AVChannelOrder::AV_CHANNEL_ORDER_UNSPEC.0, Native = AVChannelOrder::AV_CHANNEL_ORDER_NATIVE.0, Custom = AVChannelOrder::AV_CHANNEL_ORDER_CUSTOM.0, Ambisonic = AVChannelOrder::AV_CHANNEL_ORDER_AMBISONIC.0, } #[derive(Copy, Clone)] pub struct ChannelLayout(AVChannelLayout); /// SAFETY: these are not auto-implemented due to the `opaque: *mut c_void` in /// `AVChannelLayout`. The `ChannelLayout` wrapper does not expose `opaque` /// directly, but only through `From` conversion into an `AVChannelLayout`, /// which doesn't have these unsafe impls. 
unsafe impl Sync for ChannelLayout {} unsafe impl Send for ChannelLayout {} type ChannelData = AVChannelLayout__bindgen_ty_1; fn alloc_custom_channels(channels: &[CustomChannel]) -> *mut AVChannelCustom { unsafe { let map = av_malloc_array(channels.len(), mem::size_of::<AVChannelCustom>()).cast::<AVChannelCustom>(); if map.is_null() { panic!("out of memory") } for (i, c) in channels.iter().enumerate() { *map.offset(i as isize) = c.0.clone(); } map } } impl ChannelLayout { pub const fn new() -> Self { Self(AVChannelLayout { order: AVChannelOrder::AV_CHANNEL_ORDER_NATIVE, nb_channels: 0, u: ChannelData { mask: 0 }, opaque: ptr::null_mut(), }) } pub fn default(n_channels: i32) -> ChannelLayout { let mut layout = Self::new(); unsafe { av_channel_layout_default(&mut layout.0 as *mut _, n_channels); } layout } pub fn from_name(name: impl Into<Vec<u8>>) -> Result<ChannelLayout, Error> { let s = CString::new(name.into()).unwrap(); let mut layout = Self::new(); unsafe { match av_channel_layout_from_string(&mut layout.0 as *mut _, s.as_ptr()) { 0 => Ok(layout), err => Err(Error::from(err)), } } } pub fn custom(channels: &[CustomChannel]) -> Self { assert!(channels.len() < i32::MAX as usize); Self(AVChannelLayout { order: AVChannelOrder::AV_CHANNEL_ORDER_CUSTOM, nb_channels: channels.len() as i32, u: ChannelData { map: alloc_custom_channels(channels), }, opaque: ptr::null_mut(), }) } const fn const_clone(&self) -> Self { Self(AVChannelLayout { order: self.0.order, nb_channels: self.0.nb_channels, u: self.0.u, opaque: self.0.opaque, }) } pub const fn native(channels: &[Channel]) -> Self { Self::new().with_channels_native(channels) } pub const fn with_channels_native(&self, channels: &[Channel]) -> Self { let mut layout = self.const_clone(); let mask = { let mut mask = 0; let mut idx = 0; while idx < channels.len() { let ch = channels[idx]; // always advance the index, even for skipped entries, so a Channel::None can't loop forever idx += 1; if ch as u64 == Channel::None as u64 { continue; } mask |= 1 << ch as u64; } mask }; unsafe { layout.0.u.mask |= mask; layout.0.nb_channels = layout.0.u.mask.count_ones() as i32; } layout } pub fn is_zeroed(&self) -> bool { unsafe { self.0.order == AVChannelOrder(0) && self.0.nb_channels == 0 && self.0.u.mask == 0 && self.0.opaque == ptr::null_mut() } } fn contains_avchannel(&self, channel: AVChannel) -> Option<bool> { match self.0.order { AVChannelOrder::AV_CHANNEL_ORDER_NATIVE => unsafe { Some(self.0.u.mask & (1 << channel.0 as u64)!= 0) }, AVChannelOrder::AV_CHANNEL_ORDER_CUSTOM => unsafe { let channels = self.custom_channels_unchecked(); Some(channels.iter().any(|ch| ch.0.id == channel)) }, // no information about channels available AVChannelOrder::AV_CHANNEL_ORDER_UNSPEC => None, AVChannelOrder::AV_CHANNEL_ORDER_AMBISONIC => None, order => panic!("invalid channel order: {order:?}"), } } /// Returns `Some(v)` if the membership of `channel` in `self` can be tested, /// where `v` is only true if `channel` is contained in `self`. /// For `ChannelOrder::Unspecified` and `ChannelOrder::Ambisonic`, this /// function currently returns `None`, though this may change. /// /// Panics if called on a layout with an invalid channel order. pub fn contains(&self, channel: Channel) -> Option<bool> { self.contains_avchannel(AVChannel(channel as i32)) } /// Similar to `contains`, check if all channels in `layout` are also /// contained in `self`.
Only a few order combinations are supported: /// /// - native to native /// - native or custom to custom pub fn contains_all(&self, layout: &ChannelLayout) -> Option<bool> { match (self.0.order, layout.0.order) { (AVChannelOrder::AV_CHANNEL_ORDER_NATIVE, AVChannelOrder::AV_CHANNEL_ORDER_NATIVE) => unsafe { Some(self.0.u.mask & layout.0.u.mask == layout.0.u.mask) }, // could be implemented in the future (AVChannelOrder::AV_CHANNEL_ORDER_CUSTOM, AVChannelOrder::AV_CHANNEL_ORDER_NATIVE) => None, // check every channel of `layout` against `self` (_, AVChannelOrder::AV_CHANNEL_ORDER_CUSTOM) => unsafe { let channels = layout.custom_channels_unchecked(); Some( channels .iter() .all(|ch| self.contains_avchannel(ch.0.id).unwrap_or(false)), ) }, // no information about channels available (AVChannelOrder::AV_CHANNEL_ORDER_UNSPEC, _) | (_, AVChannelOrder::AV_CHANNEL_ORDER_UNSPEC) => None, (AVChannelOrder::AV_CHANNEL_ORDER_AMBISONIC, _) | (_, AVChannelOrder::AV_CHANNEL_ORDER_AMBISONIC) => None, (self_order, layout_order) => panic!("invalid channel orders: {self_order:?}, {layout_order:?}"), } } // this would need only one pass with the bprint API, but that's currently // unwrapped pub fn describe(&self) -> Result<String, Error> { fn describe_into(buf: &mut [u8], layout: &ChannelLayout) -> Result<Result<String, usize>, Error> { unsafe { let bytes_needed = match av_channel_layout_describe(layout.as_ptr(), buf.as_mut_ptr() as *mut _, buf.len()) { e if e < 0 => return Err(Error::from(e)), needed => needed as usize, }; if bytes_needed <= buf.len() { let s = String::from_utf8_lossy(&buf[..bytes_needed]); Ok(Ok(s.into_owned())) } else { Ok(Err(bytes_needed)) } } } const BUF_SIZE: usize = 64; let mut buf = [0u8; BUF_SIZE]; match describe_into(&mut buf[..], self)? { Ok(s) => Ok(s), Err(needed) => { let mut buf = vec![0; needed + 1]; Ok(describe_into(&mut buf[..], self)?.expect("allocated buffer should have been big enough")) } } } pub fn is_empty(&self) -> bool { self.0.nb_channels == 0 } pub fn order(&self) -> ChannelOrder { match self.0.order { AVChannelOrder::AV_CHANNEL_ORDER_UNSPEC => ChannelOrder::Unspecified, AVChannelOrder::AV_CHANNEL_ORDER_NATIVE => ChannelOrder::Native, AVChannelOrder::AV_CHANNEL_ORDER_CUSTOM => ChannelOrder::Custom, AVChannelOrder::AV_CHANNEL_ORDER_AMBISONIC => ChannelOrder::Ambisonic, order => panic!("invalid channel order: {order:?}"), } } pub fn set_order(&mut self, order: ChannelOrder) { self.0.order = AVChannelOrder(order as u32); } pub fn channels(&self) -> i32 { self.0.nb_channels } pub fn as_ptr(&self) -> *const AVChannelLayout { &self.0 as *const _ } pub fn native_order_bits(&self) -> Option<u64> { (self.0.order == AVChannelOrder::AV_CHANNEL_ORDER_NATIVE).then_some(unsafe { self.0.u.mask }) } unsafe fn custom_channels_unchecked(&self) -> &[CustomChannel] { slice::from_raw_parts(self.0.u.map.cast::<CustomChannel>(), self.0.nb_channels.max(0) as usize) } pub fn custom_channels(&self) -> Option<&[CustomChannel]> { (self.0.order == AVChannelOrder::AV_CHANNEL_ORDER_CUSTOM).then_some(unsafe { self.custom_channels_unchecked() }) } } impl ChannelLayout { pub const CUBE: ChannelLayout = Self::QUAD.with_channels_native(&[ Channel::TopFrontLeft, Channel::TopFrontRight, Channel::TopBackLeft, Channel::TopBackRight, ]); pub const HEXADECAGONAL: ChannelLayout = Self::OCTAGONAL.with_channels_native(&[ Channel::WideLeft, Channel::WideRight, Channel::TopBackLeft, Channel::TopBackRight, Channel::TopBackCenter, Channel::TopFrontCenter, Channel::TopFrontLeft, Channel::TopFrontRight, ]); pub const HEXAGONAL: ChannelLayout =
Self::_5POINT0_BACK.with_channels_native(&[Channel::BackCenter]); pub const MONO: ChannelLayout = Self::native(&[Channel::FrontCenter]); pub const OCTAGONAL: ChannelLayout = Self::_5POINT0.with_channels_native(&[Channel::BackLeft, Channel::BackCenter, Channel::BackRight]); pub const QUAD: ChannelLayout = Self::STEREO.with_channels_native(&[Channel::BackLeft, Channel::BackRight]); pub const STEREO: ChannelLayout = Self::native(&[Channel::FrontLeft, Channel::FrontRight]); pub const STEREO_DOWNMIX: ChannelLayout = Self::native(&[Channel::StereoLeft, Channel::StereoRight]); pub const SURROUND: ChannelLayout = Self::STEREO.with_channels_native(&[Channel::FrontCenter]); pub const _22POINT2: ChannelLayout = Self::_5POINT1_BACK.with_channels_native(&[ Channel::FrontLeftOfCenter, Channel::FrontRightOfCenter, Channel::BackCenter, Channel::LowFrequency2, Channel::SideLeft, Channel::SideRight, Channel::TopFrontLeft, Channel::TopFrontRight, Channel::TopFrontCenter, Channel::TopCenter, Channel::TopBackLeft, Channel::TopBackRight, Channel::TopSideLeft, Channel::TopSideRight, Channel::TopBackCenter, Channel::BottomFrontCenter, Channel::BottomFrontLeft, Channel::BottomFrontRight, ]); pub const _2POINT1: ChannelLayout = Self::STEREO.with_channels_native(&[Channel::LowFrequency]); pub const _2_1: ChannelLayout = Self::STEREO.with_channels_native(&[Channel::BackCenter]); pub const _2_2: ChannelLayout = Self::STEREO.with_channels_native(&[Channel::SideLeft, Channel::SideRight]); pub const _3POINT1: ChannelLayout = Self::SURROUND.with_channels_native(&[Channel::LowFrequency]); pub const _4POINT0: ChannelLayout = Self::SURROUND.with_channels_native(&[Channel::BackCenter]); pub const _4POINT1: ChannelLayout = Self::_4POINT0.with_channels_native(&[Channel::LowFrequency]); pub const _5POINT0: ChannelLayout = Self::SURROUND.with_channels_native(&[Channel::SideLeft, Channel::SideRight]); pub const _5POINT0_BACK: ChannelLayout = Self::SURROUND.with_channels_native(&[Channel::BackLeft, Channel::BackRight]); pub const _5POINT1: ChannelLayout = Self::_5POINT0.with_channels_native(&[Channel::LowFrequency]); pub const _5POINT1_BACK: ChannelLayout = Self::_5POINT0_BACK.with_channels_native(&[Channel::LowFrequency]); pub const _6POINT0: ChannelLayout = Self::_5POINT0.with_channels_native(&[Channel::BackCenter]); pub const _6POINT0_FRONT: ChannelLayout = Self::_2_2.with_channels_native(&[Channel::FrontLeftOfCenter, Channel::FrontRightOfCenter]); pub const _6POINT1: ChannelLayout = Self::_5POINT1.with_channels_native(&[Channel::BackCenter]); pub const _6POINT1_BACK: ChannelLayout = Self::_5POINT1_BACK.with_channels_native(&[Channel::BackCenter]); pub const _6POINT1_FRONT: ChannelLayout = Self::_6POINT0_FRONT.with_channels_native(&[Channel::LowFrequency]); pub const _7POINT0: ChannelLayout = Self::_5POINT0.with_channels_native(&[Channel::BackLeft, Channel::BackRight]); pub const _7POINT0_FRONT: ChannelLayout = Self::_5POINT0.with_channels_native(&[Channel::FrontLeftOfCenter, Channel::FrontRightOfCenter]); pub const _7POINT1: ChannelLayout = Self::_5POINT1.with_channels_native(&[Channel::BackLeft, Channel::BackRight]); pub const _7POINT1_TOP_BACK: ChannelLayout = Self::_5POINT1_BACK.with_channels_native(&[Channel::TopFrontLeft, Channel::TopFrontRight]); pub const _7POINT1_WIDE: ChannelLayout = Self::_5POINT1.with_channels_native(&[Channel::FrontLeftOfCenter, Channel::FrontRightOfCenter]); pub const _7POINT1_WIDE_BACK: ChannelLayout = Self::_5POINT1_BACK.with_channels_native(&[Channel::FrontLeftOfCenter, 
Channel::FrontRightOfCenter]); } impl From<ChannelLayout> for AVChannelLayout { fn from(v: ChannelLayout) -> AVChannelLayout { v.0 } } impl From<AVChannelLayout> for ChannelLayout { fn from(v: AVChannelLayout) -> ChannelLayout { Self(v) } } impl fmt::Debug for ChannelLayout { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let mut d = f.debug_struct("ChannelLayout"); d.field("order", &self.0.order); d.field("nb_channels", &self.0.nb_channels); if let Some(custom) = self.custom_channels() { d.field("map", &custom); } else { unsafe { d.field("mask", &self.0.u.mask); } } d.field("opaque", &self.0.opaque); d.finish() } } impl PartialEq for ChannelLayout { fn eq(&self, other: &ChannelLayout) -> bool { unsafe { let ord = av_channel_layout_compare(self.as_ptr(), other.as_ptr()); match ord { // negative return values for invalid layouts ..=-1 => false, 0 => true, 1 => false, 2.. => panic!("illegal return value"), } } } } #[derive(Debug, Clone, PartialEq, Eq)] #[repr(transparent)] pub struct CustomChannel(AVChannelCustom); impl CustomChannel { pub fn new(channel: Channel, name: Option<&str>) -> Self { Self::new_raw(channel as i32, name) } pub fn new_raw(channel: i32, name: Option<&str>) -> Self { let name = name.unwrap_or("").as_bytes(); let mut name_with_zero = [0; 16]; let len = name.len().min(15); name_with_zero[..len].copy_from_slice(&name[..len]); Self::custom(channel as i32, array::from_fn(|i| name_with_zero[i] as i8)) } pub fn custom(channel: i32, name: [i8; 16]) -> Self { assert_eq!(name[15], 0); Self(AVChannelCustom { id: AVChannel(channel as i32), name, opaque: ptr::null_mut(), }) } } impl From<Channel> for CustomChannel { fn from(v: Channel) -> CustomChannel { CustomChannel::new(v, None) } } impl From<CustomChannel> for AVChannelCustom { fn from(v: CustomChannel) -> AVChannelCustom { v.0 } } impl From<AVChannelCustom> for CustomChannel { fn from(v: AVChannelCustom) -> CustomChannel { Self(v) } } #[cfg(feature = "serde")] mod serde { //! It is expected that `CustomChannel::name` contains human-readable names in //! zero-terminated UTF-8. They are serialized as text instead of byte arrays //! to make them easily readable in e.g. JSON output. You'll need a different //! serde impl if you cleverly hid extra data after the null terminator, or //! use the name field to smuggle non-UTF-8 data. use std::{array, ffi::CStr, ptr, str}; use serde_::{ de::Error as _, ser::{Error as _, SerializeStruct}, Deserialize, Deserializer, Serialize, Serializer, }; use super::{alloc_custom_channels, ChannelData, ChannelLayout, CustomChannel}; use crate::ffi::{AVChannelLayout, AVChannelOrder}; impl Serialize for CustomChannel { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { let mut s = serializer.serialize_struct("CustomChannel", 2)?; s.serialize_field("id", &self.0.id.0)?; if self.0.name[0]!= 0 { let u8_name = array::from_fn::<u8, 16, _>(|i| self.0.name[i] as u8); let str_name = CStr::from_bytes_until_nul(&u8_name[..]) .map_err(|_| S::Error::custom("name is not a null-terminated string"))? 
.to_str() .map_err(|_| S::Error::custom("name is not valid UTF-8"))?; s.serialize_field("name", &str_name)?; } s.end() } } impl<'de> Deserialize<'de> for CustomChannel { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, { #[derive(Deserialize)] #[serde(crate = "serde_")] struct Channel<'a> { id: i32, name: Option<&'a str>, } let Channel { id, name } = Channel::deserialize(deserializer)?; Ok(CustomChannel::new_raw(id, name.as_deref())) } } impl Serialize for ChannelLayout { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { let mut s = serializer.serialize_struct("ChannelLayout", 2)?; // provide type hints in order to get compile-time errors if ffmpeg // changes the struct definition s.serialize_field::<u32>("order", &self.0.order.0)?; if let Some(custom) = self.custom_channels() { s.serialize_field("map", &custom)?; } else { s.serialize_field::<u64>("mask", unsafe { &self.0.u.mask })?; } s.end() } } impl<'de> Deserialize<'de> for ChannelLayout { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, { #[derive(Deserialize)] #[serde(crate = "serde_")] struct OldLayout { bits: u64, } #[derive(Deserialize)] #[serde(crate = "serde_")] struct
{ order: u32, mask: Option<u64>, map: Option<Vec<CustomChannel>>, } #[derive(Deserialize)] #[serde(untagged, crate = "serde_")] enum VersionedLayout { Old(OldLayout), New(NewLayout), } let (order, u, nb_channels); match VersionedLayout::deserialize(deserializer)? { VersionedLayout::Old(OldLayout { bits: mask }) => { order = AVChannelOrder::AV_CHANNEL_ORDER_NATIVE; u = ChannelData { mask }; nb_channels = mask.count_ones() as i32; } VersionedLayout::New(NewLayout { order: num_order, mask, map, }) => { order = AVChannelOrder(num_order); match (order, mask, map) { (AVChannelOrder::AV_CHANNEL_ORDER_CUSTOM, _, Some(map)) => { u = ChannelData { map: alloc_custom_channels(&map), }; nb_channels = map.len() as i32; } ( AVChannelOrder::AV_CHANNEL_ORDER_UNSPEC | AVChannelOrder::AV_CHANNEL_ORDER_NATIVE | AVChannelOrder::AV_CHANNEL_ORDER_AMBISONIC, Some(mask), _, ) => { u = ChannelData { mask }; nb_channels = mask.count_ones() as i32 } (_, _, _) => return Err(D::Error::missing_field("mask or map")), } } } Ok(ChannelLayout(AVChannelLayout { order, nb_channels, u, opaque: ptr::null_mut(), })) } } #[cfg(test)] mod test { use std::fmt::Debug; use serde_::{de::DeserializeOwned, Serialize}; use super::super::{Channel, ChannelLayout, CustomChannel}; use crate::ffi::AVChannelOrder; fn round_trip_debug<T>(x: T) where T: Serialize + DeserializeOwned + Debug, { let json = serde_json::to_string(&x).unwrap(); let y: T = serde_json::from_str(&json).unwrap(); assert_eq!(format!("{x:?}"), format!("{y:?}")); } #[test] fn serde() { round_trip_debug(ChannelLayout::native(&[Channel::StereoRight, Channel::LowFrequency])); round_trip_debug(ChannelLayout::custom(&[ CustomChannel::new(Channel::LowFrequency, Some("low-freq")), CustomChannel::new(Channel::BackCenter, None), ])); } #[test] fn old_format() { let x: ChannelLayout = serde_json::from_str(r#"{ "bits": 31 }"#).unwrap(); assert_eq!(x.0.order, AVChannelOrder::AV_CHANNEL_ORDER_NATIVE); assert_eq!(x.0.nb_channels, 5); assert_eq!(unsafe { x.0.u.mask }, 31); } } }
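// A hedged usage sketch (not part of the crate): the Deserialize impl above
// accepts both the pre-5.1 wire format (a bare `bits` mask) and the new
// `order`/`mask`/`map` format. Assumes the crate's "serde" feature and
// serde_json; the numeric order value 1 is AV_CHANNEL_ORDER_NATIVE in
// FFmpeg's enum.
fn wire_format_demo() -> Result<(), serde_json::Error> {
    // old format: implies native order, channel count derived from the mask
    let old: ChannelLayout = serde_json::from_str(r#"{ "bits": 3 }"#)?;
    // new format: explicit order plus mask
    let new: ChannelLayout = serde_json::from_str(r#"{ "order": 1, "mask": 3 }"#)?;
    // PartialEq goes through av_channel_layout_compare
    assert_eq!(old, new);
    Ok(())
}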
NewLayout
identifier_name
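// Minimal sketch of the public API defined in channel_layout.rs above,
// assuming the crate is linked against FFmpeg >= 5.1; the function is
// illustrative and not part of the file itself.
fn layout_demo() {
    // compose a native-order layout; nb_channels is derived from the mask
    let layout = ChannelLayout::native(&[Channel::FrontLeft, Channel::FrontRight, Channel::LowFrequency]);
    assert_eq!(layout.channels(), 3);
    // membership can only be answered for orders that carry channel info
    assert_eq!(layout.contains(Channel::LowFrequency), Some(true));
    assert_eq!(layout.contains(Channel::BackCenter), Some(false));
    // subset test between two native layouts: 2.1 contains stereo
    assert_eq!(ChannelLayout::_2POINT1.contains_all(&ChannelLayout::STEREO), Some(true));
    // human-readable name via av_channel_layout_describe, e.g. "2.1"
    println!("{}", layout.describe().unwrap());
}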
channel_layout.rs
use std::{array, ffi::CString, fmt, mem, ptr, slice}; use crate::{ ffi::{AVChannel, AVChannelOrder, *}, Error, }; // new channel layout since 5.1 #[derive(Copy, Clone, PartialEq, Eq, Debug)] #[repr(i32)] pub enum Channel { None = AVChannel::AV_CHAN_NONE.0, FrontLeft = AVChannel::AV_CHAN_FRONT_LEFT.0, FrontRight = AVChannel::AV_CHAN_FRONT_RIGHT.0, FrontCenter = AVChannel::AV_CHAN_FRONT_CENTER.0, LowFrequency = AVChannel::AV_CHAN_LOW_FREQUENCY.0, BackLeft = AVChannel::AV_CHAN_BACK_LEFT.0, BackRight = AVChannel::AV_CHAN_BACK_RIGHT.0, FrontLeftOfCenter = AVChannel::AV_CHAN_FRONT_LEFT_OF_CENTER.0, FrontRightOfCenter = AVChannel::AV_CHAN_FRONT_RIGHT_OF_CENTER.0, BackCenter = AVChannel::AV_CHAN_BACK_CENTER.0, SideLeft = AVChannel::AV_CHAN_SIDE_LEFT.0, SideRight = AVChannel::AV_CHAN_SIDE_RIGHT.0, TopCenter = AVChannel::AV_CHAN_TOP_CENTER.0, TopFrontLeft = AVChannel::AV_CHAN_TOP_FRONT_LEFT.0, TopFrontCenter = AVChannel::AV_CHAN_TOP_FRONT_CENTER.0, TopFrontRight = AVChannel::AV_CHAN_TOP_FRONT_RIGHT.0, TopBackLeft = AVChannel::AV_CHAN_TOP_BACK_LEFT.0, TopBackCenter = AVChannel::AV_CHAN_TOP_BACK_CENTER.0, TopBackRight = AVChannel::AV_CHAN_TOP_BACK_RIGHT.0, /// Stereo downmix. StereoLeft = AVChannel::AV_CHAN_STEREO_LEFT.0, /// Stereo downmix. StereoRight = AVChannel::AV_CHAN_STEREO_RIGHT.0, WideLeft = AVChannel::AV_CHAN_WIDE_LEFT.0, WideRight = AVChannel::AV_CHAN_WIDE_RIGHT.0, SurroundDirectLeft = AVChannel::AV_CHAN_SURROUND_DIRECT_LEFT.0, SurroundDirectRight = AVChannel::AV_CHAN_SURROUND_DIRECT_RIGHT.0, LowFrequency2 = AVChannel::AV_CHAN_LOW_FREQUENCY_2.0, TopSideLeft = AVChannel::AV_CHAN_TOP_SIDE_LEFT.0, TopSideRight = AVChannel::AV_CHAN_TOP_SIDE_RIGHT.0, BottomFrontCenter = AVChannel::AV_CHAN_BOTTOM_FRONT_CENTER.0, BottomFrontLeft = AVChannel::AV_CHAN_BOTTOM_FRONT_LEFT.0, BottomFrontRight = AVChannel::AV_CHAN_BOTTOM_FRONT_RIGHT.0, Unknown = AVChannel::AV_CHAN_UNKNOWN.0, } #[derive(Copy, Clone, PartialEq, Eq, Debug)] #[repr(u32)] pub enum ChannelOrder { Unspecified = AVChannelOrder::AV_CHANNEL_ORDER_UNSPEC.0, Native = AVChannelOrder::AV_CHANNEL_ORDER_NATIVE.0, Custom = AVChannelOrder::AV_CHANNEL_ORDER_CUSTOM.0, Ambisonic = AVChannelOrder::AV_CHANNEL_ORDER_AMBISONIC.0, } #[derive(Copy, Clone)] pub struct ChannelLayout(AVChannelLayout); /// SAFETY: these are not auto-implemented due to the `opaque: *mut c_void` in /// `AVChannelLayout`. The `ChannelLayout` wrapper does not expose `opaque` /// directly, but only through `From` conversion into an `AVChannelLayout`, /// which doesn't have these unsafe impls. 
unsafe impl Sync for ChannelLayout {}
unsafe impl Send for ChannelLayout {}

type ChannelData = AVChannelLayout__bindgen_ty_1;

fn alloc_custom_channels(channels: &[CustomChannel]) -> *mut AVChannelCustom {
    unsafe {
        let map = av_malloc_array(channels.len(), mem::size_of::<AVChannelCustom>())
            .cast::<AVChannelCustom>();
        if map.is_null() {
            panic!("out of memory")
        }
        for (i, c) in channels.iter().enumerate() {
            *map.add(i) = c.0.clone();
        }
        map
    }
}

impl ChannelLayout {
    pub const fn new() -> Self {
        Self(AVChannelLayout {
            order: AVChannelOrder::AV_CHANNEL_ORDER_NATIVE,
            nb_channels: 0,
            u: ChannelData { mask: 0 },
            opaque: ptr::null_mut(),
        })
    }

    pub fn default(n_channels: i32) -> ChannelLayout {
        let mut layout = Self::new();
        unsafe {
            av_channel_layout_default(&mut layout.0 as *mut _, n_channels);
        }
        layout
    }

    pub fn from_name(name: impl Into<Vec<u8>>) -> Result<ChannelLayout, Error> {
        let s = CString::new(name.into()).unwrap();
        let mut layout = Self::new();
        unsafe {
            match av_channel_layout_from_string(&mut layout.0 as *mut _, s.as_ptr()) {
                0 => Ok(layout),
                err => Err(Error::from(err)),
            }
        }
    }

    pub fn custom(channels: &[CustomChannel]) -> Self {
        assert!(channels.len() < i32::MAX as usize);
        Self(AVChannelLayout {
            order: AVChannelOrder::AV_CHANNEL_ORDER_CUSTOM,
            nb_channels: channels.len() as i32,
            u: ChannelData {
                map: alloc_custom_channels(channels),
            },
            opaque: ptr::null_mut(),
        })
    }

    const fn const_clone(&self) -> Self {
        Self(AVChannelLayout {
            order: self.0.order,
            nb_channels: self.0.nb_channels,
            u: self.0.u,
            opaque: self.0.opaque,
        })
    }

    pub const fn native(channels: &[Channel]) -> Self {
        Self::new().with_channels_native(channels)
    }

    pub const fn with_channels_native(&self, channels: &[Channel]) -> Self {
        let mut layout = self.const_clone();
        let mask = {
            let mut mask = 0;
            let mut idx = 0;
            while idx < channels.len() {
                let ch = channels[idx];
                // advance before the `continue` below, otherwise skipping
                // `Channel::None` would loop forever
                idx += 1;
                if ch as u64 == Channel::None as u64 {
                    continue;
                }
                mask |= 1 << ch as u64;
            }
            mask
        };
        unsafe {
            layout.0.u.mask |= mask;
            layout.0.nb_channels = layout.0.u.mask.count_ones() as i32;
        }
        layout
    }

    pub fn is_zeroed(&self) -> bool {
        unsafe {
            self.0.order == AVChannelOrder(0)
                && self.0.nb_channels == 0
                && self.0.u.mask == 0
                && self.0.opaque.is_null()
        }
    }

    fn contains_avchannel(&self, channel: AVChannel) -> Option<bool> {
        match self.0.order {
            AVChannelOrder::AV_CHANNEL_ORDER_NATIVE => unsafe {
                Some(self.0.u.mask & (1 << channel.0 as u64) != 0)
            },
            AVChannelOrder::AV_CHANNEL_ORDER_CUSTOM => unsafe {
                let channels = self.custom_channels_unchecked();
                Some(channels.iter().any(|ch| ch.0.id == channel))
            },
            // no information about channels available
            AVChannelOrder::AV_CHANNEL_ORDER_UNSPEC => None,
            AVChannelOrder::AV_CHANNEL_ORDER_AMBISONIC => None,
            order => panic!("invalid channel order: {order:?}"),
        }
    }

    /// Returns `Some(v)` if the membership of `channel` in `self` can be tested,
    /// where `v` is only true if `channel` is contained in `self`.
    /// For `ChannelOrder::Unspecified` and `ChannelOrder::Ambisonic`, this
    /// function currently returns `None`, though this may change.
    ///
    /// Panics if called on a layout with an invalid channel order.
    pub fn contains(&self, channel: Channel) -> Option<bool> {
        self.contains_avchannel(AVChannel(channel as i32))
    }

    /// Similar to `contains`, check if all channels in `layout` are also
    /// contained in `self`. Only a few order combinations are supported:
    ///
    /// - native in native
    /// - native or custom in custom
    ///
    /// All other combinations return `None`.
    pub fn contains_all(&self, layout: &ChannelLayout) -> Option<bool> {
        match (self.0.order, layout.0.order) {
            (AVChannelOrder::AV_CHANNEL_ORDER_NATIVE, AVChannelOrder::AV_CHANNEL_ORDER_NATIVE) => unsafe {
                Some(self.0.u.mask & layout.0.u.mask == layout.0.u.mask)
            },
            // could be implemented in the future
            (AVChannelOrder::AV_CHANNEL_ORDER_CUSTOM, AVChannelOrder::AV_CHANNEL_ORDER_NATIVE) => None,
            (
                AVChannelOrder::AV_CHANNEL_ORDER_NATIVE | AVChannelOrder::AV_CHANNEL_ORDER_CUSTOM,
                AVChannelOrder::AV_CHANNEL_ORDER_CUSTOM,
            ) => unsafe {
                // check the channels of `layout` (not of `self`): every one of
                // them must be contained in `self`
                let channels = layout.custom_channels_unchecked();
                Some(
                    channels
                        .iter()
                        .all(|ch| self.contains_avchannel(ch.0.id).unwrap_or(false)),
                )
            },
            // no information about channels available
            (AVChannelOrder::AV_CHANNEL_ORDER_UNSPEC, _) | (_, AVChannelOrder::AV_CHANNEL_ORDER_UNSPEC) => None,
            (AVChannelOrder::AV_CHANNEL_ORDER_AMBISONIC, _) | (_, AVChannelOrder::AV_CHANNEL_ORDER_AMBISONIC) => None,
            (self_order, layout_order) => panic!("invalid channel orders: {self_order:?}, {layout_order:?}"),
        }
    }

    // this would need only one pass with the bprint API, but that's currently
    // unwrapped
    pub fn describe(&self) -> Result<String, Error> {
        fn describe_into(buf: &mut [u8], layout: &ChannelLayout) -> Result<Result<String, usize>, Error> {
            unsafe {
                let bytes_needed =
                    match av_channel_layout_describe(layout.as_ptr(), buf.as_mut_ptr() as *mut _, buf.len()) {
                        e if e < 0 => return Err(Error::from(e)),
                        needed => needed as usize,
                    };
                if bytes_needed <= buf.len() {
                    let s = String::from_utf8_lossy(&buf[..bytes_needed]);
                    Ok(Ok(s.into_owned()))
                } else {
                    Ok(Err(bytes_needed))
                }
            }
        }

        const BUF_SIZE: usize = 64;
        let mut buf = [0u8; BUF_SIZE];

        match describe_into(&mut buf[..], self)? {
            Ok(s) => Ok(s),
            Err(needed) => {
                let mut buf = vec![0; needed + 1];
                Ok(describe_into(&mut buf[..], self)?.expect("allocated buffer should have been big enough"))
            }
        }
    }

    pub fn is_empty(&self) -> bool {
        self.0.nb_channels == 0
    }

    pub fn order(&self) -> ChannelOrder {
        match self.0.order {
            AVChannelOrder::AV_CHANNEL_ORDER_UNSPEC => ChannelOrder::Unspecified,
            AVChannelOrder::AV_CHANNEL_ORDER_NATIVE => ChannelOrder::Native,
            AVChannelOrder::AV_CHANNEL_ORDER_CUSTOM => ChannelOrder::Custom,
            AVChannelOrder::AV_CHANNEL_ORDER_AMBISONIC => ChannelOrder::Ambisonic,
            order => panic!("invalid channel order: {order:?}"),
        }
    }

    pub fn set_order(&mut self, order: ChannelOrder) {
        self.0.order = AVChannelOrder(order as u32);
    }

    pub fn channels(&self) -> i32 {
        self.0.nb_channels
    }

    pub fn as_ptr(&self) -> *const AVChannelLayout {
        &self.0 as *const _
    }

    pub fn native_order_bits(&self) -> Option<u64> {
        (self.0.order == AVChannelOrder::AV_CHANNEL_ORDER_NATIVE).then_some(unsafe { self.0.u.mask })
    }

    unsafe fn custom_channels_unchecked(&self) -> &[CustomChannel] {
        slice::from_raw_parts(self.0.u.map.cast::<CustomChannel>(), self.0.nb_channels.max(0) as usize)
    }

    pub fn custom_channels(&self) -> Option<&[CustomChannel]> {
        (self.0.order == AVChannelOrder::AV_CHANNEL_ORDER_CUSTOM)
            .then_some(unsafe { self.custom_channels_unchecked() })
    }
}

impl ChannelLayout {
    pub const CUBE: ChannelLayout = Self::QUAD.with_channels_native(&[
        Channel::TopFrontLeft,
        Channel::TopFrontRight,
        Channel::TopBackLeft,
        Channel::TopBackRight,
    ]);
    pub const HEXADECAGONAL: ChannelLayout = Self::OCTAGONAL.with_channels_native(&[
        Channel::WideLeft,
        Channel::WideRight,
        Channel::TopBackLeft,
        Channel::TopBackRight,
        Channel::TopBackCenter,
        Channel::TopFrontCenter,
        Channel::TopFrontLeft,
        Channel::TopFrontRight,
    ]);
    pub const HEXAGONAL: ChannelLayout =
Self::_5POINT0_BACK.with_channels_native(&[Channel::BackCenter]); pub const MONO: ChannelLayout = Self::native(&[Channel::FrontCenter]); pub const OCTAGONAL: ChannelLayout = Self::_5POINT0.with_channels_native(&[Channel::BackLeft, Channel::BackCenter, Channel::BackRight]); pub const QUAD: ChannelLayout = Self::STEREO.with_channels_native(&[Channel::BackLeft, Channel::BackRight]); pub const STEREO: ChannelLayout = Self::native(&[Channel::FrontLeft, Channel::FrontRight]); pub const STEREO_DOWNMIX: ChannelLayout = Self::native(&[Channel::StereoLeft, Channel::StereoRight]); pub const SURROUND: ChannelLayout = Self::STEREO.with_channels_native(&[Channel::FrontCenter]); pub const _22POINT2: ChannelLayout = Self::_5POINT1_BACK.with_channels_native(&[ Channel::FrontLeftOfCenter, Channel::FrontRightOfCenter, Channel::BackCenter, Channel::LowFrequency2, Channel::SideLeft, Channel::SideRight, Channel::TopFrontLeft, Channel::TopFrontRight, Channel::TopFrontCenter, Channel::TopCenter, Channel::TopBackLeft, Channel::TopBackRight, Channel::TopSideLeft, Channel::TopSideRight, Channel::TopBackCenter, Channel::BottomFrontCenter, Channel::BottomFrontLeft, Channel::BottomFrontRight, ]); pub const _2POINT1: ChannelLayout = Self::STEREO.with_channels_native(&[Channel::LowFrequency]); pub const _2_1: ChannelLayout = Self::STEREO.with_channels_native(&[Channel::BackCenter]); pub const _2_2: ChannelLayout = Self::STEREO.with_channels_native(&[Channel::SideLeft, Channel::SideRight]); pub const _3POINT1: ChannelLayout = Self::SURROUND.with_channels_native(&[Channel::LowFrequency]); pub const _4POINT0: ChannelLayout = Self::SURROUND.with_channels_native(&[Channel::BackCenter]); pub const _4POINT1: ChannelLayout = Self::_4POINT0.with_channels_native(&[Channel::LowFrequency]); pub const _5POINT0: ChannelLayout = Self::SURROUND.with_channels_native(&[Channel::SideLeft, Channel::SideRight]); pub const _5POINT0_BACK: ChannelLayout = Self::SURROUND.with_channels_native(&[Channel::BackLeft, Channel::BackRight]); pub const _5POINT1: ChannelLayout = Self::_5POINT0.with_channels_native(&[Channel::LowFrequency]); pub const _5POINT1_BACK: ChannelLayout = Self::_5POINT0_BACK.with_channels_native(&[Channel::LowFrequency]); pub const _6POINT0: ChannelLayout = Self::_5POINT0.with_channels_native(&[Channel::BackCenter]); pub const _6POINT0_FRONT: ChannelLayout = Self::_2_2.with_channels_native(&[Channel::FrontLeftOfCenter, Channel::FrontRightOfCenter]); pub const _6POINT1: ChannelLayout = Self::_5POINT1.with_channels_native(&[Channel::BackCenter]); pub const _6POINT1_BACK: ChannelLayout = Self::_5POINT1_BACK.with_channels_native(&[Channel::BackCenter]); pub const _6POINT1_FRONT: ChannelLayout = Self::_6POINT0_FRONT.with_channels_native(&[Channel::LowFrequency]); pub const _7POINT0: ChannelLayout = Self::_5POINT0.with_channels_native(&[Channel::BackLeft, Channel::BackRight]); pub const _7POINT0_FRONT: ChannelLayout = Self::_5POINT0.with_channels_native(&[Channel::FrontLeftOfCenter, Channel::FrontRightOfCenter]); pub const _7POINT1: ChannelLayout = Self::_5POINT1.with_channels_native(&[Channel::BackLeft, Channel::BackRight]); pub const _7POINT1_TOP_BACK: ChannelLayout = Self::_5POINT1_BACK.with_channels_native(&[Channel::TopFrontLeft, Channel::TopFrontRight]); pub const _7POINT1_WIDE: ChannelLayout = Self::_5POINT1.with_channels_native(&[Channel::FrontLeftOfCenter, Channel::FrontRightOfCenter]); pub const _7POINT1_WIDE_BACK: ChannelLayout = Self::_5POINT1_BACK.with_channels_native(&[Channel::FrontLeftOfCenter, 
Channel::FrontRightOfCenter]); } impl From<ChannelLayout> for AVChannelLayout { fn from(v: ChannelLayout) -> AVChannelLayout { v.0 } } impl From<AVChannelLayout> for ChannelLayout { fn from(v: AVChannelLayout) -> ChannelLayout { Self(v) } } impl fmt::Debug for ChannelLayout { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let mut d = f.debug_struct("ChannelLayout"); d.field("order", &self.0.order); d.field("nb_channels", &self.0.nb_channels); if let Some(custom) = self.custom_channels() { d.field("map", &custom); } else { unsafe { d.field("mask", &self.0.u.mask); } } d.field("opaque", &self.0.opaque); d.finish() } } impl PartialEq for ChannelLayout { fn eq(&self, other: &ChannelLayout) -> bool { unsafe { let ord = av_channel_layout_compare(self.as_ptr(), other.as_ptr()); match ord { // negative return values for invalid layouts ..=-1 => false, 0 => true, 1 => false, 2.. => panic!("illegal return value"), } } } } #[derive(Debug, Clone, PartialEq, Eq)] #[repr(transparent)] pub struct CustomChannel(AVChannelCustom); impl CustomChannel { pub fn new(channel: Channel, name: Option<&str>) -> Self { Self::new_raw(channel as i32, name) } pub fn new_raw(channel: i32, name: Option<&str>) -> Self { let name = name.unwrap_or("").as_bytes(); let mut name_with_zero = [0; 16]; let len = name.len().min(15); name_with_zero[..len].copy_from_slice(&name[..len]); Self::custom(channel as i32, array::from_fn(|i| name_with_zero[i] as i8)) } pub fn custom(channel: i32, name: [i8; 16]) -> Self { assert_eq!(name[15], 0); Self(AVChannelCustom { id: AVChannel(channel as i32), name, opaque: ptr::null_mut(), }) } } impl From<Channel> for CustomChannel { fn from(v: Channel) -> CustomChannel { CustomChannel::new(v, None) } } impl From<CustomChannel> for AVChannelCustom { fn from(v: CustomChannel) -> AVChannelCustom { v.0 } } impl From<AVChannelCustom> for CustomChannel { fn from(v: AVChannelCustom) -> CustomChannel { Self(v) } } #[cfg(feature = "serde")] mod serde { //! It is expected that `CustomChannel::name` contains human-readable names in //! zero-terminated UTF-8. They are serialized as text instead of byte arrays //! to make them easily readable in e.g. JSON output. You'll need a different //! serde impl if you cleverly hid extra data after the null terminator, or //! use the name field to smuggle non-UTF-8 data. use std::{array, ffi::CStr, ptr, str}; use serde_::{ de::Error as _, ser::{Error as _, SerializeStruct}, Deserialize, Deserializer, Serialize, Serializer, }; use super::{alloc_custom_channels, ChannelData, ChannelLayout, CustomChannel}; use crate::ffi::{AVChannelLayout, AVChannelOrder}; impl Serialize for CustomChannel { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { let mut s = serializer.serialize_struct("CustomChannel", 2)?; s.serialize_field("id", &self.0.id.0)?; if self.0.name[0]!= 0 { let u8_name = array::from_fn::<u8, 16, _>(|i| self.0.name[i] as u8); let str_name = CStr::from_bytes_until_nul(&u8_name[..]) .map_err(|_| S::Error::custom("name is not a null-terminated string"))? 
.to_str() .map_err(|_| S::Error::custom("name is not valid UTF-8"))?; s.serialize_field("name", &str_name)?; } s.end() } } impl<'de> Deserialize<'de> for CustomChannel { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, { #[derive(Deserialize)] #[serde(crate = "serde_")] struct Channel<'a> { id: i32, name: Option<&'a str>, } let Channel { id, name } = Channel::deserialize(deserializer)?; Ok(CustomChannel::new_raw(id, name.as_deref())) } } impl Serialize for ChannelLayout { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { let mut s = serializer.serialize_struct("ChannelLayout", 2)?; // provide type hints in order to get compile-time errors if ffmpeg // changes the struct definition s.serialize_field::<u32>("order", &self.0.order.0)?; if let Some(custom) = self.custom_channels() { s.serialize_field("map", &custom)?; } else { s.serialize_field::<u64>("mask", unsafe { &self.0.u.mask })?; } s.end() } } impl<'de> Deserialize<'de> for ChannelLayout { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, { #[derive(Deserialize)] #[serde(crate = "serde_")] struct OldLayout { bits: u64, } #[derive(Deserialize)] #[serde(crate = "serde_")] struct NewLayout { order: u32, mask: Option<u64>, map: Option<Vec<CustomChannel>>, } #[derive(Deserialize)] #[serde(untagged, crate = "serde_")] enum VersionedLayout { Old(OldLayout), New(NewLayout), } let (order, u, nb_channels); match VersionedLayout::deserialize(deserializer)? { VersionedLayout::Old(OldLayout { bits: mask }) => { order = AVChannelOrder::AV_CHANNEL_ORDER_NATIVE; u = ChannelData { mask }; nb_channels = mask.count_ones() as i32; } VersionedLayout::New(NewLayout { order: num_order, mask, map, }) => { order = AVChannelOrder(num_order); match (order, mask, map) { (AVChannelOrder::AV_CHANNEL_ORDER_CUSTOM, _, Some(map)) => { u = ChannelData { map: alloc_custom_channels(&map), }; nb_channels = map.len() as i32; } ( AVChannelOrder::AV_CHANNEL_ORDER_UNSPEC | AVChannelOrder::AV_CHANNEL_ORDER_NATIVE | AVChannelOrder::AV_CHANNEL_ORDER_AMBISONIC, Some(mask), _, ) => { u = ChannelData { mask }; nb_channels = mask.count_ones() as i32 } (_, _, _) => return Err(D::Error::missing_field("mask or map")), } } } Ok(ChannelLayout(AVChannelLayout { order, nb_channels, u, opaque: ptr::null_mut(), })) } } #[cfg(test)] mod test { use std::fmt::Debug; use serde_::{de::DeserializeOwned, Serialize}; use super::super::{Channel, ChannelLayout, CustomChannel}; use crate::ffi::AVChannelOrder; fn round_trip_debug<T>(x: T) where T: Serialize + DeserializeOwned + Debug, { let json = serde_json::to_string(&x).unwrap(); let y: T = serde_json::from_str(&json).unwrap(); assert_eq!(format!("{x:?}"), format!("{y:?}")); } #[test] fn serde()
#[test] fn old_format() { let x: ChannelLayout = serde_json::from_str(r#"{ "bits": 31 }"#).unwrap(); assert_eq!(x.0.order, AVChannelOrder::AV_CHANNEL_ORDER_NATIVE); assert_eq!(x.0.nb_channels, 5); assert_eq!(unsafe { x.0.u.mask }, 31); } } }
{ round_trip_debug(ChannelLayout::native(&[Channel::StereoRight, Channel::LowFrequency])); round_trip_debug(ChannelLayout::custom(&[ CustomChannel::new(Channel::LowFrequency, Some("low-freq")), CustomChannel::new(Channel::BackCenter, None), ])); }
identifier_body
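// Hedged sketch of custom layouts with named channels, mirroring the serde
// round-trip test above; serde_json and the crate's "serde" feature are
// assumed. With FFmpeg's enum values this serializes as
// {"order":2,"map":[{"id":0,"name":"left"},{"id":1}]}.
fn custom_demo() -> serde_json::Result<String> {
    let layout = ChannelLayout::custom(&[
        CustomChannel::new(Channel::FrontLeft, Some("left")),
        // the name field is omitted from the JSON when empty
        CustomChannel::new(Channel::FrontRight, None),
    ]);
    serde_json::to_string(&layout)
}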
main.rs
log::warn!("{}", log_line); } } }); } })) }; #[cfg(unix)] for socketpath in &ARGS.sockets { let arc = mimetypes.clone(); if socketpath.exists() && socketpath.metadata() .expect("Failed to get existing socket metadata") .file_type() .is_socket() { log::warn!("Socket already exists, attempting to remove {}", socketpath.display()); let _ = std::fs::remove_file(socketpath); } let listener = match UnixListener::bind(socketpath) { Err(e) => { panic!("Failed to listen on {}: {}", socketpath.display(), e) } Ok(listener) => listener, }; handles.push(tokio::spawn(async move { log::info!("Started listener on {}", socketpath.display()); loop { let (stream, _) = listener.accept().await.unwrap_or_else(|e| { panic!("could not accept new connection on {}: {}", socketpath.display(), e) }); let arc = arc.clone(); tokio::spawn(async { match RequestHandle::new_unix(stream, arc).await { Ok(handle) => match handle.handle().await { Ok(info) => log::info!("{}", info), Err(err) => log::warn!("{}", err), }, Err(log_line) => { log::warn!("{}", log_line); } } }); } })) }; futures_util::future::join_all(handles).await; }); } type Result<T = (), E = Box<dyn Error + Send + Sync>> = std::result::Result<T, E>; static ARGS: Lazy<Args> = Lazy::new(|| { args().unwrap_or_else(|s| { eprintln!("{s}"); std::process::exit(1); }) }); struct Args { addrs: Vec<SocketAddr>, #[cfg(unix)] sockets: Vec<PathBuf>, content_dir: PathBuf, certs: Arc<certificates::CertStore>, hostnames: Vec<Host>, language: Option<String>, serve_secret: bool, log_ips: bool, only_tls13: bool, central_config: bool, skip_port_check: bool, } fn args() -> Result<Args> { let args: Vec<String> = std::env::args().collect(); let mut opts = getopts::Options::new(); opts.optopt( "", "content", "Root of the content directory (default ./content/)", "DIR", ); opts.optopt( "", "certs", "Root of the certificate directory (default ./.certificates/)", "DIR", ); opts.optmulti( "", "addr", &format!("Address to listen on (default 0.0.0.0:{DEFAULT_PORT} and [::]:{DEFAULT_PORT}; multiple occurrences means listening on multiple interfaces)"), "IP:PORT", ); #[cfg(unix)] opts.optmulti( "", "socket", "Unix socket to listen on (multiple occurrences means listening on multiple sockets)", "PATH", ); opts.optmulti( "", "hostname", "Domain name of this Gemini server, enables checking hostname and port in requests. (multiple occurrences means basic vhosts)", "NAME", ); opts.optopt( "", "lang", "RFC 4646 Language code for text/gemini documents", "LANG", ); opts.optflag("h", "help", "Print this help text and exit."); opts.optflag("V", "version", "Print version information and exit."); opts.optflag( "3", "only-tls13", "Only use TLSv1.3 (default also allows TLSv1.2)", ); opts.optflag( "", "serve-secret", "Enable serving secret files (files/directories starting with a dot)", ); opts.optflag("", "log-ip", "Output the remote IP address when logging."); opts.optflag( "C", "central-conf", "Use a central .meta file in the content root directory.
Decentral config files will be ignored.", ); opts.optflag( "e", "ed25519", "Generate keys using the Ed25519 signature algorithm instead of the default ECDSA.", ); opts.optflag( "", "skip-port-check", "Skip URL port check even when a hostname is specified.", ); let matches = opts.parse(&args[1..]).map_err(|f| f.to_string())?; if matches.opt_present("h") { eprintln!("{}", opts.usage(&format!("Usage: {} [options]", &args[0]))); std::process::exit(0); } if matches.opt_present("V") { eprintln!("agate {}", env!("CARGO_PKG_VERSION")); std::process::exit(0); } // try to open the certificate directory let certs_path = matches.opt_get_default("certs", ".certificates".to_string())?; let (certs, certs_path) = match check_path(certs_path.clone()) { // the directory exists, try to load certificates Ok(certs_path) => match certificates::CertStore::load_from(&certs_path) { // all is good Ok(certs) => (Some(certs), certs_path), // the certificate directory did not contain certificates, but we can generate some // because the hostname option was given Err(certificates::CertLoadError::Empty) if matches.opt_present("hostname") => { (None, certs_path) } // failed loading certificates or missing hostname to generate them Err(e) => return Err(e.into()), }, // the directory does not exist Err(_) => { // since certificate management should be automated, we are going to create the directory too log::info!( "The certificate directory {:?} does not exist, creating it.", certs_path ); std::fs::create_dir(&certs_path).expect("could not create certificate directory"); // we just created the directory, skip loading from it (None, PathBuf::from(certs_path)) } }; // If we have not loaded any certificates yet, we have to try to reload them later. // This ensures we get the right error message. 
let mut reload_certs = certs.is_none(); let mut hostnames = vec![]; for s in matches.opt_strs("hostname") { // normalize hostname, add punycoding if necessary let hostname = Host::parse(&s)?; // check if we have a certificate for that domain if let Host::Domain(ref domain) = hostname { if!matches!(certs, Some(ref certs) if certs.has_domain(domain)) { log::info!("No certificate or key found for {:?}, generating them.", s); let mut cert_params = CertificateParams::new(vec![domain.clone()]); cert_params .distinguished_name .push(DnType::CommonName, domain); // <CertificateParams as Default>::default() already implements a // date in the far future from the time of writing: 4096-01-01 if matches.opt_present("e") { cert_params.alg = &rcgen::PKCS_ED25519; } // generate the certificate with the configuration let cert = Certificate::from_params(cert_params)?; // make sure the certificate directory exists fs::create_dir(certs_path.join(domain))?; // write certificate data to disk let mut cert_file = File::create(certs_path.join(format!( "{}/{}", domain, certificates::CERT_FILE_NAME )))?; cert_file.write_all(&cert.serialize_der()?)?; // write key data to disk let key_file_path = certs_path.join(format!("{}/{}", domain, certificates::KEY_FILE_NAME)); let mut key_file = File::create(&key_file_path)?; #[cfg(unix)] { // set permissions so only owner can read match key_file.set_permissions(std::fs::Permissions::from_mode(0o400)) { Ok(_) => (), Err(_) => log::warn!( "could not set permissions for new key file {}", key_file_path.display() ), } } key_file.write_all(&cert.serialize_private_key_der())?; reload_certs = true; } } hostnames.push(hostname); } // if new certificates were generated, reload the certificate store let certs = if reload_certs { certificates::CertStore::load_from(&certs_path)? } else { // there must already have been certificates loaded certs.unwrap() }; // parse listening addresses let mut addrs = vec![]; for i in matches.opt_strs("addr") { addrs.push(i.parse()?); } #[cfg_attr(not(unix), allow(unused_mut))] let mut empty = addrs.is_empty(); #[cfg(unix)] let mut sockets = vec![]; #[cfg(unix)] { for i in matches.opt_strs("socket") { sockets.push(i.parse()?); } empty &= sockets.is_empty(); } if empty { addrs = vec![ SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), DEFAULT_PORT), SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), DEFAULT_PORT), ]; } Ok(Args { addrs, #[cfg(unix)] sockets, content_dir: check_path(matches.opt_get_default("content", "content".into())?)?, certs: Arc::new(certs), hostnames, language: matches.opt_str("lang"), serve_secret: matches.opt_present("serve-secret"), log_ips: matches.opt_present("log-ip"), only_tls13: matches.opt_present("only-tls13"), central_config: matches.opt_present("central-conf"), skip_port_check: matches.opt_present("skip-port-check"), }) } fn check_path(s: String) -> Result<PathBuf, String> { let p = PathBuf::from(s); if p.as_path().exists() { Ok(p) } else { Err(format!("No such file: {p:?}")) } } /// TLS configuration. 
static TLS: Lazy<TlsAcceptor> = Lazy::new(acceptor); fn acceptor() -> TlsAcceptor { let config = if ARGS.only_tls13 { ServerConfig::builder() .with_safe_default_cipher_suites() .with_safe_default_kx_groups() .with_protocol_versions(&[&rustls::version::TLS13]) .expect("could not build server config") } else { ServerConfig::builder().with_safe_defaults() } .with_no_client_auth() .with_cert_resolver(ARGS.certs.clone()); TlsAcceptor::from(Arc::new(config)) } struct RequestHandle<T> { stream: TlsStream<T>, local_port_check: Option<u16>, log_line: String, metadata: Arc<Mutex<FileOptions>>, } impl RequestHandle<TcpStream> { /// Creates a new request handle for the given stream. If establishing the TLS /// session fails, returns a corresponding log line. async fn new(stream: TcpStream, metadata: Arc<Mutex<FileOptions>>) -> Result<Self, String> { let local_addr = stream.local_addr().unwrap().to_string(); // try to get the remote IP address if desired let peer_addr = if ARGS.log_ips { stream .peer_addr() .map_err(|_| { format!( // use nonexistent status code 01 if peer IP is unknown "{local_addr} - \"\" 01 \"IP error\" error:could not get peer address", ) })? .ip() .to_string() } else { // Do not log IP address, but something else so columns still line up. "-".into() }; let log_line = format!("{local_addr} {peer_addr}",); let local_port_check = if ARGS.skip_port_check { None } else { Some(stream.local_addr().unwrap().port()) }; match TLS.accept(stream).await { Ok(stream) => Ok(Self { stream, local_port_check, log_line, metadata, }), // use nonexistent status code 00 if connection was not established Err(e) => Err(format!("{log_line} \"\" 00 \"TLS error\" error:{e}")), } } } #[cfg(unix)] impl RequestHandle<UnixStream> { async fn new_unix( stream: UnixStream, metadata: Arc<Mutex<FileOptions>>, ) -> Result<Self, String> { let log_line = format!( "unix:{} -", stream .local_addr() .ok() .and_then(|addr| Some(addr.as_pathname()?.to_string_lossy().into_owned())) .unwrap_or_default() ); match TLS.accept(stream).await { Ok(stream) => Ok(Self { stream, // TODO add port check for unix sockets, requires extra arg for port local_port_check: None, log_line, metadata, }), // use nonexistent status code 00 if connection was not established Err(e) => Err(format!("{} \"\" 00 \"TLS error\" error:{}", log_line, e)), } } } impl<T> RequestHandle<T> where T: AsyncWriteExt + AsyncReadExt + Unpin, { /// Do the necessary actions to handle this request. Returns a corresponding /// log line as Err or Ok, depending on if the request finished with or /// without errors. async fn handle(mut self) -> Result<String, String> { // not already in error condition let result = match self.parse_request().await { Ok(url) => self.send_response(url).await, Err((status, msg)) => self.send_header(status, msg).await, }; let close_result = self.stream.shutdown().await; match (result, close_result) { (Err(e), _) => Err(format!("{} error:{}", self.log_line, e)), (Ok(_), Err(e)) => Err(format!("{} error:{}", self.log_line, e)), (Ok(_), Ok(_)) => Ok(self.log_line), } } /// Return the URL requested by the client. async fn parse_request(&mut self) -> std::result::Result<Url, (u8, &'static str)> { // Because requests are limited to 1024 bytes (plus 2 bytes for CRLF), we // can use a fixed-sized buffer on the stack, avoiding allocations and // copying, and stopping bad clients from making us use too much memory. 
let mut request = [0; 1026]; let mut buf = &mut request[..]; let mut len = 0; // Read until CRLF, end-of-stream, or there's no buffer space left. // // Since neither CR nor LF can be part of a URI according to // ISOC-RFC 3986, we could use BufRead::read_line here, but that does // not allow us to cap the number of read bytes at 1024+2. let result = loop { let bytes_read = if let Ok(read) = self.stream.read(buf).await { read } else { break Err((BAD_REQUEST, "Request ended unexpectedly")); }; len += bytes_read; if request[..len].ends_with(b"\r\n") { break Ok(()); } else if bytes_read == 0 { break Err((BAD_REQUEST, "Request ended unexpectedly")); } buf = &mut request[len..]; } .and_then(|()| { std::str::from_utf8(&request[..len - 2]).or(Err((BAD_REQUEST, "Non-UTF-8 request"))) }); let request = result.map_err(|e| { // write empty request to log line for uniformity write!(self.log_line, " \"\"").unwrap(); e })?; // log literal request (might be different from or not an actual URL) write!(self.log_line, " \"{request}\"").unwrap(); let mut url = Url::parse(request).or(Err((BAD_REQUEST, "Invalid URL")))?; // Validate the URL: // correct scheme if url.scheme()!= "gemini" { return Err((PROXY_REQUEST_REFUSED, "Unsupported URL scheme")); }
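// Illustrative helper (not part of agate): what a well-formed request looks
// like from the client side, matching the framing parse_request enforces
// above, i.e. a URL of at most 1024 bytes followed by CRLF.
fn build_request(url: &str) -> Option<Vec<u8>> {
    if url.len() > 1024 {
        return None; // the server reads at most 1024 + 2 bytes
    }
    let mut req = Vec::with_capacity(url.len() + 2);
    req.extend_from_slice(url.as_bytes());
    req.extend_from_slice(b"\r\n");
    Some(req)
}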
// correct host if let Some(domain) = url.domain() { // because the gemini scheme is not special enough for WHATWG, normalize // it ourselves let host = Host::parse( &percent_decode_str(domain) .decode_utf8() .or(Err((BAD_REQUEST, "Invalid URL")))?, )
// no userinfo and no fragment if url.password().is_some() || !url.username().is_empty() || url.fragment().is_some() { return Err((BAD_REQUEST, "URL contains fragment or userinfo")); }
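// Standalone sketch of the validation steps shown above, using the `url`
// crate directly; the status messages follow the surrounding code, while the
// function itself is illustrative and not part of agate.
fn validate(raw: &str) -> Result<url::Url, &'static str> {
    let url = url::Url::parse(raw).map_err(|_| "Invalid URL")?;
    if url.scheme() != "gemini" {
        return Err("Unsupported URL scheme");
    }
    if url.password().is_some() || !url.username().is_empty() || url.fragment().is_some() {
        return Err("URL contains fragment or userinfo");
    }
    Ok(url)
}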
random_line_split
main.rs
else { // already listening on the other unspecified address log::warn!("Could not start listener on {}, but already listening on another unspecified address. Probably your system automatically listens in dual stack?", addr); continue; } } Ok(listener) => listener, }; listening_unspecified |= addr.ip().is_unspecified(); handles.push(tokio::spawn(async move { log::info!("Started listener on {}", addr); loop { let (stream, _) = listener.accept().await.unwrap_or_else(|e| { panic!("could not accept new connection on {addr}: {e}") }); let arc = arc.clone(); tokio::spawn(async { match RequestHandle::new(stream, arc).await { Ok(handle) => match handle.handle().await { Ok(info) => log::info!("{}", info), Err(err) => log::warn!("{}", err), }, Err(log_line) => { log::warn!("{}", log_line); } } }); } })) }; #[cfg(unix)] for socketpath in &ARGS.sockets { let arc = mimetypes.clone(); if socketpath.exists() && socketpath.metadata() .expect("Failed to get existing socket metadata") .file_type() .is_socket() { log::warn!("Socket already exists, attempting to remove {}", socketpath.display()); let _ = std::fs::remove_file(socketpath); } let listener = match UnixListener::bind(socketpath) { Err(e) => { panic!("Failed to listen on {}: {}", socketpath.display(), e) } Ok(listener) => listener, }; handles.push(tokio::spawn(async move { log::info!("Started listener on {}", socketpath.display()); loop { let (stream, _) = listener.accept().await.unwrap_or_else(|e| { panic!("could not accept new connection on {}: {}", socketpath.display(), e) }); let arc = arc.clone(); tokio::spawn(async { match RequestHandle::new_unix(stream, arc).await { Ok(handle) => match handle.handle().await { Ok(info) => log::info!("{}", info), Err(err) => log::warn!("{}", err), }, Err(log_line) => { log::warn!("{}", log_line); } } }); } })) }; futures_util::future::join_all(handles).await; }); } type Result<T = (), E = Box<dyn Error + Send + Sync>> = std::result::Result<T, E>; static ARGS: Lazy<Args> = Lazy::new(|| { args().unwrap_or_else(|s| { eprintln!("{s}"); std::process::exit(1); }) }); struct Args { addrs: Vec<SocketAddr>, #[cfg(unix)] sockets: Vec<PathBuf>, content_dir: PathBuf, certs: Arc<certificates::CertStore>, hostnames: Vec<Host>, language: Option<String>, serve_secret: bool, log_ips: bool, only_tls13: bool, central_config: bool, skip_port_check: bool, } fn args() -> Result<Args> { let args: Vec<String> = std::env::args().collect(); let mut opts = getopts::Options::new(); opts.optopt( "", "content", "Root of the content directory (default./content/)", "DIR", ); opts.optopt( "", "certs", "Root of the certificate directory (default./.certificates/)", "DIR", ); opts.optmulti( "", "addr", &format!("Address to listen on (default 0.0.0.0:{DEFAULT_PORT} and [::]:{DEFAULT_PORT}; multiple occurences means listening on multiple interfaces)"), "IP:PORT", ); #[cfg(unix)] opts.optmulti( "", "socket", "Unix socket to listen on (multiple occurences means listening on multiple sockets)", "PATH", ); opts.optmulti( "", "hostname", "Domain name of this Gemini server, enables checking hostname and port in requests. 
(multiple occurrences means basic vhosts)", "NAME", ); opts.optopt( "", "lang", "RFC 4646 Language code for text/gemini documents", "LANG", ); opts.optflag("h", "help", "Print this help text and exit."); opts.optflag("V", "version", "Print version information and exit."); opts.optflag( "3", "only-tls13", "Only use TLSv1.3 (default also allows TLSv1.2)", ); opts.optflag( "", "serve-secret", "Enable serving secret files (files/directories starting with a dot)", ); opts.optflag("", "log-ip", "Output the remote IP address when logging."); opts.optflag( "C", "central-conf", "Use a central .meta file in the content root directory. Decentral config files will be ignored.", ); opts.optflag( "e", "ed25519", "Generate keys using the Ed25519 signature algorithm instead of the default ECDSA.", ); opts.optflag( "", "skip-port-check", "Skip URL port check even when a hostname is specified.", ); let matches = opts.parse(&args[1..]).map_err(|f| f.to_string())?; if matches.opt_present("h") { eprintln!("{}", opts.usage(&format!("Usage: {} [options]", &args[0]))); std::process::exit(0); } if matches.opt_present("V") { eprintln!("agate {}", env!("CARGO_PKG_VERSION")); std::process::exit(0); } // try to open the certificate directory let certs_path = matches.opt_get_default("certs", ".certificates".to_string())?; let (certs, certs_path) = match check_path(certs_path.clone()) { // the directory exists, try to load certificates Ok(certs_path) => match certificates::CertStore::load_from(&certs_path) { // all is good Ok(certs) => (Some(certs), certs_path), // the certificate directory did not contain certificates, but we can generate some // because the hostname option was given Err(certificates::CertLoadError::Empty) if matches.opt_present("hostname") => { (None, certs_path) } // failed loading certificates or missing hostname to generate them Err(e) => return Err(e.into()), }, // the directory does not exist Err(_) => { // since certificate management should be automated, we are going to create the directory too log::info!( "The certificate directory {:?} does not exist, creating it.", certs_path ); std::fs::create_dir(&certs_path).expect("could not create certificate directory"); // we just created the directory, skip loading from it (None, PathBuf::from(certs_path)) } }; // If we have not loaded any certificates yet, we have to try to reload them later. // This ensures we get the right error message.
let mut reload_certs = certs.is_none(); let mut hostnames = vec![]; for s in matches.opt_strs("hostname") { // normalize hostname, add punycoding if necessary let hostname = Host::parse(&s)?; // check if we have a certificate for that domain if let Host::Domain(ref domain) = hostname { if !matches!(certs, Some(ref certs) if certs.has_domain(domain)) { log::info!("No certificate or key found for {:?}, generating them.", s); let mut cert_params = CertificateParams::new(vec![domain.clone()]); cert_params .distinguished_name .push(DnType::CommonName, domain); // <CertificateParams as Default>::default() already implements a // date in the far future from the time of writing: 4096-01-01 if matches.opt_present("e") { cert_params.alg = &rcgen::PKCS_ED25519; } // generate the certificate with the configuration let cert = Certificate::from_params(cert_params)?; // make sure the certificate directory exists fs::create_dir(certs_path.join(domain))?; // write certificate data to disk let mut cert_file = File::create(certs_path.join(format!( "{}/{}", domain, certificates::CERT_FILE_NAME )))?; cert_file.write_all(&cert.serialize_der()?)?; // write key data to disk let key_file_path = certs_path.join(format!("{}/{}", domain, certificates::KEY_FILE_NAME)); let mut key_file = File::create(&key_file_path)?; #[cfg(unix)] { // set permissions so only owner can read match key_file.set_permissions(std::fs::Permissions::from_mode(0o400)) { Ok(_) => (), Err(_) => log::warn!( "could not set permissions for new key file {}", key_file_path.display() ), } } key_file.write_all(&cert.serialize_private_key_der())?; reload_certs = true; } } hostnames.push(hostname); } // if new certificates were generated, reload the certificate store let certs = if reload_certs { certificates::CertStore::load_from(&certs_path)? } else { // there must already have been certificates loaded certs.unwrap() }; // parse listening addresses let mut addrs = vec![]; for i in matches.opt_strs("addr") { addrs.push(i.parse()?); } #[cfg_attr(not(unix), allow(unused_mut))] let mut empty = addrs.is_empty(); #[cfg(unix)] let mut sockets = vec![]; #[cfg(unix)] { for i in matches.opt_strs("socket") { sockets.push(i.parse()?); } empty &= sockets.is_empty(); } if empty { addrs = vec![ SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), DEFAULT_PORT), SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), DEFAULT_PORT), ]; } Ok(Args { addrs, #[cfg(unix)] sockets, content_dir: check_path(matches.opt_get_default("content", "content".into())?)?, certs: Arc::new(certs), hostnames, language: matches.opt_str("lang"), serve_secret: matches.opt_present("serve-secret"), log_ips: matches.opt_present("log-ip"), only_tls13: matches.opt_present("only-tls13"), central_config: matches.opt_present("central-conf"), skip_port_check: matches.opt_present("skip-port-check"), }) } fn check_path(s: String) -> Result<PathBuf, String> { let p = PathBuf::from(s); if p.as_path().exists() { Ok(p) } else { Err(format!("No such file: {p:?}")) } } /// TLS configuration.
static TLS: Lazy<TlsAcceptor> = Lazy::new(acceptor); fn acceptor() -> TlsAcceptor { let config = if ARGS.only_tls13 { ServerConfig::builder() .with_safe_default_cipher_suites() .with_safe_default_kx_groups() .with_protocol_versions(&[&rustls::version::TLS13]) .expect("could not build server config") } else { ServerConfig::builder().with_safe_defaults() } .with_no_client_auth() .with_cert_resolver(ARGS.certs.clone()); TlsAcceptor::from(Arc::new(config)) } struct RequestHandle<T> { stream: TlsStream<T>, local_port_check: Option<u16>, log_line: String, metadata: Arc<Mutex<FileOptions>>, } impl RequestHandle<TcpStream> { /// Creates a new request handle for the given stream. If establishing the TLS /// session fails, returns a corresponding log line. async fn new(stream: TcpStream, metadata: Arc<Mutex<FileOptions>>) -> Result<Self, String> { let local_addr = stream.local_addr().unwrap().to_string(); // try to get the remote IP address if desired let peer_addr = if ARGS.log_ips { stream .peer_addr() .map_err(|_| { format!( // use nonexistent status code 01 if peer IP is unknown "{local_addr} - \"\" 01 \"IP error\" error:could not get peer address", ) })? .ip() .to_string() } else { // Do not log IP address, but something else so columns still line up. "-".into() }; let log_line = format!("{local_addr} {peer_addr}",); let local_port_check = if ARGS.skip_port_check { None } else { Some(stream.local_addr().unwrap().port()) }; match TLS.accept(stream).await { Ok(stream) => Ok(Self { stream, local_port_check, log_line, metadata, }), // use nonexistent status code 00 if connection was not established Err(e) => Err(format!("{log_line} \"\" 00 \"TLS error\" error:{e}")), } } } #[cfg(unix)] impl RequestHandle<UnixStream> { async fn new_unix( stream: UnixStream, metadata: Arc<Mutex<FileOptions>>, ) -> Result<Self, String> { let log_line = format!( "unix:{} -", stream .local_addr() .ok() .and_then(|addr| Some(addr.as_pathname()?.to_string_lossy().into_owned())) .unwrap_or_default() ); match TLS.accept(stream).await { Ok(stream) => Ok(Self { stream, // TODO add port check for unix sockets, requires extra arg for port local_port_check: None, log_line, metadata, }), // use nonexistent status code 00 if connection was not established Err(e) => Err(format!("{} \"\" 00 \"TLS error\" error:{}", log_line, e)), } } } impl<T> RequestHandle<T> where T: AsyncWriteExt + AsyncReadExt + Unpin, { /// Do the necessary actions to handle this request. Returns a corresponding /// log line as Err or Ok, depending on if the request finished with or /// without errors. async fn handle(mut self) -> Result<String, String> { // not already in error condition let result = match self.parse_request().await { Ok(url) => self.send_response(url).await, Err((status, msg)) => self.send_header(status, msg).await, }; let close_result = self.stream.shutdown().await; match (result, close_result) { (Err(e), _) => Err(format!("{} error:{}", self.log_line, e)), (Ok(_), Err(e)) => Err(format!("{} error:{}", self.log_line, e)), (Ok(_), Ok(_)) => Ok(self.log_line), } } /// Return the URL requested by the client. async fn parse_request(&mut self) -> std::result::Result<Url, (u8, &'static str)> { // Because requests are limited to 1024 bytes (plus 2 bytes for CRLF), we // can use a fixed-sized buffer on the stack, avoiding allocations and // copying, and stopping bad clients from making us use too much memory. 
let mut request = [0; 1026]; let mut buf = &mut request[..]; let mut len = 0; // Read until CRLF, end-of-stream, or there's no buffer space left. // // Since neither CR nor LF can be part of a URI according to // ISOC-RFC 3986, we could use BufRead::read_line here, but that does // not allow us to cap the number of read bytes at 1024+2. let result = loop { let bytes_read = if let Ok(read) = self.stream.read(buf).await { read
{ panic!("Failed to listen on {addr}: {e}") }
conditional_block
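The certificate bootstrap in the argument parsing above can be exercised on its own. A reduced sketch using the same rcgen calls the code relies on (CertificateParams::new, Certificate::from_params, serialize_der, serialize_private_key_der); the domain is a placeholder and writing the files to disk is left out:

use rcgen::{Certificate, CertificateParams, DnType, RcgenError};

fn generate_self_signed(domain: &str) -> Result<(Vec<u8>, Vec<u8>), RcgenError> {
    let mut params = CertificateParams::new(vec![domain.to_string()]);
    params.distinguished_name.push(DnType::CommonName, domain);
    // the default validity already lies in the far future (4096-01-01)
    let cert = Certificate::from_params(params)?;
    // DER-encoded certificate and private key, ready to be written to disk
    Ok((cert.serialize_der()?, cert.serialize_private_key_der()))
}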
main.rs
log::warn!("{}", log_line); } } }); } })) }; #[cfg(unix)] for socketpath in &ARGS.sockets { let arc = mimetypes.clone(); if socketpath.exists() && socketpath.metadata() .expect("Failed to get existing socket metadata") .file_type() .is_socket() { log::warn!("Socket already exists, attempting to remove {}", socketpath.display()); let _ = std::fs::remove_file(socketpath); } let listener = match UnixListener::bind(socketpath) { Err(e) => { panic!("Failed to listen on {}: {}", socketpath.display(), e) } Ok(listener) => listener, }; handles.push(tokio::spawn(async move { log::info!("Started listener on {}", socketpath.display()); loop { let (stream, _) = listener.accept().await.unwrap_or_else(|e| { panic!("could not accept new connection on {}: {}", socketpath.display(), e) }); let arc = arc.clone(); tokio::spawn(async { match RequestHandle::new_unix(stream, arc).await { Ok(handle) => match handle.handle().await { Ok(info) => log::info!("{}", info), Err(err) => log::warn!("{}", err), }, Err(log_line) => { log::warn!("{}", log_line); } } }); } })) }; futures_util::future::join_all(handles).await; }); } type Result<T = (), E = Box<dyn Error + Send + Sync>> = std::result::Result<T, E>; static ARGS: Lazy<Args> = Lazy::new(|| { args().unwrap_or_else(|s| { eprintln!("{s}"); std::process::exit(1); }) }); struct Args { addrs: Vec<SocketAddr>, #[cfg(unix)] sockets: Vec<PathBuf>, content_dir: PathBuf, certs: Arc<certificates::CertStore>, hostnames: Vec<Host>, language: Option<String>, serve_secret: bool, log_ips: bool, only_tls13: bool, central_config: bool, skip_port_check: bool, } fn args() -> Result<Args> { let args: Vec<String> = std::env::args().collect(); let mut opts = getopts::Options::new(); opts.optopt( "", "content", "Root of the content directory (default ./content/)", "DIR", ); opts.optopt( "", "certs", "Root of the certificate directory (default ./.certificates/)", "DIR", ); opts.optmulti( "", "addr", &format!("Address to listen on (default 0.0.0.0:{DEFAULT_PORT} and [::]:{DEFAULT_PORT}; multiple occurrences means listening on multiple interfaces)"), "IP:PORT", ); #[cfg(unix)] opts.optmulti( "", "socket", "Unix socket to listen on (multiple occurrences means listening on multiple sockets)", "PATH", ); opts.optmulti( "", "hostname", "Domain name of this Gemini server, enables checking hostname and port in requests. (multiple occurrences means basic vhosts)", "NAME", ); opts.optopt( "", "lang", "RFC 4646 Language code for text/gemini documents", "LANG", ); opts.optflag("h", "help", "Print this help text and exit."); opts.optflag("V", "version", "Print version information and exit."); opts.optflag( "3", "only-tls13", "Only use TLSv1.3 (default also allows TLSv1.2)", ); opts.optflag( "", "serve-secret", "Enable serving secret files (files/directories starting with a dot)", ); opts.optflag("", "log-ip", "Output the remote IP address when logging."); opts.optflag( "C", "central-conf", "Use a central .meta file in the content root directory.
Decentral config files will be ignored.", ); opts.optflag( "e", "ed25519", "Generate keys using the Ed25519 signature algorithm instead of the default ECDSA.", ); opts.optflag( "", "skip-port-check", "Skip URL port check even when a hostname is specified.", ); let matches = opts.parse(&args[1..]).map_err(|f| f.to_string())?; if matches.opt_present("h") { eprintln!("{}", opts.usage(&format!("Usage: {} [options]", &args[0]))); std::process::exit(0); } if matches.opt_present("V") { eprintln!("agate {}", env!("CARGO_PKG_VERSION")); std::process::exit(0); } // try to open the certificate directory let certs_path = matches.opt_get_default("certs", ".certificates".to_string())?; let (certs, certs_path) = match check_path(certs_path.clone()) { // the directory exists, try to load certificates Ok(certs_path) => match certificates::CertStore::load_from(&certs_path) { // all is good Ok(certs) => (Some(certs), certs_path), // the certificate directory did not contain certificates, but we can generate some // because the hostname option was given Err(certificates::CertLoadError::Empty) if matches.opt_present("hostname") => { (None, certs_path) } // failed loading certificates or missing hostname to generate them Err(e) => return Err(e.into()), }, // the directory does not exist Err(_) => { // since certificate management should be automated, we are going to create the directory too log::info!( "The certificate directory {:?} does not exist, creating it.", certs_path ); std::fs::create_dir(&certs_path).expect("could not create certificate directory"); // we just created the directory, skip loading from it (None, PathBuf::from(certs_path)) } }; // If we have not loaded any certificates yet, we have to try to reload them later. // This ensures we get the right error message. 
let mut reload_certs = certs.is_none(); let mut hostnames = vec![]; for s in matches.opt_strs("hostname") { // normalize hostname, add punycoding if necessary let hostname = Host::parse(&s)?; // check if we have a certificate for that domain if let Host::Domain(ref domain) = hostname { if !matches!(certs, Some(ref certs) if certs.has_domain(domain)) { log::info!("No certificate or key found for {:?}, generating them.", s); let mut cert_params = CertificateParams::new(vec![domain.clone()]); cert_params .distinguished_name .push(DnType::CommonName, domain); // <CertificateParams as Default>::default() already implements a // date in the far future from the time of writing: 4096-01-01 if matches.opt_present("e") { cert_params.alg = &rcgen::PKCS_ED25519; } // generate the certificate with the configuration let cert = Certificate::from_params(cert_params)?; // make sure the certificate directory exists fs::create_dir(certs_path.join(domain))?; // write certificate data to disk let mut cert_file = File::create(certs_path.join(format!( "{}/{}", domain, certificates::CERT_FILE_NAME )))?; cert_file.write_all(&cert.serialize_der()?)?; // write key data to disk let key_file_path = certs_path.join(format!("{}/{}", domain, certificates::KEY_FILE_NAME)); let mut key_file = File::create(&key_file_path)?; #[cfg(unix)] { // set permissions so only owner can read match key_file.set_permissions(std::fs::Permissions::from_mode(0o400)) { Ok(_) => (), Err(_) => log::warn!( "could not set permissions for new key file {}", key_file_path.display() ), } } key_file.write_all(&cert.serialize_private_key_der())?; reload_certs = true; } } hostnames.push(hostname); } // if new certificates were generated, reload the certificate store let certs = if reload_certs { certificates::CertStore::load_from(&certs_path)? } else { // there must already have been certificates loaded certs.unwrap() }; // parse listening addresses let mut addrs = vec![]; for i in matches.opt_strs("addr") { addrs.push(i.parse()?); } #[cfg_attr(not(unix), allow(unused_mut))] let mut empty = addrs.is_empty(); #[cfg(unix)] let mut sockets = vec![]; #[cfg(unix)] { for i in matches.opt_strs("socket") { sockets.push(i.parse()?); } empty &= sockets.is_empty(); } if empty { addrs = vec![ SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), DEFAULT_PORT), SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), DEFAULT_PORT), ]; } Ok(Args { addrs, #[cfg(unix)] sockets, content_dir: check_path(matches.opt_get_default("content", "content".into())?)?, certs: Arc::new(certs), hostnames, language: matches.opt_str("lang"), serve_secret: matches.opt_present("serve-secret"), log_ips: matches.opt_present("log-ip"), only_tls13: matches.opt_present("only-tls13"), central_config: matches.opt_present("central-conf"), skip_port_check: matches.opt_present("skip-port-check"), }) } fn check_path(s: String) -> Result<PathBuf, String> { let p = PathBuf::from(s); if p.as_path().exists() { Ok(p) } else { Err(format!("No such file: {p:?}")) } } /// TLS configuration. static TLS: Lazy<TlsAcceptor> = Lazy::new(acceptor); fn acceptor() -> TlsAcceptor { let config = if ARGS.only_tls13 { ServerConfig::builder() .with_safe_default_cipher_suites() .with_safe_default_kx_groups() .with_protocol_versions(&[&rustls::version::TLS13]) .expect("could not build server config") } else { ServerConfig::builder().with_safe_defaults() } .with_no_client_auth() .with_cert_resolver(ARGS.certs.clone()); TlsAcceptor::from(Arc::new(config)) } struct
<T> { stream: TlsStream<T>, local_port_check: Option<u16>, log_line: String, metadata: Arc<Mutex<FileOptions>>, } impl RequestHandle<TcpStream> { /// Creates a new request handle for the given stream. If establishing the TLS /// session fails, returns a corresponding log line. async fn new(stream: TcpStream, metadata: Arc<Mutex<FileOptions>>) -> Result<Self, String> { let local_addr = stream.local_addr().unwrap().to_string(); // try to get the remote IP address if desired let peer_addr = if ARGS.log_ips { stream .peer_addr() .map_err(|_| { format!( // use nonexistent status code 01 if peer IP is unknown "{local_addr} - \"\" 01 \"IP error\" error:could not get peer address", ) })? .ip() .to_string() } else { // Do not log IP address, but something else so columns still line up. "-".into() }; let log_line = format!("{local_addr} {peer_addr}",); let local_port_check = if ARGS.skip_port_check { None } else { Some(stream.local_addr().unwrap().port()) }; match TLS.accept(stream).await { Ok(stream) => Ok(Self { stream, local_port_check, log_line, metadata, }), // use nonexistent status code 00 if connection was not established Err(e) => Err(format!("{log_line} \"\" 00 \"TLS error\" error:{e}")), } } } #[cfg(unix)] impl RequestHandle<UnixStream> { async fn new_unix( stream: UnixStream, metadata: Arc<Mutex<FileOptions>>, ) -> Result<Self, String> { let log_line = format!( "unix:{} -", stream .local_addr() .ok() .and_then(|addr| Some(addr.as_pathname()?.to_string_lossy().into_owned())) .unwrap_or_default() ); match TLS.accept(stream).await { Ok(stream) => Ok(Self { stream, // TODO add port check for unix sockets, requires extra arg for port local_port_check: None, log_line, metadata, }), // use nonexistent status code 00 if connection was not established Err(e) => Err(format!("{} \"\" 00 \"TLS error\" error:{}", log_line, e)), } } } impl<T> RequestHandle<T> where T: AsyncWriteExt + AsyncReadExt + Unpin, { /// Do the necessary actions to handle this request. Returns a corresponding /// log line as Err or Ok, depending on if the request finished with or /// without errors. async fn handle(mut self) -> Result<String, String> { // not already in error condition let result = match self.parse_request().await { Ok(url) => self.send_response(url).await, Err((status, msg)) => self.send_header(status, msg).await, }; let close_result = self.stream.shutdown().await; match (result, close_result) { (Err(e), _) => Err(format!("{} error:{}", self.log_line, e)), (Ok(_), Err(e)) => Err(format!("{} error:{}", self.log_line, e)), (Ok(_), Ok(_)) => Ok(self.log_line), } } /// Return the URL requested by the client. async fn parse_request(&mut self) -> std::result::Result<Url, (u8, &'static str)> { // Because requests are limited to 1024 bytes (plus 2 bytes for CRLF), we // can use a fixed-sized buffer on the stack, avoiding allocations and // copying, and stopping bad clients from making us use too much memory. let mut request = [0; 1026]; let mut buf = &mut request[..]; let mut len = 0; // Read until CRLF, end-of-stream, or there's no buffer space left. // // Since neither CR nor LF can be part of a URI according to // ISOC-RFC 3986, we could use BufRead::read_line here, but that does // not allow us to cap the number of read bytes at 1024+2. 
let result = loop { let bytes_read = if let Ok(read) = self.stream.read(buf).await { read } else { break Err((BAD_REQUEST, "Request ended unexpectedly")); }; len += bytes_read; if request[..len].ends_with(b"\r\n") { break Ok(()); } else if bytes_read == 0 { break Err((BAD_REQUEST, "Request ended unexpectedly")); } buf = &mut request[len..]; } .and_then(|()| { std::str::from_utf8(&request[..len - 2]).or(Err((BAD_REQUEST, "Non-UTF-8 request"))) }); let request = result.map_err(|e| { // write empty request to log line for uniformity write!(self.log_line, " \"\"").unwrap(); e })?; // log literal request (might be different from or not an actual URL) write!(self.log_line, " \"{request}\"").unwrap(); let mut url = Url::parse(request).or(Err((BAD_REQUEST, "Invalid URL")))?; // Validate the URL: // correct scheme if url.scheme() != "gemini" { return Err((PROXY_REQUEST_REFUSED, "Unsupported URL scheme")); } // no userinfo and no fragment if url.password().is_some() || !url.username().is_empty() || url.fragment().is_some() { return Err((BAD_REQUEST, "URL contains fragment or userinfo")); } // correct host if let Some(domain) = url.domain() { // because the gemini scheme is not special enough for WHATWG, normalize // it ourselves let host = Host::parse( &percent_decode_str(domain) .decode_utf8() .or(Err((BAD_REQUEST, "Invalid URL")))?, )
RequestHandle
identifier_name
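The bounded read loop in parse_request is the subtle part: it caps a request at 1024 bytes plus CRLF using a stack buffer and no allocation. A standalone sketch over any async stream, assuming tokio's AsyncReadExt as in the code above:

use tokio::io::AsyncReadExt;

async fn read_request<S: AsyncReadExt + Unpin>(stream: &mut S) -> Result<String, &'static str> {
    // 1024 bytes of request plus 2 bytes for the terminating CRLF
    let mut request = [0u8; 1026];
    let mut len = 0;
    loop {
        let n = stream.read(&mut request[len..]).await.map_err(|_| "read error")?;
        len += n;
        if request[..len].ends_with(b"\r\n") {
            break;
        }
        // end of stream, or the buffer is full without a CRLF in sight
        // (reading into a full buffer returns 0 bytes, so both cases land here)
        if n == 0 {
            return Err("request ended unexpectedly");
        }
    }
    // drop the CRLF and insist on UTF-8
    std::str::from_utf8(&request[..len - 2])
        .map(str::to_owned)
        .map_err(|_| "non-UTF-8 request")
}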
certificate_manager.rs
// Copyright (c) Microsoft. All rights reserved. use std::sync::{Arc, RwLock}; use std::time::{Duration, Instant}; use futures::future::Either; #[cfg(unix)] use openssl::pkcs12::Pkcs12; #[cfg(unix)] use openssl::pkey::PKey; #[cfg(unix)] use openssl::stack::Stack; #[cfg(unix)] use openssl::x509::X509; use tokio::prelude::*; use tokio::timer::Delay;
use edgelet_core::CertificateProperties; use failure::ResultExt; pub use crate::error::{Error, ErrorKind}; pub struct CertificateManager<C: CreateCertificate + Clone> { certificate: Arc<RwLock<Option<Certificate>>>, crypto: C, props: CertificateProperties, creation_time: Instant, } #[derive(Clone)] struct Certificate { cert: String, private_key: String, } impl<C: CreateCertificate + Clone> CertificateManager<C> { pub fn new(crypto: C, props: CertificateProperties) -> Result<Self, Error> { let cert_manager = Self { certificate: Arc::new(RwLock::new(None)), crypto, props, creation_time: Instant::now(), }; { let mut cert = cert_manager .certificate .write() .expect("Locking the certificate for write failed."); let created_certificate = cert_manager.create_cert()?; *cert = Some(created_certificate); } Ok(cert_manager) } // Convenience function since native-tls does not yet support PEM // and since everything else uses PEM certificates, we want to keep // the actual storage of the certificate in the PEM format. #[cfg(unix)] pub fn get_pkcs12_certificate(&self) -> Result<Vec<u8>, Error> { let stored_cert_bundle = self.get_certificate()?; let cert = stored_cert_bundle.cert.as_bytes(); let mut certs = X509::stack_from_pem(cert).with_context(|_| ErrorKind::CertificateConversionError)?; let mut ca_certs = Stack::new().with_context(|_| ErrorKind::CertificateConversionError)?; for cert in certs.split_off(1) { ca_certs .push(cert) .with_context(|_| ErrorKind::CertificateConversionError)?; } let key = PKey::private_key_from_pem(stored_cert_bundle.private_key.as_bytes()) .expect("Error processing private key from pem"); let server_cert = &certs[0]; let mut builder = Pkcs12::builder(); builder.ca(ca_certs); let pkcs_certs = builder .build("", "", &key, &server_cert) .with_context(|_| ErrorKind::CertificateConversionError)?; Ok(pkcs_certs .to_der() .with_context(|_| ErrorKind::CertificateConversionError)?) } pub fn get_stored_cert_bytes(&self) -> Result<String, Error> { let stored_cert = self.get_certificate()?; Ok(stored_cert.cert) } pub fn schedule_expiration_timer<F>( &self, expiration_callback: F, ) -> impl Future<Item = (), Error = Error> where F: FnOnce() -> Result<(), ()> + Sync + Send + 'static, { // Now, let's set a timer to expire this certificate // expire the certificate with 2 minutes remaining in its lifetime let when = self.compute_certificate_alarm_time(); // Fail if the cert has already been expired when the call to create // a timer happens. if when < (Instant::now() + Duration::from_secs(1)) { Either::A(future::err(Error::from( ErrorKind::CertificateTimerCreationError, ))) } else { Either::B( Delay::new(when) .map_err(|_| Error::from(ErrorKind::CertificateTimerCreationError)) .and_then(move |_| match expiration_callback() { Ok(_) => Ok(()), Err(_) => Err(Error::from(ErrorKind::CertificateTimerRuntimeError)), }), ) } } fn get_certificate(&self) -> Result<Certificate, Error> { // Try to directly read let stored_cert = self .certificate .read() .expect("Locking the certificate for read failed."); match stored_cert.as_ref() { Some(stored_cert) => Ok(stored_cert.clone()), None => Err(Error::from(ErrorKind::CertificateNotFound)), } } fn create_cert(&self) -> Result<Certificate, Error> { // In some use cases, the CA cert might change - to protect against that, // we will retry once (after attempting to delete) if the cert creation fails.
let cert = if let Ok(val) = self.crypto.create_certificate(&self.props) { val } else { self.crypto .destroy_certificate(self.props.alias().to_string()) .with_context(|_| ErrorKind::CertificateDeletionError)?; self.crypto .create_certificate(&self.props) .with_context(|_| ErrorKind::CertificateCreationError)? }; let cert_pem = cert .pem() .with_context(|_| ErrorKind::CertificateCreationError)?; let cert_private_key = cert .get_private_key() .with_context(|_| ErrorKind::CertificateCreationError)?; let pk = match cert_private_key { Some(pk) => pk, None => panic!("Unable to acquire a private key."), }; // Our implementations do not return a ref, and if they did, it would be unusable by Tokio // a ref simply is a label/alias to a private key, not the actual bits. let pk_bytes = match pk { PrivateKey::Ref(_) => panic!( "A reference private key does not contain the bits needed for the TLS certificate." ), PrivateKey::Key(KeyBytes::Pem(k)) => k, }; let cert_str = String::from_utf8(cert_pem.as_ref().to_vec()) .with_context(|_| ErrorKind::CertificateCreationError)?; let key_str = String::from_utf8(pk_bytes.as_bytes().to_vec()) .with_context(|_| ErrorKind::CertificateCreationError)?; Ok(Certificate { cert: cert_str, private_key: key_str, }) } // Determine when to sound the alarm and renew the certificate. #[allow(clippy::cast_possible_truncation)] #[allow(clippy::cast_sign_loss)] #[allow(clippy::cast_precision_loss)] fn compute_certificate_alarm_time(&self) -> Instant { self.creation_time + Duration::from_secs((*self.props.validity_in_secs() as f64 * 0.95) as u64) } #[cfg(test)] fn has_certificate(&self) -> bool { !self .certificate .read() .expect("Locking the certificate for read failed.") .is_none() } } #[cfg(test)] mod tests { use super::{CertificateManager, ErrorKind, Future}; use edgelet_core::crypto::{KeyBytes, PrivateKey}; use edgelet_core::{CertificateProperties, CertificateType}; use chrono::{DateTime, Utc}; use edgelet_core::{ Certificate as CoreCertificate, CertificateProperties as CoreCertificateProperties, CreateCertificate as CoreCreateCertificate, Error as CoreError, PrivateKey as CorePrivateKey, }; #[test] pub fn test_cert_manager_pem_has_cert() { let crypto = TestCrypto::new().unwrap(); let edgelet_cert_props = CertificateProperties::new( 123_456, "IOTEDGED_TLS_COMMONNAME".to_string(), CertificateType::Server, "iotedge-tls".to_string(), ); let manager = CertificateManager::new(crypto, edgelet_cert_props).unwrap(); let cert = manager.get_certificate().unwrap(); assert_eq!(cert.cert, "test".to_string()); assert_eq!(manager.has_certificate(), true); } #[test] pub fn test_cert_manager_expired_timer_creation() { let crypto = TestCrypto::new().unwrap(); let edgelet_cert_props = CertificateProperties::new( 1, // 150 second validity "IOTEDGED_TLS_COMMONNAME".to_string(), CertificateType::Server, "iotedge-tls".to_string(), ); let manager = CertificateManager::new(crypto, edgelet_cert_props).unwrap(); let _timer = manager.schedule_expiration_timer(|| Ok(())); } #[test] pub fn test_cert_manager_expired_timer_creation_fails() { let crypto = TestCrypto::new().unwrap(); let edgelet_cert_props = CertificateProperties::new( 50, // 50 second validity "IOTEDGED_TLS_COMMONNAME".to_string(), CertificateType::Server, "iotedge-tls".to_string(), ); let manager = CertificateManager::new(crypto, edgelet_cert_props).unwrap(); let timer = manager.schedule_expiration_timer(|| Ok(())).wait(); match timer { Ok(_) => panic!("Should not be okay to create this timer..."), Err(err) => { if let 
ErrorKind::CertificateTimerCreationError = err.kind() { assert_eq!(true, true); } else { panic!( "Expected a CertificateTimerCreationError type, but got {:?}", err ); } } } } #[derive(Clone)] struct TestCrypto { created: bool, } impl TestCrypto { pub fn new() -> Result<Self, CoreError> { Ok(Self { created: true }) } } impl CoreCreateCertificate for TestCrypto { type Certificate = TestCertificate; fn create_certificate( &self, _properties: &CoreCertificateProperties, ) -> Result<Self::Certificate, CoreError> { Ok(TestCertificate {}) } fn destroy_certificate(&self, _alias: String) -> Result<(), CoreError> { Ok(()) } fn get_certificate(&self, _alias: String) -> Result<Self::Certificate, CoreError> { Ok(TestCertificate {}) } } struct TestCertificate {} impl CoreCertificate for TestCertificate { type Buffer = String; type KeyBuffer = Vec<u8>; fn pem(&self) -> Result<Self::Buffer, CoreError> { Ok("test".to_string()) } fn get_private_key(&self) -> Result<Option<CorePrivateKey<Self::KeyBuffer>>, CoreError> { Ok(Some(PrivateKey::Key(KeyBytes::Pem( "akey".to_string().as_bytes().to_vec(), )))) } fn get_valid_to(&self) -> Result<DateTime<Utc>, CoreError> { Ok(DateTime::parse_from_rfc3339("2025-12-19T16:39:57-08:00") .unwrap() .with_timezone(&Utc)) } fn get_common_name(&self) -> Result<String, CoreError> { Ok("IOTEDGED_TLS_COMMONNAME".to_string()) } } }
use edgelet_core::crypto::{ Certificate as CryptoCertificate, CreateCertificate, KeyBytes, PrivateKey, Signature, };
random_line_split
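The renewal alarm in compute_certificate_alarm_time fires at 95% of the certificate's validity window, measured from the manager's creation time. The arithmetic in isolation:

use std::time::{Duration, Instant};

fn alarm_time(creation: Instant, validity_in_secs: u64) -> Instant {
    // renew at 95% of the lifetime, leaving a safety margin before expiry
    creation + Duration::from_secs((validity_in_secs as f64 * 0.95) as u64)
}

For a 24-hour certificate this schedules the renewal callback roughly 72 minutes before expiry; schedule_expiration_timer then refuses to arm the timer if that instant is already less than a second away.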
certificate_manager.rs
// Copyright (c) Microsoft. All rights reserved. use std::sync::{Arc, RwLock}; use std::time::{Duration, Instant}; use futures::future::Either; #[cfg(unix)] use openssl::pkcs12::Pkcs12; #[cfg(unix)] use openssl::pkey::PKey; #[cfg(unix)] use openssl::stack::Stack; #[cfg(unix)] use openssl::x509::X509; use tokio::prelude::*; use tokio::timer::Delay; use edgelet_core::crypto::{ Certificate as CryptoCertificate, CreateCertificate, KeyBytes, PrivateKey, Signature, }; use edgelet_core::CertificateProperties; use failure::ResultExt; pub use crate::error::{Error, ErrorKind}; pub struct CertificateManager<C: CreateCertificate + Clone> { certificate: Arc<RwLock<Option<Certificate>>>, crypto: C, props: CertificateProperties, creation_time: Instant, } #[derive(Clone)] struct Certificate { cert: String, private_key: String, } impl<C: CreateCertificate + Clone> CertificateManager<C> { pub fn new(crypto: C, props: CertificateProperties) -> Result<Self, Error> { let cert_manager = Self { certificate: Arc::new(RwLock::new(None)), crypto, props, creation_time: Instant::now(), }; { let mut cert = cert_manager .certificate .write() .expect("Locking the certificate for write failed."); let created_certificate = cert_manager.create_cert()?; *cert = Some(created_certificate); } Ok(cert_manager) } // Convenience function since native-tls does not yet support PEM // and since everything else uses PEM certificates, we want to keep // the actual storage of the certificate in the PEM format. #[cfg(unix)] pub fn get_pkcs12_certificate(&self) -> Result<Vec<u8>, Error> { let stored_cert_bundle = self.get_certificate()?; let cert = stored_cert_bundle.cert.as_bytes(); let mut certs = X509::stack_from_pem(cert).with_context(|_| ErrorKind::CertificateConversionError)?; let mut ca_certs = Stack::new().with_context(|_| ErrorKind::CertificateConversionError)?; for cert in certs.split_off(1) { ca_certs .push(cert) .with_context(|_| ErrorKind::CertificateConversionError)?; } let key = PKey::private_key_from_pem(stored_cert_bundle.private_key.as_bytes()) .expect("Error processing private key from pem"); let server_cert = &certs[0]; let mut builder = Pkcs12::builder(); builder.ca(ca_certs); let pkcs_certs = builder .build("", "", &key, &server_cert) .with_context(|_| ErrorKind::CertificateConversionError)?; Ok(pkcs_certs .to_der() .with_context(|_| ErrorKind::CertificateConversionError)?) } pub fn get_stored_cert_bytes(&self) -> Result<String, Error> { let stored_cert = self.get_certificate()?; Ok(stored_cert.cert) } pub fn schedule_expiration_timer<F>( &self, expiration_callback: F, ) -> impl Future<Item = (), Error = Error> where F: FnOnce() -> Result<(), ()> + Sync + Send + 'static, { // Now, let's set a timer to expire this certificate // expire the certificate with 2 minutes remaining in its lifetime let when = self.compute_certificate_alarm_time(); // Fail if the cert has already been expired when the call to create // a timer happens.
if when < (Instant::now() + Duration::from_secs(1)) { Either::A(future::err(Error::from( ErrorKind::CertificateTimerCreationError, ))) } else { Either::B( Delay::new(when) .map_err(|_| Error::from(ErrorKind::CertificateTimerCreationError)) .and_then(move |_| match expiration_callback() { Ok(_) => Ok(()), Err(_) => Err(Error::from(ErrorKind::CertificateTimerRuntimeError)), }), ) } } fn get_certificate(&self) -> Result<Certificate, Error> { // Try to directly read let stored_cert = self .certificate .read() .expect("Locking the certificate for read failed."); match stored_cert.as_ref() { Some(stored_cert) => Ok(stored_cert.clone()), None => Err(Error::from(ErrorKind::CertificateNotFound)), } } fn create_cert(&self) -> Result<Certificate, Error> { // In some use cases, the CA cert might change - to protect against that, // we will retry once (after attempting to delete) if the cert creation fails. let cert = if let Ok(val) = self.crypto.create_certificate(&self.props) { val } else { self.crypto .destroy_certificate(self.props.alias().to_string()) .with_context(|_| ErrorKind::CertificateDeletionError)?; self.crypto .create_certificate(&self.props) .with_context(|_| ErrorKind::CertificateCreationError)? }; let cert_pem = cert .pem() .with_context(|_| ErrorKind::CertificateCreationError)?; let cert_private_key = cert .get_private_key() .with_context(|_| ErrorKind::CertificateCreationError)?; let pk = match cert_private_key { Some(pk) => pk, None => panic!("Unable to acquire a private key."), }; // Our implementations do not return a ref, and if they did, it would be unusable by Tokio // a ref simply is a label/alias to a private key, not the actual bits. let pk_bytes = match pk { PrivateKey::Ref(_) => panic!( "A reference private key does not contain the bits needed for the TLS certificate." ), PrivateKey::Key(KeyBytes::Pem(k)) => k, }; let cert_str = String::from_utf8(cert_pem.as_ref().to_vec()) .with_context(|_| ErrorKind::CertificateCreationError)?; let key_str = String::from_utf8(pk_bytes.as_bytes().to_vec()) .with_context(|_| ErrorKind::CertificateCreationError)?; Ok(Certificate { cert: cert_str, private_key: key_str, }) } // Determine when to sound the alarm and renew the certificate. 
#[allow(clippy::cast_possible_truncation)] #[allow(clippy::cast_sign_loss)] #[allow(clippy::cast_precision_loss)] fn compute_certificate_alarm_time(&self) -> Instant { self.creation_time + Duration::from_secs((*self.props.validity_in_secs() as f64 * 0.95) as u64) } #[cfg(test)] fn has_certificate(&self) -> bool { !self .certificate .read() .expect("Locking the certificate for read failed.") .is_none() } } #[cfg(test)] mod tests { use super::{CertificateManager, ErrorKind, Future}; use edgelet_core::crypto::{KeyBytes, PrivateKey}; use edgelet_core::{CertificateProperties, CertificateType}; use chrono::{DateTime, Utc}; use edgelet_core::{ Certificate as CoreCertificate, CertificateProperties as CoreCertificateProperties, CreateCertificate as CoreCreateCertificate, Error as CoreError, PrivateKey as CorePrivateKey, }; #[test] pub fn test_cert_manager_pem_has_cert() { let crypto = TestCrypto::new().unwrap(); let edgelet_cert_props = CertificateProperties::new( 123_456, "IOTEDGED_TLS_COMMONNAME".to_string(), CertificateType::Server, "iotedge-tls".to_string(), ); let manager = CertificateManager::new(crypto, edgelet_cert_props).unwrap(); let cert = manager.get_certificate().unwrap(); assert_eq!(cert.cert, "test".to_string()); assert_eq!(manager.has_certificate(), true); } #[test] pub fn test_cert_manager_expired_timer_creation() { let crypto = TestCrypto::new().unwrap(); let edgelet_cert_props = CertificateProperties::new( 1, // 150 second validity "IOTEDGED_TLS_COMMONNAME".to_string(), CertificateType::Server, "iotedge-tls".to_string(), ); let manager = CertificateManager::new(crypto, edgelet_cert_props).unwrap(); let _timer = manager.schedule_expiration_timer(|| Ok(())); } #[test] pub fn test_cert_manager_expired_timer_creation_fails() { let crypto = TestCrypto::new().unwrap(); let edgelet_cert_props = CertificateProperties::new( 50, // 50 second validity "IOTEDGED_TLS_COMMONNAME".to_string(), CertificateType::Server, "iotedge-tls".to_string(), ); let manager = CertificateManager::new(crypto, edgelet_cert_props).unwrap(); let timer = manager.schedule_expiration_timer(|| Ok(())).wait(); match timer { Ok(_) => panic!("Should not be okay to create this timer..."), Err(err) =>
} } #[derive(Clone)] struct TestCrypto { created: bool, } impl TestCrypto { pub fn new() -> Result<Self, CoreError> { Ok(Self { created: true }) } } impl CoreCreateCertificate for TestCrypto { type Certificate = TestCertificate; fn create_certificate( &self, _properties: &CoreCertificateProperties, ) -> Result<Self::Certificate, CoreError> { Ok(TestCertificate {}) } fn destroy_certificate(&self, _alias: String) -> Result<(), CoreError> { Ok(()) } fn get_certificate(&self, _alias: String) -> Result<Self::Certificate, CoreError> { Ok(TestCertificate {}) } } struct TestCertificate {} impl CoreCertificate for TestCertificate { type Buffer = String; type KeyBuffer = Vec<u8>; fn pem(&self) -> Result<Self::Buffer, CoreError> { Ok("test".to_string()) } fn get_private_key(&self) -> Result<Option<CorePrivateKey<Self::KeyBuffer>>, CoreError> { Ok(Some(PrivateKey::Key(KeyBytes::Pem( "akey".to_string().as_bytes().to_vec(), )))) } fn get_valid_to(&self) -> Result<DateTime<Utc>, CoreError> { Ok(DateTime::parse_from_rfc3339("2025-12-19T16:39:57-08:00") .unwrap() .with_timezone(&Utc)) } fn get_common_name(&self) -> Result<String, CoreError> { Ok("IOTEDGED_TLS_COMMONNAME".to_string()) } } }
{ if let ErrorKind::CertificateTimerCreationError = err.kind() { assert_eq!(true, true); } else { panic!( "Expected a CertificateTimerCreationError type, but got {:?}", err ); } }
conditional_block
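Because schedule_expiration_timer returns either an immediate error or a Delay-backed future from the same function, the two branches are unified with futures 0.1's Either, as above. A reduced sketch of that pattern, assuming the same futures 0.1 and tokio timer APIs used here:

use futures::future::{self, Either};
use futures::Future;
use std::time::{Duration, Instant};
use tokio::timer::Delay;

fn delay_or_fail(when: Instant) -> impl Future<Item = (), Error = ()> {
    if when < Instant::now() + Duration::from_secs(1) {
        // the deadline has effectively passed: fail without arming a timer
        Either::A(future::err(()))
    } else {
        Either::B(Delay::new(when).map_err(|_| ()))
    }
}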
certificate_manager.rs
// Copyright (c) Microsoft. All rights reserved. use std::sync::{Arc, RwLock}; use std::time::{Duration, Instant}; use futures::future::Either; #[cfg(unix)] use openssl::pkcs12::Pkcs12; #[cfg(unix)] use openssl::pkey::PKey; #[cfg(unix)] use openssl::stack::Stack; #[cfg(unix)] use openssl::x509::X509; use tokio::prelude::*; use tokio::timer::Delay; use edgelet_core::crypto::{ Certificate as CryptoCertificate, CreateCertificate, KeyBytes, PrivateKey, Signature, }; use edgelet_core::CertificateProperties; use failure::ResultExt; pub use crate::error::{Error, ErrorKind}; pub struct CertificateManager<C: CreateCertificate + Clone> { certificate: Arc<RwLock<Option<Certificate>>>, crypto: C, props: CertificateProperties, creation_time: Instant, } #[derive(Clone)] struct Certificate { cert: String, private_key: String, } impl<C: CreateCertificate + Clone> CertificateManager<C> { pub fn new(crypto: C, props: CertificateProperties) -> Result<Self, Error> { let cert_manager = Self { certificate: Arc::new(RwLock::new(None)), crypto, props, creation_time: Instant::now(), }; { let mut cert = cert_manager .certificate .write() .expect("Locking the certificate for write failed."); let created_certificate = cert_manager.create_cert()?; *cert = Some(created_certificate); } Ok(cert_manager) } // Convenience function since native-tls does not yet support PEM // and since everything else uses PEM certificates, we want to keep // the actual storage of the certificate in the PEM format. #[cfg(unix)] pub fn get_pkcs12_certificate(&self) -> Result<Vec<u8>, Error> { let stored_cert_bundle = self.get_certificate()?; let cert = stored_cert_bundle.cert.as_bytes(); let mut certs = X509::stack_from_pem(cert).with_context(|_| ErrorKind::CertificateConversionError)?; let mut ca_certs = Stack::new().with_context(|_| ErrorKind::CertificateConversionError)?; for cert in certs.split_off(1) { ca_certs .push(cert) .with_context(|_| ErrorKind::CertificateConversionError)?; } let key = PKey::private_key_from_pem(stored_cert_bundle.private_key.as_bytes()) .expect("Error processing private key from pem"); let server_cert = &certs[0]; let mut builder = Pkcs12::builder(); builder.ca(ca_certs); let pkcs_certs = builder .build("", "", &key, &server_cert) .with_context(|_| ErrorKind::CertificateConversionError)?; Ok(pkcs_certs .to_der() .with_context(|_| ErrorKind::CertificateConversionError)?) } pub fn get_stored_cert_bytes(&self) -> Result<String, Error> { let stored_cert = self.get_certificate()?; Ok(stored_cert.cert) } pub fn schedule_expiration_timer<F>( &self, expiration_callback: F, ) -> impl Future<Item = (), Error = Error> where F: FnOnce() -> Result<(), ()> + Sync + Send + 'static, { // Now, let's set a timer to expire this certificate // expire the certificate with 2 minutes remaining in its lifetime let when = self.compute_certificate_alarm_time(); // Fail if the cert has already been expired when the call to create // a timer happens.
if when < (Instant::now() + Duration::from_secs(1)) { Either::A(future::err(Error::from( ErrorKind::CertificateTimerCreationError, ))) } else { Either::B( Delay::new(when) .map_err(|_| Error::from(ErrorKind::CertificateTimerCreationError)) .and_then(move |_| match expiration_callback() { Ok(_) => Ok(()), Err(_) => Err(Error::from(ErrorKind::CertificateTimerRuntimeError)), }), ) } } fn get_certificate(&self) -> Result<Certificate, Error> { // Try to directly read let stored_cert = self .certificate .read() .expect("Locking the certificate for read failed."); match stored_cert.as_ref() { Some(stored_cert) => Ok(stored_cert.clone()), None => Err(Error::from(ErrorKind::CertificateNotFound)), } } fn create_cert(&self) -> Result<Certificate, Error> { // In some use cases, the CA cert might change - to protect against that, // we will retry once (after attempting to delete) if the cert creation fails. let cert = if let Ok(val) = self.crypto.create_certificate(&self.props) { val } else { self.crypto .destroy_certificate(self.props.alias().to_string()) .with_context(|_| ErrorKind::CertificateDeletionError)?; self.crypto .create_certificate(&self.props) .with_context(|_| ErrorKind::CertificateCreationError)? }; let cert_pem = cert .pem() .with_context(|_| ErrorKind::CertificateCreationError)?; let cert_private_key = cert .get_private_key() .with_context(|_| ErrorKind::CertificateCreationError)?; let pk = match cert_private_key { Some(pk) => pk, None => panic!("Unable to acquire a private key."), }; // Our implementations do not return a ref, and if they did, it would be unusable by Tokio // a ref simply is a label/alias to a private key, not the actual bits. let pk_bytes = match pk { PrivateKey::Ref(_) => panic!( "A reference private key does not contain the bits needed for the TLS certificate." ), PrivateKey::Key(KeyBytes::Pem(k)) => k, }; let cert_str = String::from_utf8(cert_pem.as_ref().to_vec()) .with_context(|_| ErrorKind::CertificateCreationError)?; let key_str = String::from_utf8(pk_bytes.as_bytes().to_vec()) .with_context(|_| ErrorKind::CertificateCreationError)?; Ok(Certificate { cert: cert_str, private_key: key_str, }) } // Determine when to sound the alarm and renew the certificate. 
#[allow(clippy::cast_possible_truncation)] #[allow(clippy::cast_sign_loss)] #[allow(clippy::cast_precision_loss)] fn compute_certificate_alarm_time(&self) -> Instant { self.creation_time + Duration::from_secs((*self.props.validity_in_secs() as f64 * 0.95) as u64) } #[cfg(test)] fn has_certificate(&self) -> bool { !self .certificate .read() .expect("Locking the certificate for read failed.") .is_none() } } #[cfg(test)] mod tests { use super::{CertificateManager, ErrorKind, Future}; use edgelet_core::crypto::{KeyBytes, PrivateKey}; use edgelet_core::{CertificateProperties, CertificateType}; use chrono::{DateTime, Utc}; use edgelet_core::{ Certificate as CoreCertificate, CertificateProperties as CoreCertificateProperties, CreateCertificate as CoreCreateCertificate, Error as CoreError, PrivateKey as CorePrivateKey, }; #[test] pub fn test_cert_manager_pem_has_cert() { let crypto = TestCrypto::new().unwrap(); let edgelet_cert_props = CertificateProperties::new( 123_456, "IOTEDGED_TLS_COMMONNAME".to_string(), CertificateType::Server, "iotedge-tls".to_string(), ); let manager = CertificateManager::new(crypto, edgelet_cert_props).unwrap(); let cert = manager.get_certificate().unwrap(); assert_eq!(cert.cert, "test".to_string()); assert_eq!(manager.has_certificate(), true); } #[test] pub fn test_cert_manager_expired_timer_creation() { let crypto = TestCrypto::new().unwrap(); let edgelet_cert_props = CertificateProperties::new( 1, // 150 second validity "IOTEDGED_TLS_COMMONNAME".to_string(), CertificateType::Server, "iotedge-tls".to_string(), ); let manager = CertificateManager::new(crypto, edgelet_cert_props).unwrap(); let _timer = manager.schedule_expiration_timer(|| Ok(())); } #[test] pub fn test_cert_manager_expired_timer_creation_fails() { let crypto = TestCrypto::new().unwrap(); let edgelet_cert_props = CertificateProperties::new( 50, // 50 second validity "IOTEDGED_TLS_COMMONNAME".to_string(), CertificateType::Server, "iotedge-tls".to_string(), ); let manager = CertificateManager::new(crypto, edgelet_cert_props).unwrap(); let timer = manager.schedule_expiration_timer(|| Ok(())).wait(); match timer { Ok(_) => panic!("Should not be okay to create this timer..."), Err(err) => { if let ErrorKind::CertificateTimerCreationError = err.kind() { assert_eq!(true, true); } else { panic!( "Expected a CertificateTimerCreationError type, but got {:?}", err ); } } } } #[derive(Clone)] struct TestCrypto { created: bool, } impl TestCrypto { pub fn new() -> Result<Self, CoreError> { Ok(Self { created: true }) } } impl CoreCreateCertificate for TestCrypto { type Certificate = TestCertificate; fn create_certificate( &self, _properties: &CoreCertificateProperties, ) -> Result<Self::Certificate, CoreError> { Ok(TestCertificate {}) } fn destroy_certificate(&self, _alias: String) -> Result<(), CoreError> { Ok(()) } fn get_certificate(&self, _alias: String) -> Result<Self::Certificate, CoreError> { Ok(TestCertificate {}) } } struct
{} impl CoreCertificate for TestCertificate { type Buffer = String; type KeyBuffer = Vec<u8>; fn pem(&self) -> Result<Self::Buffer, CoreError> { Ok("test".to_string()) } fn get_private_key(&self) -> Result<Option<CorePrivateKey<Self::KeyBuffer>>, CoreError> { Ok(Some(PrivateKey::Key(KeyBytes::Pem( "akey".to_string().as_bytes().to_vec(), )))) } fn get_valid_to(&self) -> Result<DateTime<Utc>, CoreError> { Ok(DateTime::parse_from_rfc3339("2025-12-19T16:39:57-08:00") .unwrap() .with_timezone(&Utc)) } fn get_common_name(&self) -> Result<String, CoreError> { Ok("IOTEDGED_TLS_COMMONNAME".to_string()) } } }
TestCertificate
identifier_name
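get_pkcs12_certificate above bridges the PEM storage to native-tls, which wants PKCS#12. A trimmed sketch of that conversion with the same openssl crate calls, error handling collapsed into a boxed error; the empty password and friendly name match the usage above:

use openssl::pkcs12::Pkcs12;
use openssl::pkey::PKey;
use openssl::stack::Stack;
use openssl::x509::X509;

fn pem_to_pkcs12(cert_pem: &[u8], key_pem: &[u8]) -> Result<Vec<u8>, Box<dyn std::error::Error>> {
    // the first PEM entry is the leaf certificate, the rest form the CA chain
    let mut certs = X509::stack_from_pem(cert_pem)?;
    let mut ca_certs = Stack::new()?;
    for cert in certs.split_off(1) {
        ca_certs.push(cert)?;
    }
    let key = PKey::private_key_from_pem(key_pem)?;
    let mut builder = Pkcs12::builder();
    builder.ca(ca_certs);
    let pkcs12 = builder.build("", "", &key, &certs[0])?;
    Ok(pkcs12.to_der()?)
}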
certificate_manager.rs
// Copyright (c) Microsoft. All rights reserved. use std::sync::{Arc, RwLock}; use std::time::{Duration, Instant}; use futures::future::Either; #[cfg(unix)] use openssl::pkcs12::Pkcs12; #[cfg(unix)] use openssl::pkey::PKey; #[cfg(unix)] use openssl::stack::Stack; #[cfg(unix)] use openssl::x509::X509; use tokio::prelude::*; use tokio::timer::Delay; use edgelet_core::crypto::{ Certificate as CryptoCertificate, CreateCertificate, KeyBytes, PrivateKey, Signature, }; use edgelet_core::CertificateProperties; use failure::ResultExt; pub use crate::error::{Error, ErrorKind}; pub struct CertificateManager<C: CreateCertificate + Clone> { certificate: Arc<RwLock<Option<Certificate>>>, crypto: C, props: CertificateProperties, creation_time: Instant, } #[derive(Clone)] struct Certificate { cert: String, private_key: String, } impl<C: CreateCertificate + Clone> CertificateManager<C> { pub fn new(crypto: C, props: CertificateProperties) -> Result<Self, Error> { let cert_manager = Self { certificate: Arc::new(RwLock::new(None)), crypto, props, creation_time: Instant::now(), }; { let mut cert = cert_manager .certificate .write() .expect("Locking the certificate for write failed."); let created_certificate = cert_manager.create_cert()?; *cert = Some(created_certificate); } Ok(cert_manager) } // Convenience function since native-tls does not yet support PEM // and since everything else uses PEM certificates, we want to keep // the actual storage of the certificate in the PEM format. #[cfg(unix)] pub fn get_pkcs12_certificate(&self) -> Result<Vec<u8>, Error> { let stored_cert_bundle = self.get_certificate()?; let cert = stored_cert_bundle.cert.as_bytes(); let mut certs = X509::stack_from_pem(cert).with_context(|_| ErrorKind::CertificateConversionError)?; let mut ca_certs = Stack::new().with_context(|_| ErrorKind::CertificateConversionError)?; for cert in certs.split_off(1) { ca_certs .push(cert) .with_context(|_| ErrorKind::CertificateConversionError)?; } let key = PKey::private_key_from_pem(stored_cert_bundle.private_key.as_bytes()) .expect("Error processing private key from pem"); let server_cert = &certs[0]; let mut builder = Pkcs12::builder(); builder.ca(ca_certs); let pkcs_certs = builder .build("", "", &key, &server_cert) .with_context(|_| ErrorKind::CertificateConversionError)?; Ok(pkcs_certs .to_der() .with_context(|_| ErrorKind::CertificateConversionError)?) } pub fn get_stored_cert_bytes(&self) -> Result<String, Error> { let stored_cert = self.get_certificate()?; Ok(stored_cert.cert) } pub fn schedule_expiration_timer<F>( &self, expiration_callback: F, ) -> impl Future<Item = (), Error = Error> where F: FnOnce() -> Result<(), ()> + Sync + Send + 'static, { // Now, let's set a timer to expire this certificate // expire the certificate with 2 minutes remaining in its lifetime let when = self.compute_certificate_alarm_time(); // Fail if the cert has already been expired when the call to create // a timer happens.
if when < (Instant::now() + Duration::from_secs(1)) { Either::A(future::err(Error::from( ErrorKind::CertificateTimerCreationError, ))) } else { Either::B( Delay::new(when) .map_err(|_| Error::from(ErrorKind::CertificateTimerCreationError)) .and_then(move |_| match expiration_callback() { Ok(_) => Ok(()), Err(_) => Err(Error::from(ErrorKind::CertificateTimerRuntimeError)), }), ) } } fn get_certificate(&self) -> Result<Certificate, Error> { // Try to directly read let stored_cert = self .certificate .read() .expect("Locking the certificate for read failed."); match stored_cert.as_ref() { Some(stored_cert) => Ok(stored_cert.clone()), None => Err(Error::from(ErrorKind::CertificateNotFound)), } } fn create_cert(&self) -> Result<Certificate, Error> { // In some use cases, the CA cert might change - to protect against that, // we will retry once (after attempting to delete) if the cert creation fails. let cert = if let Ok(val) = self.crypto.create_certificate(&self.props) { val } else { self.crypto .destroy_certificate(self.props.alias().to_string()) .with_context(|_| ErrorKind::CertificateDeletionError)?; self.crypto .create_certificate(&self.props) .with_context(|_| ErrorKind::CertificateCreationError)? }; let cert_pem = cert .pem() .with_context(|_| ErrorKind::CertificateCreationError)?; let cert_private_key = cert .get_private_key() .with_context(|_| ErrorKind::CertificateCreationError)?; let pk = match cert_private_key { Some(pk) => pk, None => panic!("Unable to acquire a private key."), }; // Our implementations do not return a ref, and if they did, it would be unusable by Tokio // a ref simply is a label/alias to a private key, not the actual bits. let pk_bytes = match pk { PrivateKey::Ref(_) => panic!( "A reference private key does not contain the bits needed for the TLS certificate." ), PrivateKey::Key(KeyBytes::Pem(k)) => k, }; let cert_str = String::from_utf8(cert_pem.as_ref().to_vec()) .with_context(|_| ErrorKind::CertificateCreationError)?; let key_str = String::from_utf8(pk_bytes.as_bytes().to_vec()) .with_context(|_| ErrorKind::CertificateCreationError)?; Ok(Certificate { cert: cert_str, private_key: key_str, }) } // Determine when to sound the alarm and renew the certificate. 
#[allow(clippy::cast_possible_truncation)] #[allow(clippy::cast_sign_loss)] #[allow(clippy::cast_precision_loss)] fn compute_certificate_alarm_time(&self) -> Instant { self.creation_time + Duration::from_secs((*self.props.validity_in_secs() as f64 * 0.95) as u64) } #[cfg(test)] fn has_certificate(&self) -> bool { !self .certificate .read() .expect("Locking the certificate for read failed.") .is_none() } } #[cfg(test)] mod tests { use super::{CertificateManager, ErrorKind, Future}; use edgelet_core::crypto::{KeyBytes, PrivateKey}; use edgelet_core::{CertificateProperties, CertificateType}; use chrono::{DateTime, Utc}; use edgelet_core::{ Certificate as CoreCertificate, CertificateProperties as CoreCertificateProperties, CreateCertificate as CoreCreateCertificate, Error as CoreError, PrivateKey as CorePrivateKey, }; #[test] pub fn test_cert_manager_pem_has_cert() { let crypto = TestCrypto::new().unwrap(); let edgelet_cert_props = CertificateProperties::new( 123_456, "IOTEDGED_TLS_COMMONNAME".to_string(), CertificateType::Server, "iotedge-tls".to_string(), ); let manager = CertificateManager::new(crypto, edgelet_cert_props).unwrap(); let cert = manager.get_certificate().unwrap(); assert_eq!(cert.cert, "test".to_string()); assert_eq!(manager.has_certificate(), true); } #[test] pub fn test_cert_manager_expired_timer_creation() { let crypto = TestCrypto::new().unwrap(); let edgelet_cert_props = CertificateProperties::new( 1, // 150 second validity "IOTEDGED_TLS_COMMONNAME".to_string(), CertificateType::Server, "iotedge-tls".to_string(), ); let manager = CertificateManager::new(crypto, edgelet_cert_props).unwrap(); let _timer = manager.schedule_expiration_timer(|| Ok(())); } #[test] pub fn test_cert_manager_expired_timer_creation_fails() { let crypto = TestCrypto::new().unwrap(); let edgelet_cert_props = CertificateProperties::new( 50, // 50 second validity "IOTEDGED_TLS_COMMONNAME".to_string(), CertificateType::Server, "iotedge-tls".to_string(), ); let manager = CertificateManager::new(crypto, edgelet_cert_props).unwrap(); let timer = manager.schedule_expiration_timer(|| Ok(())).wait(); match timer { Ok(_) => panic!("Should not be okay to create this timer..."), Err(err) => { if let ErrorKind::CertificateTimerCreationError = err.kind() { assert_eq!(true, true); } else { panic!( "Expected a CertificateTimerCreationError type, but got {:?}", err ); } } } } #[derive(Clone)] struct TestCrypto { created: bool, } impl TestCrypto { pub fn new() -> Result<Self, CoreError> { Ok(Self { created: true }) } } impl CoreCreateCertificate for TestCrypto { type Certificate = TestCertificate; fn create_certificate( &self, _properties: &CoreCertificateProperties, ) -> Result<Self::Certificate, CoreError> { Ok(TestCertificate {}) } fn destroy_certificate(&self, _alias: String) -> Result<(), CoreError> { Ok(()) } fn get_certificate(&self, _alias: String) -> Result<Self::Certificate, CoreError> { Ok(TestCertificate {}) } } struct TestCertificate {} impl CoreCertificate for TestCertificate { type Buffer = String; type KeyBuffer = Vec<u8>; fn pem(&self) -> Result<Self::Buffer, CoreError>
fn get_private_key(&self) -> Result<Option<CorePrivateKey<Self::KeyBuffer>>, CoreError> { Ok(Some(PrivateKey::Key(KeyBytes::Pem( "akey".to_string().as_bytes().to_vec(), )))) } fn get_valid_to(&self) -> Result<DateTime<Utc>, CoreError> { Ok(DateTime::parse_from_rfc3339("2025-12-19T16:39:57-08:00") .unwrap() .with_timezone(&Utc)) } fn get_common_name(&self) -> Result<String, CoreError> { Ok("IOTEDGED_TLS_COMMONNAME".to_string()) } } }
{ Ok("test".to_string()) }
identifier_body
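The schedule logic above refuses timers that would fire within one second, and compute_certificate_alarm_time places the renewal alarm at 95% of the certificate's validity window. A minimal, self-contained sketch of that arithmetic (`alarm_time` is an illustrative stand-in, not the edgelet API):

```rust
use std::time::{Duration, Instant};

// Sketch: renewal alarm at 95% of the certificate validity window.
fn alarm_time(creation_time: Instant, validity_in_secs: u64) -> Instant {
    // f64 math mirrors the original; second-level precision is enough here.
    creation_time + Duration::from_secs((validity_in_secs as f64 * 0.95) as u64)
}

fn main() {
    let now = Instant::now();
    // A 1-hour certificate is renewed after 57 minutes (3420 s)...
    assert_eq!(alarm_time(now, 3600), now + Duration::from_secs(3420));
    // ...while a 1-second certificate lands inside the "too soon" window
    // that schedule_expiration_timer rejects.
    assert!(alarm_time(now, 1) < now + Duration::from_secs(1));
}
```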
main.rs
use anyhow::{Context, Result}; use std::cell::RefCell; use std::collections::HashMap; use std::fs; use std::path::Path; use std::rc::{Rc, Weak}; use structopt::StructOpt; use xml::{ attribute::OwnedAttribute, name::OwnedName, reader::{EventReader, XmlEvent}, }; #[derive(Debug, StructOpt)] #[structopt(name = "ooxml", about = "An example of parsing docx")] struct Opt { /// Specify the file name of the .docx, e.g. demo.docx #[structopt()] file_name: String, /// Activate verbose mode #[structopt(short, long)] verbose: bool, } /// Run /// ```sh /// cargo run -- demo.docx /// ``` /// Prints the text, along with its font color values. fn main() -> Result<()> { let opt = Opt::from_args(); let file_name = Path::new(&opt.file_name); let file = fs::File::open(file_name).with_context(|| format!("open file {:?} err", file_name))?; // Create a zip Archive over this file let mut archive = zip::ZipArchive::new(file).context("create zip archive err")?; for i in 0..archive.len() { let file = archive.by_index(i).unwrap(); if opt.verbose { println!("filename: {}", file.name()); } } // Parse the main document directly: word/document.xml // TODO the path is hard-coded here; properly we should first parse [Content_types].xml to locate the main document. let word_doc = archive .by_name("word/document.xml") .context("found no word/document.xml")?; // xml parse let mut doc_parsing = MainDocParsing::new(); let parser = EventReader::new(word_doc); let mut depth = 0; for e in parser { let event = e.context("xml parser got err")?; match event { XmlEvent::StartElement { name, attributes, namespace: _, } => { // Debug output if opt.verbose { print_xml_owned_name(&name, depth, true); } depth += 1; // A new element starts parsing doc_parsing.feed_element(name, attributes); } XmlEvent::EndElement { name } => { depth -= 1; // Debug output if opt.verbose { print_xml_owned_name(&name, depth, false); } // The current element has finished parsing doc_parsing.fish_feed_element(); } XmlEvent::Comment(_) => {} XmlEvent::CData(_) => {} XmlEvent::Characters(data) => { // Debug output if opt.verbose { println!(r#"{}Characters("{}")"#, " ".repeat(depth), data,); } // Append the text data to the current element doc_parsing.feed_characters(data); } XmlEvent::Whitespace(_) => {} _ => { // TODO } } } // Print the font colors and text content found in the document print_elements(&doc_parsing.root, opt.verbose); Ok(()) } /// Debug helper: print an element fn print_xml_owned_name(name: &OwnedName, indent: usize, start: bool) { print!("{}", " ".repeat(indent)); if start { print!("+"); } else { print!("-"); } if let Some(v) = &name.prefix { print!("{}:", v); } println!("{}", name.local_name); } /// Element types of the main document that we support. /// The raw form (e.g. w:t) is kept as a String purely for easier debugging.
#[derive(Debug)] enum ElementType { Document(String), Body(String), Paragraph(String), Run(String), Text(String), /// Properties ParagraphProperty(String), RunProperty(String), Color(String), /// All remaining, unsupported types Unknown(String), } impl ElementType { /// Build an ElementType from an xml OwnedName fn from_name(name: &OwnedName) -> Self { let raw = format!( "{}:{}", name.prefix.as_ref().unwrap_or(&String::new()), name.local_name ); // Currently only the `w:xxx` form is recognized, and only some of those tags if name.prefix.is_none() || name.prefix.as_ref().unwrap().ne("w") { return Self::Unknown(raw); } match &*name.local_name { "document" => Self::Document(raw), "body" => Self::Body(raw), "p" => Self::Paragraph(raw), "r" => Self::Run(raw), "t" => Self::Text(raw), "pPr" => Self::ParagraphProperty(raw), "rPr" => Self::RunProperty(raw), "color" => Self::Color(raw), _ => Self::Unknown(raw), } } /// Whether this is the Text type (w:t) fn is_text(&self) -> bool { matches!(self, Self::Text(_)) } /// Whether this is a Run property (w:rPr) fn is_run_property(&self) -> bool { matches!(self, Self::RunProperty(_)) } /// Whether this is the Color type (color) fn is_color(&self) -> bool { matches!(self, Self::Color(_)) } } /// An element in the main document. struct Element { element_type: ElementType, parent: Option<Weak<RefCell<Element>>>, children: Vec<Rc<RefCell<Element>>>, attributes: HashMap<String, String>, literal_text: Option<String>, // currently only w:t has this depth: usize, // for debug } impl Element { /// Create a new Element; the parent and type must be given, and parent may be None fn new( element_type: ElementType, parent: &Option<Rc<RefCell<Element>>>, attributes: Vec<OwnedAttribute>, depth: usize, ) -> Self { let mut attrs = HashMap::new(); attributes.iter().for_each(|v| { attrs.insert(v.name.local_name.clone(), v.value.clone()); }); Self { element_type, parent: parent.as_ref().map(Rc::downgrade), children: vec![], attributes: attrs, literal_text: None, depth, } } fn append_child(&mut self, child: Rc<RefCell<Element>>) { self.children.push(child); } // Some helper methods follow /// Find the run property nearest to this node fn find_run_property(element: &Option<Rc<RefCell<Element>>>) -> Option<Rc<RefCell<Element>>> { if let Some(ele) = element { if let Some(parent) = &ele.borrow().parent { if let Some(parent) = parent.upgrade() { // find run property from parent's children for child in parent.borrow().children.iter() { if child.borrow().element_type.is_run_property() { return Some(Rc::clone(child)); } } // if not found, go up return Self::find_run_property(&Some(parent)); } } } None } /// If this element is a run property, extract the color attribute from it fn get_color(element: &Option<Rc<RefCell<Element>>>) -> Option<String> { if let Some(ele) = &element { // not itself a run property if !ele.borrow().element_type.is_run_property() { return None; } // look for w:color among the children for child in ele.borrow().children.iter() { let child_ref = child.borrow(); if child_ref.element_type.is_color() { return child_ref.attributes.get("val").cloned(); } } } None } fn display(root: &Option<Rc<RefCell<Element>>>) -> String { if let Some(root_rc) = root { let attrs: Vec<_> = root_rc .borrow() .attributes .iter() .map(|(k, v)| format!("{}={}", k, v)) .collect(); let indent = " ".repeat(root_rc.borrow().depth); format!( "{}{:?}, attrs: {:?},", indent, root_rc.borrow().element_type, attrs ) } else { "None<Element>".to_string() } } } /// The main-document parsing process. /// Flow: /// Internally we maintain an Element tree root, plus a pointer cur to the node currently being parsed. /// 1. When a new element is parsed, feed_element is called; it appends the new Element to cur's children /// and points cur at the new Element /// 2. When an element finishes parsing, fish_feed_element is called; /// it points cur back at the parent node /// 3. When new text data arrives, feed_characters is called to fill the data into the current Element.
/// Currently this only targets the w:t type struct MainDocParsing { // Assume there is a single, unique root root: Option<Rc<RefCell<Element>>>, cur: Option<Rc<RefCell<Element>>>, depth: usize, } impl MainDocParsing { fn new() -> Self { Self { root: None, cur: None, depth: 0, } } /// A new element starts parsing fn feed_element(&mut self, name: OwnedName, attributes: Vec<OwnedAttribute>) { self.depth += 1; let element_type = ElementType::from_name(&name); let element = Rc::new(RefCell::new(Element::new( element_type, &self.cur, attributes, self.depth, ))); if let Some(cur_parent) = &self.cur { // Append the newest node as a child of the current parent cur_parent.borrow_mut().append_child(Rc::clone(&element));
self.cur.replace(element); } } /// The current element has finished parsing fn fish_feed_element(&mut self) { self.depth -= 1; // Point cur back at the node one level up let mut parent = None; if let Some(cur) = &self.cur { if let Some(p) = &cur.borrow().parent { parent = p.upgrade(); } } self.cur = parent; } /// Append text to the current element; currently only the w:t type carries text fn feed_characters(&mut self, data: String) { if let Some(cur) = &self.cur { cur.borrow_mut().literal_text = Some(data); } } } fn print_elements(root: &Option<Rc<RefCell<Element>>>, verbose: bool) { if verbose { println!("{}", Element::display(root)); } if let Some(root_rc) = root { if root_rc.borrow().element_type.is_text() { let run_property = Element::find_run_property(&root); let color_val = Element::get_color(&run_property); let text = root_rc .borrow() .literal_text .as_ref() .cloned() .unwrap_or_default(); println!("[color={}], text: {}", color_val.unwrap_or_default(), text); } for child in root_rc.borrow().children.iter() { print_elements(&Some(Rc::clone(child)), verbose); } } }
// cur now points at the newest node self.cur.replace(element); } else { // the first node self.root.replace(Rc::clone(&element));
random_line_split
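Element's tree uses the classic Rc/Weak parent-pointer layout: children are owned through Rc, while the parent link is a Weak so the two directions do not form a reference cycle. A stripped-down sketch of just that pattern (Node is an illustrative name, not the parser's type):

```rust
use std::cell::RefCell;
use std::rc::{Rc, Weak};

// Children hold strong Rc references; the parent link is Weak to break the cycle.
struct Node {
    name: String,
    parent: Option<Weak<RefCell<Node>>>,
    children: Vec<Rc<RefCell<Node>>>,
}

fn main() {
    let root = Rc::new(RefCell::new(Node {
        name: "body".into(),
        parent: None,
        children: vec![],
    }));
    let child = Rc::new(RefCell::new(Node {
        name: "p".into(),
        parent: Some(Rc::downgrade(&root)), // Weak: does not keep root alive
        children: vec![],
    }));
    root.borrow_mut().children.push(Rc::clone(&child));

    // Walking upward means upgrading the Weak pointer, which yields None if
    // the parent has already been dropped -- exactly the case
    // find_run_property has to handle.
    if let Some(parent) = child.borrow().parent.as_ref().and_then(Weak::upgrade) {
        println!("parent of {} is {}", child.borrow().name, parent.borrow().name);
    }
}
```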
main.rs
use anyhow::{Context, Result}; use std::cell::RefCell; use std::collections::HashMap; use std::fs; use std::path::Path; use std::rc::{Rc, Weak}; use structopt::StructOpt; use xml::{ attribute::OwnedAttribute, name::OwnedName, reader::{EventReader, XmlEvent}, }; #[derive(Debug, StructOpt)] #[structopt(name = "ooxml", about = "An example of parsing docx")] struct Opt { /// Specify the file name of the .docx, e.g. demo.docx #[structopt()] file_name: String, /// Activate verbose mode #[structopt(short, long)] verbose: bool, } /// Run /// ```sh /// cargo run -- demo.docx /// ``` /// Prints the text, along with its font color values. fn main() -> Result<()> { let opt = Opt::from_args(); let file_name = Path::new(&opt.file_name); let file = fs::File::open(file_name).with_context(|| format!("open file {:?} err", file_name))?; // Create a zip Archive over this file let mut archive = zip::ZipArchive::new(file).context("create zip archive err")?; for i in 0..archive.len() { let file = archive.by_index(i).unwrap(); if opt.verbose { println!("filename: {}", file.name()); } } // Parse the main document directly: word/document.xml // TODO the path is hard-coded here; properly we should first parse [Content_types].xml to locate the main document. let word_doc = archive .by_name("word/document.xml") .context("found no word/document.xml")?; // xml parse let mut doc_parsing = MainDocParsing::new(); let parser = EventReader::new(word_doc); let mut depth = 0; for e in parser { let event = e.context("xml parser got err")?; match event { XmlEvent::StartElement { name, attributes, namespace: _, } => { // Debug output if opt.verbose { print_xml_owned_name(&name, depth, true); } depth += 1; // A new element starts parsing doc_parsing.feed_element(name, attributes); } XmlEvent::EndElement { name } => { depth -= 1; // Debug output if opt.verbose { print_xml_owned_name(&name, depth, false); } // The current element has finished parsing doc_parsing.fish_feed_element(); } XmlEvent::Comment(_) => {} XmlEvent::CData(_) => {} XmlEvent::Characters(data) => { // Debug output if opt.verbose { println!(r#"{}Characters("{}")"#, " ".repeat(depth), data,); } // Append the text data to the current element doc_parsing.feed_characters(data); } XmlEvent::Whitespace(_) => {} _ => { // TODO } } } // Print the font colors and text content found in the document print_elements(&doc_parsing.root, opt.verbose); Ok(()) } /// Debug helper: print an element fn print_xml_owned_name(name: &OwnedName, indent: usize, start: bool) { print!("{}", " ".repeat(indent)); if start { print!("+"); } else { print!("-"); } if let Some(v) = &name.prefix { print!("{}:", v); } println!("{}", name.local_name); } /// Element types of the main document that we support. /// The raw form (e.g. w:t) is kept as a String purely for easier debugging.
#[derive(Debug)] enum ElementType { Document(String), Body(String), Paragraph(String), Run(String), Text(String), /// Properties ParagraphProperty(String), RunProperty(String), Color(String), /// All remaining, unsupported types Unknown(String), } impl ElementType { /// Build an ElementType from an xml OwnedName fn from_name(name: &OwnedName) -> Self { let raw = format!( "{}:{}", name.prefix.as_ref().unwrap_or(&String::new()), name.local_name ); // Currently only the `w:xxx` form is recognized, and only some of those tags if name.prefix.is_none() || name.prefix.as_ref().unwrap().ne("w") { return Self::Unknown(raw); } match &*name.local_name { "document" => Self::Document(raw), "body" => Self::Body(raw), "p" => Self::Paragraph(raw), "r" => Self::Run(raw), "t" => Self::Text(raw), "pPr" => Self::ParagraphProperty(raw), "rPr" => Self::RunProperty(raw), "color" => Self::Color(raw), _ => Self::Unknown(raw), } } /// Whether this is the Text type (w:t) fn is_text(&self) -> bool { matches!(self, Self::Text(_)) } /// Whether this is a Run property (w:rPr) fn is_run_property(&self) -> bool { matches!(self, Self::RunProperty(_)) } /// Whether this is the Color type (color) fn is_color(&self) -> bool { matches!(self, Self::Color(_)) } } /// An element in the main document. struct Element { element_type: ElementType, parent: Option<Weak<RefCell<Element>>>, children: Vec<Rc<RefCell<Element>>>, attributes: HashMap<String, String>, literal_text: Option<String>, // currently only w:t has this depth: usize, // for debug } impl Element { /// Create a new Element; the parent and type must be given, and parent may be None fn new( element_type: ElementType, parent: &Option<Rc<RefCell<Element>>>, attributes: Vec<OwnedAttribute>, depth: usize, ) -> Self { let mut attrs = HashMap::new(); attributes.iter().for_each(|v| { attrs.insert(v.name.local_name.clone(), v.value.clone()); }); Self { element_type, parent: parent.as_ref().map(Rc::downgrade), children: vec![], attributes: attrs, literal_text: None, depth, } } fn append_child(&mut self, child: Rc<RefCell<Element>>) { self.children.push(child); } // Some helper methods follow /// Find the run property nearest to this node fn find_run_property(element: &Option<Rc<RefCell<Element>>>) -> Option<Rc<RefCell<Element>>> { if let Some(ele) = element { if let Some(parent) = &ele.borrow().parent { if let Some(parent) = parent.upgrade() { // find run property from parent's children for child in parent.borrow().children.iter() { if child.borrow().element_type.is_run_property() { return Some(Rc::clone(child)); } } // if not found, go up return Self::find_run_property(&Some(parent)); } } } None } /// If this element is a run property, extract the color attribute from it fn get_color(element: &Option<Rc<RefCell<Element>>>) -> Option<String> { if let Some(ele) = &element { // not itself a run property if !ele.borrow().element_type.is_run_property() { return None; } // look for w:color among the children for child in ele.borrow().children.iter() { let child_ref = child.borrow(); if child_ref.element_type.is_color() { return child_ref.attributes.get("val").cloned(); } } } None } fn display(root: &Option<Rc<RefCell<Element>>>) -> String { if let Some(root_rc) = root { let attrs: Vec<_> = root_rc .borrow() .attributes .iter() .map(|(k, v)| format!("{}={}", k, v)) .collect(); let indent = " ".repeat(root_rc.borrow().depth); format!( "{}{:?}, attrs: {:?},", indent, root_rc.borrow().element_type, attrs ) } else { "None<Element>".to_string() } } } /// The main-document parsing process. /// Flow: /// Internally we maintain an Element tree root, plus a pointer cur to the node currently being parsed. /// 1. When a new element is parsed, feed_element is called; it appends the new Element to cur's children /// and points cur at the new Element /// 2. When an element finishes parsing, fish_feed_element is called; /// it points cur back at the parent node /// 3. When new text data arrives, feed_characters is called to fill the data into the current Element.
/// Currently this only targets the w:t type struct MainDocParsing { // Assume there is a single, unique root root: Option<Rc<RefCell<Element>>>, cur: Option<Rc<RefCell<Element>>>, depth: usize, } impl MainDocParsing { fn new() -> Self { Self { root: None, cur: None, depth: 0, } } /// A new element starts parsing fn feed_element(&mut self, name: OwnedName, attributes: Vec<OwnedAttribute>) { self.depth += 1; let element_type = ElementType::from_name(&name); let element = Rc::new(RefCell::new(Element::new( element_type, &self.cur, attributes, self.depth, ))); if let Some(cur_parent) = &self.cur { // Append the newest node as a child of the current parent cur_parent.borrow_mut().append_child(Rc::clone(&element)); // cur now points at the newest node self.cur.replace(element); } else { // the first node self.root.replace(Rc::clone(&element)); self.cur.replace(element); } } /// The current element has finished parsing fn fish_feed_element(&mut self) { self.de
fn print_elements(root: &Option<Rc<RefCell<Element>>>, verbose: bool) { if verbose { println!("{}", Element::display(root)); } if let Some(root_rc) = root { if root_rc.borrow().element_type.is_text() { let run_property = Element::find_run_property(&root); let color_val = Element::get_color(&run_property); let text = root_rc .borrow() .literal_text .as_ref() .cloned() .unwrap_or_default(); println!("[color={}], text: {}", color_val.unwrap_or_default(), text); } for child in root_rc.borrow().children.iter() { print_elements(&Some(Rc::clone(child)), verbose); } } }
pth -= 1; // Point cur back at the node one level up let mut parent = None; if let Some(cur) = &self.cur { if let Some(p) = &cur.borrow().parent { parent = p.upgrade(); } } self.cur = parent; } /// Append text to the current element; currently only the w:t type carries text fn feed_characters(&mut self, data: String) { if let Some(cur) = &self.cur { cur.borrow_mut().literal_text = Some(data); } } }
identifier_body
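The main() loop above is a plain xml-rs pull parser: StartElement/EndElement events drive the depth counter while Characters carries the text. The same loop shape over an in-memory document (a sketch; it feeds bytes instead of a zip entry, and the tiny XML snippet is made up):

```rust
extern crate xml; // the xml-rs crate used above

use xml::reader::{EventReader, XmlEvent};

fn main() {
    let doc = r#"<w:p xmlns:w="urn:example"><w:r><w:t>hello</w:t></w:r></w:p>"#;
    // &[u8] implements Read, so EventReader can consume the string directly.
    let parser = EventReader::new(doc.as_bytes());
    let mut depth = 0usize;
    for event in parser {
        match event.expect("xml parse error") {
            XmlEvent::StartElement { name, .. } => {
                println!("{}+{}", "  ".repeat(depth), name.local_name);
                depth += 1;
            }
            XmlEvent::EndElement { name } => {
                depth -= 1;
                println!("{}-{}", "  ".repeat(depth), name.local_name);
            }
            XmlEvent::Characters(text) => {
                println!("{}\"{}\"", "  ".repeat(depth), text);
            }
            _ => {}
        }
    }
}
```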
main.rs
use anyhow::{Context, Result}; use std::cell::RefCell; use std::collections::HashMap; use std::fs; use std::path::Path; use std::rc::{Rc, Weak}; use structopt::StructOpt; use xml::{ attribute::OwnedAttribute, name::OwnedName, reader::{EventReader, XmlEvent}, }; #[derive(Debug, StructOpt)] #[structopt(name = "ooxml", about = "An example of parsing docx")] struct Opt { /// Specify the file name of the .docx, e.g. demo.docx #[structopt()] file_name: String, /// Activate verbose mode #[structopt(short, long)] verbose: bool, } /// Run /// ```sh /// cargo run -- demo.docx /// ``` /// Prints the text, along with its font color values. fn main() -> Result<()> { let opt = Opt::from_args(); let file_name = Path::new(&opt.file_name); let file = fs::File::open(file_name).with_context(|| format!("open file {:?} err", file_name))?; // Create a zip Archive over this file let mut archive = zip::ZipArchive::new(file).context("create zip archive err")?; for i in 0..archive.len() { let file = archive.by_index(i).unwrap(); if opt.verbose { println!("filename: {}", file.name()); } } // Parse the main document directly: word/document.xml // TODO the path is hard-coded here; properly we should first parse [Content_types].xml to locate the main document. let word_doc = archive .by_name("word/document.xml") .context("found no word/document.xml")?; // xml parse let mut doc_parsing = MainDocParsing::new(); let parser = EventReader::new(word_doc); let mut depth = 0; for e in parser { let event = e.context("xml parser got err")?; match event { XmlEvent::StartElement { name, attributes, namespace: _, } => { // Debug output if opt.verbose { print_xml_owned_name(&name, depth, true); } depth += 1; // A new element starts parsing doc_parsing.feed_element(name, attributes); } XmlEvent::EndElement { name } => { depth -= 1; // Debug output if opt.verbose { print_xml_owned_name(&name, depth, false); } // The current element has finished parsing doc_parsing.fish_feed_element(); } XmlEvent::Comment(_) => {} XmlEvent::CData(_) => {} XmlEvent::Characters(data) => { // Debug output if opt.verbose { println!(r#"{}Characters("{}")"#, " ".repeat(depth), data,); } // Append the text data to the current element doc_parsing.feed_characters(data); } XmlEvent::Whitespace(_) => {} _ => { // TODO } } } // Print the font colors and text content found in the document print_elements(&doc_parsing.root, opt.verbose); Ok(()) } /// Debug helper: print an element fn print_xml_owned_name(name: &OwnedName, indent: usize, start: bool) { print!("{}", " ".repeat(indent)); if start { print!("+"); } else { print!("-"); } if let Some(v) = &name.prefix { print!("{}:", v); } println!("{}", name.local_name); } /// Element types of the main document that we support. /// The raw form (e.g. w:t) is kept as a String purely for easier debugging. #[derive(Debug)] enum ElementType { Document(String), Body(String), Paragraph(String), Run(String), Text(String), /// Properties ParagraphProperty(String), RunProperty(String), Color(String), /// All remaining, unsupported types Unknown(String), } impl ElementType {
mentType from an xml OwnedName fn from_name(name: &OwnedName) -> Self { let raw = format!( "{}:{}", name.prefix.as_ref().unwrap_or(&String::new()), name.local_name ); // Currently only the `w:xxx` form is recognized, and only some of those tags if name.prefix.is_none() || name.prefix.as_ref().unwrap().ne("w") { return Self::Unknown(raw); } match &*name.local_name { "document" => Self::Document(raw), "body" => Self::Body(raw), "p" => Self::Paragraph(raw), "r" => Self::Run(raw), "t" => Self::Text(raw), "pPr" => Self::ParagraphProperty(raw), "rPr" => Self::RunProperty(raw), "color" => Self::Color(raw), _ => Self::Unknown(raw), } } /// Whether this is the Text type (w:t) fn is_text(&self) -> bool { matches!(self, Self::Text(_)) } /// Whether this is a Run property (w:rPr) fn is_run_property(&self) -> bool { matches!(self, Self::RunProperty(_)) } /// Whether this is the Color type (color) fn is_color(&self) -> bool { matches!(self, Self::Color(_)) } } /// An element in the main document. struct Element { element_type: ElementType, parent: Option<Weak<RefCell<Element>>>, children: Vec<Rc<RefCell<Element>>>, attributes: HashMap<String, String>, literal_text: Option<String>, // currently only w:t has this depth: usize, // for debug } impl Element { /// Create a new Element; the parent and type must be given, and parent may be None fn new( element_type: ElementType, parent: &Option<Rc<RefCell<Element>>>, attributes: Vec<OwnedAttribute>, depth: usize, ) -> Self { let mut attrs = HashMap::new(); attributes.iter().for_each(|v| { attrs.insert(v.name.local_name.clone(), v.value.clone()); }); Self { element_type, parent: parent.as_ref().map(Rc::downgrade), children: vec![], attributes: attrs, literal_text: None, depth, } } fn append_child(&mut self, child: Rc<RefCell<Element>>) { self.children.push(child); } // Some helper methods follow /// Find the run property nearest to this node fn find_run_property(element: &Option<Rc<RefCell<Element>>>) -> Option<Rc<RefCell<Element>>> { if let Some(ele) = element { if let Some(parent) = &ele.borrow().parent { if let Some(parent) = parent.upgrade() { // find run property from parent's children for child in parent.borrow().children.iter() { if child.borrow().element_type.is_run_property() { return Some(Rc::clone(child)); } } // if not found, go up return Self::find_run_property(&Some(parent)); } } } None } /// If this element is a run property, extract the color attribute from it fn get_color(element: &Option<Rc<RefCell<Element>>>) -> Option<String> { if let Some(ele) = &element { // not itself a run property if !ele.borrow().element_type.is_run_property() { return None; } // look for w:color among the children for child in ele.borrow().children.iter() { let child_ref = child.borrow(); if child_ref.element_type.is_color() { return child_ref.attributes.get("val").cloned(); } } } None } fn display(root: &Option<Rc<RefCell<Element>>>) -> String { if let Some(root_rc) = root { let attrs: Vec<_> = root_rc .borrow() .attributes .iter() .map(|(k, v)| format!("{}={}", k, v)) .collect(); let indent = " ".repeat(root_rc.borrow().depth); format!( "{}{:?}, attrs: {:?},", indent, root_rc.borrow().element_type, attrs ) } else { "None<Element>".to_string() } } } /// The main-document parsing process. /// Flow: /// Internally we maintain an Element tree root, plus a pointer cur to the node currently being parsed. /// 1. When a new element is parsed, feed_element is called; it appends the new Element to cur's children /// and points cur at the new Element /// 2. When an element finishes parsing, fish_feed_element is called; /// it points cur back at the parent node /// 3. When new text data arrives, feed_characters is called to fill the data into the current Element.
/// Currently this only targets the w:t type struct MainDocParsing { // Assume there is a single, unique root root: Option<Rc<RefCell<Element>>>, cur: Option<Rc<RefCell<Element>>>, depth: usize, } impl MainDocParsing { fn new() -> Self { Self { root: None, cur: None, depth: 0, } } /// A new element starts parsing fn feed_element(&mut self, name: OwnedName, attributes: Vec<OwnedAttribute>) { self.depth += 1; let element_type = ElementType::from_name(&name); let element = Rc::new(RefCell::new(Element::new( element_type, &self.cur, attributes, self.depth, ))); if let Some(cur_parent) = &self.cur { // Append the newest node as a child of the current parent cur_parent.borrow_mut().append_child(Rc::clone(&element)); // cur now points at the newest node self.cur.replace(element); } else { // the first node self.root.replace(Rc::clone(&element)); self.cur.replace(element); } } /// The current element has finished parsing fn fish_feed_element(&mut self) { self.depth -= 1; // Point cur back at the node one level up let mut parent = None; if let Some(cur) = &self.cur { if let Some(p) = &cur.borrow().parent { parent = p.upgrade(); } } self.cur = parent; } /// Append text to the current element; currently only the w:t type carries text fn feed_characters(&mut self, data: String) { if let Some(cur) = &self.cur { cur.borrow_mut().literal_text = Some(data); } } } fn print_elements(root: &Option<Rc<RefCell<Element>>>, verbose: bool) { if verbose { println!("{}", Element::display(root)); } if let Some(root_rc) = root { if root_rc.borrow().element_type.is_text() { let run_property = Element::find_run_property(&root); let color_val = Element::get_color(&run_property); let text = root_rc .borrow() .literal_text .as_ref() .cloned() .unwrap_or_default(); println!("[color={}], text: {}", color_val.unwrap_or_default(), text); } for child in root_rc.borrow().children.iter() { print_elements(&Some(Rc::clone(child)), verbose); } } }
/// Build an Ele
identifier_name
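ElementType::from_name boils down to a two-step dispatch: check the namespace prefix, then match the local name. The same idea in isolation (Tag and classify are illustrative names):

```rust
// Only `w:`-prefixed names are mapped; everything else becomes Unknown.
#[derive(Debug, PartialEq)]
enum Tag {
    Paragraph,
    Run,
    Text,
    Unknown,
}

fn classify(prefix: Option<&str>, local_name: &str) -> Tag {
    if prefix != Some("w") {
        return Tag::Unknown;
    }
    match local_name {
        "p" => Tag::Paragraph,
        "r" => Tag::Run,
        "t" => Tag::Text,
        _ => Tag::Unknown,
    }
}

fn main() {
    assert_eq!(classify(Some("w"), "t"), Tag::Text);
    assert_eq!(classify(None, "t"), Tag::Unknown); // missing prefix
    assert_eq!(classify(Some("x"), "p"), Tag::Unknown); // wrong namespace
}
```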
main.rs
use anyhow::{Context, Result}; use std::cell::RefCell; use std::collections::HashMap; use std::fs; use std::path::Path; use std::rc::{Rc, Weak}; use structopt::StructOpt; use xml::{ attribute::OwnedAttribute, name::OwnedName, reader::{EventReader, XmlEvent}, }; #[derive(Debug, StructOpt)] #[structopt(name = "ooxml", about = "An example of parsing docx")] struct Opt { /// Specify the file name of the .docx, e.g. demo.docx #[structopt()] file_name: String, /// Activate verbose mode #[structopt(short, long)] verbose: bool, } /// Run /// ```sh /// cargo run -- demo.docx /// ``` /// Prints the text, along with its font color values. fn main() -> Result<()> { let opt = Opt::from_args(); let file_name = Path::new(&opt.file_name); let file = fs::File::open(file_name).with_context(|| format!("open file {:?} err", file_name))?; // Create a zip Archive over this file let mut archive = zip::ZipArchive::new(file).context("create zip archive err")?; for i in 0..archive.len() { let file = archive.by_index(i).unwrap(); if opt.verbose { println!("filename: {}", file.name()); } } // Parse the main document directly: word/document.xml // TODO the path is hard-coded here; properly we should first parse [Content_types].xml to locate the main document. let word_doc = archive .by_name("word/document.xml") .context("found no word/document.xml")?; // xml parse let mut doc_parsing = MainDocParsing::new(); let parser = EventReader::new(word_doc); let mut depth = 0; for e in parser { let event = e.context("xml parser got err")?; match event { XmlEvent::StartElement { name, attributes, namespace: _, } => { // Debug output if opt.verbose { print_xml_owned_name(&name, depth, true); } depth += 1; // A new element starts parsing doc_parsing.feed_element(name, attributes); } XmlEvent::EndElement { name } => { depth -= 1; // Debug output if opt.verbose { pri
// Debug output if opt.verbose { println!(r#"{}Characters("{}")"#, " ".repeat(depth), data,); } // Append the text data to the current element doc_parsing.feed_characters(data); } XmlEvent::Whitespace(_) => {} _ => { // TODO } } } // Print the font colors and text content found in the document print_elements(&doc_parsing.root, opt.verbose); Ok(()) } /// Debug helper: print an element fn print_xml_owned_name(name: &OwnedName, indent: usize, start: bool) { print!("{}", " ".repeat(indent)); if start { print!("+"); } else { print!("-"); } if let Some(v) = &name.prefix { print!("{}:", v); } println!("{}", name.local_name); } /// Element types of the main document that we support. /// The raw form (e.g. w:t) is kept as a String purely for easier debugging. #[derive(Debug)] enum ElementType { Document(String), Body(String), Paragraph(String), Run(String), Text(String), /// Properties ParagraphProperty(String), RunProperty(String), Color(String), /// All remaining, unsupported types Unknown(String), } impl ElementType { /// Build an ElementType from an xml OwnedName fn from_name(name: &OwnedName) -> Self { let raw = format!( "{}:{}", name.prefix.as_ref().unwrap_or(&String::new()), name.local_name ); // Currently only the `w:xxx` form is recognized, and only some of those tags if name.prefix.is_none() || name.prefix.as_ref().unwrap().ne("w") { return Self::Unknown(raw); } match &*name.local_name { "document" => Self::Document(raw), "body" => Self::Body(raw), "p" => Self::Paragraph(raw), "r" => Self::Run(raw), "t" => Self::Text(raw), "pPr" => Self::ParagraphProperty(raw), "rPr" => Self::RunProperty(raw), "color" => Self::Color(raw), _ => Self::Unknown(raw), } } /// Whether this is the Text type (w:t) fn is_text(&self) -> bool { matches!(self, Self::Text(_)) } /// Whether this is a Run property (w:rPr) fn is_run_property(&self) -> bool { matches!(self, Self::RunProperty(_)) } /// Whether this is the Color type (color) fn is_color(&self) -> bool { matches!(self, Self::Color(_)) } } /// An element in the main document. struct Element { element_type: ElementType, parent: Option<Weak<RefCell<Element>>>, children: Vec<Rc<RefCell<Element>>>, attributes: HashMap<String, String>, literal_text: Option<String>, // currently only w:t has this depth: usize, // for debug } impl Element { /// Create a new Element; the parent and type must be given, and parent may be None fn new( element_type: ElementType, parent: &Option<Rc<RefCell<Element>>>, attributes: Vec<OwnedAttribute>, depth: usize, ) -> Self { let mut attrs = HashMap::new(); attributes.iter().for_each(|v| { attrs.insert(v.name.local_name.clone(), v.value.clone()); }); Self { element_type, parent: parent.as_ref().map(Rc::downgrade), children: vec![], attributes: attrs, literal_text: None, depth, } } fn append_child(&mut self, child: Rc<RefCell<Element>>) { self.children.push(child); } // Some helper methods follow /// Find the run property nearest to this node fn find_run_property(element: &Option<Rc<RefCell<Element>>>) -> Option<Rc<RefCell<Element>>> { if let Some(ele) = element { if let Some(parent) = &ele.borrow().parent { if let Some(parent) = parent.upgrade() { // find run property from parent's children for child in parent.borrow().children.iter() { if child.borrow().element_type.is_run_property() { return Some(Rc::clone(child)); } } // if not found, go up return Self::find_run_property(&Some(parent)); } } } None } /// If this element is a run property, extract the color attribute from it fn get_color(element: &Option<Rc<RefCell<Element>>>) -> Option<String> { if let Some(ele) = &element { // not itself a run property if !ele.borrow().element_type.is_run_property() { return None; } // look for w:color among the children for child in ele.borrow().children.iter() { let child_ref = child.borrow(); if child_ref.element_type.is_color() { return child_ref.attributes.get("val").cloned(); } } } None } fn display(root: &Option<Rc<RefCell<Element>>>) -> String { if let Some(root_rc) = root { let attrs: Vec<_> =
root_rc .borrow() .attributes .iter() .map(|(k, v)| format!("{}={}", k, v)) .collect(); let indent = " ".repeat(root_rc.borrow().depth); format!( "{}{:?}, attrs: {:?},", indent, root_rc.borrow().element_type, attrs ) } else { "None<Element>".to_string() } } } /// The main-document parsing process. /// Flow: /// Internally we maintain an Element tree root, plus a pointer cur to the node currently being parsed. /// 1. When a new element is parsed, feed_element is called; it appends the new Element to cur's children /// and points cur at the new Element /// 2. When an element finishes parsing, fish_feed_element is called; /// it points cur back at the parent node /// 3. When new text data arrives, feed_characters is called to fill the data into the current Element. /// Currently this only targets the w:t type struct MainDocParsing { // Assume there is a single, unique root root: Option<Rc<RefCell<Element>>>, cur: Option<Rc<RefCell<Element>>>, depth: usize, } impl MainDocParsing { fn new() -> Self { Self { root: None, cur: None, depth: 0, } } /// A new element starts parsing fn feed_element(&mut self, name: OwnedName, attributes: Vec<OwnedAttribute>) { self.depth += 1; let element_type = ElementType::from_name(&name); let element = Rc::new(RefCell::new(Element::new( element_type, &self.cur, attributes, self.depth, ))); if let Some(cur_parent) = &self.cur { // Append the newest node as a child of the current parent cur_parent.borrow_mut().append_child(Rc::clone(&element)); // cur now points at the newest node self.cur.replace(element); } else { // the first node self.root.replace(Rc::clone(&element)); self.cur.replace(element); } } /// The current element has finished parsing fn fish_feed_element(&mut self) { self.depth -= 1; // Point cur back at the node one level up let mut parent = None; if let Some(cur) = &self.cur { if let Some(p) = &cur.borrow().parent { parent = p.upgrade(); } } self.cur = parent; } /// Append text to the current element; currently only the w:t type carries text fn feed_characters(&mut self, data: String) { if let Some(cur) = &self.cur { cur.borrow_mut().literal_text = Some(data); } } } fn print_elements(root: &Option<Rc<RefCell<Element>>>, verbose: bool) { if verbose { println!("{}", Element::display(root)); } if let Some(root_rc) = root { if root_rc.borrow().element_type.is_text() { let run_property = Element::find_run_property(&root); let color_val = Element::get_color(&run_property); let text = root_rc .borrow() .literal_text .as_ref() .cloned() .unwrap_or_default(); println!("[color={}], text: {}", color_val.unwrap_or_default(), text); } for child in root_rc.borrow().children.iter() { print_elements(&Some(Rc::clone(child)), verbose); } } }
nt_xml_owned_name(&name, depth, false); } // The current element has finished parsing doc_parsing.fish_feed_element(); } XmlEvent::Comment(_) => {} XmlEvent::CData(_) => {} XmlEvent::Characters(data) => {
conditional_block
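Since a .docx is just a zip container, the archive walk at the top of main() works on any such file. A self-contained sketch of only the listing step (error handling via Box<dyn Error> is a simplification):

```rust
use std::{env, error::Error, fs};

fn main() -> Result<(), Box<dyn Error>> {
    let path = env::args().nth(1).ok_or("usage: list <file.docx>")?;
    let file = fs::File::open(&path)?;
    // A .docx is a zip archive; word/document.xml lives inside it.
    let mut archive = zip::ZipArchive::new(file)?;
    for i in 0..archive.len() {
        let entry = archive.by_index(i)?;
        println!("{} ({} bytes)", entry.name(), entry.size());
    }
    Ok(())
}
```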
lib.rs
//! # mio-serial - A mio-compatible serial port implementation for *nix //! //! This crate provides a SerialPort implementation compatible with mio. //! //! ** This crate ONLY provides a unix implementation ** //! //! Some basic helper methods are provided for setting a few serial port //! parameters such as the baud rate. For everything else you'll //! have to set the flags in the `termios::Termios` struct yourself! All //! the relevant settings can be found by consulting your system's `man` page //! for termios (e.g. `man termios`) //! //! This crate is influenced heavily by the [serial](https://github.com/dcuddeback/serial-rs) //! crate (by David Cuddeback, same author of the helpful [termios](https://github.com/dcuddeback/termios-rs) //! crate!) #![cfg(unix)] #![deny(missing_docs)] extern crate termios; extern crate libc; extern crate mio; use std::os::unix::prelude::*; use std::io; use std::ffi::CString; use std::path::Path; use std::convert::AsRef; /// A mio-compatible serial port for *nix pub struct SerialPort { fd: RawFd, orig_settings: termios::Termios, is_raw: bool, } impl SerialPort { /// Construct a new SerialPort /// /// Opens a serial port at the location provided by `path` with the following /// default settings: /// /// - 9600,8N1 (9600 Baud, 8-bit data, no parity, 1 stop bit) /// - Receiver enabled in "Canonical mode" /// - Non-blocking /// - No flow control (software OR hardware) /// - Ignores hardware control lines /// /// # Errors /// /// SerialPort construction can fail for a few reasons: /// /// - An invalid path is provided /// - The path does not represent a serial port device /// - We are unable to configure the serial port with /// ANY of the default settings. (Unlikely... but IS possible) pub fn open<T: AsRef<Path>>(path: T) -> io::Result<Self> { // Create a CString from the provided path. let path_cstr = CString::new(path.as_ref().as_os_str().as_bytes()) .map_err(|_| io::Error::last_os_error())?; // Attempt to open the desired path as a serial port. Set it read/write, nonblocking, and // don't set it as the controlling terminal let fd = unsafe { libc::open(path_cstr.as_ptr(), libc::O_RDWR | libc::O_NONBLOCK | libc::O_NOCTTY, 0) }; // Make sure the file descriptor is valid. if fd < 0 { return Err(io::Error::last_os_error()); } // Get the existing termios settings. Close the file descriptor on errors. let orig_settings = termios::Termios::from_fd(fd).map_err(|e| unsafe {libc::close(fd); e})?; // Default port settings: Canonical 9600-8N1 let mut default_settings = orig_settings.clone(); default_settings.c_cflag = termios::CS8 | termios::CLOCAL | termios::CREAD; default_settings.c_oflag = 0; default_settings.c_iflag = termios::IGNPAR; default_settings.c_lflag = termios::ICANON; default_settings.c_cc[termios::VMIN] = 0; default_settings.c_cc[termios::VTIME] = 0; termios::cfsetspeed(&mut default_settings, termios::B9600).unwrap(); // tcsetattr only errors out if we cannot set ANY attribute. Something is seriously wrong // if that happens, so just close the file descriptor and raise the error. termios::tcsetattr(fd, termios::TCSANOW, &default_settings).map_err(|e| unsafe {libc::close(fd); e})?; Ok(SerialPort{ fd: fd, orig_settings: orig_settings, is_raw: false, }) } /// Retrieve the termios structure for the serial port.
pub fn termios(&self) -> io::Result<termios::Termios> { termios::Termios::from_fd(self.fd) } /// Set low-level serial port settings /// /// The `action` parameter must be one of the following: /// /// - `termios::TCSANOW` Update immediately /// - `termios::TCSADRAIN` Finish reading buffered data before updating. /// - `termios::TCSAFLUSH` Finish writing buffered data before updating. /// /// # Errors /// /// Will return `ErrorKind::InvalidInput` if `action` is not one of the three constants /// defined above. pub fn set_termios(&mut self, action: i32, t: &termios::Termios) -> io::Result<()> { match action { termios::TCSANOW | termios::TCSADRAIN | termios::TCSAFLUSH => { termios::tcsetattr(self.fd, action, t) }, _ => Err(io::Error::new(io::ErrorKind::InvalidInput, format!("Illegal action: {}", action))), } } /// Enable or disable blocking reads and writes. /// /// # Panics /// Will panic if the underlying `fcntl` system call returns a value other than 0 or -1 pub fn set_nonblocking(&mut self, blocking: bool) -> io::Result<()> { match unsafe {libc::fcntl(self.fd, libc::F_SETFL, libc::O_NONBLOCK, blocking as libc::c_int)} { 0 => Ok(()), -1 => Err(io::Error::last_os_error()), e @ _ => unreachable!(format!("Unexpected return code from F_SETFL O_NONBLOCK: {}", e)), } } /// Get the current blocking mode for the serial port /// /// # Panics /// Will panic if the underlying `fcntl` system call returns a value other than 0 or -1 pub fn is_blocking(&self) -> io::Result<bool> { match unsafe {libc::fcntl(self.fd, libc::F_GETFL, libc::O_NONBLOCK)} { 0 => Ok(false), 1 => Ok(true), -1 => Err(io::Error::last_os_error()), e @ _ => unreachable!(format!("Unexpected return code from F_GETFL O_NONBLOCK: {}", e)), } } /// Try writing some data. /// /// Similar to the standard `io::Write` implementation, but errors /// due to blocking IO are translated into Ok(None) results. /// /// # Returns /// /// - `Ok(Some(size))` on successful writes /// - `Ok(None)` if calling write would block. /// - `Err(e)` for all other IO errors pub fn maybe_write(&mut self, buf: &[u8]) -> io::Result<Option<usize>> { match self.write(buf) { Ok(s) => Ok(Some(s)), Err(e) => { if let io::ErrorKind::WouldBlock = e.kind() { Ok(None) } else { Err(e) } } } } /// Try reading some data. /// /// Similar to the standard `io::Read` implementation, but errors /// due to blocking IO are translated into Ok(None) results. /// /// # Returns /// /// - `Ok(Some(size))` on successful reads /// - `Ok(None)` if calling read would block. /// - `Err(e)` for all other IO errors pub fn maybe_read(&mut self, buf: &mut [u8]) -> io::Result<Option<usize>> { match self.read(buf) { Ok(s) => Ok(Some(s)), Err(e) => { if let io::ErrorKind::WouldBlock = e.kind() { Ok(None) } else { Err(e) } } } } /// Set the serial baudrate /// /// Valid baudrates are: /// /// - 0 /// - 50 /// - 75 /// - 110 /// - 134 /// - 150 /// - 200 /// - 300 /// - 600 /// - 1200 /// - 1800 /// - 2400 /// - 4800 /// - 9600 /// - 19200 /// - 38400 /// /// # Errors /// /// Returns an io::ErrorKind::InvalidInput for baud rates not in the list /// above.
pub fn set_baudrate(&mut self, baud: i32) -> io::Result<()> { use termios::{B0, B50, B75, B110, B134, B150, B200, B300, B600, B1200, B1800, B2400, B4800, B9600, B19200, B38400}; let b = match baud { 4800 => B4800, 9600 => B9600, 19200 => B19200, 38400 => B38400, 0 => B0, 50 => B50, 75 => B75, 110 => B110, 134 => B134, 150 => B150, 200 => B200, 300 => B300, 600 => B600, 1200 => B1200, 1800 => B1800, 2400 => B2400, _ => return Err(io::Error::new(io::ErrorKind::InvalidInput, format!("{} is not a legal baudrate", baud))), }; // Get the termios structure let mut s = self.termios()?; // And the original rate // let orig_rate = termios::cfgetospeed(&s); // Set the new rate termios::cfsetspeed(&mut s, b)?; // Now set the structure self.set_termios(termios::TCSAFLUSH, &s) } /// Get the serial baudrate /// /// Valid baudrates are: /// /// - 0 /// - 50 /// - 75 /// - 110 /// - 134 /// - 150 /// - 200 /// - 300 /// - 600 /// - 1200 /// - 1800 /// - 2400 /// - 4800 /// - 9600 /// - 19200 /// - 38400 /// /// # Errors /// /// Returns an io::ErrorKind::InvalidInput for baud rates not in the list /// above. pub fn baudrate(&self) -> io::Result<i32>
B150 => 150, B200 => 200, B300 => 300, B600 => 600, B1200 => 1200, B1800 => 1800, B2400 => 2400, _ => return Err(io::Error::new(io::ErrorKind::InvalidInput, format!("Unknown baud bitmask: {}", baud))), }; Ok(b) } /// Enable or disable raw mode /// /// In raw mode, input is available character by character, echoing is disabled, and all /// special processing of terminal input and output characters is disabled. pub fn set_raw(&mut self, raw: bool) -> io::Result<()> { if raw == self.is_raw() { return Ok(()) } let mut s = self.termios()?; if raw { termios::cfmakeraw(&mut s); } else { s.c_iflag |= termios::IGNBRK | termios::PARMRK; s.c_lflag |= termios::ICANON; } self.set_termios(termios::TCSANOW, &s)?; self.is_raw = raw; Ok(()) } /// Return if raw mode is enabled or not. pub fn is_raw(&self) -> bool { self.is_raw } } impl Drop for SerialPort { fn drop(&mut self) { #[allow(unused_must_use)] unsafe { // Reset termios settings to their original state. let s = self.orig_settings.clone(); self.set_termios(termios::TCSANOW, &s); // Close the file descriptor libc::close(self.fd); } } } impl AsRawFd for SerialPort { fn as_raw_fd(&self) -> RawFd { self.fd } } use std::io::Read; impl Read for SerialPort { fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { match unsafe {libc::read(self.fd, buf.as_mut_ptr() as *mut libc::c_void, buf.len())} { x if x >= 0 => Ok(x as usize), _ => Err(io::Error::last_os_error()), } } } use std::io::Write; impl Write for SerialPort { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { match unsafe {libc::write(self.fd, buf.as_ptr() as *const libc::c_void, buf.len())} { x if x >= 0 => Ok(x as usize), _ => Err(io::Error::last_os_error()), } } fn flush(&mut self) -> io::Result<()> { termios::tcflush(self.fd, termios::TCOFLUSH) } } use mio::{Evented, PollOpt, Token, Poll, Ready}; use mio::unix::EventedFd; impl Evented for SerialPort { fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> { EventedFd(&self.as_raw_fd()).register(poll, token, interest, opts) } fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> { EventedFd(&self.as_raw_fd()).reregister(poll, token, interest, opts) } fn deregister(&self, poll: &Poll) -> io::Result<()> { EventedFd(&self.as_raw_fd()).deregister(poll) } }
{ use termios::{B0, B50, B75, B110, B134, B150, B200, B300, B600, B1200, B1800, B2400, B4800, B9600, B19200, B38400}; let s = self.termios()?; // And the original rate let baud = termios::cfgetospeed(&s); let b = match baud { B4800 => 4800, B9600 => 9600, B19200 => 19200, B38400 => 38400, B0 => 0, B50 => 50, B75 => 75, B110 => 110, B134 => 134,
identifier_body
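maybe_read and maybe_write implement the same translation twice: WouldBlock becomes Ok(None), everything else passes through unchanged. The pattern factors out into one generic helper (a sketch; `maybe` is not part of this crate):

```rust
use std::io;

// Translate Err(WouldBlock) into Ok(None), keeping all other outcomes intact.
fn maybe<T>(res: io::Result<T>) -> io::Result<Option<T>> {
    match res {
        Ok(v) => Ok(Some(v)),
        Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => Ok(None),
        Err(e) => Err(e),
    }
}

fn main() {
    let blocked: io::Result<usize> = Err(io::ErrorKind::WouldBlock.into());
    assert!(matches!(maybe(blocked), Ok(None)));
    assert!(matches!(maybe(Ok(5usize)), Ok(Some(5))));
}
```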
lib.rs
//! # mio-serial - A mio-compatible serial port implementation for *nix //! //! This crate provides a SerialPort implementation compatible with mio. //! //! ** This crate ONLY provides a unix implementation ** //! //! Some basic helper methods are provided for setting a few serial port //! parameters such as the baud rate. For everything else you'll //! have to set the flags in the `termios::Termios` struct yourself! All //! the relevant settings can be found by consulting your system's `man` page //! for termios (e.g. `man termios`) //! //! This crate is influenced heavily by the [serial](https://github.com/dcuddeback/serial-rs) //! crate (by David Cuddeback, same author of the helpful [termios](https://github.com/dcuddeback/termios-rs) //! crate!) #![cfg(unix)] #![deny(missing_docs)] extern crate termios; extern crate libc; extern crate mio; use std::os::unix::prelude::*; use std::io; use std::ffi::CString; use std::path::Path; use std::convert::AsRef; /// A mio-compatible serial port for *nix pub struct
{ fd: RawFd, orig_settings: termios::Termios, is_raw: bool, } impl SerialPort { /// Construct a new SerialPort /// /// Opens a serial port at the location provided by `path` with the following /// default settings: /// /// - 9600,8N1 (9600 Baud, 8-bit data, no parity, 1 stop bit) /// - Receiver enabled in "Canonical mode" /// - Non-blocking /// - No flow control (software OR hardware) /// - Ignores hardware control lines /// /// # Errors /// /// SerialPort construction can fail for a few reasons: /// /// - An invalid path is provided /// - The path does not represent a serial port device /// - We are unable to configure the serial port with /// ANY of the default settings. (Unlikely... but IS possible) pub fn open<T: AsRef<Path>>(path: T) -> io::Result<Self> { // Create a CString from the provided path. let path_cstr = CString::new(path.as_ref().as_os_str().as_bytes()) .map_err(|_| io::Error::last_os_error())?; // Attempt to open the desired path as a serial port. Set it read/write, nonblocking, and // don't set it as the controlling terminal let fd = unsafe { libc::open(path_cstr.as_ptr(), libc::O_RDWR | libc::O_NONBLOCK | libc::O_NOCTTY, 0) }; // Make sure the file descriptor is valid. if fd < 0 { return Err(io::Error::last_os_error()); } // Get the existing termios settings. Close the file descriptor on errors. let orig_settings = termios::Termios::from_fd(fd).map_err(|e| unsafe {libc::close(fd); e})?; // Default port settings: Canonical 9600-8N1 let mut default_settings = orig_settings.clone(); default_settings.c_cflag = termios::CS8 | termios::CLOCAL | termios::CREAD; default_settings.c_oflag = 0; default_settings.c_iflag = termios::IGNPAR; default_settings.c_lflag = termios::ICANON; default_settings.c_cc[termios::VMIN] = 0; default_settings.c_cc[termios::VTIME] = 0; termios::cfsetspeed(&mut default_settings, termios::B9600).unwrap(); // tcsetattr only errors out if we cannot set ANY attribute. Something is seriously wrong // if that happens, so just close the file descriptor and raise the error. termios::tcsetattr(fd, termios::TCSANOW, &default_settings).map_err(|e| unsafe {libc::close(fd); e})?; Ok(SerialPort{ fd: fd, orig_settings: orig_settings, is_raw: false, }) } /// Retrieve the termios structure for the serial port. pub fn termios(&self) -> io::Result<termios::Termios> { termios::Termios::from_fd(self.fd) } /// Set low-level serial port settings /// /// The `action` parameter must be one of the following: /// /// - `termios::TCSANOW` Update immediately /// - `termios::TCSADRAIN` Finish reading buffered data before updating. /// - `termios::TCSAFLUSH` Finish writing buffered data before updating. /// /// # Errors /// /// Will return `ErrorKind::InvalidInput` if `action` is not one of the three constants /// defined above. pub fn set_termios(&mut self, action: i32, t: &termios::Termios) -> io::Result<()> { match action { termios::TCSANOW | termios::TCSADRAIN | termios::TCSAFLUSH => { termios::tcsetattr(self.fd, action, t) }, _ => Err(io::Error::new(io::ErrorKind::InvalidInput, format!("Illegal action: {}", action))), } } /// Enable or disable blocking reads and writes.
/// /// # Panics /// Will panic if the underlying `fcntl` system call returns a value other than 0 or -1 pub fn set_nonblocking(&mut self, blocking: bool) -> io::Result<()> { match unsafe {libc::fcntl(self.fd, libc::F_SETFL, libc::O_NONBLOCK, blocking as libc::c_int)} { 0 => Ok(()), -1 => Err(io::Error::last_os_error()), e @ _ => unreachable!(format!("Unexpected return code from F_SETFL O_NONBLOCK: {}", e)), } } /// Get the current blocking mode for the serial port /// /// # Panics /// Will panic if the underlying `fcntl` system call returns a value other than 0 or -1 pub fn is_blocking(&self) -> io::Result<bool> { match unsafe {libc::fcntl(self.fd, libc::F_GETFL, libc::O_NONBLOCK)} { 0 => Ok(false), 1 => Ok(true), -1 => Err(io::Error::last_os_error()), e @ _ => unreachable!(format!("Unexpected return code from F_GETFL O_NONBLOCK: {}", e)), } } /// Try writing some data. /// /// Similar to the standard `io::Write` implementation, but errors /// due to blocking IO are translated into Ok(None) results. /// /// # Returns /// /// - `Ok(Some(size))` on successful writes /// - `Ok(None)` if calling write would block. /// - `Err(e)` for all other IO errors pub fn maybe_write(&mut self, buf: &[u8]) -> io::Result<Option<usize>> { match self.write(buf) { Ok(s) => Ok(Some(s)), Err(e) => { if let io::ErrorKind::WouldBlock = e.kind() { Ok(None) } else { Err(e) } } } } /// Try reading some data. /// /// Similar to the standard `io::Read` implementation, but errors /// due to blocking IO are translated into Ok(None) results. /// /// # Returns /// /// - `Ok(Some(size))` on successful reads /// - `Ok(None)` if calling read would block. /// - `Err(e)` for all other IO errors pub fn maybe_read(&mut self, buf: &mut [u8]) -> io::Result<Option<usize>> { match self.read(buf) { Ok(s) => Ok(Some(s)), Err(e) => { if let io::ErrorKind::WouldBlock = e.kind() { Ok(None) } else { Err(e) } } } } /// Set the serial baudrate /// /// Valid baudrates are: /// /// - 0 /// - 50 /// - 75 /// - 110 /// - 134 /// - 150 /// - 200 /// - 300 /// - 600 /// - 1200 /// - 1800 /// - 2400 /// - 4800 /// - 9600 /// - 19200 /// - 38400 /// /// # Errors /// /// Returns an io::ErrorKind::InvalidInput for baud rates not in the list /// above. pub fn set_baudrate(&mut self, baud: i32) -> io::Result<()> { use termios::{B0, B50, B75, B110, B134, B150, B200, B300, B600, B1200, B1800, B2400, B4800, B9600, B19200, B38400}; let b = match baud { 4800 => B4800, 9600 => B9600, 19200 => B19200, 38400 => B38400, 0 => B0, 50 => B50, 75 => B75, 110 => B110, 134 => B134, 150 => B150, 200 => B200, 300 => B300, 600 => B600, 1200 => B1200, 1800 => B1800, 2400 => B2400, _ => return Err(io::Error::new(io::ErrorKind::InvalidInput, format!("{} is not a legal baudrate", baud))), }; // Get the termios structure let mut s = self.termios()?; // And the original rate // let orig_rate = termios::cfgetospeed(&s); // Set the new rate termios::cfsetspeed(&mut s, b)?; // Now set the structure self.set_termios(termios::TCSAFLUSH, &s) } /// Get the serial baudrate /// /// Valid baudrates are: /// /// - 0 /// - 50 /// - 75 /// - 110 /// - 134 /// - 150 /// - 200 /// - 300 /// - 600 /// - 1200 /// - 1800 /// - 2400 /// - 4800 /// - 9600 /// - 19200 /// - 38400 /// /// # Errors /// /// Returns an io::ErrorKind::InvalidInput for baud rates not in the list /// above.
pub fn baudrate(&self) -> io::Result<i32> { use termios::{B0, B50, B75, B110, B134, B150, B200, B300, B600, B1200, B1800, B2400, B4800, B9600, B19200, B38400}; let s = self.termios()?; // And the original rate let baud = termios::cfgetospeed(&s); let b = match baud { B4800 => 4800, B9600 => 9600, B19200 => 19200, B38400 => 38400, B0 => 0, B50 => 50, B75 => 75, B110 => 110, B134 => 134, B150 => 150, B200 => 200, B300 => 300, B600 => 600, B1200 => 1200, B1800 => 1800, B2400 => 2400, _ => return Err(io::Error::new(io::ErrorKind::InvalidInput, format!("Unknown baud bitmask: {}", baud))), }; Ok(b) } /// Enable or disable raw mode /// /// In raw mode, input is available character by character, echoing is disabled, and all /// special processing of terminal input and output characters is disabled. pub fn set_raw(&mut self, raw: bool) -> io::Result<()> { if raw == self.is_raw() { return Ok(()) } let mut s = self.termios()?; if raw { termios::cfmakeraw(&mut s); } else { s.c_iflag |= termios::IGNBRK | termios::PARMRK; s.c_lflag |= termios::ICANON; } self.set_termios(termios::TCSANOW, &s)?; self.is_raw = raw; Ok(()) } /// Return if raw mode is enabled or not. pub fn is_raw(&self) -> bool { self.is_raw } } impl Drop for SerialPort { fn drop(&mut self) { #[allow(unused_must_use)] unsafe { // Reset termios settings to their original state. let s = self.orig_settings.clone(); self.set_termios(termios::TCSANOW, &s); // Close the file descriptor libc::close(self.fd); } } } impl AsRawFd for SerialPort { fn as_raw_fd(&self) -> RawFd { self.fd } } use std::io::Read; impl Read for SerialPort { fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { match unsafe {libc::read(self.fd, buf.as_mut_ptr() as *mut libc::c_void, buf.len())} { x if x >= 0 => Ok(x as usize), _ => Err(io::Error::last_os_error()), } } } use std::io::Write; impl Write for SerialPort { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { match unsafe {libc::write(self.fd, buf.as_ptr() as *const libc::c_void, buf.len())} { x if x >= 0 => Ok(x as usize), _ => Err(io::Error::last_os_error()), } } fn flush(&mut self) -> io::Result<()> { termios::tcflush(self.fd, termios::TCOFLUSH) } } use mio::{Evented, PollOpt, Token, Poll, Ready}; use mio::unix::EventedFd; impl Evented for SerialPort { fn register(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> { EventedFd(&self.as_raw_fd()).register(poll, token, interest, opts) } fn reregister(&self, poll: &Poll, token: Token, interest: Ready, opts: PollOpt) -> io::Result<()> { EventedFd(&self.as_raw_fd()).reregister(poll, token, interest, opts) } fn deregister(&self, poll: &Poll) -> io::Result<()> { EventedFd(&self.as_raw_fd()).deregister(poll) } }
SerialPort
identifier_name
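With the Evented impl in place, a SerialPort drops straight into a mio (0.6-style) event loop. A sketch assuming the SerialPort type above; the device path is illustrative and the program needs real hardware to do anything useful:

```rust
extern crate mio;

use mio::{Events, Poll, PollOpt, Ready, Token};
use std::time::Duration;

const SERIAL: Token = Token(0);

fn main() -> std::io::Result<()> {
    // Hypothetical device path; adjust for your system.
    let mut port = SerialPort::open("/dev/ttyUSB0")?;
    let poll = Poll::new()?;
    poll.register(&port, SERIAL, Ready::readable(), PollOpt::edge())?;

    let mut events = Events::with_capacity(16);
    let mut buf = [0u8; 256];
    poll.poll(&mut events, Some(Duration::from_secs(5)))?;
    for event in events.iter() {
        if event.token() == SERIAL && event.readiness().is_readable() {
            // Non-blocking read: Ok(None) means WouldBlock, try again later.
            if let Some(n) = port.maybe_read(&mut buf)? {
                println!("read {} bytes", n);
            }
        }
    }
    Ok(())
}
```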
lib.rs
//! # mio-serial - A mio-compatible serial port implementation for *nix //! //! This crate provides a SerialPort implementation compatible with mio. //! //! ** This crate ONLY provides a unix implementation ** //! //! Some basic helper methods are provided for setting a few serial port //! parameters such as the baud rate. For everything else you'll //! have to set the flags in the `termios::Termios` struct yourself! All //! the relevant settings can be found by consulting your system's `man` page //! for termios (e.g. `man termios`) //! //! This crate is influenced heavily by the [serial](https://github.com/dcuddeback/serial-rs) //! crate (by David Cuddeback, same author of the helpful [termios](https://github.com/dcuddeback/termios-rs) //! crate!) #![cfg(unix)] #![deny(missing_docs)] extern crate termios; extern crate libc; extern crate mio; use std::os::unix::prelude::*; use std::io; use std::ffi::CString; use std::path::Path; use std::convert::AsRef; /// A mio-compatible serial port for *nix pub struct SerialPort { fd: RawFd, orig_settings: termios::Termios, is_raw: bool, } impl SerialPort { /// Construct a new SerialPort /// /// Opens a serial port at the location provided by `path` with the following /// default settings: /// /// - 9600,8N1 (9600 Baud, 8-bit data, no parity, 1 stop bit) /// - Receiver enabled in "Canonical mode" /// - Non-blocking /// - No flow control (software OR hardware) /// - Ignores hardware control lines /// /// # Errors /// /// SerialPort construction can fail for a few reasons: /// /// - An invalid path is provided /// - The path does not represent a serial port device /// - We are unable to configure the serial port with /// ANY of the default settings. (Unlikely... but IS possible) pub fn open<T: AsRef<Path>>(path: T) -> io::Result<Self> { // Create a CString from the provided path. let path_cstr = CString::new(path.as_ref().as_os_str().as_bytes()) .map_err(|_| io::Error::last_os_error())?; // Attempt to open the desired path as a serial port. Set it read/write, nonblocking, and // don't set it as the controlling terminal let fd = unsafe { libc::open(path_cstr.as_ptr(), libc::O_RDWR | libc::O_NONBLOCK | libc::O_NOCTTY, 0) }; // Make sure the file descriptor is valid. if fd < 0 { return Err(io::Error::last_os_error()); } // Get the existing termios settings. Close the file descriptor on errors. let orig_settings = termios::Termios::from_fd(fd).map_err(|e| unsafe {libc::close(fd); e})?; // Default port settings: Canonical 9600-8N1 let mut default_settings = orig_settings.clone(); default_settings.c_cflag = termios::CS8 | termios::CLOCAL | termios::CREAD; default_settings.c_oflag = 0; default_settings.c_iflag = termios::IGNPAR; default_settings.c_lflag = termios::ICANON; default_settings.c_cc[termios::VMIN] = 0; default_settings.c_cc[termios::VTIME] = 0; termios::cfsetspeed(&mut default_settings, termios::B9600).unwrap(); // tcsetattr only errors out if we cannot set ANY attribute. Something is seriously wrong // if that happens, so just close the file descriptor and raise the error. termios::tcsetattr(fd, termios::TCSANOW, &default_settings).map_err(|e| unsafe {libc::close(fd); e})?; Ok(SerialPort{ fd: fd, orig_settings: orig_settings, is_raw: false, }) } /// Retrieve the termios structure for the serial port.
pub fn termios(&self) -> io::Result<termios::Termios> { termios::Termios::from_fd(self.fd) } /// Set low-level serial port settings /// /// The `action` parameter must be one of the following: /// /// - `termios::TCSANOW` Update immediately /// - `termios::TCSADRAIN` Finish reading buffered data before updating. /// - `termios::TCSAFLUSH` Finish writing buffered data before updating. /// /// # Errors /// /// Will return `ErrorKind::InvalidInput` if `action` is not one of the three constants /// defined above. pub fn set_termios(&mut self, action: i32, t: &termios::Termios) -> io::Result<()> { match action { termios::TCSANOW | termios::TCSADRAIN | termios::TCSAFLUSH => { termios::tcsetattr(self.fd, action, t) }, _ => Err(io::Error::new(io::ErrorKind::InvalidInput, format!("Illegal action: {}", action))), } } /// Enable or disable blocking reads and writes. /// /// # Panics /// Will panic if the underlying `fcntl` system call returns a value other than 0 or -1 pub fn set_nonblocking(&mut self, blocking: bool) -> io::Result<()> { match unsafe {libc::fcntl(self.fd, libc::F_SETFL, libc::O_NONBLOCK, blocking as libc::c_int)} { 0 => Ok(()), -1 => Err(io::Error::last_os_error()), e @ _ => unreachable!(format!("Unexpected return code from F_SETFL O_NONBLOCK: {}", e)), } } /// Get the current blocking mode for the serial port /// /// # Panics /// Will panic if the underlying `fcntl` system call returns a value other than 0 or -1 pub fn is_blocking(&self) -> io::Result<bool> { match unsafe {libc::fcntl(self.fd, libc::F_GETFL, libc::O_NONBLOCK)} { 0 => Ok(false), 1 => Ok(true), -1 => Err(io::Error::last_os_error()), e @ _ => unreachable!(format!("Unexpected return code from F_GETFL O_NONBLOCK: {}", e)), } } /// Try writing some data. /// /// Similar to the standard `io::Write` implementation, but errors /// due to blocking IO are translated into Ok(None) results. /// /// # Returns /// /// - `Ok(Some(size))` on successful writes /// - `Ok(None)` if calling write would block. /// - `Err(e)` for all other IO errors pub fn maybe_write(&mut self, buf: &[u8]) -> io::Result<Option<usize>> { match self.write(buf) { Ok(s) => Ok(Some(s)), Err(e) => { if let io::ErrorKind::WouldBlock = e.kind() { Ok(None) } else { Err(e) } } } } /// Try reading some data. /// /// Similar to the standard `io::Read` implementation, but errors /// due to blocking IO are translated into Ok(None) results. ///
    /// - `Err(e)` for all other IO errors
    pub fn maybe_read(&mut self, buf: &mut [u8]) -> io::Result<Option<usize>> {
        match self.read(buf) {
            Ok(s) => Ok(Some(s)),
            Err(e) => {
                if let io::ErrorKind::WouldBlock = e.kind() {
                    Ok(None)
                } else {
                    Err(e)
                }
            }
        }
    }

    /// Set the serial baudrate
    ///
    /// Valid baudrates are:
    ///
    /// - 0
    /// - 50
    /// - 75
    /// - 110
    /// - 134
    /// - 150
    /// - 200
    /// - 300
    /// - 600
    /// - 1200
    /// - 1800
    /// - 2400
    /// - 4800
    /// - 9600
    /// - 19200
    /// - 38400
    ///
    /// # Errors
    ///
    /// Returns an io::ErrorKind::InvalidInput for baud rates not in the list
    /// above.
    pub fn set_baudrate(&mut self, baud: i32) -> io::Result<()> {
        use termios::{B0, B50, B75, B110, B134, B150, B200, B300, B600, B1200, B1800, B2400,
                      B4800, B9600, B19200, B38400};

        let b = match baud {
            4800 => B4800,
            9600 => B9600,
            19200 => B19200,
            38400 => B38400,
            0 => B0,
            50 => B50,
            75 => B75,
            110 => B110,
            134 => B134,
            150 => B150,
            200 => B200,
            300 => B300,
            600 => B600,
            1200 => B1200,
            1800 => B1800,
            2400 => B2400,
            _ => return Err(io::Error::new(io::ErrorKind::InvalidInput,
                                           format!("{} is not a legal baudrate", baud))),
        };

        // Get the termios structure
        let mut s = self.termios()?;

        // Set the new rate
        termios::cfsetspeed(&mut s, b)?;

        // Now set the structure
        self.set_termios(termios::TCSAFLUSH, &s)
    }

    /// Get the serial baudrate
    ///
    /// Valid baudrates are:
    ///
    /// - 0
    /// - 50
    /// - 75
    /// - 110
    /// - 134
    /// - 150
    /// - 200
    /// - 300
    /// - 600
    /// - 1200
    /// - 1800
    /// - 2400
    /// - 4800
    /// - 9600
    /// - 19200
    /// - 38400
    ///
    /// # Errors
    ///
    /// Returns an io::ErrorKind::InvalidInput if the port reports a baud rate
    /// not in the list above.
    pub fn baudrate(&self) -> io::Result<i32> {
        use termios::{B0, B50, B75, B110, B134, B150, B200, B300, B600, B1200, B1800, B2400,
                      B4800, B9600, B19200, B38400};

        let s = self.termios()?;

        // Read the current output rate
        let baud = termios::cfgetospeed(&s);

        let b = match baud {
            B4800 => 4800,
            B9600 => 9600,
            B19200 => 19200,
            B38400 => 38400,
            B0 => 0,
            B50 => 50,
            B75 => 75,
            B110 => 110,
            B134 => 134,
            B150 => 150,
            B200 => 200,
            B300 => 300,
            B600 => 600,
            B1200 => 1200,
            B1800 => 1800,
            B2400 => 2400,
            _ => return Err(io::Error::new(io::ErrorKind::InvalidInput,
                                           format!("Unknown baud bitmask: {}", baud))),
        };

        Ok(b)
    }

    /// Enable or disable raw mode
    ///
    /// In raw mode, input is available character by character, echoing is disabled, and all
    /// special processing of terminal input and output characters is disabled.
    pub fn set_raw(&mut self, raw: bool) -> io::Result<()> {
        if raw == self.is_raw() {
            return Ok(())
        }

        let mut s = self.termios()?;
        if raw {
            termios::cfmakeraw(&mut s);
        } else {
            s.c_iflag |= termios::IGNBRK | termios::PARMRK;
            s.c_lflag |= termios::ICANON;
        }
        self.set_termios(termios::TCSANOW, &s)?;
        self.is_raw = raw;
        Ok(())
    }

    /// Return whether raw mode is enabled.
    pub fn is_raw(&self) -> bool {
        self.is_raw
    }
}

impl Drop for SerialPort {
    fn drop(&mut self) {
        #[allow(unused_must_use)]
        unsafe {
            // Reset termios settings to their original state.
            let s = self.orig_settings.clone();
            self.set_termios(termios::TCSANOW, &s);

            // Close the file descriptor
            libc::close(self.fd);
        }
    }
}

impl AsRawFd for SerialPort {
    fn as_raw_fd(&self) -> RawFd {
        self.fd
    }
}

use std::io::Read;
impl Read for SerialPort {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        match unsafe { libc::read(self.fd, buf.as_mut_ptr() as *mut libc::c_void, buf.len()) } {
            x if x >= 0 => Ok(x as usize),
            _ => Err(io::Error::last_os_error()),
        }
    }
}

use std::io::Write;
impl Write for SerialPort {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        match unsafe { libc::write(self.fd, buf.as_ptr() as *const libc::c_void, buf.len()) } {
            x if x >= 0 => Ok(x as usize),
            _ => Err(io::Error::last_os_error()),
        }
    }

    fn flush(&mut self) -> io::Result<()> {
        // Write::flush should block until buffered output has actually been
        // transmitted, which is tcdrain's job (tcflush would discard it instead).
        termios::tcdrain(self.fd)
    }
}

use mio::{Evented, PollOpt, Token, Poll, Ready};
use mio::unix::EventedFd;
impl Evented for SerialPort {
    fn register(&self,
                poll: &Poll,
                token: Token,
                interest: Ready,
                opts: PollOpt)
                -> io::Result<()> {
        EventedFd(&self.as_raw_fd()).register(poll, token, interest, opts)
    }

    fn reregister(&self,
                  poll: &Poll,
                  token: Token,
                  interest: Ready,
                  opts: PollOpt)
                  -> io::Result<()> {
        EventedFd(&self.as_raw_fd()).reregister(poll, token, interest, opts)
    }

    fn deregister(&self, poll: &Poll) -> io::Result<()> {
        EventedFd(&self.as_raw_fd()).deregister(poll)
    }
}
/// # Returns /// /// - `Ok(Some(size))` on successful reads /// - `Ok(None)` if calling read would block.
random_line_split
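// Editor's sketch: driving the SerialPort above from a mio event loop.
// Illustrative only: it assumes the mio 0.6-era Poll/Events/Token API that
// the `Evented` impl above targets, and "/dev/ttyUSB0" is a hypothetical
// device path -- substitute your own.
use mio::{Events, Poll, PollOpt, Ready, Token};

fn poll_example() -> std::io::Result<()> {
    let mut port = SerialPort::open("/dev/ttyUSB0")?;
    port.set_baudrate(9600)?;

    let poll = Poll::new()?;
    // Level-triggered readiness: we keep being woken while data remains.
    poll.register(&port, Token(0), Ready::readable(), PollOpt::level())?;

    let mut events = Events::with_capacity(16);
    let mut buf = [0u8; 256];
    loop {
        poll.poll(&mut events, None)?;
        for _event in events.iter() {
            // maybe_read maps WouldBlock to Ok(None), so a spurious wakeup is
            // not treated as an error here.
            if let Some(n) = port.maybe_read(&mut buf)? {
                println!("read {} bytes", n);
            }
        }
    }
}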
psd_channel.rs
use crate::sections::image_data_section::ChannelBytes; use crate::sections::PsdCursor; use thiserror::Error; pub trait IntoRgba { /// Given an index of a pixel in the current rectangle /// (top left is 0.. to the right of that is 1.. etc) return the index of that pixel in the /// RGBA image that will be generated. /// /// If the final image or layer is the size of the PSD then this will return the same idx, /// otherwise it will get transformed. fn rgba_idx(&self, idx: usize) -> usize; /// The first channel fn red(&self) -> &ChannelBytes; /// The second channel fn green(&self) -> Option<&ChannelBytes>; /// The third channel fn blue(&self) -> Option<&ChannelBytes>; /// The fourth channel fn alpha(&self) -> Option<&ChannelBytes>; /// The width of the PSD fn psd_width(&self) -> u32; /// The height of the PSD fn psd_height(&self) -> u32; fn generate_rgba(&self) -> Vec<u8> { let rgba_len = (self.psd_width() * self.psd_height() * 4) as usize; let red = self.red(); let green = self.green(); let blue = self.blue(); let alpha = self.alpha(); // TODO: We're assuming that if we only see two channels it is a 16 bit grayscale // PSD. Instead we should just check the Psd's color mode and depth to see if // they are grayscale and sixteen. As we run into more cases we'll clean things like // this up over time. // if green.is_some() && blue.is_none() && alpha.is_none() { // return self.generate_16_bit_grayscale_rgba(); // } let mut rgba = vec![0; rgba_len]; use crate::psd_channel::PsdChannelKind::*; self.insert_channel_bytes(&mut rgba, Red, red); // If there is a green channel we use it, otherwise we use the red channel since this is // a single channel grey image (such as a heightmap). if let Some(green) = green { self.insert_channel_bytes(&mut rgba, Green, green); } else { self.insert_channel_bytes(&mut rgba, Green, red); } // If there is a blue channel we use it, otherwise we use the red channel since this is // a single channel grey image (such as a heightmap). if let Some(blue) = blue { self.insert_channel_bytes(&mut rgba, Blue, blue); } else { self.insert_channel_bytes(&mut rgba, Blue, red); } if let Some(alpha_channel) = alpha { self.insert_channel_bytes(&mut rgba, TransparencyMask, alpha_channel); } else { // If there is no transparency data then the image is opaque for idx in 0..rgba_len / 4 { rgba[idx * 4 + 3] = 255; } } rgba } /// Generate an RGBA Vec<u8> from a composite image or layer that uses 16 bits per /// pixel. We do this by mapping the 16 bits back down to 8 bits. /// /// The 16 bits are stored across the red and green channels (first and second). fn generate_16_bit_grayscale_rgba(&self) -> Vec<u8> { match self.red() { ChannelBytes::RawData(red) => match self.green().unwrap() { ChannelBytes::RawData(green) => sixteen_to_eight_rgba(red, green), ChannelBytes::RleCompressed(green) => { let green = &rle_decompress(green); sixteen_to_eight_rgba(red, green) } }, ChannelBytes::RleCompressed(red) => { let red = &rle_decompress(red); match self.green().unwrap() { ChannelBytes::RawData(green) => sixteen_to_eight_rgba(red, green), ChannelBytes::RleCompressed(green) => { let green = &rle_decompress(green); sixteen_to_eight_rgba(red, green) } } } } } /// Given some vector of bytes, insert the bytes from the given channel into the vector. /// /// Doing it this way allows us to allocate for one vector and insert all 4 (RGBA) channels into /// it. 
    fn insert_channel_bytes(
        &self,
        rgba: &mut Vec<u8>,
        channel_kind: PsdChannelKind,
        channel_bytes: &ChannelBytes,
    ) {
        match channel_bytes {
            ChannelBytes::RawData(channel_bytes) => {
                let offset = channel_kind.rgba_offset().unwrap();

                for (idx, byte) in channel_bytes.iter().enumerate() {
                    let rgba_idx = self.rgba_idx(idx);
                    rgba[rgba_idx * 4 + offset] = *byte;
                }
            }
            // https://en.wikipedia.org/wiki/PackBits
            ChannelBytes::RleCompressed(channel_bytes) => {
                self.insert_rle_channel(rgba, channel_kind, &channel_bytes);
            }
        }
    }

    /// RLE decompress a channel (R, G, B or A) and insert it into a vector of RGBA pixels.
    ///
    /// We use the channel's offset to know where to put it.. So red would go in 0, 4, 8..
    /// blue would go in 2, 6, 10.. etc
    ///
    /// https://en.wikipedia.org/wiki/PackBits - algorithm used for decompression
    fn
(
        &self,
        rgba: &mut Vec<u8>,
        channel_kind: PsdChannelKind,
        channel_bytes: &[u8],
    ) {
        let mut cursor = PsdCursor::new(&channel_bytes[..]);
        let mut idx = 0;
        let offset = channel_kind.rgba_offset().unwrap();

        let len = cursor.get_ref().len() as u64;
        while cursor.position() < len {
            let header = cursor.read_i8() as i16;

            if header == -128 {
                continue;
            } else if header >= 0 {
                let bytes_to_read = 1 + header;
                if cursor.position() + bytes_to_read as u64 > len {
                    break;
                }
                for byte in cursor.read(bytes_to_read as u32) {
                    let rgba_idx = self.rgba_idx(idx);
                    rgba[rgba_idx * 4 + offset] = *byte;
                    idx += 1;
                }
            } else {
                let repeat = 1 - header;
                if cursor.position() + 1 > len {
                    break;
                }
                let byte = cursor.read_1()[0];
                for _ in 0..repeat as usize {
                    let rgba_idx = self.rgba_idx(idx);
                    rgba[rgba_idx * 4 + offset] = byte;
                    idx += 1;
                }
            };
        }
    }
}

/// RLE decompress a channel
fn rle_decompress(bytes: &[u8]) -> Vec<u8> {
    let mut cursor = PsdCursor::new(&bytes[..]);
    let mut decompressed = vec![];

    while cursor.position() != cursor.get_ref().len() as u64 {
        let header = cursor.read_i8() as i16;

        if header == -128 {
            continue;
        } else if header >= 0 {
            let bytes_to_read = 1 + header;
            for byte in cursor.read(bytes_to_read as u32) {
                decompressed.push(*byte);
            }
        } else {
            let repeat = 1 - header;
            let byte = cursor.read_1()[0];
            for _ in 0..repeat as usize {
                decompressed.push(byte);
            }
        };
    }

    decompressed
}

/// Take two 8 bit channels that together represent a 16 bit channel and convert them down
/// into an 8 bit channel.
///
/// We store the final bytes in the first channel (overwriting the old bytes)
fn sixteen_to_eight_rgba(channel1: &[u8], channel2: &[u8]) -> Vec<u8> {
    let mut eight = Vec::with_capacity(channel1.len());

    for idx in 0..channel1.len() {
        if idx % 2 == 1 {
            continue;
        }

        let sixteen_bit = [channel1[idx], channel1[idx + 1]];
        let sixteen_bit = u16::from_be_bytes(sixteen_bit);

        let eight_bit = (sixteen_bit / 256) as u8;

        eight.push(eight_bit);
        eight.push(eight_bit);
        eight.push(eight_bit);
        eight.push(255);
    }

    for idx in 0..channel2.len() {
        if idx % 2 == 1 {
            continue;
        }

        let sixteen_bit = [channel2[idx], channel2[idx + 1]];
        let sixteen_bit = u16::from_be_bytes(sixteen_bit);

        let eight_bit = (sixteen_bit / 256) as u8;

        eight.push(eight_bit);
        eight.push(eight_bit);
        eight.push(eight_bit);
        eight.push(255);
    }

    eight
}

/// Indicates how a channel's data is compressed
#[derive(Debug, Eq, PartialEq)]
#[allow(missing_docs)]
pub enum PsdChannelCompression {
    /// Not compressed
    RawData = 0,
    /// Compressed using [PackBits RLE compression](https://en.wikipedia.org/wiki/PackBits)
    RleCompressed = 1,
    /// Currently unsupported
    ZipWithoutPrediction = 2,
    /// Currently unsupported
    ZipWithPrediction = 3,
}

impl PsdChannelCompression {
    /// Create a new PsdLayerChannelCompression
    pub fn new(compression: u16) -> Option<PsdChannelCompression> {
        match compression {
            0 => Some(PsdChannelCompression::RawData),
            1 => Some(PsdChannelCompression::RleCompressed),
            2 => Some(PsdChannelCompression::ZipWithoutPrediction),
            3 => Some(PsdChannelCompression::ZipWithPrediction),
            _ => None,
        }
    }
}

/// The different kinds of channels in a layer (red, green, blue, ...).
#[derive(Debug, Hash, Eq, PartialEq, Ord, PartialOrd, Copy, Clone)] #[allow(missing_docs)] pub enum PsdChannelKind { Red = 0, Green = 1, Blue = 2, TransparencyMask = -1, UserSuppliedLayerMask = -2, RealUserSuppliedLayerMask = -3, } /// Represents an invalid channel #[derive(Debug, Error)] pub enum PsdChannelError { #[error("Channel {channel:#?} not present")] ChannelNotFound { channel: PsdChannelKind }, } impl PsdChannelKind { /// Create a new PsdLayerChannel pub fn new(channel_id: i16) -> Option<PsdChannelKind> { match channel_id { 0 => Some(PsdChannelKind::Red), 1 => Some(PsdChannelKind::Green), 2 => Some(PsdChannelKind::Blue), -1 => Some(PsdChannelKind::TransparencyMask), -2 => Some(PsdChannelKind::UserSuppliedLayerMask), -3 => Some(PsdChannelKind::RealUserSuppliedLayerMask), _ => None, } } /// R -> 0 /// G -> 1 /// B -> 2 /// A -> 3 pub fn rgba_offset(self) -> Result<usize, String> { match self { PsdChannelKind::Red => Ok(0), PsdChannelKind::Green => Ok(1), PsdChannelKind::Blue => Ok(2), PsdChannelKind::TransparencyMask => Ok(3), _ => Err(format!("{:#?} is not an RGBA channel", &self)), } } } #[cfg(test)] mod tests { use crate::sections::layer_and_mask_information_section::layer::{ BlendMode, LayerChannels, LayerProperties, }; use crate::PsdLayer; use super::*; /// Verify that when inserting an RLE channel's bytes into an RGBA byte vec we do not attempt to /// read beyond the channel's length. #[test] fn does_not_read_beyond_rle_channels_bytes() { let layer_properties = LayerProperties { name: "".into(), layer_top: 0, layer_left: 0, layer_bottom: 0, layer_right: 0, visible: true, opacity: 0, clipping_mask: false, psd_width: 1, psd_height: 1, blend_mode: BlendMode::Normal, group_id: None, }; let layer = PsdLayer { channels: LayerChannels::from([( PsdChannelKind::Red, ChannelBytes::RleCompressed(vec![0, 0, 0]), )]), layer_properties, }; let mut rgba = vec![0; (layer.width() * layer.height() * 4) as usize]; layer.insert_channel_bytes(&mut rgba, PsdChannelKind::Red, layer.red()); assert_eq!(rgba, vec![0; 4]); } }
insert_rle_channel
identifier_name
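// Editor's sketch: the PackBits scheme that insert_rle_channel above decodes,
// as a self-contained function. Assumptions: this follows the generic
// PackBits rules (https://en.wikipedia.org/wiki/PackBits) and expects
// well-formed input; it is not the crate's own API.
//
//   header n in 0..=127   -> copy the next n + 1 bytes literally
//   header n in -127..=-1 -> repeat the next byte 1 - n times
//   header -128           -> no-op
fn packbits_decode(input: &[u8]) -> Vec<u8> {
    let mut out = Vec::new();
    let mut i = 0;
    while i < input.len() {
        let header = input[i] as i8;
        i += 1;
        if header == -128 {
            continue;
        } else if header >= 0 {
            // Literal run: copy header + 1 bytes verbatim.
            let run = header as usize + 1;
            out.extend_from_slice(&input[i..i + run]);
            i += run;
        } else {
            // Repeat run: emit the next byte 1 - header times.
            let repeat = (1 - header as i16) as usize;
            let byte = input[i];
            i += 1;
            out.extend(std::iter::repeat(byte).take(repeat));
        }
    }
    out
}

#[test]
fn packbits_decode_example() {
    // 0x02 -> copy 3 literal bytes; 0xFE (-2 as i8) -> repeat the next byte 3 times.
    let encoded = [0x02, b'a', b'b', b'c', 0xFE, b'z'];
    assert_eq!(packbits_decode(&encoded), b"abczzz");
}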
psd_channel.rs
use crate::sections::image_data_section::ChannelBytes; use crate::sections::PsdCursor; use thiserror::Error; pub trait IntoRgba { /// Given an index of a pixel in the current rectangle /// (top left is 0.. to the right of that is 1.. etc) return the index of that pixel in the /// RGBA image that will be generated. /// /// If the final image or layer is the size of the PSD then this will return the same idx, /// otherwise it will get transformed. fn rgba_idx(&self, idx: usize) -> usize; /// The first channel fn red(&self) -> &ChannelBytes; /// The second channel fn green(&self) -> Option<&ChannelBytes>; /// The third channel fn blue(&self) -> Option<&ChannelBytes>; /// The fourth channel fn alpha(&self) -> Option<&ChannelBytes>; /// The width of the PSD fn psd_width(&self) -> u32; /// The height of the PSD fn psd_height(&self) -> u32; fn generate_rgba(&self) -> Vec<u8> { let rgba_len = (self.psd_width() * self.psd_height() * 4) as usize; let red = self.red(); let green = self.green(); let blue = self.blue(); let alpha = self.alpha(); // TODO: We're assuming that if we only see two channels it is a 16 bit grayscale // PSD. Instead we should just check the Psd's color mode and depth to see if // they are grayscale and sixteen. As we run into more cases we'll clean things like // this up over time. // if green.is_some() && blue.is_none() && alpha.is_none() { // return self.generate_16_bit_grayscale_rgba(); // } let mut rgba = vec![0; rgba_len]; use crate::psd_channel::PsdChannelKind::*; self.insert_channel_bytes(&mut rgba, Red, red); // If there is a green channel we use it, otherwise we use the red channel since this is // a single channel grey image (such as a heightmap). if let Some(green) = green { self.insert_channel_bytes(&mut rgba, Green, green); } else { self.insert_channel_bytes(&mut rgba, Green, red); } // If there is a blue channel we use it, otherwise we use the red channel since this is // a single channel grey image (such as a heightmap). if let Some(blue) = blue { self.insert_channel_bytes(&mut rgba, Blue, blue); } else { self.insert_channel_bytes(&mut rgba, Blue, red); } if let Some(alpha_channel) = alpha { self.insert_channel_bytes(&mut rgba, TransparencyMask, alpha_channel); } else { // If there is no transparency data then the image is opaque for idx in 0..rgba_len / 4 { rgba[idx * 4 + 3] = 255; } } rgba } /// Generate an RGBA Vec<u8> from a composite image or layer that uses 16 bits per /// pixel. We do this by mapping the 16 bits back down to 8 bits. /// /// The 16 bits are stored across the red and green channels (first and second). fn generate_16_bit_grayscale_rgba(&self) -> Vec<u8> { match self.red() {
sixteen_to_eight_rgba(red, green)
                }
            },
            ChannelBytes::RleCompressed(red) => {
                let red = &rle_decompress(red);

                match self.green().unwrap() {
                    ChannelBytes::RawData(green) => sixteen_to_eight_rgba(red, green),
                    ChannelBytes::RleCompressed(green) => {
                        let green = &rle_decompress(green);
                        sixteen_to_eight_rgba(red, green)
                    }
                }
            }
        }
    }

    /// Given some vector of bytes, insert the bytes from the given channel into the vector.
    ///
    /// Doing it this way allows us to allocate for one vector and insert all 4 (RGBA) channels into
    /// it.
    fn insert_channel_bytes(
        &self,
        rgba: &mut Vec<u8>,
        channel_kind: PsdChannelKind,
        channel_bytes: &ChannelBytes,
    ) {
        match channel_bytes {
            ChannelBytes::RawData(channel_bytes) => {
                let offset = channel_kind.rgba_offset().unwrap();

                for (idx, byte) in channel_bytes.iter().enumerate() {
                    let rgba_idx = self.rgba_idx(idx);
                    rgba[rgba_idx * 4 + offset] = *byte;
                }
            }
            // https://en.wikipedia.org/wiki/PackBits
            ChannelBytes::RleCompressed(channel_bytes) => {
                self.insert_rle_channel(rgba, channel_kind, &channel_bytes);
            }
        }
    }

    /// RLE decompress a channel (R, G, B or A) and insert it into a vector of RGBA pixels.
    ///
    /// We use the channel's offset to know where to put it.. So red would go in 0, 4, 8..
    /// blue would go in 2, 6, 10.. etc
    ///
    /// https://en.wikipedia.org/wiki/PackBits - algorithm used for decompression
    fn insert_rle_channel(
        &self,
        rgba: &mut Vec<u8>,
        channel_kind: PsdChannelKind,
        channel_bytes: &[u8],
    ) {
        let mut cursor = PsdCursor::new(&channel_bytes[..]);
        let mut idx = 0;
        let offset = channel_kind.rgba_offset().unwrap();

        let len = cursor.get_ref().len() as u64;
        while cursor.position() < len {
            let header = cursor.read_i8() as i16;

            if header == -128 {
                continue;
            } else if header >= 0 {
                let bytes_to_read = 1 + header;
                if cursor.position() + bytes_to_read as u64 > len {
                    break;
                }
                for byte in cursor.read(bytes_to_read as u32) {
                    let rgba_idx = self.rgba_idx(idx);
                    rgba[rgba_idx * 4 + offset] = *byte;
                    idx += 1;
                }
            } else {
                let repeat = 1 - header;
                if cursor.position() + 1 > len {
                    break;
                }
                let byte = cursor.read_1()[0];
                for _ in 0..repeat as usize {
                    let rgba_idx = self.rgba_idx(idx);
                    rgba[rgba_idx * 4 + offset] = byte;
                    idx += 1;
                }
            };
        }
    }
}

/// RLE decompress a channel
fn rle_decompress(bytes: &[u8]) -> Vec<u8> {
    let mut cursor = PsdCursor::new(&bytes[..]);
    let mut decompressed = vec![];

    while cursor.position() != cursor.get_ref().len() as u64 {
        let header = cursor.read_i8() as i16;

        if header == -128 {
            continue;
        } else if header >= 0 {
            let bytes_to_read = 1 + header;
            for byte in cursor.read(bytes_to_read as u32) {
                decompressed.push(*byte);
            }
        } else {
            let repeat = 1 - header;
            let byte = cursor.read_1()[0];
            for _ in 0..repeat as usize {
                decompressed.push(byte);
            }
        };
    }

    decompressed
}

/// Take two 8 bit channels that together represent a 16 bit channel and convert them down
/// into an 8 bit channel.
///
/// We store the final bytes in the first channel (overwriting the old bytes)
fn sixteen_to_eight_rgba(channel1: &[u8], channel2: &[u8]) -> Vec<u8> {
    let mut eight = Vec::with_capacity(channel1.len());

    for idx in 0..channel1.len() {
        if idx % 2 == 1 {
            continue;
        }

        let sixteen_bit = [channel1[idx], channel1[idx + 1]];
        let sixteen_bit = u16::from_be_bytes(sixteen_bit);

        let eight_bit = (sixteen_bit / 256) as u8;

        eight.push(eight_bit);
        eight.push(eight_bit);
        eight.push(eight_bit);
        eight.push(255);
    }

    for idx in 0..channel2.len() {
        if idx % 2 == 1 {
            continue;
        }

        let sixteen_bit = [channel2[idx], channel2[idx + 1]];
        let sixteen_bit = u16::from_be_bytes(sixteen_bit);

        let eight_bit = (sixteen_bit / 256) as u8;

        eight.push(eight_bit);
        eight.push(eight_bit);
        eight.push(eight_bit);
        eight.push(255);
    }

    eight
}

/// Indicates how a channel's data is compressed
#[derive(Debug, Eq, PartialEq)]
#[allow(missing_docs)]
pub enum PsdChannelCompression {
    /// Not compressed
    RawData = 0,
    /// Compressed using [PackBits RLE compression](https://en.wikipedia.org/wiki/PackBits)
    RleCompressed = 1,
    /// Currently unsupported
    ZipWithoutPrediction = 2,
    /// Currently unsupported
    ZipWithPrediction = 3,
}

impl PsdChannelCompression {
    /// Create a new PsdLayerChannelCompression
    pub fn new(compression: u16) -> Option<PsdChannelCompression> {
        match compression {
            0 => Some(PsdChannelCompression::RawData),
            1 => Some(PsdChannelCompression::RleCompressed),
            2 => Some(PsdChannelCompression::ZipWithoutPrediction),
            3 => Some(PsdChannelCompression::ZipWithPrediction),
            _ => None,
        }
    }
}

/// The different kinds of channels in a layer (red, green, blue, ...).
#[derive(Debug, Hash, Eq, PartialEq, Ord, PartialOrd, Copy, Clone)]
#[allow(missing_docs)]
pub enum PsdChannelKind {
    Red = 0,
    Green = 1,
    Blue = 2,
    TransparencyMask = -1,
    UserSuppliedLayerMask = -2,
    RealUserSuppliedLayerMask = -3,
}

/// Represents an invalid channel
#[derive(Debug, Error)]
pub enum PsdChannelError {
    #[error("Channel {channel:#?} not present")]
    ChannelNotFound { channel: PsdChannelKind },
}

impl PsdChannelKind {
    /// Create a new PsdLayerChannel
    pub fn new(channel_id: i16) -> Option<PsdChannelKind> {
        match channel_id {
            0 => Some(PsdChannelKind::Red),
            1 => Some(PsdChannelKind::Green),
            2 => Some(PsdChannelKind::Blue),
            -1 => Some(PsdChannelKind::TransparencyMask),
            -2 => Some(PsdChannelKind::UserSuppliedLayerMask),
            -3 => Some(PsdChannelKind::RealUserSuppliedLayerMask),
            _ => None,
        }
    }

    /// R -> 0
    /// G -> 1
    /// B -> 2
    /// A -> 3
    pub fn rgba_offset(self) -> Result<usize, String> {
        match self {
            PsdChannelKind::Red => Ok(0),
            PsdChannelKind::Green => Ok(1),
            PsdChannelKind::Blue => Ok(2),
            PsdChannelKind::TransparencyMask => Ok(3),
            _ => Err(format!("{:#?} is not an RGBA channel", &self)),
        }
    }
}

#[cfg(test)]
mod tests {
    use crate::sections::layer_and_mask_information_section::layer::{
        BlendMode, LayerChannels, LayerProperties,
    };
    use crate::PsdLayer;

    use super::*;

    /// Verify that when inserting an RLE channel's bytes into an RGBA byte vec we do not attempt to
    /// read beyond the channel's length.
#[test] fn does_not_read_beyond_rle_channels_bytes() { let layer_properties = LayerProperties { name: "".into(), layer_top: 0, layer_left: 0, layer_bottom: 0, layer_right: 0, visible: true, opacity: 0, clipping_mask: false, psd_width: 1, psd_height: 1, blend_mode: BlendMode::Normal, group_id: None, }; let layer = PsdLayer { channels: LayerChannels::from([( PsdChannelKind::Red, ChannelBytes::RleCompressed(vec![0, 0, 0]), )]), layer_properties, }; let mut rgba = vec![0; (layer.width() * layer.height() * 4) as usize]; layer.insert_channel_bytes(&mut rgba, PsdChannelKind::Red, layer.red()); assert_eq!(rgba, vec![0; 4]); } }
ChannelBytes::RawData(red) => match self.green().unwrap() { ChannelBytes::RawData(green) => sixteen_to_eight_rgba(red, green), ChannelBytes::RleCompressed(green) => { let green = &rle_decompress(green);
random_line_split
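// Editor's sketch: the 16-bit -> 8-bit mapping performed by
// sixteen_to_eight_rgba above, isolated to a single sample. Dividing the
// big-endian u16 by 256 keeps only its high byte, which is then splatted
// across R, G and B with an opaque alpha. The sample values in the test are
// hypothetical, for illustration only.
fn sixteen_bit_sample_to_rgba(pair: [u8; 2]) -> [u8; 4] {
    let sixteen = u16::from_be_bytes(pair);
    let eight = (sixteen / 256) as u8; // equivalent to keeping pair[0]
    [eight, eight, eight, 255]
}

#[test]
fn high_byte_is_kept() {
    assert_eq!(sixteen_bit_sample_to_rgba([0xAB, 0xCD]), [0xAB, 0xAB, 0xAB, 255]);
    assert_eq!(sixteen_bit_sample_to_rgba([0x00, 0xFF]), [0x00, 0x00, 0x00, 255]);
}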
psd_channel.rs
use crate::sections::image_data_section::ChannelBytes; use crate::sections::PsdCursor; use thiserror::Error; pub trait IntoRgba { /// Given an index of a pixel in the current rectangle /// (top left is 0.. to the right of that is 1.. etc) return the index of that pixel in the /// RGBA image that will be generated. /// /// If the final image or layer is the size of the PSD then this will return the same idx, /// otherwise it will get transformed. fn rgba_idx(&self, idx: usize) -> usize; /// The first channel fn red(&self) -> &ChannelBytes; /// The second channel fn green(&self) -> Option<&ChannelBytes>; /// The third channel fn blue(&self) -> Option<&ChannelBytes>; /// The fourth channel fn alpha(&self) -> Option<&ChannelBytes>; /// The width of the PSD fn psd_width(&self) -> u32; /// The height of the PSD fn psd_height(&self) -> u32; fn generate_rgba(&self) -> Vec<u8> { let rgba_len = (self.psd_width() * self.psd_height() * 4) as usize; let red = self.red(); let green = self.green(); let blue = self.blue(); let alpha = self.alpha(); // TODO: We're assuming that if we only see two channels it is a 16 bit grayscale // PSD. Instead we should just check the Psd's color mode and depth to see if // they are grayscale and sixteen. As we run into more cases we'll clean things like // this up over time. // if green.is_some() && blue.is_none() && alpha.is_none() { // return self.generate_16_bit_grayscale_rgba(); // } let mut rgba = vec![0; rgba_len]; use crate::psd_channel::PsdChannelKind::*; self.insert_channel_bytes(&mut rgba, Red, red); // If there is a green channel we use it, otherwise we use the red channel since this is // a single channel grey image (such as a heightmap). if let Some(green) = green { self.insert_channel_bytes(&mut rgba, Green, green); } else { self.insert_channel_bytes(&mut rgba, Green, red); } // If there is a blue channel we use it, otherwise we use the red channel since this is // a single channel grey image (such as a heightmap). if let Some(blue) = blue { self.insert_channel_bytes(&mut rgba, Blue, blue); } else { self.insert_channel_bytes(&mut rgba, Blue, red); } if let Some(alpha_channel) = alpha { self.insert_channel_bytes(&mut rgba, TransparencyMask, alpha_channel); } else { // If there is no transparency data then the image is opaque for idx in 0..rgba_len / 4 { rgba[idx * 4 + 3] = 255; } } rgba } /// Generate an RGBA Vec<u8> from a composite image or layer that uses 16 bits per /// pixel. We do this by mapping the 16 bits back down to 8 bits. /// /// The 16 bits are stored across the red and green channels (first and second). fn generate_16_bit_grayscale_rgba(&self) -> Vec<u8> { match self.red() { ChannelBytes::RawData(red) => match self.green().unwrap() { ChannelBytes::RawData(green) => sixteen_to_eight_rgba(red, green), ChannelBytes::RleCompressed(green) => { let green = &rle_decompress(green); sixteen_to_eight_rgba(red, green) } }, ChannelBytes::RleCompressed(red) => { let red = &rle_decompress(red); match self.green().unwrap() { ChannelBytes::RawData(green) => sixteen_to_eight_rgba(red, green), ChannelBytes::RleCompressed(green) => { let green = &rle_decompress(green); sixteen_to_eight_rgba(red, green) } } } } } /// Given some vector of bytes, insert the bytes from the given channel into the vector. /// /// Doing it this way allows us to allocate for one vector and insert all 4 (RGBA) channels into /// it. 
    fn insert_channel_bytes(
        &self,
        rgba: &mut Vec<u8>,
        channel_kind: PsdChannelKind,
        channel_bytes: &ChannelBytes,
    ) {
        match channel_bytes {
            ChannelBytes::RawData(channel_bytes) => {
                let offset = channel_kind.rgba_offset().unwrap();

                for (idx, byte) in channel_bytes.iter().enumerate() {
                    let rgba_idx = self.rgba_idx(idx);
                    rgba[rgba_idx * 4 + offset] = *byte;
                }
            }
            // https://en.wikipedia.org/wiki/PackBits
            ChannelBytes::RleCompressed(channel_bytes) => {
                self.insert_rle_channel(rgba, channel_kind, &channel_bytes);
            }
        }
    }

    /// RLE decompress a channel (R, G, B or A) and insert it into a vector of RGBA pixels.
    ///
    /// We use the channel's offset to know where to put it.. So red would go in 0, 4, 8..
    /// blue would go in 2, 6, 10.. etc
    ///
    /// https://en.wikipedia.org/wiki/PackBits - algorithm used for decompression
    fn insert_rle_channel(
        &self,
        rgba: &mut Vec<u8>,
        channel_kind: PsdChannelKind,
        channel_bytes: &[u8],
    ) {
        let mut cursor = PsdCursor::new(&channel_bytes[..]);
        let mut idx = 0;
        let offset = channel_kind.rgba_offset().unwrap();

        let len = cursor.get_ref().len() as u64;
        while cursor.position() < len {
            let header = cursor.read_i8() as i16;

            if header == -128 {
                continue;
            } else if header >= 0 {
                let bytes_to_read = 1 + header;
                if cursor.position() + bytes_to_read as u64 > len {
                    break;
                }
                for byte in cursor.read(bytes_to_read as u32) {
                    let rgba_idx = self.rgba_idx(idx);
                    rgba[rgba_idx * 4 + offset] = *byte;
                    idx += 1;
                }
            } else {
                let repeat = 1 - header;
                if cursor.position() + 1 > len
                let byte = cursor.read_1()[0];
                for _ in 0..repeat as usize {
                    let rgba_idx = self.rgba_idx(idx);
                    rgba[rgba_idx * 4 + offset] = byte;
                    idx += 1;
                }
            };
        }
    }
}

/// RLE decompress a channel
fn rle_decompress(bytes: &[u8]) -> Vec<u8> {
    let mut cursor = PsdCursor::new(&bytes[..]);
    let mut decompressed = vec![];

    while cursor.position() != cursor.get_ref().len() as u64 {
        let header = cursor.read_i8() as i16;

        if header == -128 {
            continue;
        } else if header >= 0 {
            let bytes_to_read = 1 + header;
            for byte in cursor.read(bytes_to_read as u32) {
                decompressed.push(*byte);
            }
        } else {
            let repeat = 1 - header;
            let byte = cursor.read_1()[0];
            for _ in 0..repeat as usize {
                decompressed.push(byte);
            }
        };
    }

    decompressed
}

/// Take two 8 bit channels that together represent a 16 bit channel and convert them down
/// into an 8 bit channel.
///
/// We store the final bytes in the first channel (overwriting the old bytes)
fn sixteen_to_eight_rgba(channel1: &[u8], channel2: &[u8]) -> Vec<u8> {
    let mut eight = Vec::with_capacity(channel1.len());

    for idx in 0..channel1.len() {
        if idx % 2 == 1 {
            continue;
        }

        let sixteen_bit = [channel1[idx], channel1[idx + 1]];
        let sixteen_bit = u16::from_be_bytes(sixteen_bit);

        let eight_bit = (sixteen_bit / 256) as u8;

        eight.push(eight_bit);
        eight.push(eight_bit);
        eight.push(eight_bit);
        eight.push(255);
    }

    for idx in 0..channel2.len() {
        if idx % 2 == 1 {
            continue;
        }

        let sixteen_bit = [channel2[idx], channel2[idx + 1]];
        let sixteen_bit = u16::from_be_bytes(sixteen_bit);

        let eight_bit = (sixteen_bit / 256) as u8;

        eight.push(eight_bit);
        eight.push(eight_bit);
        eight.push(eight_bit);
        eight.push(255);
    }

    eight
}

/// Indicates how a channel's data is compressed
#[derive(Debug, Eq, PartialEq)]
#[allow(missing_docs)]
pub enum PsdChannelCompression {
    /// Not compressed
    RawData = 0,
    /// Compressed using [PackBits RLE compression](https://en.wikipedia.org/wiki/PackBits)
    RleCompressed = 1,
    /// Currently unsupported
    ZipWithoutPrediction = 2,
    /// Currently unsupported
    ZipWithPrediction = 3,
}

impl PsdChannelCompression {
    /// Create a new PsdLayerChannelCompression
    pub fn new(compression: u16) -> Option<PsdChannelCompression> {
        match compression {
            0 => Some(PsdChannelCompression::RawData),
            1 => Some(PsdChannelCompression::RleCompressed),
            2 => Some(PsdChannelCompression::ZipWithoutPrediction),
            3 => Some(PsdChannelCompression::ZipWithPrediction),
            _ => None,
        }
    }
}

/// The different kinds of channels in a layer (red, green, blue, ...).
#[derive(Debug, Hash, Eq, PartialEq, Ord, PartialOrd, Copy, Clone)] #[allow(missing_docs)] pub enum PsdChannelKind { Red = 0, Green = 1, Blue = 2, TransparencyMask = -1, UserSuppliedLayerMask = -2, RealUserSuppliedLayerMask = -3, } /// Represents an invalid channel #[derive(Debug, Error)] pub enum PsdChannelError { #[error("Channel {channel:#?} not present")] ChannelNotFound { channel: PsdChannelKind }, } impl PsdChannelKind { /// Create a new PsdLayerChannel pub fn new(channel_id: i16) -> Option<PsdChannelKind> { match channel_id { 0 => Some(PsdChannelKind::Red), 1 => Some(PsdChannelKind::Green), 2 => Some(PsdChannelKind::Blue), -1 => Some(PsdChannelKind::TransparencyMask), -2 => Some(PsdChannelKind::UserSuppliedLayerMask), -3 => Some(PsdChannelKind::RealUserSuppliedLayerMask), _ => None, } } /// R -> 0 /// G -> 1 /// B -> 2 /// A -> 3 pub fn rgba_offset(self) -> Result<usize, String> { match self { PsdChannelKind::Red => Ok(0), PsdChannelKind::Green => Ok(1), PsdChannelKind::Blue => Ok(2), PsdChannelKind::TransparencyMask => Ok(3), _ => Err(format!("{:#?} is not an RGBA channel", &self)), } } } #[cfg(test)] mod tests { use crate::sections::layer_and_mask_information_section::layer::{ BlendMode, LayerChannels, LayerProperties, }; use crate::PsdLayer; use super::*; /// Verify that when inserting an RLE channel's bytes into an RGBA byte vec we do not attempt to /// read beyond the channel's length. #[test] fn does_not_read_beyond_rle_channels_bytes() { let layer_properties = LayerProperties { name: "".into(), layer_top: 0, layer_left: 0, layer_bottom: 0, layer_right: 0, visible: true, opacity: 0, clipping_mask: false, psd_width: 1, psd_height: 1, blend_mode: BlendMode::Normal, group_id: None, }; let layer = PsdLayer { channels: LayerChannels::from([( PsdChannelKind::Red, ChannelBytes::RleCompressed(vec![0, 0, 0]), )]), layer_properties, }; let mut rgba = vec![0; (layer.width() * layer.height() * 4) as usize]; layer.insert_channel_bytes(&mut rgba, PsdChannelKind::Red, layer.red()); assert_eq!(rgba, vec![0; 4]); } }
{ break; }
conditional_block
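// Editor's sketch: the interleaving that rgba_offset() drives in
// insert_channel_bytes above. A planar channel is written into an interleaved
// RGBA buffer at stride 4, starting at the channel's offset; `offset` here
// stands in for PsdChannelKind::rgba_offset() with the identity rgba_idx case.
fn insert_planar_channel(rgba: &mut [u8], offset: usize, channel: &[u8]) {
    for (idx, byte) in channel.iter().enumerate() {
        rgba[idx * 4 + offset] = *byte;
    }
}

#[test]
fn channels_interleave_at_stride_four() {
    let mut rgba = [0u8; 8]; // two RGBA pixels
    insert_planar_channel(&mut rgba, 0, &[10, 20]); // red  -> offsets 0, 4
    insert_planar_channel(&mut rgba, 2, &[30, 40]); // blue -> offsets 2, 6
    assert_eq!(rgba, [10, 0, 30, 0, 20, 0, 40, 0]);
}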
prime_field.rs
At a minimum `UInt` should implement [`Clone`], [`PartialEq`],
/// [`PartialOrd`], [`Zero`], [`One`], [`AddInline`]`<&Self>`,
/// [`SubInline`]`<&Self>` and [`Montgomery`].
///
/// For [`Root`] it should also implement [`Binary`] and [`DivRem`]. For
/// [`SquareRoot`] it requires [`Binary`] and [`Shr`]`<usize>`. For rand
/// support it requires [`rand::distributions::uniform::SampleUniform`]. For
/// `proptest` support `Parameters` needs to be `'static + Send` (which it
/// really should anyway).
pub struct PrimeField<P: Parameters> {
    // TODO: un-pub. They are pub so FieldElement can have const-fn constructors.
    pub uint: P::UInt,
    pub _parameters: PhantomData<P>,
}

/// Required constant parameters for the prime field
// TODO: Fix naming
#[allow(clippy::module_name_repetitions)]
// UInt can not have interior mutability
#[allow(clippy::declare_interior_mutable_const)]
// HACK: Ideally we'd use MontgomeryParameters<UInt: FieldUInt>
// See <https://github.com/rust-lang/rust/issues/52662>
pub trait Parameters: 'static + Send + Sync + Sized {
    type UInt: FieldUInt;

    /// The modulus to implement in Montgomery form
    const MODULUS: Self::UInt;

    /// M64 = -MODULUS^(-1) mod 2^64
    const M64: u64;

    // R1 = 2^256 mod MODULUS
    const R1: Self::UInt;

    // R2 = 2^512 mod MODULUS
    const R2: Self::UInt;

    // R3 = 2^768 mod MODULUS
    const R3: Self::UInt;

    // Generator and quadratic non-residue
    const GENERATOR: Self::UInt;

    // Multiplicative order: Modulus - 1
    const ORDER: Self::UInt;
}

// Derive `MontgomeryParameters` from `Parameters` as `Montgomery<P:
// Parameters>`
struct Montgomery<P: Parameters>(PhantomData<P>);

impl<P: Parameters> MontgomeryParameters for Montgomery<P> {
    type UInt = P::UInt;
    const M64: u64 = P::M64;
    const MODULUS: Self::UInt = P::MODULUS;
    const R1: Self::UInt = P::R1;
    const R2: Self::UInt = P::R2;
    const R3: Self::UInt = P::R3;
}

impl<P: Parameters> PrimeField<P> {
    // UInt can not have interior mutability
    #[allow(clippy::declare_interior_mutable_const)]
    pub const MODULUS: P::UInt = P::MODULUS;

    #[inline(always)]
    pub fn modulus() -> P::UInt {
        P::MODULUS
    }

    /// The multiplicative order of the field.
    ///
    /// Equal to `modulus() - 1` for prime fields.
    #[inline(always)]
    pub fn order() -> P::UInt {
        P::ORDER
    }

    #[inline(always)]
    pub fn generator() -> Self {
        Self::from_montgomery(P::GENERATOR)
    }

    #[inline(always)]
    pub fn as_montgomery(&self) -> &P::UInt {
        debug_assert!(self.uint < Self::modulus());
        &self.uint
    }

    /// Construct from `UInt` in Montgomery form.
    ///
    /// This is a trivial function.
    // TODO: Make `const fn` after <https://github.com/rust-lang/rust/issues/57563>
    #[inline(always)]
    pub fn from_montgomery(uint: P::UInt) -> Self {
        debug_assert!(uint < Self::modulus());
        Self {
            uint,
            _parameters: PhantomData,
        }
    }

    // TODO: from_radix_str
    // #[cfg(feature = "std")]
    // pub fn from_hex_str(s: &str) -> Self {
    //     Self::from(UInt::from_hex_str(s))
    // }

    /// Convert to `UInt`.
    #[inline(always)] // Simple wrapper for `from_montgomery`
    pub fn to_uint(&self) -> P::UInt {
        debug_assert!(self.uint < Self::modulus());
        P::UInt::from_montgomery::<Montgomery<P>>(self.as_montgomery())
    }

    /// Construct from `UInt`
    ///
    /// It does the Montgomery conversion.
pub fn from_uint(uint: &P::UInt) -> Self { debug_assert!(uint < &Self::modulus()); Self::from_montgomery(uint.to_montgomery::<Montgomery<P>>()) } /// Reduce and construct from `UInt` pub fn from_uint_reduce(uint: &P::UInt) -> Self { let uint = P::UInt::redc_inline::<Montgomery<P>>(uint, &P::UInt::zero()); // UInt should not have interior mutability #[allow(clippy::borrow_interior_mutable_const)] let uint = uint.mul_redc_inline::<Montgomery<P>>(&P::R3); Self::from_montgomery(uint) } #[inline(always)] pub fn double(&self) -> Self { // TODO: Optimize self.clone() + self } #[inline(always)] pub fn triple(&self) -> Self { // TODO: Optimize self.clone() + self + self } } impl<P: Parameters> Clone for PrimeField<P> { fn clone(&self) -> Self { Self::from_montgomery(self.as_montgomery().clone()) } } impl<P: Parameters> PartialEq for PrimeField<P> { fn eq(&self, other: &Self) -> bool { self.as_montgomery() == other.as_montgomery() } } impl<P: Parameters> Eq for PrimeField<P> {} /// Implements [`Hash`] when `UInt` does. impl<U, P> Hash for PrimeField<P> where U: FieldUInt + Hash, P: Parameters<UInt = U>, { fn hash<H: Hasher>(&self, state: &mut H) { self.as_montgomery().hash::<H>(state) } } impl<U, P> fmt::Debug for PrimeField<P> where U: FieldUInt + fmt::Debug, P: Parameters<UInt = U>, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "field_element!(\"{:?}\")", self.to_uint()) } } impl<P: Parameters> Zero for PrimeField<P> { #[inline(always)] fn zero() -> Self { Self::from_montgomery(P::UInt::zero()) } #[inline(always)] fn is_zero(&self) -> bool { self.as_montgomery().is_zero() } } impl<P: Parameters> One for PrimeField<P> { #[inline(always)] fn one() -> Self { Self::from_montgomery(P::R1) } // UInt should not have interior mutability #[allow(clippy::borrow_interior_mutable_const)] #[inline(always)] fn is_one(&self) -> bool { self.as_montgomery() == &P::R1 } } impl<P: Parameters> AddInline<&Self> for PrimeField<P> { #[inline(always)] fn add_inline(&self, rhs: &Self) -> Self { let result = self.as_montgomery().add_inline(rhs.as_montgomery()); let result = result.reduce_1_inline::<Montgomery<P>>(); Self::from_montgomery(result) } } impl<P: Parameters> SubInline<&Self> for PrimeField<P> { #[inline(always)] fn sub_inline(&self, rhs: &Self) -> Self { let lhs = self.as_montgomery(); let rhs = rhs.as_montgomery(); let borrow = rhs > lhs; let mut result = lhs.sub_inline(rhs); if borrow { result.add_assign_inline(&Self::modulus()); } Self::from_montgomery(result) } } impl<P: Parameters> NegInline for PrimeField<P> { #[inline(always)] fn neg_inline(&self) -> Self { if self.is_zero() { Self::zero() } else { Self::from_montgomery(Self::modulus().sub_inline(self.as_montgomery())) } } } impl<P: Parameters> SquareInline for PrimeField<P> { #[inline(always)] fn square_inline(&self) -> Self { Self::from_montgomery(self.as_montgomery().square_redc_inline::<Montgomery<P>>()) } } impl<P: Parameters> MulInline<&Self> for PrimeField<P> { #[inline(always)] fn mul_inline(&self, rhs: &Self) -> Self { Self::from_montgomery( self.as_montgomery() .mul_redc_inline::<Montgomery<P>>(rhs.as_montgomery()), ) } } impl<P: Parameters> Inv for &PrimeField<P> { type Output = Option<PrimeField<P>>; #[inline(always)] // Simple wrapper fn inv(self) -> Self::Output { self.as_montgomery() .inv_redc::<Montgomery<P>>() .map(PrimeField::<P>::from_montgomery) } } impl<P: Parameters> Pow<usize> for &PrimeField<P> { type Output = PrimeField<P>; fn pow(self, exponent: usize) -> Self::Output { self.pow(&exponent) } } impl<P: 
Parameters> Pow<isize> for &PrimeField<P> {
    type Output = Option<PrimeField<P>>;

    fn pow(self, exponent: isize) -> Self::Output {
        let negative = exponent < 0;
        let abs = exponent.abs() as usize;
        if negative {
            self.inv().map(|n| n.pow(&abs))
        } else {
            Some(self.pow(&abs))
        }
    }
}

impl<P: Parameters, Exponent> Pow<&Exponent> for &PrimeField<P>
where
    Exponent: Binary,
{
    type Output = PrimeField<P>;

    fn pow(self, exponent: &Exponent) -> Self::Output {
        if let Some(msb) = exponent.most_significant_bit() {
            let mut result = Self::Output::one();
            let mut square = self.clone();
            for i in 0..=msb {
                if exponent.bit(i) {
                    result *= &square;
                }
                if i < msb {
                    square.square_assign();
                }
            }
            result
        } else {
            // exponent = 0
            Self::Output::one()
        }
    }
}

impl<U, P> Root<usize> for PrimeField<P>
where
    U: FieldUInt + Binary + DivRem<u64, Quotient = U, Remainder = u64>,
    P: Parameters<UInt = U>,
{
    // OPT: replace this with a constant array of roots of unity.
    fn root(order: usize) -> Option<Self> {
        let order = order as u64;
        if let Some((q, rem)) = Self::order().div_rem(order) {
            if rem.is_zero() {
                Some(Self::generator().pow(&q))
            } else {
                None
            }
        } else {
            Some(Self::one())
        }
    }
}

// TODO: Generalize over order type
// Lint has a false positive here
#[allow(single_use_lifetimes)]
impl<U, P> Root<&U> for PrimeField<P>
where
    U: FieldUInt + Binary + for<'a> DivRem<&'a U, Quotient = U, Remainder = U>,
    P: Parameters<UInt = U>,
{
    // OPT: replace this with a constant array of roots of unity.
    fn root(order: &P::UInt) -> Option<Self> {
        if let Some((q, rem)) = Self::order().div_rem(order) {
            if rem.is_zero() {
                Some(Self::generator().pow(&q))
            } else {
                None
            }
        } else {
            Some(Self::one())
        }
    }
}

impl<U, P> SquareRoot for PrimeField<P>
where
    U: FieldUInt + Binary + Shr<usize, Output = U>,
    P: Parameters<UInt = U>,
{
    fn is_quadratic_residue(&self) -> bool {
        self.pow(&(Self::MODULUS >> 1_usize)) != -Self::one()
    }

    // Tonelli-Shanks square root algorithm for prime fields
    // See 'Handbook of Applied Cryptography' algorithm 3.34
    // OPT: Use algorithm 3.39 for Proth primes.
    fn square_root(&self) -> Option<Self> {
        if self.is_zero() {
            return Some(Self::zero());
        }
        if !self.is_quadratic_residue() {
            return None;
        }

        // TODO: Provide as a constant parameter?
        // Factor order as `significant` * 2 ^ `trailing_zeros`
        let trailing_zeros = Self::order().trailing_zeros();
        let significant = Self::order() >> trailing_zeros;
        // The starting value of c in the Tonelli Shanks algorithm. We are using the
        // preferred generator, as the quadratic nonresidue the algorithm requires.
        let c_start = Self::generator().pow(&significant);

        // This algorithm is still correct when the following assertion fails. However,
        // more efficient algorithms exist when MODULUS % 4 == 3 or MODULUS % 8 == 5
        // (3.36 and 3.37 in HAC).
        // debug_assert!(&FieldElement::MODULUS & 7_u64 == 1);

        // OPT: Raising a to a fixed power is a good candidate for an addition chain.
        let mut root = self.pow(&((significant + P::UInt::one()) >> 1));
        let mut c = c_start;
        let inverse = self.inv().unwrap(); // Zero case is handled above

        for i in 1..trailing_zeros {
            if (root.square() * &inverse).pow(&(P::UInt::one() << (trailing_zeros - i - 1)))
                == -Self::one()
            {
                root *= &c;
            }

            // OPT: Create lookup table for squares of c.
c.square_assign(); } Some(root) } } impl<P: Parameters> Default for PrimeField<P> { fn default() -> Self { Self::zero() } } // TODO: Find a way to create generic implementations of these impl<P: Parameters<UInt = U256>> From<PrimeField<P>> for U256 { #[inline(always)] fn from(other: PrimeField<P>) -> Self { other.to_uint() } } impl<P: Parameters<UInt = U256>> From<&PrimeField<P>> for U256 { #[inline(always)] fn from(other: &PrimeField<P>) -> Self { other.to_uint() } } #[cfg(test)] mod tests { use super::*; use crate::FieldElement; use itertools::repeat_n; use num_traits::ToPrimitive; use proptest::prelude::*; use zkp_macros_decl::{field_element, u256h}; use zkp_u256::U256; #[test] fn test_literal() { const SMALL: FieldElement = field_element!("0F"); const NUM: FieldElement = field_element!("0548c135e26faa9c977fb2eda057b54b2e0baa9a77a0be7c80278f4f03462d4c"); assert_eq!(SMALL, FieldElement::from(15)); assert_eq!( NUM, u256h!("0548c135e26faa9c977fb2eda057b54b2e0baa9a77a0be7c80278f4f03462d4c").into() ); } #[test] fn minus_zero_equals_zero() { assert!(FieldElement::zero().is_zero()); assert!(field_element!("00").is_zero()); assert_eq!(FieldElement::zero(), FieldElement::zero()); assert_eq!(-FieldElement::zero(), FieldElement::zero()); } #[test] fn test_add() { let a = field_element!("06eabe184aa9caca2e17f6073bcc10bb9714c0e3866ff00e0d386f4396392852"); let b = field_element!("0313000a764a9a5514efc99070de3f70586794f9bb0add62ac689763aadea7e8"); let c = field_element!("01fdbe22c0f4650e4307bf97acaa502bef7c55dd417acd70b9a106a74117d039"); assert_eq!(a + b, c); } #[test] fn test_sub() { let a = FieldElement::from_montgomery(u256h!( "0548c135e26faa9c977fb2eda057b54b2e0baa9a77a0be7c80278f4f03462d4c" )); let b = FieldElement::from_montgomery(u256h!( "024385f6bebc1c496e09955db534ef4b1eaff9a78e27d4093cfa8f7c8f886f6b" )); let c = field_element!("03d7be0dd45f307519282c76caedd14b3ead2be9cb6512ab60cfd7dfeb5a806a"); assert_eq!(a - b, c); } #[test] fn test_mul() { let a = FieldElement::from_montgomery(u256h!( "0548c135e26faa9c977fb2eda057b54b2e0baa9a77a0be7c80278f4f03462d4c" )); let b = FieldElement::from_montgomery(u256h!( "024385f6bebc1c496e09955db534ef4b1eaff9a78e27d4093cfa8f7c8f886f6b" )); let c = field_element!("0738900c5dcab24b419674df19d2cfeb9782eca6d1107be18577eb060390365b"); assert_eq!(a * b, c); } #[test] fn test_div() { let a = FieldElement::from_montgomery(u256h!( "0548c135e26faa9c977fb2eda057b54b2e0baa9a77a0be7c80278f4f03462d4c" )); let b = FieldElement::from_montgomery(u256h!( "024385f6bebc1c496e09955db534ef4b1eaff9a78e27d4093cfa8f7c8f886f6b" )); let c = field_element!("003a9a346e7103c74dfcddd0eeb4e16ca71d8887c2bed3d4ee718b62015e87b2"); assert_eq!(a / b, c); } proptest!( #[test] fn from_as_isize(n: isize) { prop_assert_eq!(FieldElement::from(n).to_isize().unwrap(), n) } #[test] fn from_as_i128(n: i128) { prop_assert_eq!(FieldElement::from(n).to_i128().unwrap(), n); } #[test] fn add_identity(a: FieldElement) { prop_assert_eq!(&a + FieldElement::zero(), a); } #[test] fn mul_identity(a: FieldElement) { prop_assert_eq!(&a * FieldElement::one(), a); } #[test] fn commutative_add(a: FieldElement, b: FieldElement) { prop_assert_eq!(&a + &b, b + a); } #[test] fn commutative_mul(a: FieldElement, b: FieldElement) { prop_assert_eq!(&a * &b, b * a); } #[test] fn associative_add(a: FieldElement, b: FieldElement, c: FieldElement) { prop_assert_eq!(&a + (&b + &c), (a + b) + c); } #[test] fn associative_mul(a: FieldElement, b: FieldElement, c: FieldElement) { prop_assert_eq!(&a * (&b * &c), (a * b) * c); } #[test] fn 
inverse_add(a: FieldElement) { prop_assert!((&a + a.neg()).is_zero()); } #[test] fn inverse_mul(a: FieldElement) { let inverse = a.inv(); match inverse { None => prop_assert!(a.is_zero()), Some(ai) => prop_assert!((a * ai).is_one()), } } #[test] fn distributivity(a: FieldElement, b: FieldElement, c: FieldElement) { prop_assert_eq!(&a * (&b + &c), (&a * b) + (a * c)); } #[test] fn square(a: FieldElement) { prop_assert_eq!(a.square(), &a * &a); } #[test] fn pow_0(a: FieldElement) { prop_assert!(a.pow(0_usize).is_one()); } #[test] fn pow_1(a: FieldElement) { prop_assert_eq!(a.pow(1_usize), a); } #[test] fn pow_2(a: FieldElement) { prop_assert_eq!(a.pow(2_usize), &a * &a); } #[test] fn pow_n(a: FieldElement, n: usize) { let exponent = n % 512; prop_assert_eq!(a.pow(exponent), repeat_n(a, exponent).product()); } #[test] fn fermats_little_theorem(a: FieldElement) { prop_assert_eq!(a.pow(&FieldElement::MODULUS), a); } #[test] fn square_root(a: FieldElement) { let s = a.square(); let r = s.square_root().unwrap();
// Derive fails for Clone, PartialEq, Eq, Hash
random_line_split
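// Editor's sketch: the Pow implementation above is plain binary
// (square-and-multiply) exponentiation. The same loop over a u64 modulus, as
// a self-contained stand-in for the Montgomery-form UInt operations; u128
// intermediates keep the products from overflowing.
fn pow_mod(mut base: u64, mut exp: u64, modulus: u64) -> u64 {
    let mut result = 1 % modulus;
    base %= modulus;
    while exp > 0 {
        if exp & 1 == 1 {
            // Multiply in the current power of the base for each set bit.
            result = (result as u128 * base as u128 % modulus as u128) as u64;
        }
        // Square once per bit of the exponent.
        base = (base as u128 * base as u128 % modulus as u128) as u64;
        exp >>= 1;
    }
    result
}

#[test]
fn fermat_on_a_small_prime() {
    // For prime p and a not divisible by p, a^(p - 1) = 1 (mod p), matching
    // the fermats_little_theorem proptest above.
    let p: u64 = 65_537;
    assert_eq!(pow_mod(3, p - 1, p), 1);
}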
prime_field.rs
minimum `UInt` should implement [`Clone`], [`PartialEq`],
/// [`PartialOrd`], [`Zero`], [`One`], [`AddInline`]`<&Self>`,
/// [`SubInline`]`<&Self>` and [`Montgomery`].
///
/// For [`Root`] it should also implement [`Binary`] and [`DivRem`]. For
/// [`SquareRoot`] it requires [`Binary`] and [`Shr`]`<usize>`. For rand
/// support it requires [`rand::distributions::uniform::SampleUniform`]. For
/// `proptest` support `Parameters` needs to be `'static + Send` (which it
/// really should anyway).
// Derive fails for Clone, PartialEq, Eq, Hash
pub struct PrimeField<P: Parameters> {
    // TODO: un-pub. They are pub so FieldElement can have const-fn constructors.
    pub uint: P::UInt,
    pub _parameters: PhantomData<P>,
}

/// Required constant parameters for the prime field
// TODO: Fix naming
#[allow(clippy::module_name_repetitions)]
// UInt can not have interior mutability
#[allow(clippy::declare_interior_mutable_const)]
// HACK: Ideally we'd use MontgomeryParameters<UInt: FieldUInt>
// See <https://github.com/rust-lang/rust/issues/52662>
pub trait Parameters: 'static + Send + Sync + Sized {
    type UInt: FieldUInt;

    /// The modulus to implement in Montgomery form
    const MODULUS: Self::UInt;

    /// M64 = -MODULUS^(-1) mod 2^64
    const M64: u64;

    // R1 = 2^256 mod MODULUS
    const R1: Self::UInt;

    // R2 = 2^512 mod MODULUS
    const R2: Self::UInt;

    // R3 = 2^768 mod MODULUS
    const R3: Self::UInt;

    // Generator and quadratic non-residue
    const GENERATOR: Self::UInt;

    // Multiplicative order: Modulus - 1
    const ORDER: Self::UInt;
}

// Derive `MontgomeryParameters` from `Parameters` as `Montgomery<P:
// Parameters>`
struct Montgomery<P: Parameters>(PhantomData<P>);

impl<P: Parameters> MontgomeryParameters for Montgomery<P> {
    type UInt = P::UInt;
    const M64: u64 = P::M64;
    const MODULUS: Self::UInt = P::MODULUS;
    const R1: Self::UInt = P::R1;
    const R2: Self::UInt = P::R2;
    const R3: Self::UInt = P::R3;
}

impl<P: Parameters> PrimeField<P> {
    // UInt can not have interior mutability
    #[allow(clippy::declare_interior_mutable_const)]
    pub const MODULUS: P::UInt = P::MODULUS;

    #[inline(always)]
    pub fn modulus() -> P::UInt {
        P::MODULUS
    }

    /// The multiplicative order of the field.
    ///
    /// Equal to `modulus() - 1` for prime fields.
    #[inline(always)]
    pub fn order() -> P::UInt {
        P::ORDER
    }

    #[inline(always)]
    pub fn generator() -> Self {
        Self::from_montgomery(P::GENERATOR)
    }

    #[inline(always)]
    pub fn as_montgomery(&self) -> &P::UInt {
        debug_assert!(self.uint < Self::modulus());
        &self.uint
    }

    /// Construct from `UInt` in Montgomery form.
    ///
    /// This is a trivial function.
    // TODO: Make `const fn` after <https://github.com/rust-lang/rust/issues/57563>
    #[inline(always)]
    pub fn from_montgomery(uint: P::UInt) -> Self {
        debug_assert!(uint < Self::modulus());
        Self {
            uint,
            _parameters: PhantomData,
        }
    }

    // TODO: from_radix_str
    // #[cfg(feature = "std")]
    // pub fn from_hex_str(s: &str) -> Self {
    //     Self::from(UInt::from_hex_str(s))
    // }

    /// Convert to `UInt`.
    #[inline(always)] // Simple wrapper for `from_montgomery`
    pub fn to_uint(&self) -> P::UInt {
        debug_assert!(self.uint < Self::modulus());
        P::UInt::from_montgomery::<Montgomery<P>>(self.as_montgomery())
    }

    /// Construct from `UInt`
    ///
    /// It does the Montgomery conversion.
pub fn from_uint(uint: &P::UInt) -> Self { debug_assert!(uint < &Self::modulus()); Self::from_montgomery(uint.to_montgomery::<Montgomery<P>>()) } /// Reduce and construct from `UInt` pub fn from_uint_reduce(uint: &P::UInt) -> Self { let uint = P::UInt::redc_inline::<Montgomery<P>>(uint, &P::UInt::zero()); // UInt should not have interior mutability #[allow(clippy::borrow_interior_mutable_const)] let uint = uint.mul_redc_inline::<Montgomery<P>>(&P::R3); Self::from_montgomery(uint) } #[inline(always)] pub fn double(&self) -> Self { // TODO: Optimize self.clone() + self } #[inline(always)] pub fn triple(&self) -> Self { // TODO: Optimize self.clone() + self + self } } impl<P: Parameters> Clone for PrimeField<P> { fn clone(&self) -> Self { Self::from_montgomery(self.as_montgomery().clone()) } } impl<P: Parameters> PartialEq for PrimeField<P> { fn eq(&self, other: &Self) -> bool { self.as_montgomery() == other.as_montgomery() } } impl<P: Parameters> Eq for PrimeField<P> {} /// Implements [`Hash`] when `UInt` does. impl<U, P> Hash for PrimeField<P> where U: FieldUInt + Hash, P: Parameters<UInt = U>, { fn hash<H: Hasher>(&self, state: &mut H) { self.as_montgomery().hash::<H>(state) } } impl<U, P> fmt::Debug for PrimeField<P> where U: FieldUInt + fmt::Debug, P: Parameters<UInt = U>, { fn
(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "field_element!(\"{:?}\")", self.to_uint()) } } impl<P: Parameters> Zero for PrimeField<P> { #[inline(always)] fn zero() -> Self { Self::from_montgomery(P::UInt::zero()) } #[inline(always)] fn is_zero(&self) -> bool { self.as_montgomery().is_zero() } } impl<P: Parameters> One for PrimeField<P> { #[inline(always)] fn one() -> Self { Self::from_montgomery(P::R1) } // UInt should not have interior mutability #[allow(clippy::borrow_interior_mutable_const)] #[inline(always)] fn is_one(&self) -> bool { self.as_montgomery() == &P::R1 } } impl<P: Parameters> AddInline<&Self> for PrimeField<P> { #[inline(always)] fn add_inline(&self, rhs: &Self) -> Self { let result = self.as_montgomery().add_inline(rhs.as_montgomery()); let result = result.reduce_1_inline::<Montgomery<P>>(); Self::from_montgomery(result) } } impl<P: Parameters> SubInline<&Self> for PrimeField<P> { #[inline(always)] fn sub_inline(&self, rhs: &Self) -> Self { let lhs = self.as_montgomery(); let rhs = rhs.as_montgomery(); let borrow = rhs > lhs; let mut result = lhs.sub_inline(rhs); if borrow { result.add_assign_inline(&Self::modulus()); } Self::from_montgomery(result) } } impl<P: Parameters> NegInline for PrimeField<P> { #[inline(always)] fn neg_inline(&self) -> Self { if self.is_zero() { Self::zero() } else { Self::from_montgomery(Self::modulus().sub_inline(self.as_montgomery())) } } } impl<P: Parameters> SquareInline for PrimeField<P> { #[inline(always)] fn square_inline(&self) -> Self { Self::from_montgomery(self.as_montgomery().square_redc_inline::<Montgomery<P>>()) } } impl<P: Parameters> MulInline<&Self> for PrimeField<P> { #[inline(always)] fn mul_inline(&self, rhs: &Self) -> Self { Self::from_montgomery( self.as_montgomery() .mul_redc_inline::<Montgomery<P>>(rhs.as_montgomery()), ) } } impl<P: Parameters> Inv for &PrimeField<P> { type Output = Option<PrimeField<P>>; #[inline(always)] // Simple wrapper fn inv(self) -> Self::Output { self.as_montgomery() .inv_redc::<Montgomery<P>>() .map(PrimeField::<P>::from_montgomery) } } impl<P: Parameters> Pow<usize> for &PrimeField<P> { type Output = PrimeField<P>; fn pow(self, exponent: usize) -> Self::Output { self.pow(&exponent) } } impl<P: Parameters> Pow<isize> for &PrimeField<P> { type Output = Option<PrimeField<P>>; fn pow(self, exponent: isize) -> Self::Output { let negative = exponent < 0; let abs = exponent.abs() as usize; if negative { self.inv().map(|n| n.pow(&abs)) } else { Some(self.pow(&abs)) } } } impl<P: Parameters, Exponent> Pow<&Exponent> for &PrimeField<P> where Exponent: Binary, { type Output = PrimeField<P>; fn pow(self, exponent: &Exponent) -> Self::Output { if let Some(msb) = exponent.most_significant_bit() { let mut result = Self::Output::one(); let mut square = self.clone(); for i in 0..=msb { if exponent.bit(i) { result *= &square; } if i < msb { square.square_assign(); } } result } else { // exponent = 0 Self::Output::one() } } } impl<U, P> Root<usize> for PrimeField<P> where U: FieldUInt + Binary + DivRem<u64, Quotient = U, Remainder = u64>, P: Parameters<UInt = U>, { // OPT: replace this with a constant array of roots of unity. 
    fn root(order: usize) -> Option<Self> {
        let order = order as u64;
        if let Some((q, rem)) = Self::order().div_rem(order) {
            if rem.is_zero() {
                Some(Self::generator().pow(&q))
            } else {
                None
            }
        } else {
            Some(Self::one())
        }
    }
}

// TODO: Generalize over order type
// Lint has a false positive here
#[allow(single_use_lifetimes)]
impl<U, P> Root<&U> for PrimeField<P>
where
    U: FieldUInt + Binary + for<'a> DivRem<&'a U, Quotient = U, Remainder = U>,
    P: Parameters<UInt = U>,
{
    // OPT: replace this with a constant array of roots of unity.
    fn root(order: &P::UInt) -> Option<Self> {
        if let Some((q, rem)) = Self::order().div_rem(order) {
            if rem.is_zero() {
                Some(Self::generator().pow(&q))
            } else {
                None
            }
        } else {
            Some(Self::one())
        }
    }
}

impl<U, P> SquareRoot for PrimeField<P>
where
    U: FieldUInt + Binary + Shr<usize, Output = U>,
    P: Parameters<UInt = U>,
{
    fn is_quadratic_residue(&self) -> bool {
        self.pow(&(Self::MODULUS >> 1_usize)) != -Self::one()
    }

    // Tonelli-Shanks square root algorithm for prime fields
    // See 'Handbook of Applied Cryptography' algorithm 3.34
    // OPT: Use algorithm 3.39 for Proth primes.
    fn square_root(&self) -> Option<Self> {
        if self.is_zero() {
            return Some(Self::zero());
        }
        if !self.is_quadratic_residue() {
            return None;
        }

        // TODO: Provide as a constant parameter?
        // Factor order as `significant` * 2 ^ `trailing_zeros`
        let trailing_zeros = Self::order().trailing_zeros();
        let significant = Self::order() >> trailing_zeros;
        // The starting value of c in the Tonelli Shanks algorithm. We are using the
        // preferred generator, as the quadratic nonresidue the algorithm requires.
        let c_start = Self::generator().pow(&significant);

        // This algorithm is still correct when the following assertion fails. However,
        // more efficient algorithms exist when MODULUS % 4 == 3 or MODULUS % 8 == 5
        // (3.36 and 3.37 in HAC).
        // debug_assert!(&FieldElement::MODULUS & 7_u64 == 1);

        // OPT: Raising a to a fixed power is a good candidate for an addition chain.
        let mut root = self.pow(&((significant + P::UInt::one()) >> 1));
        let mut c = c_start;
        let inverse = self.inv().unwrap(); // Zero case is handled above

        for i in 1..trailing_zeros {
            if (root.square() * &inverse).pow(&(P::UInt::one() << (trailing_zeros - i - 1)))
                == -Self::one()
            {
                root *= &c;
            }

            // OPT: Create lookup table for squares of c.
c.square_assign(); } Some(root) } } impl<P: Parameters> Default for PrimeField<P> { fn default() -> Self { Self::zero() } } // TODO: Find a way to create generic implementations of these impl<P: Parameters<UInt = U256>> From<PrimeField<P>> for U256 { #[inline(always)] fn from(other: PrimeField<P>) -> Self { other.to_uint() } } impl<P: Parameters<UInt = U256>> From<&PrimeField<P>> for U256 { #[inline(always)] fn from(other: &PrimeField<P>) -> Self { other.to_uint() } } #[cfg(test)] mod tests { use super::*; use crate::FieldElement; use itertools::repeat_n; use num_traits::ToPrimitive; use proptest::prelude::*; use zkp_macros_decl::{field_element, u256h}; use zkp_u256::U256; #[test] fn test_literal() { const SMALL: FieldElement = field_element!("0F"); const NUM: FieldElement = field_element!("0548c135e26faa9c977fb2eda057b54b2e0baa9a77a0be7c80278f4f03462d4c"); assert_eq!(SMALL, FieldElement::from(15)); assert_eq!( NUM, u256h!("0548c135e26faa9c977fb2eda057b54b2e0baa9a77a0be7c80278f4f03462d4c").into() ); } #[test] fn minus_zero_equals_zero() { assert!(FieldElement::zero().is_zero()); assert!(field_element!("00").is_zero()); assert_eq!(FieldElement::zero(), FieldElement::zero()); assert_eq!(-FieldElement::zero(), FieldElement::zero()); } #[test] fn test_add() { let a = field_element!("06eabe184aa9caca2e17f6073bcc10bb9714c0e3866ff00e0d386f4396392852"); let b = field_element!("0313000a764a9a5514efc99070de3f70586794f9bb0add62ac689763aadea7e8"); let c = field_element!("01fdbe22c0f4650e4307bf97acaa502bef7c55dd417acd70b9a106a74117d039"); assert_eq!(a + b, c); } #[test] fn test_sub() { let a = FieldElement::from_montgomery(u256h!( "0548c135e26faa9c977fb2eda057b54b2e0baa9a77a0be7c80278f4f03462d4c" )); let b = FieldElement::from_montgomery(u256h!( "024385f6bebc1c496e09955db534ef4b1eaff9a78e27d4093cfa8f7c8f886f6b" )); let c = field_element!("03d7be0dd45f307519282c76caedd14b3ead2be9cb6512ab60cfd7dfeb5a806a"); assert_eq!(a - b, c); } #[test] fn test_mul() { let a = FieldElement::from_montgomery(u256h!( "0548c135e26faa9c977fb2eda057b54b2e0baa9a77a0be7c80278f4f03462d4c" )); let b = FieldElement::from_montgomery(u256h!( "024385f6bebc1c496e09955db534ef4b1eaff9a78e27d4093cfa8f7c8f886f6b" )); let c = field_element!("0738900c5dcab24b419674df19d2cfeb9782eca6d1107be18577eb060390365b"); assert_eq!(a * b, c); } #[test] fn test_div() { let a = FieldElement::from_montgomery(u256h!( "0548c135e26faa9c977fb2eda057b54b2e0baa9a77a0be7c80278f4f03462d4c" )); let b = FieldElement::from_montgomery(u256h!( "024385f6bebc1c496e09955db534ef4b1eaff9a78e27d4093cfa8f7c8f886f6b" )); let c = field_element!("003a9a346e7103c74dfcddd0eeb4e16ca71d8887c2bed3d4ee718b62015e87b2"); assert_eq!(a / b, c); } proptest!( #[test] fn from_as_isize(n: isize) { prop_assert_eq!(FieldElement::from(n).to_isize().unwrap(), n) } #[test] fn from_as_i128(n: i128) { prop_assert_eq!(FieldElement::from(n).to_i128().unwrap(), n); } #[test] fn add_identity(a: FieldElement) { prop_assert_eq!(&a + FieldElement::zero(), a); } #[test] fn mul_identity(a: FieldElement) { prop_assert_eq!(&a * FieldElement::one(), a); } #[test] fn commutative_add(a: FieldElement, b: FieldElement) { prop_assert_eq!(&a + &b, b + a); } #[test] fn commutative_mul(a: FieldElement, b: FieldElement) { prop_assert_eq!(&a * &b, b * a); } #[test] fn associative_add(a: FieldElement, b: FieldElement, c: FieldElement) { prop_assert_eq!(&a + (&b + &c), (a + b) + c); } #[test] fn associative_mul(a: FieldElement, b: FieldElement, c: FieldElement) { prop_assert_eq!(&a * (&b * &c), (a * b) * c); } #[test] fn 
inverse_add(a: FieldElement) { prop_assert!((&a + a.neg()).is_zero()); } #[test] fn inverse_mul(a: FieldElement) { let inverse = a.inv(); match inverse { None => prop_assert!(a.is_zero()), Some(ai) => prop_assert!((a * ai).is_one()), } } #[test] fn distributivity(a: FieldElement, b: FieldElement, c: FieldElement) { prop_assert_eq!(&a * (&b + &c), (&a * b) + (a * c)); } #[test] fn square(a: FieldElement) { prop_assert_eq!(a.square(), &a * &a); } #[test] fn pow_0(a: FieldElement) { prop_assert!(a.pow(0_usize).is_one()); } #[test] fn pow_1(a: FieldElement) { prop_assert_eq!(a.pow(1_usize), a); } #[test] fn pow_2(a: FieldElement) { prop_assert_eq!(a.pow(2_usize), &a * &a); } #[test] fn pow_n(a: FieldElement, n: usize) { let exponent = n % 512; prop_assert_eq!(a.pow(exponent), repeat_n(a, exponent).product()); } #[test] fn fermats_little_theorem(a: FieldElement) { prop_assert_eq!(a.pow(&FieldElement::MODULUS), a); } #[test] fn square_root(a: FieldElement) { let s = a.square(); let r = s.square_root().unwrap();
fmt
identifier_name
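The `square_root` above is Tonelli-Shanks (HAC algorithm 3.34). A minimal self-contained sketch of the same loop over a small prime, using plain `u64` arithmetic instead of the crate's `Parameters`/`FieldUInt` machinery (the modulus `p = 17` and generator `g = 3` used below are demo assumptions, not crate constants):

// Square-and-multiply modular exponentiation; assumes p < 2^32 so the
// intermediate products fit in u64.
fn pow_mod(mut base: u64, mut exp: u64, p: u64) -> u64 {
    let mut acc = 1;
    base %= p;
    while exp > 0 {
        if exp & 1 == 1 {
            acc = acc * base % p;
        }
        base = base * base % p;
        exp >>= 1;
    }
    acc
}

// Tonelli-Shanks sketch: p an odd prime, g a generator of the multiplicative group.
fn sqrt_mod(a: u64, p: u64, g: u64) -> Option<u64> {
    if a == 0 {
        return Some(0);
    }
    // Euler's criterion: a is a quadratic residue iff a^((p-1)/2) == 1.
    if pow_mod(a, (p - 1) / 2, p) != 1 {
        return None;
    }
    // Factor the group order as significant * 2^trailing_zeros.
    let trailing_zeros = (p - 1).trailing_zeros();
    let significant = (p - 1) >> trailing_zeros;
    let mut c = pow_mod(g, significant, p); // nonresidue of order 2^trailing_zeros
    let mut root = pow_mod(a, (significant + 1) / 2, p);
    let inverse = pow_mod(a, p - 2, p); // a^-1 by Fermat's little theorem
    for i in 1..trailing_zeros {
        // If root^2 / a still has a component of order 2^(trailing_zeros - i),
        // fold in the current root of unity to cancel it.
        if pow_mod(root * root % p * inverse % p, 1u64 << (trailing_zeros - i - 1), p) == p - 1 {
            root = root * c % p;
        }
        c = c * c % p;
    }
    Some(root)
}

For p = 17, g = 3 this returns 8 for input 13 (8 * 8 = 64 = 13 mod 17); the crate version is the same loop lifted to generic `UInt`s in Montgomery form.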
prime_field.rs
minimum `UInt` should implement [`Clone`], [`PartialEq`], /// [`PartialOrd`], [`Zero`], [`One`], [`AddInline`]`<&Self>`, /// [`SubInline`]`<&Self>` and [`Montgomery`]. /// /// For [`Root`] it should also implement [`Binary`] and [`DivRem`]. For /// [`SquareRoot`] it requires [`Binary`] and [`Shr`]`<usize>`. For rand /// support it requires [`rand::distributions::uniform::SampleUniform`]. For /// `proptest` support `Parameters` needs to be `'static + Send` (which it /// really should anyway). // Derive fails for Clone, PartialEq, Eq, Hash pub struct PrimeField<P: Parameters> { // TODO: un-pub. They are pub so FieldElement can have const-fn constructors. pub uint: P::UInt, pub _parameters: PhantomData<P>, } /// Required constant parameters for the prime field // TODO: Fix naming #[allow(clippy::module_name_repetitions)] // UInt can not have interior mutability #[allow(clippy::declare_interior_mutable_const)] // HACK: Ideally we'd use MontgomeryParameters<UInt: FieldUInt> // See <https://github.com/rust-lang/rust/issues/52662> pub trait Parameters: 'static + Send + Sync + Sized { type UInt: FieldUInt; /// The modulus to implement in Montgomery form const MODULUS: Self::UInt; /// M64 = -MODULUS^(-1) mod 2^64 const M64: u64; // R1 = 2^256 mod MODULUS const R1: Self::UInt; // R2 = 2^512 mod MODULUS const R2: Self::UInt; // R3 = 2^768 mod MODULUS const R3: Self::UInt; // Generator and quadratic non-residue const GENERATOR: Self::UInt; // Multiplicative order: Modulus - 1 const ORDER: Self::UInt; } // Derive `MontgomeryParameters` from `Parameters` as `Montgomery<P: // Parameters>` struct Montgomery<P: Parameters>(PhantomData<P>); impl<P: Parameters> MontgomeryParameters for Montgomery<P> { type UInt = P::UInt; const M64: u64 = P::M64; const MODULUS: Self::UInt = P::MODULUS; const R1: Self::UInt = P::R1; const R2: Self::UInt = P::R2; const R3: Self::UInt = P::R3; } impl<P: Parameters> PrimeField<P> { // UInt can not have interior mutability #[allow(clippy::declare_interior_mutable_const)] pub const MODULUS: P::UInt = P::MODULUS; #[inline(always)] pub fn modulus() -> P::UInt { P::MODULUS } /// The multiplicative order of the field. /// /// Equal to `modulus() - 1` for prime fields. #[inline(always)] pub fn order() -> P::UInt { P::ORDER } #[inline(always)] pub fn generator() -> Self { Self::from_montgomery(P::GENERATOR) } #[inline(always)] pub fn as_montgomery(&self) -> &P::UInt { debug_assert!(self.uint < Self::modulus()); &self.uint } /// Construct from `UInt` in Montgomery form. /// /// This is a trivial function. // TODO: Make `const fn` after <https://github.com/rust-lang/rust/issues/57563> #[inline(always)] pub fn from_montgomery(uint: P::UInt) -> Self { debug_assert!(uint < Self::modulus()); Self { uint, _parameters: PhantomData, } } // TODO: from_radix_str // #[cfg(feature = "std")] // pub fn from_hex_str(s: &str) -> Self { // Self::from(UInt::from_hex_str(s)) // } /// Convert to `UInt`. #[inline(always)] // Simple wrapper for `from_montgomery` pub fn to_uint(&self) -> P::UInt { debug_assert!(self.uint < Self::modulus()); P::UInt::from_montgomery::<Montgomery<P>>(self.as_montgomery()) } /// Construct from `UInt` /// /// It does the Montgomery conversion. 
pub fn from_uint(uint: &P::UInt) -> Self { debug_assert!(uint < &Self::modulus()); Self::from_montgomery(uint.to_montgomery::<Montgomery<P>>()) } /// Reduce and construct from `UInt` pub fn from_uint_reduce(uint: &P::UInt) -> Self { let uint = P::UInt::redc_inline::<Montgomery<P>>(uint, &P::UInt::zero()); // UInt should not have interior mutability #[allow(clippy::borrow_interior_mutable_const)] let uint = uint.mul_redc_inline::<Montgomery<P>>(&P::R3); Self::from_montgomery(uint) } #[inline(always)] pub fn double(&self) -> Self { // TODO: Optimize self.clone() + self } #[inline(always)] pub fn triple(&self) -> Self { // TODO: Optimize self.clone() + self + self } } impl<P: Parameters> Clone for PrimeField<P> { fn clone(&self) -> Self { Self::from_montgomery(self.as_montgomery().clone()) } } impl<P: Parameters> PartialEq for PrimeField<P> { fn eq(&self, other: &Self) -> bool { self.as_montgomery() == other.as_montgomery() } } impl<P: Parameters> Eq for PrimeField<P> {} /// Implements [`Hash`] when `UInt` does. impl<U, P> Hash for PrimeField<P> where U: FieldUInt + Hash, P: Parameters<UInt = U>, { fn hash<H: Hasher>(&self, state: &mut H) { self.as_montgomery().hash::<H>(state) } } impl<U, P> fmt::Debug for PrimeField<P> where U: FieldUInt + fmt::Debug, P: Parameters<UInt = U>, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "field_element!(\"{:?}\")", self.to_uint()) } } impl<P: Parameters> Zero for PrimeField<P> { #[inline(always)] fn zero() -> Self { Self::from_montgomery(P::UInt::zero()) } #[inline(always)] fn is_zero(&self) -> bool { self.as_montgomery().is_zero() } } impl<P: Parameters> One for PrimeField<P> { #[inline(always)] fn one() -> Self { Self::from_montgomery(P::R1) } // UInt should not have interior mutability #[allow(clippy::borrow_interior_mutable_const)] #[inline(always)] fn is_one(&self) -> bool { self.as_montgomery() == &P::R1 } } impl<P: Parameters> AddInline<&Self> for PrimeField<P> { #[inline(always)] fn add_inline(&self, rhs: &Self) -> Self { let result = self.as_montgomery().add_inline(rhs.as_montgomery()); let result = result.reduce_1_inline::<Montgomery<P>>(); Self::from_montgomery(result) } } impl<P: Parameters> SubInline<&Self> for PrimeField<P> { #[inline(always)] fn sub_inline(&self, rhs: &Self) -> Self { let lhs = self.as_montgomery(); let rhs = rhs.as_montgomery(); let borrow = rhs > lhs; let mut result = lhs.sub_inline(rhs); if borrow { result.add_assign_inline(&Self::modulus()); } Self::from_montgomery(result) } } impl<P: Parameters> NegInline for PrimeField<P> { #[inline(always)] fn neg_inline(&self) -> Self { if self.is_zero() { Self::zero() } else { Self::from_montgomery(Self::modulus().sub_inline(self.as_montgomery())) } } } impl<P: Parameters> SquareInline for PrimeField<P> { #[inline(always)] fn square_inline(&self) -> Self { Self::from_montgomery(self.as_montgomery().square_redc_inline::<Montgomery<P>>()) } } impl<P: Parameters> MulInline<&Self> for PrimeField<P> { #[inline(always)] fn mul_inline(&self, rhs: &Self) -> Self { Self::from_montgomery( self.as_montgomery() .mul_redc_inline::<Montgomery<P>>(rhs.as_montgomery()), ) } } impl<P: Parameters> Inv for &PrimeField<P> { type Output = Option<PrimeField<P>>; #[inline(always)] // Simple wrapper fn inv(self) -> Self::Output { self.as_montgomery() .inv_redc::<Montgomery<P>>() .map(PrimeField::<P>::from_montgomery) } } impl<P: Parameters> Pow<usize> for &PrimeField<P> { type Output = PrimeField<P>; fn pow(self, exponent: usize) -> Self::Output { self.pow(&exponent) } } impl<P: 
Parameters> Pow<isize> for &PrimeField<P> { type Output = Option<PrimeField<P>>; fn pow(self, exponent: isize) -> Self::Output { let negative = exponent < 0; let abs = exponent.abs() as usize; if negative { self.inv().map(|n| n.pow(&abs)) } else { Some(self.pow(&abs)) } } } impl<P: Parameters, Exponent> Pow<&Exponent> for &PrimeField<P> where Exponent: Binary, { type Output = PrimeField<P>; fn pow(self, exponent: &Exponent) -> Self::Output { if let Some(msb) = exponent.most_significant_bit() { let mut result = Self::Output::one(); let mut square = self.clone(); for i in 0..=msb { if exponent.bit(i) { result *= &square; } if i < msb { square.square_assign(); } } result } else { // exponent = 0 Self::Output::one() } } } impl<U, P> Root<usize> for PrimeField<P> where U: FieldUInt + Binary + DivRem<u64, Quotient = U, Remainder = u64>, P: Parameters<UInt = U>, { // OPT: replace this with a constant array of roots of unity. fn root(order: usize) -> Option<Self> { let order = order as u64; if let Some((q, rem)) = Self::order().div_rem(order) { if rem.is_zero()
else { None } } else { Some(Self::one()) } } } // TODO: Generalize over order type // Lint has a false positive here #[allow(single_use_lifetimes)] impl<U, P> Root<&U> for PrimeField<P> where U: FieldUInt + Binary + for<'a> DivRem<&'a U, Quotient = U, Remainder = U>, P: Parameters<UInt = U>, { // OPT: replace this with a constant array of roots of unity. fn root(order: &P::UInt) -> Option<Self> { if let Some((q, rem)) = Self::order().div_rem(order) { if rem.is_zero() { Some(Self::generator().pow(&q)) } else { None } } else { Some(Self::one()) } } } impl<U, P> SquareRoot for PrimeField<P> where U: FieldUInt + Binary + Shr<usize, Output = U>, P: Parameters<UInt = U>, { fn is_quadratic_residue(&self) -> bool { self.pow(&(Self::MODULUS >> 1_usize)) != -Self::one() } // Tonelli-Shanks square root algorithm for prime fields // See 'Handbook of Applied Cryptography' algorithm 3.34 // OPT: Use algorithm 3.39 for Proth primes. fn square_root(&self) -> Option<Self> { if self.is_zero() { return Some(Self::zero()); } if !self.is_quadratic_residue() { return None; } // TODO: Provide as a constant parameter? // Factor order as `significant` * 2 ^ `trailing_zeros` let trailing_zeros = Self::order().trailing_zeros(); let significant = Self::order() >> trailing_zeros; // The starting value of c in the Tonelli-Shanks algorithm. We are using the // preferred generator, as the quadratic nonresidue the algorithm requires. let c_start = Self::generator().pow(&significant); // This algorithm is still correct when the following assertion fails. However, // more efficient algorithms exist when MODULUS % 4 == 3 or MODULUS % 8 == 5 // (3.36 and 3.37 in HAC). // debug_assert!(&FieldElement::MODULUS & 7_u64 == 1); // OPT: Raising a to a fixed power is a good candidate for an addition chain. let mut root = self.pow(&((significant + P::UInt::one()) >> 1)); let mut c = c_start; let inverse = self.inv().unwrap(); // Zero case is handled above for i in 1..trailing_zeros { if (root.square() * &inverse).pow(&(P::UInt::one() << (trailing_zeros - i - 1))) == -Self::one() { root *= &c; } // OPT: Create lookup table for squares of c. 
c.square_assign(); } Some(root) } } impl<P: Parameters> Default for PrimeField<P> { fn default() -> Self { Self::zero() } } // TODO: Find a way to create generic implementations of these impl<P: Parameters<UInt = U256>> From<PrimeField<P>> for U256 { #[inline(always)] fn from(other: PrimeField<P>) -> Self { other.to_uint() } } impl<P: Parameters<UInt = U256>> From<&PrimeField<P>> for U256 { #[inline(always)] fn from(other: &PrimeField<P>) -> Self { other.to_uint() } } #[cfg(test)] mod tests { use super::*; use crate::FieldElement; use itertools::repeat_n; use num_traits::ToPrimitive; use proptest::prelude::*; use zkp_macros_decl::{field_element, u256h}; use zkp_u256::U256; #[test] fn test_literal() { const SMALL: FieldElement = field_element!("0F"); const NUM: FieldElement = field_element!("0548c135e26faa9c977fb2eda057b54b2e0baa9a77a0be7c80278f4f03462d4c"); assert_eq!(SMALL, FieldElement::from(15)); assert_eq!( NUM, u256h!("0548c135e26faa9c977fb2eda057b54b2e0baa9a77a0be7c80278f4f03462d4c").into() ); } #[test] fn minus_zero_equals_zero() { assert!(FieldElement::zero().is_zero()); assert!(field_element!("00").is_zero()); assert_eq!(FieldElement::zero(), FieldElement::zero()); assert_eq!(-FieldElement::zero(), FieldElement::zero()); } #[test] fn test_add() { let a = field_element!("06eabe184aa9caca2e17f6073bcc10bb9714c0e3866ff00e0d386f4396392852"); let b = field_element!("0313000a764a9a5514efc99070de3f70586794f9bb0add62ac689763aadea7e8"); let c = field_element!("01fdbe22c0f4650e4307bf97acaa502bef7c55dd417acd70b9a106a74117d039"); assert_eq!(a + b, c); } #[test] fn test_sub() { let a = FieldElement::from_montgomery(u256h!( "0548c135e26faa9c977fb2eda057b54b2e0baa9a77a0be7c80278f4f03462d4c" )); let b = FieldElement::from_montgomery(u256h!( "024385f6bebc1c496e09955db534ef4b1eaff9a78e27d4093cfa8f7c8f886f6b" )); let c = field_element!("03d7be0dd45f307519282c76caedd14b3ead2be9cb6512ab60cfd7dfeb5a806a"); assert_eq!(a - b, c); } #[test] fn test_mul() { let a = FieldElement::from_montgomery(u256h!( "0548c135e26faa9c977fb2eda057b54b2e0baa9a77a0be7c80278f4f03462d4c" )); let b = FieldElement::from_montgomery(u256h!( "024385f6bebc1c496e09955db534ef4b1eaff9a78e27d4093cfa8f7c8f886f6b" )); let c = field_element!("0738900c5dcab24b419674df19d2cfeb9782eca6d1107be18577eb060390365b"); assert_eq!(a * b, c); } #[test] fn test_div() { let a = FieldElement::from_montgomery(u256h!( "0548c135e26faa9c977fb2eda057b54b2e0baa9a77a0be7c80278f4f03462d4c" )); let b = FieldElement::from_montgomery(u256h!( "024385f6bebc1c496e09955db534ef4b1eaff9a78e27d4093cfa8f7c8f886f6b" )); let c = field_element!("003a9a346e7103c74dfcddd0eeb4e16ca71d8887c2bed3d4ee718b62015e87b2"); assert_eq!(a / b, c); } proptest!( #[test] fn from_as_isize(n: isize) { prop_assert_eq!(FieldElement::from(n).to_isize().unwrap(), n) } #[test] fn from_as_i128(n: i128) { prop_assert_eq!(FieldElement::from(n).to_i128().unwrap(), n); } #[test] fn add_identity(a: FieldElement) { prop_assert_eq!(&a + FieldElement::zero(), a); } #[test] fn mul_identity(a: FieldElement) { prop_assert_eq!(&a * FieldElement::one(), a); } #[test] fn commutative_add(a: FieldElement, b: FieldElement) { prop_assert_eq!(&a + &b, b + a); } #[test] fn commutative_mul(a: FieldElement, b: FieldElement) { prop_assert_eq!(&a * &b, b * a); } #[test] fn associative_add(a: FieldElement, b: FieldElement, c: FieldElement) { prop_assert_eq!(&a + (&b + &c), (a + b) + c); } #[test] fn associative_mul(a: FieldElement, b: FieldElement, c: FieldElement) { prop_assert_eq!(&a * (&b * &c), (a * b) * c); } #[test] fn 
inverse_add(a: FieldElement) { prop_assert!((&a + a.neg()).is_zero()); } #[test] fn inverse_mul(a: FieldElement) { let inverse = a.inv(); match inverse { None => prop_assert!(a.is_zero()), Some(ai) => prop_assert!((a * ai).is_one()), } } #[test] fn distributivity(a: FieldElement, b: FieldElement, c: FieldElement) { prop_assert_eq!(&a * (&b + &c), (&a * b) + (a * c)); } #[test] fn square(a: FieldElement) { prop_assert_eq!(a.square(), &a * &a); } #[test] fn pow_0(a: FieldElement) { prop_assert!(a.pow(0_usize).is_one()); } #[test] fn pow_1(a: FieldElement) { prop_assert_eq!(a.pow(1_usize), a); } #[test] fn pow_2(a: FieldElement) { prop_assert_eq!(a.pow(2_usize), &a * &a); } #[test] fn pow_n(a: FieldElement, n: usize) { let exponent = n % 512; prop_assert_eq!(a.pow(exponent), repeat_n(a, exponent).product()); } #[test] fn fermats_little_theorem(a: FieldElement) { prop_assert_eq!(a.pow(&FieldElement::MODULUS), a); } #[test] fn square_root(a: FieldElement) { let s = a.square(); let r = s.square_root().unwrap();
{ Some(Self::generator().pow(&q)) }
conditional_block
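Throughout the impls above, `uint` stores x * R mod p (Montgomery form) and multiplication goes through REDC (`mul_redc_inline`). A single-limb sketch with R = 2^64, assuming an odd demo modulus p < 2^63 so the addition below cannot overflow `u128` — the crate's real version is multi-limb and uses the precomputed constants `M64` and `R1`..`R3`:

// p^-1 mod 2^64 by Newton-Hensel iteration (p odd); six doublings take the
// precision from 1 bit to 64 bits.
fn inv_pow2_64(p: u64) -> u64 {
    let mut inv: u64 = 1;
    for _ in 0..6 {
        inv = inv.wrapping_mul(2u64.wrapping_sub(p.wrapping_mul(inv)));
    }
    inv
}

// REDC: given t < p * 2^64, return t * 2^-64 mod p.
fn redc(t: u128, p: u64, m64: u64) -> u64 {
    let m = (t as u64).wrapping_mul(m64); // m = t * (-p^-1) mod 2^64
    let t = ((t + u128::from(m) * u128::from(p)) >> 64) as u64; // low 64 bits cancel
    if t >= p { t - p } else { t }
}

fn to_montgomery(x: u64, p: u64) -> u64 {
    ((u128::from(x) << 64) % u128::from(p)) as u64 // x * R mod p
}

fn mul_mod(a: u64, b: u64, p: u64, m64: u64) -> u64 {
    // (aR)(bR) * R^-1 = (ab)R: the product stays in Montgomery form.
    redc(u128::from(a) * u128::from(b), p, m64)
}

A demo with the (assumed prime) modulus 1_000_000_007: with `m64 = inv_pow2_64(p).wrapping_neg()`, converting 3 and 5 via `to_montgomery`, multiplying with `mul_mod`, and converting back with `redc(u128::from(product), p, m64)` yields 15.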
prime_field.rs
: FieldUInt; /// The modulus to implement in Montgomery form const MODULUS: Self::UInt; /// M64 = -MODULUS^(-1) mod 2^64 const M64: u64; // R1 = 2^256 mod MODULUS const R1: Self::UInt; // R2 = 2^512 mod MODULUS const R2: Self::UInt; // R3 = 2^768 mod MODULUS const R3: Self::UInt; // Generator and quadratic non-residue const GENERATOR: Self::UInt; // Multiplicative order: Modulus - 1 const ORDER: Self::UInt; } // Derive `MontgomeryParameters` from `Parameters` as `Montgomery<P: // Parameters>` struct Montgomery<P: Parameters>(PhantomData<P>); impl<P: Parameters> MontgomeryParameters for Montgomery<P> { type UInt = P::UInt; const M64: u64 = P::M64; const MODULUS: Self::UInt = P::MODULUS; const R1: Self::UInt = P::R1; const R2: Self::UInt = P::R2; const R3: Self::UInt = P::R3; } impl<P: Parameters> PrimeField<P> { // UInt can not have interior mutability #[allow(clippy::declare_interior_mutable_const)] pub const MODULUS: P::UInt = P::MODULUS; #[inline(always)] pub fn modulus() -> P::UInt { P::MODULUS } /// The multiplicative order of the field. /// /// Equal to `modulus() - 1` for prime fields. #[inline(always)] pub fn order() -> P::UInt { P::ORDER } #[inline(always)] pub fn generator() -> Self { Self::from_montgomery(P::GENERATOR) } #[inline(always)] pub fn as_montgomery(&self) -> &P::UInt { debug_assert!(self.uint < Self::modulus()); &self.uint } /// Construct from `UInt` in Montgomery form. /// /// This is a trivial function. // TODO: Make `const fn` after <https://github.com/rust-lang/rust/issues/57563> #[inline(always)] pub fn from_montgomery(uint: P::UInt) -> Self { debug_assert!(uint < Self::modulus()); Self { uint, _parameters: PhantomData, } } // TODO: from_radix_str // #[cfg(feature = "std")] // pub fn from_hex_str(s: &str) -> Self { // Self::from(UInt::from_hex_str(s)) // } /// Convert to `UInt`. #[inline(always)] // Simple wrapper for `from_montgomery` pub fn to_uint(&self) -> P::UInt { debug_assert!(self.uint < Self::modulus()); P::UInt::from_montgomery::<Montgomery<P>>(self.as_montgomery()) } /// Construct from `UInt` /// /// It does the montgomery conversion. pub fn from_uint(uint: &P::UInt) -> Self { debug_assert!(uint < &Self::modulus()); Self::from_montgomery(uint.to_montgomery::<Montgomery<P>>()) } /// Reduce and construct from `UInt` pub fn from_uint_reduce(uint: &P::UInt) -> Self { let uint = P::UInt::redc_inline::<Montgomery<P>>(uint, &P::UInt::zero()); // UInt should not have interior mutability #[allow(clippy::borrow_interior_mutable_const)] let uint = uint.mul_redc_inline::<Montgomery<P>>(&P::R3); Self::from_montgomery(uint) } #[inline(always)] pub fn double(&self) -> Self { // TODO: Optimize self.clone() + self } #[inline(always)] pub fn triple(&self) -> Self { // TODO: Optimize self.clone() + self + self } } impl<P: Parameters> Clone for PrimeField<P> { fn clone(&self) -> Self { Self::from_montgomery(self.as_montgomery().clone()) } } impl<P: Parameters> PartialEq for PrimeField<P> { fn eq(&self, other: &Self) -> bool { self.as_montgomery() == other.as_montgomery() } } impl<P: Parameters> Eq for PrimeField<P> {} /// Implements [`Hash`] when `UInt` does. 
impl<U, P> Hash for PrimeField<P> where U: FieldUInt + Hash, P: Parameters<UInt = U>, { fn hash<H: Hasher>(&self, state: &mut H) { self.as_montgomery().hash::<H>(state) } } impl<U, P> fmt::Debug for PrimeField<P> where U: FieldUInt + fmt::Debug, P: Parameters<UInt = U>, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "field_element!(\"{:?}\")", self.to_uint()) } } impl<P: Parameters> Zero for PrimeField<P> { #[inline(always)] fn zero() -> Self { Self::from_montgomery(P::UInt::zero()) } #[inline(always)] fn is_zero(&self) -> bool { self.as_montgomery().is_zero() } } impl<P: Parameters> One for PrimeField<P> { #[inline(always)] fn one() -> Self { Self::from_montgomery(P::R1) } // UInt should not have interior mutability #[allow(clippy::borrow_interior_mutable_const)] #[inline(always)] fn is_one(&self) -> bool { self.as_montgomery() == &P::R1 } } impl<P: Parameters> AddInline<&Self> for PrimeField<P> { #[inline(always)] fn add_inline(&self, rhs: &Self) -> Self { let result = self.as_montgomery().add_inline(rhs.as_montgomery()); let result = result.reduce_1_inline::<Montgomery<P>>(); Self::from_montgomery(result) } } impl<P: Parameters> SubInline<&Self> for PrimeField<P> { #[inline(always)] fn sub_inline(&self, rhs: &Self) -> Self { let lhs = self.as_montgomery(); let rhs = rhs.as_montgomery(); let borrow = rhs > lhs; let mut result = lhs.sub_inline(rhs); if borrow { result.add_assign_inline(&Self::modulus()); } Self::from_montgomery(result) } } impl<P: Parameters> NegInline for PrimeField<P> { #[inline(always)] fn neg_inline(&self) -> Self { if self.is_zero() { Self::zero() } else { Self::from_montgomery(Self::modulus().sub_inline(self.as_montgomery())) } } } impl<P: Parameters> SquareInline for PrimeField<P> { #[inline(always)] fn square_inline(&self) -> Self { Self::from_montgomery(self.as_montgomery().square_redc_inline::<Montgomery<P>>()) } } impl<P: Parameters> MulInline<&Self> for PrimeField<P> { #[inline(always)] fn mul_inline(&self, rhs: &Self) -> Self { Self::from_montgomery( self.as_montgomery() .mul_redc_inline::<Montgomery<P>>(rhs.as_montgomery()), ) } } impl<P: Parameters> Inv for &PrimeField<P> { type Output = Option<PrimeField<P>>; #[inline(always)] // Simple wrapper fn inv(self) -> Self::Output { self.as_montgomery() .inv_redc::<Montgomery<P>>() .map(PrimeField::<P>::from_montgomery) } } impl<P: Parameters> Pow<usize> for &PrimeField<P> { type Output = PrimeField<P>; fn pow(self, exponent: usize) -> Self::Output { self.pow(&exponent) } } impl<P: Parameters> Pow<isize> for &PrimeField<P> { type Output = Option<PrimeField<P>>; fn pow(self, exponent: isize) -> Self::Output { let negative = exponent < 0; let abs = exponent.abs() as usize; if negative { self.inv().map(|n| n.pow(&abs)) } else { Some(self.pow(&abs)) } } } impl<P: Parameters, Exponent> Pow<&Exponent> for &PrimeField<P> where Exponent: Binary, { type Output = PrimeField<P>; fn pow(self, exponent: &Exponent) -> Self::Output { if let Some(msb) = exponent.most_significant_bit() { let mut result = Self::Output::one(); let mut square = self.clone(); for i in 0..=msb { if exponent.bit(i) { result *= &square; } if i < msb { square.square_assign(); } } result } else { // exponent = 0 Self::Output::one() } } } impl<U, P> Root<usize> for PrimeField<P> where U: FieldUInt + Binary + DivRem<u64, Quotient = U, Remainder = u64>, P: Parameters<UInt = U>, { // OPT: replace this with a constant array of roots of unity. 
fn root(order: usize) -> Option<Self> { let order = order as u64; if let Some((q, rem)) = Self::order().div_rem(order) { if rem.is_zero() { Some(Self::generator().pow(&q)) } else { None } } else { Some(Self::one()) } } } // TODO: Generalize over order type // Lint has a false positive here #[allow(single_use_lifetimes)] impl<U, P> Root<&U> for PrimeField<P> where U: FieldUInt + Binary + for<'a> DivRem<&'a U, Quotient = U, Remainder = U>, P: Parameters<UInt = U>, { // OPT: replace this with a constant array of roots of unity. fn root(order: &P::UInt) -> Option<Self> { if let Some((q, rem)) = Self::order().div_rem(order) { if rem.is_zero() { Some(Self::generator().pow(&q)) } else { None } } else { Some(Self::one()) } } } impl<U, P> SquareRoot for PrimeField<P> where U: FieldUInt + Binary + Shr<usize, Output = U>, P: Parameters<UInt = U>, { fn is_quadratic_residue(&self) -> bool { self.pow(&(Self::MODULUS >> 1_usize)) != -Self::one() } // Tonelli-Shanks square root algorithm for prime fields // See 'Handbook of Applied Cryptography' algorithm 3.34 // OPT: Use algorithm 3.39 for Proth primes. fn square_root(&self) -> Option<Self> { if self.is_zero() { return Some(Self::zero()); } if !self.is_quadratic_residue() { return None; } // TODO: Provide as a constant parameter? // Factor order as `significant` * 2 ^ `trailing_zeros` let trailing_zeros = Self::order().trailing_zeros(); let significant = Self::order() >> trailing_zeros; // The starting value of c in the Tonelli-Shanks algorithm. We are using the // preferred generator, as the quadratic nonresidue the algorithm requires. let c_start = Self::generator().pow(&significant); // This algorithm is still correct when the following assertion fails. However, // more efficient algorithms exist when MODULUS % 4 == 3 or MODULUS % 8 == 5 // (3.36 and 3.37 in HAC). // debug_assert!(&FieldElement::MODULUS & 7_u64 == 1); // OPT: Raising a to a fixed power is a good candidate for an addition chain. let mut root = self.pow(&((significant + P::UInt::one()) >> 1)); let mut c = c_start; let inverse = self.inv().unwrap(); // Zero case is handled above for i in 1..trailing_zeros { if (root.square() * &inverse).pow(&(P::UInt::one() << (trailing_zeros - i - 1))) == -Self::one() { root *= &c; } // OPT: Create lookup table for squares of c. 
c.square_assign(); } Some(root) } } impl<P: Parameters> Default for PrimeField<P> { fn default() -> Self { Self::zero() } } // TODO: Find a way to create generic implementations of these impl<P: Parameters<UInt = U256>> From<PrimeField<P>> for U256 { #[inline(always)] fn from(other: PrimeField<P>) -> Self { other.to_uint() } } impl<P: Parameters<UInt = U256>> From<&PrimeField<P>> for U256 { #[inline(always)] fn from(other: &PrimeField<P>) -> Self { other.to_uint() } } #[cfg(test)] mod tests { use super::*; use crate::FieldElement; use itertools::repeat_n; use num_traits::ToPrimitive; use proptest::prelude::*; use zkp_macros_decl::{field_element, u256h}; use zkp_u256::U256; #[test] fn test_literal() { const SMALL: FieldElement = field_element!("0F"); const NUM: FieldElement = field_element!("0548c135e26faa9c977fb2eda057b54b2e0baa9a77a0be7c80278f4f03462d4c"); assert_eq!(SMALL, FieldElement::from(15)); assert_eq!( NUM, u256h!("0548c135e26faa9c977fb2eda057b54b2e0baa9a77a0be7c80278f4f03462d4c").into() ); } #[test] fn minus_zero_equals_zero() { assert!(FieldElement::zero().is_zero()); assert!(field_element!("00").is_zero()); assert_eq!(FieldElement::zero(), FieldElement::zero()); assert_eq!(-FieldElement::zero(), FieldElement::zero()); } #[test] fn test_add() { let a = field_element!("06eabe184aa9caca2e17f6073bcc10bb9714c0e3866ff00e0d386f4396392852"); let b = field_element!("0313000a764a9a5514efc99070de3f70586794f9bb0add62ac689763aadea7e8"); let c = field_element!("01fdbe22c0f4650e4307bf97acaa502bef7c55dd417acd70b9a106a74117d039"); assert_eq!(a + b, c); } #[test] fn test_sub() { let a = FieldElement::from_montgomery(u256h!( "0548c135e26faa9c977fb2eda057b54b2e0baa9a77a0be7c80278f4f03462d4c" )); let b = FieldElement::from_montgomery(u256h!( "024385f6bebc1c496e09955db534ef4b1eaff9a78e27d4093cfa8f7c8f886f6b" )); let c = field_element!("03d7be0dd45f307519282c76caedd14b3ead2be9cb6512ab60cfd7dfeb5a806a"); assert_eq!(a - b, c); } #[test] fn test_mul() { let a = FieldElement::from_montgomery(u256h!( "0548c135e26faa9c977fb2eda057b54b2e0baa9a77a0be7c80278f4f03462d4c" )); let b = FieldElement::from_montgomery(u256h!( "024385f6bebc1c496e09955db534ef4b1eaff9a78e27d4093cfa8f7c8f886f6b" )); let c = field_element!("0738900c5dcab24b419674df19d2cfeb9782eca6d1107be18577eb060390365b"); assert_eq!(a * b, c); } #[test] fn test_div() { let a = FieldElement::from_montgomery(u256h!( "0548c135e26faa9c977fb2eda057b54b2e0baa9a77a0be7c80278f4f03462d4c" )); let b = FieldElement::from_montgomery(u256h!( "024385f6bebc1c496e09955db534ef4b1eaff9a78e27d4093cfa8f7c8f886f6b" )); let c = field_element!("003a9a346e7103c74dfcddd0eeb4e16ca71d8887c2bed3d4ee718b62015e87b2"); assert_eq!(a / b, c); } proptest!( #[test] fn from_as_isize(n: isize) { prop_assert_eq!(FieldElement::from(n).to_isize().unwrap(), n) } #[test] fn from_as_i128(n: i128) { prop_assert_eq!(FieldElement::from(n).to_i128().unwrap(), n); } #[test] fn add_identity(a: FieldElement) { prop_assert_eq!(&a + FieldElement::zero(), a); } #[test] fn mul_identity(a: FieldElement) { prop_assert_eq!(&a * FieldElement::one(), a); } #[test] fn commutative_add(a: FieldElement, b: FieldElement) { prop_assert_eq!(&a + &b, b + a); } #[test] fn commutative_mul(a: FieldElement, b: FieldElement) { prop_assert_eq!(&a * &b, b * a); } #[test] fn associative_add(a: FieldElement, b: FieldElement, c: FieldElement) { prop_assert_eq!(&a + (&b + &c), (a + b) + c); } #[test] fn associative_mul(a: FieldElement, b: FieldElement, c: FieldElement) { prop_assert_eq!(&a * (&b * &c), (a * b) * c); } #[test] fn 
inverse_add(a: FieldElement) { prop_assert!((&a + a.neg()).is_zero()); } #[test] fn inverse_mul(a: FieldElement) { let inverse = a.inv(); match inverse { None => prop_assert!(a.is_zero()), Some(ai) => prop_assert!((a * ai).is_one()), } } #[test] fn distributivity(a: FieldElement, b: FieldElement, c: FieldElement) { prop_assert_eq!(&a * (&b + &c), (&a * b) + (a * c)); } #[test] fn square(a: FieldElement) { prop_assert_eq!(a.square(), &a * &a); } #[test] fn pow_0(a: FieldElement) { prop_assert!(a.pow(0_usize).is_one()); } #[test] fn pow_1(a: FieldElement) { prop_assert_eq!(a.pow(1_usize), a); } #[test] fn pow_2(a: FieldElement) { prop_assert_eq!(a.pow(2_usize), &a * &a); } #[test] fn pow_n(a: FieldElement, n: usize) { let exponent = n % 512; prop_assert_eq!(a.pow(exponent), repeat_n(a, exponent).product()); } #[test] fn fermats_little_theorem(a: FieldElement) { prop_assert_eq!(a.pow(&FieldElement::MODULUS), a); } #[test] fn square_root(a: FieldElement) { let s = a.square(); let r = s.square_root().unwrap(); prop_assert!(r == a || r == -a); } ); #[test] fn zeroth_root_of_unity() { assert_eq!(FieldElement::root(0).unwrap(), FieldElement::one()); } #[test] fn roots_of_unity_squared()
{ let powers_of_two = (0..193).map(|n| U256::ONE << n); let roots_of_unity: Vec<_> = powers_of_two .map(|n| FieldElement::root(&n).unwrap()) .collect(); for (smaller_root, larger_root) in roots_of_unity[1..].iter().zip(roots_of_unity.as_slice()) { assert_eq!(smaller_root.square(), *larger_root); assert!(!smaller_root.is_one()); } }
identifier_body
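Both `Root` impls above compute an n-th root of unity as generator^((p-1)/n) when n divides the multiplicative order, return `None` when it does not, and return `one()` for the degenerate n = 0 (division by zero yields `None` from `div_rem`, taking the else branch — which the `zeroth_root_of_unity` test pins down). Reusing the `pow_mod` helper from the earlier sketch, with the same demo assumptions p = 17, g = 3:

// n-th root of unity sketch: g^((p-1)/n) has multiplicative order exactly n
// when n divides p - 1 and g generates the multiplicative group.
fn root_of_unity(n: u64, p: u64, g: u64) -> Option<u64> {
    if n == 0 {
        return Some(1); // mirrors root(0) == one() above
    }
    let order = p - 1;
    if order % n != 0 {
        return None;
    }
    Some(pow_mod(g, order / n, p))
}

// root_of_unity(4, 17, 3) == Some(13): 13^2 = 16 = -1 and 13^4 = 1 mod 17.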
mod.rs
// See https://github.com/apache/parquet-format/blob/master/Encodings.md#run-length-encoding--bit-packing-hybrid-rle--3 mod bitmap; mod decoder; mod encoder; pub use bitmap::{encode_bool as bitpacked_encode, BitmapIter}; pub use decoder::Decoder; pub use encoder::{encode_bool, encode_u32}; use super::bitpacking; #[derive(Debug, PartialEq, Eq)] pub enum HybridEncoded<'a> { /// A bitpacked slice. The consumer must know its bit-width to unpack it. Bitpacked(&'a [u8]), /// A RLE-encoded slice. The first attribute corresponds to the slice (that can be interpreted) /// the second attribute corresponds to the number of repetitions. Rle(&'a [u8], usize), } enum State<'a> { None, Bitpacked(bitpacking::Decoder<'a>), Rle(std::iter::Take<std::iter::Repeat<u32>>), } // Decoder of Hybrid-RLE encoded values. pub struct HybridRleDecoder<'a> { decoder: Decoder<'a>, state: State<'a>, remaining: usize, } #[inline] fn read_next<'a, 'b>(decoder: &'b mut Decoder<'a>, remaining: usize) -> State<'a> { if decoder.num_bits() == 0 { return State::None; }; let state = decoder.next().unwrap(); match state { HybridEncoded::Bitpacked(packed) => { let num_bits = decoder.num_bits(); let length = std::cmp::min(packed.len() * 8 / num_bits as usize, remaining); let decoder = bitpacking::Decoder::new(packed, num_bits as u8, length); State::Bitpacked(decoder) } HybridEncoded::Rle(pack, additional) => { let mut bytes = [0u8; std::mem::size_of::<u32>()]; pack.iter() .enumerate() .for_each(|(i, byte)| bytes[i] = *byte); let value = u32::from_le_bytes(bytes); State::Rle(std::iter::repeat(value).take(additional)) } } } impl<'a> HybridRleDecoder<'a> { pub fn new(data: &'a [u8], num_bits: u32, num_values: usize) -> Self { let mut decoder = Decoder::new(data, num_bits); let state = read_next(&mut decoder, num_values); Self { decoder, state, remaining: num_values, } } } impl<'a> Iterator for HybridRleDecoder<'a> { type Item = u32; fn next(&mut self) -> Option<Self::Item> { if self.remaining == 0 { return None; }; let result = match &mut self.state { State::Bitpacked(decoder) => decoder.next(), State::Rle(iter) => iter.next(), State::None => Some(0), }; if let Some(result) = result { self.remaining -= 1; Some(result) } else { self.state = read_next(&mut self.decoder, self.remaining); self.next() } } fn size_hint(&self) -> (usize, Option<usize>) { (self.remaining, Some(self.remaining)) } } impl<'a> ExactSizeIterator for HybridRleDecoder<'a> {} #[cfg(test)] mod tests { use super::*; #[test] fn roundtrip() { let mut buffer = vec![]; let num_bits = 10; let data = (0..1000).collect::<Vec<_>>(); encode_u32(&mut buffer, data.iter().cloned(), num_bits).unwrap(); let decoder = HybridRleDecoder::new(&buffer, num_bits as u32, data.len()); let result = decoder.collect::<Vec<_>>(); assert_eq!(result, data); } #[test] fn pyarrow_integration() { // data encoded from pyarrow representing (0..1000) let data = vec![ 127, 0, 4, 32, 192, 0, 4, 20, 96, 192, 1, 8, 36, 160, 192, 2, 12, 52, 224, 192, 3, 16, 68, 32, 193, 4, 20, 84, 96, 193, 5, 24, 100, 160, 193, 6, 28, 116, 224, 193, 7, 32, 132, 32, 194, 8, 36, 148, 96, 194, 9, 40, 164, 160, 194, 10, 44, 180, 224, 194, 11, 48, 196, 32, 195, 12, 52, 212, 96, 195, 13, 56, 228, 160, 195, 14, 60, 244, 224, 195, 15, 64, 4, 33, 196, 16, 68, 20, 97, 196, 17, 72, 36, 161, 196, 18, 76, 52, 225, 196, 19, 80, 68, 33, 197, 20, 84, 84, 97, 197, 21, 88, 100, 161, 197, 22, 92, 116, 225, 197, 23, 96, 132, 33, 198, 24, 100, 148, 97, 198, 25, 104, 164, 161, 198, 26, 108, 180, 225, 198, 27, 112, 196, 33, 199, 28, 116, 212, 97, 
199, 29, 120, 228, 161, 199, 30, 124, 244, 225, 199, 31, 128, 4, 34, 200, 32, 132, 20, 98, 200, 33, 136, 36, 162, 200, 34, 140, 52, 226, 200, 35, 144, 68, 34, 201, 36, 148, 84, 98, 201, 37, 152, 100, 162, 201, 38, 156, 116, 226, 201, 39, 160, 132, 34, 202, 40, 164, 148, 98, 202, 41, 168, 164, 162, 202, 42, 172, 180, 226, 202, 43, 176, 196, 34, 203, 44, 180, 212, 98, 203, 45, 184, 228, 162, 203, 46, 188, 244, 226, 203, 47, 192, 4, 35, 204, 48, 196, 20, 99, 204, 49, 200, 36, 163, 204, 50, 204, 52, 227, 204, 51, 208, 68, 35, 205, 52, 212, 84, 99, 205, 53, 216, 100, 163, 205, 54, 220, 116, 227, 205, 55, 224, 132, 35, 206, 56, 228, 148, 99, 206, 57, 232, 164, 163, 206, 58, 236, 180, 227, 206, 59, 240, 196, 35, 207, 60, 244, 212, 99, 207, 61, 248, 228, 163, 207, 62, 252, 244, 227, 207, 63, 0, 5, 36, 208, 64, 4, 21, 100, 208, 65, 8, 37, 164, 208, 66, 12, 53, 228, 208, 67, 16, 69, 36, 209, 68, 20, 85, 100, 209, 69, 24, 101, 164, 209, 70, 28, 117, 228, 209, 71, 32, 133, 36, 210, 72, 36, 149, 100, 210, 73, 40, 165, 164, 210, 74, 44, 181, 228, 210, 75, 48, 197, 36, 211, 76, 52, 213, 100, 211, 77, 56, 229, 164, 211, 78, 60, 245, 228, 211, 79, 64, 5, 37, 212, 80, 68, 21, 101, 212, 81, 72, 37, 165, 212, 82, 76, 53, 229, 212, 83, 80, 69, 37, 213, 84, 84, 85, 101, 213, 85, 88, 101, 165, 213, 86, 92, 117, 229, 213, 87, 96, 133, 37, 214, 88, 100, 149, 101, 214, 89, 104, 165, 165, 214, 90, 108, 181, 229, 214, 91, 112, 197, 37, 215, 92, 116, 213, 101, 215, 93, 120, 229, 165, 215, 94, 124, 245, 229, 215, 95, 128, 5, 38, 216, 96, 132, 21, 102, 216, 97, 136, 37, 166, 216, 98, 140, 53, 230, 216, 99, 144, 69, 38, 217, 100, 148, 85, 102, 217, 101, 152, 101, 166, 217, 102, 156, 117, 230, 217, 103, 160, 133, 38, 218, 104, 164, 149, 102, 218, 105, 168, 165, 166, 218, 106, 172, 181, 230, 218, 107, 176, 197, 38, 219, 108, 180, 213, 102, 219, 109, 184, 229, 166, 219, 110, 188, 245, 230, 219, 111, 192, 5, 39, 220, 112, 196, 21, 103, 220, 113, 200, 37, 167, 220, 114, 204, 53, 231, 220, 115, 208, 69, 39, 221, 116, 212, 85, 103, 221, 117, 216, 101, 167, 221, 118, 220, 117, 231, 221, 119, 224, 133, 39, 222, 120, 228, 149, 103, 222, 121, 232, 165, 167, 222, 122, 236, 181, 231, 222, 123, 240, 197, 39, 223, 124, 244, 213, 103, 223, 125, 125, 248, 229, 167, 223, 126, 252, 245, 231, 223, 127, 0, 6, 40, 224, 128, 4, 22, 104, 224, 129, 8, 38, 168, 224, 130, 12, 54, 232, 224, 131, 16, 70, 40, 225, 132, 20, 86, 104, 225, 133, 24, 102, 168, 225, 134, 28, 118, 232, 225, 135, 32, 134, 40, 226, 136, 36, 150, 104, 226, 137, 40, 166, 168, 226, 138, 44, 182, 232, 226, 139, 48, 198, 40, 227, 140, 52, 214, 104, 227, 141, 56, 230, 168, 227, 142, 60, 246, 232, 227, 143, 64, 6, 41, 228, 144, 68, 22, 105, 228, 145, 72, 38, 169, 228, 146, 76, 54, 233, 228, 147, 80, 70, 41, 229, 148, 84, 86, 105, 229, 149, 88, 102, 169, 229, 150, 92, 118, 233, 229, 151, 96, 134, 41, 230, 152, 100, 150, 105, 230, 153, 104, 166, 169, 230, 154, 108, 182, 233, 230, 155, 112, 198, 41, 231, 156, 116, 214, 105, 231, 157, 120, 230, 169, 231, 158, 124, 246, 233, 231, 159, 128, 6, 42, 232, 160, 132, 22, 106, 232, 161, 136, 38, 170, 232, 162, 140, 54, 234, 232, 163, 144, 70, 42, 233, 164, 148, 86, 106, 233, 165, 152, 102, 170, 233, 166, 156, 118, 234, 233, 167, 160, 134, 42, 234, 168, 164, 150, 106, 234, 169, 168, 166, 170, 234, 170, 172, 182, 234, 234, 171, 176, 198, 42, 235, 172, 180, 214, 106, 235, 173, 184, 230, 170, 235, 174, 188, 246, 234, 235, 175, 192, 6, 43, 236, 176, 196, 22, 107, 236, 177, 200, 38, 171, 236, 178, 204, 54, 235, 236, 179, 208, 70, 43, 237, 180, 212, 
86, 107, 237, 181, 216, 102, 171, 237, 182, 220, 118, 235, 237, 183, 224, 134, 43, 238, 184, 228, 150, 107, 238, 185, 232, 166, 171, 238, 186, 236, 182, 235, 238, 187, 240, 198, 43, 239, 188, 244, 214, 107, 239, 189, 248, 230, 171, 239, 190, 252, 246, 235, 239, 191, 0, 7, 44, 240, 192, 4, 23, 108, 240, 193, 8, 39, 172, 240, 194, 12, 55, 236, 240, 195, 16, 71, 44, 241, 196, 20, 87, 108, 241, 197, 24, 103, 172, 241, 198, 28, 119, 236, 241, 199, 32, 135, 44, 242, 200, 36, 151, 108, 242, 201, 40, 167, 172, 242, 202, 44, 183, 236, 242, 203, 48, 199, 44, 243, 204, 52, 215, 108, 243, 205, 56, 231, 172, 243, 206, 60, 247, 236, 243, 207, 64, 7, 45, 244, 208, 68, 23, 109, 244, 209, 72, 39, 173, 244, 210, 76, 55, 237, 244, 211, 80, 71, 45, 245, 212, 84, 87, 109, 245, 213, 88, 103, 173, 245, 214, 92, 119, 237, 245, 215, 96, 135, 45, 246, 216, 100, 151, 109, 246, 217, 104, 167, 173, 246, 218, 108, 183, 237, 246, 219, 112, 199, 45, 247, 220, 116, 215, 109, 247, 221, 120, 231, 173, 247, 222, 124, 247, 237, 247, 223, 128, 7, 46, 248, 224, 132, 23, 110, 248, 225, 136, 39, 174, 248, 226, 140, 55, 238, 248, 227, 144, 71, 46, 249, 228, 148, 87, 110, 249, 229, 152, 103, 174, 249, 230, 156, 119, 238, 249, 231, 160, 135, 46, 250, 232, 164, 151, 110, 250, 233, 168, 167, 174, 250, 234, 172, 183, 238, 250, 235, 176, 199, 46, 251, 236, 180, 215, 110, 251, 237, 184, 231, 174, 251, 238, 188, 247, 238, 251, 239, 192, 7, 47, 252, 240, 196, 23, 111, 252, 241, 200, 39, 175, 252, 242, 204, 55, 239, 252, 243, 208, 71, 47, 253, 244, 212, 87, 111, 253, 245, 216, 103, 175, 253, 246, 220, 119, 239, 253, 247, 224, 135, 47, 254, 248, 228, 151, 111, 254, 249, ]; let num_bits = 10; let decoder = HybridRleDecoder::new(&data, num_bits as u32, 1000); let result = decoder.collect::<Vec<_>>(); assert_eq!(result, (0..1000).collect::<Vec<_>>()); } #[test] fn small() { let data = vec![3, 2]; let num_bits = 3; let decoder = HybridRleDecoder::new(&data, num_bits as u32, 1); let result = decoder.collect::<Vec<_>>(); assert_eq!(result, &[2]); } #[test] fn
() { let data = vec![3]; let num_bits = 0; let decoder = HybridRleDecoder::new(&data, num_bits as u32, 2); let result = decoder.collect::<Vec<_>>(); assert_eq!(result, &[0, 0]); } }
zero_bit_width
identifier_name
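For reference, the wire format the module's `Decoder` walks (per the Parquet spec linked at the top of the module): each run opens with a ULEB128 varint header whose low bit selects the run type. A standalone sketch of the header parse — the module's own `Decoder` is the real, incremental implementation:

// ULEB128 varint: 7 payload bits per byte, high bit set on continuation bytes.
fn read_uleb128(data: &mut &[u8]) -> u64 {
    let (mut value, mut shift) = (0u64, 0u32);
    loop {
        let byte = data[0];
        *data = &data[1..];
        value |= u64::from(byte & 0x7f) << shift;
        if byte & 0x80 == 0 {
            return value;
        }
        shift += 7;
    }
}

enum Run<'a> {
    Rle { value: &'a [u8], repetitions: usize },
    Bitpacked { packed: &'a [u8], groups_of_8: usize },
}

fn read_run<'a>(data: &mut &'a [u8], bit_width: usize) -> Run<'a> {
    let header = read_uleb128(data);
    if header & 1 == 1 {
        // Bit-packed run: header >> 1 groups of 8 values; each group spans
        // exactly `bit_width` bytes (8 values * bit_width bits).
        let groups = (header >> 1) as usize;
        // Assumes the buffer holds the full run; the module instead clamps
        // to the number of values still remaining.
        let (packed, rest) = data.split_at(groups * bit_width);
        *data = rest;
        Run::Bitpacked { packed, groups_of_8: groups }
    } else {
        // RLE run: the repeated value occupies ceil(bit_width / 8) LE bytes.
        let repetitions = (header >> 1) as usize;
        let (value, rest) = data.split_at((bit_width + 7) / 8);
        *data = rest;
        Run::Rle { value, repetitions }
    }
}

In the `small` test above, for instance, the first byte `3` (binary `11`) is a header announcing one bit-packed group, and the following byte `2` carries the first 3-bit value, `2`.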
mod.rs
// See https://github.com/apache/parquet-format/blob/master/Encodings.md#run-length-encoding--bit-packing-hybrid-rle--3 mod bitmap; mod decoder; mod encoder; pub use bitmap::{encode_bool as bitpacked_encode, BitmapIter}; pub use decoder::Decoder; pub use encoder::{encode_bool, encode_u32}; use super::bitpacking; #[derive(Debug, PartialEq, Eq)] pub enum HybridEncoded<'a> { /// A bitpacked slice. The consumer must know its bit-width to unpack it.
} enum State<'a> { None, Bitpacked(bitpacking::Decoder<'a>), Rle(std::iter::Take<std::iter::Repeat<u32>>), } // Decoder of Hybrid-RLE encoded values. pub struct HybridRleDecoder<'a> { decoder: Decoder<'a>, state: State<'a>, remaining: usize, } #[inline] fn read_next<'a, 'b>(decoder: &'b mut Decoder<'a>, remaining: usize) -> State<'a> { if decoder.num_bits() == 0 { return State::None; }; let state = decoder.next().unwrap(); match state { HybridEncoded::Bitpacked(packed) => { let num_bits = decoder.num_bits(); let length = std::cmp::min(packed.len() * 8 / num_bits as usize, remaining); let decoder = bitpacking::Decoder::new(packed, num_bits as u8, length); State::Bitpacked(decoder) } HybridEncoded::Rle(pack, additional) => { let mut bytes = [0u8; std::mem::size_of::<u32>()]; pack.iter() .enumerate() .for_each(|(i, byte)| bytes[i] = *byte); let value = u32::from_le_bytes(bytes); State::Rle(std::iter::repeat(value).take(additional)) } } } impl<'a> HybridRleDecoder<'a> { pub fn new(data: &'a [u8], num_bits: u32, num_values: usize) -> Self { let mut decoder = Decoder::new(data, num_bits); let state = read_next(&mut decoder, num_values); Self { decoder, state, remaining: num_values, } } } impl<'a> Iterator for HybridRleDecoder<'a> { type Item = u32; fn next(&mut self) -> Option<Self::Item> { if self.remaining == 0 { return None; }; let result = match &mut self.state { State::Bitpacked(decoder) => decoder.next(), State::Rle(iter) => iter.next(), State::None => Some(0), }; if let Some(result) = result { self.remaining -= 1; Some(result) } else { self.state = read_next(&mut self.decoder, self.remaining); self.next() } } fn size_hint(&self) -> (usize, Option<usize>) { (self.remaining, Some(self.remaining)) } } impl<'a> ExactSizeIterator for HybridRleDecoder<'a> {} #[cfg(test)] mod tests { use super::*; #[test] fn roundtrip() { let mut buffer = vec![]; let num_bits = 10; let data = (0..1000).collect::<Vec<_>>(); encode_u32(&mut buffer, data.iter().cloned(), num_bits).unwrap(); let decoder = HybridRleDecoder::new(&buffer, num_bits as u32, data.len()); let result = decoder.collect::<Vec<_>>(); assert_eq!(result, data); } #[test] fn pyarrow_integration() { // data encoded from pyarrow representing (0..1000) let data = vec![ 127, 0, 4, 32, 192, 0, 4, 20, 96, 192, 1, 8, 36, 160, 192, 2, 12, 52, 224, 192, 3, 16, 68, 32, 193, 4, 20, 84, 96, 193, 5, 24, 100, 160, 193, 6, 28, 116, 224, 193, 7, 32, 132, 32, 194, 8, 36, 148, 96, 194, 9, 40, 164, 160, 194, 10, 44, 180, 224, 194, 11, 48, 196, 32, 195, 12, 52, 212, 96, 195, 13, 56, 228, 160, 195, 14, 60, 244, 224, 195, 15, 64, 4, 33, 196, 16, 68, 20, 97, 196, 17, 72, 36, 161, 196, 18, 76, 52, 225, 196, 19, 80, 68, 33, 197, 20, 84, 84, 97, 197, 21, 88, 100, 161, 197, 22, 92, 116, 225, 197, 23, 96, 132, 33, 198, 24, 100, 148, 97, 198, 25, 104, 164, 161, 198, 26, 108, 180, 225, 198, 27, 112, 196, 33, 199, 28, 116, 212, 97, 199, 29, 120, 228, 161, 199, 30, 124, 244, 225, 199, 31, 128, 4, 34, 200, 32, 132, 20, 98, 200, 33, 136, 36, 162, 200, 34, 140, 52, 226, 200, 35, 144, 68, 34, 201, 36, 148, 84, 98, 201, 37, 152, 100, 162, 201, 38, 156, 116, 226, 201, 39, 160, 132, 34, 202, 40, 164, 148, 98, 202, 41, 168, 164, 162, 202, 42, 172, 180, 226, 202, 43, 176, 196, 34, 203, 44, 180, 212, 98, 203, 45, 184, 228, 162, 203, 46, 188, 244, 226, 203, 47, 192, 4, 35, 204, 48, 196, 20, 99, 204, 49, 200, 36, 163, 204, 50, 204, 52, 227, 204, 51, 208, 68, 35, 205, 52, 212, 84, 99, 205, 53, 216, 100, 163, 205, 54, 220, 116, 227, 205, 55, 224, 132, 35, 206, 56, 228, 148, 99, 206, 57, 232, 
164, 163, 206, 58, 236, 180, 227, 206, 59, 240, 196, 35, 207, 60, 244, 212, 99, 207, 61, 248, 228, 163, 207, 62, 252, 244, 227, 207, 63, 0, 5, 36, 208, 64, 4, 21, 100, 208, 65, 8, 37, 164, 208, 66, 12, 53, 228, 208, 67, 16, 69, 36, 209, 68, 20, 85, 100, 209, 69, 24, 101, 164, 209, 70, 28, 117, 228, 209, 71, 32, 133, 36, 210, 72, 36, 149, 100, 210, 73, 40, 165, 164, 210, 74, 44, 181, 228, 210, 75, 48, 197, 36, 211, 76, 52, 213, 100, 211, 77, 56, 229, 164, 211, 78, 60, 245, 228, 211, 79, 64, 5, 37, 212, 80, 68, 21, 101, 212, 81, 72, 37, 165, 212, 82, 76, 53, 229, 212, 83, 80, 69, 37, 213, 84, 84, 85, 101, 213, 85, 88, 101, 165, 213, 86, 92, 117, 229, 213, 87, 96, 133, 37, 214, 88, 100, 149, 101, 214, 89, 104, 165, 165, 214, 90, 108, 181, 229, 214, 91, 112, 197, 37, 215, 92, 116, 213, 101, 215, 93, 120, 229, 165, 215, 94, 124, 245, 229, 215, 95, 128, 5, 38, 216, 96, 132, 21, 102, 216, 97, 136, 37, 166, 216, 98, 140, 53, 230, 216, 99, 144, 69, 38, 217, 100, 148, 85, 102, 217, 101, 152, 101, 166, 217, 102, 156, 117, 230, 217, 103, 160, 133, 38, 218, 104, 164, 149, 102, 218, 105, 168, 165, 166, 218, 106, 172, 181, 230, 218, 107, 176, 197, 38, 219, 108, 180, 213, 102, 219, 109, 184, 229, 166, 219, 110, 188, 245, 230, 219, 111, 192, 5, 39, 220, 112, 196, 21, 103, 220, 113, 200, 37, 167, 220, 114, 204, 53, 231, 220, 115, 208, 69, 39, 221, 116, 212, 85, 103, 221, 117, 216, 101, 167, 221, 118, 220, 117, 231, 221, 119, 224, 133, 39, 222, 120, 228, 149, 103, 222, 121, 232, 165, 167, 222, 122, 236, 181, 231, 222, 123, 240, 197, 39, 223, 124, 244, 213, 103, 223, 125, 125, 248, 229, 167, 223, 126, 252, 245, 231, 223, 127, 0, 6, 40, 224, 128, 4, 22, 104, 224, 129, 8, 38, 168, 224, 130, 12, 54, 232, 224, 131, 16, 70, 40, 225, 132, 20, 86, 104, 225, 133, 24, 102, 168, 225, 134, 28, 118, 232, 225, 135, 32, 134, 40, 226, 136, 36, 150, 104, 226, 137, 40, 166, 168, 226, 138, 44, 182, 232, 226, 139, 48, 198, 40, 227, 140, 52, 214, 104, 227, 141, 56, 230, 168, 227, 142, 60, 246, 232, 227, 143, 64, 6, 41, 228, 144, 68, 22, 105, 228, 145, 72, 38, 169, 228, 146, 76, 54, 233, 228, 147, 80, 70, 41, 229, 148, 84, 86, 105, 229, 149, 88, 102, 169, 229, 150, 92, 118, 233, 229, 151, 96, 134, 41, 230, 152, 100, 150, 105, 230, 153, 104, 166, 169, 230, 154, 108, 182, 233, 230, 155, 112, 198, 41, 231, 156, 116, 214, 105, 231, 157, 120, 230, 169, 231, 158, 124, 246, 233, 231, 159, 128, 6, 42, 232, 160, 132, 22, 106, 232, 161, 136, 38, 170, 232, 162, 140, 54, 234, 232, 163, 144, 70, 42, 233, 164, 148, 86, 106, 233, 165, 152, 102, 170, 233, 166, 156, 118, 234, 233, 167, 160, 134, 42, 234, 168, 164, 150, 106, 234, 169, 168, 166, 170, 234, 170, 172, 182, 234, 234, 171, 176, 198, 42, 235, 172, 180, 214, 106, 235, 173, 184, 230, 170, 235, 174, 188, 246, 234, 235, 175, 192, 6, 43, 236, 176, 196, 22, 107, 236, 177, 200, 38, 171, 236, 178, 204, 54, 235, 236, 179, 208, 70, 43, 237, 180, 212, 86, 107, 237, 181, 216, 102, 171, 237, 182, 220, 118, 235, 237, 183, 224, 134, 43, 238, 184, 228, 150, 107, 238, 185, 232, 166, 171, 238, 186, 236, 182, 235, 238, 187, 240, 198, 43, 239, 188, 244, 214, 107, 239, 189, 248, 230, 171, 239, 190, 252, 246, 235, 239, 191, 0, 7, 44, 240, 192, 4, 23, 108, 240, 193, 8, 39, 172, 240, 194, 12, 55, 236, 240, 195, 16, 71, 44, 241, 196, 20, 87, 108, 241, 197, 24, 103, 172, 241, 198, 28, 119, 236, 241, 199, 32, 135, 44, 242, 200, 36, 151, 108, 242, 201, 40, 167, 172, 242, 202, 44, 183, 236, 242, 203, 48, 199, 44, 243, 204, 52, 215, 108, 243, 205, 56, 231, 172, 243, 206, 60, 247, 236, 243, 207, 64, 7, 45, 244, 208, 
68, 23, 109, 244, 209, 72, 39, 173, 244, 210, 76, 55, 237, 244, 211, 80, 71, 45, 245, 212, 84, 87, 109, 245, 213, 88, 103, 173, 245, 214, 92, 119, 237, 245, 215, 96, 135, 45, 246, 216, 100, 151, 109, 246, 217, 104, 167, 173, 246, 218, 108, 183, 237, 246, 219, 112, 199, 45, 247, 220, 116, 215, 109, 247, 221, 120, 231, 173, 247, 222, 124, 247, 237, 247, 223, 128, 7, 46, 248, 224, 132, 23, 110, 248, 225, 136, 39, 174, 248, 226, 140, 55, 238, 248, 227, 144, 71, 46, 249, 228, 148, 87, 110, 249, 229, 152, 103, 174, 249, 230, 156, 119, 238, 249, 231, 160, 135, 46, 250, 232, 164, 151, 110, 250, 233, 168, 167, 174, 250, 234, 172, 183, 238, 250, 235, 176, 199, 46, 251, 236, 180, 215, 110, 251, 237, 184, 231, 174, 251, 238, 188, 247, 238, 251, 239, 192, 7, 47, 252, 240, 196, 23, 111, 252, 241, 200, 39, 175, 252, 242, 204, 55, 239, 252, 243, 208, 71, 47, 253, 244, 212, 87, 111, 253, 245, 216, 103, 175, 253, 246, 220, 119, 239, 253, 247, 224, 135, 47, 254, 248, 228, 151, 111, 254, 249, ]; let num_bits = 10; let decoder = HybridRleDecoder::new(&data, num_bits as u32, 1000); let result = decoder.collect::<Vec<_>>(); assert_eq!(result, (0..1000).collect::<Vec<_>>()); } #[test] fn small() { let data = vec![3, 2]; let num_bits = 3; let decoder = HybridRleDecoder::new(&data, num_bits as u32, 1); let result = decoder.collect::<Vec<_>>(); assert_eq!(result, &[2]); } #[test] fn zero_bit_width() { let data = vec![3]; let num_bits = 0; let decoder = HybridRleDecoder::new(&data, num_bits as u32, 2); let result = decoder.collect::<Vec<_>>(); assert_eq!(result, &[0, 0]); } }
Bitpacked(&'a [u8]), /// A RLE-encoded slice. The first attribute corresponds to the slice (that can be interpreted) /// the second attribute corresponds to the number of repetitions. Rle(&'a [u8], usize),
random_line_split
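The `bitpacking::Decoder` consumed above unpacks fixed-width values stored little-endian, lowest bits first. A minimal sketch (gathering up to five bytes covers any bit offset for widths up to 32; this is an illustration, not the module's implementation):

fn unpack(packed: &[u8], bit_width: usize, count: usize) -> Vec<u32> {
    assert!(bit_width <= 32);
    let mask = (1u64 << bit_width) - 1;
    (0..count)
        .map(|i| {
            let bit = i * bit_width;
            let (start, offset) = (bit / 8, bit % 8);
            // Little-endian gather of the bytes that may hold this value.
            let mut word = 0u64;
            for (k, byte) in packed[start..].iter().take(5).enumerate() {
                word |= u64::from(*byte) << (8 * k);
            }
            ((word >> offset) & mask) as u32
        })
        .collect()
}

// The classic example from the Parquet spec: 0..8 packed at 3 bits.
// assert_eq!(unpack(&[0b1000_1000, 0b1100_0110, 0b1111_1010], 3, 8),
//            vec![0, 1, 2, 3, 4, 5, 6, 7]);

Note that a width of 0 makes `mask` zero and every value decode to 0, matching what `State::None` produces in the decoder above.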
mod.rs
// See https://github.com/apache/parquet-format/blob/master/Encodings.md#run-length-encoding--bit-packing-hybrid-rle--3 mod bitmap; mod decoder; mod encoder; pub use bitmap::{encode_bool as bitpacked_encode, BitmapIter}; pub use decoder::Decoder; pub use encoder::{encode_bool, encode_u32}; use super::bitpacking; #[derive(Debug, PartialEq, Eq)] pub enum HybridEncoded<'a> { /// A bitpacked slice. The consumer must know its bit-width to unpack it. Bitpacked(&'a [u8]), /// A RLE-encoded slice. The first attribute corresponds to the slice (that can be interpreted) /// the second attribute corresponds to the number of repetitions. Rle(&'a [u8], usize), } enum State<'a> { None, Bitpacked(bitpacking::Decoder<'a>), Rle(std::iter::Take<std::iter::Repeat<u32>>), } // Decoder of Hybrid-RLE encoded values. pub struct HybridRleDecoder<'a> { decoder: Decoder<'a>, state: State<'a>, remaining: usize, } #[inline] fn read_next<'a, 'b>(decoder: &'b mut Decoder<'a>, remaining: usize) -> State<'a> { if decoder.num_bits() == 0 { return State::None; }; let state = decoder.next().unwrap(); match state { HybridEncoded::Bitpacked(packed) => { let num_bits = decoder.num_bits(); let length = std::cmp::min(packed.len() * 8 / num_bits as usize, remaining); let decoder = bitpacking::Decoder::new(packed, num_bits as u8, length); State::Bitpacked(decoder) } HybridEncoded::Rle(pack, additional) => { let mut bytes = [0u8; std::mem::size_of::<u32>()]; pack.iter() .enumerate() .for_each(|(i, byte)| bytes[i] = *byte); let value = u32::from_le_bytes(bytes); State::Rle(std::iter::repeat(value).take(additional)) } } } impl<'a> HybridRleDecoder<'a> { pub fn new(data: &'a [u8], num_bits: u32, num_values: usize) -> Self { let mut decoder = Decoder::new(data, num_bits); let state = read_next(&mut decoder, num_values); Self { decoder, state, remaining: num_values, } } } impl<'a> Iterator for HybridRleDecoder<'a> { type Item = u32; fn next(&mut self) -> Option<Self::Item> { if self.remaining == 0 { return None; }; let result = match &mut self.state { State::Bitpacked(decoder) => decoder.next(), State::Rle(iter) => iter.next(), State::None => Some(0), }; if let Some(result) = result { self.remaining -= 1; Some(result) } else { self.state = read_next(&mut self.decoder, self.remaining); self.next() } } fn size_hint(&self) -> (usize, Option<usize>) { (self.remaining, Some(self.remaining)) } } impl<'a> ExactSizeIterator for HybridRleDecoder<'a> {} #[cfg(test)] mod tests { use super::*; #[test] fn roundtrip() { let mut buffer = vec![]; let num_bits = 10; let data = (0..1000).collect::<Vec<_>>(); encode_u32(&mut buffer, data.iter().cloned(), num_bits).unwrap(); let decoder = HybridRleDecoder::new(&buffer, num_bits as u32, data.len()); let result = decoder.collect::<Vec<_>>(); assert_eq!(result, data); } #[test] fn pyarrow_integration()
208, 64, 4, 21, 100, 208, 65, 8, 37, 164, 208, 66, 12, 53, 228, 208, 67, 16, 69, 36, 209, 68, 20, 85, 100, 209, 69, 24, 101, 164, 209, 70, 28, 117, 228, 209, 71, 32, 133, 36, 210, 72, 36, 149, 100, 210, 73, 40, 165, 164, 210, 74, 44, 181, 228, 210, 75, 48, 197, 36, 211, 76, 52, 213, 100, 211, 77, 56, 229, 164, 211, 78, 60, 245, 228, 211, 79, 64, 5, 37, 212, 80, 68, 21, 101, 212, 81, 72, 37, 165, 212, 82, 76, 53, 229, 212, 83, 80, 69, 37, 213, 84, 84, 85, 101, 213, 85, 88, 101, 165, 213, 86, 92, 117, 229, 213, 87, 96, 133, 37, 214, 88, 100, 149, 101, 214, 89, 104, 165, 165, 214, 90, 108, 181, 229, 214, 91, 112, 197, 37, 215, 92, 116, 213, 101, 215, 93, 120, 229, 165, 215, 94, 124, 245, 229, 215, 95, 128, 5, 38, 216, 96, 132, 21, 102, 216, 97, 136, 37, 166, 216, 98, 140, 53, 230, 216, 99, 144, 69, 38, 217, 100, 148, 85, 102, 217, 101, 152, 101, 166, 217, 102, 156, 117, 230, 217, 103, 160, 133, 38, 218, 104, 164, 149, 102, 218, 105, 168, 165, 166, 218, 106, 172, 181, 230, 218, 107, 176, 197, 38, 219, 108, 180, 213, 102, 219, 109, 184, 229, 166, 219, 110, 188, 245, 230, 219, 111, 192, 5, 39, 220, 112, 196, 21, 103, 220, 113, 200, 37, 167, 220, 114, 204, 53, 231, 220, 115, 208, 69, 39, 221, 116, 212, 85, 103, 221, 117, 216, 101, 167, 221, 118, 220, 117, 231, 221, 119, 224, 133, 39, 222, 120, 228, 149, 103, 222, 121, 232, 165, 167, 222, 122, 236, 181, 231, 222, 123, 240, 197, 39, 223, 124, 244, 213, 103, 223, 125, 125, 248, 229, 167, 223, 126, 252, 245, 231, 223, 127, 0, 6, 40, 224, 128, 4, 22, 104, 224, 129, 8, 38, 168, 224, 130, 12, 54, 232, 224, 131, 16, 70, 40, 225, 132, 20, 86, 104, 225, 133, 24, 102, 168, 225, 134, 28, 118, 232, 225, 135, 32, 134, 40, 226, 136, 36, 150, 104, 226, 137, 40, 166, 168, 226, 138, 44, 182, 232, 226, 139, 48, 198, 40, 227, 140, 52, 214, 104, 227, 141, 56, 230, 168, 227, 142, 60, 246, 232, 227, 143, 64, 6, 41, 228, 144, 68, 22, 105, 228, 145, 72, 38, 169, 228, 146, 76, 54, 233, 228, 147, 80, 70, 41, 229, 148, 84, 86, 105, 229, 149, 88, 102, 169, 229, 150, 92, 118, 233, 229, 151, 96, 134, 41, 230, 152, 100, 150, 105, 230, 153, 104, 166, 169, 230, 154, 108, 182, 233, 230, 155, 112, 198, 41, 231, 156, 116, 214, 105, 231, 157, 120, 230, 169, 231, 158, 124, 246, 233, 231, 159, 128, 6, 42, 232, 160, 132, 22, 106, 232, 161, 136, 38, 170, 232, 162, 140, 54, 234, 232, 163, 144, 70, 42, 233, 164, 148, 86, 106, 233, 165, 152, 102, 170, 233, 166, 156, 118, 234, 233, 167, 160, 134, 42, 234, 168, 164, 150, 106, 234, 169, 168, 166, 170, 234, 170, 172, 182, 234, 234, 171, 176, 198, 42, 235, 172, 180, 214, 106, 235, 173, 184, 230, 170, 235, 174, 188, 246, 234, 235, 175, 192, 6, 43, 236, 176, 196, 22, 107, 236, 177, 200, 38, 171, 236, 178, 204, 54, 235, 236, 179, 208, 70, 43, 237, 180, 212, 86, 107, 237, 181, 216, 102, 171, 237, 182, 220, 118, 235, 237, 183, 224, 134, 43, 238, 184, 228, 150, 107, 238, 185, 232, 166, 171, 238, 186, 236, 182, 235, 238, 187, 240, 198, 43, 239, 188, 244, 214, 107, 239, 189, 248, 230, 171, 239, 190, 252, 246, 235, 239, 191, 0, 7, 44, 240, 192, 4, 23, 108, 240, 193, 8, 39, 172, 240, 194, 12, 55, 236, 240, 195, 16, 71, 44, 241, 196, 20, 87, 108, 241, 197, 24, 103, 172, 241, 198, 28, 119, 236, 241, 199, 32, 135, 44, 242, 200, 36, 151, 108, 242, 201, 40, 167, 172, 242, 202, 44, 183, 236, 242, 203, 48, 199, 44, 243, 204, 52, 215, 108, 243, 205, 56, 231, 172, 243, 206, 60, 247, 236, 243, 207, 64, 7, 45, 244, 208, 68, 23, 109, 244, 209, 72, 39, 173, 244, 210, 76, 55, 237, 244, 211, 80, 71, 45, 245, 212, 84, 87, 109, 245, 213, 88, 103, 173, 245, 214, 92, 119, 
237, 245, 215, 96, 135, 45, 246, 216, 100, 151, 109, 246, 217, 104, 167, 173, 246, 218, 108, 183, 237, 246, 219, 112, 199, 45, 247, 220, 116, 215, 109, 247, 221, 120, 231, 173, 247, 222, 124, 247, 237, 247, 223, 128, 7, 46, 248, 224, 132, 23, 110, 248, 225, 136, 39, 174, 248, 226, 140, 55, 238, 248, 227, 144, 71, 46, 249, 228, 148, 87, 110, 249, 229, 152, 103, 174, 249, 230, 156, 119, 238, 249, 231, 160, 135, 46, 250, 232, 164, 151, 110, 250, 233, 168, 167, 174, 250, 234, 172, 183, 238, 250, 235, 176, 199, 46, 251, 236, 180, 215, 110, 251, 237, 184, 231, 174, 251, 238, 188, 247, 238, 251, 239, 192, 7, 47, 252, 240, 196, 23, 111, 252, 241, 200, 39, 175, 252, 242, 204, 55, 239, 252, 243, 208, 71, 47, 253, 244, 212, 87, 111, 253, 245, 216, 103, 175, 253, 246, 220, 119, 239, 253, 247, 224, 135, 47, 254, 248, 228, 151, 111, 254, 249, ]; let num_bits = 10; let decoder = HybridRleDecoder::new(&data, num_bits as u32, 1000); let result = decoder.collect::<Vec<_>>(); assert_eq!(result, (0..1000).collect::<Vec<_>>()); } #[test] fn small() { let data = vec![3, 2]; let num_bits = 3; let decoder = HybridRleDecoder::new(&data, num_bits as u32, 1); let result = decoder.collect::<Vec<_>>(); assert_eq!(result, &[2]); } #[test] fn zero_bit_width() { let data = vec![3]; let num_bits = 0; let decoder = HybridRleDecoder::new(&data, num_bits as u32, 2); let result = decoder.collect::<Vec<_>>(); assert_eq!(result, &[0, 0]); } }
{ // data encoded from pyarrow representing (0..1000) let data = vec![ 127, 0, 4, 32, 192, 0, 4, 20, 96, 192, 1, 8, 36, 160, 192, 2, 12, 52, 224, 192, 3, 16, 68, 32, 193, 4, 20, 84, 96, 193, 5, 24, 100, 160, 193, 6, 28, 116, 224, 193, 7, 32, 132, 32, 194, 8, 36, 148, 96, 194, 9, 40, 164, 160, 194, 10, 44, 180, 224, 194, 11, 48, 196, 32, 195, 12, 52, 212, 96, 195, 13, 56, 228, 160, 195, 14, 60, 244, 224, 195, 15, 64, 4, 33, 196, 16, 68, 20, 97, 196, 17, 72, 36, 161, 196, 18, 76, 52, 225, 196, 19, 80, 68, 33, 197, 20, 84, 84, 97, 197, 21, 88, 100, 161, 197, 22, 92, 116, 225, 197, 23, 96, 132, 33, 198, 24, 100, 148, 97, 198, 25, 104, 164, 161, 198, 26, 108, 180, 225, 198, 27, 112, 196, 33, 199, 28, 116, 212, 97, 199, 29, 120, 228, 161, 199, 30, 124, 244, 225, 199, 31, 128, 4, 34, 200, 32, 132, 20, 98, 200, 33, 136, 36, 162, 200, 34, 140, 52, 226, 200, 35, 144, 68, 34, 201, 36, 148, 84, 98, 201, 37, 152, 100, 162, 201, 38, 156, 116, 226, 201, 39, 160, 132, 34, 202, 40, 164, 148, 98, 202, 41, 168, 164, 162, 202, 42, 172, 180, 226, 202, 43, 176, 196, 34, 203, 44, 180, 212, 98, 203, 45, 184, 228, 162, 203, 46, 188, 244, 226, 203, 47, 192, 4, 35, 204, 48, 196, 20, 99, 204, 49, 200, 36, 163, 204, 50, 204, 52, 227, 204, 51, 208, 68, 35, 205, 52, 212, 84, 99, 205, 53, 216, 100, 163, 205, 54, 220, 116, 227, 205, 55, 224, 132, 35, 206, 56, 228, 148, 99, 206, 57, 232, 164, 163, 206, 58, 236, 180, 227, 206, 59, 240, 196, 35, 207, 60, 244, 212, 99, 207, 61, 248, 228, 163, 207, 62, 252, 244, 227, 207, 63, 0, 5, 36,
identifier_body
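The decoder in this record implements the hybrid encoding described in the spec linked at the top of the module: every run begins with a ULEB128 header whose low bit selects the mode (0 = RLE, 1 = bit-packed); for an RLE run, `header >> 1` is the repeat count and the repeated value occupies ceil(bit_width / 8) little-endian bytes, which is why `read_next` widens the bytes with `u32::from_le_bytes`. A minimal sketch of that header logic, independent of the crate's `Decoder` (assuming, for brevity, a header that fits in one byte):

```rust
// Sketch only: decode one RLE run per the linked spec. The crate's `Decoder`
// additionally handles multi-byte ULEB128 headers and bit-packed runs.
fn decode_rle_run(data: &[u8], bit_width: usize) -> (u32, usize) {
    let header = data[0] as usize; // assumes the ULEB128 header fits one byte
    assert_eq!(header & 1, 0, "low bit 0 marks an RLE run; 1 marks bit-packed");
    let repetitions = header >> 1;
    let value_bytes = (bit_width + 7) / 8; // value is stored little-endian
    let mut bytes = [0u8; 4];
    bytes[..value_bytes].copy_from_slice(&data[1..1 + value_bytes]);
    (u32::from_le_bytes(bytes), repetitions)
}
```

This also explains the module's `small` test: its first byte is 3, whose low bit is set, so `[3, 2]` is parsed as a bit-packed group rather than an RLE run.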
mod.rs
// See https://github.com/apache/parquet-format/blob/master/Encodings.md#run-length-encoding--bit-packing-hybrid-rle--3 mod bitmap; mod decoder; mod encoder; pub use bitmap::{encode_bool as bitpacked_encode, BitmapIter}; pub use decoder::Decoder; pub use encoder::{encode_bool, encode_u32}; use super::bitpacking; #[derive(Debug, PartialEq, Eq)] pub enum HybridEncoded<'a> { /// A bitpacked slice. The consumer must know its bit-width to unpack it. Bitpacked(&'a [u8]), /// An RLE-encoded slice. The first attribute corresponds to the slice (that can be interpreted); /// the second attribute corresponds to the number of repetitions. Rle(&'a [u8], usize), } enum State<'a> { None, Bitpacked(bitpacking::Decoder<'a>), Rle(std::iter::Take<std::iter::Repeat<u32>>), } /// Decoder of Hybrid-RLE encoded values. pub struct HybridRleDecoder<'a> { decoder: Decoder<'a>, state: State<'a>, remaining: usize, } #[inline] fn read_next<'a, 'b>(decoder: &'b mut Decoder<'a>, remaining: usize) -> State<'a> { if decoder.num_bits() == 0 { return State::None; }; let state = decoder.next().unwrap(); match state { HybridEncoded::Bitpacked(packed) => { let num_bits = decoder.num_bits(); let length = std::cmp::min(packed.len() * 8 / num_bits as usize, remaining); let decoder = bitpacking::Decoder::new(packed, num_bits as u8, length); State::Bitpacked(decoder) } HybridEncoded::Rle(pack, additional) => { let mut bytes = [0u8; std::mem::size_of::<u32>()]; pack.iter() .enumerate() .for_each(|(i, byte)| bytes[i] = *byte); let value = u32::from_le_bytes(bytes); State::Rle(std::iter::repeat(value).take(additional)) } } } impl<'a> HybridRleDecoder<'a> { pub fn new(data: &'a [u8], num_bits: u32, num_values: usize) -> Self { let mut decoder = Decoder::new(data, num_bits); let state = read_next(&mut decoder, num_values); Self { decoder, state, remaining: num_values, } } } impl<'a> Iterator for HybridRleDecoder<'a> { type Item = u32; fn next(&mut self) -> Option<Self::Item> { if self.remaining == 0 { return None; }; let result = match &mut self.state { State::Bitpacked(decoder) => decoder.next(), State::Rle(iter) => iter.next(), State::None => Some(0), }; if let Some(result) = result
else { self.state = read_next(&mut self.decoder, self.remaining); self.next() } } fn size_hint(&self) -> (usize, Option<usize>) { (self.remaining, Some(self.remaining)) } } impl<'a> ExactSizeIterator for HybridRleDecoder<'a> {} #[cfg(test)] mod tests { use super::*; #[test] fn roundtrip() { let mut buffer = vec![]; let num_bits = 10; let data = (0..1000).collect::<Vec<_>>(); encode_u32(&mut buffer, data.iter().cloned(), num_bits).unwrap(); let decoder = HybridRleDecoder::new(&buffer, num_bits as u32, data.len()); let result = decoder.collect::<Vec<_>>(); assert_eq!(result, data); } #[test] fn pyarrow_integration() { // data encoded from pyarrow representing (0..1000) let data = vec![ 127, 0, 4, 32, 192, 0, 4, 20, 96, 192, 1, 8, 36, 160, 192, 2, 12, 52, 224, 192, 3, 16, 68, 32, 193, 4, 20, 84, 96, 193, 5, 24, 100, 160, 193, 6, 28, 116, 224, 193, 7, 32, 132, 32, 194, 8, 36, 148, 96, 194, 9, 40, 164, 160, 194, 10, 44, 180, 224, 194, 11, 48, 196, 32, 195, 12, 52, 212, 96, 195, 13, 56, 228, 160, 195, 14, 60, 244, 224, 195, 15, 64, 4, 33, 196, 16, 68, 20, 97, 196, 17, 72, 36, 161, 196, 18, 76, 52, 225, 196, 19, 80, 68, 33, 197, 20, 84, 84, 97, 197, 21, 88, 100, 161, 197, 22, 92, 116, 225, 197, 23, 96, 132, 33, 198, 24, 100, 148, 97, 198, 25, 104, 164, 161, 198, 26, 108, 180, 225, 198, 27, 112, 196, 33, 199, 28, 116, 212, 97, 199, 29, 120, 228, 161, 199, 30, 124, 244, 225, 199, 31, 128, 4, 34, 200, 32, 132, 20, 98, 200, 33, 136, 36, 162, 200, 34, 140, 52, 226, 200, 35, 144, 68, 34, 201, 36, 148, 84, 98, 201, 37, 152, 100, 162, 201, 38, 156, 116, 226, 201, 39, 160, 132, 34, 202, 40, 164, 148, 98, 202, 41, 168, 164, 162, 202, 42, 172, 180, 226, 202, 43, 176, 196, 34, 203, 44, 180, 212, 98, 203, 45, 184, 228, 162, 203, 46, 188, 244, 226, 203, 47, 192, 4, 35, 204, 48, 196, 20, 99, 204, 49, 200, 36, 163, 204, 50, 204, 52, 227, 204, 51, 208, 68, 35, 205, 52, 212, 84, 99, 205, 53, 216, 100, 163, 205, 54, 220, 116, 227, 205, 55, 224, 132, 35, 206, 56, 228, 148, 99, 206, 57, 232, 164, 163, 206, 58, 236, 180, 227, 206, 59, 240, 196, 35, 207, 60, 244, 212, 99, 207, 61, 248, 228, 163, 207, 62, 252, 244, 227, 207, 63, 0, 5, 36, 208, 64, 4, 21, 100, 208, 65, 8, 37, 164, 208, 66, 12, 53, 228, 208, 67, 16, 69, 36, 209, 68, 20, 85, 100, 209, 69, 24, 101, 164, 209, 70, 28, 117, 228, 209, 71, 32, 133, 36, 210, 72, 36, 149, 100, 210, 73, 40, 165, 164, 210, 74, 44, 181, 228, 210, 75, 48, 197, 36, 211, 76, 52, 213, 100, 211, 77, 56, 229, 164, 211, 78, 60, 245, 228, 211, 79, 64, 5, 37, 212, 80, 68, 21, 101, 212, 81, 72, 37, 165, 212, 82, 76, 53, 229, 212, 83, 80, 69, 37, 213, 84, 84, 85, 101, 213, 85, 88, 101, 165, 213, 86, 92, 117, 229, 213, 87, 96, 133, 37, 214, 88, 100, 149, 101, 214, 89, 104, 165, 165, 214, 90, 108, 181, 229, 214, 91, 112, 197, 37, 215, 92, 116, 213, 101, 215, 93, 120, 229, 165, 215, 94, 124, 245, 229, 215, 95, 128, 5, 38, 216, 96, 132, 21, 102, 216, 97, 136, 37, 166, 216, 98, 140, 53, 230, 216, 99, 144, 69, 38, 217, 100, 148, 85, 102, 217, 101, 152, 101, 166, 217, 102, 156, 117, 230, 217, 103, 160, 133, 38, 218, 104, 164, 149, 102, 218, 105, 168, 165, 166, 218, 106, 172, 181, 230, 218, 107, 176, 197, 38, 219, 108, 180, 213, 102, 219, 109, 184, 229, 166, 219, 110, 188, 245, 230, 219, 111, 192, 5, 39, 220, 112, 196, 21, 103, 220, 113, 200, 37, 167, 220, 114, 204, 53, 231, 220, 115, 208, 69, 39, 221, 116, 212, 85, 103, 221, 117, 216, 101, 167, 221, 118, 220, 117, 231, 221, 119, 224, 133, 39, 222, 120, 228, 149, 103, 222, 121, 232, 165, 167, 222, 122, 236, 181, 231, 222, 123, 240, 197, 39, 223, 124, 
244, 213, 103, 223, 125, 125, 248, 229, 167, 223, 126, 252, 245, 231, 223, 127, 0, 6, 40, 224, 128, 4, 22, 104, 224, 129, 8, 38, 168, 224, 130, 12, 54, 232, 224, 131, 16, 70, 40, 225, 132, 20, 86, 104, 225, 133, 24, 102, 168, 225, 134, 28, 118, 232, 225, 135, 32, 134, 40, 226, 136, 36, 150, 104, 226, 137, 40, 166, 168, 226, 138, 44, 182, 232, 226, 139, 48, 198, 40, 227, 140, 52, 214, 104, 227, 141, 56, 230, 168, 227, 142, 60, 246, 232, 227, 143, 64, 6, 41, 228, 144, 68, 22, 105, 228, 145, 72, 38, 169, 228, 146, 76, 54, 233, 228, 147, 80, 70, 41, 229, 148, 84, 86, 105, 229, 149, 88, 102, 169, 229, 150, 92, 118, 233, 229, 151, 96, 134, 41, 230, 152, 100, 150, 105, 230, 153, 104, 166, 169, 230, 154, 108, 182, 233, 230, 155, 112, 198, 41, 231, 156, 116, 214, 105, 231, 157, 120, 230, 169, 231, 158, 124, 246, 233, 231, 159, 128, 6, 42, 232, 160, 132, 22, 106, 232, 161, 136, 38, 170, 232, 162, 140, 54, 234, 232, 163, 144, 70, 42, 233, 164, 148, 86, 106, 233, 165, 152, 102, 170, 233, 166, 156, 118, 234, 233, 167, 160, 134, 42, 234, 168, 164, 150, 106, 234, 169, 168, 166, 170, 234, 170, 172, 182, 234, 234, 171, 176, 198, 42, 235, 172, 180, 214, 106, 235, 173, 184, 230, 170, 235, 174, 188, 246, 234, 235, 175, 192, 6, 43, 236, 176, 196, 22, 107, 236, 177, 200, 38, 171, 236, 178, 204, 54, 235, 236, 179, 208, 70, 43, 237, 180, 212, 86, 107, 237, 181, 216, 102, 171, 237, 182, 220, 118, 235, 237, 183, 224, 134, 43, 238, 184, 228, 150, 107, 238, 185, 232, 166, 171, 238, 186, 236, 182, 235, 238, 187, 240, 198, 43, 239, 188, 244, 214, 107, 239, 189, 248, 230, 171, 239, 190, 252, 246, 235, 239, 191, 0, 7, 44, 240, 192, 4, 23, 108, 240, 193, 8, 39, 172, 240, 194, 12, 55, 236, 240, 195, 16, 71, 44, 241, 196, 20, 87, 108, 241, 197, 24, 103, 172, 241, 198, 28, 119, 236, 241, 199, 32, 135, 44, 242, 200, 36, 151, 108, 242, 201, 40, 167, 172, 242, 202, 44, 183, 236, 242, 203, 48, 199, 44, 243, 204, 52, 215, 108, 243, 205, 56, 231, 172, 243, 206, 60, 247, 236, 243, 207, 64, 7, 45, 244, 208, 68, 23, 109, 244, 209, 72, 39, 173, 244, 210, 76, 55, 237, 244, 211, 80, 71, 45, 245, 212, 84, 87, 109, 245, 213, 88, 103, 173, 245, 214, 92, 119, 237, 245, 215, 96, 135, 45, 246, 216, 100, 151, 109, 246, 217, 104, 167, 173, 246, 218, 108, 183, 237, 246, 219, 112, 199, 45, 247, 220, 116, 215, 109, 247, 221, 120, 231, 173, 247, 222, 124, 247, 237, 247, 223, 128, 7, 46, 248, 224, 132, 23, 110, 248, 225, 136, 39, 174, 248, 226, 140, 55, 238, 248, 227, 144, 71, 46, 249, 228, 148, 87, 110, 249, 229, 152, 103, 174, 249, 230, 156, 119, 238, 249, 231, 160, 135, 46, 250, 232, 164, 151, 110, 250, 233, 168, 167, 174, 250, 234, 172, 183, 238, 250, 235, 176, 199, 46, 251, 236, 180, 215, 110, 251, 237, 184, 231, 174, 251, 238, 188, 247, 238, 251, 239, 192, 7, 47, 252, 240, 196, 23, 111, 252, 241, 200, 39, 175, 252, 242, 204, 55, 239, 252, 243, 208, 71, 47, 253, 244, 212, 87, 111, 253, 245, 216, 103, 175, 253, 246, 220, 119, 239, 253, 247, 224, 135, 47, 254, 248, 228, 151, 111, 254, 249, ]; let num_bits = 10; let decoder = HybridRleDecoder::new(&data, num_bits as u32, 1000); let result = decoder.collect::<Vec<_>>(); assert_eq!(result, (0..1000).collect::<Vec<_>>()); } #[test] fn small() { let data = vec![3, 2]; let num_bits = 3; let decoder = HybridRleDecoder::new(&data, num_bits as u32, 1); let result = decoder.collect::<Vec<_>>(); assert_eq!(result, &[2]); } #[test] fn zero_bit_width() { let data = vec![3]; let num_bits = 0; let decoder = HybridRleDecoder::new(&data, num_bits as u32, 2); let result = decoder.collect::<Vec<_>>(); 
assert_eq!(result, &[0, 0]); } }
{ self.remaining -= 1; Some(result) }
conditional_block
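Note how `next()` falls back: when the current run is exhausted it calls `read_next` and recurses, while `size_hint` stays exact because `remaining` is decremented once per yielded value; that is what makes the `ExactSizeIterator` impl sound. A usage sketch mirroring the module's `roundtrip` test:

```rust
// The exact size_hint lets `collect` allocate the Vec in one shot, and
// `len()` is available through ExactSizeIterator without decoding anything.
let mut buffer = vec![];
let expected = (0..1000).collect::<Vec<_>>();
encode_u32(&mut buffer, expected.iter().cloned(), 10).unwrap();

let decoder = HybridRleDecoder::new(&buffer, 10, expected.len());
assert_eq!(decoder.len(), 1000); // from size_hint
assert_eq!(decoder.collect::<Vec<_>>(), expected);
```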
framebuffer_server.rs
// Copyright 2022 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. //! This file contains for creating and serving a `Flatland` view using a `Framebuffer`. //! //! A lot of the code in this file is temporary to enable developers to see the contents of a //! `Framebuffer` in the workstation UI (e.g., using `ffx session add`). //! //! To display the `Framebuffer` as its view, a component must add the `framebuffer` feature to its //! `.cml`. use anyhow::anyhow; use fidl::{endpoints::create_proxy, HandleBased}; use fidl_fuchsia_math as fmath; use fidl_fuchsia_sysmem as fsysmem; use fidl_fuchsia_ui_app as fuiapp; use fidl_fuchsia_ui_composition as fuicomposition; use fidl_fuchsia_ui_views as fuiviews; use flatland_frame_scheduling_lib::{ PresentationInfo, PresentedInfo, SchedulingLib, ThroughputScheduler, }; use fuchsia_async as fasync; use fuchsia_component::{client::connect_channel_to_protocol, server::ServiceFs}; use fuchsia_framebuffer::{sysmem::BufferCollectionAllocator, FrameUsage}; use fuchsia_scenic::{BufferCollectionTokenPair, ViewRefPair}; use fuchsia_zircon as zx; use futures::{StreamExt, TryStreamExt}; use std::sync::{mpsc::channel, Arc}; use crate::logging::log_warn; use crate::types::*; /// The width of the framebuffer image. pub const IMAGE_WIDTH: u32 = 720; /// The height of the framebuffer image. pub const IMAGE_HEIGHT: u32 = 1200; /// The offset at which the framebuffer will be placed. Assume a display width of 1920. pub const TRANSLATION_X: i32 = 1920 / 2 - IMAGE_WIDTH as i32 / 2; /// The Flatland identifier for the framebuffer image. const IMAGE_ID: fuicomposition::ContentId = fuicomposition::ContentId { value: 2 }; /// The Flatland identifier for the transform associated with the framebuffer. const TRANSFORM_ID: fuicomposition::TransformId = fuicomposition::TransformId { value: 3 }; /// The protocols that are exposed by the framebuffer server. enum ExposedProtocols { ViewProvider(fuiapp::ViewProviderRequestStream), } /// A `FramebufferServer` contains initialized proxies to Flatland, as well as a buffer collection /// that is registered with Flatland. pub struct FramebufferServer { /// The Flatland proxy associated with this server. flatland: fuicomposition::FlatlandSynchronousProxy, /// The buffer collection that is registered with Flatland. collection: fsysmem::BufferCollectionInfo2, } impl FramebufferServer { /// Returns a `FramebufferServer` that has created a scene and registered a buffer with /// Flatland. pub fn new() -> Result<Self, Errno> { let (server_end, client_end) = zx::Channel::create().map_err(|_| errno!(ENOENT))?; connect_channel_to_protocol::<fuicomposition::AllocatorMarker>(server_end) .map_err(|_| errno!(ENOENT))?; let allocator = fuicomposition::AllocatorSynchronousProxy::new(client_end); let (server_end, client_end) = zx::Channel::create().map_err(|_| errno!(ENOENT))?; connect_channel_to_protocol::<fuicomposition::FlatlandMarker>(server_end) .map_err(|_| errno!(ENOENT))?; let flatland = fuicomposition::FlatlandSynchronousProxy::new(client_end); let collection = init_scene(&flatland, &allocator).map_err(|_| errno!(EINVAL))?; Ok(Self { flatland, collection }) } /// Returns a clone of the VMO that is shared with Flatland. pub fn get_vmo(&self) -> Result<zx::Vmo, Errno> { self.collection.buffers[0] .vmo .as_ref() .ok_or_else(|| errno!(EINVAL))? 
.duplicate_handle(zx::Rights::SAME_RIGHTS) .map_err(|_| errno!(EINVAL)) } } /// Initializes the flatland scene, and returns the associated buffer collection. /// /// SAFETY: This function `.expect`'s a lot, because it isn't meant to be used long term and /// most of the failures would be unexpected and unrecoverable. fn init_scene( flatland: &fuicomposition::FlatlandSynchronousProxy, allocator: &fuicomposition::AllocatorSynchronousProxy, ) -> Result<fsysmem::BufferCollectionInfo2, anyhow::Error>
let sysmem_buffer_collection_token = executor.run_singlethreaded(buffer_allocator.duplicate_token())?; // Notify the async code that the sysmem buffer collection token is available. collection_sender.send(sysmem_buffer_collection_token).expect("Failed to send collection"); let allocation = executor.run_singlethreaded(buffer_allocator.allocate_buffers(true))?; // Notify the async code that the buffer allocation completed. allocation_sender.send(allocation).expect("Failed to send allocation"); Ok(()) }); // Wait for the async code to generate the buffer collection token. let sysmem_buffer_collection_token = collection_receiver .recv() .map_err(|_| anyhow!("Error receiving buffer collection token"))?; let mut buffer_tokens = BufferCollectionTokenPair::new(); let args = fuicomposition::RegisterBufferCollectionArgs { export_token: Some(buffer_tokens.export_token), buffer_collection_token: Some(sysmem_buffer_collection_token), ..fuicomposition::RegisterBufferCollectionArgs::EMPTY }; allocator .register_buffer_collection(args, zx::Time::INFINITE) .map_err(|_| anyhow!("FIDL error registering buffer collection"))? .map_err(|_| anyhow!("Error registering buffer collection"))?; // Now that the buffer collection is registered, wait for the buffer allocation to happen. let allocation = allocation_receiver.recv().map_err(|_| anyhow!("Error receiving buffer allocation"))?; let image_props = fuicomposition::ImageProperties { size: Some(fmath::SizeU { width: IMAGE_WIDTH, height: IMAGE_HEIGHT }), ..fuicomposition::ImageProperties::EMPTY }; flatland .create_image(&mut IMAGE_ID.clone(), &mut buffer_tokens.import_token, 0, image_props) .map_err(|_| anyhow!("FIDL error creating image"))?; flatland .create_transform(&mut TRANSFORM_ID.clone()) .map_err(|_| anyhow!("error creating transform"))?; flatland .set_root_transform(&mut TRANSFORM_ID.clone()) .map_err(|_| anyhow!("error setting root transform"))?; flatland .set_content(&mut TRANSFORM_ID.clone(), &mut IMAGE_ID.clone()) .map_err(|_| anyhow!("error setting content"))?; flatland .set_translation(&mut TRANSFORM_ID.clone(), &mut fmath::Vec_ { x: TRANSLATION_X, y: 0 }) .map_err(|_| anyhow!("error setting translation"))?; Ok(allocation) } /// Spawns a thread to serve a `ViewProvider` in `outgoing_dir`. /// /// SAFETY: This function `.expect`'s a lot, because it isn't meant to be used in the long time and /// most of the failures would be unexpected and unrecoverable. pub fn spawn_view_provider( server: Arc<FramebufferServer>, outgoing_dir: fidl::endpoints::ServerEnd<fidl_fuchsia_io::DirectoryMarker>, ) { std::thread::spawn(|| { let mut executor = fasync::LocalExecutor::new().expect("Failed to create executor"); executor.run_singlethreaded(async move { let mut service_fs = ServiceFs::new_local(); service_fs.dir("svc").add_fidl_service(ExposedProtocols::ViewProvider); service_fs.serve_connection(outgoing_dir).expect(""); while let Some(ExposedProtocols::ViewProvider(mut request_stream)) = service_fs.next().await { while let Ok(Some(event)) = request_stream.try_next().await { match event { fuiapp::ViewProviderRequest::CreateView2 { args, control_handle: _ } => { let mut view_creation_token = args.view_creation_token.unwrap(); let mut view_identity = fuiviews::ViewIdentityOnCreation::from( ViewRefPair::new().expect("Failed to create ViewRefPair"), ); let view_bound_protocols = fuicomposition::ViewBoundProtocols { ..fuicomposition::ViewBoundProtocols::EMPTY }; // We don't actually care about the parent viewport at the moment, because we don't resize. 
let (_parent_viewport_watcher, parent_viewport_watcher_request) = create_proxy::<fuicomposition::ParentViewportWatcherMarker>() .expect("failed to create ParentViewportWatcherProxy"); server .flatland .create_view2( &mut view_creation_token, &mut view_identity, view_bound_protocols, parent_viewport_watcher_request, ) .expect("FIDL error"); server .flatland .set_image_destination_size( &mut IMAGE_ID.clone(), &mut fmath::SizeU { width: IMAGE_WIDTH, height: IMAGE_HEIGHT }, ) .expect("fidl error"); // Now that the view has been created, start presenting. start_presenting(server.clone()); } r => { log_warn!("Got unexpected view provider request: {:?}", r); } } } } }); }); } /// Starts a flatland presentation loop, using the flatland proxy in `server`. fn start_presenting(server: Arc<FramebufferServer>) { fasync::Task::local(async move { let sched_lib = ThroughputScheduler::new(); // Request an initial presentation. sched_lib.request_present(); loop { let present_parameters = sched_lib.wait_to_update().await; sched_lib.request_present(); server .flatland .present(fuicomposition::PresentArgs { requested_presentation_time: Some( present_parameters.requested_presentation_time.into_nanos(), ), acquire_fences: None, release_fences: None, unsquashable: Some(present_parameters.unsquashable), ..fuicomposition::PresentArgs::EMPTY }) .unwrap_or(()); // Wait for events from flatland. If the event is `OnFramePresented` we notify the // scheduler and then wait for a `OnNextFrameBegin` before continuing. while match server.flatland.wait_for_event(zx::Time::INFINITE) { Ok(event) => match event { fuicomposition::FlatlandEvent::OnNextFrameBegin { values } => { let fuicomposition::OnNextFrameBeginValues { additional_present_credits, future_presentation_infos, .. } = values; let infos = future_presentation_infos .unwrap() .iter() .map(|x| PresentationInfo { latch_point: zx::Time::from_nanos(x.latch_point.unwrap()), presentation_time: zx::Time::from_nanos( x.presentation_time.unwrap(), ), }) .collect(); sched_lib.on_next_frame_begin(additional_present_credits.unwrap(), infos); false } fuicomposition::FlatlandEvent::OnFramePresented { frame_presented_info } => { let presented_infos = frame_presented_info .presentation_infos .iter() .map(|info| PresentedInfo { present_received_time: zx::Time::from_nanos( info.present_received_time.unwrap(), ), actual_latch_point: zx::Time::from_nanos( info.latched_time.unwrap(), ), }) .collect(); sched_lib.on_frame_presented( zx::Time::from_nanos(frame_presented_info.actual_presentation_time), presented_infos, ); true } fuicomposition::FlatlandEvent::OnError {.. } => false, }, Err(_) => false, } {} } }) .detach(); }
{ let (collection_sender, collection_receiver) = channel(); let (allocation_sender, allocation_receiver) = channel(); // This thread is spawned to deal with the mix of asynchronous and synchronous proxies. // In particular, we want to keep Framebuffer creation synchronous, while still making use of // BufferCollectionAllocator (which exposes an async api). // // The spawned thread will execute the futures and send results back to this thread via a // channel. std::thread::spawn(move || -> Result<(), anyhow::Error> { let mut executor = fasync::LocalExecutor::new()?; let mut buffer_allocator = BufferCollectionAllocator::new( IMAGE_WIDTH, IMAGE_HEIGHT, fidl_fuchsia_sysmem::PixelFormatType::R8G8B8A8, FrameUsage::Cpu, 1, )?; buffer_allocator.set_name(100, "Starnix ViewProvider")?;
identifier_body
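`init_scene` keeps its public surface synchronous by pushing all async work onto a helper thread: the thread owns a `LocalExecutor`, runs the allocator futures to completion, and hands each result back over a std `mpsc` channel. The same pattern in isolation (a sketch; the async block is a stand-in for the real `BufferCollectionAllocator` futures):

```rust
use std::sync::mpsc::channel;

// Pattern sketch: run async work on a helper thread with its own executor
// so the caller can stay synchronous, mirroring init_scene's channel handoff.
fn run_async_synchronously() -> Result<u32, anyhow::Error> {
    let (sender, receiver) = channel();
    std::thread::spawn(move || -> Result<(), anyhow::Error> {
        let mut executor = fuchsia_async::LocalExecutor::new()?;
        let value = executor.run_singlethreaded(async { 42u32 }); // placeholder future
        sender.send(value).expect("Failed to send result");
        Ok(())
    });
    // Blocks until the helper thread sends, like the collection/allocation
    // receives in init_scene.
    receiver.recv().map_err(|_| anyhow::anyhow!("helper thread exited early"))
}
```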
framebuffer_server.rs
// Copyright 2022 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. //! This file contains for creating and serving a `Flatland` view using a `Framebuffer`. //! //! A lot of the code in this file is temporary to enable developers to see the contents of a //! `Framebuffer` in the workstation UI (e.g., using `ffx session add`). //! //! To display the `Framebuffer` as its view, a component must add the `framebuffer` feature to its //! `.cml`. use anyhow::anyhow; use fidl::{endpoints::create_proxy, HandleBased}; use fidl_fuchsia_math as fmath; use fidl_fuchsia_sysmem as fsysmem; use fidl_fuchsia_ui_app as fuiapp; use fidl_fuchsia_ui_composition as fuicomposition; use fidl_fuchsia_ui_views as fuiviews; use flatland_frame_scheduling_lib::{ PresentationInfo, PresentedInfo, SchedulingLib, ThroughputScheduler, }; use fuchsia_async as fasync; use fuchsia_component::{client::connect_channel_to_protocol, server::ServiceFs}; use fuchsia_framebuffer::{sysmem::BufferCollectionAllocator, FrameUsage}; use fuchsia_scenic::{BufferCollectionTokenPair, ViewRefPair}; use fuchsia_zircon as zx; use futures::{StreamExt, TryStreamExt}; use std::sync::{mpsc::channel, Arc}; use crate::logging::log_warn; use crate::types::*; /// The width of the framebuffer image. pub const IMAGE_WIDTH: u32 = 720; /// The height of the framebuffer image. pub const IMAGE_HEIGHT: u32 = 1200; /// The offset at which the framebuffer will be placed. Assume a display width of 1920. pub const TRANSLATION_X: i32 = 1920 / 2 - IMAGE_WIDTH as i32 / 2; /// The Flatland identifier for the framebuffer image. const IMAGE_ID: fuicomposition::ContentId = fuicomposition::ContentId { value: 2 }; /// The Flatland identifier for the transform associated with the framebuffer. const TRANSFORM_ID: fuicomposition::TransformId = fuicomposition::TransformId { value: 3 }; /// The protocols that are exposed by the framebuffer server. enum ExposedProtocols { ViewProvider(fuiapp::ViewProviderRequestStream), } /// A `FramebufferServer` contains initialized proxies to Flatland, as well as a buffer collection /// that is registered with Flatland. pub struct
{ /// The Flatland proxy associated with this server. flatland: fuicomposition::FlatlandSynchronousProxy, /// The buffer collection that is registered with Flatland. collection: fsysmem::BufferCollectionInfo2, } impl FramebufferServer { /// Returns a `FramebufferServer` that has created a scene and registered a buffer with /// Flatland. pub fn new() -> Result<Self, Errno> { let (server_end, client_end) = zx::Channel::create().map_err(|_| errno!(ENOENT))?; connect_channel_to_protocol::<fuicomposition::AllocatorMarker>(server_end) .map_err(|_| errno!(ENOENT))?; let allocator = fuicomposition::AllocatorSynchronousProxy::new(client_end); let (server_end, client_end) = zx::Channel::create().map_err(|_| errno!(ENOENT))?; connect_channel_to_protocol::<fuicomposition::FlatlandMarker>(server_end) .map_err(|_| errno!(ENOENT))?; let flatland = fuicomposition::FlatlandSynchronousProxy::new(client_end); let collection = init_scene(&flatland, &allocator).map_err(|_| errno!(EINVAL))?; Ok(Self { flatland, collection }) } /// Returns a clone of the VMO that is shared with Flatland. pub fn get_vmo(&self) -> Result<zx::Vmo, Errno> { self.collection.buffers[0] .vmo .as_ref() .ok_or_else(|| errno!(EINVAL))? .duplicate_handle(zx::Rights::SAME_RIGHTS) .map_err(|_| errno!(EINVAL)) } } /// Initializes the flatland scene, and returns the associated buffer collection. /// /// SAFETY: This function `.expect`'s a lot, because it isn't meant to be used in the long time and /// most of the failures would be unexpected and unrecoverable. fn init_scene( flatland: &fuicomposition::FlatlandSynchronousProxy, allocator: &fuicomposition::AllocatorSynchronousProxy, ) -> Result<fsysmem::BufferCollectionInfo2, anyhow::Error> { let (collection_sender, collection_receiver) = channel(); let (allocation_sender, allocation_receiver) = channel(); // This thread is spawned to deal with the mix of asynchronous and synchronous proxies. // In particular, we want to keep Framebuffer creation synchronous, while still making use of // BufferCollectionAllocator (which exposes an async api). // // The spawned thread will execute the futures and send results back to this thread via a // channel. std::thread::spawn(move || -> Result<(), anyhow::Error> { let mut executor = fasync::LocalExecutor::new()?; let mut buffer_allocator = BufferCollectionAllocator::new( IMAGE_WIDTH, IMAGE_HEIGHT, fidl_fuchsia_sysmem::PixelFormatType::R8G8B8A8, FrameUsage::Cpu, 1, )?; buffer_allocator.set_name(100, "Starnix ViewProvider")?; let sysmem_buffer_collection_token = executor.run_singlethreaded(buffer_allocator.duplicate_token())?; // Notify the async code that the sysmem buffer collection token is available. collection_sender.send(sysmem_buffer_collection_token).expect("Failed to send collection"); let allocation = executor.run_singlethreaded(buffer_allocator.allocate_buffers(true))?; // Notify the async code that the buffer allocation completed. allocation_sender.send(allocation).expect("Failed to send allocation"); Ok(()) }); // Wait for the async code to generate the buffer collection token. 
let sysmem_buffer_collection_token = collection_receiver .recv() .map_err(|_| anyhow!("Error receiving buffer collection token"))?; let mut buffer_tokens = BufferCollectionTokenPair::new(); let args = fuicomposition::RegisterBufferCollectionArgs { export_token: Some(buffer_tokens.export_token), buffer_collection_token: Some(sysmem_buffer_collection_token), ..fuicomposition::RegisterBufferCollectionArgs::EMPTY }; allocator .register_buffer_collection(args, zx::Time::INFINITE) .map_err(|_| anyhow!("FIDL error registering buffer collection"))? .map_err(|_| anyhow!("Error registering buffer collection"))?; // Now that the buffer collection is registered, wait for the buffer allocation to happen. let allocation = allocation_receiver.recv().map_err(|_| anyhow!("Error receiving buffer allocation"))?; let image_props = fuicomposition::ImageProperties { size: Some(fmath::SizeU { width: IMAGE_WIDTH, height: IMAGE_HEIGHT }), ..fuicomposition::ImageProperties::EMPTY }; flatland .create_image(&mut IMAGE_ID.clone(), &mut buffer_tokens.import_token, 0, image_props) .map_err(|_| anyhow!("FIDL error creating image"))?; flatland .create_transform(&mut TRANSFORM_ID.clone()) .map_err(|_| anyhow!("error creating transform"))?; flatland .set_root_transform(&mut TRANSFORM_ID.clone()) .map_err(|_| anyhow!("error setting root transform"))?; flatland .set_content(&mut TRANSFORM_ID.clone(), &mut IMAGE_ID.clone()) .map_err(|_| anyhow!("error setting content"))?; flatland .set_translation(&mut TRANSFORM_ID.clone(), &mut fmath::Vec_ { x: TRANSLATION_X, y: 0 }) .map_err(|_| anyhow!("error setting translation"))?; Ok(allocation) } /// Spawns a thread to serve a `ViewProvider` in `outgoing_dir`. /// /// SAFETY: This function `.expect`'s a lot, because it isn't meant to be used in the long time and /// most of the failures would be unexpected and unrecoverable. pub fn spawn_view_provider( server: Arc<FramebufferServer>, outgoing_dir: fidl::endpoints::ServerEnd<fidl_fuchsia_io::DirectoryMarker>, ) { std::thread::spawn(|| { let mut executor = fasync::LocalExecutor::new().expect("Failed to create executor"); executor.run_singlethreaded(async move { let mut service_fs = ServiceFs::new_local(); service_fs.dir("svc").add_fidl_service(ExposedProtocols::ViewProvider); service_fs.serve_connection(outgoing_dir).expect(""); while let Some(ExposedProtocols::ViewProvider(mut request_stream)) = service_fs.next().await { while let Ok(Some(event)) = request_stream.try_next().await { match event { fuiapp::ViewProviderRequest::CreateView2 { args, control_handle: _ } => { let mut view_creation_token = args.view_creation_token.unwrap(); let mut view_identity = fuiviews::ViewIdentityOnCreation::from( ViewRefPair::new().expect("Failed to create ViewRefPair"), ); let view_bound_protocols = fuicomposition::ViewBoundProtocols { ..fuicomposition::ViewBoundProtocols::EMPTY }; // We don't actually care about the parent viewport at the moment, because we don't resize. let (_parent_viewport_watcher, parent_viewport_watcher_request) = create_proxy::<fuicomposition::ParentViewportWatcherMarker>() .expect("failed to create ParentViewportWatcherProxy"); server .flatland .create_view2( &mut view_creation_token, &mut view_identity, view_bound_protocols, parent_viewport_watcher_request, ) .expect("FIDL error"); server .flatland .set_image_destination_size( &mut IMAGE_ID.clone(), &mut fmath::SizeU { width: IMAGE_WIDTH, height: IMAGE_HEIGHT }, ) .expect("fidl error"); // Now that the view has been created, start presenting. 
start_presenting(server.clone()); } r => { log_warn!("Got unexpected view provider request: {:?}", r); } } } } }); }); } /// Starts a flatland presentation loop, using the flatland proxy in `server`. fn start_presenting(server: Arc<FramebufferServer>) { fasync::Task::local(async move { let sched_lib = ThroughputScheduler::new(); // Request an initial presentation. sched_lib.request_present(); loop { let present_parameters = sched_lib.wait_to_update().await; sched_lib.request_present(); server .flatland .present(fuicomposition::PresentArgs { requested_presentation_time: Some( present_parameters.requested_presentation_time.into_nanos(), ), acquire_fences: None, release_fences: None, unsquashable: Some(present_parameters.unsquashable), ..fuicomposition::PresentArgs::EMPTY }) .unwrap_or(()); // Wait for events from flatland. If the event is `OnFramePresented` we notify the // scheduler and then wait for a `OnNextFrameBegin` before continuing. while match server.flatland.wait_for_event(zx::Time::INFINITE) { Ok(event) => match event { fuicomposition::FlatlandEvent::OnNextFrameBegin { values } => { let fuicomposition::OnNextFrameBeginValues { additional_present_credits, future_presentation_infos, .. } = values; let infos = future_presentation_infos .unwrap() .iter() .map(|x| PresentationInfo { latch_point: zx::Time::from_nanos(x.latch_point.unwrap()), presentation_time: zx::Time::from_nanos( x.presentation_time.unwrap(), ), }) .collect(); sched_lib.on_next_frame_begin(additional_present_credits.unwrap(), infos); false } fuicomposition::FlatlandEvent::OnFramePresented { frame_presented_info } => { let presented_infos = frame_presented_info .presentation_infos .iter() .map(|info| PresentedInfo { present_received_time: zx::Time::from_nanos( info.present_received_time.unwrap(), ), actual_latch_point: zx::Time::from_nanos( info.latched_time.unwrap(), ), }) .collect(); sched_lib.on_frame_presented( zx::Time::from_nanos(frame_presented_info.actual_presentation_time), presented_infos, ); true } fuicomposition::FlatlandEvent::OnError {.. } => false, }, Err(_) => false, } {} } }) .detach(); }
FramebufferServer
identifier_name
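A hypothetical caller pairs `FramebufferServer::new()` with `get_vmo()` to obtain a writable duplicate of the shared buffer; byte offsets follow the R8G8B8A8 layout and `IMAGE_WIDTH` stride that `init_scene` configures. A sketch (names and error handling are illustrative, collapsed into `expect`):

```rust
// Hypothetical caller: paint the first pixel of row 10 red.
let server = std::sync::Arc::new(FramebufferServer::new().expect("create server"));
let vmo = server.get_vmo().expect("duplicate vmo");
let red: [u8; 4] = [0xff, 0x00, 0x00, 0xff]; // R8G8B8A8
let offset = (10 * IMAGE_WIDTH * 4) as u64; // rows are IMAGE_WIDTH * 4 bytes
vmo.write(&red, offset).expect("write pixel");
```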
framebuffer_server.rs
// Copyright 2022 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. //! This file contains for creating and serving a `Flatland` view using a `Framebuffer`. //! //! A lot of the code in this file is temporary to enable developers to see the contents of a //! `Framebuffer` in the workstation UI (e.g., using `ffx session add`). //! //! To display the `Framebuffer` as its view, a component must add the `framebuffer` feature to its //! `.cml`. use anyhow::anyhow; use fidl::{endpoints::create_proxy, HandleBased}; use fidl_fuchsia_math as fmath; use fidl_fuchsia_sysmem as fsysmem; use fidl_fuchsia_ui_app as fuiapp; use fidl_fuchsia_ui_composition as fuicomposition; use fidl_fuchsia_ui_views as fuiviews; use flatland_frame_scheduling_lib::{ PresentationInfo, PresentedInfo, SchedulingLib, ThroughputScheduler, }; use fuchsia_async as fasync; use fuchsia_component::{client::connect_channel_to_protocol, server::ServiceFs}; use fuchsia_framebuffer::{sysmem::BufferCollectionAllocator, FrameUsage}; use fuchsia_scenic::{BufferCollectionTokenPair, ViewRefPair}; use fuchsia_zircon as zx; use futures::{StreamExt, TryStreamExt}; use std::sync::{mpsc::channel, Arc}; use crate::logging::log_warn; use crate::types::*; /// The width of the framebuffer image. pub const IMAGE_WIDTH: u32 = 720; /// The height of the framebuffer image. pub const IMAGE_HEIGHT: u32 = 1200; /// The offset at which the framebuffer will be placed. Assume a display width of 1920. pub const TRANSLATION_X: i32 = 1920 / 2 - IMAGE_WIDTH as i32 / 2; /// The Flatland identifier for the framebuffer image. const IMAGE_ID: fuicomposition::ContentId = fuicomposition::ContentId { value: 2 }; /// The Flatland identifier for the transform associated with the framebuffer. const TRANSFORM_ID: fuicomposition::TransformId = fuicomposition::TransformId { value: 3 }; /// The protocols that are exposed by the framebuffer server. enum ExposedProtocols { ViewProvider(fuiapp::ViewProviderRequestStream), } /// A `FramebufferServer` contains initialized proxies to Flatland, as well as a buffer collection /// that is registered with Flatland. pub struct FramebufferServer { /// The Flatland proxy associated with this server. flatland: fuicomposition::FlatlandSynchronousProxy, /// The buffer collection that is registered with Flatland. collection: fsysmem::BufferCollectionInfo2, } impl FramebufferServer { /// Returns a `FramebufferServer` that has created a scene and registered a buffer with /// Flatland. pub fn new() -> Result<Self, Errno> { let (server_end, client_end) = zx::Channel::create().map_err(|_| errno!(ENOENT))?; connect_channel_to_protocol::<fuicomposition::AllocatorMarker>(server_end) .map_err(|_| errno!(ENOENT))?; let allocator = fuicomposition::AllocatorSynchronousProxy::new(client_end); let (server_end, client_end) = zx::Channel::create().map_err(|_| errno!(ENOENT))?; connect_channel_to_protocol::<fuicomposition::FlatlandMarker>(server_end) .map_err(|_| errno!(ENOENT))?; let flatland = fuicomposition::FlatlandSynchronousProxy::new(client_end); let collection = init_scene(&flatland, &allocator).map_err(|_| errno!(EINVAL))?; Ok(Self { flatland, collection }) } /// Returns a clone of the VMO that is shared with Flatland. pub fn get_vmo(&self) -> Result<zx::Vmo, Errno> { self.collection.buffers[0] .vmo .as_ref() .ok_or_else(|| errno!(EINVAL))? 
.duplicate_handle(zx::Rights::SAME_RIGHTS) .map_err(|_| errno!(EINVAL)) } } /// Initializes the flatland scene, and returns the associated buffer collection. /// /// SAFETY: This function `.expect`'s a lot, because it isn't meant to be used in the long time and /// most of the failures would be unexpected and unrecoverable. fn init_scene( flatland: &fuicomposition::FlatlandSynchronousProxy, allocator: &fuicomposition::AllocatorSynchronousProxy, ) -> Result<fsysmem::BufferCollectionInfo2, anyhow::Error> { let (collection_sender, collection_receiver) = channel(); let (allocation_sender, allocation_receiver) = channel(); // This thread is spawned to deal with the mix of asynchronous and synchronous proxies. // In particular, we want to keep Framebuffer creation synchronous, while still making use of // BufferCollectionAllocator (which exposes an async api). // // The spawned thread will execute the futures and send results back to this thread via a // channel. std::thread::spawn(move || -> Result<(), anyhow::Error> { let mut executor = fasync::LocalExecutor::new()?; let mut buffer_allocator = BufferCollectionAllocator::new( IMAGE_WIDTH, IMAGE_HEIGHT, fidl_fuchsia_sysmem::PixelFormatType::R8G8B8A8, FrameUsage::Cpu, 1, )?; buffer_allocator.set_name(100, "Starnix ViewProvider")?; let sysmem_buffer_collection_token = executor.run_singlethreaded(buffer_allocator.duplicate_token())?; // Notify the async code that the sysmem buffer collection token is available. collection_sender.send(sysmem_buffer_collection_token).expect("Failed to send collection"); let allocation = executor.run_singlethreaded(buffer_allocator.allocate_buffers(true))?; // Notify the async code that the buffer allocation completed. allocation_sender.send(allocation).expect("Failed to send allocation"); Ok(()) }); // Wait for the async code to generate the buffer collection token. let sysmem_buffer_collection_token = collection_receiver .recv() .map_err(|_| anyhow!("Error receiving buffer collection token"))?; let mut buffer_tokens = BufferCollectionTokenPair::new(); let args = fuicomposition::RegisterBufferCollectionArgs { export_token: Some(buffer_tokens.export_token), buffer_collection_token: Some(sysmem_buffer_collection_token), ..fuicomposition::RegisterBufferCollectionArgs::EMPTY }; allocator .register_buffer_collection(args, zx::Time::INFINITE) .map_err(|_| anyhow!("FIDL error registering buffer collection"))? .map_err(|_| anyhow!("Error registering buffer collection"))?; // Now that the buffer collection is registered, wait for the buffer allocation to happen. 
let allocation = allocation_receiver.recv().map_err(|_| anyhow!("Error receiving buffer allocation"))?; let image_props = fuicomposition::ImageProperties { size: Some(fmath::SizeU { width: IMAGE_WIDTH, height: IMAGE_HEIGHT }), ..fuicomposition::ImageProperties::EMPTY }; flatland .create_image(&mut IMAGE_ID.clone(), &mut buffer_tokens.import_token, 0, image_props) .map_err(|_| anyhow!("FIDL error creating image"))?; flatland .create_transform(&mut TRANSFORM_ID.clone()) .map_err(|_| anyhow!("error creating transform"))?; flatland .set_root_transform(&mut TRANSFORM_ID.clone()) .map_err(|_| anyhow!("error setting root transform"))?; flatland .set_content(&mut TRANSFORM_ID.clone(), &mut IMAGE_ID.clone()) .map_err(|_| anyhow!("error setting content"))?; flatland .set_translation(&mut TRANSFORM_ID.clone(), &mut fmath::Vec_ { x: TRANSLATION_X, y: 0 }) .map_err(|_| anyhow!("error setting translation"))?; Ok(allocation) } /// Spawns a thread to serve a `ViewProvider` in `outgoing_dir`. /// /// SAFETY: This function `.expect`'s a lot, because it isn't meant to be used in the long time and /// most of the failures would be unexpected and unrecoverable. pub fn spawn_view_provider( server: Arc<FramebufferServer>, outgoing_dir: fidl::endpoints::ServerEnd<fidl_fuchsia_io::DirectoryMarker>, ) { std::thread::spawn(|| { let mut executor = fasync::LocalExecutor::new().expect("Failed to create executor"); executor.run_singlethreaded(async move { let mut service_fs = ServiceFs::new_local(); service_fs.dir("svc").add_fidl_service(ExposedProtocols::ViewProvider); service_fs.serve_connection(outgoing_dir).expect(""); while let Some(ExposedProtocols::ViewProvider(mut request_stream)) = service_fs.next().await { while let Ok(Some(event)) = request_stream.try_next().await { match event { fuiapp::ViewProviderRequest::CreateView2 { args, control_handle: _ } => { let mut view_creation_token = args.view_creation_token.unwrap(); let mut view_identity = fuiviews::ViewIdentityOnCreation::from( ViewRefPair::new().expect("Failed to create ViewRefPair"), ); let view_bound_protocols = fuicomposition::ViewBoundProtocols { ..fuicomposition::ViewBoundProtocols::EMPTY }; // We don't actually care about the parent viewport at the moment, because we don't resize. let (_parent_viewport_watcher, parent_viewport_watcher_request) = create_proxy::<fuicomposition::ParentViewportWatcherMarker>() .expect("failed to create ParentViewportWatcherProxy"); server .flatland .create_view2( &mut view_creation_token, &mut view_identity, view_bound_protocols, parent_viewport_watcher_request, ) .expect("FIDL error"); server .flatland .set_image_destination_size( &mut IMAGE_ID.clone(), &mut fmath::SizeU { width: IMAGE_WIDTH, height: IMAGE_HEIGHT }, ) .expect("fidl error"); // Now that the view has been created, start presenting. start_presenting(server.clone()); } r => { log_warn!("Got unexpected view provider request: {:?}", r); } } } } }); }); } /// Starts a flatland presentation loop, using the flatland proxy in `server`. fn start_presenting(server: Arc<FramebufferServer>) { fasync::Task::local(async move { let sched_lib = ThroughputScheduler::new(); // Request an initial presentation. sched_lib.request_present(); loop { let present_parameters = sched_lib.wait_to_update().await; sched_lib.request_present(); server .flatland
.present(fuicomposition::PresentArgs { requested_presentation_time: Some( present_parameters.requested_presentation_time.into_nanos(), ), acquire_fences: None, release_fences: None, unsquashable: Some(present_parameters.unsquashable), ..fuicomposition::PresentArgs::EMPTY }) .unwrap_or(()); // Wait for events from flatland. If the event is `OnFramePresented` we notify the // scheduler and then wait for a `OnNextFrameBegin` before continuing. while match server.flatland.wait_for_event(zx::Time::INFINITE) { Ok(event) => match event { fuicomposition::FlatlandEvent::OnNextFrameBegin { values } => { let fuicomposition::OnNextFrameBeginValues { additional_present_credits, future_presentation_infos, .. } = values; let infos = future_presentation_infos .unwrap() .iter() .map(|x| PresentationInfo { latch_point: zx::Time::from_nanos(x.latch_point.unwrap()), presentation_time: zx::Time::from_nanos( x.presentation_time.unwrap(), ), }) .collect(); sched_lib.on_next_frame_begin(additional_present_credits.unwrap(), infos); false } fuicomposition::FlatlandEvent::OnFramePresented { frame_presented_info } => { let presented_infos = frame_presented_info .presentation_infos .iter() .map(|info| PresentedInfo { present_received_time: zx::Time::from_nanos( info.present_received_time.unwrap(), ), actual_latch_point: zx::Time::from_nanos( info.latched_time.unwrap(), ), }) .collect(); sched_lib.on_frame_presented( zx::Time::from_nanos(frame_presented_info.actual_presentation_time), presented_infos, ); true } fuicomposition::FlatlandEvent::OnError {.. } => false, }, Err(_) => false, } {} } }) .detach(); }
random_line_split
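The presentation loop above is throttled by Flatland's present credits: each `present` call spends one, and `OnNextFrameBegin` returns `additional_present_credits` along with the future presentation times that `ThroughputScheduler` uses to pick the next frame. A toy model of just the credit flow (illustrative names, not a Flatland API):

```rust
// Toy credit gate modeling the flow control the presentation loop relies on.
struct PresentCredits {
    credits: u32,
}

impl PresentCredits {
    /// A present is only legal while a credit is available.
    fn try_present(&mut self) -> bool {
        if self.credits == 0 {
            return false; // must wait for OnNextFrameBegin
        }
        self.credits -= 1;
        true
    }

    /// OnNextFrameBegin hands credits back to the client.
    fn on_next_frame_begin(&mut self, additional_present_credits: u32) {
        self.credits += additional_present_credits;
    }
}
```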
lib.rs
//! [![github]](https://github.com/dtolnay/paste)&ensp;[![crates-io]](https://crates.io/crates/paste)&ensp;[![docs-rs]](https://docs.rs/paste) //! //! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github //! [crates-io]: https://img.shields.io/badge/crates.io-fc8d62?style=for-the-badge&labelColor=555555&logo=rust //! [docs-rs]: https://img.shields.io/badge/docs.rs-66c2a5?style=for-the-badge&labelColor=555555&logo=docs.rs //! //! <br> //! //! The nightly-only [`concat_idents!`] macro in the Rust standard library is //! notoriously underpowered in that its concatenated identifiers can only refer to //! existing items, they can never be used to define something new. //! //! [`concat_idents!`]: https://doc.rust-lang.org/std/macro.concat_idents.html //! //! This crate provides a flexible way to paste together identifiers in a macro, //! including using pasted identifiers to define new items. //! //! This approach works with any Rust compiler 1.31+. //! //! <br> //! //! # Pasting identifiers //! //! Within the `paste!` macro, identifiers inside `[<`...`>]` are pasted //! together to form a single identifier. //! //! ``` //! use paste::paste; //! //! paste! { //! // Defines a const called `QRST`. //! const [<Q R S T>]: &str = "success!"; //! } //! //! fn main() { //! assert_eq!( //! paste! { [<Q R S T>].len() }, //! 8, //! ); //! } //! ``` //! //! <br><br> //! //! # More elaborate example //! //! The next example shows a macro that generates accessor methods for some //! struct fields. It demonstrates how you might find it useful to bundle a //! paste invocation inside of a macro\_rules macro. //! //! ``` //! use paste::paste; //! //! macro_rules! make_a_struct_and_getters { //! ($name:ident { $($field:ident),* }) => { //! // Define a struct. This expands to: //! // //! // pub struct S { //! // a: String, //! // b: String, //! // c: String, //! // } //! pub struct $name { //! $( //! $field: String, //! )* //! } //! //! // Build an impl block with getters. This expands to: //! // //! // impl S { //! // pub fn get_a(&self) -> &str { &self.a } //! // pub fn get_b(&self) -> &str { &self.b } //! // pub fn get_c(&self) -> &str { &self.c } //! // } //! paste! { //! impl $name { //! $( //! pub fn [<get_ $field>](&self) -> &str { //! &self.$field //! } //! )* //! } //! } //! } //! } //! //! make_a_struct_and_getters!(S { a, b, c }); //! //! fn call_some_getters(s: &S) -> bool { //! s.get_a() == s.get_b() && s.get_c().is_empty() //! } //! # //! # fn main() {} //! ``` //! //! <br><br> //! //! # Case conversion //! //! Use `$var:lower` or `$var:upper` in the segment list to convert an //! interpolated segment to lower- or uppercase as part of the paste. For //! example, `[<ld_ $reg:lower _expr>]` would paste to `ld_bc_expr` if invoked //! with $reg=`Bc`. //! //! Use `$var:snake` to convert CamelCase input to snake\_case. //! Use `$var:camel` to convert snake\_case to CamelCase. //! These compose, so for example `$var:snake:upper` would give you SCREAMING\_CASE. //! //! The precise Unicode conversions are as defined by [`str::to_lowercase`] and //! [`str::to_uppercase`]. //! //! [`str::to_lowercase`]: https://doc.rust-lang.org/std/primitive.str.html#method.to_lowercase //! [`str::to_uppercase`]: https://doc.rust-lang.org/std/primitive.str.html#method.to_uppercase //! //! <br> //! //! # Pasting documentation strings //! //! Within the `paste!` macro, arguments to a #\[doc...\] attribute are //! 
implicitly concatenated together to form a coherent documentation string. //! //! ``` //! use paste::paste; //! //! macro_rules! method_new { //! ($ret:ident) => { //! paste! { //! #[doc = "Create a new `" $ret "` object."] //! pub fn new() -> $ret { todo!() } //! } //! }; //! } //! //! pub struct Paste {} //! //! method_new!(Paste); // expands to #[doc = "Create a new `Paste` object"] //! ``` #![doc(html_root_url = "https://docs.rs/paste/1.0.14")] #![allow( clippy::derive_partial_eq_without_eq, clippy::doc_markdown, clippy::match_same_arms, clippy::module_name_repetitions, clippy::needless_doctest_main, clippy::too_many_lines )] extern crate proc_macro; mod attr; mod error; mod segment; use crate::attr::expand_attr; use crate::error::{Error, Result}; use crate::segment::Segment; use proc_macro::{Delimiter, Group, Ident, Punct, Spacing, Span, TokenStream, TokenTree}; use std::char; use std::iter; use std::panic; #[proc_macro] pub fn paste(input: TokenStream) -> TokenStream { let mut contains_paste = false; let flatten_single_interpolation = true; match expand( input.clone(), &mut contains_paste, flatten_single_interpolation, ) { Ok(expanded) => { if contains_paste { expanded } else { input } } Err(err) => err.to_compile_error(), } } #[doc(hidden)] #[proc_macro] pub fn item(input: TokenStream) -> TokenStream { paste(input) } #[doc(hidden)] #[proc_macro] pub fn expr(input: TokenStream) -> TokenStream { paste(input) } fn expand( input: TokenStream, contains_paste: &mut bool, flatten_single_interpolation: bool, ) -> Result<TokenStream> { let mut expanded = TokenStream::new(); let mut lookbehind = Lookbehind::Other; let mut prev_none_group = None::<Group>; let mut tokens = input.into_iter().peekable(); loop { let token = tokens.next(); if let Some(group) = prev_none_group.take() { if match (&token, tokens.peek()) { (Some(TokenTree::Punct(fst)), Some(TokenTree::Punct(snd))) => { fst.as_char() == ':' && snd.as_char() == ':' && fst.spacing() == Spacing::Joint } _ => false, } { expanded.extend(group.stream()); *contains_paste = true; } else { expanded.extend(iter::once(TokenTree::Group(group))); } } match token { Some(TokenTree::Group(group)) => { let delimiter = group.delimiter(); let content = group.stream(); let span = group.span(); if delimiter == Delimiter::Bracket && is_paste_operation(&content) { let segments = parse_bracket_as_segments(content, span)?; let pasted = segment::paste(&segments)?; let tokens = pasted_to_tokens(pasted, span)?; expanded.extend(tokens); *contains_paste = true; } else if flatten_single_interpolation && delimiter == Delimiter::None && is_single_interpolation_group(&content) { expanded.extend(content); *contains_paste = true; } else { let mut group_contains_paste = false; let is_attribute = delimiter == Delimiter::Bracket && (lookbehind == Lookbehind::Pound || lookbehind == Lookbehind::PoundBang); let mut nested = expand( content, &mut group_contains_paste, flatten_single_interpolation &&!is_attribute, )?; if is_attribute { nested = expand_attr(nested, span, &mut group_contains_paste)?; } let group = if group_contains_paste { let mut group = Group::new(delimiter, nested); group.set_span(span); *contains_paste = true; group } else { group.clone() }; if delimiter!= Delimiter::None { expanded.extend(iter::once(TokenTree::Group(group))); } else if lookbehind == Lookbehind::DoubleColon { expanded.extend(group.stream()); *contains_paste = true; } else { prev_none_group = Some(group); } } lookbehind = Lookbehind::Other; } Some(TokenTree::Punct(punct)) => { lookbehind = match 
punct.as_char() { ':' if lookbehind == Lookbehind::JointColon => Lookbehind::DoubleColon, ':' if punct.spacing() == Spacing::Joint => Lookbehind::JointColon, '#' => Lookbehind::Pound, '!' if lookbehind == Lookbehind::Pound => Lookbehind::PoundBang, _ => Lookbehind::Other, }; expanded.extend(iter::once(TokenTree::Punct(punct))); } Some(other) => { lookbehind = Lookbehind::Other; expanded.extend(iter::once(other)); } None => return Ok(expanded), } } } #[derive(PartialEq)] enum Lookbehind { JointColon, DoubleColon, Pound, PoundBang, Other, } // https://github.com/dtolnay/paste/issues/26 fn is_single_interpolation_group(input: &TokenStream) -> bool { #[derive(PartialEq)] enum State { Init, Ident, Literal, Apostrophe, Lifetime, Colon1, Colon2, } let mut state = State::Init; for tt in input.clone() { state = match (state, &tt) { (State::Init, TokenTree::Ident(_)) => State::Ident, (State::Init, TokenTree::Literal(_)) => State::Literal, (State::Init, TokenTree::Punct(punct)) if punct.as_char() == '\'' => State::Apostrophe, (State::Apostrophe, TokenTree::Ident(_)) => State::Lifetime, (State::Ident, TokenTree::Punct(punct)) if punct.as_char() == ':' && punct.spacing() == Spacing::Joint => { State::Colon1 } (State::Colon1, TokenTree::Punct(punct)) if punct.as_char() == ':' && punct.spacing() == Spacing::Alone => { State::Colon2 } (State::Colon2, TokenTree::Ident(_)) => State::Ident, _ => return false, }; } state == State::Ident || state == State::Literal || state == State::Lifetime } fn is_paste_operation(input: &TokenStream) -> bool { let mut tokens = input.clone().into_iter(); match &tokens.next() { Some(TokenTree::Punct(punct)) if punct.as_char() == '<' => {} _ => return false, } let mut has_token = false; loop { match &tokens.next() { Some(TokenTree::Punct(punct)) if punct.as_char() == '>' => { return has_token && tokens.next().is_none(); } Some(_) => has_token = true, None => return false, } } } fn parse_bracket_as_segments(input: TokenStream, scope: Span) -> Result<Vec<Segment>> { let mut tokens = input.into_iter().peekable(); match &tokens.next() { Some(TokenTree::Punct(punct)) if punct.as_char() == '<' => {} Some(wrong) => return Err(Error::new(wrong.span(), "expected `<`")), None => return Err(Error::new(scope, "expected `[<... >]`")), } let mut segments = segment::parse(&mut tokens)?; match &tokens.next() { Some(TokenTree::Punct(punct)) if punct.as_char() == '>' => {} Some(wrong) => return Err(Error::new(wrong.span(), "expected `>`")), None => return Err(Error::new(scope, "expected `[<... >]`")), } if let Some(unexpected) = tokens.next() { return Err(Error::new( unexpected.span(), "unexpected input, expected `[<... >]`", )); } for segment in &mut segments { if let Segment::String(string) = segment { if string.value.starts_with("'\\u{") { let hex = &string.value[4..string.value.len() - 2]; if let Ok(unsigned) = u32::from_str_radix(hex, 16) { if let Some(ch) = char::from_u32(unsigned) { string.value.clear(); string.value.push(ch);
if string.value.contains(&['#', '\\', '.', '+'][..]) || string.value.starts_with("b'") || string.value.starts_with("b\"") || string.value.starts_with("br\"") { return Err(Error::new(string.span, "unsupported literal")); } let mut range = 0..string.value.len(); if string.value.starts_with("r\"") { range.start += 2; range.end -= 1; } else if string.value.starts_with(&['"', '\''][..]) { range.start += 1; range.end -= 1; } string.value = string.value[range].replace('-', "_"); } } Ok(segments) } fn pasted_to_tokens(mut pasted: String, span: Span) -> Result<TokenStream> { let mut tokens = TokenStream::new(); #[cfg(not(no_literal_fromstr))] { use proc_macro::{LexError, Literal}; use std::str::FromStr; if pasted.starts_with(|ch: char| ch.is_ascii_digit()) { let literal = match panic::catch_unwind(|| Literal::from_str(&pasted)) { Ok(Ok(literal)) => TokenTree::Literal(literal), Ok(Err(LexError {.. })) | Err(_) => { return Err(Error::new( span, &format!("`{:?}` is not a valid literal", pasted), )); } }; tokens.extend(iter::once(literal)); return Ok(tokens); } } if pasted.starts_with('\'') { let mut apostrophe = TokenTree::Punct(Punct::new('\'', Spacing::Joint)); apostrophe.set_span(span); tokens.extend(iter::once(apostrophe)); pasted.remove(0); } let ident = match panic::catch_unwind(|| Ident::new(&pasted, span)) { Ok(ident) => TokenTree::Ident(ident), Err(_) => { return Err(Error::new( span, &format!("`{:?}` is not a valid identifier", pasted), )); } }; tokens.extend(iter::once(ident)); Ok(tokens) }
continue; } } }
random_line_split
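The doc comments in this record cover identifier pasting with `[<... >]` and the case-conversion modifiers; a compact example combining both, per the documented `$var:snake:upper` composition:

```rust
use paste::paste;

macro_rules! make_flag {
    ($name:ident) => {
        paste! {
            // `MyFlag` -> snake_case `my_flag` -> upper `MY_FLAG`, then the
            // `_ENABLED` segment is pasted on, yielding `MY_FLAG_ENABLED`.
            pub const [<$name:snake:upper _ENABLED>]: bool = true;
        }
    };
}

make_flag!(MyFlag);

fn main() {
    assert!(MY_FLAG_ENABLED);
}
```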
lib.rs
//! [![github]](https://github.com/dtolnay/paste)&ensp;[![crates-io]](https://crates.io/crates/paste)&ensp;[![docs-rs]](https://docs.rs/paste) //! //! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github //! [crates-io]: https://img.shields.io/badge/crates.io-fc8d62?style=for-the-badge&labelColor=555555&logo=rust //! [docs-rs]: https://img.shields.io/badge/docs.rs-66c2a5?style=for-the-badge&labelColor=555555&logo=docs.rs //! //! <br> //! //! The nightly-only [`concat_idents!`] macro in the Rust standard library is //! notoriously underpowered in that its concatenated identifiers can only refer to //! existing items, they can never be used to define something new. //! //! [`concat_idents!`]: https://doc.rust-lang.org/std/macro.concat_idents.html //! //! This crate provides a flexible way to paste together identifiers in a macro, //! including using pasted identifiers to define new items. //! //! This approach works with any Rust compiler 1.31+. //! //! <br> //! //! # Pasting identifiers //! //! Within the `paste!` macro, identifiers inside `[<`...`>]` are pasted //! together to form a single identifier. //! //! ``` //! use paste::paste; //! //! paste! { //! // Defines a const called `QRST`. //! const [<Q R S T>]: &str = "success!"; //! } //! //! fn main() { //! assert_eq!( //! paste! { [<Q R S T>].len() }, //! 8, //! ); //! } //! ``` //! //! <br><br> //! //! # More elaborate example //! //! The next example shows a macro that generates accessor methods for some //! struct fields. It demonstrates how you might find it useful to bundle a //! paste invocation inside of a macro\_rules macro. //! //! ``` //! use paste::paste; //! //! macro_rules! make_a_struct_and_getters { //! ($name:ident { $($field:ident),* }) => { //! // Define a struct. This expands to: //! // //! // pub struct S { //! // a: String, //! // b: String, //! // c: String, //! // } //! pub struct $name { //! $( //! $field: String, //! )* //! } //! //! // Build an impl block with getters. This expands to: //! // //! // impl S { //! // pub fn get_a(&self) -> &str { &self.a } //! // pub fn get_b(&self) -> &str { &self.b } //! // pub fn get_c(&self) -> &str { &self.c } //! // } //! paste! { //! impl $name { //! $( //! pub fn [<get_ $field>](&self) -> &str { //! &self.$field //! } //! )* //! } //! } //! } //! } //! //! make_a_struct_and_getters!(S { a, b, c }); //! //! fn call_some_getters(s: &S) -> bool { //! s.get_a() == s.get_b() && s.get_c().is_empty() //! } //! # //! # fn main() {} //! ``` //! //! <br><br> //! //! # Case conversion //! //! Use `$var:lower` or `$var:upper` in the segment list to convert an //! interpolated segment to lower- or uppercase as part of the paste. For //! example, `[<ld_ $reg:lower _expr>]` would paste to `ld_bc_expr` if invoked //! with $reg=`Bc`. //! //! Use `$var:snake` to convert CamelCase input to snake\_case. //! Use `$var:camel` to convert snake\_case to CamelCase. //! These compose, so for example `$var:snake:upper` would give you SCREAMING\_CASE. //! //! The precise Unicode conversions are as defined by [`str::to_lowercase`] and //! [`str::to_uppercase`]. //! //! [`str::to_lowercase`]: https://doc.rust-lang.org/std/primitive.str.html#method.to_lowercase //! [`str::to_uppercase`]: https://doc.rust-lang.org/std/primitive.str.html#method.to_uppercase //! //! <br> //! //! # Pasting documentation strings //! //! Within the `paste!` macro, arguments to a #\[doc...\] attribute are //! 
implicitly concatenated together to form a coherent documentation string. //! //! ``` //! use paste::paste; //! //! macro_rules! method_new { //! ($ret:ident) => { //! paste! { //! #[doc = "Create a new `" $ret "` object."] //! pub fn new() -> $ret { todo!() } //! } //! }; //! } //! //! pub struct Paste {} //! //! method_new!(Paste); // expands to #[doc = "Create a new `Paste` object"] //! ``` #![doc(html_root_url = "https://docs.rs/paste/1.0.14")] #![allow( clippy::derive_partial_eq_without_eq, clippy::doc_markdown, clippy::match_same_arms, clippy::module_name_repetitions, clippy::needless_doctest_main, clippy::too_many_lines )] extern crate proc_macro; mod attr; mod error; mod segment; use crate::attr::expand_attr; use crate::error::{Error, Result}; use crate::segment::Segment; use proc_macro::{Delimiter, Group, Ident, Punct, Spacing, Span, TokenStream, TokenTree}; use std::char; use std::iter; use std::panic; #[proc_macro] pub fn paste(input: TokenStream) -> TokenStream { let mut contains_paste = false; let flatten_single_interpolation = true; match expand( input.clone(), &mut contains_paste, flatten_single_interpolation, ) { Ok(expanded) => { if contains_paste { expanded } else { input } } Err(err) => err.to_compile_error(), } } #[doc(hidden)] #[proc_macro] pub fn item(input: TokenStream) -> TokenStream { paste(input) } #[doc(hidden)] #[proc_macro] pub fn expr(input: TokenStream) -> TokenStream { paste(input) } fn expand( input: TokenStream, contains_paste: &mut bool, flatten_single_interpolation: bool, ) -> Result<TokenStream> { let mut expanded = TokenStream::new(); let mut lookbehind = Lookbehind::Other; let mut prev_none_group = None::<Group>; let mut tokens = input.into_iter().peekable(); loop { let token = tokens.next(); if let Some(group) = prev_none_group.take() { if match (&token, tokens.peek()) { (Some(TokenTree::Punct(fst)), Some(TokenTree::Punct(snd))) => { fst.as_char() == ':' && snd.as_char() == ':' && fst.spacing() == Spacing::Joint } _ => false, } { expanded.extend(group.stream()); *contains_paste = true; } else { expanded.extend(iter::once(TokenTree::Group(group))); } } match token { Some(TokenTree::Group(group)) => { let delimiter = group.delimiter(); let content = group.stream(); let span = group.span(); if delimiter == Delimiter::Bracket && is_paste_operation(&content) { let segments = parse_bracket_as_segments(content, span)?; let pasted = segment::paste(&segments)?; let tokens = pasted_to_tokens(pasted, span)?; expanded.extend(tokens); *contains_paste = true; } else if flatten_single_interpolation && delimiter == Delimiter::None && is_single_interpolation_group(&content) { expanded.extend(content); *contains_paste = true; } else { let mut group_contains_paste = false; let is_attribute = delimiter == Delimiter::Bracket && (lookbehind == Lookbehind::Pound || lookbehind == Lookbehind::PoundBang); let mut nested = expand( content, &mut group_contains_paste, flatten_single_interpolation &&!is_attribute, )?; if is_attribute { nested = expand_attr(nested, span, &mut group_contains_paste)?; } let group = if group_contains_paste { let mut group = Group::new(delimiter, nested); group.set_span(span); *contains_paste = true; group } else { group.clone() }; if delimiter!= Delimiter::None { expanded.extend(iter::once(TokenTree::Group(group))); } else if lookbehind == Lookbehind::DoubleColon { expanded.extend(group.stream()); *contains_paste = true; } else { prev_none_group = Some(group); } } lookbehind = Lookbehind::Other; } Some(TokenTree::Punct(punct)) => { lookbehind = match 
punct.as_char() { ':' if lookbehind == Lookbehind::JointColon => Lookbehind::DoubleColon, ':' if punct.spacing() == Spacing::Joint => Lookbehind::JointColon, '#' => Lookbehind::Pound, '!' if lookbehind == Lookbehind::Pound => Lookbehind::PoundBang, _ => Lookbehind::Other, }; expanded.extend(iter::once(TokenTree::Punct(punct))); } Some(other) => { lookbehind = Lookbehind::Other; expanded.extend(iter::once(other)); } None => return Ok(expanded), } } } #[derive(PartialEq)] enum Lookbehind { JointColon, DoubleColon, Pound, PoundBang, Other, } // https://github.com/dtolnay/paste/issues/26 fn is_single_interpolation_group(input: &TokenStream) -> bool { #[derive(PartialEq)] enum State { Init, Ident, Literal, Apostrophe, Lifetime, Colon1, Colon2, } let mut state = State::Init; for tt in input.clone() { state = match (state, &tt) { (State::Init, TokenTree::Ident(_)) => State::Ident, (State::Init, TokenTree::Literal(_)) => State::Literal, (State::Init, TokenTree::Punct(punct)) if punct.as_char() == '\'' => State::Apostrophe, (State::Apostrophe, TokenTree::Ident(_)) => State::Lifetime, (State::Ident, TokenTree::Punct(punct)) if punct.as_char() == ':' && punct.spacing() == Spacing::Joint => { State::Colon1 } (State::Colon1, TokenTree::Punct(punct)) if punct.as_char() == ':' && punct.spacing() == Spacing::Alone => { State::Colon2 } (State::Colon2, TokenTree::Ident(_)) => State::Ident, _ => return false, }; } state == State::Ident || state == State::Literal || state == State::Lifetime } fn is_paste_operation(input: &TokenStream) -> bool { let mut tokens = input.clone().into_iter(); match &tokens.next() { Some(TokenTree::Punct(punct)) if punct.as_char() == '<' => {} _ => return false, } let mut has_token = false; loop { match &tokens.next() { Some(TokenTree::Punct(punct)) if punct.as_char() == '>' => { return has_token && tokens.next().is_none(); } Some(_) => has_token = true, None => return false, } } } fn parse_bracket_as_segments(input: TokenStream, scope: Span) -> Result<Vec<Segment>> { let mut tokens = input.into_iter().peekable(); match &tokens.next() { Some(TokenTree::Punct(punct)) if punct.as_char() == '<' => {} Some(wrong) => return Err(Error::new(wrong.span(), "expected `<`")), None => return Err(Error::new(scope, "expected `[<... >]`")), } let mut segments = segment::parse(&mut tokens)?; match &tokens.next() { Some(TokenTree::Punct(punct)) if punct.as_char() == '>' => {} Some(wrong) => return Err(Error::new(wrong.span(), "expected `>`")), None => return Err(Error::new(scope, "expected `[<... >]`")), } if let Some(unexpected) = tokens.next() { return Err(Error::new( unexpected.span(), "unexpected input, expected `[<... 
>]`", )); } for segment in &mut segments { if let Segment::String(string) = segment { if string.value.starts_with("'\\u{") { let hex = &string.value[4..string.value.len() - 2]; if let Ok(unsigned) = u32::from_str_radix(hex, 16) { if let Some(ch) = char::from_u32(unsigned) { string.value.clear(); string.value.push(ch); continue; } } } if string.value.contains(&['#', '\\', '.', '+'][..]) || string.value.starts_with("b'") || string.value.starts_with("b\"") || string.value.starts_with("br\"") { return Err(Error::new(string.span, "unsupported literal")); } let mut range = 0..string.value.len(); if string.value.starts_with("r\"") { range.start += 2; range.end -= 1; } else if string.value.starts_with(&['"', '\''][..]) { range.start += 1; range.end -= 1; } string.value = string.value[range].replace('-', "_"); } } Ok(segments) } fn pasted_to_tokens(mut pasted: String, span: Span) -> Result<TokenStream>
} } if pasted.starts_with('\'') { let mut apostrophe = TokenTree::Punct(Punct::new('\'', Spacing::Joint)); apostrophe.set_span(span); tokens.extend(iter::once(apostrophe)); pasted.remove(0); } let ident = match panic::catch_unwind(|| Ident::new(&pasted, span)) { Ok(ident) => TokenTree::Ident(ident), Err(_) => { return Err(Error::new( span, &format!("`{:?}` is not a valid identifier", pasted), )); } }; tokens.extend(iter::once(ident)); Ok(tokens) }
{ let mut tokens = TokenStream::new(); #[cfg(not(no_literal_fromstr))] { use proc_macro::{LexError, Literal}; use std::str::FromStr; if pasted.starts_with(|ch: char| ch.is_ascii_digit()) { let literal = match panic::catch_unwind(|| Literal::from_str(&pasted)) { Ok(Ok(literal)) => TokenTree::Literal(literal), Ok(Err(LexError { .. })) | Err(_) => { return Err(Error::new( span, &format!("`{:?}` is not a valid literal", pasted), )); } }; tokens.extend(iter::once(literal)); return Ok(tokens);
identifier_body
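The `expand` loop in this record threads a one-token `Lookbehind` through the stream so it can treat `::`, `#`, and `#!` specially without backtracking. A toy version of the same idea over a plain character stream (everything here is illustrative, not the crate's API):

```rust
// Track one character of trailing context to recognize `::` path separators.
#[derive(PartialEq)]
enum Lookbehind {
    Colon,
    Other,
}

fn count_path_separators(src: &str) -> usize {
    let mut lookbehind = Lookbehind::Other;
    let mut count = 0;
    for ch in src.chars() {
        lookbehind = match ch {
            // The previous character was `:`, so this `:` completes a `::`.
            ':' if lookbehind == Lookbehind::Colon => {
                count += 1;
                Lookbehind::Other
            }
            ':' => Lookbehind::Colon,
            _ => Lookbehind::Other,
        };
    }
    count
}

fn main() {
    assert_eq!(count_path_separators("std::mem::swap"), 2);
}
```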
lib.rs
//! [![github]](https://github.com/dtolnay/paste)&ensp;[![crates-io]](https://crates.io/crates/paste)&ensp;[![docs-rs]](https://docs.rs/paste) //! //! [github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github //! [crates-io]: https://img.shields.io/badge/crates.io-fc8d62?style=for-the-badge&labelColor=555555&logo=rust //! [docs-rs]: https://img.shields.io/badge/docs.rs-66c2a5?style=for-the-badge&labelColor=555555&logo=docs.rs //! //! <br> //! //! The nightly-only [`concat_idents!`] macro in the Rust standard library is //! notoriously underpowered in that its concatenated identifiers can only refer to //! existing items, they can never be used to define something new. //! //! [`concat_idents!`]: https://doc.rust-lang.org/std/macro.concat_idents.html //! //! This crate provides a flexible way to paste together identifiers in a macro, //! including using pasted identifiers to define new items. //! //! This approach works with any Rust compiler 1.31+. //! //! <br> //! //! # Pasting identifiers //! //! Within the `paste!` macro, identifiers inside `[<`...`>]` are pasted //! together to form a single identifier. //! //! ``` //! use paste::paste; //! //! paste! { //! // Defines a const called `QRST`. //! const [<Q R S T>]: &str = "success!"; //! } //! //! fn main() { //! assert_eq!( //! paste! { [<Q R S T>].len() }, //! 8, //! ); //! } //! ``` //! //! <br><br> //! //! # More elaborate example //! //! The next example shows a macro that generates accessor methods for some //! struct fields. It demonstrates how you might find it useful to bundle a //! paste invocation inside of a macro\_rules macro. //! //! ``` //! use paste::paste; //! //! macro_rules! make_a_struct_and_getters { //! ($name:ident { $($field:ident),* }) => { //! // Define a struct. This expands to: //! // //! // pub struct S { //! // a: String, //! // b: String, //! // c: String, //! // } //! pub struct $name { //! $( //! $field: String, //! )* //! } //! //! // Build an impl block with getters. This expands to: //! // //! // impl S { //! // pub fn get_a(&self) -> &str { &self.a } //! // pub fn get_b(&self) -> &str { &self.b } //! // pub fn get_c(&self) -> &str { &self.c } //! // } //! paste! { //! impl $name { //! $( //! pub fn [<get_ $field>](&self) -> &str { //! &self.$field //! } //! )* //! } //! } //! } //! } //! //! make_a_struct_and_getters!(S { a, b, c }); //! //! fn call_some_getters(s: &S) -> bool { //! s.get_a() == s.get_b() && s.get_c().is_empty() //! } //! # //! # fn main() {} //! ``` //! //! <br><br> //! //! # Case conversion //! //! Use `$var:lower` or `$var:upper` in the segment list to convert an //! interpolated segment to lower- or uppercase as part of the paste. For //! example, `[<ld_ $reg:lower _expr>]` would paste to `ld_bc_expr` if invoked //! with $reg=`Bc`. //! //! Use `$var:snake` to convert CamelCase input to snake\_case. //! Use `$var:camel` to convert snake\_case to CamelCase. //! These compose, so for example `$var:snake:upper` would give you SCREAMING\_CASE. //! //! The precise Unicode conversions are as defined by [`str::to_lowercase`] and //! [`str::to_uppercase`]. //! //! [`str::to_lowercase`]: https://doc.rust-lang.org/std/primitive.str.html#method.to_lowercase //! [`str::to_uppercase`]: https://doc.rust-lang.org/std/primitive.str.html#method.to_uppercase //! //! <br> //! //! # Pasting documentation strings //! //! Within the `paste!` macro, arguments to a #\[doc...\] attribute are //! 
implicitly concatenated together to form a coherent documentation string. //! //! ``` //! use paste::paste; //! //! macro_rules! method_new { //! ($ret:ident) => { //! paste! { //! #[doc = "Create a new `" $ret "` object."] //! pub fn new() -> $ret { todo!() } //! } //! }; //! } //! //! pub struct Paste {} //! //! method_new!(Paste); // expands to #[doc = "Create a new `Paste` object"] //! ``` #![doc(html_root_url = "https://docs.rs/paste/1.0.14")] #![allow( clippy::derive_partial_eq_without_eq, clippy::doc_markdown, clippy::match_same_arms, clippy::module_name_repetitions, clippy::needless_doctest_main, clippy::too_many_lines )] extern crate proc_macro; mod attr; mod error; mod segment; use crate::attr::expand_attr; use crate::error::{Error, Result}; use crate::segment::Segment; use proc_macro::{Delimiter, Group, Ident, Punct, Spacing, Span, TokenStream, TokenTree}; use std::char; use std::iter; use std::panic; #[proc_macro] pub fn paste(input: TokenStream) -> TokenStream { let mut contains_paste = false; let flatten_single_interpolation = true; match expand( input.clone(), &mut contains_paste, flatten_single_interpolation, ) { Ok(expanded) => { if contains_paste { expanded } else { input } } Err(err) => err.to_compile_error(), } } #[doc(hidden)] #[proc_macro] pub fn item(input: TokenStream) -> TokenStream { paste(input) } #[doc(hidden)] #[proc_macro] pub fn expr(input: TokenStream) -> TokenStream { paste(input) } fn expand( input: TokenStream, contains_paste: &mut bool, flatten_single_interpolation: bool, ) -> Result<TokenStream> { let mut expanded = TokenStream::new(); let mut lookbehind = Lookbehind::Other; let mut prev_none_group = None::<Group>; let mut tokens = input.into_iter().peekable(); loop { let token = tokens.next(); if let Some(group) = prev_none_group.take() { if match (&token, tokens.peek()) { (Some(TokenTree::Punct(fst)), Some(TokenTree::Punct(snd))) => { fst.as_char() == ':' && snd.as_char() == ':' && fst.spacing() == Spacing::Joint } _ => false, } { expanded.extend(group.stream()); *contains_paste = true; } else { expanded.extend(iter::once(TokenTree::Group(group))); } } match token { Some(TokenTree::Group(group)) => { let delimiter = group.delimiter(); let content = group.stream(); let span = group.span(); if delimiter == Delimiter::Bracket && is_paste_operation(&content) { let segments = parse_bracket_as_segments(content, span)?; let pasted = segment::paste(&segments)?; let tokens = pasted_to_tokens(pasted, span)?; expanded.extend(tokens); *contains_paste = true; } else if flatten_single_interpolation && delimiter == Delimiter::None && is_single_interpolation_group(&content) { expanded.extend(content); *contains_paste = true; } else { let mut group_contains_paste = false; let is_attribute = delimiter == Delimiter::Bracket && (lookbehind == Lookbehind::Pound || lookbehind == Lookbehind::PoundBang); let mut nested = expand( content, &mut group_contains_paste, flatten_single_interpolation &&!is_attribute, )?; if is_attribute { nested = expand_attr(nested, span, &mut group_contains_paste)?; } let group = if group_contains_paste { let mut group = Group::new(delimiter, nested); group.set_span(span); *contains_paste = true; group } else { group.clone() }; if delimiter!= Delimiter::None { expanded.extend(iter::once(TokenTree::Group(group))); } else if lookbehind == Lookbehind::DoubleColon { expanded.extend(group.stream()); *contains_paste = true; } else { prev_none_group = Some(group); } } lookbehind = Lookbehind::Other; } Some(TokenTree::Punct(punct)) => { lookbehind = match 
punct.as_char() { ':' if lookbehind == Lookbehind::JointColon => Lookbehind::DoubleColon, ':' if punct.spacing() == Spacing::Joint => Lookbehind::JointColon, '#' => Lookbehind::Pound, '!' if lookbehind == Lookbehind::Pound => Lookbehind::PoundBang, _ => Lookbehind::Other, }; expanded.extend(iter::once(TokenTree::Punct(punct))); } Some(other) => { lookbehind = Lookbehind::Other; expanded.extend(iter::once(other)); } None => return Ok(expanded), } } } #[derive(PartialEq)] enum Lookbehind { JointColon, DoubleColon, Pound, PoundBang, Other, } // https://github.com/dtolnay/paste/issues/26 fn is_single_interpolation_group(input: &TokenStream) -> bool { #[derive(PartialEq)] enum
{ Init, Ident, Literal, Apostrophe, Lifetime, Colon1, Colon2, } let mut state = State::Init; for tt in input.clone() { state = match (state, &tt) { (State::Init, TokenTree::Ident(_)) => State::Ident, (State::Init, TokenTree::Literal(_)) => State::Literal, (State::Init, TokenTree::Punct(punct)) if punct.as_char() == '\'' => State::Apostrophe, (State::Apostrophe, TokenTree::Ident(_)) => State::Lifetime, (State::Ident, TokenTree::Punct(punct)) if punct.as_char() == ':' && punct.spacing() == Spacing::Joint => { State::Colon1 } (State::Colon1, TokenTree::Punct(punct)) if punct.as_char() == ':' && punct.spacing() == Spacing::Alone => { State::Colon2 } (State::Colon2, TokenTree::Ident(_)) => State::Ident, _ => return false, }; } state == State::Ident || state == State::Literal || state == State::Lifetime } fn is_paste_operation(input: &TokenStream) -> bool { let mut tokens = input.clone().into_iter(); match &tokens.next() { Some(TokenTree::Punct(punct)) if punct.as_char() == '<' => {} _ => return false, } let mut has_token = false; loop { match &tokens.next() { Some(TokenTree::Punct(punct)) if punct.as_char() == '>' => { return has_token && tokens.next().is_none(); } Some(_) => has_token = true, None => return false, } } } fn parse_bracket_as_segments(input: TokenStream, scope: Span) -> Result<Vec<Segment>> { let mut tokens = input.into_iter().peekable(); match &tokens.next() { Some(TokenTree::Punct(punct)) if punct.as_char() == '<' => {} Some(wrong) => return Err(Error::new(wrong.span(), "expected `<`")), None => return Err(Error::new(scope, "expected `[<... >]`")), } let mut segments = segment::parse(&mut tokens)?; match &tokens.next() { Some(TokenTree::Punct(punct)) if punct.as_char() == '>' => {} Some(wrong) => return Err(Error::new(wrong.span(), "expected `>`")), None => return Err(Error::new(scope, "expected `[<... >]`")), } if let Some(unexpected) = tokens.next() { return Err(Error::new( unexpected.span(), "unexpected input, expected `[<... >]`", )); } for segment in &mut segments { if let Segment::String(string) = segment { if string.value.starts_with("'\\u{") { let hex = &string.value[4..string.value.len() - 2]; if let Ok(unsigned) = u32::from_str_radix(hex, 16) { if let Some(ch) = char::from_u32(unsigned) { string.value.clear(); string.value.push(ch); continue; } } } if string.value.contains(&['#', '\\', '.', '+'][..]) || string.value.starts_with("b'") || string.value.starts_with("b\"") || string.value.starts_with("br\"") { return Err(Error::new(string.span, "unsupported literal")); } let mut range = 0..string.value.len(); if string.value.starts_with("r\"") { range.start += 2; range.end -= 1; } else if string.value.starts_with(&['"', '\''][..]) { range.start += 1; range.end -= 1; } string.value = string.value[range].replace('-', "_"); } } Ok(segments) } fn pasted_to_tokens(mut pasted: String, span: Span) -> Result<TokenStream> { let mut tokens = TokenStream::new(); #[cfg(not(no_literal_fromstr))] { use proc_macro::{LexError, Literal}; use std::str::FromStr; if pasted.starts_with(|ch: char| ch.is_ascii_digit()) { let literal = match panic::catch_unwind(|| Literal::from_str(&pasted)) { Ok(Ok(literal)) => TokenTree::Literal(literal), Ok(Err(LexError {.. 
})) | Err(_) => { return Err(Error::new( span, &format!("`{:?}` is not a valid literal", pasted), )); } }; tokens.extend(iter::once(literal)); return Ok(tokens); } } if pasted.starts_with('\'') { let mut apostrophe = TokenTree::Punct(Punct::new('\'', Spacing::Joint)); apostrophe.set_span(span); tokens.extend(iter::once(apostrophe)); pasted.remove(0); } let ident = match panic::catch_unwind(|| Ident::new(&pasted, span)) { Ok(ident) => TokenTree::Ident(ident), Err(_) => { return Err(Error::new( span, &format!("`{:?}` is not a valid identifier", pasted), )); } }; tokens.extend(iter::once(ident)); Ok(tokens) }
State
identifier_name
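The identifier restored by this record names the state machine inside `is_single_interpolation_group`, which accepts exactly one path (`a`, `a::b`), one literal, or one lifetime. A self-contained re-creation over a simplified token type; the `Tok` enum is an assumption standing in for `proc_macro::TokenTree`:

```rust
// Simplified stand-in for proc_macro::TokenTree; `PathSep` pre-merges a
// joint `:` followed by an alone `:`.
enum Tok {
    Ident,
    Literal,
    Apostrophe,
    PathSep,
}

fn is_single_interpolation(tokens: &[Tok]) -> bool {
    enum State {
        Init,
        Ident,
        Literal,
        Apostrophe,
        Lifetime,
        Sep,
    }
    let mut state = State::Init;
    for tok in tokens {
        state = match (state, tok) {
            (State::Init, Tok::Ident) => State::Ident,
            (State::Init, Tok::Literal) => State::Literal,
            (State::Init, Tok::Apostrophe) => State::Apostrophe,
            (State::Apostrophe, Tok::Ident) => State::Lifetime,
            (State::Ident, Tok::PathSep) => State::Sep,
            (State::Sep, Tok::Ident) => State::Ident,
            // Anything else (two idents in a row, a stray separator, ...)
            // means the group is not a single interpolation.
            _ => return false,
        };
    }
    matches!(state, State::Ident | State::Literal | State::Lifetime)
}

fn main() {
    assert!(is_single_interpolation(&[Tok::Ident, Tok::PathSep, Tok::Ident]));
    assert!(is_single_interpolation(&[Tok::Literal]));
    assert!(is_single_interpolation(&[Tok::Apostrophe, Tok::Ident]));
    assert!(!is_single_interpolation(&[Tok::Ident, Tok::Ident]));
}
```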
schedule.rs
// Copyright (c) 2016 DWANGO Co., Ltd. All Rights Reserved. // See the LICENSE file at the top-level directory of this distribution. use futures::{Async, Future, Poll}; use std::cell::RefCell; use std::collections::{HashMap, VecDeque}; use std::sync::atomic; use std::sync::mpsc as std_mpsc; use super::{FiberState, Spawn}; use crate::fiber::{self, Task}; use crate::io::poll; static NEXT_SCHEDULER_ID: atomic::AtomicUsize = atomic::AtomicUsize::new(0); thread_local! { static CURRENT_CONTEXT: RefCell<InnerContext> = { RefCell::new(InnerContext::new()) }; } type RequestSender = std_mpsc::Sender<Request>; type RequestReceiver = std_mpsc::Receiver<Request>; /// The identifier of a scheduler. pub type SchedulerId = usize; /// Scheduler of spawned fibers. /// /// The scheduler manages the state of spawned fibers. /// If a fiber is in a runnable state (e.g., not waiting for I/O events), /// the scheduler will push the fiber into its run queue. /// When the `run_once` method is called, the first fiber (i.e., future) in the queue /// will be popped and executed (i.e., the `Future::poll` method is called). /// If the future of a fiber moves to the readied state, /// it will be removed from the scheduler. /// For efficiency reasons, it is recommended to run a scheduler on a dedicated thread. #[derive(Debug)] pub struct Scheduler { scheduler_id: SchedulerId, next_fiber_id: fiber::FiberId, fibers: HashMap<fiber::FiberId, fiber::FiberState>, run_queue: VecDeque<fiber::FiberId>, request_tx: RequestSender, request_rx: RequestReceiver, poller: poll::PollerHandle, } impl Scheduler { /// Creates a new scheduler instance. pub fn new(poller: poll::PollerHandle) -> Self { let (request_tx, request_rx) = std_mpsc::channel(); Scheduler { scheduler_id: NEXT_SCHEDULER_ID.fetch_add(1, atomic::Ordering::SeqCst), next_fiber_id: 0, fibers: HashMap::new(), run_queue: VecDeque::new(), request_tx, request_rx, poller, } } /// Returns the identifier of this scheduler. pub fn scheduler_id(&self) -> SchedulerId { self.scheduler_id } /// Returns the length of the run queue of this scheduler. pub fn run_queue_len(&self) -> usize { self.run_queue.len() } /// Returns the count of alive fibers (i.e., not readied futures) in this scheduler. pub fn fiber_count(&self) -> usize { self.fibers.len() } /// Returns a handle of this scheduler. pub fn handle(&self) -> SchedulerHandle { SchedulerHandle { request_tx: self.request_tx.clone(), } } /// Runs one unit of work. pub fn run_once(&mut self, block_if_idle: bool) { let mut did_something = false; loop { // Request match self.request_rx.try_recv() { Err(std_mpsc::TryRecvError::Empty) => {} Err(std_mpsc::TryRecvError::Disconnected) => unreachable!(), Ok(request) => { did_something = true; self.handle_request(request); } } // Task if let Some(fiber_id) = self.next_runnable() { did_something = true; self.run_fiber(fiber_id); } if !block_if_idle || did_something { break; } let request = self.request_rx.recv().expect("must succeed"); did_something = true; self.handle_request(request); } } fn handle_request(&mut self, request: Request) { match request { Request::Spawn(task) => self.spawn_fiber(task), Request::WakeUp(fiber_id) => { if self.fibers.contains_key(&fiber_id) { self.schedule(fiber_id); } } } } fn spawn_fiber(&mut self, task: Task) { let fiber_id = self.next_fiber_id(); self.fibers .insert(fiber_id, fiber::FiberState::new(fiber_id, task)); self.schedule(fiber_id); } fn run_fiber(&mut self, fiber_id: fiber::FiberId) {
let is_runnable = { CURRENT_CONTEXT.with(|context| { let mut context = context.borrow_mut(); if context .scheduler .as_ref() .map_or(true, |s| s.id != self.scheduler_id) { context.switch(self); } { let scheduler = assert_some!(context.scheduler.as_mut()); if !scheduler.poller.is_alive() { // TODO: Return `Err(io::Error)` to caller and // handle the error in upper layers panic!("Poller is down"); } } assert!(context.fiber.is_none(), "Nested schedulers"); let fiber = assert_some!(self.fibers.get_mut(&fiber_id)); context.fiber = Some(fiber as _); }); let fiber = assert_some!(self.fibers.get_mut(&fiber_id)); finished = fiber.run_once(); CURRENT_CONTEXT.with(|context| { context.borrow_mut().fiber = None; }); fiber.is_runnable() }; if finished { self.fibers.remove(&fiber_id); } else if is_runnable { self.schedule(fiber_id); } } fn next_fiber_id(&mut self) -> fiber::FiberId { loop { let id = self.next_fiber_id; self.next_fiber_id = id.wrapping_add(1); if !self.fibers.contains_key(&id) { return id; } } } fn schedule(&mut self, fiber_id: fiber::FiberId) { let fiber = assert_some!(self.fibers.get_mut(&fiber_id)); if !fiber.in_run_queue { self.run_queue.push_back(fiber_id); fiber.in_run_queue = true; } } fn next_runnable(&mut self) -> Option<fiber::FiberId> { while let Some(fiber_id) = self.run_queue.pop_front() { if let Some(fiber) = self.fibers.get_mut(&fiber_id) { fiber.in_run_queue = false; return Some(fiber_id); } } None } } /// A handle of a scheduler. #[derive(Debug, Clone)] pub struct SchedulerHandle { request_tx: RequestSender, } impl SchedulerHandle { /// Wakes up a specified fiber in the scheduler. /// /// This forces the fiber to be pushed to the run queue of the scheduler. pub fn wakeup(&self, fiber_id: fiber::FiberId) { let _ = self.request_tx.send(Request::WakeUp(fiber_id)); } } impl Spawn for SchedulerHandle { fn spawn_boxed(&self, fiber: Box<dyn Future<Item = (), Error = ()> + Send>) { let _ = self.request_tx.send(Request::Spawn(Task(fiber))); } } #[derive(Debug)] pub struct CurrentScheduler { pub id: SchedulerId, pub handle: SchedulerHandle, pub poller: poll::PollerHandle, } /// Calls `f` with the current execution context. /// /// If this function is called outside of a fiber, it ignores `f` and returns `None`. pub fn with_current_context<F, T>(f: F) -> Option<T> where F: FnOnce(Context) -> T, { CURRENT_CONTEXT.with(|inner_context| inner_context.borrow_mut().as_context().map(f)) } /// The execution context of the currently running fiber. #[derive(Debug)] pub struct Context<'a> { scheduler: &'a mut CurrentScheduler, fiber: &'a mut FiberState, } impl<'a> Context<'a> { /// Returns the identifier of the current execution context. pub fn context_id(&self) -> super::ContextId { (self.scheduler.id, self.fiber.fiber_id) } /// Parks the current fiber. pub fn park(&mut self) -> super::Unpark { self.fiber .park(self.scheduler.id, self.scheduler.handle.clone()) } /// Returns the I/O event poller for this context. pub fn poller(&mut self) -> &mut poll::PollerHandle { &mut self.scheduler.poller } } /// Cooperatively gives up a poll for the current future (fiber). 
/// /// # Examples /// /// ``` /// # extern crate fibers; /// # extern crate futures; /// use fibers::{fiber, Executor, InPlaceExecutor, Spawn}; /// use futures::{Future, Async, Poll}; /// /// struct HeavyCalculation { /// polled_count: usize, /// loops: usize /// } /// impl HeavyCalculation { /// fn new(loop_count: usize) -> Self { /// HeavyCalculation { polled_count: 0, loops: loop_count } /// } /// } /// impl Future for HeavyCalculation { /// type Item = usize; /// type Error = (); /// fn poll(&mut self) -> Poll<Self::Item, Self::Error> { /// self.polled_count += 1; /// /// let mut per_poll_loop_limit = 10; /// while self.loops > 0 { /// self.loops -= 1; /// per_poll_loop_limit -= 1; /// if per_poll_loop_limit == 0 { /// // Suspends calculation and gives execution to other fibers. /// return fiber::yield_poll(); /// } /// } /// Ok(Async::Ready(self.polled_count)) /// } /// } /// /// let mut executor = InPlaceExecutor::new().unwrap(); /// let monitor = executor.spawn_monitor(HeavyCalculation::new(100)); /// let result = executor.run_fiber(monitor).unwrap(); /// assert_eq!(result, Ok(11)); /// ``` pub fn yield_poll<T, E>() -> Poll<T, E> { with_current_context(|context| context.fiber.yield_once()); Ok(Async::NotReady) } // TODO: rename #[derive(Debug)] struct InnerContext { pub scheduler: Option<CurrentScheduler>, fiber: Option<*mut FiberState>, } impl InnerContext { fn new() -> Self { InnerContext { scheduler: None, fiber: None, } } pub fn switch(&mut self, scheduler: &Scheduler) { self.scheduler = Some(CurrentScheduler { id: scheduler.scheduler_id, handle: scheduler.handle(), poller: scheduler.poller.clone(), }) } pub fn as_context(&mut self) -> Option<Context> { if let Some(scheduler) = self.scheduler.as_mut() { if let Some(fiber) = self.fiber { let fiber = unsafe { &mut *fiber }; return Some(Context { scheduler, fiber }); } } None } } #[derive(Debug)] enum Request { Spawn(Task), WakeUp(fiber::FiberId), }
let finished;
random_line_split
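In practice the scheduler above is driven through the crate's executor front end rather than by calling `Scheduler::run_once` by hand. A sketch built from the same `InPlaceExecutor` API used in the record's doc example; treat the exact trait bounds and method set as assumptions:

```rust
extern crate fibers;
extern crate futures;

use fibers::{Executor, InPlaceExecutor, Spawn};

fn main() {
    // The in-place executor owns a scheduler and polls it on this thread.
    let mut executor = InPlaceExecutor::new().unwrap();

    // Spawn a trivial fiber and keep a monitor for its result.
    let monitor = executor.spawn_monitor(futures::future::ok::<u32, ()>(42));

    // `run_fiber` keeps running scheduler iterations until the fiber ends.
    let result = executor.run_fiber(monitor).unwrap();
    assert_eq!(result, Ok(42));
}
```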
schedule.rs
// Copyright (c) 2016 DWANGO Co., Ltd. All Rights Reserved. // See the LICENSE file at the top-level directory of this distribution. use futures::{Async, Future, Poll}; use std::cell::RefCell; use std::collections::{HashMap, VecDeque}; use std::sync::atomic; use std::sync::mpsc as std_mpsc; use super::{FiberState, Spawn}; use crate::fiber::{self, Task}; use crate::io::poll; static NEXT_SCHEDULER_ID: atomic::AtomicUsize = atomic::AtomicUsize::new(0); thread_local! { static CURRENT_CONTEXT: RefCell<InnerContext> = { RefCell::new(InnerContext::new()) }; } type RequestSender = std_mpsc::Sender<Request>; type RequestReceiver = std_mpsc::Receiver<Request>; /// The identifier of a scheduler. pub type SchedulerId = usize; /// Scheduler of spawned fibers. /// /// The scheduler manages the state of spawned fibers. /// If a fiber is in a runnable state (e.g., not waiting for I/O events), /// the scheduler will push the fiber into its run queue. /// When the `run_once` method is called, the first fiber (i.e., future) in the queue /// will be popped and executed (i.e., the `Future::poll` method is called). /// If the future of a fiber moves to the readied state, /// it will be removed from the scheduler. /// For efficiency reasons, it is recommended to run a scheduler on a dedicated thread. #[derive(Debug)] pub struct Scheduler { scheduler_id: SchedulerId, next_fiber_id: fiber::FiberId, fibers: HashMap<fiber::FiberId, fiber::FiberState>, run_queue: VecDeque<fiber::FiberId>, request_tx: RequestSender, request_rx: RequestReceiver, poller: poll::PollerHandle, } impl Scheduler { /// Creates a new scheduler instance. pub fn new(poller: poll::PollerHandle) -> Self { let (request_tx, request_rx) = std_mpsc::channel(); Scheduler { scheduler_id: NEXT_SCHEDULER_ID.fetch_add(1, atomic::Ordering::SeqCst), next_fiber_id: 0, fibers: HashMap::new(), run_queue: VecDeque::new(), request_tx, request_rx, poller, } } /// Returns the identifier of this scheduler. pub fn scheduler_id(&self) -> SchedulerId { self.scheduler_id } /// Returns the length of the run queue of this scheduler. pub fn run_queue_len(&self) -> usize { self.run_queue.len() } /// Returns the count of alive fibers (i.e., not readied futures) in this scheduler. pub fn fiber_count(&self) -> usize { self.fibers.len() } /// Returns a handle of this scheduler. pub fn handle(&self) -> SchedulerHandle { SchedulerHandle { request_tx: self.request_tx.clone(), } } /// Runs one unit of work. pub fn run_once(&mut self, block_if_idle: bool) { let mut did_something = false; loop { // Request match self.request_rx.try_recv() { Err(std_mpsc::TryRecvError::Empty) =>
Err(std_mpsc::TryRecvError::Disconnected) => unreachable!(), Ok(request) => { did_something = true; self.handle_request(request); } } // Task if let Some(fiber_id) = self.next_runnable() { did_something = true; self.run_fiber(fiber_id); } if !block_if_idle || did_something { break; } let request = self.request_rx.recv().expect("must succeed"); did_something = true; self.handle_request(request); } } fn handle_request(&mut self, request: Request) { match request { Request::Spawn(task) => self.spawn_fiber(task), Request::WakeUp(fiber_id) => { if self.fibers.contains_key(&fiber_id) { self.schedule(fiber_id); } } } } fn spawn_fiber(&mut self, task: Task) { let fiber_id = self.next_fiber_id(); self.fibers .insert(fiber_id, fiber::FiberState::new(fiber_id, task)); self.schedule(fiber_id); } fn run_fiber(&mut self, fiber_id: fiber::FiberId) { let finished; let is_runnable = { CURRENT_CONTEXT.with(|context| { let mut context = context.borrow_mut(); if context .scheduler .as_ref() .map_or(true, |s| s.id != self.scheduler_id) { context.switch(self); } { let scheduler = assert_some!(context.scheduler.as_mut()); if !scheduler.poller.is_alive() { // TODO: Return `Err(io::Error)` to caller and // handle the error in upper layers panic!("Poller is down"); } } assert!(context.fiber.is_none(), "Nested schedulers"); let fiber = assert_some!(self.fibers.get_mut(&fiber_id)); context.fiber = Some(fiber as _); }); let fiber = assert_some!(self.fibers.get_mut(&fiber_id)); finished = fiber.run_once(); CURRENT_CONTEXT.with(|context| { context.borrow_mut().fiber = None; }); fiber.is_runnable() }; if finished { self.fibers.remove(&fiber_id); } else if is_runnable { self.schedule(fiber_id); } } fn next_fiber_id(&mut self) -> fiber::FiberId { loop { let id = self.next_fiber_id; self.next_fiber_id = id.wrapping_add(1); if !self.fibers.contains_key(&id) { return id; } } } fn schedule(&mut self, fiber_id: fiber::FiberId) { let fiber = assert_some!(self.fibers.get_mut(&fiber_id)); if !fiber.in_run_queue { self.run_queue.push_back(fiber_id); fiber.in_run_queue = true; } } fn next_runnable(&mut self) -> Option<fiber::FiberId> { while let Some(fiber_id) = self.run_queue.pop_front() { if let Some(fiber) = self.fibers.get_mut(&fiber_id) { fiber.in_run_queue = false; return Some(fiber_id); } } None } } /// A handle of a scheduler. #[derive(Debug, Clone)] pub struct SchedulerHandle { request_tx: RequestSender, } impl SchedulerHandle { /// Wakes up a specified fiber in the scheduler. /// /// This forces the fiber to be pushed to the run queue of the scheduler. pub fn wakeup(&self, fiber_id: fiber::FiberId) { let _ = self.request_tx.send(Request::WakeUp(fiber_id)); } } impl Spawn for SchedulerHandle { fn spawn_boxed(&self, fiber: Box<dyn Future<Item = (), Error = ()> + Send>) { let _ = self.request_tx.send(Request::Spawn(Task(fiber))); } } #[derive(Debug)] pub struct CurrentScheduler { pub id: SchedulerId, pub handle: SchedulerHandle, pub poller: poll::PollerHandle, } /// Calls `f` with the current execution context. /// /// If this function is called outside of a fiber, it ignores `f` and returns `None`. pub fn with_current_context<F, T>(f: F) -> Option<T> where F: FnOnce(Context) -> T, { CURRENT_CONTEXT.with(|inner_context| inner_context.borrow_mut().as_context().map(f)) } /// The execution context of the currently running fiber. #[derive(Debug)] pub struct Context<'a> { scheduler: &'a mut CurrentScheduler, fiber: &'a mut FiberState, } impl<'a> Context<'a> { /// Returns the identifier of the current execution context. 
pub fn context_id(&self) -> super::ContextId { (self.scheduler.id, self.fiber.fiber_id) } /// Parks the current fiber. pub fn park(&mut self) -> super::Unpark { self.fiber .park(self.scheduler.id, self.scheduler.handle.clone()) } /// Returns the I/O event poller for this context. pub fn poller(&mut self) -> &mut poll::PollerHandle { &mut self.scheduler.poller } } /// Cooperatively gives up a poll for the current future (fiber). /// /// # Examples /// /// ``` /// # extern crate fibers; /// # extern crate futures; /// use fibers::{fiber, Executor, InPlaceExecutor, Spawn}; /// use futures::{Future, Async, Poll}; /// /// struct HeavyCalculation { /// polled_count: usize, /// loops: usize /// } /// impl HeavyCalculation { /// fn new(loop_count: usize) -> Self { /// HeavyCalculation { polled_count: 0, loops: loop_count } /// } /// } /// impl Future for HeavyCalculation { /// type Item = usize; /// type Error = (); /// fn poll(&mut self) -> Poll<Self::Item, Self::Error> { /// self.polled_count += 1; /// /// let mut per_poll_loop_limit = 10; /// while self.loops > 0 { /// self.loops -= 1; /// per_poll_loop_limit -= 1; /// if per_poll_loop_limit == 0 { /// // Suspends calculation and gives execution to other fibers. /// return fiber::yield_poll(); /// } /// } /// Ok(Async::Ready(self.polled_count)) /// } /// } /// /// let mut executor = InPlaceExecutor::new().unwrap(); /// let monitor = executor.spawn_monitor(HeavyCalculation::new(100)); /// let result = executor.run_fiber(monitor).unwrap(); /// assert_eq!(result, Ok(11)); /// ``` pub fn yield_poll<T, E>() -> Poll<T, E> { with_current_context(|context| context.fiber.yield_once()); Ok(Async::NotReady) } // TODO: rename #[derive(Debug)] struct InnerContext { pub scheduler: Option<CurrentScheduler>, fiber: Option<*mut FiberState>, } impl InnerContext { fn new() -> Self { InnerContext { scheduler: None, fiber: None, } } pub fn switch(&mut self, scheduler: &Scheduler) { self.scheduler = Some(CurrentScheduler { id: scheduler.scheduler_id, handle: scheduler.handle(), poller: scheduler.poller.clone(), }) } pub fn as_context(&mut self) -> Option<Context> { if let Some(scheduler) = self.scheduler.as_mut() { if let Some(fiber) = self.fiber { let fiber = unsafe { &mut *fiber }; return Some(Context { scheduler, fiber }); } } None } } #[derive(Debug)] enum Request { Spawn(Task), WakeUp(fiber::FiberId), }
{}
conditional_block
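The `{}` body restored by this record is the `Err(TryRecvError::Empty)` arm of `run_once`: an empty mailbox is not an error, it just means there is no request to handle this iteration. The same non-blocking drain idiom with plain `std::sync::mpsc`:

```rust
use std::sync::mpsc;

fn main() {
    let (tx, rx) = mpsc::channel();
    tx.send("wake up fiber 3").unwrap();
    tx.send("spawn task").unwrap();

    loop {
        match rx.try_recv() {
            Ok(request) => println!("handling: {}", request),
            // Nothing queued right now; go do other work instead of blocking.
            Err(mpsc::TryRecvError::Empty) => break,
            // The scheduler marks this arm `unreachable!()` because it keeps
            // its own sender alive; here `tx` is still in scope too.
            Err(mpsc::TryRecvError::Disconnected) => unreachable!(),
        }
    }
}
```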
schedule.rs
// Copyright (c) 2016 DWANGO Co., Ltd. All Rights Reserved. // See the LICENSE file at the top-level directory of this distribution. use futures::{Async, Future, Poll}; use std::cell::RefCell; use std::collections::{HashMap, VecDeque}; use std::sync::atomic; use std::sync::mpsc as std_mpsc; use super::{FiberState, Spawn}; use crate::fiber::{self, Task}; use crate::io::poll; static NEXT_SCHEDULER_ID: atomic::AtomicUsize = atomic::AtomicUsize::new(0); thread_local! { static CURRENT_CONTEXT: RefCell<InnerContext> = { RefCell::new(InnerContext::new()) }; } type RequestSender = std_mpsc::Sender<Request>; type RequestReceiver = std_mpsc::Receiver<Request>; /// The identifier of a scheduler. pub type SchedulerId = usize; /// Scheduler of spawned fibers. /// /// The scheduler manages the state of spawned fibers. /// If a fiber is in a runnable state (e.g., not waiting for I/O events), /// the scheduler will push the fiber into its run queue. /// When the `run_once` method is called, the first fiber (i.e., future) in the queue /// will be popped and executed (i.e., the `Future::poll` method is called). /// If the future of a fiber moves to the readied state, /// it will be removed from the scheduler. /// For efficiency reasons, it is recommended to run a scheduler on a dedicated thread. #[derive(Debug)] pub struct Scheduler { scheduler_id: SchedulerId, next_fiber_id: fiber::FiberId, fibers: HashMap<fiber::FiberId, fiber::FiberState>, run_queue: VecDeque<fiber::FiberId>, request_tx: RequestSender, request_rx: RequestReceiver, poller: poll::PollerHandle, } impl Scheduler { /// Creates a new scheduler instance. pub fn new(poller: poll::PollerHandle) -> Self { let (request_tx, request_rx) = std_mpsc::channel(); Scheduler { scheduler_id: NEXT_SCHEDULER_ID.fetch_add(1, atomic::Ordering::SeqCst), next_fiber_id: 0, fibers: HashMap::new(), run_queue: VecDeque::new(), request_tx, request_rx, poller, } } /// Returns the identifier of this scheduler. pub fn scheduler_id(&self) -> SchedulerId { self.scheduler_id } /// Returns the length of the run queue of this scheduler. pub fn run_queue_len(&self) -> usize { self.run_queue.len() } /// Returns the count of alive fibers (i.e., not readied futures) in this scheduler. pub fn fiber_count(&self) -> usize { self.fibers.len() } /// Returns a handle of this scheduler. pub fn handle(&self) -> SchedulerHandle { SchedulerHandle { request_tx: self.request_tx.clone(), } } /// Runs one unit of work. 
pub fn run_once(&mut self, block_if_idle: bool) { let mut did_something = false; loop { // Request match self.request_rx.try_recv() { Err(std_mpsc::TryRecvError::Empty) => {} Err(std_mpsc::TryRecvError::Disconnected) => unreachable!(), Ok(request) => { did_something = true; self.handle_request(request); } } // Task if let Some(fiber_id) = self.next_runnable() { did_something = true; self.run_fiber(fiber_id); } if !block_if_idle || did_something { break; } let request = self.request_rx.recv().expect("must succeed"); did_something = true; self.handle_request(request); } } fn handle_request(&mut self, request: Request) { match request { Request::Spawn(task) => self.spawn_fiber(task), Request::WakeUp(fiber_id) => { if self.fibers.contains_key(&fiber_id) { self.schedule(fiber_id); } } } } fn spawn_fiber(&mut self, task: Task) { let fiber_id = self.next_fiber_id(); self.fibers .insert(fiber_id, fiber::FiberState::new(fiber_id, task)); self.schedule(fiber_id); } fn run_fiber(&mut self, fiber_id: fiber::FiberId) { let finished; let is_runnable = { CURRENT_CONTEXT.with(|context| { let mut context = context.borrow_mut(); if context .scheduler .as_ref() .map_or(true, |s| s.id != self.scheduler_id) { context.switch(self); } { let scheduler = assert_some!(context.scheduler.as_mut()); if !scheduler.poller.is_alive() { // TODO: Return `Err(io::Error)` to caller and // handle the error in upper layers panic!("Poller is down"); } } assert!(context.fiber.is_none(), "Nested schedulers"); let fiber = assert_some!(self.fibers.get_mut(&fiber_id)); context.fiber = Some(fiber as _); }); let fiber = assert_some!(self.fibers.get_mut(&fiber_id)); finished = fiber.run_once(); CURRENT_CONTEXT.with(|context| { context.borrow_mut().fiber = None; }); fiber.is_runnable() }; if finished { self.fibers.remove(&fiber_id); } else if is_runnable { self.schedule(fiber_id); } } fn next_fiber_id(&mut self) -> fiber::FiberId { loop { let id = self.next_fiber_id; self.next_fiber_id = id.wrapping_add(1); if !self.fibers.contains_key(&id) { return id; } } } fn schedule(&mut self, fiber_id: fiber::FiberId) { let fiber = assert_some!(self.fibers.get_mut(&fiber_id)); if !fiber.in_run_queue { self.run_queue.push_back(fiber_id); fiber.in_run_queue = true; } } fn next_runnable(&mut self) -> Option<fiber::FiberId> { while let Some(fiber_id) = self.run_queue.pop_front() { if let Some(fiber) = self.fibers.get_mut(&fiber_id) { fiber.in_run_queue = false; return Some(fiber_id); } } None } } /// A handle of a scheduler. #[derive(Debug, Clone)] pub struct SchedulerHandle { request_tx: RequestSender, } impl SchedulerHandle { /// Wakes up a specified fiber in the scheduler. /// /// This forces the fiber to be pushed to the run queue of the scheduler. pub fn wakeup(&self, fiber_id: fiber::FiberId) { let _ = self.request_tx.send(Request::WakeUp(fiber_id)); } } impl Spawn for SchedulerHandle { fn spawn_boxed(&self, fiber: Box<dyn Future<Item = (), Error = ()> + Send>) { let _ = self.request_tx.send(Request::Spawn(Task(fiber))); } } #[derive(Debug)] pub struct CurrentScheduler { pub id: SchedulerId, pub handle: SchedulerHandle, pub poller: poll::PollerHandle, } /// Calls `f` with the current execution context. /// /// If this function is called outside of a fiber, it ignores `f` and returns `None`. pub fn with_current_context<F, T>(f: F) -> Option<T> where F: FnOnce(Context) -> T, { CURRENT_CONTEXT.with(|inner_context| inner_context.borrow_mut().as_context().map(f)) } /// The execution context of the currently running fiber. 
#[derive(Debug)] pub struct
<'a> { scheduler: &'a mut CurrentScheduler, fiber: &'a mut FiberState, } impl<'a> Context<'a> { /// Returns the identifier of the current execution context. pub fn context_id(&self) -> super::ContextId { (self.scheduler.id, self.fiber.fiber_id) } /// Parks the current fiber. pub fn park(&mut self) -> super::Unpark { self.fiber .park(self.scheduler.id, self.scheduler.handle.clone()) } /// Returns the I/O event poller for this context. pub fn poller(&mut self) -> &mut poll::PollerHandle { &mut self.scheduler.poller } } /// Cooperatively gives up a poll for the current future (fiber). /// /// # Examples /// /// ``` /// # extern crate fibers; /// # extern crate futures; /// use fibers::{fiber, Executor, InPlaceExecutor, Spawn}; /// use futures::{Future, Async, Poll}; /// /// struct HeavyCalculation { ///     polled_count: usize, ///     loops: usize /// } /// impl HeavyCalculation { ///     fn new(loop_count: usize) -> Self { ///         HeavyCalculation { polled_count: 0, loops: loop_count } ///     } /// } /// impl Future for HeavyCalculation { ///     type Item = usize; ///     type Error = (); ///     fn poll(&mut self) -> Poll<Self::Item, Self::Error> { ///         self.polled_count += 1; /// ///         let mut per_poll_loop_limit = 10; ///         while self.loops > 0 { ///             self.loops -= 1; ///             per_poll_loop_limit -= 1; ///             if per_poll_loop_limit == 0 { ///                 // Suspends calculation and gives execution to other fibers. ///                 return fiber::yield_poll(); ///             } ///         } ///         Ok(Async::Ready(self.polled_count)) ///     } /// } /// /// let mut executor = InPlaceExecutor::new().unwrap(); /// let monitor = executor.spawn_monitor(HeavyCalculation::new(100)); /// let result = executor.run_fiber(monitor).unwrap(); /// assert_eq!(result, Ok(11)); /// ``` pub fn yield_poll<T, E>() -> Poll<T, E> { with_current_context(|context| context.fiber.yield_once()); Ok(Async::NotReady) } // TODO: rename #[derive(Debug)] struct InnerContext { pub scheduler: Option<CurrentScheduler>, fiber: Option<*mut FiberState>, } impl InnerContext { fn new() -> Self { InnerContext { scheduler: None, fiber: None, } } pub fn switch(&mut self, scheduler: &Scheduler) { self.scheduler = Some(CurrentScheduler { id: scheduler.scheduler_id, handle: scheduler.handle(), poller: scheduler.poller.clone(), }) } pub fn as_context(&mut self) -> Option<Context> { if let Some(scheduler) = self.scheduler.as_mut() { if let Some(fiber) = self.fiber { let fiber = unsafe { &mut *fiber }; return Some(Context { scheduler, fiber }); } } None } } #[derive(Debug)] enum Request { Spawn(Task), WakeUp(fiber::FiberId), }
Context
identifier_name
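`Context`, the identifier recovered here, is only ever handed out through the `CURRENT_CONTEXT` thread-local. A stripped-down sketch of that access pattern, with a `u64` standing in for the scheduler and fiber state (all names here are illustrative):

```rust
use std::cell::RefCell;

thread_local! {
    // Stand-in for CURRENT_CONTEXT: empty outside a "fiber".
    static CURRENT: RefCell<Option<u64>> = RefCell::new(None);
}

/// Runs `f` with the current context value, or returns `None` when no
/// context is installed (the analogue of calling from outside a fiber).
fn with_current<T, F: FnOnce(u64) -> T>(f: F) -> Option<T> {
    CURRENT.with(|cell| {
        let current = *cell.borrow();
        current.map(f)
    })
}

fn main() {
    assert_eq!(with_current(|id| id + 1), None);
    CURRENT.with(|cell| *cell.borrow_mut() = Some(7));
    assert_eq!(with_current(|id| id + 1), Some(8));
}
```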
schedule.rs
// Copyright (c) 2016 DWANGO Co., Ltd. All Rights Reserved. // See the LICENSE file at the top-level directory of this distribution. use futures::{Async, Future, Poll}; use std::cell::RefCell; use std::collections::{HashMap, VecDeque}; use std::sync::atomic; use std::sync::mpsc as std_mpsc; use super::{FiberState, Spawn}; use crate::fiber::{self, Task}; use crate::io::poll; static NEXT_SCHEDULER_ID: atomic::AtomicUsize = atomic::AtomicUsize::new(0); thread_local! { static CURRENT_CONTEXT: RefCell<InnerContext> = { RefCell::new(InnerContext::new()) }; } type RequestSender = std_mpsc::Sender<Request>; type RequestReceiver = std_mpsc::Receiver<Request>; /// The identifier of a scheduler. pub type SchedulerId = usize; /// Scheduler of spawned fibers. /// /// The scheduler manages the state of spawned fibers. /// If a fiber is in a runnable state (e.g., not waiting for I/O events), /// the scheduler will push the fiber into its run queue. /// When the `run_once` method is called, the first fiber (i.e., future) in the queue /// will be popped and executed (i.e., the `Future::poll` method is called). /// If the future of a fiber moves to the readied state, /// it will be removed from the scheduler. /// For efficiency reasons, it is recommended to run a scheduler on a dedicated thread. #[derive(Debug)] pub struct Scheduler { scheduler_id: SchedulerId, next_fiber_id: fiber::FiberId, fibers: HashMap<fiber::FiberId, fiber::FiberState>, run_queue: VecDeque<fiber::FiberId>, request_tx: RequestSender, request_rx: RequestReceiver, poller: poll::PollerHandle, } impl Scheduler { /// Creates a new scheduler instance. pub fn new(poller: poll::PollerHandle) -> Self { let (request_tx, request_rx) = std_mpsc::channel(); Scheduler { scheduler_id: NEXT_SCHEDULER_ID.fetch_add(1, atomic::Ordering::SeqCst), next_fiber_id: 0, fibers: HashMap::new(), run_queue: VecDeque::new(), request_tx, request_rx, poller, } } /// Returns the identifier of this scheduler. pub fn scheduler_id(&self) -> SchedulerId { self.scheduler_id } /// Returns the length of the run queue of this scheduler. pub fn run_queue_len(&self) -> usize { self.run_queue.len() } /// Returns the count of alive fibers (i.e., not readied futures) in this scheduler. pub fn fiber_count(&self) -> usize { self.fibers.len() } /// Returns a handle of this scheduler. pub fn handle(&self) -> SchedulerHandle { SchedulerHandle { request_tx: self.request_tx.clone(), } } /// Runs one unit of work. 
pub fn run_once(&mut self, block_if_idle: bool) { let mut did_something = false; loop { // Request match self.request_rx.try_recv() { Err(std_mpsc::TryRecvError::Empty) => {} Err(std_mpsc::TryRecvError::Disconnected) => unreachable!(), Ok(request) => { did_something = true; self.handle_request(request); } } // Task if let Some(fiber_id) = self.next_runnable() { did_something = true; self.run_fiber(fiber_id); } if !block_if_idle || did_something { break; } let request = self.request_rx.recv().expect("must succeed"); did_something = true; self.handle_request(request); } } fn handle_request(&mut self, request: Request) { match request { Request::Spawn(task) => self.spawn_fiber(task), Request::WakeUp(fiber_id) => { if self.fibers.contains_key(&fiber_id) { self.schedule(fiber_id); } } } } fn spawn_fiber(&mut self, task: Task) { let fiber_id = self.next_fiber_id(); self.fibers .insert(fiber_id, fiber::FiberState::new(fiber_id, task)); self.schedule(fiber_id); } fn run_fiber(&mut self, fiber_id: fiber::FiberId) { let finished; let is_runnable = { CURRENT_CONTEXT.with(|context| { let mut context = context.borrow_mut(); if context .scheduler .as_ref() .map_or(true, |s| s.id != self.scheduler_id) { context.switch(self); } { let scheduler = assert_some!(context.scheduler.as_mut()); if !scheduler.poller.is_alive() { // TODO: Return `Err(io::Error)` to caller and // handle the error in upper layers panic!("Poller is down"); } } assert!(context.fiber.is_none(), "Nested schedulers"); let fiber = assert_some!(self.fibers.get_mut(&fiber_id)); context.fiber = Some(fiber as _); }); let fiber = assert_some!(self.fibers.get_mut(&fiber_id)); finished = fiber.run_once(); CURRENT_CONTEXT.with(|context| { context.borrow_mut().fiber = None; }); fiber.is_runnable() }; if finished { self.fibers.remove(&fiber_id); } else if is_runnable { self.schedule(fiber_id); } } fn next_fiber_id(&mut self) -> fiber::FiberId { loop { let id = self.next_fiber_id; self.next_fiber_id = id.wrapping_add(1); if !self.fibers.contains_key(&id) { return id; } } } fn schedule(&mut self, fiber_id: fiber::FiberId)
fn next_runnable(&mut self) -> Option<fiber::FiberId> { while let Some(fiber_id) = self.run_queue.pop_front() { if let Some(fiber) = self.fibers.get_mut(&fiber_id) { fiber.in_run_queue = false; return Some(fiber_id); } } None } } /// A handle of a scheduler. #[derive(Debug, Clone)] pub struct SchedulerHandle { request_tx: RequestSender, } impl SchedulerHandle { /// Wakes up a specified fiber in the scheduler. /// /// This forces the fiber to be pushed to the run queue of the scheduler. pub fn wakeup(&self, fiber_id: fiber::FiberId) { let _ = self.request_tx.send(Request::WakeUp(fiber_id)); } } impl Spawn for SchedulerHandle { fn spawn_boxed(&self, fiber: Box<dyn Future<Item = (), Error = ()> + Send>) { let _ = self.request_tx.send(Request::Spawn(Task(fiber))); } } #[derive(Debug)] pub struct CurrentScheduler { pub id: SchedulerId, pub handle: SchedulerHandle, pub poller: poll::PollerHandle, } /// Calls `f` with the current execution context. /// /// If this function is called outside of a fiber, it ignores `f` and returns `None`. pub fn with_current_context<F, T>(f: F) -> Option<T> where F: FnOnce(Context) -> T, { CURRENT_CONTEXT.with(|inner_context| inner_context.borrow_mut().as_context().map(f)) } /// The execution context of the currently running fiber. #[derive(Debug)] pub struct Context<'a> { scheduler: &'a mut CurrentScheduler, fiber: &'a mut FiberState, } impl<'a> Context<'a> { /// Returns the identifier of the current execution context. pub fn context_id(&self) -> super::ContextId { (self.scheduler.id, self.fiber.fiber_id) } /// Parks the current fiber. pub fn park(&mut self) -> super::Unpark { self.fiber .park(self.scheduler.id, self.scheduler.handle.clone()) } /// Returns the I/O event poller for this context. pub fn poller(&mut self) -> &mut poll::PollerHandle { &mut self.scheduler.poller } } /// Cooperatively gives up a poll for the current future (fiber). /// /// # Examples /// /// ``` /// # extern crate fibers; /// # extern crate futures; /// use fibers::{fiber, Executor, InPlaceExecutor, Spawn}; /// use futures::{Future, Async, Poll}; /// /// struct HeavyCalculation { ///     polled_count: usize, ///     loops: usize /// } /// impl HeavyCalculation { ///     fn new(loop_count: usize) -> Self { ///         HeavyCalculation { polled_count: 0, loops: loop_count } ///     } /// } /// impl Future for HeavyCalculation { ///     type Item = usize; ///     type Error = (); ///     fn poll(&mut self) -> Poll<Self::Item, Self::Error> { ///         self.polled_count += 1; /// ///         let mut per_poll_loop_limit = 10; ///         while self.loops > 0 { ///             self.loops -= 1; ///             per_poll_loop_limit -= 1; ///             if per_poll_loop_limit == 0 { ///                 // Suspends calculation and gives execution to other fibers. 
/// return fiber::yield_poll(); /// } /// } /// Ok(Async::Ready(self.polled_count)) /// } /// } /// /// let mut executor = InPlaceExecutor::new().unwrap(); /// let monitor = executor.spawn_monitor(HeavyCalculation::new(100)); /// let result = executor.run_fiber(monitor).unwrap(); /// assert_eq!(result, Ok(11)); /// ``` pub fn yield_poll<T, E>() -> Poll<T, E> { with_current_context(|context| context.fiber.yield_once()); Ok(Async::NotReady) } // TODO: rename #[derive(Debug)] struct InnerContext { pub scheduler: Option<CurrentScheduler>, fiber: Option<*mut FiberState>, } impl InnerContext { fn new() -> Self { InnerContext { scheduler: None, fiber: None, } } pub fn switch(&mut self, scheduler: &Scheduler) { self.scheduler = Some(CurrentScheduler { id: scheduler.scheduler_id, handle: scheduler.handle(), poller: scheduler.poller.clone(), }) } pub fn as_context(&mut self) -> Option<Context> { if let Some(scheduler) = self.scheduler.as_mut() { if let Some(fiber) = self.fiber { let fiber = unsafe { &mut *fiber }; return Some(Context { scheduler, fiber }); } } None } } #[derive(Debug)] enum Request { Spawn(Task), WakeUp(fiber::FiberId), }
{ let fiber = assert_some!(self.fibers.get_mut(&fiber_id)); if !fiber.in_run_queue { self.run_queue.push_back(fiber_id); fiber.in_run_queue = true; } }
identifier_body
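The body recovered above is `Scheduler::schedule`, which uses the `in_run_queue` flag to keep a fiber from being enqueued twice while it is already waiting its turn. A self-contained sketch of the same bookkeeping with plain std collections (the `RunQueue` type and its method names are illustrative):

```rust
use std::collections::{HashMap, VecDeque};

#[derive(Default)]
struct RunQueue {
    in_queue: HashMap<u64, bool>,
    queue: VecDeque<u64>,
}

impl RunQueue {
    /// Enqueues `id` unless it is already queued (mirrors `schedule`).
    fn schedule(&mut self, id: u64) {
        let flag = self.in_queue.entry(id).or_insert(false);
        if !*flag {
            self.queue.push_back(id);
            *flag = true;
        }
    }

    /// Pops the next id and clears its flag (mirrors `next_runnable`).
    fn next_runnable(&mut self) -> Option<u64> {
        let id = self.queue.pop_front()?;
        self.in_queue.insert(id, false);
        Some(id)
    }
}

fn main() {
    let mut rq = RunQueue::default();
    rq.schedule(1);
    rq.schedule(1); // deduplicated by the flag
    rq.schedule(2);
    assert_eq!(rq.next_runnable(), Some(1));
    assert_eq!(rq.next_runnable(), Some(2));
    assert_eq!(rq.next_runnable(), None);
}
```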
mod.rs
//! Inter-Integrated Circuit driver for Tegra210. //! //! # Description //! //! The I²C controller (I2C) implements an I²C 3.0 specification-compliant //! I²C master and slave controller. The I²C controller supports multiple //! masters and slaves. It supports Standard mode (up to 100 Kbits/s), //! Fast mode (up to 400 Kbits/s), Fast mode plus (Fm+, up to 1 Mbits/s), //! and High-speed mode (up to 3.4 Mbits/s). //! //! Tegra X1 devices have six instances of this controller. All six //! instances have identical I2C master functionality. There are also //! three additional I2C instances in the TSEC, CL-DVFS and VI modules. //! //! The I²C controller supports DMA for the Master controller over the APB //! bus. There is no DMA support for the Slave controller. The I²C controller //! also supports packet mode transfers where the data to be transferred is //! encapsulated in a predefined packet format as payload and sent to the //! I²C controller over the APB bus. The header of the packet specifies the //! type of operation to be performed, the size and other parameters. //! //! # Implementation //! //! - The addresses of available I2C devices are exposed as constants. //! //! - The [`Registers`] struct provides abstractions over the I2C registers //! and the possibility to create pointers to each I2C mapped at a different //! address. //! //! - The [`I2c`] struct represents an I2C controller and holds the [`Clock`] to enable //! the device and the respective [`Registers`] block pointer to //! communicate over I²C. //! //! - [`I2c`] holds pre-defined constants which represent the I2C //! controllers 1 through 6 and should be preferred over creating //! instances of the [`I2c`] struct manually. //! //! - [`I2c::init`] has to be called for each device before it can //! be used. //! //! - [`I2c::read`] and [`I2c::write`] take buffers as arguments. //! For write operations, the buffer must contain the byte //! representation of the number to send in little-endian byte order. //! For read operations, the buffer is filled with little-endian-ordered //! bytes. //! //! - The [`Sync`] trait is implemented for [`I2c`]; it is considered //! safe to share references between threads. //! //! - [`send_pmic_cpu_shutdown_cmd`], [`read_ti_charger_bit_7`], //! [`clear_ti_charger_bit_7`] and [`set_ti_charger_bit_7`] are helper //! functions which wrap common I2C operations. //! //! [`Registers`]: struct.Registers.html //! [`I2c`]: struct.I2c.html //! [`Clock`]: ../clock/struct.Clock.html //! [`I2c::init`]: struct.I2c.html#method.init //! [`I2c::read`]: struct.I2c.html#method.read //! [`I2c::write`]: struct.I2c.html#method.write //! [`Sync`]: https://doc.rust-lang.org/nightly/core/marker/trait.Sync.html //! [`send_pmic_cpu_shutdown_cmd`]: fn.send_pmic_cpu_shutdown_cmd.html //! [`read_ti_charger_bit_7`]: fn.read_ti_charger_bit_7.html //! [`clear_ti_charger_bit_7`]: fn.clear_ti_charger_bit_7.html //! [`set_ti_charger_bit_7`]: fn.set_ti_charger_bit_7.html use core::{convert::TryInto, marker::{Send, Sync}}; use mirage_mmio::Mmio; use crate::{clock::Clock, timer::usleep}; /// Base address for the I²C registers 1 through 4. pub(crate) const I2C_1234_BASE: u32 = 0x7000_C000; /// Base address for the I²C registers 5 through 6. pub(crate) const I2C_56_BASE: u32 = 0x7000_D000; /// The I²C device address for the Maxim 77621 CPU. pub const MAX77621_CPU_I2C_ADDR: u32 = 0x1B; /// The I²C device address for the Maxim 77621 GPU. pub const MAX77621_GPU_I2C_ADDR: u32 = 0x1C; /// The I²C device address for the Maxim 17050. 
pub const MAX17050_I2C_ADDR: u32 = 0x36; /// The I²C device address for the Maxim 77620 PWR. pub const MAX77620_PWR_I2C_ADDR: u32 = 0x3C; /// The I²C device address for the Maxim 77620 RTC. pub const MAX77620_RTC_I2C_ADDR: u32 = 0x68; /// The I²C device address for the TI BQ24193. pub const BQ24193_I2C_ADDR: u32 = 0x6B; /// Enumeration of possible I²C errors that may occur. #[derive(Debug)] pub enum Error { /// Returned in case the boundaries of a buffer used for /// read and write operations exceed the permitted size. BufferBoundariesBlown, /// Returned when the transmission over I²C errors. TransmissionFailed, /// Returned when a querying error for a device occurs. QueryFailed, } /// Representation of the I²C registers. #[allow(non_snake_case)] #[repr(C)] pub struct Registers { pub I2C_CNFG: Mmio<u32>, pub I2C_CMD_ADDR0: Mmio<u32>, pub I2C_CMD_ADDR1: Mmio<u32>, pub I2C_CMD_DATA1: Mmio<u32>, pub I2C_CMD_DATA2: Mmio<u32>, _0x14: Mmio<u32>, _0x18: Mmio<u32>, pub I2C_STATUS: Mmio<u32>, pub I2C_SL_CNFG: Mmio<u32>, pub I2C_SL_RCVD: Mmio<u32>, pub I2C_SL_STATUS: Mmio<u32>, pub I2C_SL_ADDR1: Mmio<u32>, pub I2C_SL_ADDR2: Mmio<u32>, pub I2C_TLOW_SEXT: Mmio<u32>, _0x38: Mmio<u32>, pub I2C_SL_DELAY_COUNT: Mmio<u32>, pub I2C_SL_INT_MASK: Mmio<u32>, pub I2C_SL_INT_SOURCE: Mmio<u32>, pub I2C_SL_INT_SET: Mmio<u32>, _0x4C: Mmio<u32>, pub I2C_TX_PACKET_FIFO: Mmio<u32>, pub I2C_RX_FIFO: Mmio<u32>, pub PACKET_TRANSFER_STATUS: Mmio<u32>, pub FIFO_CONTROL: Mmio<u32>, pub FIFO_STATUS: Mmio<u32>, pub INTERRUPT_MASK_REGISTER: Mmio<u32>, pub INTERRUPT_STATUS_REGISTER: Mmio<u32>, pub I2C_CLK_DIVISOR_REGISTER: Mmio<u32>, pub I2C_INTERRUPT_SOURCE_REGISTER: Mmio<u32>, pub I2C_INTERRUPT_SET_REGISTER: Mmio<u32>, pub I2C_SLV_TX_PACKET_FIFO: Mmio<u32>, pub I2C_SLV_RX_FIFO: Mmio<u32>, pub I2C_SLV_PACKET_STATUS: Mmio<u32>, pub I2C_BUS_CLEAR_CONFIG: Mmio<u32>, pub I2C_BUS_CLEAR_STATUS: Mmio<u32>, pub I2C_CONFIG_LOAD: Mmio<u32>, _0x90: Mmio<u32>, pub I2C_INTERFACE_TIMING_0: Mmio<u32>, pub I2C_INTERFACE_TIMING_1: Mmio<u32>, pub I2C_HS_INTERFACE_TIMING_0: Mmio<u32>, pub I2C_HS_INTERFACE_TIMING_1: Mmio<u32>, } /// Representation of an I²C controller. #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub struct I2c { /// The device clock for the controller. clock: &'static Clock, /// A pointer to the registers used for communication. registers: *const Registers, } // Definitions for known I²C devices. impl I2c { /// Representation of the I²C controller 1. pub const C1: Self = I2c { clock: &Clock::I2C_1, registers: (I2C_1234_BASE + 0) as *const Registers, }; /// Representation of the I²C controller 2. pub const C2: Self = I2c { clock: &Clock::I2C_2, registers: (I2C_1234_BASE + 0x400) as *const Registers, }; /// Representation of the I²C controller 3. pub const C3: Self = I2c { clock: &Clock::I2C_3, registers: (I2C_1234_BASE + 0x500) as *const Registers, }; /// Representation of the I²C controller 4. pub const C4: Self = I2c { clock: &Clock::I2C_4, registers: (I2C_1234_BASE + 0x700) as *const Registers, }; /// Representation of the I²C controller 5. pub const C5: Self = I2c { clock: &Clock::I2C_5, registers: (I2C_56_BASE + 0x000) as *const Registers, }; /// Representation of the I²C controller 6. pub const C6: Self = I2c { clock: &Clock::I2C_6, registers: (I2C_56_BASE + 0x100) as *const Registers, }; } impl I2c { /// Loads the hardware configuration for the I²C. fn load_config(&self) { let register_base = unsafe { &*self.registers }; // Set MSTR_CONFIG_LOAD, TIMEOUT_CONFIG_LOAD, undocumented bit. 
register_base.I2C_CONFIG_LOAD.write(0x25); // Wait up to 20 microseconds for master config to be loaded. for i in 0..20 { usleep(i); if register_base.I2C_CONFIG_LOAD.read() & 1 == 0 { break; } } } /// Transmits the data to the device over I²C. fn send(&self, device: u32, data: &[u8]) -> Result<(), Error> { let register_base = unsafe { &*self.registers }; // Set device for 7-bit write mode. register_base.I2C_CMD_ADDR0.write(device << 1); // Load in data to write. let data_source = u32::from_le_bytes(data.try_into().unwrap()); register_base.I2C_CMD_DATA1.write(data_source); // Set config with LENGTH = data_length, NEW_MASTER_FSM, DEBOUNCE_CNT = 4T. register_base.I2C_CNFG.write((((data.len() << 1) - 2) | 0x2800) as u32); // Load hardware configuration. self.load_config(); // CONFIG |= SEND. register_base.I2C_CNFG.write((register_base.I2C_CNFG.read() & 0xFFFF_FDFF) | 0x200); while register_base.I2C_STATUS.read() & 0x100!= 0 { // Wait until not busy. } // Determine result from the result of CMD1_STAT == SL1_XFER_SUCCESSFUL. if register_base.I2C_STATUS.read() & 0xF == 0 { return Ok(()); } else { return Err(Error::TransmissionFailed); } } /// Receives bytes from the device over I²C and writes them to the buffer. fn receive(&self, device: u32, buffer: &mut [u8]) -> Result<(), Error> { let register_base = unsafe { &*self.registers }; // Set device for 7-bit read mode. register_base.I2C_CMD_ADDR0.write((device << 1) | 1); // Set config with LENGTH = buffer.len(), NEW_MASTER_FSM, DEBOUNCE_CNT = 4T. register_base.I2C_CNFG.write((((buffer.len() << 1) - 2) | 0x2840) as u32); // Load hardware configuration. self.load_config(); // CONFIG |= SEND. register_base.I2C_CNFG.write((register_base.I2C_CNFG.read() & 0xFFFF_FDFF) | 0x200); while register_base.I2C_STATUS.read() & 0x100!= 0 { // Wait until not busy. } // Ensure success. if register_base.I2C_STATUS.read() & 0xF!= 0 { return Err(Error::QueryFailed); } // Read result and copy it back. let result = register_base.I2C_CMD_DATA1.read().to_le_bytes(); buffer.copy_from_slice(&result[..buffer.len()]); Ok(()) } /// Initializes the I²C controller. pub fn init(&self) { let register_base = unsafe { &*self.registers }; // Enable device clock. self.clock.enable(); // Setup divisor and clear the bus. register_base.I2C_CLK_DIVISOR_REGISTER.write(0x50001); register_base.I2C_BUS_CLEAR_CONFIG.write(0x90003); // Load hardware configuration. self.load_config(); // Wait a while until BUS_CLEAR_DONE is set. for _ in 0..10 { usleep(20000); if register_base.INTERRUPT_STATUS_REGISTER.read() & 0x800!= 0 { break; } } // Dummy read. register_base.I2C_BUS_CLEAR_STATUS.read(); // Read and set the Interrupt Status. register_base.INTERRUPT_STATUS_REGISTER .write(register_base.INTERRUPT_STATUS_REGISTER.read()); } /// Writes a buffer of data to a given device over I²C. pub fn write(&self, device: u32, register: u8, data: &[u8]) -> Result<(), Error> { // Limit input size to 32-bits. One byte is reserved for the device register. if data.len() > 3 { return Err(Error::BufferBoundariesBlown); } // Prepare a buffer holding the device register and the data contents. let mut buffer = [0; 4]; buffer[0] = register; buffer[1..].copy_from_slice(data); // Send the buffer to the device. self.send(device, &buffer[..]) } /// Writes an byte to a given device over I²C. #[inline] pub fn write_byte(&self, device: u32, register: u8, byte: u8) -> Result<(), Error> { // Write single byte to
evice over I²C and writes the result to the buffer.
    pub fn read(&self, device: u32, register: u8, buffer: &mut [u8]) -> Result<(), Error> {
        // Limit output size to 32 bits.
        if buffer.len() > 4 {
            return Err(Error::BufferBoundariesBlown);
        }

        // Write the single-byte register ID to the device.
        self.send(device, &[register])?;

        // Receive the data and write it to the buffer.
        self.receive(device, buffer)
    }

    /// Reads a byte from a given device over I²C.
    #[inline]
    pub fn read_byte(&self, device: u32, register: u8) -> Result<u8, Error> {
        let mut buffer = [0; 1];
        self.read(device, register, &mut buffer)?;

        Ok(u8::from_le_bytes(buffer.try_into().unwrap()))
    }
}

unsafe impl Send for I2c {}

unsafe impl Sync for I2c {}
device. self.write(device, register, &byte.to_le_bytes()) } /// Reads a register of a d
identifier_body
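The driver above boils down to three calls: `init` once per controller, then `read`/`write` (or the `_byte` convenience wrappers) with one of the exposed device address constants. A usage sketch under two assumptions — the module is mounted at `crate::i2c`, and controller 5 is the bus wired to the MAX77620 PMIC; the register index `0x00` is a placeholder, not a documented PMIC register:

use crate::i2c::{Error, I2c, MAX77620_PWR_I2C_ADDR};

/// Hypothetical smoke test: read a PMIC register and write the value back.
fn pmic_smoke_test() -> Result<(), Error> {
    // Each controller must be initialized once before use.
    I2c::C5.init();

    // Read a single byte from a (placeholder) register of the PMIC.
    let value = I2c::C5.read_byte(MAX77620_PWR_I2C_ADDR, 0x00)?;

    // Write it back, exercising the write path (at most 3 payload bytes).
    I2c::C5.write_byte(MAX77620_PWR_I2C_ADDR, 0x00, value)
}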
get_block_template.rs
//! Support functions for the `get_block_template()` RPC.

use std::{collections::HashMap, iter, sync::Arc};

use jsonrpc_core::{Error, ErrorCode, Result};
use tower::{Service, ServiceExt};

use zebra_chain::{
    amount::{self, Amount, NegativeOrZero, NonNegative},
    block::{
        self,
        merkle::{self, AuthDataRoot},
        Block, ChainHistoryBlockTxAuthCommitmentHash, Height,
    },
    chain_sync_status::ChainSyncStatus,
    chain_tip::ChainTip,
    parameters::Network,
    serialization::ZcashDeserializeInto,
    transaction::{Transaction, UnminedTx, VerifiedUnminedTx},
    transparent,
};
use zebra_consensus::{
    funding_stream_address, funding_stream_values, miner_subsidy, FundingStreamReceiver,
};
use zebra_node_services::mempool;
use zebra_state::GetBlockTemplateChainInfo;

use crate::methods::get_block_template_rpcs::{
    constants::{MAX_ESTIMATED_DISTANCE_TO_NETWORK_CHAIN_TIP, NOT_SYNCED_ERROR_CODE},
    types::{default_roots::DefaultRoots, transaction::TransactionTemplate},
};

pub use crate::methods::get_block_template_rpcs::types::get_block_template::*;

// - Parameter checks

/// Checks that `data` is omitted in `Template` mode or provided in `Proposal` mode.
///
/// Returns an error if there's a mismatch between the mode and whether `data` is provided.
pub fn check_parameters(parameters: &Option<JsonParameters>) -> Result<()> {
    let Some(parameters) = parameters else {
        return Ok(());
    };

    match parameters {
        JsonParameters {
            mode: GetBlockTemplateRequestMode::Template,
            data: None,
            ..
        }
        | JsonParameters {
            mode: GetBlockTemplateRequestMode::Proposal,
            data: Some(_),
            ..
        } => Ok(()),

        JsonParameters {
            mode: GetBlockTemplateRequestMode::Proposal,
            data: None,
            ..
        } => Err(Error {
            code: ErrorCode::InvalidParams,
            message: "\"data\" parameter must be \
                      provided in \"proposal\" mode"
                .to_string(),
            data: None,
        }),

        JsonParameters {
            mode: GetBlockTemplateRequestMode::Template,
            data: Some(_),
            ..
        } => Err(Error {
            code: ErrorCode::InvalidParams,
            message: "\"data\" parameter must be \
                      omitted in \"template\" mode"
                .to_string(),
            data: None,
        }),
    }
}

/// Returns the miner address, or an error if it is invalid.
pub fn check_miner_address(
    miner_address: Option<transparent::Address>,
) -> Result<transparent::Address> {
    miner_address.ok_or_else(|| Error {
        code: ErrorCode::ServerError(0),
        message: "configure mining.miner_address in zebrad.toml \
                  with a transparent address"
            .to_string(),
        data: None,
    })
}

/// Attempts to validate a block proposal against all of the server's
/// usual acceptance rules (except proof-of-work).
///
/// Returns a `getblocktemplate` [`Response`].
pub async fn validate_block_proposal<BlockVerifierRouter, Tip, SyncStatus>(
    mut block_verifier_router: BlockVerifierRouter,
    block_proposal_bytes: Vec<u8>,
    network: Network,
    latest_chain_tip: Tip,
    sync_status: SyncStatus,
) -> Result<Response>
where
    BlockVerifierRouter: Service<zebra_consensus::Request, Response = block::Hash, Error = zebra_consensus::BoxError>
        + Clone
        + Send
        + Sync
        + 'static,
    Tip: ChainTip + Clone + Send + Sync + 'static,
    SyncStatus: ChainSyncStatus + Clone + Send + Sync + 'static,
{
    check_synced_to_tip(network, latest_chain_tip, sync_status)?;

    let block: Block = match block_proposal_bytes.zcash_deserialize_into() {
        Ok(block) => block,
        Err(parse_error) => {
            tracing::info!(
                ?parse_error,
                "error response from block parser in CheckProposal request"
            );

            return Ok(
                ProposalResponse::rejected("invalid proposal format", parse_error.into()).into(),
            );
        }
    };

    let block_verifier_router_response = block_verifier_router
        .ready()
        .await
        .map_err(|error| Error {
            code: ErrorCode::ServerError(0),
            message: error.to_string(),
            data: None,
        })?
        .call(zebra_consensus::Request::CheckProposal(Arc::new(block)))
        .await;

    Ok(block_verifier_router_response
        .map(|_hash| ProposalResponse::Valid)
        .unwrap_or_else(|verify_chain_error| {
            tracing::info!(
                ?verify_chain_error,
                "error response from block_verifier_router in CheckProposal request"
            );

            ProposalResponse::rejected("invalid proposal", verify_chain_error)
        })
        .into())
}

// - State and syncer checks

/// Returns an error if Zebra is not synced to the consensus chain tip.
/// This error might be incorrect if the local clock is skewed.
pub fn check_synced_to_tip<Tip, SyncStatus>(
    network: Network,
    latest_chain_tip: Tip,
    sync_status: SyncStatus,
) -> Result<()>
where
    Tip: ChainTip + Clone + Send + Sync + 'static,
    SyncStatus: ChainSyncStatus + Clone + Send + Sync + 'static,
{
    // The tip estimate may not be the same as the one coming from the state
    // but this is ok for an estimate
    let (estimated_distance_to_chain_tip, local_tip_height) = latest_chain_tip
        .estimate_distance_to_network_chain_tip(network)
        .ok_or_else(|| Error {
            code: ErrorCode::ServerError(0),
            message: "No Chain tip available yet".to_string(),
            data: None,
        })?;

    if !sync_status.is_close_to_tip()
        || estimated_distance_to_chain_tip > MAX_ESTIMATED_DISTANCE_TO_NETWORK_CHAIN_TIP
    {
        tracing::info!(
            ?estimated_distance_to_chain_tip,
            ?local_tip_height,
            "Zebra has not synced to the chain tip. \
             Hint: check your network connection, clock, and time zone settings."
        );

        return Err(Error {
            code: NOT_SYNCED_ERROR_CODE,
            message: format!(
                "Zebra has not synced to the chain tip, \
                 estimated distance: {estimated_distance_to_chain_tip:?}, \
                 local tip: {local_tip_height:?}. \
                 Hint: check your network connection, clock, and time zone settings."
            ),
            data: None,
        });
    }

    Ok(())
}

// - State and mempool data fetches

/// Returns the state data for the block template.
///
/// You should call `check_synced_to_tip()` before calling this function.
/// If the state does not have enough blocks, returns an error.
pub async fn fetch_state_tip_and_local_time<State>(
    state: State,
) -> Result<GetBlockTemplateChainInfo>
where
    State: Service<
            zebra_state::ReadRequest,
            Response = zebra_state::ReadResponse,
            Error = zebra_state::BoxError,
        > + Clone
        + Send
        + Sync
        + 'static,
{
    let request = zebra_state::ReadRequest::ChainInfo;
    let response = state
        .oneshot(request.clone())
        .await
        .map_err(|error| Error {
            code: ErrorCode::ServerError(0),
            message: error.to_string(),
            data: None,
        })?;

    let chain_info = match response {
        zebra_state::ReadResponse::ChainInfo(chain_info) => chain_info,
        _ => unreachable!("incorrect response to {request:?}"),
    };

    Ok(chain_info)
}

/// Returns the transactions that are currently in `mempool`, or None if the
/// `last_seen_tip_hash` from the mempool response doesn't match the tip hash from the state.
///
/// You should call `check_synced_to_tip()` before calling this function.
/// If the mempool is inactive because Zebra is not synced to the tip, returns no transactions.
pub async fn fetch_mempool_transactions<Mempool>(
    mempool: Mempool,
    chain_tip_hash: block::Hash,
) -> Result<Option<Vec<VerifiedUnminedTx>>>
where
    Mempool: Service<
            mempool::Request,
            Response = mempool::Response,
            Error = zebra_node_services::BoxError,
        > + 'static,
    Mempool::Future: Send,
{
    let response = mempool
        .oneshot(mempool::Request::FullTransactions)
        .await
        .map_err(|error| Error {
            code: ErrorCode::ServerError(0),
            message: error.to_string(),
            data: None,
        })?;

    let mempool::Response::FullTransactions {
        transactions,
        last_seen_tip_hash,
    } = response
    else {
        unreachable!("unmatched response to a mempool::FullTransactions request")
    };

    // Check that the mempool and state were in sync when we made the requests.
    Ok((last_seen_tip_hash == chain_tip_hash).then_some(transactions))
}

// - Response processing

/// Generates and returns the coinbase transaction and default roots.
///
/// If `like_zcashd` is true, try to match the coinbase transactions generated by `zcashd`
/// in the `getblocktemplate` RPC.
pub fn generate_coinbase_and_roots(
    network: Network,
    height: Height,
    miner_address: transparent::Address,
    mempool_txs: &[VerifiedUnminedTx],
    history_tree: Arc<zebra_chain::history_tree::HistoryTree>,
    like_zcashd: bool,
    extra_coinbase_data: Vec<u8>,
) -> (TransactionTemplate<NegativeOrZero>, DefaultRoots) {
    // Generate the coinbase transaction
    let miner_fee = calculate_miner_fee(mempool_txs);
    let coinbase_txn = generate_coinbase_transaction(
        network,
        height,
        miner_address,
        miner_fee,
        like_zcashd,
        extra_coinbase_data,
    );

    // Calculate block default roots
    //
    // TODO: move expensive root, hash, and tree cryptography to a rayon thread?
    let default_roots = calculate_default_root_hashes(&coinbase_txn, mempool_txs, history_tree);

    let coinbase_txn = TransactionTemplate::from_coinbase(&coinbase_txn, miner_fee);

    (coinbase_txn, default_roots)
}

// - Coinbase transaction processing

/// Returns a coinbase transaction for the supplied parameters.
///
/// If `like_zcashd` is true, try to match the coinbase transactions generated by `zcashd`
/// in the `getblocktemplate` RPC.
pub fn generate_coinbase_transaction(
    network: Network,
    height: Height,
    miner_address: transparent::Address,
    miner_fee: Amount<NonNegative>,
    like_zcashd: bool,
    extra_coinbase_data: Vec<u8>,
) -> UnminedTx {
    let outputs = standard_coinbase_outputs(network, height, miner_address, miner_fee, like_zcashd);

    if like_zcashd {
        Transaction::new_v4_coinbase(network, height, outputs, like_zcashd, extra_coinbase_data)
            .into()
    } else {
        Transaction::new_v5_coinbase(network, height, outputs, extra_coinbase_data).into()
    }
}

/// Returns the total miner fee for `mempool_txs`.
pub fn calculate_miner_fee(mempool_txs: &[VerifiedUnminedTx]) -> Amount<NonNegative> {
    let miner_fee: amount::Result<Amount<NonNegative>> =
        mempool_txs.iter().map(|tx| tx.miner_fee).sum();

    miner_fee.expect(
        "invalid selected transactions: \
         fees in a valid block can not be more than MAX_MONEY",
    )
}

/// Returns the standard funding stream and miner reward transparent output scripts
/// for `network`, `height` and `miner_fee`.
///
/// Only works for post-Canopy heights.
///
/// If `like_zcashd` is true, try to match the coinbase transactions generated by `zcashd`
/// in the `getblocktemplate` RPC.
pub fn standard_coinbase_outputs(
    network: Network,
    height: Height,
    miner_address: transparent::Address,
    miner_fee: Amount<NonNegative>,
    like_zcashd: bool,
) -> Vec<(Amount<NonNegative>, transparent::Script)> {
    let funding_streams = funding_stream_values(height, network)
        .expect("funding stream value calculations are valid for reasonable chain heights");

    // Optional TODO: move this into a zebra_consensus function?
    let funding_streams: HashMap<
        FundingStreamReceiver,
        (Amount<NonNegative>, transparent::Address),
    > = funding_streams
        .into_iter()
        .map(|(receiver, amount)| {
            (
                receiver,
                (amount, funding_stream_address(height, network, receiver)),
            )
        })
        .collect();

    let miner_reward = miner_subsidy(height, network)
        .expect("reward calculations are valid for reasonable chain heights")
        + miner_fee;
    let miner_reward =
        miner_reward.expect("reward calculations are valid for reasonable chain heights");

    combine_coinbase_outputs(funding_streams, miner_address, miner_reward, like_zcashd)
}

/// Combine the miner reward and funding streams into a list of coinbase amounts and addresses.
///
/// If `like_zcashd` is true, try to match the coinbase transactions generated by `zcashd`
/// in the `getblocktemplate` RPC.
fn
(
    funding_streams: HashMap<FundingStreamReceiver, (Amount<NonNegative>, transparent::Address)>,
    miner_address: transparent::Address,
    miner_reward: Amount<NonNegative>,
    like_zcashd: bool,
) -> Vec<(Amount<NonNegative>, transparent::Script)> {
    // Combine all the funding streams with the miner reward.
    let mut coinbase_outputs: Vec<(Amount<NonNegative>, transparent::Address)> = funding_streams
        .into_iter()
        .map(|(_receiver, (amount, address))| (amount, address))
        .collect();
    coinbase_outputs.push((miner_reward, miner_address));

    let mut coinbase_outputs: Vec<(Amount<NonNegative>, transparent::Script)> = coinbase_outputs
        .iter()
        .map(|(amount, address)| (*amount, address.create_script_from_address()))
        .collect();

    // The HashMap returns funding streams in an arbitrary order,
    // but Zebra's snapshot tests expect the same order every time.
    if like_zcashd {
        // zcashd sorts outputs in serialized data order, excluding the length field
        coinbase_outputs.sort_by_key(|(_amount, script)| script.clone());
    } else {
        // Zebra sorts by amount then script.
        //
        // Since the sort is stable, equal amounts will remain sorted by script.
        coinbase_outputs.sort_by_key(|(_amount, script)| script.clone());
        coinbase_outputs.sort_by_key(|(amount, _script)| *amount);
    }

    coinbase_outputs
}

// - Transaction roots processing

/// Returns the default block roots for the supplied coinbase and mempool transactions,
/// and the supplied history tree.
///
/// This function runs expensive cryptographic operations.
pub fn calculate_default_root_hashes(
    coinbase_txn: &UnminedTx,
    mempool_txs: &[VerifiedUnminedTx],
    history_tree: Arc<zebra_chain::history_tree::HistoryTree>,
) -> DefaultRoots {
    let (merkle_root, auth_data_root) = calculate_transaction_roots(coinbase_txn, mempool_txs);

    let chain_history_root = history_tree.hash().expect("history tree can't be empty");

    let block_commitments_hash = ChainHistoryBlockTxAuthCommitmentHash::from_commitments(
        &chain_history_root,
        &auth_data_root,
    );

    DefaultRoots {
        merkle_root,
        chain_history_root,
        auth_data_root,
        block_commitments_hash,
    }
}

/// Returns the transaction effecting and authorizing roots
/// for `coinbase_txn` and `mempool_txs`, which are used in the block header.
//
// TODO: should this be spawned into a cryptographic operations pool?
// (it would only matter if there were a lot of small transactions in a block)
pub fn calculate_transaction_roots(
    coinbase_txn: &UnminedTx,
    mempool_txs: &[VerifiedUnminedTx],
) -> (merkle::Root, AuthDataRoot) {
    let block_transactions =
        || iter::once(coinbase_txn).chain(mempool_txs.iter().map(|tx| &tx.transaction));

    let merkle_root = block_transactions().cloned().collect();
    let auth_data_root = block_transactions().cloned().collect();

    (merkle_root, auth_data_root)
}
combine_coinbase_outputs
identifier_name
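`combine_coinbase_outputs` relies on sort stability to get a deterministic order out of a `HashMap`: sorting by script first and then by amount leaves equal-amount outputs ordered by script. A standalone illustration of that ordering rule, with plain `(u64, Vec<u8>)` pairs standing in for Zebra's `Amount` and `Script` types:

// Standalone sketch of the output-ordering rule described above.
fn order_outputs(mut outputs: Vec<(u64, Vec<u8>)>, like_zcashd: bool) -> Vec<(u64, Vec<u8>)> {
    if like_zcashd {
        // zcashd: serialized script order only.
        outputs.sort_by_key(|(_amount, script)| script.clone());
    } else {
        // Zebra: amount first; the stable sort keeps equal amounts in script order.
        outputs.sort_by_key(|(_amount, script)| script.clone());
        outputs.sort_by_key(|(amount, _script)| *amount);
    }
    outputs
}

fn main() {
    let outputs = vec![(500, vec![0x02]), (500, vec![0x01]), (250, vec![0x03])];
    assert_eq!(
        order_outputs(outputs, false),
        vec![(250, vec![0x03]), (500, vec![0x01]), (500, vec![0x02])]
    );
}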
get_block_template.rs
//! Support functions for the `get_block_template()` RPC. use std::{collections::HashMap, iter, sync::Arc}; use jsonrpc_core::{Error, ErrorCode, Result}; use tower::{Service, ServiceExt}; use zebra_chain::{ amount::{self, Amount, NegativeOrZero, NonNegative}, block::{ self, merkle::{self, AuthDataRoot}, Block, ChainHistoryBlockTxAuthCommitmentHash, Height, }, chain_sync_status::ChainSyncStatus, chain_tip::ChainTip, parameters::Network, serialization::ZcashDeserializeInto, transaction::{Transaction, UnminedTx, VerifiedUnminedTx}, transparent, }; use zebra_consensus::{ funding_stream_address, funding_stream_values, miner_subsidy, FundingStreamReceiver, }; use zebra_node_services::mempool; use zebra_state::GetBlockTemplateChainInfo; use crate::methods::get_block_template_rpcs::{ constants::{MAX_ESTIMATED_DISTANCE_TO_NETWORK_CHAIN_TIP, NOT_SYNCED_ERROR_CODE}, types::{default_roots::DefaultRoots, transaction::TransactionTemplate}, }; pub use crate::methods::get_block_template_rpcs::types::get_block_template::*; // - Parameter checks /// Checks that `data` is omitted in `Template` mode or provided in `Proposal` mode, /// /// Returns an error if there's a mismatch between the mode and whether `data` is provided. pub fn check_parameters(parameters: &Option<JsonParameters>) -> Result<()> { let Some(parameters) = parameters else { return Ok(()); }; match parameters { JsonParameters { mode: GetBlockTemplateRequestMode::Template, data: None, .. } | JsonParameters { mode: GetBlockTemplateRequestMode::Proposal, data: Some(_), .. } => Ok(()), JsonParameters { mode: GetBlockTemplateRequestMode::Proposal, data: None, .. } => Err(Error { code: ErrorCode::InvalidParams, message: "\"data\" parameter must be \ provided in \"proposal\" mode" .to_string(), data: None, }), JsonParameters { mode: GetBlockTemplateRequestMode::Template, data: Some(_), .. } => Err(Error { code: ErrorCode::InvalidParams, message: "\"data\" parameter must be \ omitted in \"template\" mode" .to_string(), data: None, }), } } /// Returns the miner address, or an error if it is invalid. pub fn check_miner_address( miner_address: Option<transparent::Address>, ) -> Result<transparent::Address> { miner_address.ok_or_else(|| Error { code: ErrorCode::ServerError(0), message: "configure mining.miner_address in zebrad.toml \ with a transparent address" .to_string(), data: None, }) } /// Attempts to validate block proposal against all of the server's /// usual acceptance rules (except proof-of-work). /// /// Returns a `getblocktemplate` [`Response`]. 
pub async fn validate_block_proposal<BlockVerifierRouter, Tip, SyncStatus>( mut block_verifier_router: BlockVerifierRouter, block_proposal_bytes: Vec<u8>, network: Network, latest_chain_tip: Tip, sync_status: SyncStatus, ) -> Result<Response> where BlockVerifierRouter: Service<zebra_consensus::Request, Response = block::Hash, Error = zebra_consensus::BoxError> + Clone + Send + Sync + 'static, Tip: ChainTip + Clone + Send + Sync + 'static, SyncStatus: ChainSyncStatus + Clone + Send + Sync + 'static, { check_synced_to_tip(network, latest_chain_tip, sync_status)?; let block: Block = match block_proposal_bytes.zcash_deserialize_into() { Ok(block) => block, Err(parse_error) => { tracing::info!( ?parse_error, "error response from block parser in CheckProposal request" ); return Ok( ProposalResponse::rejected("invalid proposal format", parse_error.into()).into(), ); } }; let block_verifier_router_response = block_verifier_router .ready() .await .map_err(|error| Error { code: ErrorCode::ServerError(0), message: error.to_string(), data: None, })? .call(zebra_consensus::Request::CheckProposal(Arc::new(block))) .await; Ok(block_verifier_router_response .map(|_hash| ProposalResponse::Valid) .unwrap_or_else(|verify_chain_error| { tracing::info!( ?verify_chain_error, "error response from block_verifier_router in CheckProposal request" ); ProposalResponse::rejected("invalid proposal", verify_chain_error) }) .into()) } // - State and syncer checks /// Returns an error if Zebra is not synced to the consensus chain tip. /// This error might be incorrect if the local clock is skewed. pub fn check_synced_to_tip<Tip, SyncStatus>( network: Network, latest_chain_tip: Tip, sync_status: SyncStatus, ) -> Result<()> where Tip: ChainTip + Clone + Send + Sync + 'static, SyncStatus: ChainSyncStatus + Clone + Send + Sync + 'static, { // The tip estimate may not be the same as the one coming from the state // but this is ok for an estimate let (estimated_distance_to_chain_tip, local_tip_height) = latest_chain_tip .estimate_distance_to_network_chain_tip(network) .ok_or_else(|| Error { code: ErrorCode::ServerError(0), message: "No Chain tip available yet".to_string(), data: None, })?; if !sync_status.is_close_to_tip() || estimated_distance_to_chain_tip > MAX_ESTIMATED_DISTANCE_TO_NETWORK_CHAIN_TIP { tracing::info!( ?estimated_distance_to_chain_tip, ?local_tip_height, "Zebra has not synced to the chain tip. \ Hint: check your network connection, clock, and time zone settings." ); return Err(Error { code: NOT_SYNCED_ERROR_CODE, message: format!( "Zebra has not synced to the chain tip, \ estimated distance: {estimated_distance_to_chain_tip:?}, \ local tip: {local_tip_height:?}. \ Hint: check your network connection, clock, and time zone settings." ), data: None, }); } Ok(()) } // - State and mempool data fetches /// Returns the state data for the block template. /// /// You should call `check_synced_to_tip()` before calling this function. /// If the state does not have enough blocks, returns an error. pub async fn fetch_state_tip_and_local_time<State>( state: State, ) -> Result<GetBlockTemplateChainInfo> where State: Service< zebra_state::ReadRequest, Response = zebra_state::ReadResponse, Error = zebra_state::BoxError, > + Clone + Send + Sync + 'static,
/// Returns the transactions that are currently in `mempool`, or None if the /// `last_seen_tip_hash` from the mempool response doesn't match the tip hash from the state. /// /// You should call `check_synced_to_tip()` before calling this function. /// If the mempool is inactive because Zebra is not synced to the tip, returns no transactions. pub async fn fetch_mempool_transactions<Mempool>( mempool: Mempool, chain_tip_hash: block::Hash, ) -> Result<Option<Vec<VerifiedUnminedTx>>> where Mempool: Service< mempool::Request, Response = mempool::Response, Error = zebra_node_services::BoxError, > + 'static, Mempool::Future: Send, { let response = mempool .oneshot(mempool::Request::FullTransactions) .await .map_err(|error| Error { code: ErrorCode::ServerError(0), message: error.to_string(), data: None, })?; let mempool::Response::FullTransactions { transactions, last_seen_tip_hash, } = response else { unreachable!("unmatched response to a mempool::FullTransactions request") }; // Check that the mempool and state were in sync when we made the requests Ok((last_seen_tip_hash == chain_tip_hash).then_some(transactions)) } // - Response processing /// Generates and returns the coinbase transaction and default roots. /// /// If `like_zcashd` is true, try to match the coinbase transactions generated by `zcashd` /// in the `getblocktemplate` RPC. pub fn generate_coinbase_and_roots( network: Network, height: Height, miner_address: transparent::Address, mempool_txs: &[VerifiedUnminedTx], history_tree: Arc<zebra_chain::history_tree::HistoryTree>, like_zcashd: bool, extra_coinbase_data: Vec<u8>, ) -> (TransactionTemplate<NegativeOrZero>, DefaultRoots) { // Generate the coinbase transaction let miner_fee = calculate_miner_fee(mempool_txs); let coinbase_txn = generate_coinbase_transaction( network, height, miner_address, miner_fee, like_zcashd, extra_coinbase_data, ); // Calculate block default roots // // TODO: move expensive root, hash, and tree cryptography to a rayon thread? let default_roots = calculate_default_root_hashes(&coinbase_txn, mempool_txs, history_tree); let coinbase_txn = TransactionTemplate::from_coinbase(&coinbase_txn, miner_fee); (coinbase_txn, default_roots) } // - Coinbase transaction processing /// Returns a coinbase transaction for the supplied parameters. /// /// If `like_zcashd` is true, try to match the coinbase transactions generated by `zcashd` /// in the `getblocktemplate` RPC. pub fn generate_coinbase_transaction( network: Network, height: Height, miner_address: transparent::Address, miner_fee: Amount<NonNegative>, like_zcashd: bool, extra_coinbase_data: Vec<u8>, ) -> UnminedTx { let outputs = standard_coinbase_outputs(network, height, miner_address, miner_fee, like_zcashd); if like_zcashd { Transaction::new_v4_coinbase(network, height, outputs, like_zcashd, extra_coinbase_data) .into() } else { Transaction::new_v5_coinbase(network, height, outputs, extra_coinbase_data).into() } } /// Returns the total miner fee for `mempool_txs`. pub fn calculate_miner_fee(mempool_txs: &[VerifiedUnminedTx]) -> Amount<NonNegative> { let miner_fee: amount::Result<Amount<NonNegative>> = mempool_txs.iter().map(|tx| tx.miner_fee).sum(); miner_fee.expect( "invalid selected transactions: \ fees in a valid block can not be more than MAX_MONEY", ) } /// Returns the standard funding stream and miner reward transparent output scripts /// for `network`, `height` and `miner_fee`. /// /// Only works for post-Canopy heights. 
/// /// If `like_zcashd` is true, try to match the coinbase transactions generated by `zcashd` /// in the `getblocktemplate` RPC. pub fn standard_coinbase_outputs( network: Network, height: Height, miner_address: transparent::Address, miner_fee: Amount<NonNegative>, like_zcashd: bool, ) -> Vec<(Amount<NonNegative>, transparent::Script)> { let funding_streams = funding_stream_values(height, network) .expect("funding stream value calculations are valid for reasonable chain heights"); // Optional TODO: move this into a zebra_consensus function? let funding_streams: HashMap< FundingStreamReceiver, (Amount<NonNegative>, transparent::Address), > = funding_streams .into_iter() .map(|(receiver, amount)| { ( receiver, (amount, funding_stream_address(height, network, receiver)), ) }) .collect(); let miner_reward = miner_subsidy(height, network) .expect("reward calculations are valid for reasonable chain heights") + miner_fee; let miner_reward = miner_reward.expect("reward calculations are valid for reasonable chain heights"); combine_coinbase_outputs(funding_streams, miner_address, miner_reward, like_zcashd) } /// Combine the miner reward and funding streams into a list of coinbase amounts and addresses. /// /// If `like_zcashd` is true, try to match the coinbase transactions generated by `zcashd` /// in the `getblocktemplate` RPC. fn combine_coinbase_outputs( funding_streams: HashMap<FundingStreamReceiver, (Amount<NonNegative>, transparent::Address)>, miner_address: transparent::Address, miner_reward: Amount<NonNegative>, like_zcashd: bool, ) -> Vec<(Amount<NonNegative>, transparent::Script)> { // Combine all the funding streams with the miner reward. let mut coinbase_outputs: Vec<(Amount<NonNegative>, transparent::Address)> = funding_streams .into_iter() .map(|(_receiver, (amount, address))| (amount, address)) .collect(); coinbase_outputs.push((miner_reward, miner_address)); let mut coinbase_outputs: Vec<(Amount<NonNegative>, transparent::Script)> = coinbase_outputs .iter() .map(|(amount, address)| (*amount, address.create_script_from_address())) .collect(); // The HashMap returns funding streams in an arbitrary order, // but Zebra's snapshot tests expect the same order every time. if like_zcashd { // zcashd sorts outputs in serialized data order, excluding the length field coinbase_outputs.sort_by_key(|(_amount, script)| script.clone()); } else { // Zebra sorts by amount then script. // // Since the sort is stable, equal amounts will remain sorted by script. coinbase_outputs.sort_by_key(|(_amount, script)| script.clone()); coinbase_outputs.sort_by_key(|(amount, _script)| *amount); } coinbase_outputs } // - Transaction roots processing /// Returns the default block roots for the supplied coinbase and mempool transactions, /// and the supplied history tree. /// /// This function runs expensive cryptographic operations. 
pub fn calculate_default_root_hashes( coinbase_txn: &UnminedTx, mempool_txs: &[VerifiedUnminedTx], history_tree: Arc<zebra_chain::history_tree::HistoryTree>, ) -> DefaultRoots { let (merkle_root, auth_data_root) = calculate_transaction_roots(coinbase_txn, mempool_txs); let chain_history_root = history_tree.hash().expect("history tree can't be empty"); let block_commitments_hash = ChainHistoryBlockTxAuthCommitmentHash::from_commitments( &chain_history_root, &auth_data_root, ); DefaultRoots { merkle_root, chain_history_root, auth_data_root, block_commitments_hash, } } /// Returns the transaction effecting and authorizing roots /// for `coinbase_txn` and `mempool_txs`, which are used in the block header. // // TODO: should this be spawned into a cryptographic operations pool? // (it would only matter if there were a lot of small transactions in a block) pub fn calculate_transaction_roots( coinbase_txn: &UnminedTx, mempool_txs: &[VerifiedUnminedTx], ) -> (merkle::Root, AuthDataRoot) { let block_transactions = || iter::once(coinbase_txn).chain(mempool_txs.iter().map(|tx| &tx.transaction)); let merkle_root = block_transactions().cloned().collect(); let auth_data_root = block_transactions().cloned().collect(); (merkle_root, auth_data_root) }
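// A minimal sketch of the iterator-factory pattern in `calculate_transaction_roots`
// above: a Rust iterator can only be consumed once, so the function builds a closure
// that yields a fresh coinbase-first iterator for each root it computes. Plain
// `String` transactions and `Vec<String>` "roots" stand in for the real `UnminedTx`,
// `merkle::Root`, and `AuthDataRoot` types.
use std::iter;

fn two_collections(coinbase: &String, rest: &[String]) -> (Vec<String>, Vec<String>) {
    // Each call produces a new iterator over the same transactions.
    let block_transactions = || iter::once(coinbase).chain(rest.iter());

    let first: Vec<String> = block_transactions().cloned().collect();
    let second: Vec<String> = block_transactions().cloned().collect();
    (first, second)
}

fn main() {
    let coinbase = "coinbase".to_string();
    let rest = vec!["tx1".to_string(), "tx2".to_string()];
    let (a, b) = two_collections(&coinbase, &rest);
    assert_eq!(a, b);
    assert_eq!(a[0], "coinbase"); // the coinbase transaction is always first
}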
{ let request = zebra_state::ReadRequest::ChainInfo; let response = state .oneshot(request.clone()) .await .map_err(|error| Error { code: ErrorCode::ServerError(0), message: error.to_string(), data: None, })?; let chain_info = match response { zebra_state::ReadResponse::ChainInfo(chain_info) => chain_info, _ => unreachable!("incorrect response to {request:?}"), }; Ok(chain_info) }
identifier_body
get_block_template.rs
//! Support functions for the `get_block_template()` RPC. use std::{collections::HashMap, iter, sync::Arc}; use jsonrpc_core::{Error, ErrorCode, Result}; use tower::{Service, ServiceExt}; use zebra_chain::{ amount::{self, Amount, NegativeOrZero, NonNegative}, block::{ self, merkle::{self, AuthDataRoot}, Block, ChainHistoryBlockTxAuthCommitmentHash, Height, }, chain_sync_status::ChainSyncStatus, chain_tip::ChainTip,
transparent, }; use zebra_consensus::{ funding_stream_address, funding_stream_values, miner_subsidy, FundingStreamReceiver, }; use zebra_node_services::mempool; use zebra_state::GetBlockTemplateChainInfo; use crate::methods::get_block_template_rpcs::{ constants::{MAX_ESTIMATED_DISTANCE_TO_NETWORK_CHAIN_TIP, NOT_SYNCED_ERROR_CODE}, types::{default_roots::DefaultRoots, transaction::TransactionTemplate}, }; pub use crate::methods::get_block_template_rpcs::types::get_block_template::*; // - Parameter checks /// Checks that `data` is omitted in `Template` mode or provided in `Proposal` mode. /// /// Returns an error if there's a mismatch between the mode and whether `data` is provided. pub fn check_parameters(parameters: &Option<JsonParameters>) -> Result<()> { let Some(parameters) = parameters else { return Ok(()); }; match parameters { JsonParameters { mode: GetBlockTemplateRequestMode::Template, data: None, .. } | JsonParameters { mode: GetBlockTemplateRequestMode::Proposal, data: Some(_), .. } => Ok(()), JsonParameters { mode: GetBlockTemplateRequestMode::Proposal, data: None, .. } => Err(Error { code: ErrorCode::InvalidParams, message: "\"data\" parameter must be \ provided in \"proposal\" mode" .to_string(), data: None, }), JsonParameters { mode: GetBlockTemplateRequestMode::Template, data: Some(_), .. } => Err(Error { code: ErrorCode::InvalidParams, message: "\"data\" parameter must be \ omitted in \"template\" mode" .to_string(), data: None, }), } } /// Returns the miner address, or an error if it is invalid. pub fn check_miner_address( miner_address: Option<transparent::Address>, ) -> Result<transparent::Address> { miner_address.ok_or_else(|| Error { code: ErrorCode::ServerError(0), message: "configure mining.miner_address in zebrad.toml \ with a transparent address" .to_string(), data: None, }) } /// Attempts to validate a block proposal against all of the server's /// usual acceptance rules (except proof-of-work). /// /// Returns a `getblocktemplate` [`Response`]. pub async fn validate_block_proposal<BlockVerifierRouter, Tip, SyncStatus>( mut block_verifier_router: BlockVerifierRouter, block_proposal_bytes: Vec<u8>, network: Network, latest_chain_tip: Tip, sync_status: SyncStatus, ) -> Result<Response> where BlockVerifierRouter: Service<zebra_consensus::Request, Response = block::Hash, Error = zebra_consensus::BoxError> + Clone + Send + Sync + 'static, Tip: ChainTip + Clone + Send + Sync + 'static, SyncStatus: ChainSyncStatus + Clone + Send + Sync + 'static, { check_synced_to_tip(network, latest_chain_tip, sync_status)?; let block: Block = match block_proposal_bytes.zcash_deserialize_into() { Ok(block) => block, Err(parse_error) => { tracing::info!( ?parse_error, "error response from block parser in CheckProposal request" ); return Ok( ProposalResponse::rejected("invalid proposal format", parse_error.into()).into(), ); } }; let block_verifier_router_response = block_verifier_router .ready() .await .map_err(|error| Error { code: ErrorCode::ServerError(0), message: error.to_string(), data: None, })? .call(zebra_consensus::Request::CheckProposal(Arc::new(block))) .await; Ok(block_verifier_router_response .map(|_hash| ProposalResponse::Valid) .unwrap_or_else(|verify_chain_error| { tracing::info!( ?verify_chain_error, "error response from block_verifier_router in CheckProposal request" ); ProposalResponse::rejected("invalid proposal", verify_chain_error) }) .into()) } // - State and syncer checks /// Returns an error if Zebra is not synced to the consensus chain tip. 
/// This error might be incorrect if the local clock is skewed. pub fn check_synced_to_tip<Tip, SyncStatus>( network: Network, latest_chain_tip: Tip, sync_status: SyncStatus, ) -> Result<()> where Tip: ChainTip + Clone + Send + Sync + 'static, SyncStatus: ChainSyncStatus + Clone + Send + Sync + 'static, { // The tip estimate may not be the same as the one coming from the state // but this is ok for an estimate let (estimated_distance_to_chain_tip, local_tip_height) = latest_chain_tip .estimate_distance_to_network_chain_tip(network) .ok_or_else(|| Error { code: ErrorCode::ServerError(0), message: "No Chain tip available yet".to_string(), data: None, })?; if !sync_status.is_close_to_tip() || estimated_distance_to_chain_tip > MAX_ESTIMATED_DISTANCE_TO_NETWORK_CHAIN_TIP { tracing::info!( ?estimated_distance_to_chain_tip, ?local_tip_height, "Zebra has not synced to the chain tip. \ Hint: check your network connection, clock, and time zone settings." ); return Err(Error { code: NOT_SYNCED_ERROR_CODE, message: format!( "Zebra has not synced to the chain tip, \ estimated distance: {estimated_distance_to_chain_tip:?}, \ local tip: {local_tip_height:?}. \ Hint: check your network connection, clock, and time zone settings." ), data: None, }); } Ok(()) } // - State and mempool data fetches /// Returns the state data for the block template. /// /// You should call `check_synced_to_tip()` before calling this function. /// If the state does not have enough blocks, returns an error. pub async fn fetch_state_tip_and_local_time<State>( state: State, ) -> Result<GetBlockTemplateChainInfo> where State: Service< zebra_state::ReadRequest, Response = zebra_state::ReadResponse, Error = zebra_state::BoxError, > + Clone + Send + Sync + 'static, { let request = zebra_state::ReadRequest::ChainInfo; let response = state .oneshot(request.clone()) .await .map_err(|error| Error { code: ErrorCode::ServerError(0), message: error.to_string(), data: None, })?; let chain_info = match response { zebra_state::ReadResponse::ChainInfo(chain_info) => chain_info, _ => unreachable!("incorrect response to {request:?}"), }; Ok(chain_info) } /// Returns the transactions that are currently in `mempool`, or None if the /// `last_seen_tip_hash` from the mempool response doesn't match the tip hash from the state. /// /// You should call `check_synced_to_tip()` before calling this function. /// If the mempool is inactive because Zebra is not synced to the tip, returns no transactions. pub async fn fetch_mempool_transactions<Mempool>( mempool: Mempool, chain_tip_hash: block::Hash, ) -> Result<Option<Vec<VerifiedUnminedTx>>> where Mempool: Service< mempool::Request, Response = mempool::Response, Error = zebra_node_services::BoxError, > + 'static, Mempool::Future: Send, { let response = mempool .oneshot(mempool::Request::FullTransactions) .await .map_err(|error| Error { code: ErrorCode::ServerError(0), message: error.to_string(), data: None, })?; let mempool::Response::FullTransactions { transactions, last_seen_tip_hash, } = response else { unreachable!("unmatched response to a mempool::FullTransactions request") }; // Check that the mempool and state were in sync when we made the requests Ok((last_seen_tip_hash == chain_tip_hash).then_some(transactions)) } // - Response processing /// Generates and returns the coinbase transaction and default roots. /// /// If `like_zcashd` is true, try to match the coinbase transactions generated by `zcashd` /// in the `getblocktemplate` RPC. 
pub fn generate_coinbase_and_roots( network: Network, height: Height, miner_address: transparent::Address, mempool_txs: &[VerifiedUnminedTx], history_tree: Arc<zebra_chain::history_tree::HistoryTree>, like_zcashd: bool, extra_coinbase_data: Vec<u8>, ) -> (TransactionTemplate<NegativeOrZero>, DefaultRoots) { // Generate the coinbase transaction let miner_fee = calculate_miner_fee(mempool_txs); let coinbase_txn = generate_coinbase_transaction( network, height, miner_address, miner_fee, like_zcashd, extra_coinbase_data, ); // Calculate block default roots // // TODO: move expensive root, hash, and tree cryptography to a rayon thread? let default_roots = calculate_default_root_hashes(&coinbase_txn, mempool_txs, history_tree); let coinbase_txn = TransactionTemplate::from_coinbase(&coinbase_txn, miner_fee); (coinbase_txn, default_roots) } // - Coinbase transaction processing /// Returns a coinbase transaction for the supplied parameters. /// /// If `like_zcashd` is true, try to match the coinbase transactions generated by `zcashd` /// in the `getblocktemplate` RPC. pub fn generate_coinbase_transaction( network: Network, height: Height, miner_address: transparent::Address, miner_fee: Amount<NonNegative>, like_zcashd: bool, extra_coinbase_data: Vec<u8>, ) -> UnminedTx { let outputs = standard_coinbase_outputs(network, height, miner_address, miner_fee, like_zcashd); if like_zcashd { Transaction::new_v4_coinbase(network, height, outputs, like_zcashd, extra_coinbase_data) .into() } else { Transaction::new_v5_coinbase(network, height, outputs, extra_coinbase_data).into() } } /// Returns the total miner fee for `mempool_txs`. pub fn calculate_miner_fee(mempool_txs: &[VerifiedUnminedTx]) -> Amount<NonNegative> { let miner_fee: amount::Result<Amount<NonNegative>> = mempool_txs.iter().map(|tx| tx.miner_fee).sum(); miner_fee.expect( "invalid selected transactions: \ fees in a valid block can not be more than MAX_MONEY", ) } /// Returns the standard funding stream and miner reward transparent output scripts /// for `network`, `height` and `miner_fee`. /// /// Only works for post-Canopy heights. /// /// If `like_zcashd` is true, try to match the coinbase transactions generated by `zcashd` /// in the `getblocktemplate` RPC. pub fn standard_coinbase_outputs( network: Network, height: Height, miner_address: transparent::Address, miner_fee: Amount<NonNegative>, like_zcashd: bool, ) -> Vec<(Amount<NonNegative>, transparent::Script)> { let funding_streams = funding_stream_values(height, network) .expect("funding stream value calculations are valid for reasonable chain heights"); // Optional TODO: move this into a zebra_consensus function? let funding_streams: HashMap< FundingStreamReceiver, (Amount<NonNegative>, transparent::Address), > = funding_streams .into_iter() .map(|(receiver, amount)| { ( receiver, (amount, funding_stream_address(height, network, receiver)), ) }) .collect(); let miner_reward = miner_subsidy(height, network) .expect("reward calculations are valid for reasonable chain heights") + miner_fee; let miner_reward = miner_reward.expect("reward calculations are valid for reasonable chain heights"); combine_coinbase_outputs(funding_streams, miner_address, miner_reward, like_zcashd) } /// Combine the miner reward and funding streams into a list of coinbase amounts and addresses. /// /// If `like_zcashd` is true, try to match the coinbase transactions generated by `zcashd` /// in the `getblocktemplate` RPC. 
fn combine_coinbase_outputs( funding_streams: HashMap<FundingStreamReceiver, (Amount<NonNegative>, transparent::Address)>, miner_address: transparent::Address, miner_reward: Amount<NonNegative>, like_zcashd: bool, ) -> Vec<(Amount<NonNegative>, transparent::Script)> { // Combine all the funding streams with the miner reward. let mut coinbase_outputs: Vec<(Amount<NonNegative>, transparent::Address)> = funding_streams .into_iter() .map(|(_receiver, (amount, address))| (amount, address)) .collect(); coinbase_outputs.push((miner_reward, miner_address)); let mut coinbase_outputs: Vec<(Amount<NonNegative>, transparent::Script)> = coinbase_outputs .iter() .map(|(amount, address)| (*amount, address.create_script_from_address())) .collect(); // The HashMap returns funding streams in an arbitrary order, // but Zebra's snapshot tests expect the same order every time. if like_zcashd { // zcashd sorts outputs in serialized data order, excluding the length field coinbase_outputs.sort_by_key(|(_amount, script)| script.clone()); } else { // Zebra sorts by amount then script. // // Since the sort is stable, equal amounts will remain sorted by script. coinbase_outputs.sort_by_key(|(_amount, script)| script.clone()); coinbase_outputs.sort_by_key(|(amount, _script)| *amount); } coinbase_outputs } // - Transaction roots processing /// Returns the default block roots for the supplied coinbase and mempool transactions, /// and the supplied history tree. /// /// This function runs expensive cryptographic operations. pub fn calculate_default_root_hashes( coinbase_txn: &UnminedTx, mempool_txs: &[VerifiedUnminedTx], history_tree: Arc<zebra_chain::history_tree::HistoryTree>, ) -> DefaultRoots { let (merkle_root, auth_data_root) = calculate_transaction_roots(coinbase_txn, mempool_txs); let chain_history_root = history_tree.hash().expect("history tree can't be empty"); let block_commitments_hash = ChainHistoryBlockTxAuthCommitmentHash::from_commitments( &chain_history_root, &auth_data_root, ); DefaultRoots { merkle_root, chain_history_root, auth_data_root, block_commitments_hash, } } /// Returns the transaction effecting and authorizing roots /// for `coinbase_txn` and `mempool_txs`, which are used in the block header. // // TODO: should this be spawned into a cryptographic operations pool? // (it would only matter if there were a lot of small transactions in a block) pub fn calculate_transaction_roots( coinbase_txn: &UnminedTx, mempool_txs: &[VerifiedUnminedTx], ) -> (merkle::Root, AuthDataRoot) { let block_transactions = || iter::once(coinbase_txn).chain(mempool_txs.iter().map(|tx| &tx.transaction)); let merkle_root = block_transactions().cloned().collect(); let auth_data_root = block_transactions().cloned().collect(); (merkle_root, auth_data_root) }
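// A minimal sketch of the overflow-checked summation behind `calculate_miner_fee`
// above: `Amount` arithmetic in zebra_chain is fallible, and summing an iterator of
// fees yields a `Result` that is only unwrapped because verified mempool transactions
// cannot exceed MAX_MONEY. Here `checked_add` over plain `u64` stands in for the real
// `Amount<NonNegative>` arithmetic.
fn total_miner_fee(fees: &[u64]) -> Option<u64> {
    // `try_fold` stops at the first overflow, mirroring a fallible `sum()`.
    fees.iter().try_fold(0u64, |acc, &fee| acc.checked_add(fee))
}

fn main() {
    assert_eq!(total_miner_fee(&[10, 20, 30]), Some(60));
    // An overflowing total is reported instead of wrapping silently.
    assert_eq!(total_miner_fee(&[u64::MAX, 1]), None);
}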
parameters::Network, serialization::ZcashDeserializeInto, transaction::{Transaction, UnminedTx, VerifiedUnminedTx},
random_line_split
server.rs
// // Copyright (c) Pirmin Kalberer. All rights reserved. // Licensed under the MIT License. See LICENSE file in the project root for full license information. // use core::config::ApplicationCfg; use datasource_type::Datasources; use datasource::DatasourceInput; use core::grid::Grid; use core::layer::Layer; use service::tileset::Tileset; use mvt_service::MvtService; use read_qgs; use core::{parse_config, read_config, Config}; use core::config::DEFAULT_CONFIG; use serde_json; use cache::{Filecache, Nocache, Tilecache}; use nickel::{HttpRouter, MediaType, MiddlewareResult, Nickel, Options, Request, Response, StaticFilesHandler}; use hyper::header::{AccessControlAllowMethods, AccessControlAllowOrigin, CacheControl, CacheDirective, ContentEncoding, Encoding}; use hyper::method::Method; use hyper::header; use std::collections::HashMap; use std::str::FromStr; use clap::ArgMatches; use std::str; use std::process; use open; fn log_request<'mw>( req: &mut Request<MvtService>, res: Response<'mw, MvtService>, ) -> MiddlewareResult<'mw, MvtService> { info!("{} {}", req.origin.method, req.origin.uri); res.next_middleware() } header! { (ContentType, "Content-Type") => [String] } #[derive(RustcEncodable)] struct TilesetInfo { name: String, layerinfos: String, hasviewer: bool, } impl TilesetInfo { fn from_tileset(set: &Tileset) -> TilesetInfo
layerinfos: format!("{}", layerinfos.join(", ")), hasviewer: hasviewer, } } } struct StaticFiles { files: HashMap<&'static str, (&'static [u8], MediaType)>, } impl StaticFiles { fn init() -> StaticFiles { let mut static_files = StaticFiles { files: HashMap::new(), }; static_files.add( "favicon.ico", include_bytes!("static/favicon.ico"), MediaType::Ico, ); static_files.add( "index.html", include_bytes!("static/index.html"), MediaType::Html, ); static_files.add( "viewer.js", include_bytes!("static/viewer.js"), MediaType::Js, ); static_files.add( "viewer.css", include_bytes!("static/viewer.css"), MediaType::Css, ); static_files.add( "maputnik.html", include_bytes!("static/maputnik.html"), MediaType::Html, ); static_files.add( "maputnik.js", include_bytes!("static/maputnik.js"), MediaType::Js, ); static_files.add( "maputnik-vendor.js", include_bytes!("static/maputnik-vendor.js"), MediaType::Js, ); static_files.add( "img/maputnik.png", include_bytes!("static/img/maputnik.png"), MediaType::Png, ); static_files.add( "fonts/Roboto-Regular.ttf", include_bytes!("static/fonts/Roboto-Regular.ttf"), MediaType::Ttf, ); static_files.add( "fonts/Roboto-Medium.ttf", include_bytes!("static/fonts/Roboto-Medium.ttf"), MediaType::Ttf, ); static_files } fn add(&mut self, name: &'static str, data: &'static [u8], media_type: MediaType) { self.files.insert(name, (data, media_type)); } fn content(&self, base: Option<&str>, name: String) -> Option<&(&[u8], MediaType)> { let mut key = if name == "." { "index.html".to_string() } else { name }; if let Some(path) = base { key = format!("{}/{}", path, key); } self.files.get(&key as &str) } } include!(concat!(env!("OUT_DIR"), "/fonts.rs")); static DINO: &'static str = " xxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxx xxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx x xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxx xxxxxxxxxxxxxx xxxxxx xxxxxxxxxxxx xxxxxxxxxxx xxxxxxxxxx xxxxxxxxx xxxxxxx xxxxxx xxxxxxx"; fn set_layer_buffer_defaults(layer: &mut Layer, simplify: bool, clip: bool) { layer.simplify = simplify; if simplify { // Limit features by default unless simplify is set to false layer.query_limit = Some(1000); } layer.buffer_size = match layer.geometry_type { Some(ref geom) => { if clip { if geom.contains("POLYGON") { Some(1) } else { Some(0) } } else { None } } None => None, }; } pub fn service_from_args(args: &ArgMatches) -> (MvtService, ApplicationCfg) { if let Some(cfgpath) = args.value_of("config") { info!("Reading configuration from '{}'", cfgpath); for argname in vec!["dbconn", "datasource", "qgs"] { if args.value_of(argname).is_some() { warn!("Ignoring argument `{}`", argname); } } let config = read_config(cfgpath).unwrap_or_else(|err| { println!("Error reading configuration - {} ", err); process::exit(1) }); let mut svc = MvtService::from_config(&config).unwrap_or_else(|err| { println!("Error reading configuration - {} ", err); process::exit(1) }); svc.connect(); (svc, config) } else { let bind = args.value_of("bind").unwrap_or("127.0.0.1"); let port = u16::from_str(args.value_of("port").unwrap_or("6767")).expect("Invalid 
port number"); let mut config: ApplicationCfg = parse_config(DEFAULT_CONFIG.to_string(), "").unwrap(); config.webserver.bind = Some(bind.to_string()); config.webserver.port = Some(port); let cache = match args.value_of("cache") { None => Tilecache::Nocache(Nocache), Some(dir) => Tilecache::Filecache(Filecache { basepath: dir.to_string(), baseurl: None, }), }; let simplify = bool::from_str(args.value_of("simplify").unwrap_or("true")).unwrap_or(false); let clip = bool::from_str(args.value_of("clip").unwrap_or("true")).unwrap_or(false); let grid = Grid::web_mercator(); let mut tilesets = Vec::new(); let datasources = if let Some(qgs) = args.value_of("qgs") { info!("Reading configuration from '{}'", qgs); let (datasources, mut tileset) = read_qgs(qgs); for layer in tileset.layers.iter_mut() { set_layer_buffer_defaults(layer, simplify, clip); } tilesets.push(tileset); datasources } else { let datasources = Datasources::from_args(args); if datasources.datasources.is_empty() { println!("Either 'config', 'dbconn' or 'datasource' is required"); process::exit(1) } let detect_geometry_types = true; //TODO: add option (maybe slow for many geometries) for (_name, ds) in &datasources.datasources { let dsconn = ds.connected(); let mut layers = dsconn.detect_layers(detect_geometry_types); while let Some(mut l) = layers.pop() { let extent = dsconn.layer_extent(&l); set_layer_buffer_defaults(&mut l, simplify, clip); let tileset = Tileset { name: l.name.clone(), attribution: None, extent: extent, layers: vec![l], }; tilesets.push(tileset); } } datasources }; let mut svc = MvtService { datasources: datasources, grid: grid, tilesets: tilesets, cache: cache, }; svc.connect(); //TODO: ugly - we connect twice (svc, config) } } pub fn webserver(args: &ArgMatches) { let (mut service, config) = service_from_args(args); let mvt_viewer = config.service.mvt.viewer; let bind: &str = &config.webserver.bind.unwrap_or("127.0.0.1".to_string()); let port = config.webserver.port.unwrap_or(6767); let threads = config.webserver.threads.unwrap_or(4) as usize; let cache_max_age = config.webserver.cache_control_max_age.unwrap_or(300); service.prepare_feature_queries(); service.init_cache(); let mut tileset_infos: Vec<TilesetInfo> = service .tilesets .iter() .map(|set| TilesetInfo::from_tileset(&set)) .collect(); tileset_infos.sort_by_key(|ti| ti.name.clone()); let mut server = Nickel::with_data(service); server.options = Options::default().thread_count(Some(threads)); // Avoid thread exhaustion caused by hypers keep_alive handling (https://github.com/hyperium/hyper/issues/368) server.keep_alive_timeout(None); server.utilize(log_request); server.get( "/**(.style)?.json", middleware! { |_req, mut res| res.set(MediaType::Json); res.set(AccessControlAllowMethods(vec![Method::Get])); res.set(AccessControlAllowOrigin::Any); }, ); server.get( "/index.json", middleware! { |_req, res| let service: &MvtService = res.server_data(); let json = service.get_mvt_metadata().unwrap(); serde_json::to_vec(&json).unwrap() }, ); // Font list for Maputnik server.get( "/fontstacks.json", middleware! { |_req, _res| let json = json!(["Roboto Medium","Roboto Regular"]); serde_json::to_vec(&json).unwrap() }, ); // Fonts for Maputnik // Example: /fonts/Open%20Sans%20Regular,Arial%20Unicode%20MS%20Regular/0-255.pbf server.get( "/fonts/:fonts/:range.pbf", middleware! 
{ |req, mut res| let fontpbfs = fonts(); let fontlist = req.param("fonts").unwrap(); let range = req.param("range").unwrap(); let mut fonts = fontlist.split(",").collect::<Vec<_>>(); fonts.push("Roboto Regular"); // Fallback for font in fonts { let key = format!("fonts/{}/{}.pbf", font.replace("%20", " "), range); debug!("Font lookup: {}", key); if let Some(pbf) = fontpbfs.get(&key as &str) { res.set_header_fallback(|| ContentType("application/x-protobuf".to_owned())); res.set_header_fallback(|| ContentEncoding(vec![Encoding::Gzip])); return res.send(*pbf) } } }, ); server.get( "/:tileset.json", middleware! { |req, res| let service: &MvtService = res.server_data(); let tileset = req.param("tileset").unwrap(); let host = req.origin.headers.get::<header::Host>().unwrap(); let baseurl = format!("http://{}:{}", host.hostname, host.port.unwrap_or(80)); let json = service.get_tilejson(&baseurl, &tileset).unwrap(); serde_json::to_vec(&json).unwrap() }, ); server.get( "/:tileset.style.json", middleware! { |req, res| let service: &MvtService = res.server_data(); let tileset = req.param("tileset").unwrap(); let host = req.origin.headers.get::<header::Host>().unwrap(); let baseurl = format!("http://{}:{}", host.hostname, host.port.unwrap_or(80)); let json = service.get_stylejson(&baseurl, &tileset).unwrap(); serde_json::to_vec(&json).unwrap() }, ); server.get( "/:tileset/metadata.json", middleware! { |req, res| let service: &MvtService = res.server_data(); let tileset = req.param("tileset").unwrap(); let json = service.get_mbtiles_metadata(&tileset).unwrap(); serde_json::to_vec(&json).unwrap() }, ); server.get( "/:tileset/:z/:x/:y.pbf", middleware! { |req, mut res| let service: &MvtService = res.server_data(); let tileset = req.param("tileset").unwrap(); let z = req.param("z").unwrap().parse::<u8>().unwrap(); let x = req.param("x").unwrap().parse::<u32>().unwrap(); let y = req.param("y").unwrap().parse::<u32>().unwrap(); let accept_encoding = req.origin.headers.get::<header::AcceptEncoding>(); let gzip = accept_encoding.is_some() && accept_encoding.unwrap().iter().any( |ref qit| qit.item == Encoding::Gzip ); let tile = service.tile_cached(tileset, x, y, z, gzip); if gzip { res.set_header_fallback(|| ContentEncoding(vec![Encoding::Gzip])); } res.set_header_fallback(|| ContentType("application/x-protobuf".to_owned())); res.set_header_fallback(|| CacheControl(vec![CacheDirective::MaxAge(cache_max_age)])); //res.set_header_fallback(|| ContentLength(tile.len() as u64)); res.set(AccessControlAllowMethods(vec![Method::Get])); res.set(AccessControlAllowOrigin::Any); tile }, ); if mvt_viewer { let static_files = StaticFiles::init(); server.get( "/(:base/)?:static", middleware! { |req, mut res| let mut name = req.param("static").unwrap().to_string(); if let Some(format) = req.param("format") { name = format!("{}.{}", name, format); } if let Some(content) = static_files.content(req.param("base"), name) { res.set(content.1); return res.send(content.0) } }, ); } server.get("/**", StaticFilesHandler::new("public/")); println!("{}", DINO); let _listening = server .listen((bind, port)) .expect("Failed to launch server"); let openbrowser = bool::from_str(args.value_of("openbrowser").unwrap_or("true")).unwrap_or(false); if openbrowser && mvt_viewer { let _res = open::that(format!("http://{}:{}", bind, port)); } } pub fn gen_config(args: &ArgMatches) -> String { let toml = r#" [webserver] # Bind address. Use 0.0.0.0 to listen on all addresses. 
bind = "127.0.0.1" port = 6767 threads = 4 #cache_control_max_age = 43200 "#; let mut config; if args.value_of("dbconn").is_some() || args.value_of("datasource").is_some() || args.value_of("qgs").is_some() { let (service, _) = service_from_args(args); config = service.gen_runtime_config(); } else { config = MvtService::gen_config(); } config.push_str(toml); config } #[test] fn test_gen_config() { use core::parse_config; let args = ArgMatches::new(); let toml = gen_config(&args); println!("{}", toml); assert_eq!(Some("# t-rex configuration"), toml.lines().next()); let config = parse_config(toml, "").unwrap(); let _service = MvtService::from_config(&config).unwrap(); //assert_eq!(service.input.connection_url, // "postgresql://user:pass@host/database"); } #[test] #[ignore] fn test_runtime_config() { use std::env; use clap::App; use core::parse_config; if env::var("DBCONN").is_err() { panic!("DBCONN undefined"); } let args = App::new("test") .args_from_usage("--dbconn=[SPEC] 'PostGIS connection postgresql://USER@HOST/DBNAME'") .get_matches_from(vec!["", "--dbconn", &env::var("DBCONN").unwrap()]); let toml = gen_config(&args); println!("{}", toml); assert_eq!(Some("# t-rex configuration"), toml.lines().next()); let config = parse_config(toml, "").unwrap(); let _service = MvtService::from_config(&config).unwrap(); //assert_eq!(service.input.connection_url, env::var("DBCONN").unwrap()); }
{ let mut hasviewer = true; let layerinfos: Vec<String> = set.layers .iter() .map(|l| { let geom_type = l.geometry_type.clone().unwrap_or("UNKNOWN".to_string()); hasviewer = hasviewer && [ "POINT", "LINESTRING", "POLYGON", "MULTIPOINT", "MULTILINESTRING", "MULTIPOLYGON", ].contains(&(&geom_type as &str)); format!("{} [{}]", &l.name, &geom_type) }) .collect(); TilesetInfo { name: set.name.clone(),
identifier_body
server.rs
// // Copyright (c) Pirmin Kalberer. All rights reserved. // Licensed under the MIT License. See LICENSE file in the project root for full license information. // use core::config::ApplicationCfg; use datasource_type::Datasources; use datasource::DatasourceInput; use core::grid::Grid; use core::layer::Layer; use service::tileset::Tileset; use mvt_service::MvtService; use read_qgs; use core::{parse_config, read_config, Config}; use core::config::DEFAULT_CONFIG; use serde_json; use cache::{Filecache, Nocache, Tilecache}; use nickel::{HttpRouter, MediaType, MiddlewareResult, Nickel, Options, Request, Response, StaticFilesHandler}; use hyper::header::{AccessControlAllowMethods, AccessControlAllowOrigin, CacheControl, CacheDirective, ContentEncoding, Encoding}; use hyper::method::Method; use hyper::header; use std::collections::HashMap; use std::str::FromStr; use clap::ArgMatches; use std::str; use std::process; use open; fn log_request<'mw>( req: &mut Request<MvtService>, res: Response<'mw, MvtService>, ) -> MiddlewareResult<'mw, MvtService> { info!("{} {}", req.origin.method, req.origin.uri); res.next_middleware() } header! { (ContentType, "Content-Type") => [String] } #[derive(RustcEncodable)] struct TilesetInfo { name: String, layerinfos: String, hasviewer: bool, } impl TilesetInfo { fn from_tileset(set: &Tileset) -> TilesetInfo { let mut hasviewer = true; let layerinfos: Vec<String> = set.layers .iter() .map(|l| { let geom_type = l.geometry_type.clone().unwrap_or("UNKNOWN".to_string()); hasviewer = hasviewer && [ "POINT", "LINESTRING", "POLYGON", "MULTIPOINT", "MULTILINESTRING", "MULTIPOLYGON", ].contains(&(&geom_type as &str)); format!("{} [{}]", &l.name, &geom_type) }) .collect(); TilesetInfo { name: set.name.clone(), layerinfos: format!("{}", layerinfos.join(", ")), hasviewer: hasviewer, } } } struct StaticFiles { files: HashMap<&'static str, (&'static [u8], MediaType)>, } impl StaticFiles { fn init() -> StaticFiles { let mut static_files = StaticFiles { files: HashMap::new(), }; static_files.add( "favicon.ico", include_bytes!("static/favicon.ico"), MediaType::Ico, ); static_files.add( "index.html", include_bytes!("static/index.html"), MediaType::Html, ); static_files.add( "viewer.js", include_bytes!("static/viewer.js"), MediaType::Js, ); static_files.add( "viewer.css", include_bytes!("static/viewer.css"), MediaType::Css, ); static_files.add( "maputnik.html", include_bytes!("static/maputnik.html"), MediaType::Html, ); static_files.add( "maputnik.js", include_bytes!("static/maputnik.js"), MediaType::Js, ); static_files.add( "maputnik-vendor.js", include_bytes!("static/maputnik-vendor.js"), MediaType::Js, ); static_files.add( "img/maputnik.png", include_bytes!("static/img/maputnik.png"), MediaType::Png, ); static_files.add( "fonts/Roboto-Regular.ttf", include_bytes!("static/fonts/Roboto-Regular.ttf"), MediaType::Ttf, ); static_files.add( "fonts/Roboto-Medium.ttf", include_bytes!("static/fonts/Roboto-Medium.ttf"), MediaType::Ttf, ); static_files } fn add(&mut self, name: &'static str, data: &'static [u8], media_type: MediaType) { self.files.insert(name, (data, media_type)); } fn content(&self, base: Option<&str>, name: String) -> Option<&(&[u8], MediaType)> { let mut key = if name == "." 
{ "index.html".to_string() } else { name }; if let Some(path) = base { key = format!("{}/{}", path, key); } self.files.get(&key as &str) } } include!(concat!(env!("OUT_DIR"), "/fonts.rs")); static DINO: &'static str = " xxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxx xxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx x xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxx xxxxxxxxxxxxxx xxxxxx xxxxxxxxxxxx xxxxxxxxxxx xxxxxxxxxx xxxxxxxxx xxxxxxx xxxxxx xxxxxxx"; fn set_layer_buffer_defaults(layer: &mut Layer, simplify: bool, clip: bool) { layer.simplify = simplify; if simplify { // Limit features by default unless simplify is set to false layer.query_limit = Some(1000); } layer.buffer_size = match layer.geometry_type { Some(ref geom) => { if clip { if geom.contains("POLYGON") { Some(1) } else
} else { None } } None => None, }; } pub fn service_from_args(args: &ArgMatches) -> (MvtService, ApplicationCfg) { if let Some(cfgpath) = args.value_of("config") { info!("Reading configuration from '{}'", cfgpath); for argname in vec!["dbconn", "datasource", "qgs"] { if args.value_of(argname).is_some() { warn!("Ignoring argument `{}`", argname); } } let config = read_config(cfgpath).unwrap_or_else(|err| { println!("Error reading configuration - {} ", err); process::exit(1) }); let mut svc = MvtService::from_config(&config).unwrap_or_else(|err| { println!("Error reading configuration - {} ", err); process::exit(1) }); svc.connect(); (svc, config) } else { let bind = args.value_of("bind").unwrap_or("127.0.0.1"); let port = u16::from_str(args.value_of("port").unwrap_or("6767")).expect("Invalid port number"); let mut config: ApplicationCfg = parse_config(DEFAULT_CONFIG.to_string(), "").unwrap(); config.webserver.bind = Some(bind.to_string()); config.webserver.port = Some(port); let cache = match args.value_of("cache") { None => Tilecache::Nocache(Nocache), Some(dir) => Tilecache::Filecache(Filecache { basepath: dir.to_string(), baseurl: None, }), }; let simplify = bool::from_str(args.value_of("simplify").unwrap_or("true")).unwrap_or(false); let clip = bool::from_str(args.value_of("clip").unwrap_or("true")).unwrap_or(false); let grid = Grid::web_mercator(); let mut tilesets = Vec::new(); let datasources = if let Some(qgs) = args.value_of("qgs") { info!("Reading configuration from '{}'", qgs); let (datasources, mut tileset) = read_qgs(qgs); for layer in tileset.layers.iter_mut() { set_layer_buffer_defaults(layer, simplify, clip); } tilesets.push(tileset); datasources } else { let datasources = Datasources::from_args(args); if datasources.datasources.is_empty() { println!("Either 'config', 'dbconn' or 'datasource' is required"); process::exit(1) } let detect_geometry_types = true; //TODO: add option (maybe slow for many geometries) for (_name, ds) in &datasources.datasources { let dsconn = ds.connected(); let mut layers = dsconn.detect_layers(detect_geometry_types); while let Some(mut l) = layers.pop() { let extent = dsconn.layer_extent(&l); set_layer_buffer_defaults(&mut l, simplify, clip); let tileset = Tileset { name: l.name.clone(), attribution: None, extent: extent, layers: vec![l], }; tilesets.push(tileset); } } datasources }; let mut svc = MvtService { datasources: datasources, grid: grid, tilesets: tilesets, cache: cache, }; svc.connect(); //TODO: ugly - we connect twice (svc, config) } } pub fn webserver(args: &ArgMatches) { let (mut service, config) = service_from_args(args); let mvt_viewer = config.service.mvt.viewer; let bind: &str = &config.webserver.bind.unwrap_or("127.0.0.1".to_string()); let port = config.webserver.port.unwrap_or(6767); let threads = config.webserver.threads.unwrap_or(4) as usize; let cache_max_age = config.webserver.cache_control_max_age.unwrap_or(300); service.prepare_feature_queries(); service.init_cache(); let mut tileset_infos: Vec<TilesetInfo> = service .tilesets .iter() .map(|set| TilesetInfo::from_tileset(&set)) .collect(); tileset_infos.sort_by_key(|ti| ti.name.clone()); let mut server = Nickel::with_data(service); server.options = Options::default().thread_count(Some(threads)); // Avoid thread exhaustion caused by hyper's keep_alive handling (https://github.com/hyperium/hyper/issues/368) server.keep_alive_timeout(None); server.utilize(log_request); server.get( "/**(.style)?.json", middleware! 
{ |_req, mut res| res.set(MediaType::Json); res.set(AccessControlAllowMethods(vec![Method::Get])); res.set(AccessControlAllowOrigin::Any); }, ); server.get( "/index.json", middleware! { |_req, res| let service: &MvtService = res.server_data(); let json = service.get_mvt_metadata().unwrap(); serde_json::to_vec(&json).unwrap() }, ); // Font list for Maputnik server.get( "/fontstacks.json", middleware! { |_req, _res| let json = json!(["Roboto Medium","Roboto Regular"]); serde_json::to_vec(&json).unwrap() }, ); // Fonts for Maputnik // Example: /fonts/Open%20Sans%20Regular,Arial%20Unicode%20MS%20Regular/0-255.pbf server.get( "/fonts/:fonts/:range.pbf", middleware! { |req, mut res| let fontpbfs = fonts(); let fontlist = req.param("fonts").unwrap(); let range = req.param("range").unwrap(); let mut fonts = fontlist.split(",").collect::<Vec<_>>(); fonts.push("Roboto Regular"); // Fallback for font in fonts { let key = format!("fonts/{}/{}.pbf", font.replace("%20", " "), range); debug!("Font lookup: {}", key); if let Some(pbf) = fontpbfs.get(&key as &str) { res.set_header_fallback(|| ContentType("application/x-protobuf".to_owned())); res.set_header_fallback(|| ContentEncoding(vec![Encoding::Gzip])); return res.send(*pbf) } } }, ); server.get( "/:tileset.json", middleware! { |req, res| let service: &MvtService = res.server_data(); let tileset = req.param("tileset").unwrap(); let host = req.origin.headers.get::<header::Host>().unwrap(); let baseurl = format!("http://{}:{}", host.hostname, host.port.unwrap_or(80)); let json = service.get_tilejson(&baseurl, &tileset).unwrap(); serde_json::to_vec(&json).unwrap() }, ); server.get( "/:tileset.style.json", middleware! { |req, res| let service: &MvtService = res.server_data(); let tileset = req.param("tileset").unwrap(); let host = req.origin.headers.get::<header::Host>().unwrap(); let baseurl = format!("http://{}:{}", host.hostname, host.port.unwrap_or(80)); let json = service.get_stylejson(&baseurl, &tileset).unwrap(); serde_json::to_vec(&json).unwrap() }, ); server.get( "/:tileset/metadata.json", middleware! { |req, res| let service: &MvtService = res.server_data(); let tileset = req.param("tileset").unwrap(); let json = service.get_mbtiles_metadata(&tileset).unwrap(); serde_json::to_vec(&json).unwrap() }, ); server.get( "/:tileset/:z/:x/:y.pbf", middleware! { |req, mut res| let service: &MvtService = res.server_data(); let tileset = req.param("tileset").unwrap(); let z = req.param("z").unwrap().parse::<u8>().unwrap(); let x = req.param("x").unwrap().parse::<u32>().unwrap(); let y = req.param("y").unwrap().parse::<u32>().unwrap(); let accept_encoding = req.origin.headers.get::<header::AcceptEncoding>(); let gzip = accept_encoding.is_some() && accept_encoding.unwrap().iter().any( |ref qit| qit.item == Encoding::Gzip ); let tile = service.tile_cached(tileset, x, y, z, gzip); if gzip { res.set_header_fallback(|| ContentEncoding(vec![Encoding::Gzip])); } res.set_header_fallback(|| ContentType("application/x-protobuf".to_owned())); res.set_header_fallback(|| CacheControl(vec![CacheDirective::MaxAge(cache_max_age)])); //res.set_header_fallback(|| ContentLength(tile.len() as u64)); res.set(AccessControlAllowMethods(vec![Method::Get])); res.set(AccessControlAllowOrigin::Any); tile }, ); if mvt_viewer { let static_files = StaticFiles::init(); server.get( "/(:base/)?:static", middleware! 
{ |req, mut res| let mut name = req.param("static").unwrap().to_string(); if let Some(format) = req.param("format") { name = format!("{}.{}", name, format); } if let Some(content) = static_files.content(req.param("base"), name) { res.set(content.1); return res.send(content.0) } }, ); } server.get("/**", StaticFilesHandler::new("public/")); println!("{}", DINO); let _listening = server .listen((bind, port)) .expect("Failed to launch server"); let openbrowser = bool::from_str(args.value_of("openbrowser").unwrap_or("true")).unwrap_or(false); if openbrowser && mvt_viewer { let _res = open::that(format!("http://{}:{}", bind, port)); } } pub fn gen_config(args: &ArgMatches) -> String { let toml = r#" [webserver] # Bind address. Use 0.0.0.0 to listen on all addresses. bind = "127.0.0.1" port = 6767 threads = 4 #cache_control_max_age = 43200 "#; let mut config; if args.value_of("dbconn").is_some() || args.value_of("datasource").is_some() || args.value_of("qgs").is_some() { let (service, _) = service_from_args(args); config = service.gen_runtime_config(); } else { config = MvtService::gen_config(); } config.push_str(toml); config } #[test] fn test_gen_config() { use core::parse_config; let args = ArgMatches::new(); let toml = gen_config(&args); println!("{}", toml); assert_eq!(Some("# t-rex configuration"), toml.lines().next()); let config = parse_config(toml, "").unwrap(); let _service = MvtService::from_config(&config).unwrap(); //assert_eq!(service.input.connection_url, // "postgresql://user:pass@host/database"); } #[test] #[ignore] fn test_runtime_config() { use std::env; use clap::App; use core::parse_config; if env::var("DBCONN").is_err() { panic!("DBCONN undefined"); } let args = App::new("test") .args_from_usage("--dbconn=[SPEC] 'PostGIS connection postgresql://USER@HOST/DBNAME'") .get_matches_from(vec!["", "--dbconn", &env::var("DBCONN").unwrap()]); let toml = gen_config(&args); println!("{}", toml); assert_eq!(Some("# t-rex configuration"), toml.lines().next()); let config = parse_config(toml, "").unwrap(); let _service = MvtService::from_config(&config).unwrap(); //assert_eq!(service.input.connection_url, env::var("DBCONN").unwrap()); }
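// A minimal sketch of the font-fallback lookup in the /fonts/:fonts/:range.pbf
// handler above: the requested comma-separated font list is extended with
// "Roboto Regular" and the first glyph file found wins. A HashMap of byte slices
// stands in for the generated `fonts()` table.
use std::collections::HashMap;

fn lookup_font<'a>(
    fontpbfs: &HashMap<String, &'a [u8]>,
    fontlist: &str,
    range: &str,
) -> Option<&'a [u8]> {
    let mut fonts: Vec<&str> = fontlist.split(',').collect();
    fonts.push("Roboto Regular"); // fallback, as in the handler
    for font in fonts {
        // URL-encoded spaces are decoded before the key lookup.
        let key = format!("fonts/{}/{}.pbf", font.replace("%20", " "), range);
        if let Some(pbf) = fontpbfs.get(&key) {
            return Some(*pbf);
        }
    }
    None
}

fn main() {
    let mut fontpbfs: HashMap<String, &[u8]> = HashMap::new();
    fontpbfs.insert("fonts/Roboto Regular/0-255.pbf".to_string(), &[1, 2, 3]);
    // An unknown font falls back to Roboto Regular.
    let pbf = lookup_font(&fontpbfs, "Open%20Sans%20Regular", "0-255");
    assert_eq!(pbf, Some(&[1u8, 2, 3][..]));
}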
{ Some(0) }
conditional_block
server.rs
// // Copyright (c) Pirmin Kalberer. All rights reserved. // Licensed under the MIT License. See LICENSE file in the project root for full license information. // use core::config::ApplicationCfg; use datasource_type::Datasources; use datasource::DatasourceInput; use core::grid::Grid; use core::layer::Layer; use service::tileset::Tileset; use mvt_service::MvtService; use read_qgs; use core::{parse_config, read_config, Config}; use core::config::DEFAULT_CONFIG; use serde_json; use cache::{Filecache, Nocache, Tilecache}; use nickel::{HttpRouter, MediaType, MiddlewareResult, Nickel, Options, Request, Response, StaticFilesHandler}; use hyper::header::{AccessControlAllowMethods, AccessControlAllowOrigin, CacheControl, CacheDirective, ContentEncoding, Encoding}; use hyper::method::Method; use hyper::header; use std::collections::HashMap; use std::str::FromStr; use clap::ArgMatches; use std::str; use std::process; use open; fn log_request<'mw>( req: &mut Request<MvtService>, res: Response<'mw, MvtService>, ) -> MiddlewareResult<'mw, MvtService> { info!("{} {}", req.origin.method, req.origin.uri); res.next_middleware() } header! { (ContentType, "Content-Type") => [String] } #[derive(RustcEncodable)] struct TilesetInfo { name: String, layerinfos: String, hasviewer: bool, } impl TilesetInfo { fn from_tileset(set: &Tileset) -> TilesetInfo { let mut hasviewer = true; let layerinfos: Vec<String> = set.layers .iter() .map(|l| { let geom_type = l.geometry_type.clone().unwrap_or("UNKNOWN".to_string()); hasviewer = hasviewer && [ "POINT", "LINESTRING", "POLYGON", "MULTIPOINT", "MULTILINESTRING", "MULTIPOLYGON", ].contains(&(&geom_type as &str)); format!("{} [{}]", &l.name, &geom_type) }) .collect(); TilesetInfo { name: set.name.clone(), layerinfos: format!("{}", layerinfos.join(", ")), hasviewer: hasviewer, } } } struct StaticFiles { files: HashMap<&'static str, (&'static [u8], MediaType)>, } impl StaticFiles { fn
() -> StaticFiles { let mut static_files = StaticFiles { files: HashMap::new(), }; static_files.add( "favicon.ico", include_bytes!("static/favicon.ico"), MediaType::Ico, ); static_files.add( "index.html", include_bytes!("static/index.html"), MediaType::Html, ); static_files.add( "viewer.js", include_bytes!("static/viewer.js"), MediaType::Js, ); static_files.add( "viewer.css", include_bytes!("static/viewer.css"), MediaType::Css, ); static_files.add( "maputnik.html", include_bytes!("static/maputnik.html"), MediaType::Html, ); static_files.add( "maputnik.js", include_bytes!("static/maputnik.js"), MediaType::Js, ); static_files.add( "maputnik-vendor.js", include_bytes!("static/maputnik-vendor.js"), MediaType::Js, ); static_files.add( "img/maputnik.png", include_bytes!("static/img/maputnik.png"), MediaType::Png, ); static_files.add( "fonts/Roboto-Regular.ttf", include_bytes!("static/fonts/Roboto-Regular.ttf"), MediaType::Ttf, ); static_files.add( "fonts/Roboto-Medium.ttf", include_bytes!("static/fonts/Roboto-Medium.ttf"), MediaType::Ttf, ); static_files } fn add(&mut self, name: &'static str, data: &'static [u8], media_type: MediaType) { self.files.insert(name, (data, media_type)); } fn content(&self, base: Option<&str>, name: String) -> Option<&(&[u8], MediaType)> { let mut key = if name == "." { "index.html".to_string() } else { name }; if let Some(path) = base { key = format!("{}/{}", path, key); } self.files.get(&key as &str) } } include!(concat!(env!("OUT_DIR"), "/fonts.rs")); static DINO: &'static str = " xxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxx xxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx x xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxx xxxxxxxxxxxxxx xxxxxx xxxxxxxxxxxx xxxxxxxxxxx xxxxxxxxxx xxxxxxxxx xxxxxxx xxxxxx xxxxxxx"; fn set_layer_buffer_defaults(layer: &mut Layer, simplify: bool, clip: bool) { layer.simplify = simplify; if simplify { // Limit features by default unless simplify is set to false layer.query_limit = Some(1000); } layer.buffer_size = match layer.geometry_type { Some(ref geom) => { if clip { if geom.contains("POLYGON") { Some(1) } else { Some(0) } } else { None } } None => None, }; } pub fn service_from_args(args: &ArgMatches) -> (MvtService, ApplicationCfg) { if let Some(cfgpath) = args.value_of("config") { info!("Reading configuration from '{}'", cfgpath); for argname in vec!["dbconn", "datasource", "qgs"] { if args.value_of(argname).is_some() { warn!("Ignoring argument `{}`", argname); } } let config = read_config(cfgpath).unwrap_or_else(|err| { println!("Error reading configuration - {} ", err); process::exit(1) }); let mut svc = MvtService::from_config(&config).unwrap_or_else(|err| { println!("Error reading configuration - {} ", err); process::exit(1) }); svc.connect(); (svc, config) } else { let bind = args.value_of("bind").unwrap_or("127.0.0.1"); let port = u16::from_str(args.value_of("port").unwrap_or("6767")).expect("Invalid port number"); let mut config: ApplicationCfg = parse_config(DEFAULT_CONFIG.to_string(), "").unwrap(); config.webserver.bind = Some(bind.to_string()); config.webserver.port = Some(port); 
let cache = match args.value_of("cache") { None => Tilecache::Nocache(Nocache), Some(dir) => Tilecache::Filecache(Filecache { basepath: dir.to_string(), baseurl: None, }), }; let simplify = bool::from_str(args.value_of("simplify").unwrap_or("true")).unwrap_or(false); let clip = bool::from_str(args.value_of("clip").unwrap_or("true")).unwrap_or(false); let grid = Grid::web_mercator(); let mut tilesets = Vec::new(); let datasources = if let Some(qgs) = args.value_of("qgs") { info!("Reading configuration from '{}'", qgs); let (datasources, mut tileset) = read_qgs(qgs); for layer in tileset.layers.iter_mut() { set_layer_buffer_defaults(layer, simplify, clip); } tilesets.push(tileset); datasources } else { let datasources = Datasources::from_args(args); if datasources.datasources.is_empty() { println!("Either 'config', 'dbconn' or 'datasource' is required"); process::exit(1) } let detect_geometry_types = true; //TODO: add option (maybe slow for many geometries) for (_name, ds) in &datasources.datasources { let dsconn = ds.connected(); let mut layers = dsconn.detect_layers(detect_geometry_types); while let Some(mut l) = layers.pop() { let extent = dsconn.layer_extent(&l); set_layer_buffer_defaults(&mut l, simplify, clip); let tileset = Tileset { name: l.name.clone(), attribution: None, extent: extent, layers: vec![l], }; tilesets.push(tileset); } } datasources }; let mut svc = MvtService { datasources: datasources, grid: grid, tilesets: tilesets, cache: cache, }; svc.connect(); //TODO: ugly - we connect twice (svc, config) } } pub fn webserver(args: &ArgMatches) { let (mut service, config) = service_from_args(args); let mvt_viewer = config.service.mvt.viewer; let bind: &str = &config.webserver.bind.unwrap_or("127.0.0.1".to_string()); let port = config.webserver.port.unwrap_or(6767); let threads = config.webserver.threads.unwrap_or(4) as usize; let cache_max_age = config.webserver.cache_control_max_age.unwrap_or(300); service.prepare_feature_queries(); service.init_cache(); let mut tileset_infos: Vec<TilesetInfo> = service .tilesets .iter() .map(|set| TilesetInfo::from_tileset(&set)) .collect(); tileset_infos.sort_by_key(|ti| ti.name.clone()); let mut server = Nickel::with_data(service); server.options = Options::default().thread_count(Some(threads)); // Avoid thread exhaustion caused by hypers keep_alive handling (https://github.com/hyperium/hyper/issues/368) server.keep_alive_timeout(None); server.utilize(log_request); server.get( "/**(.style)?.json", middleware! { |_req, mut res| res.set(MediaType::Json); res.set(AccessControlAllowMethods(vec![Method::Get])); res.set(AccessControlAllowOrigin::Any); }, ); server.get( "/index.json", middleware! { |_req, res| let service: &MvtService = res.server_data(); let json = service.get_mvt_metadata().unwrap(); serde_json::to_vec(&json).unwrap() }, ); // Font list for Maputnik server.get( "/fontstacks.json", middleware! { |_req, _res| let json = json!(["Roboto Medium","Roboto Regular"]); serde_json::to_vec(&json).unwrap() }, ); // Fonts for Maputnik // Example: /fonts/Open%20Sans%20Regular,Arial%20Unicode%20MS%20Regular/0-255.pbf server.get( "/fonts/:fonts/:range.pbf", middleware! 
{ |req, mut res| let fontpbfs = fonts(); let fontlist = req.param("fonts").unwrap(); let range = req.param("range").unwrap(); let mut fonts = fontlist.split(",").collect::<Vec<_>>(); fonts.push("Roboto Regular"); // Fallback for font in fonts { let key = format!("fonts/{}/{}.pbf", font.replace("%20", " "), range); debug!("Font lookup: {}", key); if let Some(pbf) = fontpbfs.get(&key as &str) { res.set_header_fallback(|| ContentType("application/x-protobuf".to_owned())); res.set_header_fallback(|| ContentEncoding(vec![Encoding::Gzip])); return res.send(*pbf) } } }, ); server.get( "/:tileset.json", middleware! { |req, res| let service: &MvtService = res.server_data(); let tileset = req.param("tileset").unwrap(); let host = req.origin.headers.get::<header::Host>().unwrap(); let baseurl = format!("http://{}:{}", host.hostname, host.port.unwrap_or(80)); let json = service.get_tilejson(&baseurl, &tileset).unwrap(); serde_json::to_vec(&json).unwrap() }, ); server.get( "/:tileset.style.json", middleware! { |req, res| let service: &MvtService = res.server_data(); let tileset = req.param("tileset").unwrap(); let host = req.origin.headers.get::<header::Host>().unwrap(); let baseurl = format!("http://{}:{}", host.hostname, host.port.unwrap_or(80)); let json = service.get_stylejson(&baseurl, &tileset).unwrap(); serde_json::to_vec(&json).unwrap() }, ); server.get( "/:tileset/metadata.json", middleware! { |req, res| let service: &MvtService = res.server_data(); let tileset = req.param("tileset").unwrap(); let json = service.get_mbtiles_metadata(&tileset).unwrap(); serde_json::to_vec(&json).unwrap() }, ); server.get( "/:tileset/:z/:x/:y.pbf", middleware! { |req, mut res| let service: &MvtService = res.server_data(); let tileset = req.param("tileset").unwrap(); let z = req.param("z").unwrap().parse::<u8>().unwrap(); let x = req.param("x").unwrap().parse::<u32>().unwrap(); let y = req.param("y").unwrap().parse::<u32>().unwrap(); let accept_encoding = req.origin.headers.get::<header::AcceptEncoding>(); let gzip = accept_encoding.is_some() && accept_encoding.unwrap().iter().any( |ref qit| qit.item == Encoding::Gzip ); let tile = service.tile_cached(tileset, x, y, z, gzip); if gzip { res.set_header_fallback(|| ContentEncoding(vec![Encoding::Gzip])); } res.set_header_fallback(|| ContentType("application/x-protobuf".to_owned())); res.set_header_fallback(|| CacheControl(vec![CacheDirective::MaxAge(cache_max_age)])); //res.set_header_fallback(|| ContentLength(tile.len() as u64)); res.set(AccessControlAllowMethods(vec![Method::Get])); res.set(AccessControlAllowOrigin::Any); tile }, ); if mvt_viewer { let static_files = StaticFiles::init(); server.get( "/(:base/)?:static", middleware! { |req, mut res| let mut name = req.param("static").unwrap().to_string(); if let Some(format) = req.param("format") { name = format!("{}.{}", name, format); } if let Some(content) = static_files.content(req.param("base"), name) { res.set(content.1); return res.send(content.0) } }, ); } server.get("/**", StaticFilesHandler::new("public/")); println!("{}", DINO); let _listening = server .listen((bind, port)) .expect("Failed to launch server"); let openbrowser = bool::from_str(args.value_of("openbrowser").unwrap_or("true")).unwrap_or(false); if openbrowser && mvt_viewer { let _res = open::that(format!("http://{}:{}", bind, port)); } } pub fn gen_config(args: &ArgMatches) -> String { let toml = r#" [webserver] # Bind address. Use 0.0.0.0 to listen on all adresses. 
bind = "127.0.0.1" port = 6767 threads = 4 #cache_control_max_age = 43200 "#; let mut config; if args.value_of("dbconn").is_some() || args.value_of("datasource").is_some() || args.value_of("qgs").is_some() { let (service, _) = service_from_args(args); config = service.gen_runtime_config(); } else { config = MvtService::gen_config(); } config.push_str(toml); config } #[test] fn test_gen_config() { use core::parse_config; let args = ArgMatches::new(); let toml = gen_config(&args); println!("{}", toml); assert_eq!(Some("# t-rex configuration"), toml.lines().next()); let config = parse_config(toml, "").unwrap(); let _service = MvtService::from_config(&config).unwrap(); //assert_eq!(service.input.connection_url, // "postgresql://user:pass@host/database"); } #[test] #[ignore] fn test_runtime_config() { use std::env; use clap::App; use core::parse_config; if env::var("DBCONN").is_err() { panic!("DBCONN undefined"); } let args = App::new("test") .args_from_usage("--dbconn=[SPEC] 'PostGIS connection postgresql://USER@HOST/DBNAME'") .get_matches_from(vec!["", "--dbconn", &env::var("DBCONN").unwrap()]); let toml = gen_config(&args); println!("{}", toml); assert_eq!(Some("# t-rex configuration"), toml.lines().next()); let config = parse_config(toml, "").unwrap(); let _service = MvtService::from_config(&config).unwrap(); //assert_eq!(service.input.connection_url, env::var("DBCONN").unwrap()); }
init
identifier_name
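The completed `init` above registers every viewer asset at compile time: `include_bytes!` bakes each file into the binary and the `HashMap` keys it by relative path, so the server needs no filesystem access for its own UI. A reduced sketch of the pattern, with plain string content types instead of Nickel's `MediaType` and a placeholder asset path that must exist at build time:

use std::collections::HashMap;

// Embedded asset registry: relative path -> (bytes, content type).
struct Assets {
    files: HashMap<&'static str, (&'static [u8], &'static str)>,
}

impl Assets {
    fn init() -> Assets {
        let mut assets = Assets { files: HashMap::new() };
        // include_bytes! resolves relative to this source file, so a
        // missing asset fails the build rather than a request.
        assets.files.insert(
            "index.html",
            (include_bytes!("static/index.html").as_slice(), "text/html"),
        );
        assets
    }

    fn content(&self, name: &str) -> Option<&(&'static [u8], &'static str)> {
        // "." normalizes to the index page, as in StaticFiles::content.
        let key = if name == "." { "index.html" } else { name };
        self.files.get(key)
    }
}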
server.rs
// // Copyright (c) Pirmin Kalberer. All rights reserved. // Licensed under the MIT License. See LICENSE file in the project root for full license information. // use core::config::ApplicationCfg; use datasource_type::Datasources; use datasource::DatasourceInput; use core::grid::Grid; use core::layer::Layer; use service::tileset::Tileset; use mvt_service::MvtService; use read_qgs; use core::{parse_config, read_config, Config}; use core::config::DEFAULT_CONFIG; use serde_json; use cache::{Filecache, Nocache, Tilecache}; use nickel::{HttpRouter, MediaType, MiddlewareResult, Nickel, Options, Request, Response, StaticFilesHandler}; use hyper::header::{AccessControlAllowMethods, AccessControlAllowOrigin, CacheControl, CacheDirective, ContentEncoding, Encoding}; use hyper::method::Method; use hyper::header; use std::collections::HashMap; use std::str::FromStr; use clap::ArgMatches; use std::str; use std::process; use open; fn log_request<'mw>( req: &mut Request<MvtService>, res: Response<'mw, MvtService>, ) -> MiddlewareResult<'mw, MvtService> { info!("{} {}", req.origin.method, req.origin.uri); res.next_middleware() } header! { (ContentType, "Content-Type") => [String] } #[derive(RustcEncodable)] struct TilesetInfo { name: String, layerinfos: String, hasviewer: bool, } impl TilesetInfo { fn from_tileset(set: &Tileset) -> TilesetInfo { let mut hasviewer = true; let layerinfos: Vec<String> = set.layers .iter() .map(|l| { let geom_type = l.geometry_type.clone().unwrap_or("UNKNOWN".to_string()); hasviewer = hasviewer && [ "POINT", "LINESTRING", "POLYGON", "MULTPOINT", "MULTILINESTRING", "MULTIPOLYGON", ].contains(&(&geom_type as &str)); format!("{} [{}]", &l.name, &geom_type) }) .collect(); TilesetInfo { name: set.name.clone(), layerinfos: format!("{}", layerinfos.join(", ")), hasviewer: hasviewer, } } } struct StaticFiles { files: HashMap<&'static str, (&'static [u8], MediaType)>, } impl StaticFiles { fn init() -> StaticFiles { let mut static_files = StaticFiles { files: HashMap::new(), }; static_files.add( "favicon.ico", include_bytes!("static/favicon.ico"), MediaType::Ico, ); static_files.add( "index.html", include_bytes!("static/index.html"), MediaType::Html, ); static_files.add( "viewer.js", include_bytes!("static/viewer.js"), MediaType::Js, ); static_files.add( "viewer.css", include_bytes!("static/viewer.css"), MediaType::Css, ); static_files.add( "maputnik.html", include_bytes!("static/maputnik.html"), MediaType::Html, ); static_files.add( "maputnik.js", include_bytes!("static/maputnik.js"), MediaType::Js, ); static_files.add( "maputnik-vendor.js", include_bytes!("static/maputnik-vendor.js"), MediaType::Js, ); static_files.add( "img/maputnik.png", include_bytes!("static/img/maputnik.png"), MediaType::Png, ); static_files.add( "fonts/Roboto-Regular.ttf", include_bytes!("static/fonts/Roboto-Regular.ttf"), MediaType::Ttf, ); static_files.add( "fonts/Roboto-Medium.ttf", include_bytes!("static/fonts/Roboto-Medium.ttf"), MediaType::Ttf, ); static_files } fn add(&mut self, name: &'static str, data: &'static [u8], media_type: MediaType) { self.files.insert(name, (data, media_type)); } fn content(&self, base: Option<&str>, name: String) -> Option<&(&[u8], MediaType)> { let mut key = if name == "." 
{ "index.html".to_string() } else { name }; if let Some(path) = base { key = format!("{}/{}", path, key); } self.files.get(&key as &str) } } include!(concat!(env!("OUT_DIR"), "/fonts.rs")); static DINO: &'static str = " xxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxx xxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx x xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxx xxxxxxxxxxxxxx xxxxxx xxxxxxxxxxxx xxxxxxxxxxx xxxxxxxxxx xxxxxxxxx xxxxxxx xxxxxx xxxxxxx"; fn set_layer_buffer_defaults(layer: &mut Layer, simplify: bool, clip: bool) { layer.simplify = simplify; if simplify { // Limit features by default unless simplify is set to false layer.query_limit = Some(1000); } layer.buffer_size = match layer.geometry_type { Some(ref geom) => { if clip { if geom.contains("POLYGON") { Some(1) } else { Some(0) } } else { None } } None => None, }; } pub fn service_from_args(args: &ArgMatches) -> (MvtService, ApplicationCfg) { if let Some(cfgpath) = args.value_of("config") { info!("Reading configuration from '{}'", cfgpath); for argname in vec!["dbconn", "datasource", "qgs"] { if args.value_of(argname).is_some() { warn!("Ignoring argument `{}`", argname); } } let config = read_config(cfgpath).unwrap_or_else(|err| { println!("Error reading configuration - {} ", err); process::exit(1) }); let mut svc = MvtService::from_config(&config).unwrap_or_else(|err| { println!("Error reading configuration - {} ", err); process::exit(1) }); svc.connect(); (svc, config) } else { let bind = args.value_of("bind").unwrap_or("127.0.0.1"); let port = u16::from_str(args.value_of("port").unwrap_or("6767")).expect("Invalid port number"); let mut config: ApplicationCfg = parse_config(DEFAULT_CONFIG.to_string(), "").unwrap(); config.webserver.bind = Some(bind.to_string()); config.webserver.port = Some(port); let cache = match args.value_of("cache") { None => Tilecache::Nocache(Nocache), Some(dir) => Tilecache::Filecache(Filecache { basepath: dir.to_string(), baseurl: None, }), }; let simplify = bool::from_str(args.value_of("simplify").unwrap_or("true")).unwrap_or(false); let clip = bool::from_str(args.value_of("clip").unwrap_or("true")).unwrap_or(false); let grid = Grid::web_mercator(); let mut tilesets = Vec::new(); let datasources = if let Some(qgs) = args.value_of("qgs") { info!("Reading configuration from '{}'", qgs); let (datasources, mut tileset) = read_qgs(qgs); for layer in tileset.layers.iter_mut() { set_layer_buffer_defaults(layer, simplify, clip); } tilesets.push(tileset); datasources } else { let datasources = Datasources::from_args(args); if datasources.datasources.is_empty() { println!("Either 'config', 'dbconn' or 'datasource' is required"); process::exit(1) } let detect_geometry_types = true; //TODO: add option (maybe slow for many geometries) for (_name, ds) in &datasources.datasources { let dsconn = ds.connected(); let mut layers = dsconn.detect_layers(detect_geometry_types); while let Some(mut l) = layers.pop() { let extent = dsconn.layer_extent(&l); set_layer_buffer_defaults(&mut l, simplify, clip); let tileset = Tileset { name: l.name.clone(), attribution: None, extent: 
extent, layers: vec![l], }; tilesets.push(tileset); } } datasources }; let mut svc = MvtService { datasources: datasources, grid: grid, tilesets: tilesets, cache: cache, }; svc.connect(); //TODO: ugly - we connect twice (svc, config) } } pub fn webserver(args: &ArgMatches) { let (mut service, config) = service_from_args(args); let mvt_viewer = config.service.mvt.viewer; let bind: &str = &config.webserver.bind.unwrap_or("127.0.0.1".to_string()); let port = config.webserver.port.unwrap_or(6767); let threads = config.webserver.threads.unwrap_or(4) as usize; let cache_max_age = config.webserver.cache_control_max_age.unwrap_or(300); service.prepare_feature_queries(); service.init_cache(); let mut tileset_infos: Vec<TilesetInfo> = service .tilesets .iter() .map(|set| TilesetInfo::from_tileset(&set)) .collect(); tileset_infos.sort_by_key(|ti| ti.name.clone()); let mut server = Nickel::with_data(service); server.options = Options::default().thread_count(Some(threads)); // Avoid thread exhaustion caused by hypers keep_alive handling (https://github.com/hyperium/hyper/issues/368) server.keep_alive_timeout(None); server.utilize(log_request); server.get( "/**(.style)?.json", middleware! { |_req, mut res| res.set(MediaType::Json); res.set(AccessControlAllowMethods(vec![Method::Get])); res.set(AccessControlAllowOrigin::Any); }, ); server.get( "/index.json", middleware! { |_req, res| let service: &MvtService = res.server_data(); let json = service.get_mvt_metadata().unwrap(); serde_json::to_vec(&json).unwrap() }, ); // Font list for Maputnik server.get( "/fontstacks.json", middleware! { |_req, _res| let json = json!(["Roboto Medium","Roboto Regular"]); serde_json::to_vec(&json).unwrap() }, ); // Fonts for Maputnik // Example: /fonts/Open%20Sans%20Regular,Arial%20Unicode%20MS%20Regular/0-255.pbf server.get( "/fonts/:fonts/:range.pbf", middleware! { |req, mut res| let fontpbfs = fonts(); let fontlist = req.param("fonts").unwrap(); let range = req.param("range").unwrap(); let mut fonts = fontlist.split(",").collect::<Vec<_>>(); fonts.push("Roboto Regular"); // Fallback for font in fonts { let key = format!("fonts/{}/{}.pbf", font.replace("%20", " "), range); debug!("Font lookup: {}", key); if let Some(pbf) = fontpbfs.get(&key as &str) { res.set_header_fallback(|| ContentType("application/x-protobuf".to_owned())); res.set_header_fallback(|| ContentEncoding(vec![Encoding::Gzip])); return res.send(*pbf) } } }, ); server.get( "/:tileset.json", middleware! { |req, res| let service: &MvtService = res.server_data(); let tileset = req.param("tileset").unwrap(); let host = req.origin.headers.get::<header::Host>().unwrap(); let baseurl = format!("http://{}:{}", host.hostname, host.port.unwrap_or(80)); let json = service.get_tilejson(&baseurl, &tileset).unwrap(); serde_json::to_vec(&json).unwrap() }, ); server.get( "/:tileset.style.json", middleware! { |req, res| let service: &MvtService = res.server_data(); let tileset = req.param("tileset").unwrap(); let host = req.origin.headers.get::<header::Host>().unwrap(); let baseurl = format!("http://{}:{}", host.hostname, host.port.unwrap_or(80)); let json = service.get_stylejson(&baseurl, &tileset).unwrap(); serde_json::to_vec(&json).unwrap() }, ); server.get( "/:tileset/metadata.json", middleware! { |req, res| let service: &MvtService = res.server_data(); let tileset = req.param("tileset").unwrap();
serde_json::to_vec(&json).unwrap() }, ); server.get( "/:tileset/:z/:x/:y.pbf", middleware! { |req, mut res| let service: &MvtService = res.server_data(); let tileset = req.param("tileset").unwrap(); let z = req.param("z").unwrap().parse::<u8>().unwrap(); let x = req.param("x").unwrap().parse::<u32>().unwrap(); let y = req.param("y").unwrap().parse::<u32>().unwrap(); let accept_encoding = req.origin.headers.get::<header::AcceptEncoding>(); let gzip = accept_encoding.is_some() && accept_encoding.unwrap().iter().any( |ref qit| qit.item == Encoding::Gzip ); let tile = service.tile_cached(tileset, x, y, z, gzip); if gzip { res.set_header_fallback(|| ContentEncoding(vec![Encoding::Gzip])); } res.set_header_fallback(|| ContentType("application/x-protobuf".to_owned())); res.set_header_fallback(|| CacheControl(vec![CacheDirective::MaxAge(cache_max_age)])); //res.set_header_fallback(|| ContentLength(tile.len() as u64)); res.set(AccessControlAllowMethods(vec![Method::Get])); res.set(AccessControlAllowOrigin::Any); tile }, ); if mvt_viewer { let static_files = StaticFiles::init(); server.get( "/(:base/)?:static", middleware! { |req, mut res| let mut name = req.param("static").unwrap().to_string(); if let Some(format) = req.param("format") { name = format!("{}.{}", name, format); } if let Some(content) = static_files.content(req.param("base"), name) { res.set(content.1); return res.send(content.0) } }, ); } server.get("/**", StaticFilesHandler::new("public/")); println!("{}", DINO); let _listening = server .listen((bind, port)) .expect("Failed to launch server"); let openbrowser = bool::from_str(args.value_of("openbrowser").unwrap_or("true")).unwrap_or(false); if openbrowser && mvt_viewer { let _res = open::that(format!("http://{}:{}", bind, port)); } } pub fn gen_config(args: &ArgMatches) -> String { let toml = r#" [webserver] # Bind address. Use 0.0.0.0 to listen on all adresses. bind = "127.0.0.1" port = 6767 threads = 4 #cache_control_max_age = 43200 "#; let mut config; if args.value_of("dbconn").is_some() || args.value_of("datasource").is_some() || args.value_of("qgs").is_some() { let (service, _) = service_from_args(args); config = service.gen_runtime_config(); } else { config = MvtService::gen_config(); } config.push_str(toml); config } #[test] fn test_gen_config() { use core::parse_config; let args = ArgMatches::new(); let toml = gen_config(&args); println!("{}", toml); assert_eq!(Some("# t-rex configuration"), toml.lines().next()); let config = parse_config(toml, "").unwrap(); let _service = MvtService::from_config(&config).unwrap(); //assert_eq!(service.input.connection_url, // "postgresql://user:pass@host/database"); } #[test] #[ignore] fn test_runtime_config() { use std::env; use clap::App; use core::parse_config; if env::var("DBCONN").is_err() { panic!("DBCONN undefined"); } let args = App::new("test") .args_from_usage("--dbconn=[SPEC] 'PostGIS connection postgresql://USER@HOST/DBNAME'") .get_matches_from(vec!["", "--dbconn", &env::var("DBCONN").unwrap()]); let toml = gen_config(&args); println!("{}", toml); assert_eq!(Some("# t-rex configuration"), toml.lines().next()); let config = parse_config(toml, "").unwrap(); let _service = MvtService::from_config(&config).unwrap(); //assert_eq!(service.input.connection_url, env::var("DBCONN").unwrap()); }
let json = service.get_mbtiles_metadata(&tileset).unwrap();
random_line_split
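In the `/:tileset/:z/:x/:y.pbf` handler above, the response is gzip-encoded only when the request's `Accept-Encoding` header lists gzip, and the cached tile is fetched in the matching form. A dependency-free sketch of that check; it deliberately ignores quality values such as `gzip;q=0`, which a complete implementation would honor:

// True when the Accept-Encoding header value admits gzip.
fn accepts_gzip(accept_encoding: Option<&str>) -> bool {
    accept_encoding
        .map(|v| v.split(',').any(|enc| enc.trim().starts_with("gzip")))
        .unwrap_or(false)
}

fn main() {
    assert!(accepts_gzip(Some("gzip, deflate, br")));
    assert!(!accepts_gzip(Some("identity")));
    assert!(!accepts_gzip(None));
}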
mod.rs
pub mod dir_diff_list; use crate::cli_opt::{ApplyOpts, CliOpts, Command, TestSamplesOpts}; use crate::path_pattern::PathPattern; use crate::{error::*, SourceLoc}; use clap::Parser; use dir_diff_list::Difference; use dir_diff_list::EntryDiff; use std::fs; use std::path::{Path, PathBuf}; use tempfile::{tempdir, TempDir}; use tracing::info; pub fn test_samples(cfg: &TestSamplesOpts) -> Result<()> { let template_base_path = &cfg.src.download(cfg.offline)?; if!check_samples(template_base_path, &cfg.src, cfg.review)? { Err(crate::Error::TestSamplesFailed {}) } else { Ok(()) } } fn check_samples<A: AsRef<Path>>( template_path: A, template_loc: &SourceLoc, review_mode: bool, ) -> Result<bool> { let mut is_success = true; let tmp_dir = tempdir()?; let samples_folder = template_path .as_ref() .join(crate::cfg::TEMPLATE_SAMPLES_DIRNAME); let samples = Sample::find_from_folder(template_loc, &samples_folder, &tmp_dir)?; info!(nb_samples_detected = samples.len(),?samples_folder); for sample in samples { info!(sample =?sample.name, args =?sample.args, "checking..."); let run = SampleRun::run(&sample)?; is_success = is_success && run.is_success(); show_differences(&sample.name, &run.diffs, review_mode)?; } Ok(is_success || review_mode) } //TODO move to ui module to be customizable (in future) pub fn show_differences(name: &str, entries: &[EntryDiff], review_mode: bool) -> Result<()> { let mut updates_count = 0; for entry in entries { println!("{:-^1$}", "-", 80); entry.show(); if review_mode && entry.review()? { updates_count += 1 } } println!("{:-^1$}", "-", 80); println!( "number of differences in sample '{}': {}", name, entries.len(), ); if review_mode { println!("number of updates in sample '{}': {}", name, updates_count); } println!("{:-^1$}", "-", 80); Ok(()) } impl EntryDiff { fn show(&self) { match &self.difference { Difference::Presence { expect, actual } => { if *expect &&!*actual { println!( "missing file in the actual: {}", self.relative_path.to_string_lossy() ); } else { println!( "unexpected file in the actual: {}", self.relative_path.to_string_lossy() ); } } Difference::Kind { expect, actual } => { println!( "difference kind of entry on: {}, expected: {:?}, actual: {:?}", self.relative_path.to_string_lossy(), expect, actual ); } Difference::StringContent { expect, actual } => { println!( "difference detected on: {}\n", self.relative_path.to_string_lossy() ); crate::ui::show_difference_text(expect, actual, true); } Difference::BinaryContent { expect_md5, actual_md5, } => { println!( "difference detected on: {} (detected as binary file)\n", self.relative_path.to_string_lossy() ); println!("expected md5: {}", expect_md5); println!("actual md5: {}", actual_md5); } } } // TODO add test fn review(&self) -> Result<bool> { let accept_update = match self.difference { Difference::Presence { expect, actual } => { if expect &&!actual
else if crate::ui::ask_to_update_sample("Accept to add into sample?")? { let path = self.actual_base_path.join(&self.relative_path); let is_dir = std::fs::metadata(&path)?.is_dir(); if is_dir { std::fs::create_dir_all(self.expect_base_path.join(&self.relative_path))?; } else { std::fs::copy(path, self.expect_base_path.join(&self.relative_path))?; } std::fs::copy( self.actual_base_path.join(&self.relative_path), self.expect_base_path.join(&self.relative_path), )?; true } else { false } } _ => { if crate::ui::ask_to_update_sample("Accept to update file into sample?")? { std::fs::copy( self.actual_base_path.join(&self.relative_path), self.expect_base_path.join(&self.relative_path), )?; true } else { false } } }; Ok(accept_update) } } #[derive(Debug, Clone)] struct Sample { pub name: String, pub args: ApplyOpts, pub expected: PathBuf, pub existing: PathBuf, pub ignores: Vec<PathPattern>, } impl Sample { // scan folder to find sample to test (xxx.args, xxx.expected, xxx.existing) fn find_from_folder<B: AsRef<Path>>( template_loc: &SourceLoc, samples_folder: B, tmp_dir: &TempDir, ) -> Result<Vec<Sample>> { let mut out = vec![]; for e in fs::read_dir(&samples_folder).map_err(|source| Error::ListFolder { path: samples_folder.as_ref().into(), source, })? { let path = e?.path(); if path .extension() .filter(|x| x.to_string_lossy() == "expected") .is_some() { let name = path .file_stem() .expect("folder should have a file name without extension") .to_string_lossy() .to_string(); let expected = path.clone(); let existing = path.with_extension("existing"); let args_file = path.with_extension("cfg.yaml"); let destination = tmp_dir.path().join(&name).to_path_buf(); let sample_cfg = SampleCfg::from_file(args_file)?; let args = sample_cfg.make_args(template_loc, destination)?; let ignores = sample_cfg.make_ignores()?; out.push(Sample { name, args, expected, existing, ignores, }); } } Ok(out) } } #[derive(Deserialize, Serialize, Debug, Default, Clone, PartialEq)] struct SampleCfg { apply_args: Option<Vec<String>>, check_ignores: Option<Vec<String>>, } impl SampleCfg { fn from_file<P: AsRef<Path>>(file: P) -> Result<Self> { let v = if file.as_ref().exists() { let cfg_str = fs::read_to_string(file.as_ref()).map_err(|source| Error::ReadFile { path: file.as_ref().into(), source, })?; serde_yaml::from_str::<SampleCfg>(&cfg_str)? 
} else { SampleCfg::default() }; Ok(v) } fn make_ignores(&self) -> Result<Vec<PathPattern>> { use std::str::FromStr; let trim_chars: &[_] = &['\r', '\n','', '\t', '"', '\'']; let ignores = self .check_ignores .clone() .unwrap_or_default() .iter() .map(|v| v.trim_matches(trim_chars)) .filter(|v|!v.is_empty()) .map(PathPattern::from_str) .collect::<Result<Vec<PathPattern>>>()?; Ok(ignores) } fn make_args<B: AsRef<Path>>( &self, template_loc: &SourceLoc, destination: B, ) -> Result<ApplyOpts> { let cfg_args = self.apply_args.clone().unwrap_or_default(); let mut args_line = cfg_args.iter().map(|s| s.as_str()).collect::<Vec<_>>(); args_line.push("--confirm"); args_line.push("never"); args_line.push("--no-interaction"); args_line.push("--destination"); args_line.push( destination .as_ref() .to_str() .expect("to convert destination path into str"), ); args_line.push("--source"); args_line.push(&template_loc.uri.raw); if let Some(rev) = &template_loc.rev { args_line.push("--rev"); args_line.push(rev); } let buff = template_loc.subfolder.as_ref().map(|v| v.to_string_lossy()); if let Some(subfolder) = buff.as_ref() { args_line.push("--source-subfolder"); args_line.push(subfolder); } //HACK from_iter_safe expect first entry to be the binary name, // unless clap::AppSettings::NoBinaryName has been used // (but I don't know how to use it in this case, patch is welcomed) args_line.insert(0, "apply"); args_line.insert(0, "ffizer"); CliOpts::try_parse_from(args_line) .map_err(Error::from) .and_then(|o| match o.cmd { Command::Apply(g) => Ok(g), e => Err(Error::Unknown(format!( "command should always be parsed as 'apply' not as {:?}", e ))), }) } } #[derive(Debug, Clone, PartialEq, Eq, Hash)] struct SampleRun { diffs: Vec<EntryDiff>, } impl SampleRun { #[tracing::instrument] pub fn run(sample: &Sample) -> Result<SampleRun> { // ALTERNATIVE: fork a sub-process to run current ffizer in apply mode let destination = &sample.args.dst_folder; if sample.existing.exists() { copy(&sample.existing, destination)?; } let ctx = crate::Ctx { cmd_opt: sample.args.clone(), }; crate::process(&ctx)?; let diffs = dir_diff_list::search_diff(destination, &sample.expected, &sample.ignores)?; Ok(SampleRun { diffs }) } pub fn is_success(&self) -> bool { self.diffs.is_empty() } } impl std::fmt::Display for SampleRun { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "Differences: {:#?}", self.diffs) } } /// recursively copy a directory /// based on https://stackoverflow.com/a/60406693/469066 pub fn copy<U: AsRef<Path>, V: AsRef<Path>>(from: U, to: V) -> Result<()> { let mut stack = vec![PathBuf::from(from.as_ref())]; let output_root = PathBuf::from(to.as_ref()); let input_root = PathBuf::from(from.as_ref()).components().count(); while let Some(working_path) = stack.pop() { //println!("process: {:?}", &working_path); // Generate a relative path let src: PathBuf = working_path.components().skip(input_root).collect(); // Create a destination if missing let dest = if src.components().count() == 0 { output_root.clone() } else { output_root.join(&src) }; if fs::metadata(&dest).is_err() { // println!(" mkdir: {:?}", dest); fs::create_dir_all(&dest).map_err(|source| Error::CreateFolder { path: dest.clone(), source, })?; } for entry in fs::read_dir(&working_path).map_err(|source| Error::ListFolder { path: working_path, source, })? 
{ let path = entry?.path(); if path.is_dir() { stack.push(path); } else if let Some(filename) = path.file_name() { let dest_path = dest.join(filename); //println!(" copy: {:?} -> {:?}", &path, &dest_path); fs::copy(&path, &dest_path).map_err(|source| Error::CopyFile { src: path, dst: dest_path, source, })?; } } } Ok(()) }
{ let path = self.expect_base_path.join(&self.relative_path); let is_dir = std::fs::metadata(&path)?.is_dir(); if crate::ui::ask_to_update_sample("Accept to remove from sample?")? { if is_dir { std::fs::remove_dir_all(&path)?; } else { std::fs::remove_file(&path)?; } true } else { false } }
conditional_block
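The `copy` helper at the end of the file replaces recursion with an explicit stack: each popped directory is re-rooted under the destination by skipping the source's leading path components. The same walk, trimmed to std-only error handling (the paths in `main` are placeholders):

use std::fs;
use std::io;
use std::path::{Path, PathBuf};

fn copy_dir(from: &Path, to: &Path) -> io::Result<()> {
    let input_root = from.components().count();
    let mut stack = vec![PathBuf::from(from)];
    while let Some(working_path) = stack.pop() {
        // Current directory relative to `from`, re-rooted at `to`.
        let rel: PathBuf = working_path.components().skip(input_root).collect();
        let dest = to.join(rel);
        fs::create_dir_all(&dest)?;
        for entry in fs::read_dir(&working_path)? {
            let path = entry?.path();
            if path.is_dir() {
                stack.push(path);
            } else if let Some(name) = path.file_name() {
                fs::copy(&path, dest.join(name))?;
            }
        }
    }
    Ok(())
}

fn main() -> io::Result<()> {
    copy_dir(Path::new("samples/basic"), Path::new("/tmp/basic-copy"))
}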
mod.rs
pub mod dir_diff_list; use crate::cli_opt::{ApplyOpts, CliOpts, Command, TestSamplesOpts}; use crate::path_pattern::PathPattern; use crate::{error::*, SourceLoc}; use clap::Parser; use dir_diff_list::Difference; use dir_diff_list::EntryDiff; use std::fs; use std::path::{Path, PathBuf}; use tempfile::{tempdir, TempDir}; use tracing::info; pub fn test_samples(cfg: &TestSamplesOpts) -> Result<()> { let template_base_path = &cfg.src.download(cfg.offline)?; if!check_samples(template_base_path, &cfg.src, cfg.review)? { Err(crate::Error::TestSamplesFailed {}) } else { Ok(()) } } fn check_samples<A: AsRef<Path>>( template_path: A, template_loc: &SourceLoc, review_mode: bool, ) -> Result<bool> { let mut is_success = true; let tmp_dir = tempdir()?; let samples_folder = template_path .as_ref() .join(crate::cfg::TEMPLATE_SAMPLES_DIRNAME); let samples = Sample::find_from_folder(template_loc, &samples_folder, &tmp_dir)?; info!(nb_samples_detected = samples.len(),?samples_folder); for sample in samples { info!(sample =?sample.name, args =?sample.args, "checking..."); let run = SampleRun::run(&sample)?; is_success = is_success && run.is_success(); show_differences(&sample.name, &run.diffs, review_mode)?; } Ok(is_success || review_mode) } //TODO move to ui module to be customizable (in future) pub fn show_differences(name: &str, entries: &[EntryDiff], review_mode: bool) -> Result<()> { let mut updates_count = 0; for entry in entries { println!("{:-^1$}", "-", 80); entry.show(); if review_mode && entry.review()? { updates_count += 1 } } println!("{:-^1$}", "-", 80); println!( "number of differences in sample '{}': {}", name, entries.len(), ); if review_mode { println!("number of updates in sample '{}': {}", name, updates_count); } println!("{:-^1$}", "-", 80); Ok(()) } impl EntryDiff { fn show(&self) { match &self.difference { Difference::Presence { expect, actual } => { if *expect &&!*actual { println!( "missing file in the actual: {}", self.relative_path.to_string_lossy() ); } else { println!( "unexpected file in the actual: {}", self.relative_path.to_string_lossy() ); } } Difference::Kind { expect, actual } => { println!( "difference kind of entry on: {}, expected: {:?}, actual: {:?}", self.relative_path.to_string_lossy(), expect, actual ); } Difference::StringContent { expect, actual } => { println!( "difference detected on: {}\n", self.relative_path.to_string_lossy() ); crate::ui::show_difference_text(expect, actual, true); } Difference::BinaryContent { expect_md5, actual_md5, } => { println!( "difference detected on: {} (detected as binary file)\n", self.relative_path.to_string_lossy() ); println!("expected md5: {}", expect_md5); println!("actual md5: {}", actual_md5); } } } // TODO add test fn review(&self) -> Result<bool> { let accept_update = match self.difference { Difference::Presence { expect, actual } => { if expect &&!actual { let path = self.expect_base_path.join(&self.relative_path); let is_dir = std::fs::metadata(&path)?.is_dir(); if crate::ui::ask_to_update_sample("Accept to remove from sample?")? { if is_dir { std::fs::remove_dir_all(&path)?; } else { std::fs::remove_file(&path)?; } true } else { false } } else if crate::ui::ask_to_update_sample("Accept to add into sample?")? 
{ let path = self.actual_base_path.join(&self.relative_path); let is_dir = std::fs::metadata(&path)?.is_dir(); if is_dir { std::fs::create_dir_all(self.expect_base_path.join(&self.relative_path))?; } else { std::fs::copy(path, self.expect_base_path.join(&self.relative_path))?; } std::fs::copy( self.actual_base_path.join(&self.relative_path), self.expect_base_path.join(&self.relative_path), )?; true } else { false } } _ => { if crate::ui::ask_to_update_sample("Accept to update file into sample?")? { std::fs::copy( self.actual_base_path.join(&self.relative_path), self.expect_base_path.join(&self.relative_path), )?; true } else { false } } }; Ok(accept_update) } } #[derive(Debug, Clone)] struct Sample { pub name: String, pub args: ApplyOpts, pub expected: PathBuf, pub existing: PathBuf, pub ignores: Vec<PathPattern>, } impl Sample { // scan folder to find sample to test (xxx.args, xxx.expected, xxx.existing) fn find_from_folder<B: AsRef<Path>>( template_loc: &SourceLoc, samples_folder: B, tmp_dir: &TempDir, ) -> Result<Vec<Sample>> { let mut out = vec![]; for e in fs::read_dir(&samples_folder).map_err(|source| Error::ListFolder { path: samples_folder.as_ref().into(), source, })? { let path = e?.path(); if path .extension() .filter(|x| x.to_string_lossy() == "expected") .is_some() { let name = path .file_stem() .expect("folder should have a file name without extension") .to_string_lossy() .to_string(); let expected = path.clone(); let existing = path.with_extension("existing"); let args_file = path.with_extension("cfg.yaml"); let destination = tmp_dir.path().join(&name).to_path_buf(); let sample_cfg = SampleCfg::from_file(args_file)?; let args = sample_cfg.make_args(template_loc, destination)?; let ignores = sample_cfg.make_ignores()?; out.push(Sample { name, args, expected, existing, ignores, }); } } Ok(out) } } #[derive(Deserialize, Serialize, Debug, Default, Clone, PartialEq)] struct SampleCfg { apply_args: Option<Vec<String>>, check_ignores: Option<Vec<String>>, } impl SampleCfg { fn from_file<P: AsRef<Path>>(file: P) -> Result<Self> { let v = if file.as_ref().exists() { let cfg_str = fs::read_to_string(file.as_ref()).map_err(|source| Error::ReadFile { path: file.as_ref().into(), source, })?; serde_yaml::from_str::<SampleCfg>(&cfg_str)? 
} else { SampleCfg::default() }; Ok(v) } fn make_ignores(&self) -> Result<Vec<PathPattern>> { use std::str::FromStr; let trim_chars: &[_] = &['\r', '\n','', '\t', '"', '\'']; let ignores = self .check_ignores .clone() .unwrap_or_default() .iter() .map(|v| v.trim_matches(trim_chars)) .filter(|v|!v.is_empty()) .map(PathPattern::from_str) .collect::<Result<Vec<PathPattern>>>()?; Ok(ignores) } fn make_args<B: AsRef<Path>>( &self, template_loc: &SourceLoc, destination: B, ) -> Result<ApplyOpts> { let cfg_args = self.apply_args.clone().unwrap_or_default(); let mut args_line = cfg_args.iter().map(|s| s.as_str()).collect::<Vec<_>>(); args_line.push("--confirm"); args_line.push("never"); args_line.push("--no-interaction"); args_line.push("--destination"); args_line.push( destination .as_ref() .to_str() .expect("to convert destination path into str"), ); args_line.push("--source"); args_line.push(&template_loc.uri.raw); if let Some(rev) = &template_loc.rev { args_line.push("--rev"); args_line.push(rev); } let buff = template_loc.subfolder.as_ref().map(|v| v.to_string_lossy()); if let Some(subfolder) = buff.as_ref() { args_line.push("--source-subfolder"); args_line.push(subfolder); } //HACK from_iter_safe expect first entry to be the binary name, // unless clap::AppSettings::NoBinaryName has been used // (but I don't know how to use it in this case, patch is welcomed) args_line.insert(0, "apply"); args_line.insert(0, "ffizer"); CliOpts::try_parse_from(args_line) .map_err(Error::from) .and_then(|o| match o.cmd { Command::Apply(g) => Ok(g), e => Err(Error::Unknown(format!( "command should always be parsed as 'apply' not as {:?}", e ))), }) } } #[derive(Debug, Clone, PartialEq, Eq, Hash)] struct SampleRun { diffs: Vec<EntryDiff>, } impl SampleRun { #[tracing::instrument] pub fn run(sample: &Sample) -> Result<SampleRun>
pub fn is_success(&self) -> bool { self.diffs.is_empty() } } impl std::fmt::Display for SampleRun { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "Differences: {:#?}", self.diffs) } } /// recursively copy a directory /// based on https://stackoverflow.com/a/60406693/469066 pub fn copy<U: AsRef<Path>, V: AsRef<Path>>(from: U, to: V) -> Result<()> { let mut stack = vec![PathBuf::from(from.as_ref())]; let output_root = PathBuf::from(to.as_ref()); let input_root = PathBuf::from(from.as_ref()).components().count(); while let Some(working_path) = stack.pop() { //println!("process: {:?}", &working_path); // Generate a relative path let src: PathBuf = working_path.components().skip(input_root).collect(); // Create a destination if missing let dest = if src.components().count() == 0 { output_root.clone() } else { output_root.join(&src) }; if fs::metadata(&dest).is_err() { // println!(" mkdir: {:?}", dest); fs::create_dir_all(&dest).map_err(|source| Error::CreateFolder { path: dest.clone(), source, })?; } for entry in fs::read_dir(&working_path).map_err(|source| Error::ListFolder { path: working_path, source, })? { let path = entry?.path(); if path.is_dir() { stack.push(path); } else if let Some(filename) = path.file_name() { let dest_path = dest.join(filename); //println!(" copy: {:?} -> {:?}", &path, &dest_path); fs::copy(&path, &dest_path).map_err(|source| Error::CopyFile { src: path, dst: dest_path, source, })?; } } } Ok(()) }
{ // ALTERNATIVE: fork a sub-process to run current ffizer in apply mode let destination = &sample.args.dst_folder; if sample.existing.exists() { copy(&sample.existing, destination)?; } let ctx = crate::Ctx { cmd_opt: sample.args.clone(), }; crate::process(&ctx)?; let diffs = dir_diff_list::search_diff(destination, &sample.expected, &sample.ignores)?; Ok(SampleRun { diffs }) }
identifier_body
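`make_args`, shown in full above, assembles a synthetic argv and parses it with clap; because `try_parse_from` treats the first element as the binary name, the code prepends "ffizer" (and the "apply" subcommand) before parsing. A minimal illustration of that detail, assuming clap 4's derive API (clap 3 spells the attribute `#[clap(long)]`); the `Opts` struct is invented for this sketch:

use clap::Parser;

#[derive(Parser, Debug)]
struct Opts {
    #[arg(long)]
    destination: String,
}

fn main() {
    // The leading "demo" stands in for argv[0]; without it the first
    // real flag would be swallowed as the program name.
    let opts = Opts::try_parse_from(["demo", "--destination", "/tmp/out"])
        .expect("args should parse");
    println!("parsed: {:?}", opts);
}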
mod.rs
pub mod dir_diff_list; use crate::cli_opt::{ApplyOpts, CliOpts, Command, TestSamplesOpts}; use crate::path_pattern::PathPattern; use crate::{error::*, SourceLoc}; use clap::Parser; use dir_diff_list::Difference; use dir_diff_list::EntryDiff; use std::fs; use std::path::{Path, PathBuf}; use tempfile::{tempdir, TempDir}; use tracing::info; pub fn test_samples(cfg: &TestSamplesOpts) -> Result<()> { let template_base_path = &cfg.src.download(cfg.offline)?; if!check_samples(template_base_path, &cfg.src, cfg.review)? { Err(crate::Error::TestSamplesFailed {}) } else { Ok(()) } } fn check_samples<A: AsRef<Path>>( template_path: A, template_loc: &SourceLoc, review_mode: bool, ) -> Result<bool> { let mut is_success = true; let tmp_dir = tempdir()?; let samples_folder = template_path .as_ref() .join(crate::cfg::TEMPLATE_SAMPLES_DIRNAME); let samples = Sample::find_from_folder(template_loc, &samples_folder, &tmp_dir)?; info!(nb_samples_detected = samples.len(),?samples_folder); for sample in samples { info!(sample =?sample.name, args =?sample.args, "checking..."); let run = SampleRun::run(&sample)?; is_success = is_success && run.is_success(); show_differences(&sample.name, &run.diffs, review_mode)?; } Ok(is_success || review_mode) } //TODO move to ui module to be customizable (in future) pub fn show_differences(name: &str, entries: &[EntryDiff], review_mode: bool) -> Result<()> { let mut updates_count = 0; for entry in entries { println!("{:-^1$}", "-", 80); entry.show(); if review_mode && entry.review()? { updates_count += 1 } } println!("{:-^1$}", "-", 80); println!( "number of differences in sample '{}': {}", name, entries.len(), ); if review_mode { println!("number of updates in sample '{}': {}", name, updates_count); } println!("{:-^1$}", "-", 80); Ok(()) } impl EntryDiff { fn show(&self) { match &self.difference { Difference::Presence { expect, actual } => { if *expect &&!*actual { println!( "missing file in the actual: {}", self.relative_path.to_string_lossy() ); } else { println!( "unexpected file in the actual: {}", self.relative_path.to_string_lossy() ); } } Difference::Kind { expect, actual } => { println!( "difference kind of entry on: {}, expected: {:?}, actual: {:?}", self.relative_path.to_string_lossy(), expect, actual ); } Difference::StringContent { expect, actual } => { println!( "difference detected on: {}\n", self.relative_path.to_string_lossy() ); crate::ui::show_difference_text(expect, actual, true); } Difference::BinaryContent { expect_md5, actual_md5, } => { println!( "difference detected on: {} (detected as binary file)\n", self.relative_path.to_string_lossy() ); println!("expected md5: {}", expect_md5); println!("actual md5: {}", actual_md5); } } } // TODO add test fn review(&self) -> Result<bool> { let accept_update = match self.difference { Difference::Presence { expect, actual } => { if expect &&!actual { let path = self.expect_base_path.join(&self.relative_path); let is_dir = std::fs::metadata(&path)?.is_dir(); if crate::ui::ask_to_update_sample("Accept to remove from sample?")? { if is_dir { std::fs::remove_dir_all(&path)?; } else { std::fs::remove_file(&path)?; } true } else { false } } else if crate::ui::ask_to_update_sample("Accept to add into sample?")? 
{ let path = self.actual_base_path.join(&self.relative_path); let is_dir = std::fs::metadata(&path)?.is_dir(); if is_dir { std::fs::create_dir_all(self.expect_base_path.join(&self.relative_path))?; } else { std::fs::copy(path, self.expect_base_path.join(&self.relative_path))?; } std::fs::copy( self.actual_base_path.join(&self.relative_path), self.expect_base_path.join(&self.relative_path), )?; true } else { false } } _ => { if crate::ui::ask_to_update_sample("Accept to update file into sample?")? { std::fs::copy( self.actual_base_path.join(&self.relative_path), self.expect_base_path.join(&self.relative_path), )?; true } else { false } } }; Ok(accept_update) } } #[derive(Debug, Clone)] struct Sample { pub name: String, pub args: ApplyOpts, pub expected: PathBuf, pub existing: PathBuf, pub ignores: Vec<PathPattern>, } impl Sample { // scan folder to find sample to test (xxx.args, xxx.expected, xxx.existing)
samples_folder: B, tmp_dir: &TempDir, ) -> Result<Vec<Sample>> { let mut out = vec![]; for e in fs::read_dir(&samples_folder).map_err(|source| Error::ListFolder { path: samples_folder.as_ref().into(), source, })? { let path = e?.path(); if path .extension() .filter(|x| x.to_string_lossy() == "expected") .is_some() { let name = path .file_stem() .expect("folder should have a file name without extension") .to_string_lossy() .to_string(); let expected = path.clone(); let existing = path.with_extension("existing"); let args_file = path.with_extension("cfg.yaml"); let destination = tmp_dir.path().join(&name).to_path_buf(); let sample_cfg = SampleCfg::from_file(args_file)?; let args = sample_cfg.make_args(template_loc, destination)?; let ignores = sample_cfg.make_ignores()?; out.push(Sample { name, args, expected, existing, ignores, }); } } Ok(out) } } #[derive(Deserialize, Serialize, Debug, Default, Clone, PartialEq)] struct SampleCfg { apply_args: Option<Vec<String>>, check_ignores: Option<Vec<String>>, } impl SampleCfg { fn from_file<P: AsRef<Path>>(file: P) -> Result<Self> { let v = if file.as_ref().exists() { let cfg_str = fs::read_to_string(file.as_ref()).map_err(|source| Error::ReadFile { path: file.as_ref().into(), source, })?; serde_yaml::from_str::<SampleCfg>(&cfg_str)? } else { SampleCfg::default() }; Ok(v) } fn make_ignores(&self) -> Result<Vec<PathPattern>> { use std::str::FromStr; let trim_chars: &[_] = &['\r', '\n','', '\t', '"', '\'']; let ignores = self .check_ignores .clone() .unwrap_or_default() .iter() .map(|v| v.trim_matches(trim_chars)) .filter(|v|!v.is_empty()) .map(PathPattern::from_str) .collect::<Result<Vec<PathPattern>>>()?; Ok(ignores) } fn make_args<B: AsRef<Path>>( &self, template_loc: &SourceLoc, destination: B, ) -> Result<ApplyOpts> { let cfg_args = self.apply_args.clone().unwrap_or_default(); let mut args_line = cfg_args.iter().map(|s| s.as_str()).collect::<Vec<_>>(); args_line.push("--confirm"); args_line.push("never"); args_line.push("--no-interaction"); args_line.push("--destination"); args_line.push( destination .as_ref() .to_str() .expect("to convert destination path into str"), ); args_line.push("--source"); args_line.push(&template_loc.uri.raw); if let Some(rev) = &template_loc.rev { args_line.push("--rev"); args_line.push(rev); } let buff = template_loc.subfolder.as_ref().map(|v| v.to_string_lossy()); if let Some(subfolder) = buff.as_ref() { args_line.push("--source-subfolder"); args_line.push(subfolder); } //HACK from_iter_safe expect first entry to be the binary name, // unless clap::AppSettings::NoBinaryName has been used // (but I don't know how to use it in this case, patch is welcomed) args_line.insert(0, "apply"); args_line.insert(0, "ffizer"); CliOpts::try_parse_from(args_line) .map_err(Error::from) .and_then(|o| match o.cmd { Command::Apply(g) => Ok(g), e => Err(Error::Unknown(format!( "command should always be parsed as 'apply' not as {:?}", e ))), }) } } #[derive(Debug, Clone, PartialEq, Eq, Hash)] struct SampleRun { diffs: Vec<EntryDiff>, } impl SampleRun { #[tracing::instrument] pub fn run(sample: &Sample) -> Result<SampleRun> { // ALTERNATIVE: fork a sub-process to run current ffizer in apply mode let destination = &sample.args.dst_folder; if sample.existing.exists() { copy(&sample.existing, destination)?; } let ctx = crate::Ctx { cmd_opt: sample.args.clone(), }; crate::process(&ctx)?; let diffs = dir_diff_list::search_diff(destination, &sample.expected, &sample.ignores)?; Ok(SampleRun { diffs }) } pub fn is_success(&self) -> bool { 
self.diffs.is_empty() } } impl std::fmt::Display for SampleRun { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "Differences: {:#?}", self.diffs) } } /// recursively copy a directory /// based on https://stackoverflow.com/a/60406693/469066 pub fn copy<U: AsRef<Path>, V: AsRef<Path>>(from: U, to: V) -> Result<()> { let mut stack = vec![PathBuf::from(from.as_ref())]; let output_root = PathBuf::from(to.as_ref()); let input_root = PathBuf::from(from.as_ref()).components().count(); while let Some(working_path) = stack.pop() { //println!("process: {:?}", &working_path); // Generate a relative path let src: PathBuf = working_path.components().skip(input_root).collect(); // Create a destination if missing let dest = if src.components().count() == 0 { output_root.clone() } else { output_root.join(&src) }; if fs::metadata(&dest).is_err() { // println!(" mkdir: {:?}", dest); fs::create_dir_all(&dest).map_err(|source| Error::CreateFolder { path: dest.clone(), source, })?; } for entry in fs::read_dir(&working_path).map_err(|source| Error::ListFolder { path: working_path, source, })? { let path = entry?.path(); if path.is_dir() { stack.push(path); } else if let Some(filename) = path.file_name() { let dest_path = dest.join(filename); //println!(" copy: {:?} -> {:?}", &path, &dest_path); fs::copy(&path, &dest_path).map_err(|source| Error::CopyFile { src: path, dst: dest_path, source, })?; } } } Ok(()) }
fn find_from_folder<B: AsRef<Path>>( template_loc: &SourceLoc,
random_line_split
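`SampleCfg::from_file`, whose body appears above, reads an optional per-sample YAML file and falls back to `Default` when it is absent. A self-contained sketch of just the deserialization step, assuming `serde` (with derive) and `serde_yaml` as dependencies; the YAML literal is an example:

use serde::Deserialize;

#[derive(Deserialize, Debug, Default)]
struct SampleCfg {
    apply_args: Option<Vec<String>>,
    check_ignores: Option<Vec<String>>,
}

fn main() {
    let yaml = "apply_args: ['--rev', 'main']\ncheck_ignores: ['*.lock']";
    let cfg: SampleCfg = serde_yaml::from_str(yaml).expect("valid YAML");
    assert_eq!(cfg.apply_args, Some(vec!["--rev".into(), "main".into()]));
    println!("{:?}", cfg);
}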
mod.rs
pub mod dir_diff_list; use crate::cli_opt::{ApplyOpts, CliOpts, Command, TestSamplesOpts}; use crate::path_pattern::PathPattern; use crate::{error::*, SourceLoc}; use clap::Parser; use dir_diff_list::Difference; use dir_diff_list::EntryDiff; use std::fs; use std::path::{Path, PathBuf}; use tempfile::{tempdir, TempDir}; use tracing::info; pub fn test_samples(cfg: &TestSamplesOpts) -> Result<()> { let template_base_path = &cfg.src.download(cfg.offline)?; if!check_samples(template_base_path, &cfg.src, cfg.review)? { Err(crate::Error::TestSamplesFailed {}) } else { Ok(()) } } fn check_samples<A: AsRef<Path>>( template_path: A, template_loc: &SourceLoc, review_mode: bool, ) -> Result<bool> { let mut is_success = true; let tmp_dir = tempdir()?; let samples_folder = template_path .as_ref() .join(crate::cfg::TEMPLATE_SAMPLES_DIRNAME); let samples = Sample::find_from_folder(template_loc, &samples_folder, &tmp_dir)?; info!(nb_samples_detected = samples.len(),?samples_folder); for sample in samples { info!(sample =?sample.name, args =?sample.args, "checking..."); let run = SampleRun::run(&sample)?; is_success = is_success && run.is_success(); show_differences(&sample.name, &run.diffs, review_mode)?; } Ok(is_success || review_mode) } //TODO move to ui module to be customizable (in future) pub fn show_differences(name: &str, entries: &[EntryDiff], review_mode: bool) -> Result<()> { let mut updates_count = 0; for entry in entries { println!("{:-^1$}", "-", 80); entry.show(); if review_mode && entry.review()? { updates_count += 1 } } println!("{:-^1$}", "-", 80); println!( "number of differences in sample '{}': {}", name, entries.len(), ); if review_mode { println!("number of updates in sample '{}': {}", name, updates_count); } println!("{:-^1$}", "-", 80); Ok(()) } impl EntryDiff { fn show(&self) { match &self.difference { Difference::Presence { expect, actual } => { if *expect &&!*actual { println!( "missing file in the actual: {}", self.relative_path.to_string_lossy() ); } else { println!( "unexpected file in the actual: {}", self.relative_path.to_string_lossy() ); } } Difference::Kind { expect, actual } => { println!( "difference kind of entry on: {}, expected: {:?}, actual: {:?}", self.relative_path.to_string_lossy(), expect, actual ); } Difference::StringContent { expect, actual } => { println!( "difference detected on: {}\n", self.relative_path.to_string_lossy() ); crate::ui::show_difference_text(expect, actual, true); } Difference::BinaryContent { expect_md5, actual_md5, } => { println!( "difference detected on: {} (detected as binary file)\n", self.relative_path.to_string_lossy() ); println!("expected md5: {}", expect_md5); println!("actual md5: {}", actual_md5); } } } // TODO add test fn review(&self) -> Result<bool> { let accept_update = match self.difference { Difference::Presence { expect, actual } => { if expect &&!actual { let path = self.expect_base_path.join(&self.relative_path); let is_dir = std::fs::metadata(&path)?.is_dir(); if crate::ui::ask_to_update_sample("Accept to remove from sample?")? { if is_dir { std::fs::remove_dir_all(&path)?; } else { std::fs::remove_file(&path)?; } true } else { false } } else if crate::ui::ask_to_update_sample("Accept to add into sample?")? 
{ let path = self.actual_base_path.join(&self.relative_path); let is_dir = std::fs::metadata(&path)?.is_dir(); if is_dir { std::fs::create_dir_all(self.expect_base_path.join(&self.relative_path))?; } else { std::fs::copy(path, self.expect_base_path.join(&self.relative_path))?; } std::fs::copy( self.actual_base_path.join(&self.relative_path), self.expect_base_path.join(&self.relative_path), )?; true } else { false } } _ => { if crate::ui::ask_to_update_sample("Accept to update file into sample?")? { std::fs::copy( self.actual_base_path.join(&self.relative_path), self.expect_base_path.join(&self.relative_path), )?; true } else { false } } }; Ok(accept_update) } } #[derive(Debug, Clone)] struct Sample { pub name: String, pub args: ApplyOpts, pub expected: PathBuf, pub existing: PathBuf, pub ignores: Vec<PathPattern>, } impl Sample { // scan folder to find sample to test (xxx.args, xxx.expected, xxx.existing) fn find_from_folder<B: AsRef<Path>>( template_loc: &SourceLoc, samples_folder: B, tmp_dir: &TempDir, ) -> Result<Vec<Sample>> { let mut out = vec![]; for e in fs::read_dir(&samples_folder).map_err(|source| Error::ListFolder { path: samples_folder.as_ref().into(), source, })? { let path = e?.path(); if path .extension() .filter(|x| x.to_string_lossy() == "expected") .is_some() { let name = path .file_stem() .expect("folder should have a file name without extension") .to_string_lossy() .to_string(); let expected = path.clone(); let existing = path.with_extension("existing"); let args_file = path.with_extension("cfg.yaml"); let destination = tmp_dir.path().join(&name).to_path_buf(); let sample_cfg = SampleCfg::from_file(args_file)?; let args = sample_cfg.make_args(template_loc, destination)?; let ignores = sample_cfg.make_ignores()?; out.push(Sample { name, args, expected, existing, ignores, }); } } Ok(out) } } #[derive(Deserialize, Serialize, Debug, Default, Clone, PartialEq)] struct SampleCfg { apply_args: Option<Vec<String>>, check_ignores: Option<Vec<String>>, } impl SampleCfg { fn
<P: AsRef<Path>>(file: P) -> Result<Self> { let v = if file.as_ref().exists() { let cfg_str = fs::read_to_string(file.as_ref()).map_err(|source| Error::ReadFile { path: file.as_ref().into(), source, })?; serde_yaml::from_str::<SampleCfg>(&cfg_str)? } else { SampleCfg::default() }; Ok(v) } fn make_ignores(&self) -> Result<Vec<PathPattern>> { use std::str::FromStr; let trim_chars: &[_] = &['\r', '\n','', '\t', '"', '\'']; let ignores = self .check_ignores .clone() .unwrap_or_default() .iter() .map(|v| v.trim_matches(trim_chars)) .filter(|v|!v.is_empty()) .map(PathPattern::from_str) .collect::<Result<Vec<PathPattern>>>()?; Ok(ignores) } fn make_args<B: AsRef<Path>>( &self, template_loc: &SourceLoc, destination: B, ) -> Result<ApplyOpts> { let cfg_args = self.apply_args.clone().unwrap_or_default(); let mut args_line = cfg_args.iter().map(|s| s.as_str()).collect::<Vec<_>>(); args_line.push("--confirm"); args_line.push("never"); args_line.push("--no-interaction"); args_line.push("--destination"); args_line.push( destination .as_ref() .to_str() .expect("to convert destination path into str"), ); args_line.push("--source"); args_line.push(&template_loc.uri.raw); if let Some(rev) = &template_loc.rev { args_line.push("--rev"); args_line.push(rev); } let buff = template_loc.subfolder.as_ref().map(|v| v.to_string_lossy()); if let Some(subfolder) = buff.as_ref() { args_line.push("--source-subfolder"); args_line.push(subfolder); } //HACK from_iter_safe expect first entry to be the binary name, // unless clap::AppSettings::NoBinaryName has been used // (but I don't know how to use it in this case, patch is welcomed) args_line.insert(0, "apply"); args_line.insert(0, "ffizer"); CliOpts::try_parse_from(args_line) .map_err(Error::from) .and_then(|o| match o.cmd { Command::Apply(g) => Ok(g), e => Err(Error::Unknown(format!( "command should always be parsed as 'apply' not as {:?}", e ))), }) } } #[derive(Debug, Clone, PartialEq, Eq, Hash)] struct SampleRun { diffs: Vec<EntryDiff>, } impl SampleRun { #[tracing::instrument] pub fn run(sample: &Sample) -> Result<SampleRun> { // ALTERNATIVE: fork a sub-process to run current ffizer in apply mode let destination = &sample.args.dst_folder; if sample.existing.exists() { copy(&sample.existing, destination)?; } let ctx = crate::Ctx { cmd_opt: sample.args.clone(), }; crate::process(&ctx)?; let diffs = dir_diff_list::search_diff(destination, &sample.expected, &sample.ignores)?; Ok(SampleRun { diffs }) } pub fn is_success(&self) -> bool { self.diffs.is_empty() } } impl std::fmt::Display for SampleRun { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "Differences: {:#?}", self.diffs) } } /// recursively copy a directory /// based on https://stackoverflow.com/a/60406693/469066 pub fn copy<U: AsRef<Path>, V: AsRef<Path>>(from: U, to: V) -> Result<()> { let mut stack = vec![PathBuf::from(from.as_ref())]; let output_root = PathBuf::from(to.as_ref()); let input_root = PathBuf::from(from.as_ref()).components().count(); while let Some(working_path) = stack.pop() { //println!("process: {:?}", &working_path); // Generate a relative path let src: PathBuf = working_path.components().skip(input_root).collect(); // Create a destination if missing let dest = if src.components().count() == 0 { output_root.clone() } else { output_root.join(&src) }; if fs::metadata(&dest).is_err() { // println!(" mkdir: {:?}", dest); fs::create_dir_all(&dest).map_err(|source| Error::CreateFolder { path: dest.clone(), source, })?; } for entry in 
fs::read_dir(&working_path).map_err(|source| Error::ListFolder { path: working_path, source, })? { let path = entry?.path(); if path.is_dir() { stack.push(path); } else if let Some(filename) = path.file_name() { let dest_path = dest.join(filename); //println!(" copy: {:?} -> {:?}", &path, &dest_path); fs::copy(&path, &dest_path).map_err(|source| Error::CopyFile { src: path, dst: dest_path, source, })?; } } } Ok(()) }
from_file
identifier_name
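The HACK note in `make_args` above prepends a dummy binary name ("ffizer") before handing the argument vector to clap. A minimal sketch of the setting that comment asks about, assuming clap v4's derive API; `DemoOpts` is a hypothetical stand-in, not ffizer's `CliOpts`, and the `apply` subcommand is omitted:

use clap::Parser;

// Hypothetical options struct standing in for ffizer's CliOpts.
#[derive(Parser, Debug)]
#[command(no_binary_name = true)] // tell clap there is no argv[0] token to skip
struct DemoOpts {
    #[arg(long)]
    destination: String,
}

fn main() {
    // With no_binary_name set, no leading "ffizer" token is required.
    let opts = DemoOpts::try_parse_from(["--destination", "/tmp/out"]).unwrap();
    assert_eq!(opts.destination, "/tmp/out");
}

Prepending the dummy name, as the code above does, remains an equally workable approach.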
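The `copy` helper above replaces recursion with an explicit work stack. The same pattern in a self-contained form, with error handling reduced to `std::io::Result` for brevity (a sketch, not ffizer's implementation):

use std::fs;
use std::io;
use std::path::{Path, PathBuf};

// Iterative (stack-driven) directory copy: pop a directory, mirror it at
// the destination, copy its files, and push its subdirectories.
fn copy_tree(from: &Path, to: &Path) -> io::Result<()> {
    let mut stack = vec![PathBuf::from(from)];
    while let Some(dir) = stack.pop() {
        let rel = dir.strip_prefix(from).expect("dir is always under `from`");
        let dest = to.join(rel);
        fs::create_dir_all(&dest)?;
        for entry in fs::read_dir(&dir)? {
            let path = entry?.path();
            if path.is_dir() {
                stack.push(path);
            } else if let Some(name) = path.file_name() {
                fs::copy(&path, dest.join(name))?;
            }
        }
    }
    Ok(())
}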
mod.rs
//! [RFC 7230](https://tools.ietf.org/html/rfc7230) compliant HTTP/1.1 request parser

mod util;

use std::io::prelude::*;
use std::net::TcpStream;
use std::collections::HashMap;
use std::sync::Arc;
use self::util::*;
pub use self::util::ParseError;
use self::util::TokenType::{TChar, Invalid};

/// A container for the details of an HTTP request
#[derive(Debug, Eq, PartialEq)]
pub struct Request {
    /// HTTP Version
    version: (u8, u8),
    /// HTTP Method (verb)
    method: Method,
    /// Target (the URI from path onwards)
    target: String,
    /// The HTTP request headers
    headers: HashMap<String, String>,
    /// The request body
    body: Vec<u8>,
}

impl Request {
    /// Get the request's HTTP version, in the format (major, minor)
    pub fn get_version(&self) -> (u8, u8) {
        self.version
    }

    /// Get the request method
    pub fn get_method(&self) -> &Method {
        &self.method
    }

    /// Get the request target (usually the [origin form](https://tools.ietf.org/html/rfc7230#section-5.3.1) of the
    /// request url, which is the absolute path followed optionally by the query)
    pub fn get_target(&self) -> &str {
        &self.target
    }

    /// Get the request headers
    /// TODO: This should either be a collection or parsed to combine comma separated headers
    pub fn get_headers(&self) -> &HashMap<String, String> {
        &self.headers
    }

    /// Get the request body, if one was supplied in the request
    pub fn get_body(&self) -> &[u8] {
        self.body.as_slice()
    }
}

impl Request {
    /// Parse a request stream
    pub fn from(stream: &mut TcpStream) -> Result<Request, ParseError> {
        let mut builder = RequestBuilder::new();
        let mut it = StreamReader::from(stream);
        Request::parse_request_line(&mut builder, &mut it)?;
        Request::parse_headers(&mut builder, &mut it)?;
        // Sanity checks
        Request::parse_body(&mut builder, &mut it)?;
        Ok(builder.into_request().unwrap())
    }

    /// Parse the request line, which is the first line of the request
    ///
    /// It should have the form `Method Target HTTP/Version`, as defined in
    /// [RFC 7230 §3.1.1](https://tools.ietf.org/html/rfc7230#section-3.1.1).
    fn parse_request_line<T>(builder: &mut RequestBuilder, it: &mut StreamReader<T>) -> Result<(), ParseError>
        where T: Read {
        // Request method
        let method = Request::parse_request_method(it)?;
        builder.set_method(method);

        // Target
        let target = Request::parse_request_target(it)?;
        builder.set_target(target);

        // Version
        let version = Request::parse_request_version(it)?;
        builder.set_version(version.0, version.1);

        Ok(())
    }

    /// Parse the method (GET, POST, etc). It should be 1 or more visible characters, treated case-sensitively, and it
    /// is followed by a single space (according to
    /// [RFC 7230 §3.1.1](https://tools.ietf.org/html/rfc7230#section-3.1.1)).
    fn parse_request_method<T>(it: &mut StreamReader<T>) -> Result<Method, ParseError>
        where T: Read {
        let mut method = Vec::new();

        // Read bytes
        for b in it {
            match TokenType::from(b) {
                TChar(c) => method.push(c),
                Invalid(b' ') => return Ok(Method::from(method)),
                Invalid(_) => return Err(ParseError::IllegalCharacter),
            }
        }
        Err(ParseError::EOF)
    }

    /// Parse the target (requested resource). The most general form is 1 or more visible characters (followed by a
    /// single space), though more restrictive parsing would be permitted as defined in
    /// [RFC 7230 §5.3](https://tools.ietf.org/html/rfc7230#section-5.3).
    fn parse_request_target<T>(it: &mut StreamReader<T>) -> Result<String, ParseError>
        where T: Read {
        let mut target = Vec::new();

        // Read bytes
        for b in it {
            match b {
                // Allowed characters in URLs per [RFC 3986](https://tools.ietf.org/html/rfc3986#appendix-A)
                b'!'
| b'#'...b';' | b'=' | b'?'...b'[' | b']'...b'z' | b'|' | b'~' => target.push(b),
                b' ' => return Ok(String::from_utf8(target).unwrap()), // Safe to unwrap because input is sanitised
                _ => return Err(ParseError::IllegalCharacter),
            }
        }
        Err(ParseError::EOF)
    }

    /// Parse the HTTP version, which should be HTTP/maj.min, where maj and min are single digits, as defined in
    /// [RFC 7230 §2.6](https://tools.ietf.org/html/rfc7230#section-2.6).
    fn parse_request_version<T>(it: &mut StreamReader<T>) -> Result<(u8, u8), ParseError>
        where T: Read {
        let expected_it = "HTTP/".bytes();
        for expected in expected_it {
            match it.next() {
                Some(b) if b == expected => (),
                Some(_) => return Err(ParseError::IllegalCharacter),
                None => return Err(ParseError::EOF),
            }
        }

        let major = match it.next() {
            Some(n) if n >= 48 && n <= 57 => n - 48,
            Some(_) => return Err(ParseError::IllegalCharacter),
            None => return Err(ParseError::EOF),
        };
        match it.next() {
            Some(b'.') => (),
            Some(_) => return Err(ParseError::IllegalCharacter),
            None => return Err(ParseError::EOF),
        }
        let minor = match it.next() {
            Some(n) if n >= 48 && n <= 57 => n - 48,
            Some(_) => return Err(ParseError::IllegalCharacter),
            None => return Err(ParseError::EOF),
        };

        // Should now be at the end of the Request Line
        match it.next() {
            Some(b'\r') => (),
            Some(_) => return Err(ParseError::IllegalCharacter),
            None => return Err(ParseError::EOF),
        }
        match it.next() {
            Some(b'\n') => (),
            Some(_) => return Err(ParseError::IllegalCharacter),
            None => return Err(ParseError::EOF),
        }

        Ok((major, minor))
    }

    /// Parse the request headers from `it` into `builder`, as specified in
    /// [RFC 7230 §3.2](https://tools.ietf.org/html/rfc7230#section-3.2)
    fn parse_headers<T: Read>(builder: &mut RequestBuilder, it: &mut StreamReader<T>) -> Result<(), ParseError> {
        // An enum to store the current state of the parser
        enum ParserState {
            // After a new line, ready to parse the header name
            Start,
            // Currently parsing the header name
            Name {name: Vec<u8>},
            // Currently parsing the whitespace after the : but before the value
            ValueLeadingWS {name: String},
            // Currently parsing the value
            Value {name: String, value: Vec<u8>},
            // Currently parsing the new line (CR (here) LF)
            NewLine,
            // Currently parsing the final new line (CR LF CR (here) LF)
            FinalNewLine,
        };

        let mut state = ParserState::Start;
        'outer: loop {
            let b = match it.next() {
                None => return Err(ParseError::EOF),
                Some(b) => b,
            };
            // Wrap this in a loop so that we can cheaply transition to a different state without having consumed
            // any characters
            loop {
                match state {
                    ParserState::Start => match b {
                        b'\r' => state = ParserState::FinalNewLine,
                        _ => {
                            // Move straight into Name without consuming this character
                            state = ParserState::Name { name: Vec::new() };
                            continue;
                        }
                    },
                    ParserState::Name {name: mut n} => match TokenType::from(b) {
                        TChar(c) => {
                            n.push(c);
                            state = ParserState::Name {name: n}
                        },
                        Invalid(b':') => {
                            // Safe to convert to UTF-8 because it was constructed from just ASCII characters
                            let name = String::from_utf8(n).unwrap();
                            state = ParserState::ValueLeadingWS {name: name};
                        },
                        Invalid(_) => return Err(ParseError::IllegalCharacter),
                    },
                    ParserState::ValueLeadingWS {name: n} => match b {
                        b' ' | b'\t' => state = ParserState::ValueLeadingWS {name: n},
                        _ => {
                            // Move straight into Value without consuming
                            state = ParserState::Value { name: n, value: Vec::new() };
                            continue;
                        }
                    },
                    ParserState::Value {name: n, value: mut v} => match b {
                        b'\t' | b' '...b'~' => {
                            v.push(b);
                            state = ParserState::Value {name: n, value: v};
                        },
                        0x80...0xFF => { // The specification
says that headers containing these characters SHOULD be considered as // opaque data. However, doing that means we can't treat the headers as strings, because // this would break UTF-8 compliance, thereby vastly increasing the complexity of the rest // of the code. The non-ASCII characters will therefore be silently discarded state = ParserState::Value {name: n, value: v}; } b'\r' => { // Because we discarded the invalid characters, it's safe to convert to UTF-8 let value = String::from_utf8(v).unwrap(); // Store the header builder.add_header(n, value); // Transition to expect the LF state = ParserState::NewLine; }, _ => return Err(ParseError::IllegalCharacter), }, ParserState::NewLine => match b { b'\n' => state = ParserState::Start, _ => return Err(ParseError::IllegalCharacter), }, ParserState::FinalNewLine => match b { b'\n' => break 'outer, _ => return Err(ParseError::IllegalCharacter), } } // Consume the next character break; } } Ok(()) } fn parse_body<T: Read>(builder: &mut RequestBuilder, it: &mut StreamReader<T>) -> Result<(), ParseError> { println!("Starting parse body"); // TODO: can't read to end Ok(())/* match it.get_inner().read_to_end(builder.get_body()) { Ok(0) => Ok(()), Ok(_) => { println!("Body read complete"); Ok(()) }, Err(e) => Err(ParseError::new_server_error(e)), }*/ } } unsafe impl Send for Request {} // nb. Not syncable until fully constructed (when the hashmap becomes effectively immutable) // Public interface is completely syncable unsafe impl Sync for Request {} /// HTTP Methods (verbs), as defined by [RFC 7231 §4](https://tools.ietf.org/html/rfc7231#section-4) #[derive(Debug, Eq, PartialEq, Clone)] pub enum Method { Get, Post, Patch, Delete, Put, Head, Connect, Options, Trace, Custom(Arc<Vec<u8>>), } impl Method { /// Construct a `Method` from the corresponding case-sensitive name, provided as a vector of bytes. /// Ownership of the vector is required to store the name in the event that it isn't a known method. pub fn from(name: Vec<u8>) -> Method { use self::Method::*; if name.as_slice() == &b"GET"[..] { return Get }; if name.as_slice() == &b"POST"[..] { return Post }; if name.as_slice() == &b"PATCH"[..] { return Patch }; if name.as_slice() == &b"DELETE"[..] { return Delete }; if name.as_slice() == &b"PUT"[..] { return Put }; if name.as_slice() == &b"HEAD"[..] { return Head }; if name.as_slice() == &b"CONNECT"[..] { return Connect }; if name.as_slice() == &b"OPTIONS"[..] { return Options }; if name.as_slice() == &b"TRACE"[..] { return Trace }; return Custom(Arc::from(name)); } } unsafe impl Send for Method {} /// A struct that can be used to incrementally build up a request, so the components are optional #[derive(Debug, Eq, PartialEq)] struct RequestBuilder { version: Option<(u8, u8)>, method: Option<Method>, target: Option<String>, headers: HashMap<String, String>, body: Vec<u8>, } impl RequestBuilder { /// Construct a new RequestBuilder pub fn new() -> RequestBuilder { RequestBuilder { version: None, method: None, target: None, headers: HashMap::new(), body: Vec::new(), } } /// Set the HTTP version of this request pub fn set_version(&mut self, major: u8, minor: u8) { self.version = Some((major, minor)); } /// Set the request method pub fn set_method(&mut self, method: Method) { self.method = Some(method); } /// Set the request target pub fn set_target(&mut self, target: String) { self.target = Some(target); } /// Set the body of the request pub fn get_body(&mut self) -> &mut Vec<u8> { &mut self.body } /// Add a header. 
This method currently stores the latest version in the event of duplicate headers. pub fn add_header(&mut self, key: String, val: String) { self.headers.insert(key, val); } pub fn get_he
) -> &HashMap<String, String> { &self.headers } /// Convert this request builder into a full request pub fn into_request(self) -> Option<Request> { match self { RequestBuilder { version: Some(version), method: Some(method), target: Some(target), headers, body, } => Some(Request{ version, method, target, headers, body }), _ => None, } } } #[cfg(test)] mod tests { use super::*; use std::str::Bytes; use std::io; #[test] fn test_parse_request_line() { let mut builder = RequestBuilder::new(); let mut byte_iterator = StrReader::new("GET /test/path?k=v&k2 HTTP/1.1\r\n".bytes()); let mut it = StreamReader::from(&mut byte_iterator); Request::parse_request_line(&mut builder, &mut it).unwrap(); assert_eq!(builder, RequestBuilder { version: Some((1, 1)), method: Some(Method::Get), target: Some(String::from("/test/path?k=v&k2")), headers: HashMap::new(), body: vec![], }); } struct StrReader<'a> { data: Bytes<'a>, } impl<'a> StrReader<'a> { fn new(data: Bytes) -> StrReader { StrReader { data, } } } impl<'a> Read for StrReader<'a> { fn read(&mut self, buf: &mut [u8]) -> Result<usize, io::Error> { let len = buf.len(); let mut i = 0; while i < len { buf[i] = match self.data.next() { Some(d) => d, None => return Ok(i), }; i += 1; } Ok(i) } } }
aders(&self
identifier_name
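`parse_request_version` above consumes the literal `HTTP/` and two single digits byte by byte from the stream. The same grammar over an in-memory slice, as a compact self-contained sketch using slice patterns (not part of the parser's actual API):

// Sketch of the "HTTP/maj.min" grammar: expect the literal "HTTP/",
// then digit '.' digit; anything else is rejected.
fn parse_version(input: &[u8]) -> Option<(u8, u8)> {
    let rest = input.strip_prefix(b"HTTP/")?;
    match rest {
        [maj @ b'0'..=b'9', b'.', min @ b'0'..=b'9', ..] => Some((maj - b'0', min - b'0')),
        _ => None,
    }
}

fn main() {
    assert_eq!(parse_version(b"HTTP/1.1\r\n"), Some((1, 1)));
    assert_eq!(parse_version(b"HTTP/2x"), None);
}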
mod.rs
//! [RFC 7230](https://tools.ietf.org/html/rfc7230) compliant HTTP/1.1 request parser

mod util;

use std::io::prelude::*;
use std::net::TcpStream;
use std::collections::HashMap;
use std::sync::Arc;
use self::util::*;
pub use self::util::ParseError;
use self::util::TokenType::{TChar, Invalid};

/// A container for the details of an HTTP request
#[derive(Debug, Eq, PartialEq)]
pub struct Request {
    /// HTTP Version
    version: (u8, u8),
    /// HTTP Method (verb)
    method: Method,
    /// Target (the URI from path onwards)
    target: String,
    /// The HTTP request headers
    headers: HashMap<String, String>,
    /// The request body
    body: Vec<u8>,
}

impl Request {
    /// Get the request's HTTP version, in the format (major, minor)
    pub fn get_version(&self) -> (u8, u8) {
        self.version
    }

    /// Get the request method
    pub fn get_method(&self) -> &Method {
        &self.method
    }

    /// Get the request target (usually the [origin form](https://tools.ietf.org/html/rfc7230#section-5.3.1) of the
    /// request url, which is the absolute path followed optionally by the query)
    pub fn get_target(&self) -> &str {
        &self.target
    }

    /// Get the request headers
    /// TODO: This should either be a collection or parsed to combine comma separated headers
    pub fn get_headers(&self) -> &HashMap<String, String> {
        &self.headers
    }

    /// Get the request body, if one was supplied in the request
    pub fn get_body(&self) -> &[u8] {
        self.body.as_slice()
    }
}

impl Request {
    /// Parse a request stream
    pub fn from(stream: &mut TcpStream) -> Result<Request, ParseError> {
        let mut builder = RequestBuilder::new();
        let mut it = StreamReader::from(stream);
        Request::parse_request_line(&mut builder, &mut it)?;
        Request::parse_headers(&mut builder, &mut it)?;
        // Sanity checks
        Request::parse_body(&mut builder, &mut it)?;
        Ok(builder.into_request().unwrap())
    }

    /// Parse the request line, which is the first line of the request
    ///
    /// It should have the form `Method Target HTTP/Version`, as defined in
    /// [RFC 7230 §3.1.1](https://tools.ietf.org/html/rfc7230#section-3.1.1).
    fn parse_request_line<T>(builder: &mut RequestBuilder, it: &mut StreamReader<T>) -> Result<(), ParseError>
        where T: Read {
        // Request method
        let method = Request::parse_request_method(it)?;
        builder.set_method(method);

        // Target
        let target = Request::parse_request_target(it)?;
        builder.set_target(target);

        // Version
        let version = Request::parse_request_version(it)?;
        builder.set_version(version.0, version.1);

        Ok(())
    }

    /// Parse the method (GET, POST, etc). It should be 1 or more visible characters, treated case-sensitively, and it
    /// is followed by a single space (according to
    /// [RFC 7230 §3.1.1](https://tools.ietf.org/html/rfc7230#section-3.1.1)).
    fn parse_request_method<T>(it: &mut StreamReader<T>) -> Result<Method, ParseError>
        where T: Read {
        let mut method = Vec::new();

        // Read bytes
        for b in it {
            match TokenType::from(b) {
                TChar(c) => method.push(c),
                Invalid(b' ') => return Ok(Method::from(method)),
                Invalid(_) => return Err(ParseError::IllegalCharacter),
            }
        }
        Err(ParseError::EOF)
    }

    /// Parse the target (requested resource). The most general form is 1 or more visible characters (followed by a
    /// single space), though more restrictive parsing would be permitted as defined in
    /// [RFC 7230 §5.3](https://tools.ietf.org/html/rfc7230#section-5.3).
    fn parse_request_target<T>(it: &mut StreamReader<T>) -> Result<String, ParseError>
        where T: Read {
        let mut target = Vec::new();

        // Read bytes
        for b in it {
            match b {
                // Allowed characters in URLs per [RFC 3986](https://tools.ietf.org/html/rfc3986#appendix-A)
                b'!'
| b'#'...b';' | b'=' | b'?'...b'[' | b']'...b'z' | b'|' | b'~' => target.push(b),
                b' ' => return Ok(String::from_utf8(target).unwrap()), // Safe to unwrap because input is sanitised
                _ => return Err(ParseError::IllegalCharacter),
            }
        }
        Err(ParseError::EOF)
    }

    /// Parse the HTTP version, which should be HTTP/maj.min, where maj and min are single digits, as defined in
    /// [RFC 7230 §2.6](https://tools.ietf.org/html/rfc7230#section-2.6).
    fn parse_request_version<T>(it: &mut StreamReader<T>) -> Result<(u8, u8), ParseError>
        where T: Read {
        let expected_it = "HTTP/".bytes();
        for expected in expected_it {
            match it.next() {
                Some(b) if b == expected => (),
                Some(_) => return Err(ParseError::IllegalCharacter),
                None => return Err(ParseError::EOF),
            }
        }

        let major = match it.next() {
            Some(n) if n >= 48 && n <= 57 => n - 48,
            Some(_) => return Err(ParseError::IllegalCharacter),
            None => return Err(ParseError::EOF),
        };
        match it.next() {
            Some(b'.') => (),
            Some(_) => return Err(ParseError::IllegalCharacter),
            None => return Err(ParseError::EOF),
        }
        let minor = match it.next() {
            Some(n) if n >= 48 && n <= 57 => n - 48,
            Some(_) => return Err(ParseError::IllegalCharacter),
            None => return Err(ParseError::EOF),
        };

        // Should now be at the end of the Request Line
        match it.next() {
            Some(b'\r') => (),
            Some(_) => return Err(ParseError::IllegalCharacter),
            None => return Err(ParseError::EOF),
        }
        match it.next() {
            Some(b'\n') => (),
            Some(_) => return Err(ParseError::IllegalCharacter),
            None => return Err(ParseError::EOF),
        }

        Ok((major, minor))
    }

    /// Parse the request headers from `it` into `builder`, as specified in
    /// [RFC 7230 §3.2](https://tools.ietf.org/html/rfc7230#section-3.2)
    fn parse_headers<T: Read>(builder: &mut RequestBuilder, it: &mut StreamReader<T>) -> Result<(), ParseError> {
        // An enum to store the current state of the parser
        enum ParserState {
            // After a new line, ready to parse the header name
            Start,
            // Currently parsing the header name
            Name {name: Vec<u8>},
            // Currently parsing the whitespace after the : but before the value
            ValueLeadingWS {name: String},
            // Currently parsing the value
            Value {name: String, value: Vec<u8>},
            // Currently parsing the new line (CR (here) LF)
            NewLine,
            // Currently parsing the final new line (CR LF CR (here) LF)
            FinalNewLine,
        };

        let mut state = ParserState::Start;
        'outer: loop {
            let b = match it.next() {
                None => return Err(ParseError::EOF),
                Some(b) => b,
            };
            // Wrap this in a loop so that we can cheaply transition to a different state without having consumed
            // any characters
            loop {
                match state {
                    ParserState::Start => match b {
                        b'\r' => state = ParserState::FinalNewLine,
                        _ => {
                            // Move straight into Name without consuming this character
                            state = ParserState::Name { name: Vec::new() };
                            continue;
                        }
                    },
                    ParserState::Name {name: mut n} => match TokenType::from(b) {
                        TChar(c) => {
                            n.push(c);
                            state = ParserState::Name {name: n}
                        },
                        Invalid(b':') => {
                            // Safe to convert to UTF-8 because it was constructed from just ASCII characters
                            let name = String::from_utf8(n).unwrap();
                            state = ParserState::ValueLeadingWS {name: name};
                        },
                        Invalid(_) => return Err(ParseError::IllegalCharacter),
                    },
                    ParserState::ValueLeadingWS {name: n} => match b {
                        b' ' | b'\t' => state = ParserState::ValueLeadingWS {name: n},
                        _ => {
                            // Move straight into Value without consuming
                            state = ParserState::Value { name: n, value: Vec::new() };
                            continue;
                        }
                    },
                    ParserState::Value {name: n, value: mut v} => match b {
                        b'\t' | b' '...b'~' => {
                            v.push(b);
                            state = ParserState::Value {name: n, value: v};
                        },
                        0x80...0xFF => { // The specification
says that headers containing these characters SHOULD be considered as // opaque data. However, doing that means we can't treat the headers as strings, because // this would break UTF-8 compliance, thereby vastly increasing the complexity of the rest // of the code. The non-ASCII characters will therefore be silently discarded state = ParserState::Value {name: n, value: v}; } b'\r' => { // Because we discarded the invalid characters, it's safe to convert to UTF-8 let value = String::from_utf8(v).unwrap(); // Store the header builder.add_header(n, value); // Transition to expect the LF state = ParserState::NewLine; }, _ => return Err(ParseError::IllegalCharacter), }, ParserState::NewLine => match b { b'\n' => state = ParserState::Start, _ => return Err(ParseError::IllegalCharacter), }, ParserState::FinalNewLine => match b { b'\n' => break 'outer, _ => return Err(ParseError::IllegalCharacter), } } // Consume the next character break; } } Ok(()) } fn parse_body<T: Read>(builder: &mut RequestBuilder, it: &mut StreamReader<T>) -> Result<(), ParseError> { println!("Starting parse body"); // TODO: can't read to end Ok(())/*
match it.get_inner().read_to_end(builder.get_body()) { Ok(0) => Ok(()), Ok(_) => { println!("Body read complete"); Ok(()) }, Err(e) => Err(ParseError::new_server_error(e)), }*/ } } unsafe impl Send for Request {} // nb. Not syncable until fully constructed (when the hashmap becomes effectively immutable) // Public interface is completely syncable unsafe impl Sync for Request {} /// HTTP Methods (verbs), as defined by [RFC 7231 §4](https://tools.ietf.org/html/rfc7231#section-4) #[derive(Debug, Eq, PartialEq, Clone)] pub enum Method { Get, Post, Patch, Delete, Put, Head, Connect, Options, Trace, Custom(Arc<Vec<u8>>), } impl Method { /// Construct a `Method` from the corresponding case-sensitive name, provided as a vector of bytes. /// Ownership of the vector is required to store the name in the event that it isn't a known method. pub fn from(name: Vec<u8>) -> Method { use self::Method::*; if name.as_slice() == &b"GET"[..] { return Get }; if name.as_slice() == &b"POST"[..] { return Post }; if name.as_slice() == &b"PATCH"[..] { return Patch }; if name.as_slice() == &b"DELETE"[..] { return Delete }; if name.as_slice() == &b"PUT"[..] { return Put }; if name.as_slice() == &b"HEAD"[..] { return Head }; if name.as_slice() == &b"CONNECT"[..] { return Connect }; if name.as_slice() == &b"OPTIONS"[..] { return Options }; if name.as_slice() == &b"TRACE"[..] { return Trace }; return Custom(Arc::from(name)); } } unsafe impl Send for Method {} /// A struct that can be used to incrementally build up a request, so the components are optional #[derive(Debug, Eq, PartialEq)] struct RequestBuilder { version: Option<(u8, u8)>, method: Option<Method>, target: Option<String>, headers: HashMap<String, String>, body: Vec<u8>, } impl RequestBuilder { /// Construct a new RequestBuilder pub fn new() -> RequestBuilder { RequestBuilder { version: None, method: None, target: None, headers: HashMap::new(), body: Vec::new(), } } /// Set the HTTP version of this request pub fn set_version(&mut self, major: u8, minor: u8) { self.version = Some((major, minor)); } /// Set the request method pub fn set_method(&mut self, method: Method) { self.method = Some(method); } /// Set the request target pub fn set_target(&mut self, target: String) { self.target = Some(target); } /// Set the body of the request pub fn get_body(&mut self) -> &mut Vec<u8> { &mut self.body } /// Add a header. This method currently stores the latest version in the event of duplicate headers. 
pub fn add_header(&mut self, key: String, val: String) { self.headers.insert(key, val); } pub fn get_headers(&self) -> &HashMap<String, String> { &self.headers } /// Convert this request builder into a full request pub fn into_request(self) -> Option<Request> { match self { RequestBuilder { version: Some(version), method: Some(method), target: Some(target), headers, body, } => Some(Request{ version, method, target, headers, body }), _ => None, } } } #[cfg(test)] mod tests { use super::*; use std::str::Bytes; use std::io; #[test] fn test_parse_request_line() { let mut builder = RequestBuilder::new(); let mut byte_iterator = StrReader::new("GET /test/path?k=v&k2 HTTP/1.1\r\n".bytes()); let mut it = StreamReader::from(&mut byte_iterator); Request::parse_request_line(&mut builder, &mut it).unwrap(); assert_eq!(builder, RequestBuilder { version: Some((1, 1)), method: Some(Method::Get), target: Some(String::from("/test/path?k=v&k2")), headers: HashMap::new(), body: vec![], }); } struct StrReader<'a> { data: Bytes<'a>, } impl<'a> StrReader<'a> { fn new(data: Bytes) -> StrReader { StrReader { data, } } } impl<'a> Read for StrReader<'a> { fn read(&mut self, buf: &mut [u8]) -> Result<usize, io::Error> { let len = buf.len(); let mut i = 0; while i < len { buf[i] = match self.data.next() { Some(d) => d, None => return Ok(i), }; i += 1; } Ok(i) } } }
random_line_split
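For contrast with the streaming state machine in `parse_headers` above, the same header grammar over an in-memory string: split on CRLF, split each line at the first `:`, and trim the leading whitespace that the `ValueLeadingWS` state skips. A simplified sketch that, like `add_header`, keeps only the last value of a duplicated header:

use std::collections::HashMap;

fn parse_headers(raw: &str) -> HashMap<String, String> {
    let mut headers = HashMap::new();
    // Header lines end at the first empty line (the CR LF CR LF terminator).
    for line in raw.split("\r\n").take_while(|l| !l.is_empty()) {
        if let Some((name, value)) = line.split_once(':') {
            headers.insert(name.to_string(), value.trim_start().to_string());
        }
    }
    headers
}

fn main() {
    let h = parse_headers("Host: example.com\r\nAccept: */*\r\n\r\n");
    assert_eq!(h["Host"], "example.com");
    assert_eq!(h["Accept"], "*/*");
}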
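`Method::from` above compares the name against each known verb with a chain of `if`s; byte-string literal patterns express the same dispatch as a single `match`. A trimmed sketch (two verbs only), matching first and moving the vector into the fallback afterwards so the borrow of the slice has ended:

#[derive(Debug, PartialEq)]
enum Verb {
    Get,
    Post,
    Custom(Vec<u8>),
}

fn verb_from(name: Vec<u8>) -> Verb {
    // Byte-string literals can be matched against the slice directly.
    match name.as_slice() {
        b"GET" => return Verb::Get,
        b"POST" => return Verb::Post,
        _ => {}
    }
    Verb::Custom(name)
}

fn main() {
    assert_eq!(verb_from(b"GET".to_vec()), Verb::Get);
    assert_eq!(verb_from(b"BREW".to_vec()), Verb::Custom(b"BREW".to_vec()));
}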
spacing.rs
extern crate std as ruststd ; use core :: hash :: { self , Hash } ; use core ::intrinsics:: {arith_offset,assume} ; use core::iter::FromIterator; use core :: mem; use core::ops::{ Index,IndexMut }; const CAPACITY :usize = 2*B-1; fn foo ( a : i32 , b :str, c: i32 ,d:f32 ) -> ( ) { let mut e : & 'static [ str ] = "abcd" ; let mut f :& [str ]="cdef"; bar ( ) ; let array :[ i32 ; 45 ] = [ 0 ;45 ]; let f: fn(i32, u64) -> i32; let f2 : fn ( i32 )->i32 ; let unit: ( ) = ( ); let foo = 2+2; let moo = 2*2; let meh = 2*2+3*3; else_block . as_ref ( ) . map ( | e | & * * e ) ; match self.node { ast :: ExprKind :: Field ( .. ) | ast::ExprKind::MethodCall(.. )=>rewrite_chain(self, context, width, offset) }; let f: fn ( & _, _ ) -> _ = unimplemented ! () ; let f = unimplemented ! {} ; { foo ( ) ; } for & (sample, radiance) in samples.iter() {} map(|& s| moo()); match x { S{foo}=>92 } } enum Message { Quit , ChangeColor ( i32 , i32 , i32 ) , Move { x : i32 , y : i32 } , Write ( String ) , } enum Foo{ Bar = 123 , Baz=0 } pub struct Vec < T > { buf : RawVec < T> , len :usize , } impl <T >Vec < T > { pub fn new ( ) -> Vec <T> { Vec { buf : RawVec :: new ( ) , len :0, } } pub fn with_capacity (capacity :usize)->Vec <T>{ Vec { buf:RawVec::with_capacity ( capacity ), len:0, } } pub unsafe fn from_raw_parts(ptr:* mut T, length :usize, capacity: usize) -> Vec <T>
pub fn capacity( & self ) -> usize { self . buf . cap ( ) } pub fn reserve(& mut self, additional: usize) { self. buf.reserve(self. len,additional) ; } pub fn into_boxed_slice( mut self ) -> Box < [ T ] > { unsafe{ self . shrink_to_fit ( ) ; let buf = ptr::read( & self . buf ); mem :: forget ( self ) ; buf.into_box() } } pub fn truncate(&mut self,len: usize) { unsafe { while len < self . len { self . len -= 1 ; let len = self . len ; ptr::drop_in_place(self.get_unchecked_mut(len)); } } } pub fn as_slice(& self) -> & [T] { self } pub fn as_mut_slice(&mut self) -> & mut[T] { & mut self [ .. ] } pub unsafe fn set_len(& mut self, len: usize) { self . len = len; } pub fn remove(&mut self, index: usize) -> T { let len = self.len(); assert!(index < len); unsafe { let ret; { let ptr = self.as_mut_ptr().offset(index as isize); ret = ptr::read(ptr); ptr::copy(ptr.offset (1), ptr, len-index-1); } self.set_len(len - 1); ret } } pub fn retain < F > ( & mut self , mut f : F ) where F : FnMut ( & T ) -> bool { let len = self.len(); let mut del = 0; { let v = & mut * * self ; for i in 0 .. len { if ! f ( & v [ i ] ) { del += 1 ; } else if del > 0 { v.swap(i - del, i); } } } if del>0{ self.truncate(len-del); } } pub fn drain<R>(&mut self,range:R)->Drain<T>where R:RangeArgument <usize>{ let len = self.len(); let start = * range.start() .unwrap_or( & 0 ) ; let end = * range. end().unwrap_or( & len ) ; assert!(start <= end); assert!(end <= len); } } impl<T:Clone>Vec<T>{ pub fn extend_from_slice(&mut self, other: & [ T ] ){ self.reserve(other.len()); for i in 0..other.len(){ let len = self.len(); unsafe { ptr::write(self.get_unchecked_mut(len), other.get_unchecked(i).clone()); self.set_len(len + 1); } } } } impl < T : PartialEq > Vec < T > { pub fn dedup ( & mut self ) { unsafe{ let ln = self.len(); if ln <= 1 { return ; } let p = self.as_mut_ptr(); let mut r:usize =1 ; let mut w:usize=1; while r < ln { let p_r = p.offset( r as isize ); let p_wm1 = p.offset ( ( w - 1 )as isize ); if * p_r !=* p_wm1 { if r!=w{ let p_w = p_wm1.offset(1); mem::swap( & mut * p_r , & mut * p_w ); } w += 1; } r+=1 ; } self.truncate(w); } } } pub fn from_elem < T : Clone > ( elem :T,n:usize) -> Vec <T>{ } impl < T :Clone >Clone for Vec <T>{ fn clone(&self) -> Vec<T> { < [ T ] > :: to_vec ( & * * self ) } fn clone(&self) -> Vec<T> { :: slice ::to_vec( &** self) } fn clone_from(&mut self, other:& Vec <T>) { self.truncate(other. len()); let len=self. len(); self.clone_from_slice(& other [.. len ]); self.extend_from_slice(& other[ len.. ]); } } impl< T:Hash>Hash for Vec<T> { fn hash<H :hash :: Hasher >( & self, state: &mut H) { Hash::hash(& **self, state) } } impl<T> Index < usize > for Vec < T > { type Output = T ; fn index(&self, index: usize) ->& T { & ( * * self ) [ index ] } } impl < T > IndexMut < usize > for Vec < T > { fn index_mut(&mut self, index: usize) -> &mut T { & mut ( * * self ) [ index ] } } impl<T> FromIterator<T> for Vec<T> { fn from_iter < I : IntoIterator < Item = T > > ( iter : I ) -> Vec < T > { let mut iterator = iter.into_iter(); let mut vector = match iterator. next () { None=>return Vec::new() , Some ( element ) => { let( lower , _ ) = iterator.size_hint(); //... } }; //... 
} } impl<T> IntoIterator for Vec<T> { type Item = T; type IntoIter = IntoIter<T>; fn into_iter( mut self ) -> IntoIter<T> { unsafe{ let ptr = self.as_mut_ptr(); assume(!ptr.is_null()); let begin = ptr as * const T ; let end = if mem :: size_of :: < T > ( ) == 0 { arith_offset ( ptr as * const i8 , self.len() as isize )as*const T } else { ptr . offset (self.len()as isize)as* const T } ; let buf = ptr::read(& self.buf); mem::forget(self); IntoIter{ _buf :buf, ptr: begin, end : end, } } } } impl < 'a,T >IntoIterator for& 'a Vec < T > { type Item = & 'a T; } impl<T> Iterator for IntoIter<T> { fn size_hint ( & self ) -> ( usize , Option < usize > ) { let diff = ( self.end as usize )-( self.ptr as usize); let size = mem::size_of::<T>(); let exact = diff /(if size == 0 {1}else{size}); ( exact, Some( exact ) ) } } impl<'a, T> Iterator for Drain<'a, T> { type Item = T; fn next(&mut self) -> Option<T> { self.iter.next().map( | elt | unsafe { ptr::read(elt as* const _ ) } ) } } trait Extend < A > { fn extend < T : IntoIterator < Item = A > > ( & mut self , iterable : T ) ; } impl < R , F : FnOnce ( ) -> R > FnOnce < ( ) > for AssertRecoverSafe < F > { extern "rust-call" fn call_once ( self , _args : ( ) ) -> R { ( self . 0 ) ( ) } } fn catch_unwind < F :FnOnce ( ) -> R+UnwindSafe , R > ( f :F ) -> Result < R > { let mut result = None ; unsafe { let result=& mut result ; unwind :: try ( move| |* result=Some( f ( ) ) ) ? } Ok ( result . unwrap ( ) ) } fn propagate ( payload :Box < Any+Send > ) -> ! {} impl<K,V>Root<K,V>{ pub fn as_ref ( & self )-> NodeRef < marker :: Immut , K,V, marker :: LeafOrInternal >{ NodeRef { root: self as * const _ as * mut _ , } } } macro_rules! vec { ( $( $x:expr ),* ) => { { let mut temp_vec = Vec::new(); $( temp_vec.push($x); )* temp_vec } }; } mod math { type Complex = ( f64 , f64 ) ; fn sin ( f : f64 ) -> f64 { /*... */ } } fn foo(&& (x, _): && (i32, i32)) {}
{ Vec{ buf :RawVec::from_raw_parts(ptr, capacity) , len :length , } }
identifier_body
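spacing.rs above is deliberately mis-spaced input of the kind a formatter must normalize, so its whitespace is left untouched here. For reference, a few of its opening bindings as conventional formatting would render them (the fixture's `& 'static [ str ]` is not a valid type, so `&'static str` is substituted in this sketch):

fn main() {
    let e: &'static str = "abcd"; // fixture had `& 'static [ str ]`
    let array: [i32; 45] = [0; 45];
    let unit: () = ();
    let foo = 2 + 2;
    let meh = 2 * 2 + 3 * 3;
    println!("{e} {} {:?} {foo} {meh}", array.len(), unit);
}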
spacing.rs
extern crate std as ruststd ; use core :: hash :: { self , Hash } ; use core ::intrinsics:: {arith_offset,assume} ; use core::iter::FromIterator; use core :: mem; use core::ops::{ Index,IndexMut }; const CAPACITY :usize = 2*B-1; fn foo ( a : i32 , b :str, c: i32 ,d:f32 ) -> ( ) { let mut e : & 'static [ str ] = "abcd" ; let mut f :& [str ]="cdef"; bar ( ) ; let array :[ i32 ; 45 ] = [ 0 ;45 ]; let f: fn(i32, u64) -> i32; let f2 : fn ( i32 )->i32 ; let unit: ( ) = ( ); let foo = 2+2; let moo = 2*2; let meh = 2*2+3*3; else_block . as_ref ( ) . map ( | e | & * * e ) ; match self.node { ast :: ExprKind :: Field ( .. ) | ast::ExprKind::MethodCall(.. )=>rewrite_chain(self, context, width, offset) }; let f: fn ( & _, _ ) -> _ = unimplemented ! () ; let f = unimplemented ! {} ; { foo ( ) ; } for & (sample, radiance) in samples.iter() {} map(|& s| moo()); match x { S{foo}=>92 } } enum Message { Quit , ChangeColor ( i32 , i32 , i32 ) , Move { x : i32 , y : i32 } , Write ( String ) , } enum Foo{ Bar = 123 , Baz=0 } pub struct Vec < T > { buf : RawVec < T> , len :usize , } impl <T >Vec < T > { pub fn new ( ) -> Vec <T> { Vec { buf : RawVec :: new ( ) , len :0, } } pub fn with_capacity (capacity :usize)->Vec <T>{ Vec { buf:RawVec::with_capacity ( capacity ), len:0, } } pub unsafe fn from_raw_parts(ptr:* mut T, length :usize, capacity: usize) -> Vec <T>{ Vec{ buf :RawVec::from_raw_parts(ptr, capacity), len :length, } } pub fn capacity( & self ) -> usize { self . buf . cap ( ) } pub fn reserve(& mut self, additional: usize) { self. buf.reserve(self. len,additional) ; } pub fn into_boxed_slice( mut self ) -> Box < [ T ] > { unsafe{ self . shrink_to_fit ( ) ; let buf = ptr::read( & self . buf ); mem :: forget ( self ) ; buf.into_box() } } pub fn truncate(&mut self,len: usize) { unsafe { while len < self . len { self . len -= 1 ; let len = self . len ; ptr::drop_in_place(self.get_unchecked_mut(len)); } } } pub fn as_slice(& self) -> & [T] { self } pub fn as_mut_slice(&mut self) -> & mut[T] { & mut self [ .. ] } pub unsafe fn set_len(& mut self, len: usize) { self . len = len; } pub fn remove(&mut self, index: usize) -> T { let len = self.len(); assert!(index < len); unsafe { let ret; { let ptr = self.as_mut_ptr().offset(index as isize); ret = ptr::read(ptr); ptr::copy(ptr.offset (1), ptr, len-index-1); } self.set_len(len - 1); ret } } pub fn retain < F > ( & mut self , mut f : F ) where F : FnMut ( & T ) -> bool { let len = self.len(); let mut del = 0; { let v = & mut * * self ; for i in 0 .. len { if ! f ( & v [ i ] ) { del += 1 ; } else if del > 0 { v.swap(i - del, i); } } } if del>0{ self.truncate(len-del); } } pub fn
<R>(&mut self,range:R)->Drain<T>where R:RangeArgument <usize>{ let len = self.len(); let start = * range.start() .unwrap_or( & 0 ) ; let end = * range. end().unwrap_or( & len ) ; assert!(start <= end); assert!(end <= len); } } impl<T:Clone>Vec<T>{ pub fn extend_from_slice(&mut self, other: & [ T ] ){ self.reserve(other.len()); for i in 0..other.len(){ let len = self.len(); unsafe { ptr::write(self.get_unchecked_mut(len), other.get_unchecked(i).clone()); self.set_len(len + 1); } } } } impl < T : PartialEq > Vec < T > { pub fn dedup ( & mut self ) { unsafe{ let ln = self.len(); if ln <= 1 { return ; } let p = self.as_mut_ptr(); let mut r:usize =1 ; let mut w:usize=1; while r < ln { let p_r = p.offset( r as isize ); let p_wm1 = p.offset ( ( w - 1 )as isize ); if * p_r !=* p_wm1 { if r!=w{ let p_w = p_wm1.offset(1); mem::swap( & mut * p_r , & mut * p_w ); } w += 1; } r+=1 ; } self.truncate(w); } } } pub fn from_elem < T : Clone > ( elem :T,n:usize) -> Vec <T>{ } impl < T :Clone >Clone for Vec <T>{ fn clone(&self) -> Vec<T> { < [ T ] > :: to_vec ( & * * self ) } fn clone(&self) -> Vec<T> { :: slice ::to_vec( &** self) } fn clone_from(&mut self, other:& Vec <T>) { self.truncate(other. len()); let len=self. len(); self.clone_from_slice(& other [.. len ]); self.extend_from_slice(& other[ len.. ]); } } impl< T:Hash>Hash for Vec<T> { fn hash<H :hash :: Hasher >( & self, state: &mut H) { Hash::hash(& **self, state) } } impl<T> Index < usize > for Vec < T > { type Output = T ; fn index(&self, index: usize) ->& T { & ( * * self ) [ index ] } } impl < T > IndexMut < usize > for Vec < T > { fn index_mut(&mut self, index: usize) -> &mut T { & mut ( * * self ) [ index ] } } impl<T> FromIterator<T> for Vec<T> { fn from_iter < I : IntoIterator < Item = T > > ( iter : I ) -> Vec < T > { let mut iterator = iter.into_iter(); let mut vector = match iterator. next () { None=>return Vec::new() , Some ( element ) => { let( lower , _ ) = iterator.size_hint(); //... } }; //... } } impl<T> IntoIterator for Vec<T> { type Item = T; type IntoIter = IntoIter<T>; fn into_iter( mut self ) -> IntoIter<T> { unsafe{ let ptr = self.as_mut_ptr(); assume(!ptr.is_null()); let begin = ptr as * const T ; let end = if mem :: size_of :: < T > ( ) == 0 { arith_offset ( ptr as * const i8 , self.len() as isize )as*const T } else { ptr . offset (self.len()as isize)as* const T } ; let buf = ptr::read(& self.buf); mem::forget(self); IntoIter{ _buf :buf, ptr: begin, end : end, } } } } impl < 'a,T >IntoIterator for& 'a Vec < T > { type Item = & 'a T; } impl<T> Iterator for IntoIter<T> { fn size_hint ( & self ) -> ( usize , Option < usize > ) { let diff = ( self.end as usize )-( self.ptr as usize); let size = mem::size_of::<T>(); let exact = diff /(if size == 0 {1}else{size}); ( exact, Some( exact ) ) } } impl<'a, T> Iterator for Drain<'a, T> { type Item = T; fn next(&mut self) -> Option<T> { self.iter.next().map( | elt | unsafe { ptr::read(elt as* const _ ) } ) } } trait Extend < A > { fn extend < T : IntoIterator < Item = A > > ( & mut self , iterable : T ) ; } impl < R , F : FnOnce ( ) -> R > FnOnce < ( ) > for AssertRecoverSafe < F > { extern "rust-call" fn call_once ( self , _args : ( ) ) -> R { ( self . 0 ) ( ) } } fn catch_unwind < F :FnOnce ( ) -> R+UnwindSafe , R > ( f :F ) -> Result < R > { let mut result = None ; unsafe { let result=& mut result ; unwind :: try ( move| |* result=Some( f ( ) ) ) ? } Ok ( result . unwrap ( ) ) } fn propagate ( payload :Box < Any+Send > ) -> ! 
{} impl<K,V>Root<K,V>{ pub fn as_ref ( & self )-> NodeRef < marker :: Immut , K,V, marker :: LeafOrInternal >{ NodeRef { root: self as * const _ as * mut _ , } } } macro_rules! vec { ( $( $x:expr ),* ) => { { let mut temp_vec = Vec::new(); $( temp_vec.push($x); )* temp_vec } }; } mod math { type Complex = ( f64 , f64 ) ; fn sin ( f : f64 ) -> f64 { /*... */ } } fn foo(&& (x, _): && (i32, i32)) {}
drain
identifier_name
spacing.rs
extern crate std as ruststd ; use core :: hash :: { self , Hash } ; use core ::intrinsics:: {arith_offset,assume} ; use core::iter::FromIterator; use core :: mem; use core::ops::{ Index,IndexMut }; const CAPACITY :usize = 2*B-1; fn foo ( a : i32 , b :str, c: i32 ,d:f32 ) -> ( ) { let mut e : & 'static [ str ] = "abcd" ; let mut f :& [str ]="cdef"; bar ( ) ; let array :[ i32 ; 45 ] = [ 0 ;45 ]; let f: fn(i32, u64) -> i32; let f2 : fn ( i32 )->i32 ; let unit: ( ) = ( ); let foo = 2+2; let moo = 2*2; let meh = 2*2+3*3; else_block . as_ref ( ) . map ( | e | & * * e ) ; match self.node { ast :: ExprKind :: Field ( .. ) | ast::ExprKind::MethodCall(.. )=>rewrite_chain(self, context, width, offset) }; let f: fn ( & _, _ ) -> _ = unimplemented ! () ; let f = unimplemented ! {} ; { foo ( ) ; } for & (sample, radiance) in samples.iter() {} map(|& s| moo()); match x { S{foo}=>92 } } enum Message { Quit , ChangeColor ( i32 , i32 , i32 ) , Move { x : i32 , y : i32 } , Write ( String ) , } enum Foo{ Bar = 123 , Baz=0 } pub struct Vec < T > { buf : RawVec < T> , len :usize , } impl <T >Vec < T > { pub fn new ( ) -> Vec <T> { Vec { buf : RawVec :: new ( ) , len :0, } } pub fn with_capacity (capacity :usize)->Vec <T>{ Vec { buf:RawVec::with_capacity ( capacity ), len:0, } } pub unsafe fn from_raw_parts(ptr:* mut T, length :usize, capacity: usize) -> Vec <T>{ Vec{ buf :RawVec::from_raw_parts(ptr, capacity), len :length,
} } pub fn capacity( & self ) -> usize { self . buf . cap ( ) } pub fn reserve(& mut self, additional: usize) { self. buf.reserve(self. len,additional) ; } pub fn into_boxed_slice( mut self ) -> Box < [ T ] > { unsafe{ self . shrink_to_fit ( ) ; let buf = ptr::read( & self . buf ); mem :: forget ( self ) ; buf.into_box() } } pub fn truncate(&mut self,len: usize) { unsafe { while len < self . len { self . len -= 1 ; let len = self . len ; ptr::drop_in_place(self.get_unchecked_mut(len)); } } } pub fn as_slice(& self) -> & [T] { self } pub fn as_mut_slice(&mut self) -> & mut[T] { & mut self [ .. ] } pub unsafe fn set_len(& mut self, len: usize) { self . len = len; } pub fn remove(&mut self, index: usize) -> T { let len = self.len(); assert!(index < len); unsafe { let ret; { let ptr = self.as_mut_ptr().offset(index as isize); ret = ptr::read(ptr); ptr::copy(ptr.offset (1), ptr, len-index-1); } self.set_len(len - 1); ret } } pub fn retain < F > ( & mut self , mut f : F ) where F : FnMut ( & T ) -> bool { let len = self.len(); let mut del = 0; { let v = & mut * * self ; for i in 0 .. len { if ! f ( & v [ i ] ) { del += 1 ; } else if del > 0 { v.swap(i - del, i); } } } if del>0{ self.truncate(len-del); } } pub fn drain<R>(&mut self,range:R)->Drain<T>where R:RangeArgument <usize>{ let len = self.len(); let start = * range.start() .unwrap_or( & 0 ) ; let end = * range. end().unwrap_or( & len ) ; assert!(start <= end); assert!(end <= len); } } impl<T:Clone>Vec<T>{ pub fn extend_from_slice(&mut self, other: & [ T ] ){ self.reserve(other.len()); for i in 0..other.len(){ let len = self.len(); unsafe { ptr::write(self.get_unchecked_mut(len), other.get_unchecked(i).clone()); self.set_len(len + 1); } } } } impl < T : PartialEq > Vec < T > { pub fn dedup ( & mut self ) { unsafe{ let ln = self.len(); if ln <= 1 { return ; } let p = self.as_mut_ptr(); let mut r:usize =1 ; let mut w:usize=1; while r < ln { let p_r = p.offset( r as isize ); let p_wm1 = p.offset ( ( w - 1 )as isize ); if * p_r !=* p_wm1 { if r!=w{ let p_w = p_wm1.offset(1); mem::swap( & mut * p_r , & mut * p_w ); } w += 1; } r+=1 ; } self.truncate(w); } } } pub fn from_elem < T : Clone > ( elem :T,n:usize) -> Vec <T>{ } impl < T :Clone >Clone for Vec <T>{ fn clone(&self) -> Vec<T> { < [ T ] > :: to_vec ( & * * self ) } fn clone(&self) -> Vec<T> { :: slice ::to_vec( &** self) } fn clone_from(&mut self, other:& Vec <T>) { self.truncate(other. len()); let len=self. len(); self.clone_from_slice(& other [.. len ]); self.extend_from_slice(& other[ len.. ]); } } impl< T:Hash>Hash for Vec<T> { fn hash<H :hash :: Hasher >( & self, state: &mut H) { Hash::hash(& **self, state) } } impl<T> Index < usize > for Vec < T > { type Output = T ; fn index(&self, index: usize) ->& T { & ( * * self ) [ index ] } } impl < T > IndexMut < usize > for Vec < T > { fn index_mut(&mut self, index: usize) -> &mut T { & mut ( * * self ) [ index ] } } impl<T> FromIterator<T> for Vec<T> { fn from_iter < I : IntoIterator < Item = T > > ( iter : I ) -> Vec < T > { let mut iterator = iter.into_iter(); let mut vector = match iterator. next () { None=>return Vec::new() , Some ( element ) => { let( lower , _ ) = iterator.size_hint(); //... } }; //... 
} } impl<T> IntoIterator for Vec<T> { type Item = T; type IntoIter = IntoIter<T>; fn into_iter( mut self ) -> IntoIter<T> { unsafe{ let ptr = self.as_mut_ptr(); assume(!ptr.is_null()); let begin = ptr as * const T ; let end = if mem :: size_of :: < T > ( ) == 0 { arith_offset ( ptr as * const i8 , self.len() as isize )as*const T } else { ptr . offset (self.len()as isize)as* const T } ; let buf = ptr::read(& self.buf); mem::forget(self); IntoIter{ _buf :buf, ptr: begin, end : end, } } } } impl < 'a,T >IntoIterator for& 'a Vec < T > { type Item = & 'a T; } impl<T> Iterator for IntoIter<T> { fn size_hint ( & self ) -> ( usize , Option < usize > ) { let diff = ( self.end as usize )-( self.ptr as usize); let size = mem::size_of::<T>(); let exact = diff /(if size == 0 {1}else{size}); ( exact, Some( exact ) ) } } impl<'a, T> Iterator for Drain<'a, T> { type Item = T; fn next(&mut self) -> Option<T> { self.iter.next().map( | elt | unsafe { ptr::read(elt as* const _ ) } ) } } trait Extend < A > { fn extend < T : IntoIterator < Item = A > > ( & mut self , iterable : T ) ; } impl < R , F : FnOnce ( ) -> R > FnOnce < ( ) > for AssertRecoverSafe < F > { extern "rust-call" fn call_once ( self , _args : ( ) ) -> R { ( self . 0 ) ( ) } } fn catch_unwind < F :FnOnce ( ) -> R+UnwindSafe , R > ( f :F ) -> Result < R > { let mut result = None ; unsafe { let result=& mut result ; unwind :: try ( move| |* result=Some( f ( ) ) ) ? } Ok ( result . unwrap ( ) ) } fn propagate ( payload :Box < Any+Send > ) -> ! {} impl<K,V>Root<K,V>{ pub fn as_ref ( & self )-> NodeRef < marker :: Immut , K,V, marker :: LeafOrInternal >{ NodeRef { root: self as * const _ as * mut _ , } } } macro_rules! vec { ( $( $x:expr ),* ) => { { let mut temp_vec = Vec::new(); $( temp_vec.push($x); )* temp_vec } }; } mod math { type Complex = ( f64 , f64 ) ; fn sin ( f : f64 ) -> f64 { /*... */ } } fn foo(&& (x, _): && (i32, i32)) {}
random_line_split
spacing.rs
extern crate std as ruststd ; use core :: hash :: { self , Hash } ; use core ::intrinsics:: {arith_offset,assume} ; use core::iter::FromIterator; use core :: mem; use core::ops::{ Index,IndexMut }; const CAPACITY :usize = 2*B-1; fn foo ( a : i32 , b :str, c: i32 ,d:f32 ) -> ( ) { let mut e : & 'static [ str ] = "abcd" ; let mut f :& [str ]="cdef"; bar ( ) ; let array :[ i32 ; 45 ] = [ 0 ;45 ]; let f: fn(i32, u64) -> i32; let f2 : fn ( i32 )->i32 ; let unit: ( ) = ( ); let foo = 2+2; let moo = 2*2; let meh = 2*2+3*3; else_block . as_ref ( ) . map ( | e | & * * e ) ; match self.node { ast :: ExprKind :: Field ( .. ) | ast::ExprKind::MethodCall(.. )=>rewrite_chain(self, context, width, offset) }; let f: fn ( & _, _ ) -> _ = unimplemented ! () ; let f = unimplemented ! {} ; { foo ( ) ; } for & (sample, radiance) in samples.iter() {} map(|& s| moo()); match x { S{foo}=>92 } } enum Message { Quit , ChangeColor ( i32 , i32 , i32 ) , Move { x : i32 , y : i32 } , Write ( String ) , } enum Foo{ Bar = 123 , Baz=0 } pub struct Vec < T > { buf : RawVec < T> , len :usize , } impl <T >Vec < T > { pub fn new ( ) -> Vec <T> { Vec { buf : RawVec :: new ( ) , len :0, } } pub fn with_capacity (capacity :usize)->Vec <T>{ Vec { buf:RawVec::with_capacity ( capacity ), len:0, } } pub unsafe fn from_raw_parts(ptr:* mut T, length :usize, capacity: usize) -> Vec <T>{ Vec{ buf :RawVec::from_raw_parts(ptr, capacity), len :length, } } pub fn capacity( & self ) -> usize { self . buf . cap ( ) } pub fn reserve(& mut self, additional: usize) { self. buf.reserve(self. len,additional) ; } pub fn into_boxed_slice( mut self ) -> Box < [ T ] > { unsafe{ self . shrink_to_fit ( ) ; let buf = ptr::read( & self . buf ); mem :: forget ( self ) ; buf.into_box() } } pub fn truncate(&mut self,len: usize) { unsafe { while len < self . len { self . len -= 1 ; let len = self . len ; ptr::drop_in_place(self.get_unchecked_mut(len)); } } } pub fn as_slice(& self) -> & [T] { self } pub fn as_mut_slice(&mut self) -> & mut[T] { & mut self [ .. ] } pub unsafe fn set_len(& mut self, len: usize) { self . len = len; } pub fn remove(&mut self, index: usize) -> T { let len = self.len(); assert!(index < len); unsafe { let ret; { let ptr = self.as_mut_ptr().offset(index as isize); ret = ptr::read(ptr); ptr::copy(ptr.offset (1), ptr, len-index-1); } self.set_len(len - 1); ret } } pub fn retain < F > ( & mut self , mut f : F ) where F : FnMut ( & T ) -> bool { let len = self.len(); let mut del = 0; { let v = & mut * * self ; for i in 0 .. len { if ! f ( & v [ i ] ) { del += 1 ; } else if del > 0 { v.swap(i - del, i); } } } if del>0{ self.truncate(len-del); } } pub fn drain<R>(&mut self,range:R)->Drain<T>where R:RangeArgument <usize>{ let len = self.len(); let start = * range.start() .unwrap_or( & 0 ) ; let end = * range. end().unwrap_or( & len ) ; assert!(start <= end); assert!(end <= len); } } impl<T:Clone>Vec<T>{ pub fn extend_from_slice(&mut self, other: & [ T ] ){ self.reserve(other.len()); for i in 0..other.len(){ let len = self.len(); unsafe { ptr::write(self.get_unchecked_mut(len), other.get_unchecked(i).clone()); self.set_len(len + 1); } } } } impl < T : PartialEq > Vec < T > { pub fn dedup ( & mut self ) { unsafe{ let ln = self.len(); if ln <= 1 { return ; } let p = self.as_mut_ptr(); let mut r:usize =1 ; let mut w:usize=1; while r < ln { let p_r = p.offset( r as isize ); let p_wm1 = p.offset ( ( w - 1 )as isize ); if * p_r !=* p_wm1
r+=1 ; } self.truncate(w); } } } pub fn from_elem < T : Clone > ( elem :T,n:usize) -> Vec <T>{ } impl < T :Clone >Clone for Vec <T>{ fn clone(&self) -> Vec<T> { < [ T ] > :: to_vec ( & * * self ) } fn clone(&self) -> Vec<T> { :: slice ::to_vec( &** self) } fn clone_from(&mut self, other:& Vec <T>) { self.truncate(other. len()); let len=self. len(); self.clone_from_slice(& other [.. len ]); self.extend_from_slice(& other[ len.. ]); } } impl< T:Hash>Hash for Vec<T> { fn hash<H :hash :: Hasher >( & self, state: &mut H) { Hash::hash(& **self, state) } } impl<T> Index < usize > for Vec < T > { type Output = T ; fn index(&self, index: usize) ->& T { & ( * * self ) [ index ] } } impl < T > IndexMut < usize > for Vec < T > { fn index_mut(&mut self, index: usize) -> &mut T { & mut ( * * self ) [ index ] } } impl<T> FromIterator<T> for Vec<T> { fn from_iter < I : IntoIterator < Item = T > > ( iter : I ) -> Vec < T > { let mut iterator = iter.into_iter(); let mut vector = match iterator. next () { None=>return Vec::new() , Some ( element ) => { let( lower , _ ) = iterator.size_hint(); //... } }; //... } } impl<T> IntoIterator for Vec<T> { type Item = T; type IntoIter = IntoIter<T>; fn into_iter( mut self ) -> IntoIter<T> { unsafe{ let ptr = self.as_mut_ptr(); assume(!ptr.is_null()); let begin = ptr as * const T ; let end = if mem :: size_of :: < T > ( ) == 0 { arith_offset ( ptr as * const i8 , self.len() as isize )as*const T } else { ptr . offset (self.len()as isize)as* const T } ; let buf = ptr::read(& self.buf); mem::forget(self); IntoIter{ _buf :buf, ptr: begin, end : end, } } } } impl < 'a,T >IntoIterator for& 'a Vec < T > { type Item = & 'a T; } impl<T> Iterator for IntoIter<T> { fn size_hint ( & self ) -> ( usize , Option < usize > ) { let diff = ( self.end as usize )-( self.ptr as usize); let size = mem::size_of::<T>(); let exact = diff /(if size == 0 {1}else{size}); ( exact, Some( exact ) ) } } impl<'a, T> Iterator for Drain<'a, T> { type Item = T; fn next(&mut self) -> Option<T> { self.iter.next().map( | elt | unsafe { ptr::read(elt as* const _ ) } ) } } trait Extend < A > { fn extend < T : IntoIterator < Item = A > > ( & mut self , iterable : T ) ; } impl < R , F : FnOnce ( ) -> R > FnOnce < ( ) > for AssertRecoverSafe < F > { extern "rust-call" fn call_once ( self , _args : ( ) ) -> R { ( self . 0 ) ( ) } } fn catch_unwind < F :FnOnce ( ) -> R+UnwindSafe , R > ( f :F ) -> Result < R > { let mut result = None ; unsafe { let result=& mut result ; unwind :: try ( move| |* result=Some( f ( ) ) ) ? } Ok ( result . unwrap ( ) ) } fn propagate ( payload :Box < Any+Send > ) -> ! {} impl<K,V>Root<K,V>{ pub fn as_ref ( & self )-> NodeRef < marker :: Immut , K,V, marker :: LeafOrInternal >{ NodeRef { root: self as * const _ as * mut _ , } } } macro_rules! vec { ( $( $x:expr ),* ) => { { let mut temp_vec = Vec::new(); $( temp_vec.push($x); )* temp_vec } }; } mod math { type Complex = ( f64 , f64 ) ; fn sin ( f : f64 ) -> f64 { /*... */ } } fn foo(&& (x, _): && (i32, i32)) {}
{ if r!=w{ let p_w = p_wm1.offset(1); mem::swap( & mut * p_r , & mut * p_w ); } w += 1; }
conditional_block
xcode.rs
bundle_id: String, #[serde(rename = "CFBundleShortVersionString")] version: String, #[serde(rename = "CFBundleVersion")] build: String, } #[derive(Deserialize, Debug)] pub struct XcodeProjectInfo { targets: Vec<String>, configurations: Vec<String>, #[serde(default = "PathBuf::new")] path: PathBuf, } impl fmt::Display for InfoPlist { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{} ({})", self.name(), &self.version) } } pub fn expand_xcodevars<S>(s: &str, vars: &HashMap<String, String, S>) -> String where S: BuildHasher, { lazy_static! { static ref SEP_RE: Regex = Regex::new(r"[\s/]+").unwrap(); } expand_vars(s, |key| { if key.is_empty() { return "".into(); } let mut iter = key.splitn(2, ':'); let value = vars .get(iter.next().unwrap()) .map(String::as_str) .unwrap_or(""); match iter.next() { Some("rfc1034identifier") => SEP_RE.replace_all(value, "-").into_owned(), Some("identifier") => SEP_RE.replace_all(value, "_").into_owned(), None | Some(_) => value.to_string(), } }) .into_owned() } fn get_xcode_project_info(path: &Path) -> Result<Option<XcodeProjectInfo>> { if_chain! { if let Some(filename_os) = path.file_name(); if let Some(filename) = filename_os.to_str(); if filename.ends_with(".xcodeproj"); then { return match XcodeProjectInfo::from_path(path) { Ok(info) => Ok(Some(info)), _ => Ok(None), }; } } let mut projects = vec![]; for entry in (fs::read_dir(path)?).flatten() { if let Some(filename) = entry.file_name().to_str() { if filename.ends_with(".xcodeproj") { projects.push(entry.path().to_path_buf()); } } } if projects.len() == 1 { match XcodeProjectInfo::from_path(&projects[0]) { Ok(info) => Ok(Some(info)), _ => Ok(None), } } else { Ok(None) } } impl XcodeProjectInfo { pub fn from_path<P: AsRef<Path>>(path: P) -> Result<XcodeProjectInfo> { #[derive(Deserialize)] struct Output { project: XcodeProjectInfo, } let p = process::Command::new("xcodebuild") .arg("-list") .arg("-json") .arg("-project") .arg(path.as_ref().as_os_str()) .output()?; match serde_json::from_slice::<Output>(&p.stdout) { Ok(mut rv) => { rv.project.path = path.as_ref().canonicalize()?; Ok(rv.project) } Err(e) => { warn!("Your.xcodeproj might be malformed. Command `xcodebuild -list -json -project {}` failed to produce a valid JSON output.", path.as_ref().display()); Err(e.into()) } } } pub fn base_path(&self) -> &Path { self.path.parent().unwrap() } pub fn get_build_vars( &self, target: &str, configuration: &str, ) -> Result<HashMap<String, String>> { let mut rv = HashMap::new(); let p = process::Command::new("xcodebuild") .arg("-showBuildSettings") .arg("-project") .arg(&self.path) .arg("-target") .arg(target) .arg("-configuration") .arg(configuration) .output()?; for line_rv in p.stdout.lines() { let line = line_rv?; if let Some(suffix) = line.strip_prefix(" ") { let mut sep = suffix.splitn(2, " = "); if_chain! { if let Some(key) = sep.next(); if let Some(value) = sep.next(); then { rv.insert(key.to_owned(), value.to_owned()); } } } } Ok(rv) } /// Return the first target pub fn get_first_target(&self) -> Option<&str> { if!self.targets.is_empty() { Some(&self.targets[0]) } else { None } } /// Returns the config with a certain name pub fn get_configuration(&self, name: &str) -> Option<&str> { let name = name.to_lowercase(); self.configurations .iter() .find(|&cfg| cfg.to_lowercase() == name) .map(|v| v.as_ref()) } } impl InfoPlist { /// Loads a processed plist file. 
pub fn discover_from_env() -> Result<Option<InfoPlist>> { // if we are loaded directly from xcode we can trust the os environment // and pass those variables to the processor. if env::var("XCODE_VERSION_ACTUAL").is_ok() { let vars: HashMap<_, _> = env::vars().collect(); if let Some(filename) = vars.get("INFOPLIST_FILE") { let base = vars.get("PROJECT_DIR").map(String::as_str).unwrap_or("."); let path = env::current_dir().unwrap().join(base).join(filename); Ok(Some(InfoPlist::load_and_process(path, &vars)?)) } else if let Ok(default_plist) = InfoPlist::from_env_vars(&vars) { Ok(Some(default_plist)) } else { Ok(None) } // otherwise, we discover the project info from the current path and // invoke xcodebuild to give us the project settings for the first // target. } else { if_chain! { if let Ok(here) = env::current_dir(); if let Some(pi) = get_xcode_project_info(&here)?; then { InfoPlist::from_project_info(&pi) } else { Ok(None) } } } } /// Loads an info plist from a given project info pub fn from_project_info(pi: &XcodeProjectInfo) -> Result<Option<InfoPlist>> { if_chain! { if let Some(config) = pi.get_configuration("release") .or_else(|| pi.get_configuration("debug")); if let Some(target) = pi.get_first_target(); then { let vars = pi.get_build_vars(target, config)?; if let Some(path) = vars.get("INFOPLIST_FILE") { let base = vars.get("PROJECT_DIR").map(Path::new) .unwrap_or_else(|| pi.base_path()); let path = base.join(path); return Ok(Some(InfoPlist::load_and_process(path, &vars)?)) } } } Ok(None) } /// Loads an info plist file from a path and processes it with the given vars pub fn load_and_process<P: AsRef<Path>>( path: P, vars: &HashMap<String, String>, ) -> Result<InfoPlist> { // do we want to preprocess the plist file? let plist = if vars.get("INFOPLIST_PREPROCESS").map(String::as_str) == Some("YES") { let mut c = process::Command::new("cc"); c.arg("-xc").arg("-P").arg("-E"); if let Some(defs) = vars.get("INFOPLIST_OTHER_PREPROCESSOR_FLAGS") { for token in defs.split_whitespace() { c.arg(token); } } if let Some(defs) = vars.get("INFOPLIST_PREPROCESSOR_DEFINITIONS") { for token in defs.split_whitespace() { c.arg(format!("-D{token}")); } } c.arg(path.as_ref()); let p = c.output()?; InfoPlist::from_reader(Cursor::new(&p.stdout[..])) } else { InfoPlist::from_path(path).or_else(|err| { /* This is sort of an edge-case, as XCode is not producing an `Info.plist` file by default anymore. However, it still does so for some templates. For example iOS Storyboard template will produce a partial `Info.plist` file, with a content only related to the Storyboard itself, but not the project as a whole. eg. <?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> <plist version="1.0"> <dict> <key>UIApplicationSceneManifest</key> <dict> <key>UISceneConfigurations</key> <dict> <key>UIWindowSceneSessionRoleApplication</key> <array> <dict> <key>UISceneStoryboardFile</key> <string>Main</string> </dict> </array> </dict> </dict> </dict> </plist> This causes a sort of false-positive, as `INFOPLIST_FILE` is present, yet it contains no data required by the CLI to correctly produce a `InfoPlist` struct. In the case like that, we try to fallback to env variables collected either by `xcodebuild` binary, or directly through `env` if we were called from within XCode itself. 
*/ InfoPlist::from_env_vars(vars).map_err(|e| e.context(err)) }) }; plist.map(|raw| InfoPlist { name: expand_xcodevars(&raw.name, vars), bundle_id: expand_xcodevars(&raw.bundle_id, vars), version: expand_xcodevars(&raw.version, vars), build: expand_xcodevars(&raw.build, vars), }) } /// Loads an info plist from provided environment variables list pub fn from_env_vars(vars: &HashMap<String, String>) -> Result<InfoPlist> { let name = vars .get("PRODUCT_NAME") .map(String::to_owned) .ok_or_else(|| format_err!("PRODUCT_NAME is missing"))?; let bundle_id = vars .get("PRODUCT_BUNDLE_IDENTIFIER") .map(String::to_owned) .ok_or_else(|| format_err!("PRODUCT_BUNDLE_IDENTIFIER is missing"))?; let version = vars .get("MARKETING_VERSION") .map(String::to_owned) .ok_or_else(|| format_err!("MARKETING_VERSION is missing"))?; let build = vars .get("CURRENT_PROJECT_VERSION") .map(String::to_owned) .ok_or_else(|| format_err!("CURRENT_PROJECT_VERSION is missing"))?; Ok(InfoPlist { name, bundle_id, version, build, }) } /// Loads an info plist file from a path and does not process it. pub fn from_path<P: AsRef<Path>>(path: P) -> Result<InfoPlist> { let mut f = fs::File::open(path.as_ref()).context("Could not open Info.plist file")?; InfoPlist::from_reader(&mut f) } /// Loads an info plist file from a reader. pub fn from_reader<R: SeekRead>(rdr: R) -> Result<InfoPlist> { let rdr = BufReader::new(rdr); plist::from_reader(rdr).context("Could not parse Info.plist file") } pub fn get_release_name(&self) -> String { format!("{}@{}", self.bundle_id(), self.version()) } pub fn version(&self) -> &str { &self.version } pub fn build(&self) -> &str { &self.build } pub fn name(&self) -> &str { &self.name } pub fn bundle_id(&self) -> &str { &self.bundle_id } } /// Helper struct that allows the current execution to detach from /// the xcode console and continue in the background. This becomes /// a dummy shim for non xcode runs or platforms. pub struct MayDetach<'a> { output_file: Option<TempFile>, #[allow(dead_code)] task_name: &'a str, } impl<'a> MayDetach<'a> { fn new(task_name: &'a str) -> MayDetach<'a> { MayDetach { output_file: None, task_name, } } /// Returns true if we are deteached from xcode pub fn is_detached(&self) -> bool { self.output_file.is_some() } /// If we are launched from xcode this detaches us from the xcode console /// and continues execution in the background. From this moment on output /// is captured and the user is notified with notifications. #[cfg(target_os = "macos")] pub fn may_detach(&mut self) -> Result<bool> { if!launched_from_xcode() { return Ok(false); } println!("Continuing in background."); show_notification("Sentry", &format!("{} starting", self.task_name))?; let output_file = TempFile::create()?; daemonize_redirect( Some(output_file.path()), Some(output_file.path()), ChdirMode::NoChdir, ) .unwrap(); self.output_file = Some(output_file); Ok(true) } /// For non mac platforms this just never detaches. #[cfg(not(target_os = "macos"))] pub fn may_detach(&mut self) -> Result<bool> { Ok(false) } /// Wraps the execution of a code block. Does not detach until someone /// calls into `may_detach`. #[cfg(target_os = "macos")] pub fn wrap<T, F: FnOnce(&mut MayDetach<'_>) -> Result<T>>( task_name: &'a str, f: F, ) -> Result<T> { use std::time::Duration; let mut md = MayDetach::new(task_name); match f(&mut md) { Ok(x) => { md.show_done()?; Ok(x) } Err(err) => { if let Some(ref output_file) = md.output_file { crate::utils::system::print_error(&err); if md.show_critical_info()? 
{ open::that(output_file.path())?; std::thread::sleep(Duration::from_millis(5000)); } } Err(err) } } } /// Dummy wrap call that never detaches for non mac platforms. #[cfg(not(target_os = "macos"))] pub fn wrap<T, F: FnOnce(&mut MayDetach) -> Result<T>>(task_name: &'a str, f: F) -> Result<T> { f(&mut MayDetach::new(task_name)) } #[cfg(target_os = "macos")] fn show_critical_info(&self) -> Result<bool> { show_critical_info( &format!("{} failed", self.task_name), "The Sentry build step failed while running in the background. \ You can ignore this error or view details to attempt to resolve \ it. Ignoring it might cause your crashes not to be handled \ properly.", ) } #[cfg(target_os = "macos")] fn show_done(&self) -> Result<()> { if self.is_detached()
Ok(()) } } /// Returns true if we were invoked from xcode #[cfg(target_os = "macos")] pub fn launched_from_xcode() -> bool { if env::var("XCODE_VERSION_ACTUAL").is_err() { return false; } let mut pid = unsafe { getpid() as u32 }; while let Some(parent) = mac_process_info::get_parent_pid(pid) { if parent == 1 { break; } if let Ok(name) = mac_process_info::get_process_name(parent) { if name == "Xcode" { return true; } } pid = parent; } false } /// Returns true if we were invoked from xcode #[cfg(not(target_os = "macos"))] pub fn launched_from_xcode() -> bool { false } /// Shows a dialog in xcode and blocks. The dialog will have a title and a /// message as well as the buttons "Show details" and "Ignore". Returns /// `true` if the `show details` button has been pressed. #[cfg(target_os = "macos")] pub fn show_critical_info(title: &str, message: &str) -> Result<bool> { use serde::Serialize; lazy_static! { static ref SCRIPT: osascript::JavaScript = osascript::JavaScript::new( " var App = Application('XCode'); App.includeStandardAdditions = true; return App.displayAlert($params.title, { message: $params.message, as: \"critical\", buttons: [\"Show details\", \"Ignore\"] }); " ); } #[derive(Serialize)] struct AlertParams<'a> { title: &'a str, message: &'a str, } #[derive(Debug, Deserialize)] struct AlertResult { #[serde(rename = "buttonReturned")] button: String, } let rv: AlertResult = SCRIPT .execute_with_params(AlertParams { title, message }) .context("Failed to display Xcode dialog")?; Ok(&rv.button!= "Ignore") } /// Shows a notification in xcode #[cfg(target_os = "macos")] pub fn show_notification(title: &str, message
{ show_notification("Sentry", &format!("{} finished", self.task_name))?; }
conditional_block
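The row that ends here is a conditional_block example: the middle field above is the held-out body of the `if self.is_detached()` guard in `show_done`, while the prefix and suffix fields carry the rest of `xcode.rs`. A minimal sketch of how such a row reassembles into the original source — the helper name and the toy field values are illustrative, not part of the dataset tooling:

/// Reassemble a fill-in-the-middle row into its original source text.
/// Concatenation order is always prefix + middle + suffix.
fn reassemble(prefix: &str, middle: &str, suffix: &str) -> String {
    let mut src = String::with_capacity(prefix.len() + middle.len() + suffix.len());
    src.push_str(prefix);
    src.push_str(middle);
    src.push_str(suffix);
    src
}

fn main() {
    // Toy stand-ins for the row above; the real columns hold whole files.
    let prefix = "fn show_done() { if is_detached() ";
    let middle = "{ notify(\"finished\"); }";
    let suffix = " }";
    let full = reassemble(prefix, middle, suffix);
    assert!(full.contains("notify(\"finished\")"));
    println!("{full}");
}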
xcode.rs
if let Some(filename_os) = path.file_name(); if let Some(filename) = filename_os.to_str(); if filename.ends_with(".xcodeproj"); then { return match XcodeProjectInfo::from_path(path) { Ok(info) => Ok(Some(info)), _ => Ok(None), }; } } let mut projects = vec![]; for entry in (fs::read_dir(path)?).flatten() { if let Some(filename) = entry.file_name().to_str() { if filename.ends_with(".xcodeproj") { projects.push(entry.path().to_path_buf()); } } } if projects.len() == 1 { match XcodeProjectInfo::from_path(&projects[0]) { Ok(info) => Ok(Some(info)), _ => Ok(None), } } else { Ok(None) } } impl XcodeProjectInfo { pub fn from_path<P: AsRef<Path>>(path: P) -> Result<XcodeProjectInfo> { #[derive(Deserialize)] struct Output { project: XcodeProjectInfo, } let p = process::Command::new("xcodebuild") .arg("-list") .arg("-json") .arg("-project") .arg(path.as_ref().as_os_str()) .output()?; match serde_json::from_slice::<Output>(&p.stdout) { Ok(mut rv) => { rv.project.path = path.as_ref().canonicalize()?; Ok(rv.project) } Err(e) => { warn!("Your.xcodeproj might be malformed. Command `xcodebuild -list -json -project {}` failed to produce a valid JSON output.", path.as_ref().display()); Err(e.into()) } } } pub fn base_path(&self) -> &Path { self.path.parent().unwrap() } pub fn get_build_vars( &self, target: &str, configuration: &str, ) -> Result<HashMap<String, String>> { let mut rv = HashMap::new(); let p = process::Command::new("xcodebuild") .arg("-showBuildSettings") .arg("-project") .arg(&self.path) .arg("-target") .arg(target) .arg("-configuration") .arg(configuration) .output()?; for line_rv in p.stdout.lines() { let line = line_rv?; if let Some(suffix) = line.strip_prefix(" ") { let mut sep = suffix.splitn(2, " = "); if_chain! { if let Some(key) = sep.next(); if let Some(value) = sep.next(); then { rv.insert(key.to_owned(), value.to_owned()); } } } } Ok(rv) } /// Return the first target pub fn get_first_target(&self) -> Option<&str> { if!self.targets.is_empty() { Some(&self.targets[0]) } else { None } } /// Returns the config with a certain name pub fn get_configuration(&self, name: &str) -> Option<&str> { let name = name.to_lowercase(); self.configurations .iter() .find(|&cfg| cfg.to_lowercase() == name) .map(|v| v.as_ref()) } } impl InfoPlist { /// Loads a processed plist file. pub fn discover_from_env() -> Result<Option<InfoPlist>> { // if we are loaded directly from xcode we can trust the os environment // and pass those variables to the processor. if env::var("XCODE_VERSION_ACTUAL").is_ok() { let vars: HashMap<_, _> = env::vars().collect(); if let Some(filename) = vars.get("INFOPLIST_FILE") { let base = vars.get("PROJECT_DIR").map(String::as_str).unwrap_or("."); let path = env::current_dir().unwrap().join(base).join(filename); Ok(Some(InfoPlist::load_and_process(path, &vars)?)) } else if let Ok(default_plist) = InfoPlist::from_env_vars(&vars) { Ok(Some(default_plist)) } else { Ok(None) } // otherwise, we discover the project info from the current path and // invoke xcodebuild to give us the project settings for the first // target. } else { if_chain! { if let Ok(here) = env::current_dir(); if let Some(pi) = get_xcode_project_info(&here)?; then { InfoPlist::from_project_info(&pi) } else { Ok(None) } } } } /// Loads an info plist from a given project info pub fn from_project_info(pi: &XcodeProjectInfo) -> Result<Option<InfoPlist>> { if_chain! 
{ if let Some(config) = pi.get_configuration("release") .or_else(|| pi.get_configuration("debug")); if let Some(target) = pi.get_first_target(); then { let vars = pi.get_build_vars(target, config)?; if let Some(path) = vars.get("INFOPLIST_FILE") { let base = vars.get("PROJECT_DIR").map(Path::new) .unwrap_or_else(|| pi.base_path()); let path = base.join(path); return Ok(Some(InfoPlist::load_and_process(path, &vars)?)) } } } Ok(None) } /// Loads an info plist file from a path and processes it with the given vars pub fn load_and_process<P: AsRef<Path>>( path: P, vars: &HashMap<String, String>, ) -> Result<InfoPlist> { // do we want to preprocess the plist file? let plist = if vars.get("INFOPLIST_PREPROCESS").map(String::as_str) == Some("YES") { let mut c = process::Command::new("cc"); c.arg("-xc").arg("-P").arg("-E"); if let Some(defs) = vars.get("INFOPLIST_OTHER_PREPROCESSOR_FLAGS") { for token in defs.split_whitespace() { c.arg(token); } } if let Some(defs) = vars.get("INFOPLIST_PREPROCESSOR_DEFINITIONS") { for token in defs.split_whitespace() { c.arg(format!("-D{token}")); } } c.arg(path.as_ref()); let p = c.output()?; InfoPlist::from_reader(Cursor::new(&p.stdout[..])) } else { InfoPlist::from_path(path).or_else(|err| { /* This is sort of an edge-case, as XCode is not producing an `Info.plist` file by default anymore. However, it still does so for some templates. For example iOS Storyboard template will produce a partial `Info.plist` file, with a content only related to the Storyboard itself, but not the project as a whole. eg. <?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> <plist version="1.0"> <dict> <key>UIApplicationSceneManifest</key> <dict> <key>UISceneConfigurations</key> <dict> <key>UIWindowSceneSessionRoleApplication</key> <array> <dict> <key>UISceneStoryboardFile</key> <string>Main</string> </dict> </array> </dict> </dict> </dict> </plist> This causes a sort of false-positive, as `INFOPLIST_FILE` is present, yet it contains no data required by the CLI to correctly produce a `InfoPlist` struct. In the case like that, we try to fallback to env variables collected either by `xcodebuild` binary, or directly through `env` if we were called from within XCode itself. */ InfoPlist::from_env_vars(vars).map_err(|e| e.context(err)) }) }; plist.map(|raw| InfoPlist { name: expand_xcodevars(&raw.name, vars), bundle_id: expand_xcodevars(&raw.bundle_id, vars), version: expand_xcodevars(&raw.version, vars), build: expand_xcodevars(&raw.build, vars), }) } /// Loads an info plist from provided environment variables list pub fn from_env_vars(vars: &HashMap<String, String>) -> Result<InfoPlist> { let name = vars .get("PRODUCT_NAME") .map(String::to_owned) .ok_or_else(|| format_err!("PRODUCT_NAME is missing"))?; let bundle_id = vars .get("PRODUCT_BUNDLE_IDENTIFIER") .map(String::to_owned) .ok_or_else(|| format_err!("PRODUCT_BUNDLE_IDENTIFIER is missing"))?; let version = vars .get("MARKETING_VERSION") .map(String::to_owned) .ok_or_else(|| format_err!("MARKETING_VERSION is missing"))?; let build = vars .get("CURRENT_PROJECT_VERSION") .map(String::to_owned) .ok_or_else(|| format_err!("CURRENT_PROJECT_VERSION is missing"))?; Ok(InfoPlist { name, bundle_id, version, build, }) } /// Loads an info plist file from a path and does not process it. 
pub fn from_path<P: AsRef<Path>>(path: P) -> Result<InfoPlist> { let mut f = fs::File::open(path.as_ref()).context("Could not open Info.plist file")?; InfoPlist::from_reader(&mut f) } /// Loads an info plist file from a reader. pub fn from_reader<R: SeekRead>(rdr: R) -> Result<InfoPlist> { let rdr = BufReader::new(rdr); plist::from_reader(rdr).context("Could not parse Info.plist file") } pub fn get_release_name(&self) -> String { format!("{}@{}", self.bundle_id(), self.version()) } pub fn version(&self) -> &str { &self.version } pub fn build(&self) -> &str { &self.build } pub fn name(&self) -> &str { &self.name } pub fn bundle_id(&self) -> &str { &self.bundle_id } } /// Helper struct that allows the current execution to detach from /// the xcode console and continue in the background. This becomes /// a dummy shim for non xcode runs or platforms. pub struct MayDetach<'a> { output_file: Option<TempFile>, #[allow(dead_code)] task_name: &'a str, } impl<'a> MayDetach<'a> { fn new(task_name: &'a str) -> MayDetach<'a> { MayDetach { output_file: None, task_name, } } /// Returns true if we are deteached from xcode pub fn is_detached(&self) -> bool { self.output_file.is_some() } /// If we are launched from xcode this detaches us from the xcode console /// and continues execution in the background. From this moment on output /// is captured and the user is notified with notifications. #[cfg(target_os = "macos")] pub fn may_detach(&mut self) -> Result<bool> { if!launched_from_xcode() { return Ok(false); } println!("Continuing in background."); show_notification("Sentry", &format!("{} starting", self.task_name))?; let output_file = TempFile::create()?; daemonize_redirect( Some(output_file.path()), Some(output_file.path()), ChdirMode::NoChdir, ) .unwrap(); self.output_file = Some(output_file); Ok(true) } /// For non mac platforms this just never detaches. #[cfg(not(target_os = "macos"))] pub fn may_detach(&mut self) -> Result<bool> { Ok(false) } /// Wraps the execution of a code block. Does not detach until someone /// calls into `may_detach`. #[cfg(target_os = "macos")] pub fn wrap<T, F: FnOnce(&mut MayDetach<'_>) -> Result<T>>( task_name: &'a str, f: F, ) -> Result<T> { use std::time::Duration; let mut md = MayDetach::new(task_name); match f(&mut md) { Ok(x) => { md.show_done()?; Ok(x) } Err(err) => { if let Some(ref output_file) = md.output_file { crate::utils::system::print_error(&err); if md.show_critical_info()? { open::that(output_file.path())?; std::thread::sleep(Duration::from_millis(5000)); } } Err(err) } } } /// Dummy wrap call that never detaches for non mac platforms. #[cfg(not(target_os = "macos"))] pub fn wrap<T, F: FnOnce(&mut MayDetach) -> Result<T>>(task_name: &'a str, f: F) -> Result<T> { f(&mut MayDetach::new(task_name)) } #[cfg(target_os = "macos")] fn show_critical_info(&self) -> Result<bool> { show_critical_info( &format!("{} failed", self.task_name), "The Sentry build step failed while running in the background. \ You can ignore this error or view details to attempt to resolve \ it. 
Ignoring it might cause your crashes not to be handled \ properly.", ) } #[cfg(target_os = "macos")] fn show_done(&self) -> Result<()> { if self.is_detached() { show_notification("Sentry", &format!("{} finished", self.task_name))?; } Ok(()) } } /// Returns true if we were invoked from xcode #[cfg(target_os = "macos")] pub fn launched_from_xcode() -> bool { if env::var("XCODE_VERSION_ACTUAL").is_err() { return false; } let mut pid = unsafe { getpid() as u32 }; while let Some(parent) = mac_process_info::get_parent_pid(pid) { if parent == 1 { break; } if let Ok(name) = mac_process_info::get_process_name(parent) { if name == "Xcode" { return true; } } pid = parent; } false } /// Returns true if we were invoked from xcode #[cfg(not(target_os = "macos"))] pub fn launched_from_xcode() -> bool { false } /// Shows a dialog in xcode and blocks. The dialog will have a title and a /// message as well as the buttons "Show details" and "Ignore". Returns /// `true` if the `show details` button has been pressed. #[cfg(target_os = "macos")] pub fn show_critical_info(title: &str, message: &str) -> Result<bool> { use serde::Serialize; lazy_static! { static ref SCRIPT: osascript::JavaScript = osascript::JavaScript::new( " var App = Application('XCode'); App.includeStandardAdditions = true; return App.displayAlert($params.title, { message: $params.message, as: \"critical\", buttons: [\"Show details\", \"Ignore\"] }); " ); } #[derive(Serialize)] struct AlertParams<'a> { title: &'a str, message: &'a str, } #[derive(Debug, Deserialize)] struct AlertResult { #[serde(rename = "buttonReturned")] button: String, } let rv: AlertResult = SCRIPT .execute_with_params(AlertParams { title, message }) .context("Failed to display Xcode dialog")?; Ok(&rv.button!= "Ignore") } /// Shows a notification in xcode #[cfg(target_os = "macos")] pub fn show_notification(title: &str, message: &str) -> Result<()> { use crate::config::Config; use serde::Serialize; lazy_static! { static ref SCRIPT: osascript::JavaScript = osascript::JavaScript::new( " var App = Application.currentApplication(); App.includeStandardAdditions = true; App.displayNotification($params.message, { withTitle: $params.title }); " ); } let config = Config::current(); if!config.show_notifications()? { return Ok(()); } #[derive(Serialize)] struct NotificationParams<'a> { title: &'a str, message: &'a str, } SCRIPT .execute_with_params(NotificationParams { title, message }) .context("Failed to display Xcode notification")?; Ok(()) } #[test] fn test_expansion()
{ let mut vars = HashMap::new(); vars.insert("FOO_BAR".to_string(), "foo bar baz / blah".to_string()); assert_eq!( expand_xcodevars("A$(FOO_BAR:rfc1034identifier)B", &vars), "Afoo-bar-baz-blahB" ); assert_eq!( expand_xcodevars("A$(FOO_BAR:identifier)B", &vars), "Afoo_bar_baz_blahB" ); assert_eq!( expand_xcodevars("A${FOO_BAR:identifier}B", &vars), "Afoo_bar_baz_blahB" ); }
identifier_body
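This identifier_body row holds out the body of `test_expansion`, which pins down how `expand_xcodevars` applies the `:rfc1034identifier` and `:identifier` modifiers. A self-contained sketch of just that separator-replacement step — a simplification of the real `expand_vars`/regex pipeline that handles only the modifier, not the `$(...)`/`${...}` parsing:

use std::collections::HashMap;

/// Collapse whitespace/slash separator runs the way the two Xcode
/// modifiers do: `:rfc1034identifier` joins with `-`, `:identifier` with `_`.
fn apply_modifier(value: &str, modifier: Option<&str>) -> String {
    let joiner = match modifier {
        Some("rfc1034identifier") => "-",
        Some("identifier") => "_",
        _ => return value.to_string(),
    };
    value
        .split(|c: char| c.is_whitespace() || c == '/')
        .filter(|part| !part.is_empty())
        .collect::<Vec<_>>()
        .join(joiner)
}

fn main() {
    let mut vars = HashMap::new();
    vars.insert("FOO_BAR".to_string(), "foo bar baz / blah".to_string());
    let value = vars.get("FOO_BAR").unwrap();
    // Mirrors the assertions in the held-out test body above.
    assert_eq!(apply_modifier(value, Some("rfc1034identifier")), "foo-bar-baz-blah");
    assert_eq!(apply_modifier(value, Some("identifier")), "foo_bar_baz_blah");
    println!("ok");
}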
xcode.rs
bundle_id: String, #[serde(rename = "CFBundleShortVersionString")] version: String, #[serde(rename = "CFBundleVersion")] build: String, } #[derive(Deserialize, Debug)] pub struct XcodeProjectInfo { targets: Vec<String>, configurations: Vec<String>, #[serde(default = "PathBuf::new")] path: PathBuf, } impl fmt::Display for InfoPlist { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{} ({})", self.name(), &self.version) } } pub fn expand_xcodevars<S>(s: &str, vars: &HashMap<String, String, S>) -> String where S: BuildHasher, { lazy_static! { static ref SEP_RE: Regex = Regex::new(r"[\s/]+").unwrap(); } expand_vars(s, |key| { if key.is_empty() { return "".into(); } let mut iter = key.splitn(2, ':'); let value = vars .get(iter.next().unwrap()) .map(String::as_str) .unwrap_or(""); match iter.next() { Some("rfc1034identifier") => SEP_RE.replace_all(value, "-").into_owned(), Some("identifier") => SEP_RE.replace_all(value, "_").into_owned(), None | Some(_) => value.to_string(), } }) .into_owned() } fn get_xcode_project_info(path: &Path) -> Result<Option<XcodeProjectInfo>> { if_chain! { if let Some(filename_os) = path.file_name(); if let Some(filename) = filename_os.to_str(); if filename.ends_with(".xcodeproj"); then { return match XcodeProjectInfo::from_path(path) { Ok(info) => Ok(Some(info)), _ => Ok(None), }; } } let mut projects = vec![]; for entry in (fs::read_dir(path)?).flatten() { if let Some(filename) = entry.file_name().to_str() { if filename.ends_with(".xcodeproj") { projects.push(entry.path().to_path_buf()); } } } if projects.len() == 1 { match XcodeProjectInfo::from_path(&projects[0]) { Ok(info) => Ok(Some(info)), _ => Ok(None), } } else { Ok(None) } } impl XcodeProjectInfo { pub fn from_path<P: AsRef<Path>>(path: P) -> Result<XcodeProjectInfo> { #[derive(Deserialize)] struct Output { project: XcodeProjectInfo, } let p = process::Command::new("xcodebuild") .arg("-list") .arg("-json") .arg("-project") .arg(path.as_ref().as_os_str()) .output()?; match serde_json::from_slice::<Output>(&p.stdout) { Ok(mut rv) => { rv.project.path = path.as_ref().canonicalize()?; Ok(rv.project) } Err(e) => { warn!("Your.xcodeproj might be malformed. Command `xcodebuild -list -json -project {}` failed to produce a valid JSON output.", path.as_ref().display()); Err(e.into()) } } } pub fn base_path(&self) -> &Path { self.path.parent().unwrap() } pub fn get_build_vars( &self, target: &str, configuration: &str, ) -> Result<HashMap<String, String>> { let mut rv = HashMap::new(); let p = process::Command::new("xcodebuild") .arg("-showBuildSettings") .arg("-project") .arg(&self.path) .arg("-target") .arg(target) .arg("-configuration") .arg(configuration) .output()?; for line_rv in p.stdout.lines() { let line = line_rv?; if let Some(suffix) = line.strip_prefix(" ") { let mut sep = suffix.splitn(2, " = "); if_chain! { if let Some(key) = sep.next(); if let Some(value) = sep.next(); then { rv.insert(key.to_owned(), value.to_owned()); } } } } Ok(rv) } /// Return the first target pub fn get_first_target(&self) -> Option<&str> { if!self.targets.is_empty() { Some(&self.targets[0]) } else { None } } /// Returns the config with a certain name pub fn get_configuration(&self, name: &str) -> Option<&str> { let name = name.to_lowercase(); self.configurations .iter() .find(|&cfg| cfg.to_lowercase() == name) .map(|v| v.as_ref()) } } impl InfoPlist { /// Loads a processed plist file. 
pub fn discover_from_env() -> Result<Option<InfoPlist>> { // if we are loaded directly from xcode we can trust the os environment // and pass those variables to the processor. if env::var("XCODE_VERSION_ACTUAL").is_ok() { let vars: HashMap<_, _> = env::vars().collect(); if let Some(filename) = vars.get("INFOPLIST_FILE") { let base = vars.get("PROJECT_DIR").map(String::as_str).unwrap_or("."); let path = env::current_dir().unwrap().join(base).join(filename); Ok(Some(InfoPlist::load_and_process(path, &vars)?)) } else if let Ok(default_plist) = InfoPlist::from_env_vars(&vars) { Ok(Some(default_plist)) } else { Ok(None) } // otherwise, we discover the project info from the current path and // invoke xcodebuild to give us the project settings for the first // target. } else { if_chain! { if let Ok(here) = env::current_dir(); if let Some(pi) = get_xcode_project_info(&here)?; then { InfoPlist::from_project_info(&pi) } else { Ok(None) } } } } /// Loads an info plist from a given project info pub fn from_project_info(pi: &XcodeProjectInfo) -> Result<Option<InfoPlist>> { if_chain! { if let Some(config) = pi.get_configuration("release") .or_else(|| pi.get_configuration("debug")); if let Some(target) = pi.get_first_target(); then { let vars = pi.get_build_vars(target, config)?; if let Some(path) = vars.get("INFOPLIST_FILE") { let base = vars.get("PROJECT_DIR").map(Path::new) .unwrap_or_else(|| pi.base_path()); let path = base.join(path); return Ok(Some(InfoPlist::load_and_process(path, &vars)?)) } } } Ok(None) } /// Loads an info plist file from a path and processes it with the given vars pub fn load_and_process<P: AsRef<Path>>( path: P, vars: &HashMap<String, String>, ) -> Result<InfoPlist> { // do we want to preprocess the plist file? let plist = if vars.get("INFOPLIST_PREPROCESS").map(String::as_str) == Some("YES") { let mut c = process::Command::new("cc"); c.arg("-xc").arg("-P").arg("-E"); if let Some(defs) = vars.get("INFOPLIST_OTHER_PREPROCESSOR_FLAGS") { for token in defs.split_whitespace() { c.arg(token); } } if let Some(defs) = vars.get("INFOPLIST_PREPROCESSOR_DEFINITIONS") { for token in defs.split_whitespace() { c.arg(format!("-D{token}")); } } c.arg(path.as_ref()); let p = c.output()?; InfoPlist::from_reader(Cursor::new(&p.stdout[..])) } else { InfoPlist::from_path(path).or_else(|err| { /* This is sort of an edge-case, as XCode is not producing an `Info.plist` file by default anymore. However, it still does so for some templates. For example iOS Storyboard template will produce a partial `Info.plist` file, with a content only related to the Storyboard itself, but not the project as a whole. eg. <?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> <plist version="1.0"> <dict> <key>UIApplicationSceneManifest</key> <dict> <key>UISceneConfigurations</key> <dict> <key>UIWindowSceneSessionRoleApplication</key> <array> <dict> <key>UISceneStoryboardFile</key> <string>Main</string> </dict> </array> </dict> </dict> </dict> </plist> This causes a sort of false-positive, as `INFOPLIST_FILE` is present, yet it contains no data required by the CLI to correctly produce a `InfoPlist` struct. In the case like that, we try to fallback to env variables collected either by `xcodebuild` binary, or directly through `env` if we were called from within XCode itself. 
*/ InfoPlist::from_env_vars(vars).map_err(|e| e.context(err)) }) }; plist.map(|raw| InfoPlist { name: expand_xcodevars(&raw.name, vars), bundle_id: expand_xcodevars(&raw.bundle_id, vars), version: expand_xcodevars(&raw.version, vars), build: expand_xcodevars(&raw.build, vars), }) } /// Loads an info plist from provided environment variables list pub fn from_env_vars(vars: &HashMap<String, String>) -> Result<InfoPlist> { let name = vars .get("PRODUCT_NAME") .map(String::to_owned) .ok_or_else(|| format_err!("PRODUCT_NAME is missing"))?; let bundle_id = vars .get("PRODUCT_BUNDLE_IDENTIFIER") .map(String::to_owned) .ok_or_else(|| format_err!("PRODUCT_BUNDLE_IDENTIFIER is missing"))?; let version = vars .get("MARKETING_VERSION") .map(String::to_owned) .ok_or_else(|| format_err!("MARKETING_VERSION is missing"))?; let build = vars .get("CURRENT_PROJECT_VERSION") .map(String::to_owned) .ok_or_else(|| format_err!("CURRENT_PROJECT_VERSION is missing"))?; Ok(InfoPlist { name, bundle_id, version, build, }) } /// Loads an info plist file from a path and does not process it. pub fn from_path<P: AsRef<Path>>(path: P) -> Result<InfoPlist> { let mut f = fs::File::open(path.as_ref()).context("Could not open Info.plist file")?; InfoPlist::from_reader(&mut f) } /// Loads an info plist file from a reader. pub fn from_reader<R: SeekRead>(rdr: R) -> Result<InfoPlist> { let rdr = BufReader::new(rdr); plist::from_reader(rdr).context("Could not parse Info.plist file") } pub fn get_release_name(&self) -> String { format!("{}@{}", self.bundle_id(), self.version()) } pub fn version(&self) -> &str { &self.version } pub fn build(&self) -> &str { &self.build } pub fn name(&self) -> &str { &self.name } pub fn bundle_id(&self) -> &str { &self.bundle_id } } /// Helper struct that allows the current execution to detach from /// the xcode console and continue in the background. This becomes /// a dummy shim for non xcode runs or platforms. pub struct
<'a> { output_file: Option<TempFile>, #[allow(dead_code)] task_name: &'a str, } impl<'a> MayDetach<'a> { fn new(task_name: &'a str) -> MayDetach<'a> { MayDetach { output_file: None, task_name, } } /// Returns true if we are deteached from xcode pub fn is_detached(&self) -> bool { self.output_file.is_some() } /// If we are launched from xcode this detaches us from the xcode console /// and continues execution in the background. From this moment on output /// is captured and the user is notified with notifications. #[cfg(target_os = "macos")] pub fn may_detach(&mut self) -> Result<bool> { if!launched_from_xcode() { return Ok(false); } println!("Continuing in background."); show_notification("Sentry", &format!("{} starting", self.task_name))?; let output_file = TempFile::create()?; daemonize_redirect( Some(output_file.path()), Some(output_file.path()), ChdirMode::NoChdir, ) .unwrap(); self.output_file = Some(output_file); Ok(true) } /// For non mac platforms this just never detaches. #[cfg(not(target_os = "macos"))] pub fn may_detach(&mut self) -> Result<bool> { Ok(false) } /// Wraps the execution of a code block. Does not detach until someone /// calls into `may_detach`. #[cfg(target_os = "macos")] pub fn wrap<T, F: FnOnce(&mut MayDetach<'_>) -> Result<T>>( task_name: &'a str, f: F, ) -> Result<T> { use std::time::Duration; let mut md = MayDetach::new(task_name); match f(&mut md) { Ok(x) => { md.show_done()?; Ok(x) } Err(err) => { if let Some(ref output_file) = md.output_file { crate::utils::system::print_error(&err); if md.show_critical_info()? { open::that(output_file.path())?; std::thread::sleep(Duration::from_millis(5000)); } } Err(err) } } } /// Dummy wrap call that never detaches for non mac platforms. #[cfg(not(target_os = "macos"))] pub fn wrap<T, F: FnOnce(&mut MayDetach) -> Result<T>>(task_name: &'a str, f: F) -> Result<T> { f(&mut MayDetach::new(task_name)) } #[cfg(target_os = "macos")] fn show_critical_info(&self) -> Result<bool> { show_critical_info( &format!("{} failed", self.task_name), "The Sentry build step failed while running in the background. \ You can ignore this error or view details to attempt to resolve \ it. Ignoring it might cause your crashes not to be handled \ properly.", ) } #[cfg(target_os = "macos")] fn show_done(&self) -> Result<()> { if self.is_detached() { show_notification("Sentry", &format!("{} finished", self.task_name))?; } Ok(()) } } /// Returns true if we were invoked from xcode #[cfg(target_os = "macos")] pub fn launched_from_xcode() -> bool { if env::var("XCODE_VERSION_ACTUAL").is_err() { return false; } let mut pid = unsafe { getpid() as u32 }; while let Some(parent) = mac_process_info::get_parent_pid(pid) { if parent == 1 { break; } if let Ok(name) = mac_process_info::get_process_name(parent) { if name == "Xcode" { return true; } } pid = parent; } false } /// Returns true if we were invoked from xcode #[cfg(not(target_os = "macos"))] pub fn launched_from_xcode() -> bool { false } /// Shows a dialog in xcode and blocks. The dialog will have a title and a /// message as well as the buttons "Show details" and "Ignore". Returns /// `true` if the `show details` button has been pressed. #[cfg(target_os = "macos")] pub fn show_critical_info(title: &str, message: &str) -> Result<bool> { use serde::Serialize; lazy_static! 
{ static ref SCRIPT: osascript::JavaScript = osascript::JavaScript::new( " var App = Application('XCode'); App.includeStandardAdditions = true; return App.displayAlert($params.title, { message: $params.message, as: \"critical\", buttons: [\"Show details\", \"Ignore\"] }); " ); } #[derive(Serialize)] struct AlertParams<'a> { title: &'a str, message: &'a str, } #[derive(Debug, Deserialize)] struct AlertResult { #[serde(rename = "buttonReturned")] button: String, } let rv: AlertResult = SCRIPT .execute_with_params(AlertParams { title, message }) .context("Failed to display Xcode dialog")?; Ok(&rv.button!= "Ignore") } /// Shows a notification in xcode #[cfg(target_os = "macos")] pub fn show_notification(title: &str, message
MayDetach
identifier_name
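In this identifier_name row the held-out middle is just the struct name `MayDetach`: the prefix stops right after `pub struct` and the suffix resumes at `<'a> {`. A small sketch of slicing such a masked span back out of the reassembled source by byte offsets — a hypothetical helper, since the dataset itself stores the three pieces separately:

/// Slice the held-out span out of the full source, given how many
/// bytes the prefix and suffix occupy.
fn masked_span(full: &str, prefix_len: usize, suffix_len: usize) -> &str {
    &full[prefix_len..full.len() - suffix_len]
}

fn main() {
    let prefix = "pub struct ";
    let middle = "MayDetach";
    let suffix = "<'a> { output_file: Option<TempFile> }";
    let full = format!("{prefix}{middle}{suffix}");
    assert_eq!(masked_span(&full, prefix.len(), suffix.len()), "MayDetach");
    println!("recovered: {}", masked_span(&full, prefix.len(), suffix.len()));
}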
xcode.rs
bundle_id: String, #[serde(rename = "CFBundleShortVersionString")] version: String, #[serde(rename = "CFBundleVersion")] build: String, } #[derive(Deserialize, Debug)] pub struct XcodeProjectInfo { targets: Vec<String>, configurations: Vec<String>, #[serde(default = "PathBuf::new")] path: PathBuf, } impl fmt::Display for InfoPlist { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{} ({})", self.name(), &self.version) } } pub fn expand_xcodevars<S>(s: &str, vars: &HashMap<String, String, S>) -> String where S: BuildHasher, { lazy_static! { static ref SEP_RE: Regex = Regex::new(r"[\s/]+").unwrap(); } expand_vars(s, |key| { if key.is_empty() { return "".into(); } let mut iter = key.splitn(2, ':'); let value = vars .get(iter.next().unwrap()) .map(String::as_str) .unwrap_or(""); match iter.next() { Some("rfc1034identifier") => SEP_RE.replace_all(value, "-").into_owned(), Some("identifier") => SEP_RE.replace_all(value, "_").into_owned(), None | Some(_) => value.to_string(), } }) .into_owned() } fn get_xcode_project_info(path: &Path) -> Result<Option<XcodeProjectInfo>> { if_chain! { if let Some(filename_os) = path.file_name(); if let Some(filename) = filename_os.to_str(); if filename.ends_with(".xcodeproj"); then { return match XcodeProjectInfo::from_path(path) { Ok(info) => Ok(Some(info)), _ => Ok(None), }; } } let mut projects = vec![]; for entry in (fs::read_dir(path)?).flatten() { if let Some(filename) = entry.file_name().to_str() { if filename.ends_with(".xcodeproj") { projects.push(entry.path().to_path_buf()); } } } if projects.len() == 1 { match XcodeProjectInfo::from_path(&projects[0]) { Ok(info) => Ok(Some(info)), _ => Ok(None), } } else { Ok(None) } } impl XcodeProjectInfo { pub fn from_path<P: AsRef<Path>>(path: P) -> Result<XcodeProjectInfo> { #[derive(Deserialize)] struct Output { project: XcodeProjectInfo, } let p = process::Command::new("xcodebuild") .arg("-list") .arg("-json") .arg("-project") .arg(path.as_ref().as_os_str()) .output()?; match serde_json::from_slice::<Output>(&p.stdout) { Ok(mut rv) => { rv.project.path = path.as_ref().canonicalize()?; Ok(rv.project) } Err(e) => { warn!("Your.xcodeproj might be malformed. Command `xcodebuild -list -json -project {}` failed to produce a valid JSON output.", path.as_ref().display()); Err(e.into()) } } } pub fn base_path(&self) -> &Path { self.path.parent().unwrap() } pub fn get_build_vars( &self, target: &str, configuration: &str, ) -> Result<HashMap<String, String>> { let mut rv = HashMap::new(); let p = process::Command::new("xcodebuild") .arg("-showBuildSettings") .arg("-project") .arg(&self.path) .arg("-target") .arg(target) .arg("-configuration") .arg(configuration) .output()?; for line_rv in p.stdout.lines() { let line = line_rv?; if let Some(suffix) = line.strip_prefix(" ") { let mut sep = suffix.splitn(2, " = "); if_chain! { if let Some(key) = sep.next(); if let Some(value) = sep.next(); then { rv.insert(key.to_owned(), value.to_owned()); } } } } Ok(rv) } /// Return the first target pub fn get_first_target(&self) -> Option<&str> { if!self.targets.is_empty() { Some(&self.targets[0]) } else { None } } /// Returns the config with a certain name pub fn get_configuration(&self, name: &str) -> Option<&str> { let name = name.to_lowercase(); self.configurations .iter() .find(|&cfg| cfg.to_lowercase() == name) .map(|v| v.as_ref()) } } impl InfoPlist { /// Loads a processed plist file. 
pub fn discover_from_env() -> Result<Option<InfoPlist>> { // if we are loaded directly from xcode we can trust the os environment // and pass those variables to the processor. if env::var("XCODE_VERSION_ACTUAL").is_ok() { let vars: HashMap<_, _> = env::vars().collect(); if let Some(filename) = vars.get("INFOPLIST_FILE") { let base = vars.get("PROJECT_DIR").map(String::as_str).unwrap_or("."); let path = env::current_dir().unwrap().join(base).join(filename); Ok(Some(InfoPlist::load_and_process(path, &vars)?)) } else if let Ok(default_plist) = InfoPlist::from_env_vars(&vars) { Ok(Some(default_plist)) } else { Ok(None) } // otherwise, we discover the project info from the current path and // invoke xcodebuild to give us the project settings for the first // target. } else { if_chain! { if let Ok(here) = env::current_dir(); if let Some(pi) = get_xcode_project_info(&here)?; then { InfoPlist::from_project_info(&pi) } else { Ok(None) } } } } /// Loads an info plist from a given project info pub fn from_project_info(pi: &XcodeProjectInfo) -> Result<Option<InfoPlist>> { if_chain! { if let Some(config) = pi.get_configuration("release") .or_else(|| pi.get_configuration("debug")); if let Some(target) = pi.get_first_target(); then { let vars = pi.get_build_vars(target, config)?; if let Some(path) = vars.get("INFOPLIST_FILE") { let base = vars.get("PROJECT_DIR").map(Path::new) .unwrap_or_else(|| pi.base_path()); let path = base.join(path); return Ok(Some(InfoPlist::load_and_process(path, &vars)?)) } } } Ok(None) } /// Loads an info plist file from a path and processes it with the given vars pub fn load_and_process<P: AsRef<Path>>( path: P, vars: &HashMap<String, String>, ) -> Result<InfoPlist> { // do we want to preprocess the plist file? let plist = if vars.get("INFOPLIST_PREPROCESS").map(String::as_str) == Some("YES") { let mut c = process::Command::new("cc"); c.arg("-xc").arg("-P").arg("-E"); if let Some(defs) = vars.get("INFOPLIST_OTHER_PREPROCESSOR_FLAGS") { for token in defs.split_whitespace() { c.arg(token); } } if let Some(defs) = vars.get("INFOPLIST_PREPROCESSOR_DEFINITIONS") { for token in defs.split_whitespace() { c.arg(format!("-D{token}")); } } c.arg(path.as_ref()); let p = c.output()?; InfoPlist::from_reader(Cursor::new(&p.stdout[..])) } else { InfoPlist::from_path(path).or_else(|err| { /* This is sort of an edge-case, as XCode is not producing an `Info.plist` file by default anymore. However, it still does so for some templates. For example iOS Storyboard template will produce a partial `Info.plist` file, with a content only related to the Storyboard itself, but not the project as a whole. eg. <?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> <plist version="1.0"> <dict> <key>UIApplicationSceneManifest</key> <dict> <key>UISceneConfigurations</key> <dict> <key>UIWindowSceneSessionRoleApplication</key> <array> <dict> <key>UISceneStoryboardFile</key> <string>Main</string> </dict> </array> </dict> </dict> </dict> </plist> This causes a sort of false-positive, as `INFOPLIST_FILE` is present, yet it contains no data required by the CLI to correctly produce a `InfoPlist` struct. In the case like that, we try to fallback to env variables collected either by `xcodebuild` binary, or directly through `env` if we were called from within XCode itself. 
*/ InfoPlist::from_env_vars(vars).map_err(|e| e.context(err)) }) }; plist.map(|raw| InfoPlist { name: expand_xcodevars(&raw.name, vars), bundle_id: expand_xcodevars(&raw.bundle_id, vars), version: expand_xcodevars(&raw.version, vars), build: expand_xcodevars(&raw.build, vars), }) } /// Loads an info plist from provided environment variables list pub fn from_env_vars(vars: &HashMap<String, String>) -> Result<InfoPlist> { let name = vars .get("PRODUCT_NAME") .map(String::to_owned) .ok_or_else(|| format_err!("PRODUCT_NAME is missing"))?; let bundle_id = vars .get("PRODUCT_BUNDLE_IDENTIFIER") .map(String::to_owned) .ok_or_else(|| format_err!("PRODUCT_BUNDLE_IDENTIFIER is missing"))?; let version = vars .get("MARKETING_VERSION") .map(String::to_owned) .ok_or_else(|| format_err!("MARKETING_VERSION is missing"))?; let build = vars .get("CURRENT_PROJECT_VERSION") .map(String::to_owned) .ok_or_else(|| format_err!("CURRENT_PROJECT_VERSION is missing"))?; Ok(InfoPlist { name, bundle_id, version, build, }) } /// Loads an info plist file from a path and does not process it. pub fn from_path<P: AsRef<Path>>(path: P) -> Result<InfoPlist> { let mut f = fs::File::open(path.as_ref()).context("Could not open Info.plist file")?; InfoPlist::from_reader(&mut f) } /// Loads an info plist file from a reader. pub fn from_reader<R: SeekRead>(rdr: R) -> Result<InfoPlist> { let rdr = BufReader::new(rdr); plist::from_reader(rdr).context("Could not parse Info.plist file") } pub fn get_release_name(&self) -> String { format!("{}@{}", self.bundle_id(), self.version()) } pub fn version(&self) -> &str { &self.version } pub fn build(&self) -> &str { &self.build } pub fn name(&self) -> &str { &self.name } pub fn bundle_id(&self) -> &str { &self.bundle_id } } /// Helper struct that allows the current execution to detach from /// the xcode console and continue in the background. This becomes /// a dummy shim for non xcode runs or platforms. pub struct MayDetach<'a> { output_file: Option<TempFile>, #[allow(dead_code)] task_name: &'a str, } impl<'a> MayDetach<'a> { fn new(task_name: &'a str) -> MayDetach<'a> { MayDetach { output_file: None, task_name, } } /// Returns true if we are deteached from xcode pub fn is_detached(&self) -> bool { self.output_file.is_some() } /// If we are launched from xcode this detaches us from the xcode console /// and continues execution in the background. From this moment on output /// is captured and the user is notified with notifications. #[cfg(target_os = "macos")] pub fn may_detach(&mut self) -> Result<bool> { if!launched_from_xcode() { return Ok(false); } println!("Continuing in background."); show_notification("Sentry", &format!("{} starting", self.task_name))?; let output_file = TempFile::create()?; daemonize_redirect( Some(output_file.path()), Some(output_file.path()), ChdirMode::NoChdir, ) .unwrap(); self.output_file = Some(output_file); Ok(true) } /// For non mac platforms this just never detaches. #[cfg(not(target_os = "macos"))] pub fn may_detach(&mut self) -> Result<bool> { Ok(false) } /// Wraps the execution of a code block. Does not detach until someone /// calls into `may_detach`. #[cfg(target_os = "macos")] pub fn wrap<T, F: FnOnce(&mut MayDetach<'_>) -> Result<T>>( task_name: &'a str, f: F, ) -> Result<T> { use std::time::Duration; let mut md = MayDetach::new(task_name); match f(&mut md) { Ok(x) => { md.show_done()?; Ok(x) } Err(err) => { if let Some(ref output_file) = md.output_file { crate::utils::system::print_error(&err); if md.show_critical_info()? 
{ open::that(output_file.path())?; std::thread::sleep(Duration::from_millis(5000)); } } Err(err) } } } /// Dummy wrap call that never detaches for non mac platforms. #[cfg(not(target_os = "macos"))] pub fn wrap<T, F: FnOnce(&mut MayDetach) -> Result<T>>(task_name: &'a str, f: F) -> Result<T> { f(&mut MayDetach::new(task_name)) } #[cfg(target_os = "macos")] fn show_critical_info(&self) -> Result<bool> { show_critical_info( &format!("{} failed", self.task_name), "The Sentry build step failed while running in the background. \ You can ignore this error or view details to attempt to resolve \ it. Ignoring it might cause your crashes not to be handled \ properly.", ) } #[cfg(target_os = "macos")] fn show_done(&self) -> Result<()> { if self.is_detached() { show_notification("Sentry", &format!("{} finished", self.task_name))?; } Ok(()) } } /// Returns true if we were invoked from xcode #[cfg(target_os = "macos")] pub fn launched_from_xcode() -> bool { if env::var("XCODE_VERSION_ACTUAL").is_err() { return false; } let mut pid = unsafe { getpid() as u32 }; while let Some(parent) = mac_process_info::get_parent_pid(pid) { if parent == 1 { break; } if let Ok(name) = mac_process_info::get_process_name(parent) { if name == "Xcode" { return true; } } pid = parent; } false } /// Returns true if we were invoked from xcode #[cfg(not(target_os = "macos"))] pub fn launched_from_xcode() -> bool { false }
/// Shows a dialog in xcode and blocks. The dialog will have a title and a /// message as well as the buttons "Show details" and "Ignore". Returns /// `true` if the `show details` button has been pressed. #[cfg(target_os = "macos")] pub fn show_critical_info(title: &str, message: &str) -> Result<bool> { use serde::Serialize; lazy_static! { static ref SCRIPT: osascript::JavaScript = osascript::JavaScript::new( " var App = Application('XCode'); App.includeStandardAdditions = true; return App.displayAlert($params.title, { message: $params.message, as: \"critical\", buttons: [\"Show details\", \"Ignore\"] }); " ); } #[derive(Serialize)] struct AlertParams<'a> { title: &'a str, message: &'a str, } #[derive(Debug, Deserialize)] struct AlertResult { #[serde(rename = "buttonReturned")] button: String, } let rv: AlertResult = SCRIPT .execute_with_params(AlertParams { title, message }) .context("Failed to display Xcode dialog")?; Ok(&rv.button!= "Ignore") } /// Shows a notification in xcode #[cfg(target_os = "macos")] pub fn show_notification(title: &str, message:
random_line_split
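Unlike the syntax-aware rows above, a random_line_split row cuts at an arbitrary point: here the text breaks off mid-signature, inside `pub fn show_notification(title: &str, message:`. A sketch of producing such a split at a chosen line boundary — the index choice is illustrative, and presumably the real pipeline samples it at random:

/// Split source text before the given zero-based line index, the way a
/// random_line_split row's prefix/suffix pair is produced.
fn split_before_line(src: &str, line_idx: usize) -> (&str, &str) {
    let mut offset = 0;
    for (i, line) in src.split_inclusive('\n').enumerate() {
        if i == line_idx {
            break;
        }
        offset += line.len();
    }
    src.split_at(offset)
}

fn main() {
    let src = "fn a() {}\nfn b() {}\nfn c() {}\n";
    let (prefix, suffix) = split_before_line(src, 2);
    assert_eq!(prefix, "fn a() {}\nfn b() {}\n");
    assert_eq!(suffix, "fn c() {}\n");
    println!("prefix bytes: {}, suffix bytes: {}", prefix.len(), suffix.len());
}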
elf.rs
use std::convert::TryInto; use std::ffi::CStr; use std::mem; use std::ptr; use std::slice; use failure::{bail, format_err, Error, Fail, ResultExt}; use goblin::elf::{ header::{EM_BPF, ET_REL}, section_header::{SectionHeader, SHT_PROGBITS, SHT_REL}, sym::{Sym, STB_GLOBAL}, }; use ebpf_core::{ ffi, prog, Attach, Insn, Map, Object, Opcode, Program, Type, BPF_LICENSE_SEC, BPF_MAPS_SEC, BPF_VERSION_SEC, BTF_ELF_SEC, BTF_EXT_ELF_SEC, }; use crate::parser::Parser; use crate::prog::prog_type_by_name; impl<'a> Parser<goblin::elf::Elf<'a>> { pub fn parse(&self, buf: &[u8]) -> Result<Object, Error>
buf.get(sec.file_range()).ok_or_else(|| { format_err!( "`{}` section data {:?} out of bound", name, sec.file_range() ) }) }; match name { BPF_LICENSE_SEC if sec.sh_type == SHT_PROGBITS => { license = Some( CStr::from_bytes_with_nul(section_data()?)? .to_str()? .to_owned(), ); debug!("kernel license: {}", license.as_ref().unwrap()); } BPF_VERSION_SEC if sec.sh_type == SHT_PROGBITS => { version = Some(u32::from_ne_bytes(section_data()?.try_into()?)); debug!("kernel version: {:x}", version.as_ref().unwrap()); } BPF_MAPS_SEC => { debug!("`{}` section", name); maps_section = Some((idx, sec)); } BTF_ELF_SEC => { // TODO btf__new debug!("`{}` section", name); } BTF_EXT_ELF_SEC => { // TODO btf_ext_data debug!("`{}` section", name); } _ if sec.sh_type == SHT_PROGBITS && sec.is_executable() && sec.sh_size > 0 => { if name == ".text" { text_section = Some(idx); } // If type is not specified, try to guess it based on section name. let (ty, attach) = match self.prog_type { Some(ty) if ty!= Type::Unspec => (ty, self.expected_attach_type), _ => prog_type_by_name(name) .ok_or_else(|| format_err!("unexpected section name: {}", name))?, }; let insns = unsafe { let data = buf.as_ptr().add(sec.sh_offset as usize); let len = sec.sh_size as usize / mem::size_of::<Insn>(); slice::from_raw_parts(data as *const _, len) }; debug!( "{:?} kernel program #{} @ section `{}` with {} insns", ty, idx, name, insns.len() ); programs.push((name, ty, attach, idx, insns.to_vec())); } _ if sec.sh_type == SHT_REL => {} _ => { trace!("ignore `{}` section", name); } } } let maps = if let Some((idx, sec)) = maps_section { self.init_maps(buf, idx, sec)? } else { Vec::new() }; let mut programs = self .resolve_program_names(programs, text_section) .context("resolve program names")?; self.relocate_programs( &mut programs, &maps, maps_section.map(|(idx, _)| idx), text_section, )?; Ok(Object { license, version, programs, maps, }) } fn init_maps(&self, buf: &[u8], idx: usize, sec: &SectionHeader) -> Result<Vec<Map>, Error> { let mut maps = Vec::new(); let data = buf.get(sec.file_range()).ok_or_else(|| { format_err!("`maps` section data {:?} out of bound", sec.file_range()) })?; let nr_maps = self .obj .syms .iter() .filter(|sym| sym.st_shndx == idx) .count(); let map_def_sz = data.len() / nr_maps; for sym in self.obj.syms.iter().filter(|sym| sym.st_shndx == idx) { let name = self .obj .strtab .get(sym.st_name) .transpose()? .ok_or_else(|| format_err!("resolve map name failed, idx={:x}", sym.st_name))?; let mut map_def: ffi::bpf_map_def = unsafe { mem::zeroed() }; unsafe { ptr::copy_nonoverlapping( data.as_ptr() as *const u8, &mut map_def as *mut _ as *mut u8, mem::size_of::<ffi::bpf_map_def>().min(map_def_sz), ) } if map_def_sz > mem::size_of::<ffi::bpf_map_def>() && data[mem::size_of::<ffi::bpf_map_def>()..] 
.iter() .any(|&b| b!= 0) { bail!("maps section has unrecognized, non-zero options"); } let map = Map::with_def(name, sym.st_value as usize, self.ifindex, &map_def)?; debug!( "#{} map `{}` @ section `{}`: {:?}", maps.len(), name, self.resolve_name(sec.sh_name)?, map ); maps.push(map) } maps.sort_by_cached_key(|map| map.offset); Ok(maps) } fn resolve_program_names( &self, programs: impl IntoIterator<Item = (&'a str, Type, Option<Attach>, usize, Vec<Insn>)>, text_section: Option<usize>, ) -> Result<Vec<Program>, Error> { programs .into_iter() .map(|(title, ty, attach, idx, insns)| { let name = self .resolve_symbol(|sym| sym.st_shndx == idx && sym.st_bind() == STB_GLOBAL) .and_then(|sym| self.resolve_name(sym.st_name)) .or_else(|_| { if text_section == Some(idx) { Ok(".text") } else { Err(format_err!("program `{}` symbol not found", title)) } })?; debug!( "#{} `{:?}` program `{}` @ secion `{}` with {} insns", idx, ty, name, title, insns.len() ); Ok(Program::new(name, ty, attach, title, idx, insns)) }) .collect::<Result<Vec<_>, _>>() } fn resolve_symbol<P: FnMut(&Sym) -> bool>(&self, predicate: P) -> Result<Sym, Error> { self.obj .syms .iter() .find(predicate) .ok_or_else(|| format_err!("symbol not found")) } fn resolve_name(&self, idx: usize) -> Result<&str, Error> { self.obj .strtab .get(idx) .ok_or_else(|| format_err!("index out of bound"))? .map_err(|err| err.context("read string").into()) } fn relocate_programs( &self, programs: &mut [Program], maps: &[Map], maps_idx: Option<usize>, text_idx: Option<usize>, ) -> Result<(), Error> { for (idx, sec) in &self.obj.shdr_relocs { if let Some(prog) = programs.iter_mut().find(|prog| prog.idx == *idx) { trace!("relocate program #{} `{}`", prog.idx, prog.name); for reloc in sec.iter() { let sym = self.resolve_symbol(|sym| sym.st_shndx == reloc.r_sym)?; trace!( "reloc for #{}, value = {}, name = {}", reloc.r_sym, sym.st_value, sym.st_name ); if Some(sym.st_shndx)!= maps_idx && Some(sym.st_shndx)!= text_idx { bail!("program '{}' contains non-map related relo data pointing to section #{}", prog.name, sym.st_shndx); } let insn_idx = reloc.r_offset as usize / mem::size_of::<Insn>(); trace!("reloc insn #{}", insn_idx); if Opcode::from_bits_truncate(prog.insns[insn_idx].code) != Opcode::LD | Opcode::IMM | Opcode::DW { bail!( "invalid relocate for insns[{}].code = {:?}", insn_idx, prog.insns[insn_idx].code ); } let map_idx = maps .iter() .position(|map| map.offset == sym.st_value as usize) .ok_or_else(|| format_err!("map @ {} not found", sym.st_value))?; prog.relocs.push(prog::Reloc::LD64 { insn_idx, map_idx }) } } } Ok(()) } }
{ if self.obj.header.e_type != ET_REL || self.obj.header.e_machine != EM_BPF { bail!("not an eBPF object file"); } if self.obj.header.endianness()? != scroll::NATIVE { bail!("endianness mismatch.") } let mut license = None; let mut version = None; let mut programs = vec![]; let mut maps_section = None; let mut text_section = None; for (idx, sec) in self.obj.section_headers.iter().enumerate() { let name = self.resolve_name(sec.sh_name)?; trace!("parse `{}` section: {:?}", name, sec); let section_data = || {
identifier_body
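The final row switches files: it holds out the entire body of `Parser::parse` from `elf.rs`, whose first moves are the ELF-header sanity checks (`ET_REL`, `EM_BPF`, endianness). A reduced sketch of that validation step using the `goblin` crate the row itself imports — error handling is simplified to `String`, and a compatible crate version is assumed:

use goblin::elf::{
    header::{EM_BPF, ET_REL},
    Elf,
};

/// Mirror the guard at the top of the held-out `parse` body: accept only
/// relocatable objects built for the eBPF machine type.
fn check_bpf_object(buf: &[u8]) -> Result<(), String> {
    let elf = Elf::parse(buf).map_err(|e| e.to_string())?;
    if elf.header.e_type != ET_REL || elf.header.e_machine != EM_BPF {
        return Err("not an eBPF object file".to_string());
    }
    Ok(())
}

fn main() {
    let path = std::env::args().nth(1).expect("usage: check <object.o>");
    let buf = std::fs::read(&path).expect("failed to read object file");
    match check_bpf_object(&buf) {
        Ok(()) => println!("{path}: relocatable eBPF object"),
        Err(err) => eprintln!("{path}: {err}"),
    }
}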