// rxcb.rs
//! Objective XCB Wrapper
#![allow(dead_code)]
extern crate univstring; use self::univstring::UnivString;
extern crate xcb;
use self::xcb::ffi::*;
use std::ptr::{null, null_mut};
use std::marker::PhantomData;
use std::io::{Error as IOError, ErrorKind};
#[repr(C)] pub enum WindowIOClass
{
InputOnly = XCB_WINDOW_CLASS_INPUT_ONLY as _,
InputOutput = XCB_WINDOW_CLASS_INPUT_OUTPUT as _,
FromParent = XCB_WINDOW_CLASS_COPY_FROM_PARENT as _
}
pub struct Connection(*mut xcb_connection_t);
impl Connection
{
pub fn new<S: UnivString + ?Sized>(display: Option<&S>) -> Option<Self>
{
let display_name = display.map(|s| s.to_cstr().unwrap());
let p = unsafe
{
xcb_connect(display_name.as_ref().map(|p| p.as_ptr()).unwrap_or(null()), null_mut())
};
if p.is_null() { None } else { Some(Connection(p)) }
}
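// Usage sketch (illustrative, not from the original source; assumes the
// univstring crate implements UnivString for str): passing None falls back to
// the $DISPLAY environment variable, just like xcb_connect(NULL, ..).
//
//     let con = Connection::new(None::<&str>).expect("failed to connect to X server");
//     con.flush();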
#[cfg(feature = "with_ferrite")]
pub(crate) fn inner(&self) -> *mut xcb_connection_t { self.0 }
pub fn setup(&self) -> &Setup { unsafe { &*(xcb_get_setup(self.0) as *mut _) } }
pub fn new_id(&self) -> u32 { unsafe { xcb_generate_id(self.0) } }
pub fn new_window_id(&self) -> Window { Window(self.new_id()) }
/*pub fn try_intern(&self, name: &str) -> AtomCookie
{
AtomCookie(unsafe { xcb_intern_atom(self.0, 0, name.len() as _, name.as_ptr()) }, self)
}*/
pub fn intern(&self, name: &str) -> AtomCookie
{
AtomCookie(unsafe { xcb_intern_atom(self.0, 1, name.len() as _, name.as_ptr() as _) }, self)
}
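// XCB is asynchronous: xcb_intern_atom only queues the request and hands back
// a cookie, so several interns can be pipelined before any reply is awaited.
// Hedged sketch of the round trip:
//
//     let wm_protocols = con.intern("WM_PROTOCOLS").reply()?;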
pub fn flush(&self) { unsafe { xcb_flush(self.0); } }
pub fn create_window(&self, depth: Option<u8>, id: &Window, parent: Option<xcb_window_t>,
x: i16, y: i16, width: u16, height: u16, border_width: u16, class: WindowIOClass,
visual: Option<VisualID>, valuelist: &WindowValueList) -> Result<(), GenericError>
{
let serialized = valuelist.serialize();
unsafe
{
CheckedCookie(xcb_create_window_checked(self.0, depth.unwrap_or(XCB_COPY_FROM_PARENT as _), id.0,
parent.unwrap_or_else(|| self.setup().iter_roots().next().unwrap().root()),
x, y, width, height, border_width, class as _, visual.unwrap_or(XCB_COPY_FROM_PARENT as _),
valuelist.0, serialized.0 as *const _), self).check()
}
}
pub fn map_window(&self, w: &Window) -> Result<(), GenericError>
{
unsafe { CheckedCookie(xcb_map_window_checked(self.0, w.0), self).check() }
}
pub fn destroy_window(&self, w: &Window) -> Result<(), GenericError>
{
unsafe { CheckedCookie(xcb_destroy_window_checked(self.0, w.0), self).check() }
}
}
impl Drop for Connection { fn drop(&mut self) { unsafe { xcb_disconnect(self.0) } } }
pub struct Setup(xcb_setup_t);
impl Setup
{
pub fn iter_roots(&self) -> IterRootScreen { IterRootScreen(unsafe { xcb_setup_roots_iterator(&self.0) }) }
}
#[repr(C)] pub struct Screen(xcb_screen_t);
impl Screen
{
pub fn root(&self) -> xcb_window_t { self.0.root }
// pub fn default_colormap(&self) -> xcb_colormap_t { self.0.default_colormap }
}
pub struct IterRootScreen<'s>(xcb_screen_iterator_t<'s>);
impl<'s> Iterator for IterRootScreen<'s>
{
type Item = &'s Screen;
fn next(&mut self) -> Option<&'s Screen>
{
if self.0.rem <= 0 { None }
else { let p = self.0.data as *mut _; unsafe { xcb_screen_next(&mut self.0); Some(&*p) } }
}
}
pub type WindowID = xcb_window_t;
pub struct Window(WindowID);
impl Window
{
pub(crate) fn id(&self) -> WindowID { self.0 }
pub fn replace_property<T: PropertyType + ?Sized>(&self, con: &Connection, property: Atom, value: &T)
{
value.change_property_of(con, self, property, XCB_PROP_MODE_REPLACE)
}
}
pub trait PropertyType
{
const TYPE_ATOM: Atom; const DATA_STRIDE: u32;
fn change_property_of(&self, connection: &Connection, window: &Window, property: Atom, mode: u32);
}
impl PropertyType for str
{
const TYPE_ATOM: Atom = XCB_ATOM_STRING; const DATA_STRIDE: u32 = 8;
fn change_property_of(&self, con: &Connection, window: &Window, props: Atom, mode: u32)
{
unsafe
{
xcb_change_property(con.0, mode as _, window.0, props, XCB_ATOM_STRING, 8,
self.len() as _, self.as_ptr() as _);
}
}
}
impl PropertyType for Atom
{
const TYPE_ATOM: Atom = XCB_ATOM_ATOM; const DATA_STRIDE: u32 = 32;
fn change_property_of(&self, con: &Connection, window: &Window, props: Atom, mode: u32)
{
unsafe
{
xcb_change_property(con.0, mode as _, window.0, props, XCB_ATOM_ATOM, 32, 1,
self as *const Atom as *const _);
}
}
}
impl<E: PropertyType> PropertyType for [E]
{
const TYPE_ATOM: Atom = E::TYPE_ATOM; const DATA_STRIDE: u32 = E::DATA_STRIDE;
fn change_property_of(&self, con: &Connection, window: &Window, props: Atom, mode: u32)
{
unsafe
{
xcb_change_property(con.0, mode as _, window.0, props, E::TYPE_ATOM, E::DATA_STRIDE as _,
self.len() as _, self.as_ptr() as _);
}
}
}
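// The format argument passed to xcb_change_property (8 or 32 above) is the
// per-element size in bits, which is what DATA_STRIDE records. Hypothetical
// usage sketch built on these impls:
//
//     window.replace_property(&con, XCB_ATOM_WM_NAME, "my window title");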
pub use self::xcb::ffi::XCB_ATOM_WM_NAME;
pub struct CheckedCookie<'s>(xcb_void_cookie_t, &'s Connection);
impl<'s> CheckedCookie<'s>
{
pub fn check(&self) -> Result<(), GenericError>
{
let r = unsafe { xcb_request_check(self.1.0, self.0) };
if r.is_null() { Ok(()) } else { Err(unsafe { GenericError::from_ptr(r) }) }
}
}
pub struct AtomCookie<'s>(xcb_intern_atom_cookie_t, &'s Connection);
pub type Atom = xcb_atom_t;
impl<'s> AtomCookie<'s>
{
pub fn reply(self) -> Result<Atom, GenericError>
{
let mut _eptr = null_mut();
let r = unsafe { xcb_intern_atom_reply(self.1.0, self.0, &mut _eptr) };
if r.is_null() { Err(unsafe { GenericError::from_ptr(_eptr) }) } else { Ok(MallocBox(r).atom) }
}
}
use std::mem::transmute;
pub struct GenericEvent(MallocBox<xcb_generic_event_t>);
impl Connection
{
pub fn wait_event(&self) -> Option<GenericEvent>
{
let p = unsafe { xcb_wait_for_event(self.0) };
if p.is_null() { None } else { Some(GenericEvent(MallocBox(p))) }
}
pub fn poll_event(&self) -> Option<GenericEvent>
{
let p = unsafe { xcb_poll_for_event(self.0) };
if p.is_null() { None } else { Some(GenericEvent(MallocBox(p))) }
}
}
impl GenericEvent
{
pub fn response_type(&self) -> u8 { self.0.response_type & !0x80 }
}
pub struct ClientMessageEvent(MallocBox<xcb_client_message_event_t>);
impl ClientMessageEvent
{
pub fn msg_type(&self) -> xcb_atom_t { self.0.type_ }
pub fn data_as_u32(&self) -> u32 { unsafe { *(self.0.data.data.as_ptr() as *const u32) } }
}
pub struct ExposeEvent(MallocBox<xcb_expose_event_t>);
pub struct GenericError(MallocBox<xcb_generic_error_t>);
impl GenericError
{
unsafe fn from_ptr(p: *mut xcb_generic_error_t) -> Self { GenericError(MallocBox(p)) }
}
impl Debug for GenericError
{
fn fmt(&self, fmt: &mut Formatter) -> FmtResult { write!(fmt, "GenericError(code={})", (*self.0).error_code) }
}
impl Display for GenericError
{
fn fmt(&self, fmt: &mut Formatter) -> FmtResult { <Self as Debug>::fmt(self, fmt) }
}
impl From<GenericError> for IOError
{
fn from(v: GenericError) -> IOError { IOError::new(ErrorKind::Other, Box::new(v)) }
}
impl ::std::error::Error for GenericError
{
fn description(&self) -> &str { "XCB Generic Error" }
fn cause(&self) -> Option<&::std::error::Error> { None }
}
unsafe impl Send for GenericError {}
unsafe impl Sync for GenericError {}
pub trait Event
{
const RESPONSE_ENUM: u8;
unsafe fn from_ref(g: &GenericEvent) -> &Self;
}
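// Intended downcast pattern (a sketch; the caller is expected to check the
// tag first, since transmuting without the check would reinterpret the wrong
// event struct):
//
//     if ev.response_type() == ClientMessageEvent::RESPONSE_ENUM {
//         let cm: &ClientMessageEvent = unsafe { ClientMessageEvent::from_ref(&ev) };
//     }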
impl Event for ClientMessageEvent
{
const RESPONSE_ENUM: u8 = XCB_CLIENT_MESSAGE;
unsafe fn from_ref(g: &GenericEvent) -> &Self { transmute(g) }
}
impl Event for ExposeEvent
{
const RESPONSE_ENUM: u8 = XCB_EXPOSE;
unsafe fn from_ref(g: &GenericEvent) -> &Self { transmute(g) }
}
impl Event for GenericError
{
const RESPONSE_ENUM: u8 = 0; // unused
unsafe fn from_ref(g: &GenericEvent) -> &Self { transmute(g) }
}
#[repr(C)] pub struct Depth(xcb_depth_t);
impl Depth
{
pub fn depth(&self) -> u8 { self.0.depth }
}
pub struct IterDepths<'c>(xcb_depth_iterator_t<'c>);
impl<'c> Iterator for IterDepths<'c>
{
type Item = &'c Depth;
fn next(&mut self) -> Option<&'c Depth>
{
if self.0.rem <= 0 { None }
else { let p = self.0.data as *mut _; unsafe { xcb_depth_next(&mut self.0); Some(&*p) } }
}
fn size_hint(&self) -> (usize, Option<usize>) { (self.0.rem as _, Some(self.0.rem as _)) }
}
impl Screen
{
pub fn iter_allowed_depths(&self) -> IterDepths { IterDepths(unsafe { xcb_screen_allowed_depths_iterator(&self.0) }) }
}
pub type VisualID = xcb_visualid_t;
#[repr(C)] pub struct VisualType(xcb_visualtype_t);
impl VisualType
{
pub fn id(&self) -> VisualID { self.0.visual_id }
pub fn is_truecolor(&self) -> bool { self.0.class == XCB_VISUAL_CLASS_TRUE_COLOR as _ }
}
pub struct IterVisualTypes<'c>(xcb_visualtype_iterator_t, PhantomData<&'c Connection>);
impl<'c> Iterator for IterVisualTypes<'c>
{
type Item = &'c VisualType;
fn next(&mut self) -> Option<&'c VisualType>
{
if self.0.rem <= 0 { None }
else { let p = self.0.data as *mut _; unsafe { xcb_visualtype_next(&mut self.0); Some(&*p) } }
}
}
impl Depth
{
pub fn iter_visuals(&self) -> IterVisualTypes
{
IterVisualTypes(unsafe { xcb_depth_visuals_iterator(&self.0) }, PhantomData)
}
}
#[allow(non_camel_case_types)]
pub type xcb_bool32_t = u32;
#[repr(C)] #[allow(non_camel_case_types)]
pub struct xcb_create_window_value_list_t
{
pub background_pixmap: xcb_pixmap_t, pub background_pixel: u32,
pub border_pixmap: xcb_pixmap_t, pub border_pixel: u32,
pub bit_gravity: u32, pub win_gravity: u32, pub backing_store: u32, pub backing_planes: u32, pub backing_pixel: u32,
pub override_redirect: xcb_bool32_t, pub save_under: xcb_bool32_t, pub event_mask: u32,
pub do_not_propagate_mask: u32, pub colormap: xcb_colormap_t, pub cursor: xcb_cursor_t
}
extern "C"
{
fn xcb_create_window_value_list_serialize(buffer: *mut *mut ::libc::c_void, value_mask: u32,
aux: *const xcb_create_window_value_list_t) -> ::libc::c_int;
}
#[repr(C)]
pub struct WindowValueList(u32, xcb_create_window_value_list_t);
impl WindowValueList
{
pub fn new() -> Self { WindowValueList(0, unsafe { ::std::mem::zeroed() }) }
pub fn border_pixel(&mut self, p: u32) -> &mut Self
{
self.0 |= XCB_CW_BORDER_PIXEL; self.1.border_pixel = p; self
}
pub fn back_pixel(&mut self, p: u32) -> &mut Self
{
self.0 |= XCB_CW_BACK_PIXEL; self.1.background_pixel = p; self
}
pub fn colormap(&mut self, c: &Colormap) -> &mut Self
{
self.0 |= XCB_CW_COLORMAP; self.1.colormap = c.id(); self
}
pub fn eventmask(&mut self, m: xcb_event_mask_t) -> &mut Self
{
self.0 |= XCB_CW_EVENT_MASK; self.1.event_mask = m; self
}
pub fn serialize(&self) -> MallocBox<::libc::c_void>
{
let mut p = null_mut();
unsafe { xcb_create_window_value_list_serialize(&mut p, self.0, &self.1) };
MallocBox(p)
}
}
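// The builder keeps the XCB_CW_* mask in field .0 alongside the value struct
// in .1, so chained setters compose before a single serialize call. Hedged
// sketch:
//
//     let mut values = WindowValueList::new();
//     values.back_pixel(0x00ff_ffff).eventmask(XCB_EVENT_MASK_EXPOSURE);
//     con.create_window(None, &w, None, 0, 0, 640, 480, 0,
//         WindowIOClass::InputOutput, None, &values)?;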
pub struct Colormap(xcb_colormap_t);
impl Colormap
{
pub fn new(con: &Connection, visual: VisualID, window: xcb_window_t) -> Self
{
let id = con.new_id();
unsafe { xcb_create_colormap(con.0, XCB_COLORMAP_ALLOC_NONE as _, id, window, visual) }; Colormap(id)
}
pub fn id(&self) -> xcb_colormap_t { self.0 }
}
pub use self::xcb::ffi::{
XCB_EVENT_MASK_EXPOSURE
};
use std::ops::{Deref, DerefMut};
use std::fmt::{Debug, Display, Formatter, Result as FmtResult};
/// Owned malloc-ed pointer box
pub struct MallocBox<T: ?Sized>(pub *mut T);
impl<T: ?Sized> Deref for MallocBox<T> { type Target = T; fn deref(&self) -> &T { unsafe { &*self.0 } } }
impl<T: ?Sized> DerefMut for MallocBox<T> { fn deref_mut(&mut self) -> &mut T { unsafe { &mut *self.0 } } }
impl<T: ?Sized> Drop for MallocBox<T>
{
fn drop(&mut self) { unsafe { ::libc::free(self.0 as *mut _) } }
}
impl<T: ?Sized> Debug for MallocBox<T> where T: Debug
{
fn fmt(&self, fmt: &mut Formatter) -> FmtResult { <T as Debug>::fmt(&self, fmt) }
}
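// Note: libxcb allocates replies, events and errors with malloc() and expects
// the caller to release them with free(), which is why MallocBox frees through
// ::libc::free rather than letting Rust's allocator touch the pointer.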
// git.rs
//! Getting the Git status of files and directories.
use std::ffi::OsStr;
#[cfg(target_family = "unix")]
use std::os::unix::ffi::OsStrExt;
use std::path::{Path, PathBuf};
use std::sync::Mutex;
use log::*;
use crate::fs::fields as f;
/// A **Git cache** is assembled based on the user’s input arguments.
///
/// This uses vectors to avoid the overhead of hashing: it’s not worth it when the
/// expected number of Git repositories per exa invocation is 0 or 1...
pub struct GitCache {
/// A list of discovered Git repositories and their paths.
repos: Vec<GitRepo>,
/// Paths that we’ve confirmed do not have Git repositories underneath them.
misses: Vec<PathBuf>,
}
impl GitCache {
pub fn has_anything_for(&self, index: &Path) -> bool {
self.repos.iter().any(|e| e.has_path(index))
}
pub fn get(&self, index: &Path, prefix_lookup: bool) -> f::Git {
self.repos.iter()
.find(|e| e.has_path(index))
.map(|repo| repo.search(index, prefix_lookup))
.unwrap_or_default()
}
}
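// Hedged usage sketch (names assumed for illustration): the cache is built
// once from the set of paths being listed, then queried per entry.
//
//     let cache: GitCache = vec![PathBuf::from(".")].into_iter().collect();
//     let git_field = cache.get(Path::new("./src/main.rs"), false);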
use std::iter::FromIterator;
impl FromIterator<PathBuf> for GitCache {
fn from_iter<I>(iter: I) -> Self
where I: IntoIterator<Item=PathBuf>
{
let iter = iter.into_iter();
let mut git = Self {
repos: Vec::with_capacity(iter.size_hint().0),
misses: Vec::new(),
};
for path in iter {
if git.misses.contains(&path) {
debug!("Skipping {:?} because it already came back Gitless", path);
}
else if git.repos.iter().any(|e| e.has_path(&path)) {
debug!("Skipping {:?} because we already queried it", path);
}
else {
match GitRepo::discover(path) {
Ok(r) => {
if let Some(r2) = git.repos.iter_mut().find(|e| e.has_workdir(&r.workdir)) {
debug!("Adding to existing repo (workdir matches with {:?})", r2.workdir);
r2.extra_paths.push(r.original_path);
continue;
}
debug!("Discovered new Git repo");
git.repos.push(r);
}
Err(miss) => {
git.misses.push(miss)
}
}
}
}
git
}
}
/// A **Git repository** is one we’ve discovered somewhere on the filesystem.
pub struct GitRepo {
/// The queryable contents of the repository: either a `git2` repo, or the
/// cached results from when we queried it last time.
contents: Mutex<GitContents>,
/// The working directory of this repository.
/// This is used to check whether two repositories are the same.
workdir: PathBuf,
/// The path that was originally checked to discover this repository.
/// This is as important as the extra_paths (it gets checked first), but
/// is separate to avoid having to deal with a non-empty Vec.
original_path: PathBuf,
/// Any other paths that were checked only to result in this same
/// repository.
extra_paths: Vec<PathBuf>,
}
/// A repository’s queried state.
enum GitContents {
/// All the interesting Git stuff goes through this.
Before {
repo: git2::Repository,
},
/// Temporary value used in `repo_to_statuses` so we can move the
/// repository out of the `Before` variant.
Processing,
/// The data we’ve extracted from the repository, but only after we’ve
/// actually done so.
After {
statuses: Git,
},
}
impl GitRepo {
/// Searches through this repository for a path (to a file or directory,
/// depending on the prefix-lookup flag) and returns its Git status.
///
/// Actually querying the `git2` repository for the mapping of paths to
/// Git statuses is only done once, and gets cached so we don’t need to
/// re-query the entire repository the times after that.
///
/// The temporary `Processing` enum variant is used after the `git2`
/// repository is moved out, but before the results have been moved in!
/// See <https://stackoverflow.com/q/45985827/3484614>
fn search(&self, index: &Path, prefix_lookup: bool) -> f::Git {
use std::mem::replace;
let mut contents = self.contents.lock().unwrap();
if let GitContents::After { ref statuses } = *contents {
debug!("Git repo {:?} has been found in cache", &self.workdir);
return statuses.status(index, prefix_lookup);
}
debug!("Querying Git repo {:?} for the first time", &self.workdir);
let repo = replace(&mut *contents, GitContents::Processing).inner_repo();
let statuses = repo_to_statuses(&repo, &self.workdir);
let result = statuses.status(index, prefix_lookup);
let _processing = replace(&mut *contents, GitContents::After { statuses });
result
}
/// Whether this repository has the given working directory.
fn has_workdir(&self, path: &Path) -> bool {
self.workdir == path
}
/// Whether this repository cares about the given path at all.
fn has_path(&self, path: &Path) -> bool {
path.starts_with(&self.original_path) || self.extra_paths.iter().any(|e| path.starts_with(e))
}
/// Searches for a Git repository at any point above the given path.
/// Returns the original buffer if none is found.
fn discover(path: PathBuf) -> Result<Self, PathBuf> {
info!("Searching for Git repository above {:?}", path);
let repo = match git2::Repository::discover(&path) {
Ok(r) => r,
Err(e) => {
error!("Error discovering Git repositories: {:?}", e);
return Err(path);
}
};
if let Some(workdir) = repo.workdir() {
let workdir = workdir.to_path_buf();
let contents = Mutex::new(GitContents::Before { repo });
Ok(Self { contents, workdir, original_path: path, extra_paths: Vec::new() })
}
else {
warn!("Repository has no workdir?");
Err(path)
}
}
}
impl GitContents {
/// Assumes that the repository hasn’t been queried, and extracts it
/// (consuming the value) if it has. This is needed because the entire
/// enum variant gets replaced when a repo is queried (see above).
fn inner_repo(self) -> git2::Repository {
if let Self::Before { repo } = self {
repo
}
else {
unreachable!("Tried to extract a non-Repository")
}
}
}
/// Iterates through a repository’s statuses, consuming it and returning the
/// mapping of files to their Git status.
/// We will have already used the working directory at this point, so it gets
/// passed in rather than deriving it from the `Repository` again.
fn repo_to_statuses(repo: &git2::Repository, workdir: &Path) -> Git {
let mut statuses = Vec::new();
info!("Getting Git statuses for repo with workdir {:?}", workdir);
match repo.statuses(None) {
Ok(es) => {
for e in es.iter() {
#[cfg(target_family = "unix")]
let path = workdir.join(Path::new(OsStr::from_bytes(e.path_bytes())));
// TODO: handle non Unix systems better:
// https://github.com/ogham/exa/issues/698
#[cfg(not(target_family = "unix"))]
let path = workdir.join(Path::new(e.path().unwrap()));
let elem = (path, e.status());
statuses.push(elem);
}
}
Err(e) => {
error!("Error looking up Git statuses: {:?}", e);
}
}
Git { statuses }
}
// The `repo.statuses` call above takes a long time. exa debug output:
//
// 20.311276 INFO:exa::fs::feature::git: Getting Git statuses for repo with workdir "/vagrant/"
// 20.799610 DEBUG:exa::output::table: Getting Git status for file "./Cargo.toml"
//
// Even inserting another logging line immediately afterwards doesn’t make it
// look any faster.
/// Container of Git statuses for all the files in this folder’s Git repository.
struct Git {
statuses: Vec<(PathBuf, git2::Status)>,
}
impl Git {
/// Get either the file or directory status for the given path.
/// “Prefix lookup” means that it should report an aggregate status of all
/// paths starting with the given prefix (in other words, a directory).
fn status(&self, index: &Path, prefix_lookup: bool) -> f::Git {
if prefix_lookup { self.dir_status(index) }
else { self.file_status(index) }
}
/// Get the user-facing status of a file.
/// We check the statuses directly applying to a file, and for the ignored
/// status we check if any of its parent directories is ignored by git.
fn file_status(&self, file: &Path) -> f::Git {
let path = reorient(file);
let s = self.statuses.iter()
.filter(|p| if p.1 == git2::Status::IGNORED {
path.starts_with(&p.0)
} else {
p.0 == path
})
.fold(git2::Status::empty(), |a, b| a | b.1);
let staged = index_status(s);
let unstaged = working_tree_status(s);
f::Git { staged, unstaged }
}
/// Get the combined, user-facing status of a directory.
/// Statuses are aggregating (for example, a directory is considered
/// modified if any file under it has the status modified), except for
/// ignored status which applies to files under (for example, a directory
/// is considered ignored if one of its parent directories is ignored).
fn dir_status(&self, dir: &Path) -> f::Git {
let path = reorient(dir);
let s = self.statuses.iter()
.filter(|p| if p.1 == git2::Status::IGNORED {
path.starts_with(&p.0)
} else {
p.0.starts_with(&path)
})
.fold(git2::Status::empty(), |a, b| a | b.1);
let staged = index_status(s);
let unstaged = working_tree_status(s);
f::Git { staged, unstaged }
}
}
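// Worked example (hypothetical statuses): given [("a/b.rs", WT_MODIFIED),
// ("a/c.rs", INDEX_NEW)], dir_status("a") ORs both flags together, so the
// directory reports staged = New and unstaged = Modified at the same time.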
/// Converts a path to an absolute path based on the current directory.
/// Paths need to be absolute for them to be compared properly, otherwise
/// you’d ask a repo about “./README.md” but it only knows about
/// “/vagrant/README.md”, prefixed by the workdir.
#[cfg(unix)]
fn reorient(path: &Path) -> PathBuf {
use std::env::current_dir;
// TODO: I’m not 100% on this func tbh
let path = match current_dir() {
Err(_) => Path::new(".").join(&path),
Ok(dir) => dir.join(&path),
};
path.canonicalize().unwrap_or(path)
}
#[cfg(windows)]
fn reorient(path: &Path) -> PathBuf {
let unc_path = path.canonicalize().unwrap();
// On Windows a UNC path is returned. We need to strip the prefix for it to work.
let normal_path = unc_path.as_os_str().to_str().unwrap().trim_left_matches("\\\\?\\");
return PathBuf::from(normal_path);
}
/// The character to display if the file has been modified, but not staged.
fn working_tree_status(status: git2::Status) -> f::GitStatus {
match status {
s if s.contains(git2::Status::WT_NEW) => f::GitStatus::New,
s if s.contains(git2::Status::WT_MODIFIED) => f::GitStatus::Modified,
s if s.contains(git2::Status::WT_DELETED) => f::GitStatus::Deleted,
s if s.contains(git2::Status::WT_RENAMED) => f::GitStatus::Renamed,
s if s.contains(git2::Status::WT_TYPECHANGE) => f::GitStatus::TypeChange,
s if s.contains(git2::Status::IGNORED) => f::GitStatus::Ignored,
s if s.contains(git2::Status::CONFLICTED) => f::GitStatus::Conflicted,
_ => f::GitStatus::NotModified,
}
}
/// The character to display if the file has been modified and the change
/// has been staged.
fn index_status(status: git2::Status) -> f::GitStatus {
match status {
s if s.contains(git2::Status::INDEX_NEW) => f::GitStatus::New,
s if s.contains(git2::Status::INDEX_MODIFIED) => f::GitStatus::Modified,
s if s.contains(git2::Status::INDEX_DELETED) => f::GitStatus::Deleted,
s if s.contains(git2::Status::INDEX_RENAMED) => f::GitStatus::Renamed,
s if s.contains(git2::Status::INDEX_TYPECHANGE) => f::GitStatus::TypeChange,
_ => f::GitStatus::NotModified,
}
}
// structure.rs
mod iter;
use super::{
drop_items::DropItem,
dyn_iter::{DynIter, DynIterMut},
items::ItemType,
underground_belt::UnderDirection,
water_well::FluidBox,
FactorishState, Inventory, InventoryTrait, Recipe,
};
use rotate_enum::RotateEnum;
use serde::{Deserialize, Serialize};
use std::borrow::Cow;
use wasm_bindgen::prelude::*;
use web_sys::CanvasRenderingContext2d;
#[macro_export]
macro_rules! serialize_impl {
() => {
fn serialize(&self) -> serde_json::Result<serde_json::Value> {
serde_json::to_value(self)
}
};
}
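// Hypothetical usage sketch: a concrete structure that derives Serialize can
// pull in the boilerplate serialize method like so (the Chest type is assumed
// here for illustration):
//
//     impl Structure for Chest {
//         /* ...the other trait methods... */
//         serialize_impl!();
//     }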
#[macro_export]
macro_rules! draw_fuel_alarm {
($self_:expr, $state:expr, $context:expr) => {
if $self_.recipe.is_some() && $self_.power == 0. && $state.sim_time % 1. < 0.5 {
if let Some(img) = $state.image_fuel_alarm.as_ref() {
let (x, y) = (
$self_.position.x as f64 * 32.,
$self_.position.y as f64 * 32.,
);
$context.draw_image_with_image_bitmap(&img.bitmap, x, y)?;
} else {
return js_err!("fuel alarm image not available");
}
}
};
}
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub(crate) struct StructureId {
pub id: u32,
pub gen: u32,
}
pub(crate) struct StructureEntryIterator<'a>(&'a mut [StructureEntry], &'a mut [StructureEntry]);
impl<'a> DynIter for StructureEntryIterator<'a> {
type Item = StructureEntry;
fn dyn_iter(&self) -> Box<dyn Iterator<Item = &Self::Item> + '_> {
Box::new(self.0.iter().chain(self.1.iter()))
}
fn as_dyn_iter(&self) -> &dyn DynIter<Item = Self::Item> {
self
}
}
impl<'a> DynIterMut for StructureEntryIterator<'a> {
fn dyn_iter_mut(&mut self) -> Box<dyn Iterator<Item = &mut Self::Item> + '_> {
Box::new(self.0.iter_mut().chain(self.1.iter_mut()))
}
}
pub(crate) use self::iter::StructureDynIter;
#[derive(Eq, PartialEq, Hash, Copy, Clone, Debug, Serialize, Deserialize)]
pub(crate) struct Position {
pub x: i32,
pub y: i32,
}
impl Position {
pub fn new(x: i32, y: i32) -> Self {
Self { x, y }
}
pub(crate) fn div_mod(&self, size: i32) -> (Position, Position) {
let div = Position::new(self.x.div_euclid(size), self.y.div_euclid(size));
let mod_ = Position::new(self.x.rem_euclid(size), self.y.rem_euclid(size));
(div, mod_)
}
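// e.g. Position::new(-3, 5).div_mod(16) == (Position::new(-1, 0), Position::new(13, 5));
// div_euclid/rem_euclid keep the remainder non-negative, so chunk lookups also
// work for negative world coordinates.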
pub(crate) fn add(&self, o: (i32, i32)) -> Position {
Self {
x: self.x + o.0,
y: self.y + o.1,
}
}
pub(crate) fn distance(&self, position: &Position) -> i32 {
(position.x - self.x).abs().max((position.y - self.y).abs())
}
/// Check whether the positions are neighbors. Return false if they are exactly the same.
#[allow(dead_code)]
pub(crate) fn is_neighbor(&self, pos2: &Position) -> bool {
[[-1, 0], [0, -1], [1, 0], [0, 1]].iter().any(|rel_pos| {
let pos = Position {
x: pos2.x + rel_pos[0],
y: pos2.y + rel_pos[1],
};
*self == pos
})
}
pub(crate) fn neighbor_index(&self, pos2: &Position) -> Option<u32> {
for (i, rel_pos) in [[-1, 0], [0, -1], [1, 0], [0, 1]].iter().enumerate() {
let pos = Position {
x: pos2.x + rel_pos[0],
y: pos2.y + rel_pos[1],
};
if *self == pos {
return Some(i as u32);
}
}
None
}
}
impl From<&[i32; 2]> for Position {
fn from(xy: &[i32; 2]) -> Self {
Self { x: xy[0], y: xy[1] }
}
}
pub(crate) struct Size {
pub width: i32,
pub height: i32,
}
pub(crate) struct BoundingBox {
pub x0: i32,
pub y0: i32,
pub x1: i32,
pub y1: i32,
}
#[derive(Copy, Clone, Serialize, Deserialize, RotateEnum, PartialEq)]
pub(crate) enum Rotation {
Left,
Top,
Right,
Bottom,
}
impl Rotation {
pub fn delta(&self) -> (i32, i32) {
match self {
Rotation::Left => (-1, 0),
Rotation::Top => (0, -1),
Rotation::Right => (1, 0),
Rotation::Bottom => (0, 1),
}
}
pub fn delta_inv(&self) -> (i32, i32) {
let delta = self.delta();
(-delta.0, -delta.1)
}
pub fn angle_deg(&self) -> i32 {
self.angle_4() * 90
}
pub fn angle_4(&self) -> i32 {
match self {
Rotation::Left => 2,
Rotation::Top => 3,
Rotation::Right => 0,
Rotation::Bottom => 1,
}
}
pub fn angle_rad(&self) -> f64 {
self.angle_deg() as f64 * std::f64::consts::PI / 180.
}
pub fn is_horizontal(&self) -> bool {
matches!(self, Rotation::Left | Rotation::Right)
}
pub fn is_vertcial(&self) -> bool {
!self.is_horizontal()
}
}
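// Convention sketch: Rotation::Right is angle 0 and angle_4 counts 90-degree
// steps clockwise, so e.g. Rotation::Bottom.delta() == (0, 1) points one tile
// down in screen coordinates (y grows downward).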
pub(crate) enum FrameProcResult {
None,
InventoryChanged(Position),
}
pub(crate) enum ItemResponse {
Move(i32, i32),
Consume,
}
pub(crate) type ItemResponseResult = (ItemResponse, Option<FrameProcResult>);
#[derive(Debug)]
pub(crate) enum RotateErr {
NotFound,
NotSupported,
Other(JsValue),
}
pub(crate) trait Structure {
fn name(&self) -> &str;
fn position(&self) -> &Position;
fn rotation(&self) -> Option<Rotation> {
None
}
/// Specialized method to get underground belt direction.
/// We would rather not put this on the Structure trait, but we have no option
/// as long as we use trait object polymorphism.
/// TODO: Revisit when migrating to ECS.
fn under_direction(&self) -> Option<UnderDirection> {
None
}
fn size(&self) -> Size {
Size {
width: 1,
height: 1,
}
}
fn bounding_box(&self) -> BoundingBox {
let (position, size) = (self.position(), self.size());
BoundingBox {
x0: position.x,
y0: position.y,
x1: position.x + size.width,
y1: position.y + size.height,
}
}
fn contains(&self, pos: &Position) -> bool {
let bb = self.bounding_box();
bb.x0 <= pos.x && pos.x < bb.x1 && bb.y0 <= pos.y && pos.y < bb.y1
}
fn draw(
&self,
state: &FactorishState,
context: &CanvasRenderingContext2d,
depth: i32,
is_tooptip: bool,
) -> Result<(), JsValue>;
fn desc(&self, _state: &FactorishState) -> String {
String::from("")
}
fn frame_proc(
&mut self,
_me: StructureId,
_state: &mut FactorishState,
_structures: &mut StructureDynIter,
) -> Result<FrameProcResult, ()> {
Ok(FrameProcResult::None)
}
/// event handler for construction events around the structure.
fn on_construction(
&mut self,
_other_id: StructureId,
_other: &dyn Structure,
_others: &StructureDynIter,
_construct: bool,
) -> Result<(), JsValue> {
Ok(())
}
/// event handler for construction events for this structure itself.
fn on_construction_self(
&mut self,
_id: StructureId,
_others: &StructureDynIter,
_construct: bool,
) -> Result<(), JsValue> {
Ok(())
}
fn movable(&self) -> bool {
false
}
fn rotate(
&mut self,
_state: &mut FactorishState,
_others: &StructureDynIter,
) -> Result<(), RotateErr> {
Err(RotateErr::NotSupported)
}
fn set_rotation(&mut self, _rotation: &Rotation) -> Result<(), ()> {
Err(())
}
/// Called every frame for each item that is on this structure.
fn item_response(&mut self, _item: &DropItem) -> Result<ItemResponseResult, ()> {
Err(())
}
fn input(&mut self, _o: &DropItem) -> Result<(), JsValue> {
Err(JsValue::from_str("Not supported"))
}
/// Returns whether the structure can accept an item as the input. If this structure is a factory
/// that returns recipes by get_selected_recipe(), it will check if it's in the inputs.
fn can_input(&self, item_type: &ItemType) -> bool {
if let Some(recipe) = self.get_selected_recipe() {
recipe.input.get(item_type).is_some()
} else {
false
}
}
/// Query a set of items that this structure can output. Actual output would not happen until `output()`, thus
/// this method is immutable. It should return an empty Inventory if it cannot output anything.
fn can_output(&self, _structures: &StructureDynIter) -> Inventory {
Inventory::new()
}
/// Perform the actual output. The operation should always succeed since the ability to output is checked beforehand
/// with `can_output`.
fn output(&mut self, _state: &mut FactorishState, _item_type: &ItemType) -> Result<(), ()> {
Err(())
}
fn burner_inventory(&self) -> Option<&Inventory> {
None
}
fn add_burner_inventory(&mut self, _item_type: &ItemType, _amount: isize) -> isize {
0
}
fn burner_energy(&self) -> Option<(f64, f64)> {
None
}
fn inventory(&self, _is_input: bool) -> Option<&Inventory> {
None
}
fn inventory_mut(&mut self, _is_input: bool) -> Option<&mut Inventory> {
None
}
/// Some structures don't have an inventory, but still can have some item, e.g. inserter hands.
/// We need to retrieve them when we destroy such a structure, or we might lose items into the void.
/// It will take away the inventory by default, destroying the instance's inventory.
fn destroy_inventory(&mut self) -> Inventory {
let mut ret = self
.inventory_mut(true)
.map_or(Inventory::new(), |inventory| std::mem::take(inventory));
ret.merge(
self.inventory_mut(false)
.map_or(Inventory::new(), |inventory| std::mem::take(inventory)),
);
ret
}
/// Returns a list of recipes. The return value is wrapped in a Cow because some
/// structures can return a dynamically configured list of recipes, while others
/// have a static, fixed list. In reality, all our structures return a fixed list though.
fn get_recipes(&self) -> Cow<[Recipe]> {
Cow::from(&[][..])
}
fn select_recipe(&mut self, _index: usize) -> Result<bool, JsValue> {
Err(JsValue::from_str("recipes not available"))
}
fn get_selected_recipe(&self) -> Option<&Recipe> {
None
}
fn fluid_box(&self) -> Option<Vec<&FluidBox>> {
None
}
fn fluid_box_mut(&mut self) -> Option<Vec<&mut FluidBox>> {
None
}
fn connection(
&self,
state: &FactorishState,
structures: &dyn DynIter<Item = StructureEntry>,
) -> [bool; 4] {
// let mut structures_copy = structures.clone();
let has_fluid_box = |x, y| {
if x < 0 || state.width <= x as u32 || y < 0 || state.height <= y as u32 {
return false;
}
if let Some(structure) = structures
.dyn_iter()
.filter_map(|s| s.dynamic.as_deref())
.find(|s| *s.position() == Position { x, y })
{
return structure.fluid_box().is_some();
}
false
};
// Fluid containers connect to other containers
let Position { x, y } = *self.position();
let l = has_fluid_box(x - 1, y);
let t = has_fluid_box(x, y - 1);
let r = has_fluid_box(x + 1, y);
let b = has_fluid_box(x, y + 1);
[l, t, r, b]
}
/// If this structure can connect to power grid.
fn power_source(&self) -> bool {
false
}
/// If this structure drains power from the grid
fn power_sink(&self) -> bool {
false
}
/// Try to drain power from this structure.
/// @param demand in kilojoules.
/// @returns None if it does not support power supply.
fn power_outlet(&mut self, _demand: f64) -> Option<f64> {
None
}
fn wire_reach(&self) -> u32 {
3
}
fn serialize(&self) -> serde_json::Result<serde_json::Value>;
}
pub(crate) type StructureBoxed = Box<dyn Structure>;
pub(crate) struct StructureEntry {
pub gen: u32,
pub dynamic: Option<StructureBoxed>,
}
structure.rs | mod iter;
use super::{
drop_items::DropItem,
dyn_iter::{DynIter, DynIterMut},
items::ItemType,
underground_belt::UnderDirection,
water_well::FluidBox,
FactorishState, Inventory, InventoryTrait, Recipe,
};
use rotate_enum::RotateEnum;
use serde::{Deserialize, Serialize};
use std::borrow::Cow;
use wasm_bindgen::prelude::*;
use web_sys::CanvasRenderingContext2d;
#[macro_export]
macro_rules! serialize_impl {
() => {
fn serialize(&self) -> serde_json::Result<serde_json::Value> {
serde_json::to_value(self)
}
};
}
#[macro_export]
macro_rules! draw_fuel_alarm {
($self_:expr, $state:expr, $context:expr) => {
if $self_.recipe.is_some() && $self_.power == 0. && $state.sim_time % 1. < 0.5 {
if let Some(img) = $state.image_fuel_alarm.as_ref() {
let (x, y) = (
$self_.position.x as f64 * 32.,
$self_.position.y as f64 * 32.,
);
$context.draw_image_with_image_bitmap(&img.bitmap, x, y)?;
} else {
return js_err!("fuel alarm image not available");
}
}
};
}
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub(crate) struct StructureId {
pub id: u32,
pub gen: u32,
}
pub(crate) struct StructureEntryIterator<'a>(&'a mut [StructureEntry], &'a mut [StructureEntry]);
impl<'a> DynIter for StructureEntryIterator<'a> {
type Item = StructureEntry;
fn dyn_iter(&self) -> Box<dyn Iterator<Item = &Self::Item> + '_> {
Box::new(self.0.iter().chain(self.1.iter()))
}
fn as_dyn_iter(&self) -> &dyn DynIter<Item = Self::Item> {
self
}
}
impl<'a> DynIterMut for StructureEntryIterator<'a> {
fn dyn_iter_mut(&mut self) -> Box<dyn Iterator<Item = &mut Self::Item> + '_> {
Box::new(self.0.iter_mut().chain(self.1.iter_mut()))
}
}
pub(crate) use self::iter::StructureDynIter;
#[derive(Eq, PartialEq, Hash, Copy, Clone, Debug, Serialize, Deserialize)]
pub(crate) struct Position {
pub x: i32,
pub y: i32,
}
impl Position {
pub fn new(x: i32, y: i32) -> Self {
Self { x, y }
}
pub(crate) fn div_mod(&self, size: i32) -> (Position, Position) {
let div = Position::new(self.x.div_euclid(size), self.y.div_euclid(size));
let mod_ = Position::new(self.x.rem_euclid(size), self.y.rem_euclid(size));
(div, mod_)
}
pub(crate) fn add(&self, o: (i32, i32)) -> Position {
Self {
x: self.x + o.0,
y: self.y + o.1,
}
}
pub(crate) fn distance(&self, position: &Position) -> i32 {
(position.x - self.x).abs().max((position.y - self.y).abs())
}
/// Check whether the positions are neighbors. Return false if they are exactly the same.
#[allow(dead_code)]
pub(crate) fn is_neighbor(&self, pos2: &Position) -> bool {
[[-1, 0], [0, -1], [1, 0], [0, 1]].iter().any(|rel_pos| {
let pos = Position {
x: pos2.x + rel_pos[0],
y: pos2.y + rel_pos[1],
};
*self == pos
})
}
pub(crate) fn neighbor_index(&self, pos2: &Position) -> Option<u32> {
for (i, rel_pos) in [[-1, 0], [0, -1], [1, 0], [0, 1]].iter().enumerate() {
let pos = Position {
x: pos2.x + rel_pos[0],
y: pos2.y + rel_pos[1],
};
if *self == pos {
return Some(i as u32);
}
}
None
}
}
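// Usage sketch for the helpers above (values are hypothetical). `div_mod`
// splits a world coordinate into a chunk index plus an offset inside the
// chunk, and `distance` is the Chebyshev (chessboard) metric.
fn position_demo() {
    let p = Position::new(-3, 35);
    let (chunk, offset) = p.div_mod(32);
    assert_eq!((chunk.x, chunk.y), (-1, 1)); // div_euclid rounds toward -infinity
    assert_eq!((offset.x, offset.y), (29, 3)); // rem_euclid is always non-negative
    assert_eq!(p.distance(&Position::new(0, 35)), 3);
    // neighbor_index: 0..4 maps to the left, top, right, bottom of `pos2`.
    assert_eq!(Position::new(-4, 35).neighbor_index(&p), Some(0));
}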
impl From<&[i32; 2]> for Position {
fn from(xy: &[i32; 2]) -> Self {
Self { x: xy[0], y: xy[1] }
}
}
pub(crate) struct Size {
pub width: i32,
pub height: i32,
}
pub(crate) struct BoundingBox {
pub x0: i32,
pub y0: i32,
pub x1: i32,
pub y1: i32,
}
#[derive(Copy, Clone, Serialize, Deserialize, RotateEnum, PartialEq)]
pub(crate) enum Rotation {
Left,
Top,
Right,
Bottom,
}
impl Rotation {
pub fn delta(&self) -> (i32, i32) {
match self {
Rotation::Left => (-1, 0),
Rotation::Top => (0, -1),
Rotation::Right => (1, 0),
Rotation::Bottom => (0, 1),
}
}
pub fn delta_inv(&self) -> (i32, i32) {
let delta = self.delta();
(-delta.0, -delta.1)
}
pub fn angle_deg(&self) -> i32 {
self.angle_4() * 90
}
pub fn angle_4(&self) -> i32 {
match self {
Rotation::Left => 2,
Rotation::Top => 3,
Rotation::Right => 0,
Rotation::Bottom => 1,
}
}
pub fn angle_rad(&self) -> f64 {
self.angle_deg() as f64 * std::f64::consts::PI / 180.
}
pub fn is_horizontal(&self) -> bool {
matches!(self, Rotation::Left | Rotation::Right)
}
pub fn is_vertical(&self) -> bool {
!self.is_horizontal()
}
}
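// Sketch of the rotation conventions above: `Right` is the zero angle and
// angles grow clockwise in 90-degree steps (Bottom = 90, Left = 180, Top = 270).
fn rotation_demo() {
    assert_eq!(Rotation::Right.delta(), (1, 0));
    assert_eq!(Rotation::Right.angle_deg(), 0);
    assert_eq!(Rotation::Bottom.angle_deg(), 90);
    assert_eq!(Rotation::Left.delta_inv(), (1, 0)); // opposite of Left's (-1, 0)
    assert!(Rotation::Top.is_vertical() && !Rotation::Top.is_horizontal());
}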
pub(crate) enum FrameProcResult {
None,
InventoryChanged(Position),
}
pub(crate) enum ItemResponse {
Move(i32, i32),
Consume,
}
pub(crate) type ItemResponseResult = (ItemResponse, Option<FrameProcResult>);
#[derive(Debug)]
pub(crate) enum RotateErr {
NotFound,
NotSupported,
Other(JsValue),
}
pub(crate) trait Structure {
fn name(&self) -> &str;
fn position(&self) -> &Position;
fn rotation(&self) -> Option<Rotation> {
None
}
/// Specialized method to get the underground belt direction.
/// We would rather not have this on the Structure trait, but there is no
/// alternative as long as we use trait-object polymorphism.
/// TODO: revisit when migrating to an ECS.
fn under_direction(&self) -> Option<UnderDirection> {
None
}
fn size(&self) -> Size {
Size {
width: 1,
height: 1,
}
}
fn bounding_box(&self) -> BoundingBox {
let (position, size) = (self.position(), self.size());
BoundingBox {
x0: position.x,
y0: position.y,
x1: position.x + size.width,
y1: position.y + size.height,
}
}
fn contains(&self, pos: &Position) -> bool {
let bb = self.bounding_box();
bb.x0 <= pos.x && pos.x < bb.x1 && bb.y0 <= pos.y && pos.y < bb.y1
}
fn draw(
&self,
state: &FactorishState,
context: &CanvasRenderingContext2d,
depth: i32,
is_tooltip: bool,
) -> Result<(), JsValue>;
fn desc(&self, _state: &FactorishState) -> String {
String::from("")
}
fn frame_proc(
&mut self,
_me: StructureId,
_state: &mut FactorishState,
_structures: &mut StructureDynIter,
) -> Result<FrameProcResult, ()> {
Ok(FrameProcResult::None)
}
/// Event handler for construction events around the structure.
fn on_construction(
&mut self,
_other_id: StructureId,
_other: &dyn Structure,
_others: &StructureDynIter,
_construct: bool,
) -> Result<(), JsValue> {
Ok(())
}
/// Event handler for construction events for this structure itself.
fn on_construction_self(
&mut self,
_id: StructureId,
_others: &StructureDynIter,
_construct: bool,
) -> Result<(), JsValue> {
Ok(())
}
fn movable(&self) -> bool {
false
}
fn rotate(
&mut self,
_state: &mut FactorishState,
_others: &StructureDynIter,
) -> Result<(), RotateErr> {
Err(RotateErr::NotSupported)
}
fn set_rotation(&mut self, _rotation: &Rotation) -> Result<(), ()> {
Err(())
}
/// Called every frame for each item that is on this structure.
fn item_response(&mut self, _item: &DropItem) -> Result<ItemResponseResult, ()> {
Err(())
}
fn input(&mut self, _o: &DropItem) -> Result<(), JsValue> {
Err(JsValue::from_str("Not supported"))
}
/// Returns whether the structure can accept an item as input. If this structure is a factory
/// with a recipe selected via get_selected_recipe(), it checks whether the item is among the recipe's inputs.
fn can_input(&self, item_type: &ItemType) -> bool {
if let Some(recipe) = self.get_selected_recipe() {
recipe.input.get(item_type).is_some()
} else {
false
}
}
/// Query the set of items that this structure can output. Actual output does not happen until `output()`, so
/// this method is immutable. It should return an empty Inventory if it cannot output anything.
fn can_output(&self, _structures: &StructureDynIter) -> Inventory {
Inventory::new()
}
/// Perform the actual output. The operation should always succeed since the ability to output is
/// checked beforehand with `can_output`.
fn output(&mut self, _state: &mut FactorishState, _item_type: &ItemType) -> Result<(), ()> {
Err(())
}
fn burner_inventory(&self) -> Option<&Inventory> {
None
}
fn add_burner_inventory(&mut self, _item_type: &ItemType, _amount: isize) -> isize {
0
}
fn burner_energy(&self) -> Option<(f64, f64)> {
None
}
fn inventory(&self, _is_input: bool) -> Option<&Inventory> {
None
}
fn inventory_mut(&mut self, _is_input: bool) -> Option<&mut Inventory> {
None
}
/// Some structures don't have an inventory but can still hold items, e.g. inserter hands.
/// We need to retrieve them when we destroy such a structure, or the items would be lost into the void.
/// The default implementation takes away both input and output inventories, leaving the instance empty.
fn destroy_inventory(&mut self) -> Inventory {
let mut ret = self
.inventory_mut(true)
.map_or(Inventory::new(), |inventory| std::mem::take(inventory));
ret.merge(
self.inventory_mut(false)
.map_or(Inventory::new(), |inventory| std::mem::take(inventory)),
);
ret
}
/// Returns a list of recipes. The return value is wrapped in a Cow because some
/// structures return a dynamically configured list of recipes, while others
/// have a static, fixed list. In practice, all our structures currently return a fixed list.
fn get_recipes(&self) -> Cow<[Recipe]> |
fn select_recipe(&mut self, _index: usize) -> Result<bool, JsValue> {
Err(JsValue::from_str("recipes not available"))
}
fn get_selected_recipe(&self) -> Option<&Recipe> {
None
}
fn fluid_box(&self) -> Option<Vec<&FluidBox>> {
None
}
fn fluid_box_mut(&mut self) -> Option<Vec<&mut FluidBox>> {
None
}
fn connection(
&self,
state: &FactorishState,
structures: &dyn DynIter<Item = StructureEntry>,
) -> [bool; 4] {
// let mut structures_copy = structures.clone();
let has_fluid_box = |x, y| {
if x < 0 || state.width <= x as u32 || y < 0 || state.height <= y as u32 {
return false;
}
if let Some(structure) = structures
.dyn_iter()
.filter_map(|s| s.dynamic.as_deref())
.find(|s| *s.position() == Position { x, y })
{
return structure.fluid_box().is_some();
}
false
};
// Fluid containers connect to other containers
let Position { x, y } = *self.position();
let l = has_fluid_box(x - 1, y);
let t = has_fluid_box(x, y - 1);
let r = has_fluid_box(x + 1, y);
let b = has_fluid_box(x, y + 1);
[l, t, r, b]
}
/// If this structure can connect to the power grid.
fn power_source(&self) -> bool {
false
}
/// If this structure drains power from the grid
fn power_sink(&self) -> bool {
false
}
/// Try to drain power from this structure.
/// `demand` is in kilojoules. Returns `None` if this structure does not
/// support power supply.
fn power_outlet(&mut self, _demand: f64) -> Option<f64> {
None
}
fn wire_reach(&self) -> u32 {
3
}
fn serialize(&self) -> serde_json::Result<serde_json::Value>;
}
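// Minimal implementor sketch for the `Structure` trait above. `Rock` is a
// hypothetical structure; everything not written out here falls back to the
// trait defaults (1x1 footprint, not movable, no inventory, no power).
#[derive(Serialize, Deserialize)]
struct Rock {
    position: Position,
}

impl Structure for Rock {
    fn name(&self) -> &str {
        "Rock"
    }
    fn position(&self) -> &Position {
        &self.position
    }
    fn draw(
        &self,
        _state: &FactorishState,
        _context: &CanvasRenderingContext2d,
        _depth: i32,
        _is_tooltip: bool,
    ) -> Result<(), JsValue> {
        Ok(()) // a real structure would blit its sprite here
    }
    serialize_impl!();
}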
pub(crate) type StructureBoxed = Box<dyn Structure>;
pub(crate) struct StructureEntry {
pub gen: u32,
pub dynamic: Option<StructureBoxed>,
}
| {
Cow::from(&[][..])
} | identifier_body |
structure.rs | mod iter;
use super::{
drop_items::DropItem,
dyn_iter::{DynIter, DynIterMut},
items::ItemType,
underground_belt::UnderDirection,
water_well::FluidBox,
FactorishState, Inventory, InventoryTrait, Recipe,
};
use rotate_enum::RotateEnum;
use serde::{Deserialize, Serialize};
use std::borrow::Cow;
use wasm_bindgen::prelude::*;
use web_sys::CanvasRenderingContext2d;
#[macro_export]
macro_rules! serialize_impl {
() => {
fn serialize(&self) -> serde_json::Result<serde_json::Value> {
serde_json::to_value(self)
}
};
}
#[macro_export]
macro_rules! draw_fuel_alarm {
($self_:expr, $state:expr, $context:expr) => {
if $self_.recipe.is_some() && $self_.power == 0. && $state.sim_time % 1. < 0.5 {
if let Some(img) = $state.image_fuel_alarm.as_ref() {
let (x, y) = (
$self_.position.x as f64 * 32.,
$self_.position.y as f64 * 32.,
);
$context.draw_image_with_image_bitmap(&img.bitmap, x, y)?;
} else {
return js_err!("fuel alarm image not available");
}
}
};
}
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub(crate) struct StructureId {
pub id: u32,
pub gen: u32,
}
pub(crate) struct StructureEntryIterator<'a>(&'a mut [StructureEntry], &'a mut [StructureEntry]);
impl<'a> DynIter for StructureEntryIterator<'a> {
type Item = StructureEntry;
fn dyn_iter(&self) -> Box<dyn Iterator<Item = &Self::Item> + '_> {
Box::new(self.0.iter().chain(self.1.iter()))
}
fn as_dyn_iter(&self) -> &dyn DynIter<Item = Self::Item> {
self
}
}
impl<'a> DynIterMut for StructureEntryIterator<'a> {
fn dyn_iter_mut(&mut self) -> Box<dyn Iterator<Item = &mut Self::Item> + '_> {
Box::new(self.0.iter_mut().chain(self.1.iter_mut()))
}
}
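// Sketch of why the iterator above holds two slices: splitting the backing
// storage around one index lets a caller mutate one entry while iterating
// over all the others. This is a simplified, hypothetical helper; the real
// `StructureDynIter` (re-exported below) is built on the same idea.
fn split_around(
    entries: &mut [StructureEntry],
    i: usize,
) -> (&mut StructureEntry, StructureEntryIterator<'_>) {
    let (left, rest) = entries.split_at_mut(i);
    let (mid, right) = rest.split_first_mut().expect("index within bounds");
    (mid, StructureEntryIterator(left, right))
}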
pub(crate) use self::iter::StructureDynIter;
#[derive(Eq, PartialEq, Hash, Copy, Clone, Debug, Serialize, Deserialize)]
pub(crate) struct Position {
pub x: i32,
pub y: i32,
}
impl Position {
pub fn new(x: i32, y: i32) -> Self {
Self { x, y }
}
pub(crate) fn div_mod(&self, size: i32) -> (Position, Position) {
let div = Position::new(self.x.div_euclid(size), self.y.div_euclid(size));
let mod_ = Position::new(self.x.rem_euclid(size), self.y.rem_euclid(size));
(div, mod_)
}
pub(crate) fn add(&self, o: (i32, i32)) -> Position {
Self {
x: self.x + o.0,
y: self.y + o.1,
}
}
pub(crate) fn distance(&self, position: &Position) -> i32 {
(position.x - self.x).abs().max((position.y - self.y).abs())
}
/// Check whether the positions are neighbors. Return false if they are exactly the same.
#[allow(dead_code)]
pub(crate) fn is_neighbor(&self, pos2: &Position) -> bool {
[[-1, 0], [0, -1], [1, 0], [0, 1]].iter().any(|rel_pos| {
let pos = Position {
x: pos2.x + rel_pos[0],
y: pos2.y + rel_pos[1],
};
*self == pos
})
}
pub(crate) fn neighbor_index(&self, pos2: &Position) -> Option<u32> {
for (i, rel_pos) in [[-1, 0], [0, -1], [1, 0], [0, 1]].iter().enumerate() {
let pos = Position {
x: pos2.x + rel_pos[0],
y: pos2.y + rel_pos[1],
};
if *self == pos {
return Some(i as u32);
}
}
None
}
}
impl From<&[i32; 2]> for Position {
fn from(xy: &[i32; 2]) -> Self {
Self { x: xy[0], y: xy[1] }
}
}
pub(crate) struct Size {
pub width: i32,
pub height: i32,
}
pub(crate) struct BoundingBox {
pub x0: i32,
pub y0: i32,
pub x1: i32,
pub y1: i32,
}
#[derive(Copy, Clone, Serialize, Deserialize, RotateEnum, PartialEq)]
pub(crate) enum Rotation {
Left,
Top,
Right,
Bottom,
}
impl Rotation {
pub fn delta(&self) -> (i32, i32) {
match self {
Rotation::Left => (-1, 0),
Rotation::Top => (0, -1),
Rotation::Right => (1, 0),
Rotation::Bottom => (0, 1),
}
}
pub fn delta_inv(&self) -> (i32, i32) {
let delta = self.delta();
(-delta.0, -delta.1)
}
pub fn angle_deg(&self) -> i32 {
self.angle_4() * 90
}
pub fn angle_4(&self) -> i32 {
match self {
Rotation::Left => 2,
Rotation::Top => 3,
Rotation::Right => 0,
Rotation::Bottom => 1,
}
}
pub fn angle_rad(&self) -> f64 {
self.angle_deg() as f64 * std::f64::consts::PI / 180.
}
pub fn is_horizontal(&self) -> bool {
matches!(self, Rotation::Left | Rotation::Right)
}
pub fn is_vertical(&self) -> bool {
!self.is_horizontal()
}
}
pub(crate) enum FrameProcResult {
None,
InventoryChanged(Position),
}
pub(crate) enum ItemResponse {
Move(i32, i32),
Consume,
}
pub(crate) type ItemResponseResult = (ItemResponse, Option<FrameProcResult>);
#[derive(Debug)]
pub(crate) enum RotateErr {
NotFound,
NotSupported,
Other(JsValue),
}
pub(crate) trait Structure {
fn name(&self) -> &str;
fn position(&self) -> &Position;
fn rotation(&self) -> Option<Rotation> {
None
}
/// Specialized method to get the underground belt direction.
/// We would rather not have this on the Structure trait, but there is no
/// alternative as long as we use trait-object polymorphism.
/// TODO: revisit when migrating to an ECS.
fn under_direction(&self) -> Option<UnderDirection> {
None
}
fn size(&self) -> Size {
Size {
width: 1,
height: 1,
}
}
fn bounding_box(&self) -> BoundingBox {
let (position, size) = (self.position(), self.size());
BoundingBox {
x0: position.x,
y0: position.y,
x1: position.x + size.width,
y1: position.y + size.height,
}
}
fn contains(&self, pos: &Position) -> bool {
let bb = self.bounding_box();
bb.x0 <= pos.x && pos.x < bb.x1 && bb.y0 <= pos.y && pos.y < bb.y1
}
fn draw(
&self,
state: &FactorishState,
context: &CanvasRenderingContext2d,
depth: i32,
is_tooltip: bool,
) -> Result<(), JsValue>;
fn desc(&self, _state: &FactorishState) -> String {
String::from("")
}
fn frame_proc(
&mut self,
_me: StructureId,
_state: &mut FactorishState,
_structures: &mut StructureDynIter,
) -> Result<FrameProcResult, ()> {
Ok(FrameProcResult::None)
}
/// Event handler for construction events around the structure.
fn on_construction(
&mut self,
_other_id: StructureId,
_other: &dyn Structure,
_others: &StructureDynIter,
_construct: bool,
) -> Result<(), JsValue> {
Ok(())
}
/// Event handler for construction events for this structure itself.
fn on_construction_self(
&mut self,
_id: StructureId,
_others: &StructureDynIter,
_construct: bool,
) -> Result<(), JsValue> {
Ok(())
}
fn movable(&self) -> bool {
false
}
fn rotate(
&mut self,
_state: &mut FactorishState,
_others: &StructureDynIter,
) -> Result<(), RotateErr> {
Err(RotateErr::NotSupported)
}
fn set_rotation(&mut self, _rotation: &Rotation) -> Result<(), ()> {
Err(())
}
/// Called every frame for each item that is on this structure.
fn item_response(&mut self, _item: &DropItem) -> Result<ItemResponseResult, ()> {
Err(())
}
fn input(&mut self, _o: &DropItem) -> Result<(), JsValue> {
Err(JsValue::from_str("Not supported"))
}
/// Returns whether the structure can accept an item as input. If this structure is a factory
/// with a recipe selected via get_selected_recipe(), it checks whether the item is among the recipe's inputs.
fn can_input(&self, item_type: &ItemType) -> bool {
if let Some(recipe) = self.get_selected_recipe() {
recipe.input.get(item_type).is_some()
} else |
}
/// Query the set of items that this structure can output. Actual output does not happen until `output()`, so
/// this method is immutable. It should return an empty Inventory if it cannot output anything.
fn can_output(&self, _structures: &StructureDynIter) -> Inventory {
Inventory::new()
}
/// Perform the actual output. The operation should always succeed since the ability to output is
/// checked beforehand with `can_output`.
fn output(&mut self, _state: &mut FactorishState, _item_type: &ItemType) -> Result<(), ()> {
Err(())
}
fn burner_inventory(&self) -> Option<&Inventory> {
None
}
fn add_burner_inventory(&mut self, _item_type: &ItemType, _amount: isize) -> isize {
0
}
fn burner_energy(&self) -> Option<(f64, f64)> {
None
}
fn inventory(&self, _is_input: bool) -> Option<&Inventory> {
None
}
fn inventory_mut(&mut self, _is_input: bool) -> Option<&mut Inventory> {
None
}
/// Some structures don't have an inventory but can still hold items, e.g. inserter hands.
/// We need to retrieve them when we destroy such a structure, or the items would be lost into the void.
/// The default implementation takes away both input and output inventories, leaving the instance empty.
fn destroy_inventory(&mut self) -> Inventory {
let mut ret = self
.inventory_mut(true)
.map_or(Inventory::new(), |inventory| std::mem::take(inventory));
ret.merge(
self.inventory_mut(false)
.map_or(Inventory::new(), |inventory| std::mem::take(inventory)),
);
ret
}
/// Returns a list of recipes. The return value is wrapped in a Cow because some
/// structures return a dynamically configured list of recipes, while others
/// have a static, fixed list. In practice, all our structures currently return a fixed list.
fn get_recipes(&self) -> Cow<[Recipe]> {
Cow::from(&[][..])
}
fn select_recipe(&mut self, _index: usize) -> Result<bool, JsValue> {
Err(JsValue::from_str("recipes not available"))
}
fn get_selected_recipe(&self) -> Option<&Recipe> {
None
}
fn fluid_box(&self) -> Option<Vec<&FluidBox>> {
None
}
fn fluid_box_mut(&mut self) -> Option<Vec<&mut FluidBox>> {
None
}
fn connection(
&self,
state: &FactorishState,
structures: &dyn DynIter<Item = StructureEntry>,
) -> [bool; 4] {
// let mut structures_copy = structures.clone();
let has_fluid_box = |x, y| {
if x < 0 || state.width <= x as u32 || y < 0 || state.height <= y as u32 {
return false;
}
if let Some(structure) = structures
.dyn_iter()
.filter_map(|s| s.dynamic.as_deref())
.find(|s| *s.position() == Position { x, y })
{
return structure.fluid_box().is_some();
}
false
};
// Fluid containers connect to other containers
let Position { x, y } = *self.position();
let l = has_fluid_box(x - 1, y);
let t = has_fluid_box(x, y - 1);
let r = has_fluid_box(x + 1, y);
let b = has_fluid_box(x, y + 1);
[l, t, r, b]
}
/// If this structure can connect to the power grid.
fn power_source(&self) -> bool {
false
}
/// If this structure drains power from the grid
fn power_sink(&self) -> bool {
false
}
/// Try to drain power from this structure.
/// `demand` is in kilojoules. Returns `None` if this structure does not
/// support power supply.
fn power_outlet(&mut self, _demand: f64) -> Option<f64> {
None
}
fn wire_reach(&self) -> u32 {
3
}
fn serialize(&self) -> serde_json::Result<serde_json::Value>;
}
pub(crate) type StructureBoxed = Box<dyn Structure>;
pub(crate) struct StructureEntry {
pub gen: u32,
pub dynamic: Option<StructureBoxed>,
}
| {
false
} | conditional_block |
structure.rs | mod iter;
use super::{
drop_items::DropItem,
dyn_iter::{DynIter, DynIterMut},
items::ItemType,
underground_belt::UnderDirection,
water_well::FluidBox,
FactorishState, Inventory, InventoryTrait, Recipe,
};
use rotate_enum::RotateEnum;
use serde::{Deserialize, Serialize};
use std::borrow::Cow;
use wasm_bindgen::prelude::*;
use web_sys::CanvasRenderingContext2d;
#[macro_export]
macro_rules! serialize_impl {
() => {
fn serialize(&self) -> serde_json::Result<serde_json::Value> {
serde_json::to_value(self)
}
};
}
#[macro_export]
macro_rules! draw_fuel_alarm {
($self_:expr, $state:expr, $context:expr) => {
if $self_.recipe.is_some() && $self_.power == 0. && $state.sim_time % 1. < 0.5 {
if let Some(img) = $state.image_fuel_alarm.as_ref() {
let (x, y) = (
$self_.position.x as f64 * 32.,
$self_.position.y as f64 * 32.,
);
$context.draw_image_with_image_bitmap(&img.bitmap, x, y)?;
} else {
return js_err!("fuel alarm image not available");
}
}
};
}
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub(crate) struct StructureId {
pub id: u32,
pub gen: u32,
}
pub(crate) struct StructureEntryIterator<'a>(&'a mut [StructureEntry], &'a mut [StructureEntry]);
impl<'a> DynIter for StructureEntryIterator<'a> {
type Item = StructureEntry;
fn dyn_iter(&self) -> Box<dyn Iterator<Item = &Self::Item> + '_> {
Box::new(self.0.iter().chain(self.1.iter()))
}
fn as_dyn_iter(&self) -> &dyn DynIter<Item = Self::Item> {
self
}
}
impl<'a> DynIterMut for StructureEntryIterator<'a> {
fn dyn_iter_mut(&mut self) -> Box<dyn Iterator<Item = &mut Self::Item> + '_> {
Box::new(self.0.iter_mut().chain(self.1.iter_mut()))
}
}
pub(crate) use self::iter::StructureDynIter;
#[derive(Eq, PartialEq, Hash, Copy, Clone, Debug, Serialize, Deserialize)]
pub(crate) struct Position {
pub x: i32,
pub y: i32,
}
impl Position {
pub fn new(x: i32, y: i32) -> Self {
Self { x, y }
}
pub(crate) fn div_mod(&self, size: i32) -> (Position, Position) {
let div = Position::new(self.x.div_euclid(size), self.y.div_euclid(size));
let mod_ = Position::new(self.x.rem_euclid(size), self.y.rem_euclid(size));
(div, mod_)
}
pub(crate) fn add(&self, o: (i32, i32)) -> Position {
Self {
x: self.x + o.0,
y: self.y + o.1,
}
}
pub(crate) fn distance(&self, position: &Position) -> i32 {
(position.x - self.x).abs().max((position.y - self.y).abs())
}
/// Check whether the positions are neighbors. Return false if they are exactly the same.
#[allow(dead_code)]
pub(crate) fn is_neighbor(&self, pos2: &Position) -> bool {
[[-1, 0], [0, -1], [1, 0], [0, 1]].iter().any(|rel_pos| {
let pos = Position {
x: pos2.x + rel_pos[0],
y: pos2.y + rel_pos[1],
};
*self == pos
})
}
pub(crate) fn neighbor_index(&self, pos2: &Position) -> Option<u32> {
for (i, rel_pos) in [[-1, 0], [0, -1], [1, 0], [0, 1]].iter().enumerate() {
let pos = Position {
x: pos2.x + rel_pos[0],
y: pos2.y + rel_pos[1],
};
if *self == pos {
return Some(i as u32);
}
}
None
}
}
impl From<&[i32; 2]> for Position {
fn from(xy: &[i32; 2]) -> Self {
Self { x: xy[0], y: xy[1] }
}
}
pub(crate) struct Size {
pub width: i32,
pub height: i32,
}
pub(crate) struct BoundingBox {
pub x0: i32,
pub y0: i32,
pub x1: i32,
pub y1: i32,
}
#[derive(Copy, Clone, Serialize, Deserialize, RotateEnum, PartialEq)]
pub(crate) enum Rotation {
Left,
Top,
Right,
Bottom,
}
impl Rotation {
pub fn delta(&self) -> (i32, i32) {
match self {
Rotation::Left => (-1, 0),
Rotation::Top => (0, -1),
Rotation::Right => (1, 0),
Rotation::Bottom => (0, 1),
}
}
pub fn delta_inv(&self) -> (i32, i32) {
let delta = self.delta();
(-delta.0, -delta.1)
}
pub fn angle_deg(&self) -> i32 {
self.angle_4() * 90
}
pub fn angle_4(&self) -> i32 {
match self {
Rotation::Left => 2,
Rotation::Top => 3,
Rotation::Right => 0,
Rotation::Bottom => 1,
}
}
pub fn angle_rad(&self) -> f64 {
self.angle_deg() as f64 * std::f64::consts::PI / 180.
}
pub fn is_horizontal(&self) -> bool {
matches!(self, Rotation::Left | Rotation::Right)
}
pub fn is_vertical(&self) -> bool {
!self.is_horizontal()
}
}
pub(crate) enum FrameProcResult {
None,
InventoryChanged(Position),
}
pub(crate) enum ItemResponse {
Move(i32, i32),
Consume,
}
pub(crate) type ItemResponseResult = (ItemResponse, Option<FrameProcResult>);
#[derive(Debug)]
pub(crate) enum RotateErr {
NotFound,
NotSupported,
Other(JsValue),
}
pub(crate) trait Structure {
fn name(&self) -> &str;
fn position(&self) -> &Position;
fn rotation(&self) -> Option<Rotation> {
None
}
/// Specialized method to get the underground belt direction.
/// We would rather not have this on the Structure trait, but there is no
/// alternative as long as we use trait-object polymorphism.
/// TODO: revisit when migrating to an ECS.
fn under_direction(&self) -> Option<UnderDirection> {
None
}
fn size(&self) -> Size {
Size {
width: 1,
height: 1,
}
}
fn bounding_box(&self) -> BoundingBox {
let (position, size) = (self.position(), self.size());
BoundingBox {
x0: position.x,
y0: position.y,
x1: position.x + size.width,
y1: position.y + size.height,
}
}
fn contains(&self, pos: &Position) -> bool {
let bb = self.bounding_box();
bb.x0 <= pos.x && pos.x < bb.x1 && bb.y0 <= pos.y && pos.y < bb.y1
}
fn draw(
&self,
state: &FactorishState,
context: &CanvasRenderingContext2d,
depth: i32,
is_tooltip: bool,
) -> Result<(), JsValue>;
fn desc(&self, _state: &FactorishState) -> String {
String::from("")
}
fn frame_proc(
&mut self,
_me: StructureId,
_state: &mut FactorishState,
_structures: &mut StructureDynIter,
) -> Result<FrameProcResult, ()> {
Ok(FrameProcResult::None)
}
/// Event handler for construction events around the structure.
fn on_construction(
&mut self,
_other_id: StructureId,
_other: &dyn Structure,
_others: &StructureDynIter,
_construct: bool,
) -> Result<(), JsValue> {
Ok(())
}
/// Event handler for construction events for this structure itself.
fn on_construction_self(
&mut self,
_id: StructureId,
_others: &StructureDynIter,
_construct: bool,
) -> Result<(), JsValue> {
Ok(())
}
fn movable(&self) -> bool {
false
}
fn rotate(
&mut self,
_state: &mut FactorishState,
_others: &StructureDynIter,
) -> Result<(), RotateErr> {
Err(RotateErr::NotSupported)
}
fn set_rotation(&mut self, _rotation: &Rotation) -> Result<(), ()> {
Err(())
}
/// Called every frame for each item that is on this structure.
fn item_response(&mut self, _item: &DropItem) -> Result<ItemResponseResult, ()> {
Err(())
}
fn input(&mut self, _o: &DropItem) -> Result<(), JsValue> {
Err(JsValue::from_str("Not supported"))
}
/// Returns whether the structure can accept an item as input. If this structure is a factory
/// with a recipe selected via get_selected_recipe(), it checks whether the item is among the recipe's inputs.
fn can_input(&self, item_type: &ItemType) -> bool {
if let Some(recipe) = self.get_selected_recipe() {
recipe.input.get(item_type).is_some()
} else {
false
}
}
/// Query the set of items that this structure can output. Actual output does not happen until `output()`, so
/// this method is immutable. It should return an empty Inventory if it cannot output anything.
fn can_output(&self, _structures: &StructureDynIter) -> Inventory {
Inventory::new()
}
/// Perform the actual output. The operation should always succeed since the ability to output is
/// checked beforehand with `can_output`.
fn output(&mut self, _state: &mut FactorishState, _item_type: &ItemType) -> Result<(), ()> {
Err(())
}
fn burner_inventory(&self) -> Option<&Inventory> {
None
}
fn | (&mut self, _item_type: &ItemType, _amount: isize) -> isize {
0
}
fn burner_energy(&self) -> Option<(f64, f64)> {
None
}
fn inventory(&self, _is_input: bool) -> Option<&Inventory> {
None
}
fn inventory_mut(&mut self, _is_input: bool) -> Option<&mut Inventory> {
None
}
/// Some structures don't have an inventory but can still hold items, e.g. inserter hands.
/// We need to retrieve them when we destroy such a structure, or the items would be lost into the void.
/// The default implementation takes away both input and output inventories, leaving the instance empty.
fn destroy_inventory(&mut self) -> Inventory {
let mut ret = self
.inventory_mut(true)
.map_or(Inventory::new(), |inventory| std::mem::take(inventory));
ret.merge(
self.inventory_mut(false)
.map_or(Inventory::new(), |inventory| std::mem::take(inventory)),
);
ret
}
/// Returns a list of recipes. The return value is wrapped in a Cow because some
/// structures return a dynamically configured list of recipes, while others
/// have a static, fixed list. In practice, all our structures currently return a fixed list.
fn get_recipes(&self) -> Cow<[Recipe]> {
Cow::from(&[][..])
}
fn select_recipe(&mut self, _index: usize) -> Result<bool, JsValue> {
Err(JsValue::from_str("recipes not available"))
}
fn get_selected_recipe(&self) -> Option<&Recipe> {
None
}
fn fluid_box(&self) -> Option<Vec<&FluidBox>> {
None
}
fn fluid_box_mut(&mut self) -> Option<Vec<&mut FluidBox>> {
None
}
fn connection(
&self,
state: &FactorishState,
structures: &dyn DynIter<Item = StructureEntry>,
) -> [bool; 4] {
// let mut structures_copy = structures.clone();
let has_fluid_box = |x, y| {
if x < 0 || state.width <= x as u32 || y < 0 || state.height <= y as u32 {
return false;
}
if let Some(structure) = structures
.dyn_iter()
.filter_map(|s| s.dynamic.as_deref())
.find(|s| *s.position() == Position { x, y })
{
return structure.fluid_box().is_some();
}
false
};
// Fluid containers connect to other containers
let Position { x, y } = *self.position();
let l = has_fluid_box(x - 1, y);
let t = has_fluid_box(x, y - 1);
let r = has_fluid_box(x + 1, y);
let b = has_fluid_box(x, y + 1);
[l, t, r, b]
}
/// If this structure can connect to the power grid.
fn power_source(&self) -> bool {
false
}
/// If this structure drains power from the grid
fn power_sink(&self) -> bool {
false
}
/// Try to drain power from this structure.
/// `demand` is in kilojoules. Returns `None` if this structure does not
/// support power supply.
fn power_outlet(&mut self, _demand: f64) -> Option<f64> {
None
}
fn wire_reach(&self) -> u32 {
3
}
fn serialize(&self) -> serde_json::Result<serde_json::Value>;
}
pub(crate) type StructureBoxed = Box<dyn Structure>;
pub(crate) struct StructureEntry {
pub gen: u32,
pub dynamic: Option<StructureBoxed>,
}
| add_burner_inventory | identifier_name |
driver.rs | use eventsim::{Process, ProcessState, EventId};
use super::infrastructure::*;
use input::staticinfrastructure::*;
use smallvec::SmallVec;
use super::dynamics::*;
use output::history::TrainLogEvent;
use super::Sim;
enum ModelContainment {
Inside,
Outside,
}
enum Activation {
Wait(EventId),
Activate,
Running,
}
#[derive(Debug)]
struct Train {
location: (NodeId, (Option<NodeId>, f64)),
velocity: f64,
params: TrainParams,
under_train: SmallVec<[(NodeId, f64); 4]>,
}
pub struct | {
id :usize,
train: Train,
authority: f64,
step: (DriverAction, f64),
connected_signals: SmallVec<[(ObjectId, f64); 4]>,
logger: Box<Fn(TrainLogEvent)>,
activation: Activation,
timestep: Option<f64>,
}
impl Driver {
pub fn new(sim: &mut Sim,
id :usize,
activated: EventId,
node: NodeId,
auth: f64,
params: TrainParams,
logger: Box<Fn(TrainLogEvent)>,
timestep: Option<f64>)
-> Self {
//println!("INITIAL AUTHORITY {:?}", auth);
let train = Train {
params: params,
location: (0, (Some(node),0.0)),
velocity: 0.0,
under_train: SmallVec::new(),
};
let d = Driver {
id: id,
train: train,
authority: auth - 20.0,
step: (DriverAction::Coast, *sim.time()),
connected_signals: SmallVec::new(),
logger: logger,
activation: Activation::Wait(activated),
timestep: timestep
};
d
}
fn activate(&mut self, sim:&mut Sim) {
if *sim.time() > 0.0 {
(self.logger)(TrainLogEvent::Wait(*sim.time()));
}
self.step = (DriverAction::Coast, *sim.time());
self.move_train_discrete(sim);
}
fn goto_node(&mut self, sim: &mut Sim, node: NodeId) {
//println!("TRAIN goto node {}", node);
for obj in sim.world.statics.nodes[node].objects.clone() {
if let Some(p) = sim.world.statics.objects[obj].arrive_front(node, self.id) {
sim.start_process(p);
}
self.arrive_front(sim, obj);
}
self.train.under_train.push((node, self.train.params.length));
}
fn arrive_front(&mut self, sim: &Sim, obj: ObjectId) {
match sim.world.statics.objects[obj] {
StaticObject::Sight { distance, signal } => {
self.connected_signals.push((signal, distance));
(self.logger)(TrainLogEvent::Sight(signal,true));
}
StaticObject::Signal { .. } => {
let log = &mut self.logger;
self.connected_signals.retain(|&mut (s, _d)| {
let lost = s == obj;
if lost { log(TrainLogEvent::Sight(s,false)); }
!lost
});
}
_ => {}
}
}
fn move_train(&mut self, sim: &mut Sim) -> ModelContainment {
let dt = *sim.time() - self.step.1;
if dt <= 1e-5 {
return ModelContainment::Inside;
}
self.move_train_continuous(sim);
self.move_train_discrete(sim);
if (self.train.location.1).0.is_none() && self.train.under_train.len() == 0 {
ModelContainment::Outside
} else {
ModelContainment::Inside
}
}
fn move_train_continuous(&mut self, sim :&mut Sim) {
let (action, action_time) = self.step;
let dt = *sim.time() - action_time;
let update = dynamic_update(&self.train.params, self.train.velocity,
DriverPlan { action: action, dt: dt, });
//println!("DYNAMIC UPDATE {:?}", (action,dt));
//println!("{:?}", update);
(self.logger)(TrainLogEvent::Move(dt, action, update));
self.train.velocity = update.v;
//println!("train loc {:?}", self.train.location);
(self.train.location.1).1 -= update.dx;
//println!("train loc {:?}", self.train.location);
// In case there are no signals in sight,
// the remembered authority is updated.
self.authority -= update.dx;
let id = self.id;
self.train.under_train.retain(|&mut (node, ref mut dist)| {
*dist -= update.dx;
if *dist < 1e-5 {
// Cleared a node.
for obj in sim.world.statics.nodes[node].objects.clone() {
if let Some(p) = sim.world.statics.objects[obj].arrive_back(node, id) {
sim.start_process(p);
}
}
false
} else {
true
}
});
{
let log = &mut self.logger;
self.connected_signals.retain(|&mut (obj, ref mut dist)| {
*dist -= update.dx;
let lost = *dist < 10.0; // If closer than 10 m, the signal should already be
                         // green; seeing a red for a very short time only means
                         // the detector is placed in front of the signal, and
                         // it should not bother the driver.
if lost { log(TrainLogEvent::Sight(obj, false)); }
!lost
});
}
}
fn move_train_discrete(&mut self, sim :&mut Sim) {
loop {
let (_, (end_node, dist)) = self.train.location;
if dist > 1e-5 || end_node.is_none() { break; }
let new_start = sim.world.statics.nodes[end_node.unwrap()].other_node;
(self.logger)(TrainLogEvent::Node(end_node.unwrap()));
self.goto_node(sim, new_start);
(self.logger)(TrainLogEvent::Node(new_start));
match sim.world.edge_from(new_start) {
Some((Some(new_end_node), d)) => {
self.train.location = (new_start, (Some(new_end_node), d));
(self.logger)(TrainLogEvent::Edge(new_start, Some(new_end_node)));
}
Some((None, d)) => {
self.train.location = (new_start, (None, d));
(self.logger)(TrainLogEvent::Edge(new_start, None));
}
None => panic!("Derailed"),
}
}
}
fn plan_ahead(&mut self, sim: &Sim) -> DriverPlan {
// Travel distance is limited by next node
//println!("Travel distance is limited by next node");
//println!("{:?}", (self.train.location.1).1);
let mut max_dist = (self.train.location.1).1;
// Travel distance is limited by nodes under train
//println!("Travel distance is limited by nodes under train");
//println!("{:?}", self.train.under_train);
for &(_n, d) in self.train.under_train.iter() {
max_dist = max_dist.min(d);
}
// Travel distance is limited by sight distances
//println!("Travel distance is limited by sight distances");
//println!("{:?}", self.connected_signals);
for &(_n, d) in self.connected_signals.iter() {
max_dist = max_dist.min(d);
}
// Authority is updated by signals
for &(sig, dist) in self.connected_signals.iter() {
match sim.world.state[sig] {
ObjectState::Signal { ref authority } => {
match *authority.get() {
(Some(auth_dist), distant_sig) => {
//println!("Signal green in sight dist{} sigauth{} self.auth{}", dist, d, dist+d-20.0);
self.authority = dist + auth_dist + distant_sig.unwrap_or(0.0) - 20.0;
if self.authority < 0.0 { self.authority = 0.0; }
}
(None, _) => {
//println!("Signal red in sight dist{} self.auth{}", dist,dist-20.0);
self.authority = dist - 20.0;
if self.authority < 0.0 { self.authority = 0.0; }
break;
}
}
}
_ => panic!("Not a signal"),
}
}
//println!("Updated authority {}", self.authority);
// Static maximum speed profile ahead from current position
// TODO: other speed limitations
let static_speed_profile = StaticMaximumVelocityProfile {
local_max_velocity: self.train.params.max_vel,
max_velocity_ahead: SmallVec::from_slice(&[DistanceVelocity {
dx: self.authority, v: 0.0}]),
};
let plan = dynamic_plan_step(&self.train.params,
max_dist,
self.train.velocity,
&static_speed_profile);
//println!("PLAN: {:?} {:?} {:?} {:?} {:?} ", self.train.params, max_dist, self.train.velocity, static_speed_profile,plan);
plan
}
}
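// Sketch of the authority arithmetic in `plan_ahead`, pulled out as a pure
// function for clarity (hypothetical helper; the real code also folds in the
// distant-signal distance). A green signal extends the authority past itself
// by whatever it has been granted, minus a 20 m safety margin; a red signal
// caps the authority 20 m short of the signal.
fn authority_from_signal(dist_to_signal: f64, granted: Option<f64>) -> f64 {
    let auth = match granted {
        Some(d) => dist_to_signal + d - 20.0, // green: pass and continue
        None => dist_to_signal - 20.0,        // red: stop before the signal
    };
    auth.max(0.0)
}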
impl<'a> Process<Infrastructure<'a>> for Driver {
fn resume(&mut self, sim: &mut Sim) -> ProcessState {
match self.activation {
Activation::Wait(ev) => {
self.activation = Activation::Activate;
return ProcessState::Wait(SmallVec::from_slice(&[ev]));
},
Activation::Activate => {
self.activate(sim);
self.activation = Activation::Running;
},
Activation::Running => { }
};
//println!("resume train");
let modelcontainment = self.move_train(sim);
match modelcontainment {
ModelContainment::Outside => {
//println!("TRAIN FINISHED");
ProcessState::Finished
},
ModelContainment::Inside => {
let plan = self.plan_ahead(sim);
self.step = (plan.action, *sim.time());
//println!("PLAN {:?}", plan);
let mut events = SmallVec::new();
if plan.dt > 1e-5 {
let dt = match self.timestep {
Some(m) => if m < plan.dt && plan.dt.is_normal() { m } else { plan.dt },
None => plan.dt,
};
//println!("SET TIMOUT {:?} {:?}", plan.dt, dt);
events.push(sim.create_timeout(dt));
} else {
if self.train.velocity > 1e-5 { panic!("Velocity, but no plan."); }
self.train.velocity = 0.0;
self.step.0 = DriverAction::Coast;
}
//println!("Connected signals: {:?}", self.connected_signals);
for &(ref sig, _) in self.connected_signals.iter() {
match sim.world.state[*sig] {
ObjectState::Signal { ref authority } => events.push(authority.event()),
_ => panic!("Object is not a signal"),
}
}
ProcessState::Wait(events)
}
}
}
}
| Driver | identifier_name |
driver.rs | use eventsim::{Process, ProcessState, EventId};
use super::infrastructure::*;
use input::staticinfrastructure::*;
use smallvec::SmallVec;
use super::dynamics::*;
use output::history::TrainLogEvent;
use super::Sim;
enum ModelContainment {
Inside,
Outside,
}
enum Activation {
Wait(EventId),
Activate,
Running,
}
#[derive(Debug)]
struct Train {
location: (NodeId, (Option<NodeId>, f64)),
velocity: f64,
params: TrainParams,
under_train: SmallVec<[(NodeId, f64); 4]>,
}
pub struct Driver {
id :usize,
train: Train,
authority: f64,
step: (DriverAction, f64),
connected_signals: SmallVec<[(ObjectId, f64); 4]>,
logger: Box<Fn(TrainLogEvent)>,
activation: Activation,
timestep: Option<f64>,
}
impl Driver {
pub fn new(sim: &mut Sim,
id :usize,
activated: EventId,
node: NodeId,
auth: f64,
params: TrainParams,
logger: Box<Fn(TrainLogEvent)>,
timestep: Option<f64>)
-> Self {
//println!("INITIAL AUTHORITY {:?}", auth);
let train = Train {
params: params,
location: (0, (Some(node),0.0)),
velocity: 0.0,
under_train: SmallVec::new(),
};
let d = Driver {
id: id,
train: train,
authority: auth - 20.0,
step: (DriverAction::Coast, *sim.time()),
connected_signals: SmallVec::new(),
logger: logger,
activation: Activation::Wait(activated),
timestep: timestep
};
d
}
fn activate(&mut self, sim:&mut Sim) {
if *sim.time() > 0.0 {
(self.logger)(TrainLogEvent::Wait(*sim.time()));
}
self.step = (DriverAction::Coast, *sim.time());
self.move_train_discrete(sim);
}
fn goto_node(&mut self, sim: &mut Sim, node: NodeId) {
//println!("TRAIN goto node {}", node);
for obj in sim.world.statics.nodes[node].objects.clone() {
if let Some(p) = sim.world.statics.objects[obj].arrive_front(node, self.id) {
sim.start_process(p);
}
self.arrive_front(sim, obj);
}
self.train.under_train.push((node, self.train.params.length));
}
fn arrive_front(&mut self, sim: &Sim, obj: ObjectId) {
match sim.world.statics.objects[obj] {
StaticObject::Sight { distance, signal } => {
self.connected_signals.push((signal, distance));
(self.logger)(TrainLogEvent::Sight(signal,true));
}
StaticObject::Signal { .. } => {
let log = &mut self.logger;
self.connected_signals.retain(|&mut (s, _d)| {
let lost = s == obj;
if lost { log(TrainLogEvent::Sight(s,false)); }
!lost
});
}
_ => {}
}
}
fn move_train(&mut self, sim: &mut Sim) -> ModelContainment {
let dt = *sim.time() - self.step.1;
if dt <= 1e-5 {
return ModelContainment::Inside;
}
self.move_train_continuous(sim);
self.move_train_discrete(sim);
if (self.train.location.1).0.is_none() && self.train.under_train.len() == 0 {
ModelContainment::Outside
} else {
ModelContainment::Inside
}
}
fn move_train_continuous(&mut self, sim :&mut Sim) {
let (action, action_time) = self.step;
let dt = *sim.time() - action_time;
let update = dynamic_update(&self.train.params, self.train.velocity,
DriverPlan { action: action, dt: dt, });
//println!("DYNAMIC UPDATE {:?}", (action,dt));
//println!("{:?}", update);
(self.logger)(TrainLogEvent::Move(dt, action, update));
self.train.velocity = update.v;
//println!("train loc {:?}", self.train.location);
(self.train.location.1).1 -= update.dx;
//println!("train loc {:?}", self.train.location);
// In case there are no signals in sight,
// the remembered authority is updated.
self.authority -= update.dx;
let id = self.id;
self.train.under_train.retain(|&mut (node, ref mut dist)| {
*dist -= update.dx;
if *dist < 1e-5 {
// Cleared a node.
for obj in sim.world.statics.nodes[node].objects.clone() {
if let Some(p) = sim.world.statics.objects[obj].arrive_back(node, id) {
sim.start_process(p);
}
}
false
} else {
true
}
});
{
let log = &mut self.logger;
self.connected_signals.retain(|&mut (obj, ref mut dist)| {
*dist -= update.dx;
let lost = *dist < 10.0; // If closer than 10 m, the signal should already be
                         // green; seeing a red for a very short time only means
                         // the detector is placed in front of the signal, and
                         // it should not bother the driver.
if lost { log(TrainLogEvent::Sight(obj, false)); }
!lost
});
}
}
fn move_train_discrete(&mut self, sim :&mut Sim) {
loop {
let (_, (end_node, dist)) = self.train.location;
if dist > 1e-5 || end_node.is_none() { break; }
let new_start = sim.world.statics.nodes[end_node.unwrap()].other_node;
(self.logger)(TrainLogEvent::Node(end_node.unwrap()));
self.goto_node(sim, new_start);
(self.logger)(TrainLogEvent::Node(new_start));
match sim.world.edge_from(new_start) {
Some((Some(new_end_node), d)) => {
self.train.location = (new_start, (Some(new_end_node), d));
(self.logger)(TrainLogEvent::Edge(new_start, Some(new_end_node)));
}
Some((None, d)) => {
self.train.location = (new_start, (None, d));
(self.logger)(TrainLogEvent::Edge(new_start, None));
}
None => panic!("Derailed"),
}
}
}
fn plan_ahead(&mut self, sim: &Sim) -> DriverPlan {
// Travel distance is limited by next node
//println!("Travel distance is limited by next node");
//println!("{:?}", (self.train.location.1).1);
let mut max_dist = (self.train.location.1).1;
// Travel distance is limited by nodes under train
//println!("Travel distance is limited by nodes under train");
//println!("{:?}", self.train.under_train);
for &(_n, d) in self.train.under_train.iter() {
max_dist = max_dist.min(d);
}
// Travel distance is limited by sight distances
//println!("Travel distance is limited by sight distances");
//println!("{:?}", self.connected_signals);
for &(_n, d) in self.connected_signals.iter() {
max_dist = max_dist.min(d);
}
// Authority is updated by signals
for &(sig, dist) in self.connected_signals.iter() {
match sim.world.state[sig] {
ObjectState::Signal { ref authority } => {
match *authority.get() {
(Some(auth_dist), distant_sig) => {
//println!("Signal green in sight dist{} sigauth{} self.auth{}", dist, d, dist+d-20.0);
self.authority = dist + auth_dist + distant_sig.unwrap_or(0.0) - 20.0;
if self.authority < 0.0 { self.authority = 0.0; }
}
(None,_) => |
}
}
_ => panic!("Not a signal"),
}
}
//println!("Updated authority {}", self.authority);
// Static maximum speed profile ahead from current position
// TODO: other speed limitations
let static_speed_profile = StaticMaximumVelocityProfile {
local_max_velocity: self.train.params.max_vel,
max_velocity_ahead: SmallVec::from_slice(&[DistanceVelocity {
dx: self.authority, v: 0.0}]),
};
let plan = dynamic_plan_step(&self.train.params,
max_dist,
self.train.velocity,
&static_speed_profile);
//println!("PLAN: {:?} {:?} {:?} {:?} {:?} ", self.train.params, max_dist, self.train.velocity, static_speed_profile,plan);
plan
}
}
impl<'a> Process<Infrastructure<'a>> for Driver {
fn resume(&mut self, sim: &mut Sim) -> ProcessState {
match self.activation {
Activation::Wait(ev) => {
self.activation = Activation::Activate;
return ProcessState::Wait(SmallVec::from_slice(&[ev]));
},
Activation::Activate => {
self.activate(sim);
self.activation = Activation::Running;
},
Activation::Running => { }
};
//println!("resume train");
let modelcontainment = self.move_train(sim);
match modelcontainment {
ModelContainment::Outside => {
//println!("TRAIN FINISHED");
ProcessState::Finished
},
ModelContainment::Inside => {
let plan = self.plan_ahead(sim);
self.step = (plan.action, *sim.time());
//println!("PLAN {:?}", plan);
let mut events = SmallVec::new();
if plan.dt > 1e-5 {
let dt = match self.timestep {
Some(m) => if m < plan.dt && plan.dt.is_normal() { m } else { plan.dt },
None => plan.dt,
};
//println!("SET TIMOUT {:?} {:?}", plan.dt, dt);
events.push(sim.create_timeout(dt));
} else {
if self.train.velocity > 1e-5 { panic!("Velocity, but no plan."); }
self.train.velocity = 0.0;
self.step.0 = DriverAction::Coast;
}
//println!("Connected signals: {:?}", self.connected_signals);
for &(ref sig, _) in self.connected_signals.iter() {
match sim.world.state[*sig] {
ObjectState::Signal { ref authority } => events.push(authority.event()),
_ => panic!("Object is not a signal"),
}
}
ProcessState::Wait(events)
}
}
}
}
| {
//println!("Signal red in sight dist{} self.auth{}", dist,dist-20.0);
self.authority = dist - 20.0;
if self.authority < 0.0 { self.authority = 0.0; }
break;
} | conditional_block |
driver.rs | use eventsim::{Process, ProcessState, EventId};
use super::infrastructure::*;
use input::staticinfrastructure::*;
use smallvec::SmallVec;
use super::dynamics::*;
use output::history::TrainLogEvent;
use super::Sim;
enum ModelContainment {
Inside,
Outside,
}
enum Activation {
Wait(EventId),
Activate,
Running,
}
#[derive(Debug)]
struct Train {
location: (NodeId, (Option<NodeId>, f64)),
velocity: f64,
params: TrainParams,
under_train: SmallVec<[(NodeId, f64); 4]>,
}
pub struct Driver {
id :usize,
train: Train,
authority: f64,
step: (DriverAction, f64),
connected_signals: SmallVec<[(ObjectId, f64); 4]>,
logger: Box<Fn(TrainLogEvent)>,
activation: Activation,
timestep: Option<f64>,
}
impl Driver {
pub fn new(sim: &mut Sim,
id :usize,
activated: EventId,
node: NodeId,
auth: f64,
params: TrainParams,
logger: Box<Fn(TrainLogEvent)>,
timestep: Option<f64>)
-> Self {
//println!("INITIAL AUTHORITY {:?}", auth);
let train = Train {
params: params,
location: (0, (Some(node),0.0)),
velocity: 0.0,
under_train: SmallVec::new(),
};
let d = Driver {
id: id,
train: train,
authority: auth - 20.0,
step: (DriverAction::Coast, *sim.time()),
connected_signals: SmallVec::new(),
logger: logger,
activation: Activation::Wait(activated),
timestep: timestep
};
d
}
fn activate(&mut self, sim:&mut Sim) {
if *sim.time() > 0.0 {
(self.logger)(TrainLogEvent::Wait(*sim.time()));
}
self.step = (DriverAction::Coast, *sim.time());
self.move_train_discrete(sim);
}
fn goto_node(&mut self, sim: &mut Sim, node: NodeId) {
//println!("TRAIN goto node {}", node);
for obj in sim.world.statics.nodes[node].objects.clone() {
if let Some(p) = sim.world.statics.objects[obj].arrive_front(node, self.id) {
sim.start_process(p);
}
self.arrive_front(sim, obj);
}
self.train.under_train.push((node, self.train.params.length));
}
fn arrive_front(&mut self, sim: &Sim, obj: ObjectId) {
match sim.world.statics.objects[obj] {
StaticObject::Sight { distance, signal } => {
self.connected_signals.push((signal, distance));
(self.logger)(TrainLogEvent::Sight(signal,true));
}
StaticObject::Signal { .. } => {
let log = &mut self.logger;
self.connected_signals.retain(|&mut (s, _d)| {
let lost = s == obj;
if lost { log(TrainLogEvent::Sight(s,false)); }
!lost
});
}
_ => {}
}
}
fn move_train(&mut self, sim: &mut Sim) -> ModelContainment {
let dt = *sim.time() - self.step.1;
if dt <= 1e-5 {
return ModelContainment::Inside;
}
self.move_train_continuous(sim);
self.move_train_discrete(sim);
if (self.train.location.1).0.is_none() && self.train.under_train.len() == 0 {
ModelContainment::Outside
} else {
ModelContainment::Inside
}
}
fn move_train_continuous(&mut self, sim :&mut Sim) {
let (action, action_time) = self.step;
let dt = *sim.time() - action_time;
let update = dynamic_update(&self.train.params, self.train.velocity,
DriverPlan { action: action, dt: dt, });
//println!("DYNAMIC UPDATE {:?}", (action,dt));
//println!("{:?}", update);
(self.logger)(TrainLogEvent::Move(dt, action, update));
self.train.velocity = update.v;
//println!("train loc {:?}", self.train.location);
(self.train.location.1).1 -= update.dx;
//println!("train loc {:?}", self.train.location);
// In case there are no signals in sight,
// the remembered authority is updated.
self.authority -= update.dx;
let id = self.id;
self.train.under_train.retain(|&mut (node, ref mut dist)| {
*dist -= update.dx;
if *dist < 1e-5 {
// Cleared a node.
for obj in sim.world.statics.nodes[node].objects.clone() {
if let Some(p) = sim.world.statics.objects[obj].arrive_back(node, id) { | }
false
} else {
true
}
});
{
let log = &mut self.logger;
self.connected_signals.retain(|&mut (obj, ref mut dist)| {
*dist -= update.dx;
let lost = *dist < 10.0; // If closer than 10 m, the signal should already be
                         // green; seeing a red for a very short time only means
                         // the detector is placed in front of the signal, and
                         // it should not bother the driver.
if lost { log(TrainLogEvent::Sight(obj, false)); }
!lost
});
}
}
fn move_train_discrete(&mut self, sim :&mut Sim) {
loop {
let (_, (end_node, dist)) = self.train.location;
if dist > 1e-5 || end_node.is_none() { break; }
let new_start = sim.world.statics.nodes[end_node.unwrap()].other_node;
(self.logger)(TrainLogEvent::Node(end_node.unwrap()));
self.goto_node(sim, new_start);
(self.logger)(TrainLogEvent::Node(new_start));
match sim.world.edge_from(new_start) {
Some((Some(new_end_node), d)) => {
self.train.location = (new_start, (Some(new_end_node), d));
(self.logger)(TrainLogEvent::Edge(new_start, Some(new_end_node)));
}
Some((None, d)) => {
self.train.location = (new_start, (None, d));
(self.logger)(TrainLogEvent::Edge(new_start, None));
}
None => panic!("Derailed"),
}
}
}
fn plan_ahead(&mut self, sim: &Sim) -> DriverPlan {
// Travel distance is limited by next node
//println!("Travel distance is limited by next node");
//println!("{:?}", (self.train.location.1).1);
let mut max_dist = (self.train.location.1).1;
// Travel distance is limited by nodes under train
//println!("Travel distance is limited by nodes under train");
//println!("{:?}", self.train.under_train);
for &(_n, d) in self.train.under_train.iter() {
max_dist = max_dist.min(d);
}
// Travel distance is limited by sight distances
//println!("Travel distance is limited by sight distances");
//println!("{:?}", self.connected_signals);
for &(_n, d) in self.connected_signals.iter() {
max_dist = max_dist.min(d);
}
// Authority is updated by signals
for &(sig, dist) in self.connected_signals.iter() {
match sim.world.state[sig] {
ObjectState::Signal { ref authority } => {
match *authority.get() {
(Some(auth_dist), distant_sig) => {
//println!("Signal green in sight dist{} sigauth{} self.auth{}", dist, d, dist+d-20.0);
self.authority = dist + auth_dist + distant_sig.unwrap_or(0.0) - 20.0;
if self.authority < 0.0 { self.authority = 0.0; }
}
(None, _) => {
//println!("Signal red in sight dist{} self.auth{}", dist,dist-20.0);
self.authority = dist - 20.0;
if self.authority < 0.0 { self.authority = 0.0; }
break;
}
}
}
_ => panic!("Not a signal"),
}
}
//println!("Updated authority {}", self.authority);
// Static maximum speed profile ahead from current position
// TODO: other speed limitations
let static_speed_profile = StaticMaximumVelocityProfile {
local_max_velocity: self.train.params.max_vel,
max_velocity_ahead: SmallVec::from_slice(&[DistanceVelocity {
dx: self.authority, v: 0.0}]),
};
let plan = dynamic_plan_step(&self.train.params,
max_dist,
self.train.velocity,
&static_speed_profile);
//println!("PLAN: {:?} {:?} {:?} {:?} {:?} ", self.train.params, max_dist, self.train.velocity, static_speed_profile,plan);
plan
}
}
impl<'a> Process<Infrastructure<'a>> for Driver {
fn resume(&mut self, sim: &mut Sim) -> ProcessState {
match self.activation {
Activation::Wait(ev) => {
self.activation = Activation::Activate;
return ProcessState::Wait(SmallVec::from_slice(&[ev]));
},
Activation::Activate => {
self.activate(sim);
self.activation = Activation::Running;
},
Activation::Running => { }
};
//println!("resume train");
let modelcontainment = self.move_train(sim);
match modelcontainment {
ModelContainment::Outside => {
//println!("TRAIN FINISHED");
ProcessState::Finished
},
ModelContainment::Inside => {
let plan = self.plan_ahead(sim);
self.step = (plan.action, *sim.time());
//println!("PLAN {:?}", plan);
let mut events = SmallVec::new();
if plan.dt > 1e-5 {
let dt = match self.timestep {
Some(m) => if m < plan.dt && plan.dt.is_normal() { m } else { plan.dt },
None => plan.dt,
};
//println!("SET TIMOUT {:?} {:?}", plan.dt, dt);
events.push(sim.create_timeout(dt));
} else {
if self.train.velocity > 1e-5 { panic!("Velocity, but no plan."); }
self.train.velocity = 0.0;
self.step.0 = DriverAction::Coast;
}
//println!("Connected signals: {:?}", self.connected_signals);
for &(ref sig, _) in self.connected_signals.iter() {
match sim.world.state[*sig] {
ObjectState::Signal { ref authority } => events.push(authority.event()),
_ => panic!("Object is not a signal"),
}
}
ProcessState::Wait(events)
}
}
}
} | sim.start_process(p);
} | random_line_split |
lib.rs | /// When sending data, split it into frames whose maximum size is this value
/// (max 1MByte, as per the Mplex spec).
split_send_size: usize,
}
impl MplexConfig {
/// Builds the default configuration.
#[inline]
pub fn new() -> MplexConfig {
Default::default()
}
/// Sets the maximum number of simultaneously opened substreams, after which an error is
/// generated and the connection closes.
///
/// A limit is necessary in order to avoid DoS attacks.
#[inline]
pub fn max_substreams(&mut self, max: usize) -> &mut Self {
self.max_substreams = max;
self
}
/// Sets the maximum number of pending incoming messages.
///
/// A limit is necessary in order to avoid DoS attacks.
#[inline]
pub fn max_buffer_len(&mut self, max: usize) -> &mut Self {
self.max_buffer_len = max;
self
}
/// Sets the behaviour when the maximum buffer length has been reached.
///
/// See the documentation of `MaxBufferBehaviour`.
#[inline]
pub fn max_buffer_len_behaviour(&mut self, behaviour: MaxBufferBehaviour) -> &mut Self {
self.max_buffer_behaviour = behaviour;
self
}
/// Sets the frame size used when sending data. Capped at 1Mbyte as per the
/// Mplex spec.
pub fn split_send_size(&mut self, size: usize) -> &mut Self {
let size = cmp::min(size, codec::MAX_FRAME_SIZE);
self.split_send_size = size;
self
}
#[inline]
fn upgrade<C>(self, i: C) -> Multiplex<C>
where
C: AsyncRead + AsyncWrite
{
let max_buffer_len = self.max_buffer_len;
Multiplex {
inner: Mutex::new(MultiplexInner {
error: Ok(()),
inner: executor::spawn(Framed::new(i, codec::Codec::new()).fuse()),
config: self,
buffer: Vec::with_capacity(cmp::min(max_buffer_len, 512)),
opened_substreams: Default::default(),
next_outbound_stream_id: 0,
notifier_read: Arc::new(Notifier {
to_notify: Mutex::new(Default::default()),
}),
notifier_write: Arc::new(Notifier {
to_notify: Mutex::new(Default::default()),
}),
is_shutdown: false,
is_acknowledged: false,
})
}
}
}
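// Usage sketch: tightening the limits before using the configuration as a
// connection upgrade. The values are illustrative only.
fn demo_config() -> MplexConfig {
    let mut cfg = MplexConfig::new();
    cfg.max_substreams(64)
        .max_buffer_len(2048)
        .max_buffer_len_behaviour(MaxBufferBehaviour::Block) // back-pressure instead of closing
        .split_send_size(8 * 1024);
    cfg
}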
impl Default for MplexConfig {
#[inline]
fn default() -> MplexConfig {
MplexConfig {
max_substreams: 128,
max_buffer_len: 4096,
max_buffer_behaviour: MaxBufferBehaviour::CloseAll,
split_send_size: 1024,
}
}
}
/// Behaviour when the maximum length of the buffer is reached.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum MaxBufferBehaviour {
/// Produce an error on all the substreams.
CloseAll,
/// No new message will be read from the underlying connection if the buffer is full.
///
/// This can potentially introduce a deadlock if you are waiting for a message from a substream
/// before processing the messages received on another substream.
Block,
}
impl UpgradeInfo for MplexConfig {
type Info = &'static [u8];
type InfoIter = iter::Once<Self::Info>;
#[inline]
fn protocol_info(&self) -> Self::InfoIter {
iter::once(b"/mplex/6.7.0")
}
}
impl<C> InboundUpgrade<C> for MplexConfig
where
C: AsyncRead + AsyncWrite,
{
type Output = Multiplex<Negotiated<C>>;
type Error = IoError;
type Future = future::FutureResult<Self::Output, IoError>;
fn upgrade_inbound(self, socket: Negotiated<C>, _: Self::Info) -> Self::Future {
future::ok(self.upgrade(socket))
}
}
impl<C> OutboundUpgrade<C> for MplexConfig
where
C: AsyncRead + AsyncWrite,
{
type Output = Multiplex<Negotiated<C>>;
type Error = IoError;
type Future = future::FutureResult<Self::Output, IoError>;
fn upgrade_outbound(self, socket: Negotiated<C>, _: Self::Info) -> Self::Future {
future::ok(self.upgrade(socket))
}
}
/// Multiplexer. Implements the `StreamMuxer` trait.
pub struct Multiplex<C> {
inner: Mutex<MultiplexInner<C>>,
}
// Struct shared throughout the implementation.
struct MultiplexInner<C> {
// Error that happened earlier. Should poison any further use of this multiplexer.
error: Result<(), IoError>,
// Underlying stream.
inner: executor::Spawn<Fuse<Framed<C, codec::Codec>>>,
/// The original configuration.
config: MplexConfig,
// Buffer of elements pulled from the stream but not processed yet.
buffer: Vec<codec::Elem>,
// List of Ids of opened substreams. Used to filter out messages that don't belong to any
// substream. Note that this is handled exclusively by `next_match`.
// The `Endpoint` value denotes who initiated the substream from our point of view
// (see note [StreamId]).
opened_substreams: FnvHashSet<(u32, Endpoint)>,
// Id of the next outgoing substream.
next_outbound_stream_id: u32,
/// List of tasks to notify when a read event happens on the underlying stream.
notifier_read: Arc<Notifier>,
/// List of tasks to notify when a write event happens on the underlying stream.
notifier_write: Arc<Notifier>,
/// If true, the connection has been shut down. We need to be careful not to accidentally
/// call `Sink::poll_complete` or `Sink::start_send` after `Sink::close`.
is_shutdown: bool,
/// If true, the remote has sent data to us.
is_acknowledged: bool,
}
struct Notifier {
/// List of tasks to notify.
to_notify: Mutex<FnvHashMap<usize, task::Task>>,
}
impl executor::Notify for Notifier {
fn notify(&self, _: usize) {
let tasks = mem::replace(&mut *self.to_notify.lock(), Default::default());
for (_, task) in tasks {
task.notify();
}
}
}
// TODO: replace with another system
static NEXT_TASK_ID: AtomicUsize = AtomicUsize::new(0);
task_local!{
static TASK_ID: usize = NEXT_TASK_ID.fetch_add(1, Ordering::Relaxed)
}
// Note [StreamId]: mplex no longer partitions stream IDs into odd (for initiators) and
// even ones (for receivers). Streams are instead identified by a number and whether the flag
// is odd (for receivers) or even (for initiators). `Open` frames do not have a flag, but are
// sent unidirectionally. As a consequence, we need to remember if the stream was initiated by us
// or remotely and we store the information from our point of view, i.e. receiving an `Open` frame
// is stored as `(<u32>, Listener)`, sending an `Open` frame as `(<u32>, Dialer)`. Receiving
// a `Data` frame with flag `MessageReceiver` (= 1) means that we initiated the stream, so the
// entry has been stored as `(<u32>, Dialer)`. So, when looking up streams based on frames
// received, we have to invert the `Endpoint`, except for `Open`.
/// Processes elements in `inner` until one matching `filter` is found.
///
/// If `NotReady` is returned, the current task is scheduled for later, just like with any `Poll`.
/// `Ready` is almost always returned with the matched value. An error is returned if the stream reaches EOF.
fn next_match<C, F, O>(inner: &mut MultiplexInner<C>, mut filter: F) -> Poll<O, IoError>
where C: AsyncRead + AsyncWrite,
F: FnMut(&codec::Elem) -> Option<O>,
{
// If an error happened earlier, immediately return it.
if let Err(ref err) = inner.error {
return Err(IoError::new(err.kind(), err.to_string()));
}
if let Some((offset, out)) = inner.buffer.iter().enumerate().filter_map(|(n, v)| filter(v).map(|v| (n, v))).next() {
// The buffer was full and no longer is, so let's notify everything.
if inner.buffer.len() == inner.config.max_buffer_len {
executor::Notify::notify(&*inner.notifier_read, 0);
}
inner.buffer.remove(offset);
return Ok(Async::Ready(out));
}
loop {
// Check if we reached max buffer length first.
debug_assert!(inner.buffer.len() <= inner.config.max_buffer_len);
if inner.buffer.len() == inner.config.max_buffer_len {
debug!("Reached mplex maximum buffer length");
match inner.config.max_buffer_behaviour {
MaxBufferBehaviour::CloseAll => {
inner.error = Err(IoError::new(IoErrorKind::Other, "reached maximum buffer length"));
return Err(IoError::new(IoErrorKind::Other, "reached maximum buffer length"));
},
MaxBufferBehaviour::Block => {
inner.notifier_read.to_notify.lock().insert(TASK_ID.with(|&t| t), task::current());
return Ok(Async::NotReady);
},
}
}
let elem = match inner.inner.poll_stream_notify(&inner.notifier_read, 0) {
Ok(Async::Ready(Some(item))) => item,
Ok(Async::Ready(None)) => return Err(IoErrorKind::BrokenPipe.into()),
Ok(Async::NotReady) => {
inner.notifier_read.to_notify.lock().insert(TASK_ID.with(|&t| t), task::current());
return Ok(Async::NotReady);
},
Err(err) => {
let err2 = IoError::new(err.kind(), err.to_string());
inner.error = Err(err);
return Err(err2);
},
};
trace!("Received message: {:?}", elem);
inner.is_acknowledged = true;
// Handle substreams opening/closing.
match elem {
codec::Elem::Open { substream_id } => {
if !inner.opened_substreams.insert((substream_id, Endpoint::Listener)) {
debug!("Received open message for substream {} which was already open", substream_id)
}
}
codec::Elem::Close { substream_id, endpoint, .. } | codec::Elem::Reset { substream_id, endpoint, .. } => {
inner.opened_substreams.remove(&(substream_id, !endpoint));
}
_ => ()
}
if let Some(out) = filter(&elem) {
return Ok(Async::Ready(out));
} else {
let endpoint = elem.endpoint().unwrap_or(Endpoint::Dialer);
if inner.opened_substreams.contains(&(elem.substream_id(), !endpoint)) || elem.is_open_msg() {
inner.buffer.push(elem);
} else if !elem.is_close_or_reset_msg() {
debug!("Ignored message {:?} because the substream wasn't open", elem);
}
}
}
}
// Small convenience function that tries to write `elem` to the stream.
fn poll_send<C>(inner: &mut MultiplexInner<C>, elem: codec::Elem) -> Poll<(), IoError>
where C: AsyncRead + AsyncWrite
{
if inner.is_shutdown {
return Err(IoError::new(IoErrorKind::Other, "connection is shut down"))
}
match inner.inner.start_send_notify(elem, &inner.notifier_write, 0) {
Ok(AsyncSink::Ready) => {
Ok(Async::Ready(()))
},
Ok(AsyncSink::NotReady(_)) => {
inner.notifier_write.to_notify.lock().insert(TASK_ID.with(|&t| t), task::current());
Ok(Async::NotReady)
},
Err(err) => Err(err)
}
}
impl<C> StreamMuxer for Multiplex<C>
where C: AsyncRead + AsyncWrite
{
type Substream = Substream;
type OutboundSubstream = OutboundSubstream;
type Error = IoError;
fn poll_inbound(&self) -> Poll<Self::Substream, IoError> {
let mut inner = self.inner.lock();
if inner.opened_substreams.len() >= inner.config.max_substreams {
debug!("Refused substream; reached maximum number of substreams {}", inner.config.max_substreams);
return Err(IoError::new(IoErrorKind::ConnectionRefused,
"exceeded maximum number of open substreams"));
}
let num = try_ready!(next_match(&mut inner, |elem| {
match elem {
codec::Elem::Open { substream_id } => Some(*substream_id),
_ => None,
}
}));
debug!("Successfully opened inbound substream {}", num);
Ok(Async::Ready(Substream {
current_data: Bytes::new(),
num,
endpoint: Endpoint::Listener,
local_open: true,
remote_open: true,
}))
}
fn open_outbound(&self) -> Self::OutboundSubstream {
let mut inner = self.inner.lock();
// Assign a substream ID now.
let substream_id = {
let n = inner.next_outbound_stream_id;
inner.next_outbound_stream_id = inner.next_outbound_stream_id.checked_add(1)
.expect("Mplex substream ID overflowed");
n
};
inner.opened_substreams.insert((substream_id, Endpoint::Dialer));
OutboundSubstream {
num: substream_id,
state: OutboundSubstreamState::SendElem(codec::Elem::Open { substream_id }),
}
}
fn poll_outbound(&self, substream: &mut Self::OutboundSubstream) -> Poll<Self::Substream, IoError> {
loop {
let mut inner = self.inner.lock();
let polling = match substream.state {
OutboundSubstreamState::SendElem(ref elem) => {
poll_send(&mut inner, elem.clone())
},
OutboundSubstreamState::Flush => {
if inner.is_shutdown {
return Err(IoError::new(IoErrorKind::Other, "connection is shut down"))
}
let inner = &mut *inner; // Avoids borrow errors
inner.inner.poll_flush_notify(&inner.notifier_write, 0)
},
OutboundSubstreamState::Done => {
panic!("Polling outbound substream after it's been succesfully open");
},
};
match polling {
Ok(Async::Ready(())) => (),
Ok(Async::NotReady) => {
inner.notifier_write.to_notify.lock().insert(TASK_ID.with(|&t| t), task::current());
return Ok(Async::NotReady)
},
Err(err) => {
debug!("Failed to open outbound substream {}", substream.num);
inner.buffer.retain(|elem| {
elem.substream_id() != substream.num || elem.endpoint() == Some(Endpoint::Dialer)
});
return Err(err)
},
};
drop(inner);
// Going to next step.
match substream.state {
OutboundSubstreamState::SendElem(_) => {
substream.state = OutboundSubstreamState::Flush;
},
OutboundSubstreamState::Flush => {
debug!("Successfully opened outbound substream {}", substream.num);
substream.state = OutboundSubstreamState::Done;
return Ok(Async::Ready(Substream {
num: substream.num,
current_data: Bytes::new(),
endpoint: Endpoint::Dialer,
local_open: true,
remote_open: true,
}));
},
OutboundSubstreamState::Done => unreachable!(),
}
}
}
#[inline]
fn destroy_outbound(&self, _substream: Self::OutboundSubstream) {
// Nothing to do.
}
unsafe fn prepare_uninitialized_buffer(&self, _: &mut [u8]) -> bool {
false
}
fn read_substream(&self, substream: &mut Self::Substream, buf: &mut [u8]) -> Poll<usize, IoError> {
loop {
// First, transfer from `current_data`.
if !substream.current_data.is_empty() {
let len = cmp::min(substream.current_data.len(), buf.len());
buf[..len].copy_from_slice(&substream.current_data.split_to(len));
return Ok(Async::Ready(len));
}
// If the remote writing side is closed, return EOF.
if !substream.remote_open {
return Ok(Async::Ready(0));
}
// Try to find a packet of data in the buffer.
let mut inner = self.inner.lock();
let next_data_poll = next_match(&mut inner, |elem| {
match elem {
codec::Elem::Data { substream_id, endpoint, data, .. }
if *substream_id == substream.num && *endpoint != substream.endpoint => // see note [StreamId]
{
Some(Some(data.clone()))
}
codec::Elem::Close { substream_id, endpoint }
if *substream_id == substream.num && *endpoint != substream.endpoint => // see note [StreamId]
{
Some(None)
}
_ => None
}
});
// We're in a loop, so all we need to do is set `substream.current_data` to the data we
// just read and wait for the next iteration.
match next_data_poll? {
Async::Ready(Some(data)) => substream.current_data = data,
Async::Ready(None) => {
substream.remote_open = false;
return Ok(Async::Ready(0));
},
Async::NotReady => | ,
}
}
}
fn write_substream(&self, substream: &mut Self::Substream, buf: &[u8]) -> Poll<usize, IoError> {
if !substream.local_open {
return Err(IoErrorKind::BrokenPipe.into());
}
let mut inner = self.inner.lock();
let to_write = cmp::min(buf.len(), inner.config.split_send_size);
let elem = codec::Elem::Data {
substream_id: substream.num,
data: From::from(&buf[..to_write]),
endpoint: substream.endpoint,
};
match poll_send(&mut inner, elem)? {
| {
// There was no data packet in the buffer about this substream; maybe it's
// because it has been closed.
if inner.opened_substreams.contains(&(substream.num, substream.endpoint)) {
return Ok(Async::NotReady)
} else {
return Ok(Async::Ready(0))
}
} | conditional_block |
lib.rs | /// When sending data, split it into frames whose maximum size is this value
/// (max 1MByte, as per the Mplex spec).
split_send_size: usize,
}
impl MplexConfig {
/// Builds the default configuration.
#[inline]
pub fn new() -> MplexConfig {
Default::default()
}
/// Sets the maximum number of simultaneously opened substreams, after which an error is
/// generated and the connection closes.
///
/// A limit is necessary in order to avoid DoS attacks.
#[inline]
pub fn max_substreams(&mut self, max: usize) -> &mut Self {
self.max_substreams = max;
self
}
/// Sets the maximum number of pending incoming messages.
///
/// A limit is necessary in order to avoid DoS attacks.
#[inline]
pub fn max_buffer_len(&mut self, max: usize) -> &mut Self {
self.max_buffer_len = max;
self
}
/// Sets the behaviour when the maximum buffer length has been reached.
///
/// See the documentation of `MaxBufferBehaviour`.
#[inline]
pub fn max_buffer_len_behaviour(&mut self, behaviour: MaxBufferBehaviour) -> &mut Self {
self.max_buffer_behaviour = behaviour;
self
}
/// Sets the frame size used when sending data. Capped at 1Mbyte as per the
/// Mplex spec.
pub fn split_send_size(&mut self, size: usize) -> &mut Self {
let size = cmp::min(size, codec::MAX_FRAME_SIZE);
self.split_send_size = size;
self
}
#[inline]
fn upgrade<C>(self, i: C) -> Multiplex<C>
where
C: AsyncRead + AsyncWrite
{
let max_buffer_len = self.max_buffer_len;
Multiplex {
inner: Mutex::new(MultiplexInner {
error: Ok(()),
inner: executor::spawn(Framed::new(i, codec::Codec::new()).fuse()),
config: self,
buffer: Vec::with_capacity(cmp::min(max_buffer_len, 512)),
opened_substreams: Default::default(),
next_outbound_stream_id: 0,
notifier_read: Arc::new(Notifier {
to_notify: Mutex::new(Default::default()),
}),
notifier_write: Arc::new(Notifier {
to_notify: Mutex::new(Default::default()),
}),
is_shutdown: false,
is_acknowledged: false,
})
}
}
}
impl Default for MplexConfig {
#[inline]
fn default() -> MplexConfig |
}
/// Behaviour when the maximum length of the buffer is reached.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum MaxBufferBehaviour {
/// Produce an error on all the substreams.
CloseAll,
/// No new message will be read from the underlying connection if the buffer is full.
///
/// This can potentially introduce a deadlock if you are waiting for a message from a substream
/// before processing the messages received on another substream.
Block,
}
impl UpgradeInfo for MplexConfig {
type Info = &'static [u8];
type InfoIter = iter::Once<Self::Info>;
#[inline]
fn protocol_info(&self) -> Self::InfoIter {
iter::once(b"/mplex/6.7.0")
}
}
impl<C> InboundUpgrade<C> for MplexConfig
where
C: AsyncRead + AsyncWrite,
{
type Output = Multiplex<Negotiated<C>>;
type Error = IoError;
type Future = future::FutureResult<Self::Output, IoError>;
fn upgrade_inbound(self, socket: Negotiated<C>, _: Self::Info) -> Self::Future {
future::ok(self.upgrade(socket))
}
}
impl<C> OutboundUpgrade<C> for MplexConfig
where
C: AsyncRead + AsyncWrite,
{
type Output = Multiplex<Negotiated<C>>;
type Error = IoError;
type Future = future::FutureResult<Self::Output, IoError>;
fn upgrade_outbound(self, socket: Negotiated<C>, _: Self::Info) -> Self::Future {
future::ok(self.upgrade(socket))
}
}
/// Multiplexer. Implements the `StreamMuxer` trait.
pub struct Multiplex<C> {
inner: Mutex<MultiplexInner<C>>,
}
// Struct shared throughout the implementation.
struct MultiplexInner<C> {
// Error that happened earlier. Should poison any further attempt to use this `Multiplex`.
error: Result<(), IoError>,
// Underlying stream.
inner: executor::Spawn<Fuse<Framed<C, codec::Codec>>>,
/// The original configuration.
config: MplexConfig,
// Buffer of elements pulled from the stream but not processed yet.
buffer: Vec<codec::Elem>,
// List of Ids of opened substreams. Used to filter out messages that don't belong to any
// substream. Note that this is handled exclusively by `next_match`.
// The `Endpoint` value denotes who initiated the substream from our point of view
// (see note [StreamId]).
opened_substreams: FnvHashSet<(u32, Endpoint)>,
// Id of the next outgoing substream.
next_outbound_stream_id: u32,
/// List of tasks to notify when a read event happens on the underlying stream.
notifier_read: Arc<Notifier>,
/// List of tasks to notify when a write event happens on the underlying stream.
notifier_write: Arc<Notifier>,
/// If true, the connection has been shut down. We need to be careful not to accidentally
/// call `Sink::poll_complete` or `Sink::start_send` after `Sink::close`.
is_shutdown: bool,
/// If true, the remote has sent data to us.
is_acknowledged: bool,
}
struct Notifier {
/// List of tasks to notify.
to_notify: Mutex<FnvHashMap<usize, task::Task>>,
}
impl executor::Notify for Notifier {
fn notify(&self, _: usize) {
let tasks = mem::replace(&mut *self.to_notify.lock(), Default::default());
for (_, task) in tasks {
task.notify();
}
}
}
// TODO: replace with another system
static NEXT_TASK_ID: AtomicUsize = AtomicUsize::new(0);
task_local!{
static TASK_ID: usize = NEXT_TASK_ID.fetch_add(1, Ordering::Relaxed)
}
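// Illustrative note, not part of the original source: `TASK_ID` is initialised
// lazily, once per task, so every task polling this muxer gets a distinct key
// into the `to_notify` maps. A minimal sketch of the registration pattern used
// throughout this file (`notifier` stands for either notifier field):
//
//     TASK_ID.with(|&id| {
//         notifier.to_notify.lock().insert(id, task::current());
//     });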
// Note [StreamId]: mplex no longer partitions stream IDs into odd (for initiators) and
// even ones (for receivers). Streams are instead identified by a number and whether the flag
// is odd (for receivers) or even (for initiators). `Open` frames do not have a flag, but are
// sent unidirectionally. As a consequence, we need to remember if the stream was initiated by us
// or remotely and we store the information from our point of view, i.e. receiving an `Open` frame
// is stored as `(<u32>, Listener)`, sending an `Open` frame as `(<u32>, Dialer)`. Receiving
// a `Data` frame with flag `MessageReceiver` (= 1) means that we initiated the stream, so the
// entry has been stored as `(<u32>, Dialer)`. So, when looking up streams based on frames
// received, we have to invert the `Endpoint`, except for `Open`.
/// Processes elements in `inner` until one matching `filter` is found.
///
/// If `NotReady` is returned, the current task is scheduled for later, just like with any `Poll`.
/// `Ready` is almost always returned with the matched value. An error is returned if the stream reaches EOF.
fn next_match<C, F, O>(inner: &mut MultiplexInner<C>, mut filter: F) -> Poll<O, IoError>
where C: AsyncRead + AsyncWrite,
F: FnMut(&codec::Elem) -> Option<O>,
{
// If an error happened earlier, immediately return it.
if let Err(ref err) = inner.error {
return Err(IoError::new(err.kind(), err.to_string()));
}
if let Some((offset, out)) = inner.buffer.iter().enumerate().filter_map(|(n, v)| filter(v).map(|v| (n, v))).next() {
// The buffer was full and no longer is, so let's notify everything.
if inner.buffer.len() == inner.config.max_buffer_len {
executor::Notify::notify(&*inner.notifier_read, 0);
}
inner.buffer.remove(offset);
return Ok(Async::Ready(out));
}
loop {
// Check if we reached max buffer length first.
debug_assert!(inner.buffer.len() <= inner.config.max_buffer_len);
if inner.buffer.len() == inner.config.max_buffer_len {
debug!("Reached mplex maximum buffer length");
match inner.config.max_buffer_behaviour {
MaxBufferBehaviour::CloseAll => {
inner.error = Err(IoError::new(IoErrorKind::Other, "reached maximum buffer length"));
return Err(IoError::new(IoErrorKind::Other, "reached maximum buffer length"));
},
MaxBufferBehaviour::Block => {
inner.notifier_read.to_notify.lock().insert(TASK_ID.with(|&t| t), task::current());
return Ok(Async::NotReady);
},
}
}
let elem = match inner.inner.poll_stream_notify(&inner.notifier_read, 0) {
Ok(Async::Ready(Some(item))) => item,
Ok(Async::Ready(None)) => return Err(IoErrorKind::BrokenPipe.into()),
Ok(Async::NotReady) => {
inner.notifier_read.to_notify.lock().insert(TASK_ID.with(|&t| t), task::current());
return Ok(Async::NotReady);
},
Err(err) => {
let err2 = IoError::new(err.kind(), err.to_string());
inner.error = Err(err);
return Err(err2);
},
};
trace!("Received message: {:?}", elem);
inner.is_acknowledged = true;
// Handle substreams opening/closing.
match elem {
codec::Elem::Open { substream_id } => {
if !inner.opened_substreams.insert((substream_id, Endpoint::Listener)) {
debug!("Received open message for substream {} which was already open", substream_id)
}
}
codec::Elem::Close { substream_id, endpoint, .. } | codec::Elem::Reset { substream_id, endpoint, .. } => {
inner.opened_substreams.remove(&(substream_id, !endpoint));
}
_ => ()
}
if let Some(out) = filter(&elem) {
return Ok(Async::Ready(out));
} else {
let endpoint = elem.endpoint().unwrap_or(Endpoint::Dialer);
if inner.opened_substreams.contains(&(elem.substream_id(), !endpoint)) || elem.is_open_msg() {
inner.buffer.push(elem);
} else if !elem.is_close_or_reset_msg() {
debug!("Ignored message {:?} because the substream wasn't open", elem);
}
}
}
}
// Small convenience function that tries to write `elem` to the stream.
fn poll_send<C>(inner: &mut MultiplexInner<C>, elem: codec::Elem) -> Poll<(), IoError>
where C: AsyncRead + AsyncWrite
{
if inner.is_shutdown {
return Err(IoError::new(IoErrorKind::Other, "connection is shut down"))
}
match inner.inner.start_send_notify(elem, &inner.notifier_write, 0) {
Ok(AsyncSink::Ready) => {
Ok(Async::Ready(()))
},
Ok(AsyncSink::NotReady(_)) => {
inner.notifier_write.to_notify.lock().insert(TASK_ID.with(|&t| t), task::current());
Ok(Async::NotReady)
},
Err(err) => Err(err)
}
}
impl<C> StreamMuxer for Multiplex<C>
where C: AsyncRead + AsyncWrite
{
type Substream = Substream;
type OutboundSubstream = OutboundSubstream;
type Error = IoError;
fn poll_inbound(&self) -> Poll<Self::Substream, IoError> {
let mut inner = self.inner.lock();
if inner.opened_substreams.len() >= inner.config.max_substreams {
debug!("Refused substream; reached maximum number of substreams {}", inner.config.max_substreams);
return Err(IoError::new(IoErrorKind::ConnectionRefused,
"exceeded maximum number of open substreams"));
}
let num = try_ready!(next_match(&mut inner, |elem| {
match elem {
codec::Elem::Open { substream_id } => Some(*substream_id),
_ => None,
}
}));
debug!("Successfully opened inbound substream {}", num);
Ok(Async::Ready(Substream {
current_data: Bytes::new(),
num,
endpoint: Endpoint::Listener,
local_open: true,
remote_open: true,
}))
}
fn open_outbound(&self) -> Self::OutboundSubstream {
let mut inner = self.inner.lock();
// Assign a substream ID now.
let substream_id = {
let n = inner.next_outbound_stream_id;
inner.next_outbound_stream_id = inner.next_outbound_stream_id.checked_add(1)
.expect("Mplex substream ID overflowed");
n
};
inner.opened_substreams.insert((substream_id, Endpoint::Dialer));
OutboundSubstream {
num: substream_id,
state: OutboundSubstreamState::SendElem(codec::Elem::Open { substream_id }),
}
}
fn poll_outbound(&self, substream: &mut Self::OutboundSubstream) -> Poll<Self::Substream, IoError> {
loop {
let mut inner = self.inner.lock();
let polling = match substream.state {
OutboundSubstreamState::SendElem(ref elem) => {
poll_send(&mut inner, elem.clone())
},
OutboundSubstreamState::Flush => {
if inner.is_shutdown {
return Err(IoError::new(IoErrorKind::Other, "connection is shut down"))
}
let inner = &mut *inner; // Avoids borrow errors
inner.inner.poll_flush_notify(&inner.notifier_write, 0)
},
OutboundSubstreamState::Done => {
panic!("Polling outbound substream after it's been succesfully open");
},
};
match polling {
Ok(Async::Ready(())) => (),
Ok(Async::NotReady) => {
inner.notifier_write.to_notify.lock().insert(TASK_ID.with(|&t| t), task::current());
return Ok(Async::NotReady)
},
Err(err) => {
debug!("Failed to open outbound substream {}", substream.num);
inner.buffer.retain(|elem| {
elem.substream_id() != substream.num || elem.endpoint() == Some(Endpoint::Dialer)
});
return Err(err)
},
};
drop(inner);
// Going to next step.
match substream.state {
OutboundSubstreamState::SendElem(_) => {
substream.state = OutboundSubstreamState::Flush;
},
OutboundSubstreamState::Flush => {
debug!("Successfully opened outbound substream {}", substream.num);
substream.state = OutboundSubstreamState::Done;
return Ok(Async::Ready(Substream {
num: substream.num,
current_data: Bytes::new(),
endpoint: Endpoint::Dialer,
local_open: true,
remote_open: true,
}));
},
OutboundSubstreamState::Done => unreachable!(),
}
}
}
#[inline]
fn destroy_outbound(&self, _substream: Self::OutboundSubstream) {
// Nothing to do.
}
unsafe fn prepare_uninitialized_buffer(&self, _: &mut [u8]) -> bool {
false
}
fn read_substream(&self, substream: &mut Self::Substream, buf: &mut [u8]) -> Poll<usize, IoError> {
loop {
// First, transfer from `current_data`.
if !substream.current_data.is_empty() {
let len = cmp::min(substream.current_data.len(), buf.len());
buf[..len].copy_from_slice(&substream.current_data.split_to(len));
return Ok(Async::Ready(len));
}
// If the remote writing side is closed, return EOF.
if !substream.remote_open {
return Ok(Async::Ready(0));
}
// Try to find a packet of data in the buffer.
let mut inner = self.inner.lock();
let next_data_poll = next_match(&mut inner, |elem| {
match elem {
codec::Elem::Data { substream_id, endpoint, data, .. }
if *substream_id == substream.num && *endpoint != substream.endpoint => // see note [StreamId]
{
Some(Some(data.clone()))
}
codec::Elem::Close { substream_id, endpoint }
if *substream_id == substream.num && *endpoint != substream.endpoint => // see note [StreamId]
{
Some(None)
}
_ => None
}
});
// We're in a loop, so all we need to do is set `substream.current_data` to the data we
// just read and wait for the next iteration.
match next_data_poll? {
Async::Ready(Some(data)) => substream.current_data = data,
Async::Ready(None) => {
substream.remote_open = false;
return Ok(Async::Ready(0));
},
Async::NotReady => {
// There was no data packet in the buffer about this substream; maybe it's
// because it has been closed.
if inner.opened_substreams.contains(&(substream.num, substream.endpoint)) {
return Ok(Async::NotReady)
} else {
return Ok(Async::Ready(0))
}
},
}
}
}
fn write_substream(&self, substream: &mut Self::Substream, buf: &[u8]) -> Poll<usize, IoError> {
if !substream.local_open {
return Err(IoErrorKind::BrokenPipe.into());
}
let mut inner = self.inner.lock();
let to_write = cmp::min(buf.len(), inner.config.split_send_size);
let elem = codec::Elem::Data {
substream_id: substream.num,
data: From::from(&buf[..to_write]),
endpoint: substream.endpoint,
};
match poll_send(&mut inner, elem)? {
| {
MplexConfig {
max_substreams: 128,
max_buffer_len: 4096,
max_buffer_behaviour: MaxBufferBehaviour::CloseAll,
split_send_size: 1024,
}
} | identifier_body |
lib.rs | /// When sending data, split it into frames whose maximum size is this value
/// (max 1MByte, as per the Mplex spec).
split_send_size: usize,
}
impl MplexConfig {
/// Builds the default configuration.
#[inline]
pub fn new() -> MplexConfig {
Default::default()
}
/// Sets the maximum number of simultaneously opened substreams, after which an error is
/// generated and the connection closes.
///
/// A limit is necessary in order to avoid DoS attacks.
#[inline]
pub fn max_substreams(&mut self, max: usize) -> &mut Self {
self.max_substreams = max;
self
}
/// Sets the maximum number of pending incoming messages.
///
/// A limit is necessary in order to avoid DoS attacks.
#[inline]
pub fn max_buffer_len(&mut self, max: usize) -> &mut Self {
self.max_buffer_len = max;
self
}
/// Sets the behaviour when the maximum buffer length has been reached.
///
/// See the documentation of `MaxBufferBehaviour`.
#[inline]
pub fn max_buffer_len_behaviour(&mut self, behaviour: MaxBufferBehaviour) -> &mut Self {
self.max_buffer_behaviour = behaviour;
self
}
/// Sets the frame size used when sending data. Capped at 1Mbyte as per the
/// Mplex spec.
pub fn | (&mut self, size: usize) -> &mut Self {
let size = cmp::min(size, codec::MAX_FRAME_SIZE);
self.split_send_size = size;
self
}
#[inline]
fn upgrade<C>(self, i: C) -> Multiplex<C>
where
C: AsyncRead + AsyncWrite
{
let max_buffer_len = self.max_buffer_len;
Multiplex {
inner: Mutex::new(MultiplexInner {
error: Ok(()),
inner: executor::spawn(Framed::new(i, codec::Codec::new()).fuse()),
config: self,
buffer: Vec::with_capacity(cmp::min(max_buffer_len, 512)),
opened_substreams: Default::default(),
next_outbound_stream_id: 0,
notifier_read: Arc::new(Notifier {
to_notify: Mutex::new(Default::default()),
}),
notifier_write: Arc::new(Notifier {
to_notify: Mutex::new(Default::default()),
}),
is_shutdown: false,
is_acknowledged: false,
})
}
}
}
impl Default for MplexConfig {
#[inline]
fn default() -> MplexConfig {
MplexConfig {
max_substreams: 128,
max_buffer_len: 4096,
max_buffer_behaviour: MaxBufferBehaviour::CloseAll,
split_send_size: 1024,
}
}
}
/// Behaviour when the maximum length of the buffer is reached.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum MaxBufferBehaviour {
/// Produce an error on all the substreams.
CloseAll,
/// No new message will be read from the underlying connection if the buffer is full.
///
/// This can potentially introduce a deadlock if you are waiting for a message from a substream
/// before processing the messages received on another substream.
Block,
}
impl UpgradeInfo for MplexConfig {
type Info = &'static [u8];
type InfoIter = iter::Once<Self::Info>;
#[inline]
fn protocol_info(&self) -> Self::InfoIter {
iter::once(b"/mplex/6.7.0")
}
}
impl<C> InboundUpgrade<C> for MplexConfig
where
C: AsyncRead + AsyncWrite,
{
type Output = Multiplex<Negotiated<C>>;
type Error = IoError;
type Future = future::FutureResult<Self::Output, IoError>;
fn upgrade_inbound(self, socket: Negotiated<C>, _: Self::Info) -> Self::Future {
future::ok(self.upgrade(socket))
}
}
impl<C> OutboundUpgrade<C> for MplexConfig
where
C: AsyncRead + AsyncWrite,
{
type Output = Multiplex<Negotiated<C>>;
type Error = IoError;
type Future = future::FutureResult<Self::Output, IoError>;
fn upgrade_outbound(self, socket: Negotiated<C>, _: Self::Info) -> Self::Future {
future::ok(self.upgrade(socket))
}
}
/// Multiplexer. Implements the `StreamMuxer` trait.
pub struct Multiplex<C> {
inner: Mutex<MultiplexInner<C>>,
}
// Struct shared throughout the implementation.
struct MultiplexInner<C> {
// Error that happened earlier. Should poison any further attempt to use this `Multiplex`.
error: Result<(), IoError>,
// Underlying stream.
inner: executor::Spawn<Fuse<Framed<C, codec::Codec>>>,
/// The original configuration.
config: MplexConfig,
// Buffer of elements pulled from the stream but not processed yet.
buffer: Vec<codec::Elem>,
// List of Ids of opened substreams. Used to filter out messages that don't belong to any
// substream. Note that this is handled exclusively by `next_match`.
// The `Endpoint` value denotes who initiated the substream from our point of view
// (see note [StreamId]).
opened_substreams: FnvHashSet<(u32, Endpoint)>,
// Id of the next outgoing substream.
next_outbound_stream_id: u32,
/// List of tasks to notify when a read event happens on the underlying stream.
notifier_read: Arc<Notifier>,
/// List of tasks to notify when a write event happens on the underlying stream.
notifier_write: Arc<Notifier>,
/// If true, the connection has been shut down. We need to be careful not to accidentally
/// call `Sink::poll_complete` or `Sink::start_send` after `Sink::close`.
is_shutdown: bool,
/// If true, the remote has sent data to us.
is_acknowledged: bool,
}
struct Notifier {
/// List of tasks to notify.
to_notify: Mutex<FnvHashMap<usize, task::Task>>,
}
impl executor::Notify for Notifier {
fn notify(&self, _: usize) {
let tasks = mem::replace(&mut *self.to_notify.lock(), Default::default());
for (_, task) in tasks {
task.notify();
}
}
}
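// Illustrative note, not part of the original source: `notify` swaps the map
// out while the lock is held and wakes the tasks only after the guard has been
// dropped, so a task that re-registers itself from inside `Task::notify` does
// not deadlock on `to_notify`. Rough shape of a wake-up:
//
//     let notifier = Arc::new(Notifier { to_notify: Mutex::new(Default::default()) });
//     notifier.to_notify.lock().insert(0, task::current()); // register interest
//     executor::Notify::notify(&*notifier, 0);              // drains the map and wakes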
// TODO: replace with another system
static NEXT_TASK_ID: AtomicUsize = AtomicUsize::new(0);
task_local!{
static TASK_ID: usize = NEXT_TASK_ID.fetch_add(1, Ordering::Relaxed)
}
// Note [StreamId]: mplex no longer partitions stream IDs into odd (for initiators) and
// even ones (for receivers). Streams are instead identified by a number and whether the flag
// is odd (for receivers) or even (for initiators). `Open` frames do not have a flag, but are
// sent unidirectionally. As a consequence, we need to remember if the stream was initiated by us
// or remotely and we store the information from our point of view, i.e. receiving an `Open` frame
// is stored as `(<u32>, Listener)`, sending an `Open` frame as `(<u32>, Dialer)`. Receiving
// a `Data` frame with flag `MessageReceiver` (= 1) means that we initiated the stream, so the
// entry has been stored as `(<u32>, Dialer)`. So, when looking up streams based on frames
// received, we have to invert the `Endpoint`, except for `Open`.
/// Processes elements in `inner` until one matching `filter` is found.
///
/// If `NotReady` is returned, the current task is scheduled for later, just like with any `Poll`.
/// `Ready` is almost always returned with the matched value. An error is returned if the stream reaches EOF.
fn next_match<C, F, O>(inner: &mut MultiplexInner<C>, mut filter: F) -> Poll<O, IoError>
where C: AsyncRead + AsyncWrite,
F: FnMut(&codec::Elem) -> Option<O>,
{
// If an error happened earlier, immediately return it.
if let Err(ref err) = inner.error {
return Err(IoError::new(err.kind(), err.to_string()));
}
if let Some((offset, out)) = inner.buffer.iter().enumerate().filter_map(|(n, v)| filter(v).map(|v| (n, v))).next() {
// The buffer was full and no longer is, so let's notify everything.
if inner.buffer.len() == inner.config.max_buffer_len {
executor::Notify::notify(&*inner.notifier_read, 0);
}
inner.buffer.remove(offset);
return Ok(Async::Ready(out));
}
loop {
// Check if we reached max buffer length first.
debug_assert!(inner.buffer.len() <= inner.config.max_buffer_len);
if inner.buffer.len() == inner.config.max_buffer_len {
debug!("Reached mplex maximum buffer length");
match inner.config.max_buffer_behaviour {
MaxBufferBehaviour::CloseAll => {
inner.error = Err(IoError::new(IoErrorKind::Other, "reached maximum buffer length"));
return Err(IoError::new(IoErrorKind::Other, "reached maximum buffer length"));
},
MaxBufferBehaviour::Block => {
inner.notifier_read.to_notify.lock().insert(TASK_ID.with(|&t| t), task::current());
return Ok(Async::NotReady);
},
}
}
let elem = match inner.inner.poll_stream_notify(&inner.notifier_read, 0) {
Ok(Async::Ready(Some(item))) => item,
Ok(Async::Ready(None)) => return Err(IoErrorKind::BrokenPipe.into()),
Ok(Async::NotReady) => {
inner.notifier_read.to_notify.lock().insert(TASK_ID.with(|&t| t), task::current());
return Ok(Async::NotReady);
},
Err(err) => {
let err2 = IoError::new(err.kind(), err.to_string());
inner.error = Err(err);
return Err(err2);
},
};
trace!("Received message: {:?}", elem);
inner.is_acknowledged = true;
// Handle substreams opening/closing.
match elem {
codec::Elem::Open { substream_id } => {
if !inner.opened_substreams.insert((substream_id, Endpoint::Listener)) {
debug!("Received open message for substream {} which was already open", substream_id)
}
}
codec::Elem::Close { substream_id, endpoint, .. } | codec::Elem::Reset { substream_id, endpoint, .. } => {
inner.opened_substreams.remove(&(substream_id, !endpoint));
}
_ => ()
}
if let Some(out) = filter(&elem) {
return Ok(Async::Ready(out));
} else {
let endpoint = elem.endpoint().unwrap_or(Endpoint::Dialer);
if inner.opened_substreams.contains(&(elem.substream_id(), !endpoint)) || elem.is_open_msg() {
inner.buffer.push(elem);
} else if !elem.is_close_or_reset_msg() {
debug!("Ignored message {:?} because the substream wasn't open", elem);
}
}
}
}
// Small convenience function that tries to write `elem` to the stream.
fn poll_send<C>(inner: &mut MultiplexInner<C>, elem: codec::Elem) -> Poll<(), IoError>
where C: AsyncRead + AsyncWrite
{
if inner.is_shutdown {
return Err(IoError::new(IoErrorKind::Other, "connection is shut down"))
}
match inner.inner.start_send_notify(elem, &inner.notifier_write, 0) {
Ok(AsyncSink::Ready) => {
Ok(Async::Ready(()))
},
Ok(AsyncSink::NotReady(_)) => {
inner.notifier_write.to_notify.lock().insert(TASK_ID.with(|&t| t), task::current());
Ok(Async::NotReady)
},
Err(err) => Err(err)
}
}
impl<C> StreamMuxer for Multiplex<C>
where C: AsyncRead + AsyncWrite
{
type Substream = Substream;
type OutboundSubstream = OutboundSubstream;
type Error = IoError;
fn poll_inbound(&self) -> Poll<Self::Substream, IoError> {
let mut inner = self.inner.lock();
if inner.opened_substreams.len() >= inner.config.max_substreams {
debug!("Refused substream; reached maximum number of substreams {}", inner.config.max_substreams);
return Err(IoError::new(IoErrorKind::ConnectionRefused,
"exceeded maximum number of open substreams"));
}
let num = try_ready!(next_match(&mut inner, |elem| {
match elem {
codec::Elem::Open { substream_id } => Some(*substream_id),
_ => None,
}
}));
debug!("Successfully opened inbound substream {}", num);
Ok(Async::Ready(Substream {
current_data: Bytes::new(),
num,
endpoint: Endpoint::Listener,
local_open: true,
remote_open: true,
}))
}
fn open_outbound(&self) -> Self::OutboundSubstream {
let mut inner = self.inner.lock();
// Assign a substream ID now.
let substream_id = {
let n = inner.next_outbound_stream_id;
inner.next_outbound_stream_id = inner.next_outbound_stream_id.checked_add(1)
.expect("Mplex substream ID overflowed");
n
};
inner.opened_substreams.insert((substream_id, Endpoint::Dialer));
OutboundSubstream {
num: substream_id,
state: OutboundSubstreamState::SendElem(codec::Elem::Open { substream_id }),
}
}
fn poll_outbound(&self, substream: &mut Self::OutboundSubstream) -> Poll<Self::Substream, IoError> {
loop {
let mut inner = self.inner.lock();
let polling = match substream.state {
OutboundSubstreamState::SendElem(ref elem) => {
poll_send(&mut inner, elem.clone())
},
OutboundSubstreamState::Flush => {
if inner.is_shutdown {
return Err(IoError::new(IoErrorKind::Other, "connection is shut down"))
}
let inner = &mut *inner; // Avoids borrow errors
inner.inner.poll_flush_notify(&inner.notifier_write, 0)
},
OutboundSubstreamState::Done => {
panic!("Polling outbound substream after it's been succesfully open");
},
};
match polling {
Ok(Async::Ready(())) => (),
Ok(Async::NotReady) => {
inner.notifier_write.to_notify.lock().insert(TASK_ID.with(|&t| t), task::current());
return Ok(Async::NotReady)
},
Err(err) => {
debug!("Failed to open outbound substream {}", substream.num);
inner.buffer.retain(|elem| {
elem.substream_id() != substream.num || elem.endpoint() == Some(Endpoint::Dialer)
});
return Err(err)
},
};
drop(inner);
// Going to next step.
match substream.state {
OutboundSubstreamState::SendElem(_) => {
substream.state = OutboundSubstreamState::Flush;
},
OutboundSubstreamState::Flush => {
debug!("Successfully opened outbound substream {}", substream.num);
substream.state = OutboundSubstreamState::Done;
return Ok(Async::Ready(Substream {
num: substream.num,
current_data: Bytes::new(),
endpoint: Endpoint::Dialer,
local_open: true,
remote_open: true,
}));
},
OutboundSubstreamState::Done => unreachable!(),
}
}
}
#[inline]
fn destroy_outbound(&self, _substream: Self::OutboundSubstream) {
// Nothing to do.
}
unsafe fn prepare_uninitialized_buffer(&self, _: &mut [u8]) -> bool {
false
}
fn read_substream(&self, substream: &mut Self::Substream, buf: &mut [u8]) -> Poll<usize, IoError> {
loop {
// First, transfer from `current_data`.
if !substream.current_data.is_empty() {
let len = cmp::min(substream.current_data.len(), buf.len());
buf[..len].copy_from_slice(&substream.current_data.split_to(len));
return Ok(Async::Ready(len));
}
// If the remote writing side is closed, return EOF.
if !substream.remote_open {
return Ok(Async::Ready(0));
}
// Try to find a packet of data in the buffer.
let mut inner = self.inner.lock();
let next_data_poll = next_match(&mut inner, |elem| {
match elem {
codec::Elem::Data { substream_id, endpoint, data, .. }
if *substream_id == substream.num && *endpoint != substream.endpoint => // see note [StreamId]
{
Some(Some(data.clone()))
}
codec::Elem::Close { substream_id, endpoint }
if *substream_id == substream.num && *endpoint != substream.endpoint => // see note [StreamId]
{
Some(None)
}
_ => None
}
});
// We're in a loop, so all we need to do is set `substream.current_data` to the data we
// just read and wait for the next iteration.
match next_data_poll? {
Async::Ready(Some(data)) => substream.current_data = data,
Async::Ready(None) => {
substream.remote_open = false;
return Ok(Async::Ready(0));
},
Async::NotReady => {
// There was no data packet in the buffer about this substream; maybe it's
// because it has been closed.
if inner.opened_substreams.contains(&(substream.num, substream.endpoint)) {
return Ok(Async::NotReady)
} else {
return Ok(Async::Ready(0))
}
},
}
}
}
fn write_substream(&self, substream: &mut Self::Substream, buf: &[u8]) -> Poll<usize, IoError> {
if !substream.local_open {
return Err(IoErrorKind::BrokenPipe.into());
}
let mut inner = self.inner.lock();
let to_write = cmp::min(buf.len(), inner.config.split_send_size);
let elem = codec::Elem::Data {
substream_id: substream.num,
data: From::from(&buf[..to_write]),
endpoint: substream.endpoint,
};
match poll_send(&mut inner, elem)? {
| split_send_size | identifier_name |
lib.rs |
/// When sending data, split it into frames whose maximum size is this value
/// (max 1MByte, as per the Mplex spec).
split_send_size: usize,
}
impl MplexConfig {
/// Builds the default configuration.
#[inline]
pub fn new() -> MplexConfig {
Default::default()
}
/// Sets the maximum number of simultaneously opened substreams, after which an error is
/// generated and the connection closes.
///
/// A limit is necessary in order to avoid DoS attacks.
#[inline]
pub fn max_substreams(&mut self, max: usize) -> &mut Self {
self.max_substreams = max;
self
}
/// Sets the maximum number of pending incoming messages.
///
/// A limit is necessary in order to avoid DoS attacks.
#[inline]
pub fn max_buffer_len(&mut self, max: usize) -> &mut Self {
self.max_buffer_len = max;
self
}
/// Sets the behaviour when the maximum buffer length has been reached.
///
/// See the documentation of `MaxBufferBehaviour`.
#[inline]
pub fn max_buffer_len_behaviour(&mut self, behaviour: MaxBufferBehaviour) -> &mut Self {
self.max_buffer_behaviour = behaviour;
self
}
/// Sets the frame size used when sending data. Capped at 1Mbyte as per the
/// Mplex spec.
pub fn split_send_size(&mut self, size: usize) -> &mut Self {
let size = cmp::min(size, codec::MAX_FRAME_SIZE);
self.split_send_size = size;
self
}
#[inline]
fn upgrade<C>(self, i: C) -> Multiplex<C>
where
C: AsyncRead + AsyncWrite
{
let max_buffer_len = self.max_buffer_len;
Multiplex {
inner: Mutex::new(MultiplexInner {
error: Ok(()),
inner: executor::spawn(Framed::new(i, codec::Codec::new()).fuse()),
config: self,
buffer: Vec::with_capacity(cmp::min(max_buffer_len, 512)),
opened_substreams: Default::default(),
next_outbound_stream_id: 0,
notifier_read: Arc::new(Notifier {
to_notify: Mutex::new(Default::default()),
}),
notifier_write: Arc::new(Notifier {
to_notify: Mutex::new(Default::default()),
}),
is_shutdown: false,
is_acknowledged: false,
})
}
}
}
impl Default for MplexConfig {
#[inline]
fn default() -> MplexConfig {
MplexConfig {
max_substreams: 128,
max_buffer_len: 4096,
max_buffer_behaviour: MaxBufferBehaviour::CloseAll,
split_send_size: 1024,
}
}
}
/// Behaviour when the maximum length of the buffer is reached.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum MaxBufferBehaviour {
/// Produce an error on all the substreams.
CloseAll,
/// No new message will be read from the underlying connection if the buffer is full.
///
/// This can potentially introduce a deadlock if you are waiting for a message from a substream
/// before processing the messages received on another substream.
Block,
}
impl UpgradeInfo for MplexConfig {
type Info = &'static [u8];
type InfoIter = iter::Once<Self::Info>;
#[inline]
fn protocol_info(&self) -> Self::InfoIter {
iter::once(b"/mplex/6.7.0")
}
}
impl<C> InboundUpgrade<C> for MplexConfig
where
C: AsyncRead + AsyncWrite,
{
type Output = Multiplex<Negotiated<C>>;
type Error = IoError;
type Future = future::FutureResult<Self::Output, IoError>;
fn upgrade_inbound(self, socket: Negotiated<C>, _: Self::Info) -> Self::Future {
future::ok(self.upgrade(socket))
}
}
impl<C> OutboundUpgrade<C> for MplexConfig
where
C: AsyncRead + AsyncWrite,
{
type Output = Multiplex<Negotiated<C>>;
type Error = IoError;
type Future = future::FutureResult<Self::Output, IoError>;
fn upgrade_outbound(self, socket: Negotiated<C>, _: Self::Info) -> Self::Future {
future::ok(self.upgrade(socket))
}
}
/// Multiplexer. Implements the `StreamMuxer` trait.
pub struct Multiplex<C> {
inner: Mutex<MultiplexInner<C>>,
}
// Struct shared throughout the implementation.
struct MultiplexInner<C> {
// Error that happened earlier. Should poison any further attempt to use this `Multiplex`.
error: Result<(), IoError>,
// Underlying stream.
inner: executor::Spawn<Fuse<Framed<C, codec::Codec>>>,
/// The original configuration.
config: MplexConfig,
// Buffer of elements pulled from the stream but not processed yet.
buffer: Vec<codec::Elem>,
// List of Ids of opened substreams. Used to filter out messages that don't belong to any
// substream. Note that this is handled exclusively by `next_match`.
// The `Endpoint` value denotes who initiated the substream from our point of view
// (see note [StreamId]).
opened_substreams: FnvHashSet<(u32, Endpoint)>,
// Id of the next outgoing substream.
next_outbound_stream_id: u32,
/// List of tasks to notify when a read event happens on the underlying stream.
notifier_read: Arc<Notifier>,
/// List of tasks to notify when a write event happens on the underlying stream.
notifier_write: Arc<Notifier>,
/// If true, the connection has been shut down. We need to be careful not to accidentally
/// call `Sink::poll_complete` or `Sink::start_send` after `Sink::close`.
is_shutdown: bool,
/// If true, the remote has sent data to us.
is_acknowledged: bool,
}
struct Notifier {
/// List of tasks to notify.
to_notify: Mutex<FnvHashMap<usize, task::Task>>,
}
impl executor::Notify for Notifier {
fn notify(&self, _: usize) {
let tasks = mem::replace(&mut *self.to_notify.lock(), Default::default());
for (_, task) in tasks {
task.notify();
}
}
}
// TODO: replace with another system
static NEXT_TASK_ID: AtomicUsize = AtomicUsize::new(0);
task_local!{
static TASK_ID: usize = NEXT_TASK_ID.fetch_add(1, Ordering::Relaxed)
}
// Note [StreamId]: mplex no longer partitions stream IDs into odd (for initiators) and
// even ones (for receivers). Streams are instead identified by a number and whether the flag
// is odd (for receivers) or even (for initiators). `Open` frames do not have a flag, but are
// sent unidirectionally. As a consequence, we need to remember if the stream was initiated by us
// or remotely and we store the information from our point of view, i.e. receiving an `Open` frame
// is stored as `(<u32>, Listener)`, sending an `Open` frame as `(<u32>, Dialer)`. Receiving
// a `Data` frame with flag `MessageReceiver` (= 1) means that we initiated the stream, so the
// entry has been stored as `(<u32>, Dialer)`. So, when looking up streams based on frames
// received, we have to invert the `Endpoint`, except for `Open`.
/// Processes elements in `inner` until one matching `filter` is found.
///
/// If `NotReady` is returned, the current task is scheduled for later, just like with any `Poll`.
/// `Ready` is almost always returned with the matched value. An error is returned if the stream reaches EOF.
fn next_match<C, F, O>(inner: &mut MultiplexInner<C>, mut filter: F) -> Poll<O, IoError>
where C: AsyncRead + AsyncWrite,
F: FnMut(&codec::Elem) -> Option<O>,
{
// If an error happened earlier, immediately return it.
if let Err(ref err) = inner.error {
return Err(IoError::new(err.kind(), err.to_string()));
}
if let Some((offset, out)) = inner.buffer.iter().enumerate().filter_map(|(n, v)| filter(v).map(|v| (n, v))).next() {
// The buffer was full and no longer is, so let's notify everything.
if inner.buffer.len() == inner.config.max_buffer_len {
executor::Notify::notify(&*inner.notifier_read, 0);
}
inner.buffer.remove(offset);
return Ok(Async::Ready(out));
}
loop {
// Check if we reached max buffer length first.
debug_assert!(inner.buffer.len() <= inner.config.max_buffer_len);
if inner.buffer.len() == inner.config.max_buffer_len {
debug!("Reached mplex maximum buffer length");
match inner.config.max_buffer_behaviour {
MaxBufferBehaviour::CloseAll => {
inner.error = Err(IoError::new(IoErrorKind::Other, "reached maximum buffer length"));
return Err(IoError::new(IoErrorKind::Other, "reached maximum buffer length"));
},
MaxBufferBehaviour::Block => {
inner.notifier_read.to_notify.lock().insert(TASK_ID.with(|&t| t), task::current());
return Ok(Async::NotReady);
},
}
}
let elem = match inner.inner.poll_stream_notify(&inner.notifier_read, 0) {
Ok(Async::Ready(Some(item))) => item,
Ok(Async::Ready(None)) => return Err(IoErrorKind::BrokenPipe.into()),
Ok(Async::NotReady) => {
inner.notifier_read.to_notify.lock().insert(TASK_ID.with(|&t| t), task::current());
return Ok(Async::NotReady);
},
Err(err) => {
let err2 = IoError::new(err.kind(), err.to_string());
inner.error = Err(err);
return Err(err2);
},
};
trace!("Received message: {:?}", elem);
inner.is_acknowledged = true;
// Handle substreams opening/closing.
match elem {
codec::Elem::Open { substream_id } => {
if !inner.opened_substreams.insert((substream_id, Endpoint::Listener)) {
debug!("Received open message for substream {} which was already open", substream_id)
}
}
codec::Elem::Close { substream_id, endpoint, .. } | codec::Elem::Reset { substream_id, endpoint, .. } => {
inner.opened_substreams.remove(&(substream_id, !endpoint));
}
_ => ()
}
if let Some(out) = filter(&elem) {
return Ok(Async::Ready(out));
} else {
let endpoint = elem.endpoint().unwrap_or(Endpoint::Dialer);
if inner.opened_substreams.contains(&(elem.substream_id(), !endpoint)) || elem.is_open_msg() {
inner.buffer.push(elem);
} else if !elem.is_close_or_reset_msg() {
debug!("Ignored message {:?} because the substream wasn't open", elem);
}
}
}
} | // Small convenience function that tries to write `elem` to the stream.
fn poll_send<C>(inner: &mut MultiplexInner<C>, elem: codec::Elem) -> Poll<(), IoError>
where C: AsyncRead + AsyncWrite
{
if inner.is_shutdown {
return Err(IoError::new(IoErrorKind::Other, "connection is shut down"))
}
match inner.inner.start_send_notify(elem, &inner.notifier_write, 0) {
Ok(AsyncSink::Ready) => {
Ok(Async::Ready(()))
},
Ok(AsyncSink::NotReady(_)) => {
inner.notifier_write.to_notify.lock().insert(TASK_ID.with(|&t| t), task::current());
Ok(Async::NotReady)
},
Err(err) => Err(err)
}
}
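// Illustrative sketch, not part of the original source: `poll_send` follows
// the futures-0.1 sink contract, so callers typically match on the result as
// below (`inner` and `elem` assumed in scope; on `NotReady` the current task
// has already been registered for a write wake-up):
//
//     match poll_send(&mut inner, elem)? {
//         Async::Ready(()) => { /* frame queued; flush separately */ }
//         Async::NotReady => return Ok(Async::NotReady),
//     }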
impl<C> StreamMuxer for Multiplex<C>
where C: AsyncRead + AsyncWrite
{
type Substream = Substream;
type OutboundSubstream = OutboundSubstream;
type Error = IoError;
fn poll_inbound(&self) -> Poll<Self::Substream, IoError> {
let mut inner = self.inner.lock();
if inner.opened_substreams.len() >= inner.config.max_substreams {
debug!("Refused substream; reached maximum number of substreams {}", inner.config.max_substreams);
return Err(IoError::new(IoErrorKind::ConnectionRefused,
"exceeded maximum number of open substreams"));
}
let num = try_ready!(next_match(&mut inner, |elem| {
match elem {
codec::Elem::Open { substream_id } => Some(*substream_id),
_ => None,
}
}));
debug!("Successfully opened inbound substream {}", num);
Ok(Async::Ready(Substream {
current_data: Bytes::new(),
num,
endpoint: Endpoint::Listener,
local_open: true,
remote_open: true,
}))
}
fn open_outbound(&self) -> Self::OutboundSubstream {
let mut inner = self.inner.lock();
// Assign a substream ID now.
let substream_id = {
let n = inner.next_outbound_stream_id;
inner.next_outbound_stream_id = inner.next_outbound_stream_id.checked_add(1)
.expect("Mplex substream ID overflowed");
n
};
inner.opened_substreams.insert((substream_id, Endpoint::Dialer));
OutboundSubstream {
num: substream_id,
state: OutboundSubstreamState::SendElem(codec::Elem::Open { substream_id }),
}
}
fn poll_outbound(&self, substream: &mut Self::OutboundSubstream) -> Poll<Self::Substream, IoError> {
loop {
let mut inner = self.inner.lock();
let polling = match substream.state {
OutboundSubstreamState::SendElem(ref elem) => {
poll_send(&mut inner, elem.clone())
},
OutboundSubstreamState::Flush => {
if inner.is_shutdown {
return Err(IoError::new(IoErrorKind::Other, "connection is shut down"))
}
let inner = &mut *inner; // Avoids borrow errors
inner.inner.poll_flush_notify(&inner.notifier_write, 0)
},
OutboundSubstreamState::Done => {
panic!("Polling outbound substream after it's been succesfully open");
},
};
match polling {
Ok(Async::Ready(())) => (),
Ok(Async::NotReady) => {
inner.notifier_write.to_notify.lock().insert(TASK_ID.with(|&t| t), task::current());
return Ok(Async::NotReady)
},
Err(err) => {
debug!("Failed to open outbound substream {}", substream.num);
inner.buffer.retain(|elem| {
elem.substream_id() != substream.num || elem.endpoint() == Some(Endpoint::Dialer)
});
return Err(err)
},
};
drop(inner);
// Going to next step.
match substream.state {
OutboundSubstreamState::SendElem(_) => {
substream.state = OutboundSubstreamState::Flush;
},
OutboundSubstreamState::Flush => {
debug!("Successfully opened outbound substream {}", substream.num);
substream.state = OutboundSubstreamState::Done;
return Ok(Async::Ready(Substream {
num: substream.num,
current_data: Bytes::new(),
endpoint: Endpoint::Dialer,
local_open: true,
remote_open: true,
}));
},
OutboundSubstreamState::Done => unreachable!(),
}
}
}
#[inline]
fn destroy_outbound(&self, _substream: Self::OutboundSubstream) {
// Nothing to do.
}
unsafe fn prepare_uninitialized_buffer(&self, _: &mut [u8]) -> bool {
false
}
fn read_substream(&self, substream: &mut Self::Substream, buf: &mut [u8]) -> Poll<usize, IoError> {
loop {
// First, transfer from `current_data`.
if !substream.current_data.is_empty() {
let len = cmp::min(substream.current_data.len(), buf.len());
buf[..len].copy_from_slice(&substream.current_data.split_to(len));
return Ok(Async::Ready(len));
}
// If the remote writing side is closed, return EOF.
if !substream.remote_open {
return Ok(Async::Ready(0));
}
// Try to find a packet of data in the buffer.
let mut inner = self.inner.lock();
let next_data_poll = next_match(&mut inner, |elem| {
match elem {
codec::Elem::Data { substream_id, endpoint, data, .. }
if *substream_id == substream.num && *endpoint != substream.endpoint => // see note [StreamId]
{
Some(Some(data.clone()))
}
codec::Elem::Close { substream_id, endpoint }
if *substream_id == substream.num && *endpoint != substream.endpoint => // see note [StreamId]
{
Some(None)
}
_ => None
}
});
// We're in a loop, so all we need to do is set `substream.current_data` to the data we
// just read and wait for the next iteration.
match next_data_poll? {
Async::Ready(Some(data)) => substream.current_data = data,
Async::Ready(None) => {
substream.remote_open = false;
return Ok(Async::Ready(0));
},
Async::NotReady => {
// There was no data packet in the buffer about this substream; maybe it's
// because it has been closed.
if inner.opened_substreams.contains(&(substream.num, substream.endpoint)) {
return Ok(Async::NotReady)
} else {
return Ok(Async::Ready(0))
}
},
}
}
}
fn write_substream(&self, substream: &mut Self::Substream, buf: &[u8]) -> Poll<usize, IoError> {
if !substream.local_open {
return Err(IoErrorKind::BrokenPipe.into());
}
let mut inner = self.inner.lock();
let to_write = cmp::min(buf.len(), inner.config.split_send_size);
let elem = codec::Elem::Data {
substream_id: substream.num,
data: From::from(&buf[..to_write]),
endpoint: substream.endpoint,
};
match poll_send(&mut inner, elem)? {
| random_line_split |
|
lib.rs | //! Data Encryption Standard Rust implementation.
//!
//! The only supported mode is Electronic Codebook (ECB).
//!
//! # Example
//!
//! ```
//! extern crate des_rs_krautcat;
//!
//! let key = [0x13, 0x34, 0x57, 0x79, 0x9B, 0xBC, 0xDF, 0xF1];
//! let message = [0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF];
//! let cipher = des_rs_krautcat::encrypt(&message, &key);
//! let message = des_rs_krautcat::decrypt(&cipher, &key);
//! ```
//!
//! # Usage
//!
//! Des exports two functions: `encrypt` and `decrypt`.
//! Use the former to encrypt some data with a key and the later to decrypt the data.
pub type Key = [u8; 8];
const FIRST_BIT: u32 = 1 << 31;
const HALF_KEY_SIZE: i64 = KEY_SIZE / 2;
const KEY_SIZE: i64 = 56;
enum Ip {
Direct,
Reverse
}
/// Circular left shift of both key halves
fn circular_left_shift(ci: u32, di: u32, shift_count: u8) -> (u32, u32) {
let mut ci_next = ci;
let mut di_next = di;
for _ in 0..shift_count {
ci_next = (ci_next << 1) | ((ci_next & FIRST_BIT) >> (HALF_KEY_SIZE - 1));
di_next = (di_next << 1) | ((di_next & FIRST_BIT) >> (HALF_KEY_SIZE - 1));
}
(ci_next, di_next)
}
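// Illustrative sketch, not part of the original source: the 28-bit halves
// live in bits 4..=31 of each u32, so a left rotation by one moves bit 31
// down to bit 4.
#[test]
fn circular_left_shift_wraps_msb() {
let (ci, di) = circular_left_shift(0x8000_0000, 0x0000_0010, 1);
assert_eq!(ci, 0x0000_0010); // bit 31 wrapped around to bit 4
assert_eq!(di, 0x0000_0020); // bit 4 shifted up to bit 5
}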
/// Swaps the bits of `a` that are `delta` positions apart, selected by `mask`
fn delta_swap(a: u64, delta: u8, mask: u64) -> u64 {
let b = (a ^ (a >> delta)) & mask;
a ^ b ^ (b << delta)
}
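// Illustrative sketch, not part of the original source: `delta_swap`
// exchanges every bit pair (i, i + delta) of `a` for which bit i of `mask`
// is set, e.g. swapping bits 0 and 1.
#[test]
fn delta_swap_exchanges_bit_pairs() {
assert_eq!(delta_swap(0b10, 1, 0b01), 0b01);
assert_eq!(delta_swap(0b01, 1, 0b01), 0b10);
}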
/// Converts the key from a u8 array into a single u64
fn key_to_u64(key: &Key) -> u64 {
let mut result = 0;
for &part in key {
result <<= 8;
result += part as u64;
}
result
}
/// Converts the message from a u8 slice into a vector of u64
fn message_to_u64s(message: &[u8]) -> Vec<u64> {
message.chunks(8)
.map(|m| {
let mut result: u64 = 0;
for &part in m {
result <<= 8;
result += part as u64;
}
if m.len() < 8 {
result <<= 8 * (8 - m.len());
}
result
})
.collect()
}
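// Illustrative sketch, not part of the original source: a trailing chunk
// shorter than eight bytes is left-aligned by the `result <<= 8 * (8 - m.len())`
// branch, i.e. the message is zero-padded on the right.
#[test]
fn message_to_u64s_pads_short_chunks() {
assert_eq!(message_to_u64s(&[0xAB]), vec![0xAB00_0000_0000_0000]);
}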
/// Converts a u64 into a vector of u8
fn to_u8_vec(num: u64) -> Vec<u8> {
vec![
((num & 0xFF00000000000000) >> 56) as u8,
((num & 0x00FF000000000000) >> 48) as u8,
((num & 0x0000FF0000000000) >> 40) as u8,
((num & 0x000000FF00000000) >> 32) as u8,
((num & 0x00000000FF000000) >> 24) as u8,
((num & 0x0000000000FF0000) >> 16) as u8,
((num & 0x000000000000FF00) >> 8) as u8,
((num & 0x00000000000000FF) >> 0) as u8
]
}
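// Illustrative sketch, not part of the original source: `to_u8_vec` is the
// big-endian inverse of `key_to_u64`, so the two functions round-trip a key.
#[test]
fn key_round_trip() {
let key: Key = [0x13, 0x34, 0x57, 0x79, 0x9B, 0xBC, 0xDF, 0xF1];
assert_eq!(to_u8_vec(key_to_u64(&key)), key.to_vec());
}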
/// Builds the 16 round subkeys
fn compute_subkeys(key: u64) -> Vec<u64> {
let table = [1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1];
let k0 = pc1(key);
let mut subkeys = vec![k0];
for shift_count in &table {
let last_key = *subkeys.last().unwrap();
let last_ci = ((last_key & 0xFFFFFFF000000000) >> 32) as u32;
let last_di = (last_key >> 4) as u32;
let (ci, di) = circular_left_shift(last_ci, last_di, *shift_count);
let current_key = ((ci as u64) << 32) | ((di as u64) << 4);
subkeys.push(current_key);
}
subkeys.remove(0);
subkeys.iter().map(|&n| { pc2(n) }).collect()
}
/// Permutation according to table PC-1
fn pc1(key: u64) -> u64 {
let key = delta_swap(key, 2, 0x3333000033330000);
let key = delta_swap(key, 4, 0x0F0F0F0F00000000);
let key = delta_swap(key, 8, 0x009A000A00A200A8);
let key = delta_swap(key, 16, 0x00006C6C0000CCCC);
let key = delta_swap(key, 1, 0x1045500500550550);
let key = delta_swap(key, 32, 0x00000000F0F0F5FA);
let key = delta_swap(key, 8, 0x00550055006A00AA);
let key = delta_swap(key, 2, 0x0000333330000300);
key & 0xFFFFFFFFFFFFFF00
}
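// Illustrative note, not part of the original source: PC-1 keeps 56 of the
// 64 key bits, discarding the eight parity bits; the trailing
// `& 0xFFFFFFFFFFFFFF00` mask reflects that the result occupies only the
// top 56 bits of the u64.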
/// Permutation according to table PC-2
fn pc2(key: u64) -> u64 {
const PC_2_TABLE: [u8; 48] = [
14, 17, 11, 24, 1, 5,
3, 28, 15, 6, 21, 10,
23, 19, 12, 4, 26, 8,
16, 7, 27, 20, 13, 2,
41, 52, 31, 37, 47, 55,
30, 40, 51, 45, 33, 48,
44, 49, 39, 56, 34,53,
46, 42, 50, 36, 29, 32
];
const OUT_SIZE: u8 = 64;
let mut result: u64 = 0;
for m in 0..PC_2_TABLE.len() {
if PC_2_TABLE[m] > m as u8 {
result |= (key & (0x01 << OUT_SIZE - PC_2_TABLE[m])) << PC_2_TABLE[m] - (m as u8 + 1);
} else {
result |= (key & (0x01 << OUT_SIZE - PC_2_TABLE[m])) >> (m as u8 + 1) - PC_2_TABLE[m];
}
}
result & 0xFFFFFFFFFFFF0000
}
/// Expansion according to table E
fn e(block: u32) -> u64 {
const BLOCK_LEN: usize = 32;
const RESULT_LEN: usize = 48;
let block_exp = (block as u64) << 32;
let b1 = ((block_exp << (BLOCK_LEN - 1)) & 0x8000000000000000) as u64;
    let b2 = ((block_exp >> 1) & 0x7C00000000000000) as u64;
    let b3 = ((block_exp >> 3) & 0x03F0000000000000) as u64;
    let b4 = ((block_exp >> 5) & 0x000FC00000000000) as u64;
    let b5 = ((block_exp >> 7) & 0x00003F0000000000) as u64;
    let b6 = ((block_exp >> 9) & 0x000000FC00000000) as u64;
    let b7 = ((block_exp >> 11) & 0x00000003F0000000) as u64;
    let b8 = ((block_exp >> 13) & 0x000000000FC00000) as u64;
    let b9 = ((block_exp >> 15) & 0x00000000003E0000) as u64;
    let b10 = ((block_exp >> (RESULT_LEN - 1)) & 0x0000000000010000) as u64;
b1 | b2 | b3 | b4 | b5 | b6 | b7 | b8 | b9 | b10
}
/// Permutation according to table P
fn p(block: u32) -> u32 {
const P_TABLE: [u8; 32] = [
16, 7, 20, 21, 29, 12, 28, 17,
1, 15, 23, 26, 5, 18, 31, 10,
2, 8, 24, 14, 32, 27, 3, 9,
19, 13, 30, 6, 22, 11, 4, 25
];
const BLOCK_SIZE: u8 = 32;
let mut result: u32 = 0;
    for m in 0..P_TABLE.len() {
        if P_TABLE[m] > m as u8 {
            result |= (block & (0x01 << (BLOCK_SIZE - P_TABLE[m]))) << (P_TABLE[m] - (m as u8 + 1));
        } else {
            result |= (block & (0x01 << (BLOCK_SIZE - P_TABLE[m]))) >> ((m as u8 + 1) - P_TABLE[m]);
        }
    }
result
}
/// S-box implementation
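/// The row index is taken from the outer bits (b5, b0) and the column from
/// the inner bits (b4..b1). For example, `s(0, 0b011011)` selects row
/// `i = 0b01 = 1` and column `j = 0b1101 = 13`, giving `TABLES[0][1][13] = 5`
/// — the classic S1 example.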
fn s(box_id: usize, block: u8) -> u8 {
const TABLES: [[[u8; 16]; 4]; 8] = [
[
[ 14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7],
[ 0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8],
[ 4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0],
[ 15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13]
],
[
[ 15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10],
[ 3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5],
[ 0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15],
[ 13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9]
],
[
[ 10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8],
[ 13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1],
[ 13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7],
[ 1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12]
],
[
[ 7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15],
[ 13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9],
[ 10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4],
[ 3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14]
],
[
[ 2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9],
[ 14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6],
[ 4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14],
[ 11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3]
],
[
[ 12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11],
[ 10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8],
[ 9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6],
[ 4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13]
],
[
[ 4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1],
[ 13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6],
[ 1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2],
[ 6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12]
],
[
[ 13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7],
[ 1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2],
[ 7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8],
[ 2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11]
]
];
let i = ((block & 0x20) >> 4 | (block & 0x01)) as usize;
let j = ((block & 0x1E) >> 1) as usize;
TABLES[box_id][i][j]
}
/// ---------------------------------------------------------------
/// # Functions used directly in the main DES algorithm
/// ---------------------------------------------------------------
/// IP permutation (direct and inverse)
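/// Instead of a lookup table, IP is expressed as a composition of five delta
/// swaps. Each delta swap exchanges disjoint bit pairs and is therefore its
/// own inverse, so applying the same swaps in reverse order yields the
/// inverse permutation IP^-1.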
fn ip(message: u64, dir: Ip) -> u64 {
const COUNT: usize = 5;
const MASK: [u64; COUNT] = [
0x0055005500550055,
0x0000333300003333,
0x000000000F0F0F0F,
0x00000000FF00FF00,
0x000000FF000000FF
];
const DELTA: [u8; COUNT] = [ 9, 18, 36, 24, 24];
let mut result: u64 = message;
match dir {
        Ip::Direct => for i in 0..COUNT {
            result = delta_swap(result, DELTA[i], MASK[i])
        },
        Ip::Reverse => for i in (0..COUNT).rev() {
            result = delta_swap(result, DELTA[i], MASK[i])
        }
}
result
}
/// The Feistel function
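/// Computes f(R, K) = P(S(E(R) ^ K)): expand the 32-bit half-block to 48
/// bits, XOR it with the subkey, substitute each 6-bit group through the
/// eight S-boxes, then apply the P permutation.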
fn feistel(half_block: u32, subkey: u64) -> u32 {
let expanded = e(half_block);
let mut intermediate = expanded ^ subkey;
    let mut result: u32 = 0;
    for i in 0..8 {
let block = ((intermediate & 0xFC00000000000000) >> 58) as u8;
intermediate <<= 6;
result <<= 4;
result |= s(i, block) as u32;
}
p(result)
}
/// The DES algorithm
fn des(message: &[u8], subkeys: Vec<u64>) -> Vec<u8> {
    let message_len = message.len();
    let message = message_to_u64s(message);
let mut blocks = vec![];
for msg in message {
let permuted = ip(msg, Ip::Direct);
let mut li: u32 = ((permuted & 0xFFFFFFFF00000000) >> 32) as u32;
        let mut ri: u32 = (permuted & 0x00000000FFFFFFFF) as u32;
for subkey in &subkeys {
let last_li = li;
li = ri;
ri = last_li ^ feistel(ri, *subkey);
}
        let r16l16 = ((ri as u64) << 32) | (li as u64);
blocks.push(to_u8_vec(ip(r16l16, Ip::Reverse)));
}
let mut result = Vec::with_capacity(message_len);
for mut block in blocks.into_iter() {
result.append(&mut block);
}
result
}
/// Encryption
pub fn encrypt(message: &[u8], key: &Key) -> Vec<u8> {
let key = key_to_u64(key);
let subkeys = compute_subkeys(key);
des(message, subkeys)
}
/// Decryption: the same Feistel network with the subkeys applied in reverse order
pub fn decrypt(cipher: &[u8], key: &Key) -> Vec<u8> {
let key = key_to_u64(key);
let mut subkeys = compute_subkeys(key);
subkeys.reverse();
des(cipher, subkeys)
}
#[cfg(test)]
mod tests {
use super::{decrypt, encrypt};
use super::{e, p, pc1, pc2};
#[test]
fn test_e() {
let result: [u64; 3] = [
e(0b1111_0000_1010_1010_1111_0000_1010_1010),
e(0b1111_0000_1010_1010_1111_0000_1010_1011),
e(0b1111_1111_1111_1111_1111_1111_1111_1111),
];
let expect: [u64; 3] = [
0b011110_100001_010101_010101_011110_100001_010101_010101u64 << 16,
0b111110_100001_010101_010101_011110_100001_010101_010111u64 << 16,
0b111111_111111_111111_111111_111111_111111_111111_111111u64 << 16,
];
        for i in 0..3 {
assert_eq!(expect[i], result[i]);
}
}
#[test]
fn test_p() {
let result: [u32; 2] = [
p(0b1111_0000_0101_1010_1110_0111_1100_0011),
p(0b1011_0111_0001_1000_0000_1011_0110_1010),
];
let expect: [u32; 2] = [
0b0000_0101_1111_0111_1010_1010_1100_1011,
0b0101_1100_1011_0010_0110_0110_0101_0010,
];
        for i in 0..2 {
assert_eq!(expect[i], result[i]);
}
}
#[test]
fn test_pc1() {
let result = pc1(0b00010011_00110100_01010111_01111001_10011011_10111100_11011111_11110001);
assert_eq!(0b1111000_0110011_0010101_0101111_0101010_1011001_1001111_0001111 << 8, result);
}
#[test]
fn test_pc2() {
let result = pc2(0b1110000_1100110_0101010_1011111_1010101_0110011_0011110_0011110 << 8);
assert_eq!(0b000110_110000_001011_101111_111111_000111_000001_110010 << 16, result);
}
#[test]
fn test_encrypt_decrypt() {
let key = [0x13, 0x34, 0x57, 0x79, 0x9B, 0xBC, 0xDF, 0xF1];
let message = [0x52, 0x75, 0x73, 0x74, 0x20, 0x44, 0x45, 0x53];
let expected_cipher = vec![0x27, 0xC1, 0x4F, 0xA6, 0x9A, 0x04, 0x4E, 0x28];
let cipher = encrypt(&message, &key);
assert_eq!(cipher, expected_cipher);
let cipher = expected_cipher;
let expected_message = message;
let message = decrypt(&cipher, &key);
assert_eq!(message, expected_message);
let message = [0x64, 0x65, 0x73, 0x2D, 0x72, 0x73, 0x2D, 0x6B,
0x72, 0x61, 0x75, 0x74, 0x63, 0x61, 0x74, 0x20,
0x69, 0x73, 0x20, 0x6D, 0x79, 0x20, 0x69, 0x6D,
0x70, 0x6C, 0x65, 0x6D, 0x65, 0x6E, 0x74, 0x61,
0x74, 0x69, 0x6F, 0x6E, 0x20, 0x6F, 0x66, 0x20,
0x44, 0x45, 0x53, 0x20, 0x61, 0x6C, 0x67, 0x6F,
0x72, 0x69, 0x74, 0x68, 0x6D, 0x20, 0x69, 0x6E,
0x20, 0x52, 0x75, 0x73, 0x74];
let expected_cipher = vec![0x82, 0x8D, 0xB8, 0xD5, 0xFF, 0x41, 0xDF, 0xF7,
0x91, 0x34, 0xCC, 0x88, 0xFB, 0x52, 0xCB, 0xB7,
0x3C, 0x30, 0x17, 0x36, 0x9C, 0x3A, 0x70, 0xE0,
0x17, 0x64, 0x25, 0xDB, 0x17, 0xF5, 0x10, 0x80,
0x02, 0xAF, 0x08, 0x04, 0x6F, 0x3A, 0xA9, 0xB1,
0x3D, 0x74, 0x5C, 0xA7, 0x05, 0x8A, 0x13, 0x46,
0xB8, 0x0B, 0x5C, 0x9C, 0xE6, 0x01, 0x76, 0x92,
0x1C, 0x42, 0x30, 0x7E, 0xB6, 0xFA, 0xE4, 0xD3];
let cipher = encrypt(&message, &key);
assert_eq!(cipher, expected_cipher);
let cipher = expected_cipher;
let expected_message = message;
let message = decrypt(&cipher, &key);
assert_eq!(&message[..expected_message.len()], &expected_message[..]);
}
}
proc_fork.rs
use super::*;
use crate::{
capture_snapshot,
os::task::OwnedTaskStatus,
runtime::task_manager::{TaskWasm, TaskWasmRunProperties},
syscalls::*,
WasiThreadHandle,
};
use serde::{Deserialize, Serialize};
use wasmer::Memory;
#[derive(Serialize, Deserialize)]
pub(crate) struct ForkResult {
pub pid: Pid,
pub ret: Errno,
}
/// ### `proc_fork()`
/// Forks the current process into a new subprocess. If the function
/// returns zero then it is executing in the new subprocess. If it returns a
/// positive number then it is the current process and `$pid` represents the child.
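///
/// An illustrative guest-side sketch of the intended semantics (the calling
/// convention shown here is an assumption for illustration; this function is
/// the host-side implementation of the syscall):
///
/// ```ignore
/// let mut pid: Pid = 0;
/// let err = proc_fork(Bool::True /* copy memory */, &mut pid);
/// if err == Errno::Success {
///     if pid == 0 {
///         // child continues here
///     } else {
///         // parent continues here; `pid` identifies the child
///     }
/// }
/// ```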
#[instrument(level = "debug", skip_all, fields(pid = ctx.data().process.pid().raw()), ret, err)]
pub fn | <M: MemorySize>(
mut ctx: FunctionEnvMut<'_, WasiEnv>,
mut copy_memory: Bool,
pid_ptr: WasmPtr<Pid, M>,
) -> Result<Errno, WasiError> {
wasi_try_ok!(WasiEnv::process_signals_and_exit(&mut ctx)?);
// If we were just restored then we need to return the value instead
if let Some(result) = unsafe { handle_rewind::<M, ForkResult>(&mut ctx) } {
if result.pid == 0 {
trace!("handle_rewind - i am child (ret={})", result.ret);
} else {
trace!(
"handle_rewind - i am parent (child={}, ret={})",
result.pid,
result.ret
);
}
let memory = unsafe { ctx.data().memory_view(&ctx) };
wasi_try_mem_ok!(pid_ptr.write(&memory, result.pid));
return Ok(result.ret);
}
trace!(%copy_memory, "capturing");
// Fork the environment which will copy all the open file handlers
// and associate a new context but otherwise shares things like the
// file system interface. The handle to the forked process is stored
// in the parent process context
let (mut child_env, mut child_handle) = match ctx.data().fork() {
Ok(p) => p,
Err(err) => {
debug!("could not fork process: {err}");
// TODO: evaluate the appropriate error code, document it in the spec.
return Ok(Errno::Perm);
}
};
let child_pid = child_env.process.pid();
let child_finished = child_env.process.finished.clone();
// We write a zero to the PID before we capture the stack
// so that this is what will be returned to the child
{
let mut inner = ctx.data().process.inner.write().unwrap();
inner.children.push(child_env.process.clone());
}
let env = ctx.data();
let memory = unsafe { env.memory_view(&ctx) };
// Setup some properties in the child environment
wasi_try_mem_ok!(pid_ptr.write(&memory, 0));
let pid = child_env.pid();
let tid = child_env.tid();
// Pass some offsets to the unwind function
let pid_offset = pid_ptr.offset();
// If we are not copying the memory then we act like a `vfork`
// instead which will pretend to be the new process for a period
// of time until `proc_exec` is called at which point the fork
// actually occurs
if copy_memory == Bool::False {
// Perform the unwind action
return unwind::<M, _>(ctx, move |mut ctx, mut memory_stack, rewind_stack| {
// Grab all the globals and serialize them
let store_data = crate::utils::store::capture_snapshot(&mut ctx.as_store_mut())
.serialize()
.unwrap();
let store_data = Bytes::from(store_data);
// We first fork the environment and replace the current environment
// so that the process can continue to prepare for the real fork as
// if it had actually forked
child_env.swap_inner(ctx.data_mut());
std::mem::swap(ctx.data_mut(), &mut child_env);
ctx.data_mut().vfork.replace(WasiVFork {
rewind_stack: rewind_stack.clone(),
memory_stack: memory_stack.clone(),
store_data: store_data.clone(),
env: Box::new(child_env),
handle: child_handle,
});
// Carry on as if the fork had taken place (which basically means
// it prevents to be the new process with the old one suspended)
// Rewind the stack and carry on
match rewind::<M, _>(
ctx,
memory_stack.freeze(),
rewind_stack.freeze(),
store_data,
ForkResult {
pid: 0,
ret: Errno::Success,
},
) {
Errno::Success => OnCalledAction::InvokeAgain,
err => {
warn!("failed - could not rewind the stack - errno={}", err);
OnCalledAction::Trap(Box::new(WasiError::Exit(err.into())))
}
}
});
}
// Create the thread that will back this forked process
let state = env.state.clone();
let bin_factory = env.bin_factory.clone();
// Perform the unwind action
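    // (This relies on an asyncify-style unwind/rewind: the wasm stack is
    // unwound here, the globals are snapshotted, and both the parent and the
    // newly spawned child later rewind the same stack, so each continues from
    // this call site with its own ForkResult.)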
let snapshot = capture_snapshot(&mut ctx.as_store_mut());
unwind::<M, _>(ctx, move |mut ctx, mut memory_stack, rewind_stack| {
let tasks = ctx.data().tasks().clone();
let span = debug_span!(
"unwind",
memory_stack_len = memory_stack.len(),
rewind_stack_len = rewind_stack.len()
);
let _span_guard = span.enter();
let memory_stack = memory_stack.freeze();
let rewind_stack = rewind_stack.freeze();
// Grab all the globals and serialize them
let store_data = snapshot.serialize().unwrap();
let store_data = Bytes::from(store_data);
// Now we use the environment and memory references
let runtime = child_env.runtime.clone();
let tasks = child_env.tasks().clone();
let child_memory_stack = memory_stack.clone();
let child_rewind_stack = rewind_stack.clone();
let module = unsafe { ctx.data().inner() }.module_clone();
let memory = unsafe { ctx.data().inner() }.memory_clone();
let spawn_type = SpawnMemoryType::CopyMemory(memory, ctx.as_store_ref());
// Spawn a new process with this current execution environment
let signaler = Box::new(child_env.process.clone());
{
let runtime = runtime.clone();
let tasks = tasks.clone();
let tasks_outer = tasks.clone();
let store_data = store_data.clone();
let run = move |mut props: TaskWasmRunProperties| {
let ctx = props.ctx;
let mut store = props.store;
// Rewind the stack and carry on
{
trace!("rewinding child");
let mut ctx = ctx.env.clone().into_mut(&mut store);
let (data, mut store) = ctx.data_and_store_mut();
match rewind::<M, _>(
ctx,
child_memory_stack,
child_rewind_stack,
store_data.clone(),
ForkResult {
pid: 0,
ret: Errno::Success,
},
) {
Errno::Success => OnCalledAction::InvokeAgain,
err => {
warn!(
"wasm rewind failed - could not rewind the stack - errno={}",
err
);
return;
}
};
}
// Invoke the start function
run::<M>(ctx, store, child_handle, None);
};
tasks_outer
.task_wasm(
TaskWasm::new(Box::new(run), child_env, module, false)
.with_snapshot(&snapshot)
.with_memory(spawn_type),
)
.map_err(|err| {
warn!(
"failed to fork as the process could not be spawned - {}",
err
);
err
})
.ok();
};
// Rewind the stack and carry on
match rewind::<M, _>(
ctx,
memory_stack,
rewind_stack,
store_data,
ForkResult {
pid: child_pid.raw() as Pid,
ret: Errno::Success,
},
) {
Errno::Success => OnCalledAction::InvokeAgain,
err => {
warn!("failed - could not rewind the stack - errno={}", err);
OnCalledAction::Trap(Box::new(WasiError::Exit(err.into())))
}
}
})
}
fn run<M: MemorySize>(
ctx: WasiFunctionEnv,
mut store: Store,
child_handle: WasiThreadHandle,
rewind_state: Option<(RewindState, Bytes)>,
) -> ExitCode {
let env = ctx.data(&store);
let tasks = env.tasks().clone();
let pid = env.pid();
let tid = env.tid();
// If we need to rewind then do so
if let Some((rewind_state, rewind_result)) = rewind_state {
let res = rewind_ext::<M>(
ctx.env.clone().into_mut(&mut store),
rewind_state.memory_stack,
rewind_state.rewind_stack,
rewind_state.store_data,
rewind_result,
);
        if res != Errno::Success {
return res.into();
}
}
let mut ret: ExitCode = Errno::Success.into();
let err = if ctx.data(&store).thread.is_main() {
trace!(%pid, %tid, "re-invoking main");
let start = unsafe { ctx.data(&store).inner() }.start.clone().unwrap();
start.call(&mut store)
} else {
trace!(%pid, %tid, "re-invoking thread_spawn");
let start = unsafe { ctx.data(&store).inner() }
.thread_spawn
.clone()
.unwrap();
start.call(&mut store, 0, 0)
};
if let Err(err) = err {
match err.downcast::<WasiError>() {
Ok(WasiError::Exit(exit_code)) => {
ret = exit_code;
}
Ok(WasiError::DeepSleep(deep)) => {
trace!(%pid, %tid, "entered a deep sleep");
// Create the respawn function
let respawn = {
let tasks = tasks.clone();
let rewind_state = deep.rewind;
move |ctx, store, rewind_result| {
run::<M>(
ctx,
store,
child_handle,
Some((rewind_state, rewind_result)),
);
}
};
                // Spawns the WASM process after a trigger
unsafe {
tasks.resume_wasm_after_poller(Box::new(respawn), ctx, store, deep.trigger)
};
return Errno::Success.into();
}
_ => {}
}
}
trace!(%pid, %tid, "child exited (code = {})", ret);
// Clean up the environment and return the result
    ctx.cleanup(&mut store, Some(ret));
// We drop the handle at the last moment which will close the thread
drop(child_handle);
ret
}
| proc_fork | identifier_name |
proc_fork.rs | use super::*;
use crate::{
capture_snapshot,
os::task::OwnedTaskStatus,
runtime::task_manager::{TaskWasm, TaskWasmRunProperties},
syscalls::*,
WasiThreadHandle,
};
use serde::{Deserialize, Serialize};
use wasmer::Memory;
#[derive(Serialize, Deserialize)]
pub(crate) struct ForkResult {
pub pid: Pid,
pub ret: Errno,
}
/// ### `proc_fork()`
/// Forks the current process into a new subprocess. If the function
/// returns a zero then its the new subprocess. If it returns a positive
/// number then its the current process and the $pid represents the child.
#[instrument(level = "debug", skip_all, fields(pid = ctx.data().process.pid().raw()), ret, err)]
pub fn proc_fork<M: MemorySize>(
mut ctx: FunctionEnvMut<'_, WasiEnv>,
mut copy_memory: Bool,
pid_ptr: WasmPtr<Pid, M>,
) -> Result<Errno, WasiError> {
wasi_try_ok!(WasiEnv::process_signals_and_exit(&mut ctx)?);
// If we were just restored then we need to return the value instead
if let Some(result) = unsafe { handle_rewind::<M, ForkResult>(&mut ctx) } {
if result.pid == 0 {
trace!("handle_rewind - i am child (ret={})", result.ret);
} else {
trace!(
"handle_rewind - i am parent (child={}, ret={})",
result.pid,
result.ret
);
}
let memory = unsafe { ctx.data().memory_view(&ctx) };
wasi_try_mem_ok!(pid_ptr.write(&memory, result.pid));
return Ok(result.ret);
}
trace!(%copy_memory, "capturing");
// Fork the environment which will copy all the open file handlers
// and associate a new context but otherwise shares things like the
// file system interface. The handle to the forked process is stored
// in the parent process context
let (mut child_env, mut child_handle) = match ctx.data().fork() {
Ok(p) => p,
Err(err) => {
debug!("could not fork process: {err}");
// TODO: evaluate the appropriate error code, document it in the spec.
return Ok(Errno::Perm);
}
};
let child_pid = child_env.process.pid();
let child_finished = child_env.process.finished.clone();
// We write a zero to the PID before we capture the stack
// so that this is what will be returned to the child
{
let mut inner = ctx.data().process.inner.write().unwrap();
inner.children.push(child_env.process.clone());
}
let env = ctx.data();
let memory = unsafe { env.memory_view(&ctx) };
// Setup some properties in the child environment
wasi_try_mem_ok!(pid_ptr.write(&memory, 0));
let pid = child_env.pid();
let tid = child_env.tid();
// Pass some offsets to the unwind function
let pid_offset = pid_ptr.offset();
// If we are not copying the memory then we act like a `vfork`
// instead which will pretend to be the new process for a period
// of time until `proc_exec` is called at which point the fork
// actually occurs
if copy_memory == Bool::False {
// Perform the unwind action
return unwind::<M, _>(ctx, move |mut ctx, mut memory_stack, rewind_stack| {
// Grab all the globals and serialize them
let store_data = crate::utils::store::capture_snapshot(&mut ctx.as_store_mut())
.serialize()
.unwrap();
let store_data = Bytes::from(store_data);
// We first fork the environment and replace the current environment
// so that the process can continue to prepare for the real fork as
// if it had actually forked
child_env.swap_inner(ctx.data_mut());
std::mem::swap(ctx.data_mut(), &mut child_env);
ctx.data_mut().vfork.replace(WasiVFork {
rewind_stack: rewind_stack.clone(),
memory_stack: memory_stack.clone(),
store_data: store_data.clone(),
env: Box::new(child_env),
handle: child_handle,
});
// Carry on as if the fork had taken place (which basically means
// it prevents to be the new process with the old one suspended)
// Rewind the stack and carry on
match rewind::<M, _>(
ctx,
memory_stack.freeze(),
rewind_stack.freeze(),
store_data,
ForkResult {
pid: 0,
ret: Errno::Success,
},
) {
Errno::Success => OnCalledAction::InvokeAgain,
err => {
warn!("failed - could not rewind the stack - errno={}", err);
OnCalledAction::Trap(Box::new(WasiError::Exit(err.into())))
}
}
});
}
// Create the thread that will back this forked process
let state = env.state.clone();
let bin_factory = env.bin_factory.clone();
// Perform the unwind action
let snapshot = capture_snapshot(&mut ctx.as_store_mut());
unwind::<M, _>(ctx, move |mut ctx, mut memory_stack, rewind_stack| {
let tasks = ctx.data().tasks().clone();
let span = debug_span!(
"unwind",
memory_stack_len = memory_stack.len(),
rewind_stack_len = rewind_stack.len()
);
let _span_guard = span.enter();
let memory_stack = memory_stack.freeze();
let rewind_stack = rewind_stack.freeze();
// Grab all the globals and serialize them
let store_data = snapshot.serialize().unwrap();
let store_data = Bytes::from(store_data);
// Now we use the environment and memory references
let runtime = child_env.runtime.clone();
let tasks = child_env.tasks().clone();
let child_memory_stack = memory_stack.clone();
let child_rewind_stack = rewind_stack.clone();
let module = unsafe { ctx.data().inner() }.module_clone();
let memory = unsafe { ctx.data().inner() }.memory_clone();
let spawn_type = SpawnMemoryType::CopyMemory(memory, ctx.as_store_ref());
// Spawn a new process with this current execution environment
let signaler = Box::new(child_env.process.clone());
{
let runtime = runtime.clone();
let tasks = tasks.clone();
let tasks_outer = tasks.clone();
let store_data = store_data.clone();
let run = move |mut props: TaskWasmRunProperties| {
let ctx = props.ctx;
let mut store = props.store;
// Rewind the stack and carry on
{
trace!("rewinding child");
let mut ctx = ctx.env.clone().into_mut(&mut store);
let (data, mut store) = ctx.data_and_store_mut();
match rewind::<M, _>(
ctx,
child_memory_stack,
child_rewind_stack,
store_data.clone(),
ForkResult {
pid: 0,
ret: Errno::Success,
},
) {
Errno::Success => OnCalledAction::InvokeAgain,
err => {
warn!(
"wasm rewind failed - could not rewind the stack - errno={}",
err
);
return;
}
};
}
// Invoke the start function
run::<M>(ctx, store, child_handle, None);
};
tasks_outer
.task_wasm(
TaskWasm::new(Box::new(run), child_env, module, false)
.with_snapshot(&snapshot)
.with_memory(spawn_type),
)
.map_err(|err| {
warn!(
"failed to fork as the process could not be spawned - {}",
err
);
err
})
.ok();
};
// Rewind the stack and carry on
match rewind::<M, _>(
ctx,
memory_stack,
rewind_stack,
store_data,
ForkResult {
pid: child_pid.raw() as Pid,
ret: Errno::Success,
},
) {
Errno::Success => OnCalledAction::InvokeAgain,
err => {
warn!("failed - could not rewind the stack - errno={}", err);
OnCalledAction::Trap(Box::new(WasiError::Exit(err.into())))
}
}
})
}
fn run<M: MemorySize>(
ctx: WasiFunctionEnv,
mut store: Store,
child_handle: WasiThreadHandle,
rewind_state: Option<(RewindState, Bytes)>,
) -> ExitCode {
let env = ctx.data(&store);
let tasks = env.tasks().clone();
let pid = env.pid();
let tid = env.tid();
// If we need to rewind then do so
if let Some((rewind_state, rewind_result)) = rewind_state {
let res = rewind_ext::<M>(
ctx.env.clone().into_mut(&mut store),
rewind_state.memory_stack,
rewind_state.rewind_stack,
rewind_state.store_data,
rewind_result,
);
if res != Errno::Success {
return res.into();
}
}
let mut ret: ExitCode = Errno::Success.into();
let err = if ctx.data(&store).thread.is_main() {
trace!(%pid, %tid, "re-invoking main");
let start = unsafe { ctx.data(&store).inner() }.start.clone().unwrap();
start.call(&mut store)
} else {
trace!(%pid, %tid, "re-invoking thread_spawn");
let start = unsafe { ctx.data(&store).inner() }
.thread_spawn
.clone()
.unwrap();
start.call(&mut store, 0, 0)
};
if let Err(err) = err {
match err.downcast::<WasiError>() {
Ok(WasiError::Exit(exit_code)) => {
ret = exit_code;
}
Ok(WasiError::DeepSleep(deep)) => {
trace!(%pid, %tid, "entered a deep sleep");
// Create the respawn function
let respawn = {
let tasks = tasks.clone();
let rewind_state = deep.rewind;
move |ctx, store, rewind_result| {
run::<M>(
ctx,
store,
child_handle,
Some((rewind_state, rewind_result)),
);
}
};
// Spawns the WASM process after a trigger
unsafe {
tasks.resume_wasm_after_poller(Box::new(respawn), ctx, store, deep.trigger)
};
return Errno::Success.into();
}
_ => |
}
}
trace!(%pid, %tid, "child exited (code = {})", ret);
// Clean up the environment and return the result
ctx.cleanup(&mut store, Some(ret));
// We drop the handle at the last moment which will close the thread
drop(child_handle);
ret
}
| {} | conditional_block |
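// A guest-side sketch of how this syscall is consumed. The import name,
// the copy-memory flag encoding, and the u16 errno are assumptions modeled
// on the wasix-style ABI above, not a confirmed binding.
extern "C" {
    // Assumed signature; real wasix bindings may differ.
    fn proc_fork(copy_memory: u32, pid_ptr: *mut u32) -> u16;
}

fn fork_demo() {
    let mut pid: u32 = 0;
    let errno = unsafe { proc_fork(1, &mut pid) };
    if errno != 0 {
        eprintln!("fork failed: errno {errno}");
    } else if pid == 0 {
        println!("child: running in the copied memory image");
    } else {
        println!("parent: spawned child pid {pid}");
    }
}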
proc_fork.rs | use super::*;
use crate::{
capture_snapshot,
os::task::OwnedTaskStatus,
runtime::task_manager::{TaskWasm, TaskWasmRunProperties},
syscalls::*,
WasiThreadHandle,
};
use serde::{Deserialize, Serialize};
use wasmer::Memory;
#[derive(Serialize, Deserialize)]
pub(crate) struct ForkResult {
pub pid: Pid,
pub ret: Errno,
}
/// ### `proc_fork()`
/// Forks the current process into a new subprocess. If the function
/// returns zero then it's the new subprocess. If it returns a positive
/// number then it's the current process and the $pid represents the child.
#[instrument(level = "debug", skip_all, fields(pid = ctx.data().process.pid().raw()), ret, err)]
pub fn proc_fork<M: MemorySize>(
mut ctx: FunctionEnvMut<'_, WasiEnv>,
mut copy_memory: Bool,
pid_ptr: WasmPtr<Pid, M>,
) -> Result<Errno, WasiError> {
wasi_try_ok!(WasiEnv::process_signals_and_exit(&mut ctx)?);
// If we were just restored then we need to return the value instead
if let Some(result) = unsafe { handle_rewind::<M, ForkResult>(&mut ctx) } {
if result.pid == 0 {
trace!("handle_rewind - i am child (ret={})", result.ret);
} else {
trace!(
"handle_rewind - i am parent (child={}, ret={})",
result.pid,
result.ret
);
}
let memory = unsafe { ctx.data().memory_view(&ctx) };
wasi_try_mem_ok!(pid_ptr.write(&memory, result.pid));
return Ok(result.ret);
}
trace!(%copy_memory, "capturing");
// Fork the environment which will copy all the open file handles
// and associate a new context but otherwise shares things like the
// file system interface. The handle to the forked process is stored
// in the parent process context
let (mut child_env, mut child_handle) = match ctx.data().fork() {
Ok(p) => p,
Err(err) => {
debug!("could not fork process: {err}");
// TODO: evaluate the appropriate error code, document it in the spec.
return Ok(Errno::Perm);
}
};
let child_pid = child_env.process.pid();
let child_finished = child_env.process.finished.clone();
// We write a zero to the PID before we capture the stack
// so that this is what will be returned to the child
{
let mut inner = ctx.data().process.inner.write().unwrap();
inner.children.push(child_env.process.clone());
}
let env = ctx.data();
let memory = unsafe { env.memory_view(&ctx) };
// Setup some properties in the child environment
wasi_try_mem_ok!(pid_ptr.write(&memory, 0));
let pid = child_env.pid();
let tid = child_env.tid();
// Pass some offsets to the unwind function
let pid_offset = pid_ptr.offset();
// If we are not copying the memory then we act like a `vfork`
// instead which will pretend to be the new process for a period
// of time until `proc_exec` is called at which point the fork
// actually occurs
if copy_memory == Bool::False {
// Perform the unwind action
return unwind::<M, _>(ctx, move |mut ctx, mut memory_stack, rewind_stack| {
// Grab all the globals and serialize them
let store_data = crate::utils::store::capture_snapshot(&mut ctx.as_store_mut())
.serialize()
.unwrap();
let store_data = Bytes::from(store_data);
// We first fork the environment and replace the current environment
// so that the process can continue to prepare for the real fork as
// if it had actually forked
child_env.swap_inner(ctx.data_mut());
std::mem::swap(ctx.data_mut(), &mut child_env);
ctx.data_mut().vfork.replace(WasiVFork {
rewind_stack: rewind_stack.clone(),
memory_stack: memory_stack.clone(),
store_data: store_data.clone(),
env: Box::new(child_env),
handle: child_handle,
});
// Carry on as if the fork had taken place (which basically means
// it pretends to be the new process with the old one suspended)
// Rewind the stack and carry on
match rewind::<M, _>(
ctx,
memory_stack.freeze(),
rewind_stack.freeze(),
store_data,
ForkResult {
pid: 0,
ret: Errno::Success,
},
) {
Errno::Success => OnCalledAction::InvokeAgain,
err => {
warn!("failed - could not rewind the stack - errno={}", err);
OnCalledAction::Trap(Box::new(WasiError::Exit(err.into())))
}
}
});
}
// Create the thread that will back this forked process
let state = env.state.clone();
let bin_factory = env.bin_factory.clone();
// Perform the unwind action
let snapshot = capture_snapshot(&mut ctx.as_store_mut());
unwind::<M, _>(ctx, move |mut ctx, mut memory_stack, rewind_stack| {
let tasks = ctx.data().tasks().clone();
let span = debug_span!(
"unwind",
memory_stack_len = memory_stack.len(),
rewind_stack_len = rewind_stack.len()
);
let _span_guard = span.enter();
let memory_stack = memory_stack.freeze();
let rewind_stack = rewind_stack.freeze();
// Grab all the globals and serialize them
let store_data = snapshot.serialize().unwrap();
let store_data = Bytes::from(store_data);
// Now we use the environment and memory references
let runtime = child_env.runtime.clone();
let tasks = child_env.tasks().clone();
let child_memory_stack = memory_stack.clone();
let child_rewind_stack = rewind_stack.clone();
let module = unsafe { ctx.data().inner() }.module_clone();
let memory = unsafe { ctx.data().inner() }.memory_clone();
let spawn_type = SpawnMemoryType::CopyMemory(memory, ctx.as_store_ref());
// Spawn a new process with this current execution environment
let signaler = Box::new(child_env.process.clone());
{
let runtime = runtime.clone();
let tasks = tasks.clone();
let tasks_outer = tasks.clone();
let store_data = store_data.clone();
let run = move |mut props: TaskWasmRunProperties| {
let ctx = props.ctx;
let mut store = props.store;
// Rewind the stack and carry on
{
trace!("rewinding child");
let mut ctx = ctx.env.clone().into_mut(&mut store);
let (data, mut store) = ctx.data_and_store_mut();
match rewind::<M, _>(
ctx,
child_memory_stack,
child_rewind_stack,
store_data.clone(),
ForkResult {
pid: 0,
ret: Errno::Success,
},
) {
Errno::Success => OnCalledAction::InvokeAgain,
err => {
warn!(
"wasm rewind failed - could not rewind the stack - errno={}",
err
);
return;
}
};
}
// Invoke the start function
run::<M>(ctx, store, child_handle, None);
};
tasks_outer
.task_wasm(
TaskWasm::new(Box::new(run), child_env, module, false)
.with_snapshot(&snapshot)
.with_memory(spawn_type),
)
.map_err(|err| {
warn!(
"failed to fork as the process could not be spawned - {}",
err
);
err
})
.ok();
};
// Rewind the stack and carry on
match rewind::<M, _>(
ctx,
memory_stack,
rewind_stack,
store_data,
ForkResult {
pid: child_pid.raw() as Pid,
ret: Errno::Success,
},
) {
Errno::Success => OnCalledAction::InvokeAgain,
err => {
warn!("failed - could not rewind the stack - errno={}", err);
OnCalledAction::Trap(Box::new(WasiError::Exit(err.into())))
}
}
})
}
fn run<M: MemorySize>(
ctx: WasiFunctionEnv,
mut store: Store,
child_handle: WasiThreadHandle,
rewind_state: Option<(RewindState, Bytes)>,
) -> ExitCode | let mut ret: ExitCode = Errno::Success.into();
let err = if ctx.data(&store).thread.is_main() {
trace!(%pid, %tid, "re-invoking main");
let start = unsafe { ctx.data(&store).inner() }.start.clone().unwrap();
start.call(&mut store)
} else {
trace!(%pid, %tid, "re-invoking thread_spawn");
let start = unsafe { ctx.data(&store).inner() }
.thread_spawn
.clone()
.unwrap();
start.call(&mut store, 0, 0)
};
if let Err(err) = err {
match err.downcast::<WasiError>() {
Ok(WasiError::Exit(exit_code)) => {
ret = exit_code;
}
Ok(WasiError::DeepSleep(deep)) => {
trace!(%pid, %tid, "entered a deep sleep");
// Create the respawn function
let respawn = {
let tasks = tasks.clone();
let rewind_state = deep.rewind;
move |ctx, store, rewind_result| {
run::<M>(
ctx,
store,
child_handle,
Some((rewind_state, rewind_result)),
);
}
};
// Spawns the WASM process after a trigger
unsafe {
tasks.resume_wasm_after_poller(Box::new(respawn), ctx, store, deep.trigger)
};
return Errno::Success.into();
}
_ => {}
}
}
trace!(%pid, %tid, "child exited (code = {})", ret);
// Clean up the environment and return the result
ctx.cleanup(&mut store, Some(ret));
// We drop the handle at the last moment which will close the thread
drop(child_handle);
ret
}
| {
let env = ctx.data(&store);
let tasks = env.tasks().clone();
let pid = env.pid();
let tid = env.tid();
// If we need to rewind then do so
if let Some((rewind_state, rewind_result)) = rewind_state {
let res = rewind_ext::<M>(
ctx.env.clone().into_mut(&mut store),
rewind_state.memory_stack,
rewind_state.rewind_stack,
rewind_state.store_data,
rewind_result,
);
if res != Errno::Success {
return res.into();
}
}
| identifier_body |
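// ForkResult derives Serialize/Deserialize because it travels through the
// unwind/rewind machinery as an opaque payload. A minimal round-trip check;
// bincode is assumed purely for illustration, the file does not pin a codec.
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct ForkResultDemo {
    pid: u32,
    ret: u16,
}

fn fork_result_round_trip_demo() {
    let v = ForkResultDemo { pid: 0, ret: 0 };
    let bytes = bincode::serialize(&v).unwrap();
    let back: ForkResultDemo = bincode::deserialize(&bytes).unwrap();
    assert_eq!(v, back);
}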
proc_fork.rs | use super::*;
use crate::{
capture_snapshot,
os::task::OwnedTaskStatus,
runtime::task_manager::{TaskWasm, TaskWasmRunProperties},
syscalls::*,
WasiThreadHandle,
};
use serde::{Deserialize, Serialize};
use wasmer::Memory;
#[derive(Serialize, Deserialize)]
pub(crate) struct ForkResult {
pub pid: Pid,
pub ret: Errno,
}
/// ### `proc_fork()`
/// Forks the current process into a new subprocess. If the function
/// returns zero then it's the new subprocess. If it returns a positive
/// number then it's the current process and the $pid represents the child.
#[instrument(level = "debug", skip_all, fields(pid = ctx.data().process.pid().raw()), ret, err)]
pub fn proc_fork<M: MemorySize>(
mut ctx: FunctionEnvMut<'_, WasiEnv>,
mut copy_memory: Bool,
pid_ptr: WasmPtr<Pid, M>,
) -> Result<Errno, WasiError> {
wasi_try_ok!(WasiEnv::process_signals_and_exit(&mut ctx)?);
// If we were just restored then we need to return the value instead
if let Some(result) = unsafe { handle_rewind::<M, ForkResult>(&mut ctx) } {
if result.pid == 0 {
trace!("handle_rewind - i am child (ret={})", result.ret);
} else {
trace!(
"handle_rewind - i am parent (child={}, ret={})",
result.pid,
result.ret
);
}
let memory = unsafe { ctx.data().memory_view(&ctx) };
wasi_try_mem_ok!(pid_ptr.write(&memory, result.pid));
return Ok(result.ret);
}
trace!(%copy_memory, "capturing");
// Fork the environment which will copy all the open file handles
// and associate a new context but otherwise shares things like the
// file system interface. The handle to the forked process is stored
// in the parent process context
let (mut child_env, mut child_handle) = match ctx.data().fork() {
Ok(p) => p,
Err(err) => {
debug!("could not fork process: {err}");
// TODO: evaluate the appropriate error code, document it in the spec.
return Ok(Errno::Perm);
}
};
let child_pid = child_env.process.pid();
let child_finished = child_env.process.finished.clone();
// We write a zero to the PID before we capture the stack
// so that this is what will be returned to the child
{
let mut inner = ctx.data().process.inner.write().unwrap();
inner.children.push(child_env.process.clone());
}
let env = ctx.data();
let memory = unsafe { env.memory_view(&ctx) };
// Setup some properties in the child environment
wasi_try_mem_ok!(pid_ptr.write(&memory, 0));
let pid = child_env.pid();
let tid = child_env.tid();
// Pass some offsets to the unwind function
let pid_offset = pid_ptr.offset();
// If we are not copying the memory then we act like a `vfork`
// instead which will pretend to be the new process for a period
// of time until `proc_exec` is called at which point the fork
// actually occurs
if copy_memory == Bool::False {
// Perform the unwind action
return unwind::<M, _>(ctx, move |mut ctx, mut memory_stack, rewind_stack| {
// Grab all the globals and serialize them
let store_data = crate::utils::store::capture_snapshot(&mut ctx.as_store_mut())
.serialize()
.unwrap();
let store_data = Bytes::from(store_data);
// We first fork the environment and replace the current environment
// so that the process can continue to prepare for the real fork as
// if it had actually forked
child_env.swap_inner(ctx.data_mut());
std::mem::swap(ctx.data_mut(), &mut child_env);
ctx.data_mut().vfork.replace(WasiVFork {
rewind_stack: rewind_stack.clone(),
memory_stack: memory_stack.clone(),
store_data: store_data.clone(),
env: Box::new(child_env),
handle: child_handle,
});
// Carry on as if the fork had taken place (which basically means
// it pretends to be the new process with the old one suspended)
// Rewind the stack and carry on
match rewind::<M, _>(
ctx,
memory_stack.freeze(),
rewind_stack.freeze(),
store_data,
ForkResult {
pid: 0,
ret: Errno::Success,
},
) {
Errno::Success => OnCalledAction::InvokeAgain,
err => {
warn!("failed - could not rewind the stack - errno={}", err);
OnCalledAction::Trap(Box::new(WasiError::Exit(err.into())))
}
}
});
}
// Create the thread that will back this forked process
let state = env.state.clone();
let bin_factory = env.bin_factory.clone();
// Perform the unwind action
let snapshot = capture_snapshot(&mut ctx.as_store_mut());
unwind::<M, _>(ctx, move |mut ctx, mut memory_stack, rewind_stack| {
let tasks = ctx.data().tasks().clone();
let span = debug_span!(
"unwind",
memory_stack_len = memory_stack.len(),
rewind_stack_len = rewind_stack.len()
);
let _span_guard = span.enter();
let memory_stack = memory_stack.freeze();
let rewind_stack = rewind_stack.freeze();
// Grab all the globals and serialize them
let store_data = snapshot.serialize().unwrap();
let store_data = Bytes::from(store_data);
// Now we use the environment and memory references
let runtime = child_env.runtime.clone();
let tasks = child_env.tasks().clone();
let child_memory_stack = memory_stack.clone();
let child_rewind_stack = rewind_stack.clone();
let module = unsafe { ctx.data().inner() }.module_clone();
let memory = unsafe { ctx.data().inner() }.memory_clone();
let spawn_type = SpawnMemoryType::CopyMemory(memory, ctx.as_store_ref());
// Spawn a new process with this current execution environment
let signaler = Box::new(child_env.process.clone());
{
let runtime = runtime.clone();
let tasks = tasks.clone();
let tasks_outer = tasks.clone();
let store_data = store_data.clone();
let run = move |mut props: TaskWasmRunProperties| {
let ctx = props.ctx;
let mut store = props.store;
// Rewind the stack and carry on
{
trace!("rewinding child");
let mut ctx = ctx.env.clone().into_mut(&mut store);
let (data, mut store) = ctx.data_and_store_mut();
match rewind::<M, _>(
ctx,
child_memory_stack,
child_rewind_stack,
store_data.clone(),
ForkResult {
pid: 0,
ret: Errno::Success,
},
) {
Errno::Success => OnCalledAction::InvokeAgain,
err => {
warn!(
"wasm rewind failed - could not rewind the stack - errno={}",
err
);
return;
}
};
}
// Invoke the start function
run::<M>(ctx, store, child_handle, None);
};
tasks_outer
.task_wasm(
TaskWasm::new(Box::new(run), child_env, module, false)
.with_snapshot(&snapshot)
.with_memory(spawn_type),
)
.map_err(|err| {
warn!(
"failed to fork as the process could not be spawned - {}",
err
);
err
})
.ok();
};
// Rewind the stack and carry on
match rewind::<M, _>(
ctx,
memory_stack,
rewind_stack,
store_data,
ForkResult {
pid: child_pid.raw() as Pid,
ret: Errno::Success,
},
) {
Errno::Success => OnCalledAction::InvokeAgain,
err => {
warn!("failed - could not rewind the stack - errno={}", err);
OnCalledAction::Trap(Box::new(WasiError::Exit(err.into())))
}
}
})
}
fn run<M: MemorySize>(
ctx: WasiFunctionEnv,
mut store: Store,
child_handle: WasiThreadHandle,
rewind_state: Option<(RewindState, Bytes)>,
) -> ExitCode {
let env = ctx.data(&store);
let tasks = env.tasks().clone();
let pid = env.pid();
let tid = env.tid();
// If we need to rewind then do so
if let Some((rewind_state, rewind_result)) = rewind_state {
let res = rewind_ext::<M>(
ctx.env.clone().into_mut(&mut store),
rewind_state.memory_stack,
rewind_state.rewind_stack,
rewind_state.store_data,
rewind_result,
);
if res != Errno::Success {
return res.into();
}
}
let mut ret: ExitCode = Errno::Success.into();
let err = if ctx.data(&store).thread.is_main() {
trace!(%pid, %tid, "re-invoking main");
let start = unsafe { ctx.data(&store).inner() }.start.clone().unwrap();
start.call(&mut store)
} else {
trace!(%pid, %tid, "re-invoking thread_spawn");
let start = unsafe { ctx.data(&store).inner() }
.thread_spawn
.clone()
.unwrap();
start.call(&mut store, 0, 0)
};
if let Err(err) = err {
match err.downcast::<WasiError>() {
Ok(WasiError::Exit(exit_code)) => {
ret = exit_code;
}
Ok(WasiError::DeepSleep(deep)) => {
trace!(%pid, %tid, "entered a deep sleep");
| let rewind_state = deep.rewind;
move |ctx, store, rewind_result| {
run::<M>(
ctx,
store,
child_handle,
Some((rewind_state, rewind_result)),
);
}
};
// Spawns the WASM process after a trigger
unsafe {
tasks.resume_wasm_after_poller(Box::new(respawn), ctx, store, deep.trigger)
};
return Errno::Success.into();
}
_ => {}
}
}
trace!(%pid, %tid, "child exited (code = {})", ret);
// Clean up the environment and return the result
ctx.cleanup(&mut store, Some(ret));
// We drop the handle at the last moment which will close the thread
drop(child_handle);
ret
} | // Create the respawn function
let respawn = {
let tasks = tasks.clone(); | random_line_split |
main.rs | #[cfg(test)]
extern crate memsec;
use std::collections::BTreeMap;
use std::env;
use std::fmt::{self, Display, Formatter};
use std::fs;
use std::iter;
use std::process::{Command, Stdio};
use std::str::{self, FromStr};
use std::sync::{Arc, Mutex};
use std::sync::atomic::{AtomicBool, Ordering};
use std::time::Duration;
use std::thread::{self, JoinHandle};
// The number of space characters (" ") between table columns.
const COLUMN_BUFFER: usize = 8;
// Ignore child processes with the following names.
const IGNORE_CHILD_PROCS: [&str; 3] = ["rustc", "[rustc]", "rustdoc"];
type Pid = u32;
type Pname = String;
#[derive(Clone, Debug)]
struct Pinfo {
pname: Pname,
max_locked: u64,
}
#[derive(Debug)]
struct Database(BTreeMap<Pid, Pinfo>);
impl Database {
fn new() -> Self {
Database(BTreeMap::new())
}
fn contains(&self, pid: &Pid) -> bool {
self.0.contains_key(pid)
}
fn new_child_process(&mut self, pid: Pid, pname: Pname) {
self.0.insert(pid, Pinfo { pname, max_locked: 0 });
}
fn update(&mut self, pid: Pid, kbs_locked: u64) |
fn table(&self) -> String {
let col1_heading = "Process Name";
let col2_heading = "Max Locked Memory (kb)";
let col1_heading_len = col1_heading.chars().count();
let col2_heading_len = col2_heading.chars().count();
let min_col2_start = col1_heading_len + COLUMN_BUFFER;
let col2_start = self.0
.values()
.fold(min_col2_start, |longest, pinfo| {
match pinfo.pname.chars().count() + COLUMN_BUFFER {
n_chars if n_chars > longest => n_chars,
_ => longest,
}
});
let heading_whitespace: String = (0..col2_start - col1_heading_len)
.map(|_| ' ')
.collect();
let heading = format!(
"{}{}{}",
col1_heading,
heading_whitespace,
col2_heading,
);
let top_border = format!(
"{}{}{}",
(0..col1_heading_len).map(|_| '=').collect::<String>(),
heading_whitespace,
(0..col2_heading_len).map(|_| '=').collect::<String>(),
);
let mut stdout = format!("\n{}\n{}\n", heading, top_border);
for Pinfo { pname, max_locked } in self.0.values() {
let pname_len = pname.chars().count();
let whitespace: String = (0..col2_start - pname_len)
.map(|_| ' ')
.collect();
let line = format!("{}{}{}\n", pname, whitespace, max_locked);
stdout.push_str(&line);
}
let table_width = col2_start + col2_heading_len;
let bottom_border: String = (0..table_width).map(|_| '=').collect();
stdout.push_str(&bottom_border);
stdout
}
}
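// A quick exercise of the Database semantics above: `update` only ever
// raises `max_locked`, and samples for unregistered pids are dropped.
// Uses only the types defined in this file.
fn database_demo() {
    let mut db = Database::new();
    db.new_child_process(42, "my_test-abc123".to_string());
    db.update(42, 128); // max_locked: 0 -> 128
    db.update(42, 64); // lower sample; the maximum stays at 128
    db.update(7, 999); // never registered, so silently ignored
    println!("{}", db.table());
}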
#[derive(Debug)]
enum Limit {
Kb(u64),
Unlimited,
}
impl Display for Limit {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
match self {
Limit::Kb(kbs) => write!(f, "{}", kbs),
_ => write!(f, "unlimited"),
}
}
}
impl FromStr for Limit {
type Err = ();
fn from_str(s: &str) -> Result<Self, Self::Err> {
if s == "unlimited" {
Ok(Limit::Unlimited)
} else {
let n_bytes: u64 = s.parse::<u64>().map_err(|_| ())?;
Ok(Limit::Kb(n_bytes / 1024))
}
}
}
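// `prlimit` reports the limit in bytes, so `FromStr` divides by 1024.
// A small check of both branches:
fn limit_parse_demo() {
    let soft: Limit = "65536".parse().unwrap();
    assert!(matches!(soft, Limit::Kb(64))); // 65536 bytes == 64 kb
    let hard: Limit = "unlimited".parse().unwrap();
    assert!(matches!(hard, Limit::Unlimited));
}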
#[derive(Debug)]
struct MlockLimit {
soft: Limit,
hard: Limit,
}
fn run_prlimit() -> MlockLimit {
let output = Command::new("prlimit")
.args(&["--memlock", "--output=SOFT,HARD", "--noheadings"])
.output()
.map(|output| String::from_utf8(output.stdout).unwrap())
.unwrap_or_else(|e| panic!("Subprocess failed: `prlimit`: {:?}", e));
let split: Vec<&str> = output.split_whitespace().collect();
let soft = Limit::from_str(split[0]).unwrap();
let hard = Limit::from_str(split[1]).unwrap();
MlockLimit { soft, hard }
}
fn run_ps(cargo_test_pid: Pid) -> Vec<(Pid, Pname)> {
let mut ps = vec![];
let ppid = cargo_test_pid.to_string();
let output = Command::new("ps")
.args(&["-f", "--ppid", &ppid])
.output()
.map(|output| String::from_utf8(output.stdout).unwrap())
.expect("Subprocess failed: `ps`");
for line in output.trim().lines().skip(1) {
let split: Vec<&str> = line.split_whitespace().collect();
let pid: Pid = split[1].parse().unwrap();
let pname: Pname = split[7]
.split_whitespace()
.nth(0)
.unwrap()
.split('/')
.last()
.unwrap()
.to_string();
if !IGNORE_CHILD_PROCS.contains(&pname.as_ref()) {
ps.push((pid, pname));
}
}
ps
}
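// The pname extraction above assumes the `ps -f` CMD column layout: take
// the first whitespace-separated token, then its last path segment. A
// stand-alone repro with a made-up test binary path:
fn pname_demo() {
    let cmd = "/home/u/target/debug/deps/my_test-abc123 --quiet";
    let pname = cmd
        .split_whitespace()
        .next()
        .unwrap()
        .split('/')
        .last()
        .unwrap();
    assert_eq!(pname, "my_test-abc123");
}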
// Launches a thread that continuously calls `ps`, updates the shared
// `child_pids` vector, and inserts the child processes' pids and names
// into the measurements database.
fn launch_ps_thread(
cargo_test_pid: Arc<Mutex<Option<Pid>>>,
child_pids: Arc<Mutex<Vec<Pid>>>,
db: Arc<Mutex<Database>>,
done: Arc<AtomicBool>,
) -> JoinHandle<()> {
thread::spawn(move || {
let cargo_test_pid = loop {
if let Some(pid) = *cargo_test_pid.lock().unwrap() {
break pid;
}
};
while !done.load(Ordering::Relaxed) {
let ps = run_ps(cargo_test_pid);
*child_pids.lock().unwrap() = ps.iter().map(|(pid, _pname)| *pid).collect();
let mut db = db.lock().unwrap();
for (pid, pname) in ps {
if !db.contains(&pid) {
db.new_child_process(pid, pname);
}
}
thread::sleep(Duration::from_millis(100));
}
})
}
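// Both launcher threads coordinate through the same
// Arc<Mutex<Option<Pid>>> handshake: workers block until `main` publishes
// the `cargo test` pid. The miniature below (reusing this file's imports)
// adds a sleep to the polling loop; note that the loop above spins on the
// mutex without one.
fn handshake_demo() {
    let slot: Arc<Mutex<Option<Pid>>> = Arc::new(Mutex::new(None));
    let reader = {
        let slot = slot.clone();
        thread::spawn(move || loop {
            if let Some(pid) = *slot.lock().unwrap() {
                break pid;
            }
            thread::sleep(Duration::from_millis(1));
        })
    };
    *slot.lock().unwrap() = Some(1234); // publisher side
    assert_eq!(reader.join().unwrap(), 1234);
}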
// Launches a thread that continuously reads each child process's
// "status" file, parses each file to get the amount of memory locked by that
// child process, then updates the database with the locked memory
// information.
fn launch_measurements_thread(
cargo_test_pid: Arc<Mutex<Option<Pid>>>,
child_pids: Arc<Mutex<Vec<Pid>>>,
db: Arc<Mutex<Database>>,
done: Arc<AtomicBool>,
) -> JoinHandle<()> {
thread::spawn(move || {
while cargo_test_pid.lock().unwrap().is_none() {
thread::sleep(Duration::from_millis(1));
}
while !done.load(Ordering::Relaxed) {
for child_pid in child_pids.lock().unwrap().iter() {
if let Some(kbs_locked) = parse_status_file(*child_pid) {
db.lock().unwrap().update(*child_pid, kbs_locked);
}
}
thread::sleep(Duration::from_millis(1));
}
})
}
// Reads a processes' "status" file; parsing it for the ammount of memory
// currently locked by the process.
fn parse_status_file(pid: Pid) -> Option<u64> {
let path = format!("/proc/{}/status", pid);
let file = fs::read_to_string(path).ok()?;
for line in file.lines() {
if line.starts_with("VmLck") {
match line.trim().split_whitespace().nth(1) {
Some(s) => return s.parse().ok(),
_ => return None,
};
}
}
None
}
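// A representative parse of the VmLck line; the kernel reports the value
// in kB, and the inline fragment below is illustrative:
fn vmlck_demo() {
    let status = "Name:\tmy_test\nVmLck:\t       4 kB\nVmPin:\t       0 kB\n";
    let kbs = status
        .lines()
        .find(|l| l.starts_with("VmLck"))
        .and_then(|l| l.trim().split_whitespace().nth(1))
        .and_then(|s| s.parse::<u64>().ok());
    assert_eq!(kbs, Some(4));
}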
fn main() {
println!("CURRENT CWD => {:?}", env::current_dir());
println!("CURRENT EXE => {:?}", env::current_exe());
// Initialize the values that will be shared between threads.
let cargo_test_pid: Arc<Mutex<Option<Pid>>> = Arc::new(Mutex::new(None));
let child_pids: Arc<Mutex<Vec<Pid>>> = Arc::new(Mutex::new(vec![]));
let db = Arc::new(Mutex::new(Database::new()));
let done = Arc::new(AtomicBool::new(false));
// Start the worker threads.
let ps_thread = launch_ps_thread(
cargo_test_pid.clone(),
child_pids.clone(),
db.clone(),
done.clone()
);
let file_reader_thread = launch_measurements_thread(
cargo_test_pid.clone(),
child_pids.clone(),
db.clone(),
done.clone()
);
// Get the system's locked memory limit.
let mlock_limit = run_prlimit();
println!("\nMlock Monitor for `cargo test`");
println!("===============================");
println!("Locked memory limit (soft, kb): {}", mlock_limit.soft);
println!("Lock memory limit (hard, kb): {}", mlock_limit.hard);
print!("\nRunning `cargo test`... ");
// Run `cargo test`.
let cwd = env::current_dir().unwrap();
let mut cargo_test_args = vec![
"test".to_string(),
format!("--manifest-path={}/Cargo.toml", cwd.to_str().unwrap()),
];
cargo_test_args.extend(env::args().skip(1));
/*
let mut cargo_test_cmd = Command::new("cargo")
.args(&cargo_test_args)
.envs(env::vars())
.stdout(Stdio::piped())
.stderr(Stdio::piped());
println!("Running `cargo test`: {:?}", cargo_test_cmd);
let cargo_test_output = cargo_test_cmd
.spawn()
.and_then(|child| {
*cargo_test_pid.lock().unwrap() = Some(child.id());
child.wait_with_output()
})
.unwrap();
*/
let cargo_test_output = Command::new("cargo")
.args(&cargo_test_args)
.envs(env::vars())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()
.and_then(|child| {
*cargo_test_pid.lock().unwrap() = Some(child.id());
child.wait_with_output()
})
.unwrap();
// Once `cargo test` has finished, stop the worker threads and
// print the measurement results.
println!("done!");
done.store(true, Ordering::Relaxed);
let _ = ps_thread.join();
let _ = file_reader_thread.join();
println!("{}", db.lock().unwrap().table());
println!("\nOutput `cargo test`");
println!("====================");
println!("{}", String::from_utf8_lossy(&cargo_test_output.stdout));
}
#[cfg(test)]
mod tests {
use std::mem::size_of_val;
use std::thread;
use std::time::Duration;
use memsec::mlock;
#[test]
fn test_mlock() {
println!("TEST TEST TEST");
let buf: [u64; 600] = [555; 600];
let ptr = (&buf).as_ptr() as *mut u8;
unsafe {
mlock(ptr, size_of_val(&buf));
}
thread::sleep(Duration::from_secs(2));
assert!(true);
}
}
| {
if let Some(pinfo) = self.0.get_mut(&pid) {
if kbs_locked > pinfo.max_locked {
pinfo.max_locked = kbs_locked;
}
}
} | identifier_body |
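// What the monitor should observe for the mlock test above: mlock pins
// whole pages, so the 600 * 8 = 4800-byte buffer needs at least two
// 4096-byte pages (8 kB of VmLck), assuming 4 kB pages and an allocation
// that does not straddle an extra page boundary.
fn min_locked_kb(buf_bytes: u64, page_bytes: u64) -> u64 {
    // ceil(buf / page) pages, reported by VmLck in kB
    buf_bytes.div_ceil(page_bytes) * page_bytes / 1024
}
// min_locked_kb(600 * 8, 4096) == 8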
main.rs | #[cfg(test)]
extern crate memsec;
use std::collections::BTreeMap;
use std::env;
use std::fmt::{self, Display, Formatter};
use std::fs;
use std::iter;
use std::process::{Command, Stdio};
use std::str::{self, FromStr};
use std::sync::{Arc, Mutex};
use std::sync::atomic::{AtomicBool, Ordering};
use std::time::Duration;
use std::thread::{self, JoinHandle};
// The number of space characters (" ") between table columns.
const COLUMN_BUFFER: usize = 8;
// Ignore child processes with the following names.
const IGNORE_CHILD_PROCS: [&str; 3] = ["rustc", "[rustc]", "rustdoc"];
type Pid = u32;
type Pname = String;
#[derive(Clone, Debug)]
struct Pinfo {
pname: Pname,
max_locked: u64,
}
#[derive(Debug)]
struct Database(BTreeMap<Pid, Pinfo>);
impl Database {
fn new() -> Self {
Database(BTreeMap::new())
}
fn contains(&self, pid: &Pid) -> bool {
self.0.contains_key(pid)
}
fn new_child_process(&mut self, pid: Pid, pname: Pname) {
self.0.insert(pid, Pinfo { pname, max_locked: 0 });
}
fn update(&mut self, pid: Pid, kbs_locked: u64) {
if let Some(pinfo) = self.0.get_mut(&pid) {
if kbs_locked > pinfo.max_locked {
pinfo.max_locked = kbs_locked;
}
}
}
fn table(&self) -> String {
let col1_heading = "Process Name";
let col2_heading = "Max Locked Memory (kb)";
let col1_heading_len = col1_heading.chars().count();
let col2_heading_len = col2_heading.chars().count();
let min_col2_start = col1_heading_len + COLUMN_BUFFER;
let col2_start = self.0
.values()
.fold(min_col2_start, |longest, pinfo| {
match pinfo.pname.chars().count() + COLUMN_BUFFER {
n_chars if n_chars > longest => n_chars,
_ => longest,
}
});
let heading_whitespace: String = (0..col2_start - col1_heading_len)
.map(|_| ' ')
.collect();
let heading = format!(
"{}{}{}",
col1_heading,
heading_whitespace,
col2_heading,
);
let top_border = format!(
"{}{}{}",
(0..col1_heading_len).map(|_| '=').collect::<String>(),
heading_whitespace,
(0..col2_heading_len).map(|_| '=').collect::<String>(),
);
let mut stdout = format!("\n{}\n{}\n", heading, top_border);
for Pinfo { pname, max_locked } in self.0.values() {
let pname_len = pname.chars().count();
let whitespace: String = (0..col2_start - pname_len)
.map(|_| ' ')
.collect();
let line = format!("{}{}{}\n", pname, whitespace, max_locked);
stdout.push_str(&line);
}
let table_width = col2_start + col2_heading_len;
let bottom_border: String = (0..table_width).map(|_| '=').collect();
stdout.push_str(&bottom_border);
stdout
}
}
#[derive(Debug)]
enum Limit {
Kb(u64),
Unlimited,
}
impl Display for Limit {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
match self {
Limit::Kb(kbs) => write!(f, "{}", kbs),
_ => write!(f, "unlimited"),
}
}
}
impl FromStr for Limit {
type Err = ();
fn from_str(s: &str) -> Result<Self, Self::Err> {
if s == "unlimited" {
Ok(Limit::Unlimited)
} else {
let n_bytes: u64 = s.parse::<u64>().map_err(|_| ())?;
Ok(Limit::Kb(n_bytes / 1024))
}
}
}
#[derive(Debug)]
struct | {
soft: Limit,
hard: Limit,
}
fn run_prlimit() -> MlockLimit {
let output = Command::new("prlimit")
.args(&["--memlock", "--output=SOFT,HARD", "--noheadings"])
.output()
.map(|output| String::from_utf8(output.stdout).unwrap())
.unwrap_or_else(|e| panic!("Subprocess failed: `prlimit`: {:?}", e));
let split: Vec<&str> = output.split_whitespace().collect();
let soft = Limit::from_str(split[0]).unwrap();
let hard = Limit::from_str(split[1]).unwrap();
MlockLimit { soft, hard }
}
fn run_ps(cargo_test_pid: Pid) -> Vec<(Pid, Pname)> {
let mut ps = vec![];
let ppid = cargo_test_pid.to_string();
let output = Command::new("ps")
.args(&["-f", "--ppid", &ppid])
.output()
.map(|output| String::from_utf8(output.stdout).unwrap())
.expect("Subprocess failed: `ps`");
for line in output.trim().lines().skip(1) {
let split: Vec<&str> = line.split_whitespace().collect();
let pid: Pid = split[1].parse().unwrap();
let pname: Pname = split[7]
.split_whitespace()
.nth(0)
.unwrap()
.split('/')
.last()
.unwrap()
.to_string();
if !IGNORE_CHILD_PROCS.contains(&pname.as_ref()) {
ps.push((pid, pname));
}
}
ps
}
// Launches a thread that continuously calls `ps`, updates the shared
// `child_pids` vector, and inserts the child processes' pids and names
// into the measurements database.
fn launch_ps_thread(
cargo_test_pid: Arc<Mutex<Option<Pid>>>,
child_pids: Arc<Mutex<Vec<Pid>>>,
db: Arc<Mutex<Database>>,
done: Arc<AtomicBool>,
) -> JoinHandle<()> {
thread::spawn(move || {
let cargo_test_pid = loop {
if let Some(pid) = *cargo_test_pid.lock().unwrap() {
break pid;
}
};
while !done.load(Ordering::Relaxed) {
let ps = run_ps(cargo_test_pid);
*child_pids.lock().unwrap() = ps.iter().map(|(pid, _pname)| *pid).collect();
let mut db = db.lock().unwrap();
for (pid, pname) in ps {
if !db.contains(&pid) {
db.new_child_process(pid, pname);
}
}
thread::sleep(Duration::from_millis(100));
}
})
}
// Launches a thread that continuously reads each child process's
// "status" file, parses each file to get the amount of memory locked by that
// child process, then updates the database with the locked memory
// information.
fn launch_measurements_thread(
cargo_test_pid: Arc<Mutex<Option<Pid>>>,
child_pids: Arc<Mutex<Vec<Pid>>>,
db: Arc<Mutex<Database>>,
done: Arc<AtomicBool>,
) -> JoinHandle<()> {
thread::spawn(move || {
while cargo_test_pid.lock().unwrap().is_none() {
thread::sleep(Duration::from_millis(1));
}
while !done.load(Ordering::Relaxed) {
for child_pid in child_pids.lock().unwrap().iter() {
if let Some(kbs_locked) = parse_status_file(*child_pid) {
db.lock().unwrap().update(*child_pid, kbs_locked);
}
}
thread::sleep(Duration::from_millis(1));
}
})
}
// Reads a processes' "status" file; parsing it for the ammount of memory
// currently locked by the process.
fn parse_status_file(pid: Pid) -> Option<u64> {
let path = format!("/proc/{}/status", pid);
let file = fs::read_to_string(path).ok()?;
for line in file.lines() {
if line.starts_with("VmLck") {
match line.trim().split_whitespace().nth(1) {
Some(s) => return s.parse().ok(),
_ => return None,
};
}
}
None
}
fn main() {
println!("CURRENT CWD => {:?}", env::current_dir());
println!("CURRENT EXE => {:?}", env::current_exe());
// Initialize the values that will be shared between threads.
let cargo_test_pid: Arc<Mutex<Option<Pid>>> = Arc::new(Mutex::new(None));
let child_pids: Arc<Mutex<Vec<Pid>>> = Arc::new(Mutex::new(vec![]));
let db = Arc::new(Mutex::new(Database::new()));
let done = Arc::new(AtomicBool::new(false));
// Start the worker threads.
let ps_thread = launch_ps_thread(
cargo_test_pid.clone(),
child_pids.clone(),
db.clone(),
done.clone()
);
let file_reader_thread = launch_measurements_thread(
cargo_test_pid.clone(),
child_pids.clone(),
db.clone(),
done.clone()
);
// Get the system's locked memory limit.
let mlock_limit = run_prlimit();
println!("\nMlock Monitor for `cargo test`");
println!("===============================");
println!("Locked memory limit (soft, kb): {}", mlock_limit.soft);
println!("Lock memory limit (hard, kb): {}", mlock_limit.hard);
print!("\nRunning `cargo test`... ");
// Run `cargo test`.
let cwd = env::current_dir().unwrap();
let mut cargo_test_args = vec![
"test".to_string(),
format!("--manifest-path={}/Cargo.toml", cwd.to_str().unwrap()),
];
cargo_test_args.extend(env::args().skip(1));
/*
let mut cargo_test_cmd = Command::new("cargo")
.args(&cargo_test_args)
.envs(env::vars())
.stdout(Stdio::piped())
.stderr(Stdio::piped());
println!("Running `cargo test`: {:?}", cargo_test_cmd);
let cargo_test_output = cargo_test_cmd
.spawn()
.and_then(|child| {
*cargo_test_pid.lock().unwrap() = Some(child.id());
child.wait_with_output()
})
.unwrap();
*/
let cargo_test_output = Command::new("cargo")
.args(&cargo_test_args)
.envs(env::vars())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()
.and_then(|child| {
*cargo_test_pid.lock().unwrap() = Some(child.id());
child.wait_with_output()
})
.unwrap();
// Once `cargo test` has finished, stop the worker threads and
// print the measurement results.
println!("done!");
done.store(true, Ordering::Relaxed);
let _ = ps_thread.join();
let _ = file_reader_thread.join();
println!("{}", db.lock().unwrap().table());
println!("\nOutput `cargo test`");
println!("====================");
println!("{}", String::from_utf8_lossy(&cargo_test_output.stdout));
}
#[cfg(test)]
mod tests {
use std::mem::size_of_val;
use std::thread;
use std::time::Duration;
use memsec::mlock;
#[test]
fn test_mlock() {
println!("TEST TEST TEST");
let buf: [u64; 600] = [555; 600];
let ptr = (&buf).as_ptr() as *mut u8;
unsafe {
mlock(ptr, size_of_val(&buf));
}
thread::sleep(Duration::from_secs(2));
assert!(true);
}
}
| MlockLimit | identifier_name |
main.rs | #[cfg(test)]
extern crate memsec;
use std::collections::BTreeMap;
use std::env;
use std::fmt::{self, Display, Formatter};
use std::fs;
use std::iter;
use std::process::{Command, Stdio};
use std::str::{self, FromStr};
use std::sync::{Arc, Mutex};
use std::sync::atomic::{AtomicBool, Ordering};
use std::time::Duration;
use std::thread::{self, JoinHandle};
// The number of space characters (" ") between table columns.
const COLUMN_BUFFER: usize = 8;
// Ignore child processes with the following names.
const IGNORE_CHILD_PROCS: [&str; 3] = ["rustc", "[rustc]", "rustdoc"];
type Pid = u32;
type Pname = String;
#[derive(Clone, Debug)]
struct Pinfo {
pname: Pname,
max_locked: u64,
}
#[derive(Debug)]
struct Database(BTreeMap<Pid, Pinfo>);
impl Database {
fn new() -> Self {
Database(BTreeMap::new())
}
fn contains(&self, pid: &Pid) -> bool {
self.0.contains_key(pid)
}
fn new_child_process(&mut self, pid: Pid, pname: Pname) {
self.0.insert(pid, Pinfo { pname, max_locked: 0 });
}
fn update(&mut self, pid: Pid, kbs_locked: u64) {
if let Some(pinfo) = self.0.get_mut(&pid) {
if kbs_locked > pinfo.max_locked {
pinfo.max_locked = kbs_locked;
}
}
}
fn table(&self) -> String {
let col1_heading = "Process Name";
let col2_heading = "Max Locked Memory (kb)";
let col1_heading_len = col1_heading.chars().count();
let col2_heading_len = col2_heading.chars().count();
let min_col2_start = col1_heading_len + COLUMN_BUFFER;
let col2_start = self.0
.values()
.fold(min_col2_start, |longest, pinfo| {
match pinfo.pname.chars().count() + COLUMN_BUFFER {
n_chars if n_chars > longest => n_chars,
_ => longest,
}
});
let heading_whitespace: String = (0..col2_start - col1_heading_len)
.map(|_| ' ')
.collect();
let heading = format!(
"{}{}{}",
col1_heading,
heading_whitespace,
col2_heading,
);
let top_border = format!(
"{}{}{}",
(0..col1_heading_len).map(|_| '=').collect::<String>(),
heading_whitespace,
(0..col2_heading_len).map(|_| '=').collect::<String>(),
);
let mut stdout = format!("\n{}\n{}\n", heading, top_border);
for Pinfo { pname, max_locked } in self.0.values() {
let pname_len = pname.chars().count();
let whitespace: String = (0..col2_start - pname_len)
.map(|_| ' ')
.collect();
let line = format!("{}{}{}\n", pname, whitespace, max_locked);
stdout.push_str(&line);
}
let table_width = col2_start + col2_heading_len;
let bottom_border: String = (0..table_width).map(|_| '=').collect();
stdout.push_str(&bottom_border);
stdout
}
}
#[derive(Debug)]
enum Limit {
Kb(u64),
Unlimited,
}
impl Display for Limit {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
match self {
Limit::Kb(kbs) => write!(f, "{}", kbs),
_ => write!(f, "unlimited"),
}
}
}
impl FromStr for Limit {
type Err = ();
fn from_str(s: &str) -> Result<Self, Self::Err> {
if s == "unlimited" {
Ok(Limit::Unlimited)
} else {
let n_bytes: u64 = s.parse::<u64>().map_err(|_| ())?;
Ok(Limit::Kb(n_bytes / 1024))
}
}
}
#[derive(Debug)]
struct MlockLimit {
soft: Limit,
hard: Limit,
}
fn run_prlimit() -> MlockLimit {
let output = Command::new("prlimit")
.args(&["--memlock", "--output=SOFT,HARD", "--noheadings"])
.output()
.map(|output| String::from_utf8(output.stdout).unwrap())
.unwrap_or_else(|e| panic!("Subprocess failed: `prlimit`: {:?}", e));
let split: Vec<&str> = output.split_whitespace().collect();
let soft = Limit::from_str(split[0]).unwrap();
let hard = Limit::from_str(split[1]).unwrap();
MlockLimit { soft, hard }
}
fn run_ps(cargo_test_pid: Pid) -> Vec<(Pid, Pname)> {
let mut ps = vec![];
let ppid = cargo_test_pid.to_string();
let output = Command::new("ps")
.args(&["-f", "--ppid", &ppid])
.output()
.map(|output| String::from_utf8(output.stdout).unwrap())
.expect("Subprocess failed: `ps`");
for line in output.trim().lines().skip(1) {
let split: Vec<&str> = line.split_whitespace().collect();
let pid: Pid = split[1].parse().unwrap();
let pname: Pname = split[7]
.split_whitespace()
.nth(0)
.unwrap()
.split('/')
.last()
.unwrap()
.to_string();
if !IGNORE_CHILD_PROCS.contains(&pname.as_ref()) {
ps.push((pid, pname));
}
}
ps
}
// Launches a thread that continuously calls `ps`, updates the shared
// `child_pids` vector, and inserts the child processes' pids and names
// into the measurements database.
fn launch_ps_thread(
cargo_test_pid: Arc<Mutex<Option<Pid>>>,
child_pids: Arc<Mutex<Vec<Pid>>>,
db: Arc<Mutex<Database>>,
done: Arc<AtomicBool>,
) -> JoinHandle<()> {
thread::spawn(move || {
let cargo_test_pid = loop {
if let Some(pid) = *cargo_test_pid.lock().unwrap() {
break pid;
}
};
while !done.load(Ordering::Relaxed) {
let ps = run_ps(cargo_test_pid);
*child_pids.lock().unwrap() = ps.iter().map(|(pid, _pname)| *pid).collect();
let mut db = db.lock().unwrap();
for (pid, pname) in ps {
if !db.contains(&pid) {
db.new_child_process(pid, pname);
}
}
thread::sleep(Duration::from_millis(100));
}
})
}
// Launches a thread that continuously reads each child process's
// "status" file, parses each file to get the amount of memory locked by that
// child process, then updates the database with the locked memory
// information.
fn launch_measurements_thread( | cargo_test_pid: Arc<Mutex<Option<Pid>>>,
child_pids: Arc<Mutex<Vec<Pid>>>,
db: Arc<Mutex<Database>>,
done: Arc<AtomicBool>,
) -> JoinHandle<()> {
thread::spawn(move || {
while cargo_test_pid.lock().unwrap().is_none() {
thread::sleep(Duration::from_millis(1));
}
while !done.load(Ordering::Relaxed) {
for child_pid in child_pids.lock().unwrap().iter() {
if let Some(kbs_locked) = parse_status_file(*child_pid) {
db.lock().unwrap().update(*child_pid, kbs_locked);
}
}
thread::sleep(Duration::from_millis(1));
}
})
}
// Reads a processes' "status" file; parsing it for the ammount of memory
// currently locked by the process.
fn parse_status_file(pid: Pid) -> Option<u64> {
let path = format!("/proc/{}/status", pid);
let file = fs::read_to_string(path).ok()?;
for line in file.lines() {
if line.starts_with("VmLck") {
match line.trim().split_whitespace().nth(1) {
Some(s) => return s.parse().ok(),
_ => return None,
};
}
}
None
}
fn main() {
println!("CURRENT CWD => {:?}", env::current_dir());
println!("CURRENT EXE => {:?}", env::current_exe());
// Initialize the values that will be shared between threads.
let cargo_test_pid: Arc<Mutex<Option<Pid>>> = Arc::new(Mutex::new(None));
let child_pids: Arc<Mutex<Vec<Pid>>> = Arc::new(Mutex::new(vec![]));
let db = Arc::new(Mutex::new(Database::new()));
let done = Arc::new(AtomicBool::new(false));
// Start the worker threads.
let ps_thread = launch_ps_thread(
cargo_test_pid.clone(),
child_pids.clone(),
db.clone(),
done.clone()
);
let file_reader_thread = launch_measurements_thread(
cargo_test_pid.clone(),
child_pids.clone(),
db.clone(),
done.clone()
);
// Get the system's locked memory limit.
let mlock_limit = run_prlimit();
println!("\nMlock Monitor for `cargo test`");
println!("===============================");
println!("Locked memory limit (soft, kb): {}", mlock_limit.soft);
println!("Lock memory limit (hard, kb): {}", mlock_limit.hard);
print!("\nRunning `cargo test`... ");
// Run `cargo test`.
let cwd = env::current_dir().unwrap();
let mut cargo_test_args = vec![
"test".to_string(),
format!("--manifest-path={}/Cargo.toml", cwd.to_str().unwrap()),
];
cargo_test_args.extend(env::args().skip(1));
/*
let mut cargo_test_cmd = Command::new("cargo")
.args(&cargo_test_args)
.envs(env::vars())
.stdout(Stdio::piped())
.stderr(Stdio::piped());
println!("Running `cargo test`: {:?}", cargo_test_cmd);
let cargo_test_output = cargo_test_cmd
.spawn()
.and_then(|child| {
*cargo_test_pid.lock().unwrap() = Some(child.id());
child.wait_with_output()
})
.unwrap();
*/
let cargo_test_output = Command::new("cargo")
.args(&cargo_test_args)
.envs(env::vars())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()
.and_then(|child| {
*cargo_test_pid.lock().unwrap() = Some(child.id());
child.wait_with_output()
})
.unwrap();
// Once `cargo test` has finished, stop the worker threads and
// print the measurement results.
println!("done!");
done.store(true, Ordering::Relaxed);
let _ = ps_thread.join();
let _ = file_reader_thread.join();
println!("{}", db.lock().unwrap().table());
println!("\nOutput `cargo test`");
println!("====================");
println!("{}", String::from_utf8_lossy(&cargo_test_output.stdout));
}
#[cfg(test)]
mod tests {
use std::mem::size_of_val;
use std::thread;
use std::time::Duration;
use memsec::mlock;
#[test]
fn test_mlock() {
println!("TEST TEST TEST");
let buf: [u64; 600] = [555; 600];
let ptr = (&buf).as_ptr() as *mut u8;
unsafe {
mlock(ptr, size_of_val(&buf));
}
thread::sleep(Duration::from_secs(2));
assert!(true);
}
} | random_line_split |
|
dep_cache.rs |
(Rc<(HashSet<InternedString>, Rc<Vec<DepInfo>>)>, bool),
>,
/// all the cases we ended up using a supplied replacement
used_replacements: HashMap<PackageId, Summary>,
}
impl<'a> RegistryQueryer<'a> {
pub fn new(
registry: &'a mut dyn Registry,
replacements: &'a [(PackageIdSpec, Dependency)],
version_prefs: &'a VersionPreferences,
minimal_versions: bool,
max_rust_version: Option<PartialVersion>,
) -> Self {
RegistryQueryer {
registry,
replacements,
version_prefs,
minimal_versions,
max_rust_version,
registry_cache: HashMap::new(),
summary_cache: HashMap::new(),
used_replacements: HashMap::new(),
}
}
pub fn reset_pending(&mut self) -> bool {
let mut all_ready = true;
self.registry_cache.retain(|_, r| {
if !r.is_ready() {
all_ready = false;
}
r.is_ready()
});
self.summary_cache.retain(|_, (_, r)| {
if !*r {
all_ready = false;
}
*r
});
all_ready
}
pub fn used_replacement_for(&self, p: PackageId) -> Option<(PackageId, PackageId)> {
self.used_replacements.get(&p).map(|r| (p, r.package_id()))
}
pub fn replacement_summary(&self, p: PackageId) -> Option<&Summary> {
self.used_replacements.get(&p)
}
/// Queries the `registry` to return a list of candidates for `dep`.
///
/// This method is the location where overrides are taken into account. If
/// any candidates are returned which match an override then the override is
/// applied by performing a second query for what the override should
/// return.
pub fn query(
&mut self,
dep: &Dependency,
first_minimal_version: bool,
) -> Poll<CargoResult<Rc<Vec<Summary>>>> {
let registry_cache_key = (dep.clone(), first_minimal_version);
if let Some(out) = self.registry_cache.get(®istry_cache_key).cloned() {
return out.map(Result::Ok);
}
let mut ret = Vec::new();
let ready = self.registry.query(dep, QueryKind::Exact, &mut |s| {
if self.max_rust_version.is_none() || s.rust_version() <= self.max_rust_version {
ret.push(s);
}
})?;
if ready.is_pending() {
self.registry_cache
.insert((dep.clone(), first_minimal_version), Poll::Pending);
return Poll::Pending;
}
for summary in ret.iter() {
let mut potential_matches = self
.replacements
.iter()
.filter(|&&(ref spec, _)| spec.matches(summary.package_id()));
let &(ref spec, ref dep) = match potential_matches.next() {
None => continue,
Some(replacement) => replacement,
};
debug!(
"found an override for {} {}",
dep.package_name(),
dep.version_req()
);
let mut summaries = match self.registry.query_vec(dep, QueryKind::Exact)? {
Poll::Ready(s) => s.into_iter(),
Poll::Pending => {
self.registry_cache
.insert((dep.clone(), first_minimal_version), Poll::Pending);
return Poll::Pending;
}
};
let s = summaries.next().ok_or_else(|| {
anyhow::format_err!(
"no matching package for override `{}` found\n\
location searched: {}\n\
version required: {}",
spec,
dep.source_id(),
dep.version_req()
)
})?;
let summaries = summaries.collect::<Vec<_>>();
if !summaries.is_empty() {
let bullets = summaries
.iter()
.map(|s| format!(" * {}", s.package_id()))
.collect::<Vec<_>>();
return Poll::Ready(Err(anyhow::anyhow!(
"the replacement specification `{}` matched \
multiple packages:\n * {}\n{}",
spec,
s.package_id(),
bullets.join("\n")
)));
}
// The dependency should be hard-coded to have the same name and an
// exact version requirement, so both of these assertions should
// never fail.
assert_eq!(s.version(), summary.version());
assert_eq!(s.name(), summary.name());
let replace = if s.source_id() == summary.source_id() {
debug!("Preventing\n{:?}\nfrom replacing\n{:?}", summary, s);
None
} else {
Some(s)
};
let matched_spec = spec.clone();
// Make sure no duplicates
if let Some(&(ref spec, _)) = potential_matches.next() {
return Poll::Ready(Err(anyhow::anyhow!(
"overlapping replacement specifications found:\n\n \
* {}\n * {}\n\nboth specifications match: {}",
matched_spec,
spec,
summary.package_id()
)));
}
for dep in summary.dependencies() {
debug!("\t{} => {}", dep.package_name(), dep.version_req());
}
if let Some(r) = replace {
self.used_replacements.insert(summary.package_id(), r);
}
}
// When we attempt versions for a package we'll want to do so in a sorted fashion to pick
// the "best candidates" first. VersionPreferences implements this notion.
let ordering = if first_minimal_version || self.minimal_versions {
VersionOrdering::MinimumVersionsFirst
} else {
VersionOrdering::MaximumVersionsFirst
};
let first_version = first_minimal_version;
self.version_prefs
.sort_summaries(&mut ret, ordering, first_version);
let out = Poll::Ready(Rc::new(ret));
self.registry_cache.insert(registry_cache_key, out.clone());
out.map(Result::Ok)
}
/// Find out what dependencies will be added by activating `candidate`,
/// with features described in `opts`. Then look up in the `registry`
/// the candidates that will fulfil each of these dependencies, as it is the
/// next obvious question.
pub fn build_deps(
&mut self,
cx: &Context,
parent: Option<PackageId>,
candidate: &Summary,
opts: &ResolveOpts,
first_minimal_version: bool,
) -> ActivateResult<Rc<(HashSet<InternedString>, Rc<Vec<DepInfo>>)>> {
// if we have calculated a result before, then we can just return it,
// as it is a "pure" query of its arguments.
if let Some(out) = self
.summary_cache
.get(&(parent, candidate.clone(), opts.clone()))
{
return Ok(out.0.clone());
}
// First, figure out our set of dependencies based on the requested set
// of features. This also calculates what features we're going to enable
// for our own dependencies.
let (used_features, deps) = resolve_features(parent, candidate, opts)?;
// Next, transform all dependencies into a list of possible candidates
// which can satisfy that dependency.
let mut all_ready = true;
let mut deps = deps
.into_iter()
.filter_map(
|(dep, features)| match self.query(&dep, first_minimal_version) {
Poll::Ready(Ok(candidates)) => Some(Ok((dep, candidates, features))),
Poll::Pending => {
all_ready = false;
// we can ignore Pending deps, resolve will be repeatedly called
// until there are none to ignore
None
}
Poll::Ready(Err(e)) => Some(Err(e).with_context(|| {
format!(
"failed to get `{}` as a dependency of {}",
dep.package_name(),
describe_path_in_context(cx, &candidate.package_id()),
)
})),
},
)
.collect::<CargoResult<Vec<DepInfo>>>()?;
// Attempt to resolve dependencies with fewer candidates before trying
// dependencies with more candidates. This way if the dependency with
// only one candidate can't be resolved we don't have to do a bunch of
// work before we figure that out.
deps.sort_by_key(|&(_, ref a, _)| a.len());
let out = Rc::new((used_features, Rc::new(deps)));
// If we succeed we add the result to the cache so we can use it again next time.
// We don't cache the failure cases as they don't impl Clone.
self.summary_cache.insert(
(parent, candidate.clone(), opts.clone()),
(out.clone(), all_ready),
);
Ok(out)
}
}
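// The two candidate orderings in plain form, using the semver crate purely
// for illustration; cargo's VersionPreferences also folds pinned and
// preferred versions into this decision before sorting.
use semver::Version;

fn order_candidates(mut vs: Vec<Version>, minimal: bool) -> Vec<Version> {
    if minimal {
        vs.sort(); // MinimumVersionsFirst: oldest acceptable version wins
    } else {
        vs.sort_by(|a, b| b.cmp(a)); // MaximumVersionsFirst: newest wins
    }
    vs
}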
/// Returns the features we ended up using and
/// all dependencies and the features we want from each of them.
pub fn resolve_features<'b>(
parent: Option<PackageId>,
s: &'b Summary,
opts: &'b ResolveOpts,
) -> ActivateResult<(HashSet<InternedString>, Vec<(Dependency, FeaturesSet)>)> {
// First, filter by dev-dependencies.
let deps = s.dependencies();
let deps = deps.iter().filter(|d| d.is_transitive() || opts.dev_deps);
let reqs = build_requirements(parent, s, opts)?;
let mut ret = Vec::new();
let default_dep = BTreeSet::new();
let mut valid_dep_names = HashSet::new();
// Next, collect all actually enabled dependencies and their features.
for dep in deps {
// Skip optional dependencies, but not those enabled through a
// feature
if dep.is_optional() && !reqs.deps.contains_key(&dep.name_in_toml()) {
continue;
}
valid_dep_names.insert(dep.name_in_toml());
// So we want this dependency. Move the features we want from
// `feature_deps` to `ret` and register ourselves as using this
// name.
let mut base = reqs
.deps
.get(&dep.name_in_toml())
.unwrap_or(&default_dep)
.clone();
base.extend(dep.features().iter());
ret.push((dep.clone(), Rc::new(base)));
}
// This is a special case for command-line `--features
// dep_name/feat_name` where `dep_name` does not exist. All other
// validation is done either in `build_requirements` or
// `build_feature_map`.
if parent.is_none() {
for dep_name in reqs.deps.keys() {
if !valid_dep_names.contains(dep_name) {
let e = RequirementError::MissingDependency(*dep_name);
return Err(e.into_activate_error(parent, s));
}
}
}
Ok((reqs.into_features(), ret))
}
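// The FeatureValue shapes handled above, parsed by a stand-in classifier.
// This mirrors the `dep:name`, `name/feat`, and `name?/feat` syntax; the
// real parsing and validation live in cargo's FeatureValue, which this
// sketch deliberately skips.
#[derive(Debug, PartialEq)]
enum FvDemo<'a> {
    Feature(&'a str),
    Dep { dep_name: &'a str },
    DepFeature { dep_name: &'a str, dep_feature: &'a str, weak: bool },
}

fn classify(s: &str) -> FvDemo<'_> {
    if let Some(dep) = s.strip_prefix("dep:") {
        FvDemo::Dep { dep_name: dep }
    } else if let Some((dep, feat)) = s.split_once('/') {
        let (dep, weak) = match dep.strip_suffix('?') {
            Some(d) => (d, true),
            None => (dep, false),
        };
        FvDemo::DepFeature { dep_name: dep, dep_feature: feat, weak }
    } else {
        FvDemo::Feature(s)
    }
}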
/// Takes requested features for a single package from the input `ResolveOpts` and
/// recurses to find all requested features, dependencies and requested
/// dependency features in a `Requirements` object, returning it to the resolver.
fn build_requirements<'a, 'b: 'a>(
parent: Option<PackageId>,
s: &'a Summary,
opts: &'b ResolveOpts,
) -> ActivateResult<Requirements<'a>> {
let mut reqs = Requirements::new(s);
let handle_default = |uses_default_features, reqs: &mut Requirements<'_>| {
if uses_default_features && s.features().contains_key("default") {
if let Err(e) = reqs.require_feature(InternedString::new("default")) {
return Err(e.into_activate_error(parent, s));
}
}
Ok(())
};
match &opts.features {
RequestedFeatures::CliFeatures(CliFeatures {
features,
all_features,
uses_default_features,
}) => {
if *all_features {
for key in s.features().keys() {
if let Err(e) = reqs.require_feature(*key) {
return Err(e.into_activate_error(parent, s));
}
}
}
for fv in features.iter() {
if let Err(e) = reqs.require_value(fv) {
return Err(e.into_activate_error(parent, s));
}
}
handle_default(*uses_default_features, &mut reqs)?;
}
RequestedFeatures::DepFeatures {
features,
uses_default_features,
} => {
for feature in features.iter() {
if let Err(e) = reqs.require_feature(*feature) {
return Err(e.into_activate_error(parent, s));
}
}
handle_default(*uses_default_features, &mut reqs)?;
}
}
Ok(reqs)
}
/// Set of feature and dependency requirements for a package.
#[derive(Debug)]
struct Requirements<'a> {
summary: &'a Summary,
/// The deps map is a mapping of dependency name to list of features enabled.
///
/// The resolver will activate all of these dependencies, with the given
/// features enabled.
deps: HashMap<InternedString, BTreeSet<InternedString>>,
/// The set of features enabled on this package which is later used when
/// compiling to instruct the code what features were enabled.
features: HashSet<InternedString>,
}
/// An error for a requirement.
///
/// This will later be converted to an `ActivateError` depending on whether or
/// not this is a dependency or a root package.
enum RequirementError {
/// The package does not have the requested feature.
MissingFeature(InternedString),
/// The package does not have the requested dependency.
MissingDependency(InternedString),
/// A feature has a direct cycle to itself.
///
/// Note that cycles through multiple features are allowed (but perhaps
/// they shouldn't be?).
Cycle(InternedString),
} | impl Requirements<'_> {
fn new(summary: &Summary) -> Requirements<'_> {
Requirements {
summary,
deps: HashMap::new(),
features: HashSet::new(),
}
}
fn into_features(self) -> HashSet<InternedString> {
self.features
}
fn require_dep_feature(
&mut self,
package: InternedString,
feat: InternedString,
weak: bool,
) -> Result<(), RequirementError> {
// If `package` is indeed an optional dependency then we activate the
// feature named `package`, but otherwise if `package` is a required
// dependency then there's no feature associated with it.
if !weak
&& self
.summary
.dependencies()
.iter()
.any(|dep| dep.name_in_toml() == package && dep.is_optional())
{
// This optional dependency may not have an implicit feature of
// the same name if the `dep:` syntax is used to avoid creating
// that implicit feature.
if self.summary.features().contains_key(&package) {
self.require_feature(package)?;
}
}
self.deps.entry(package).or_default().insert(feat);
Ok(())
}
fn require_dependency(&mut self, pkg: InternedString) {
self.deps.entry(pkg).or_default();
}
fn require_feature(&mut self, feat: InternedString) -> Result<(), RequirementError> {
if !self.features.insert(feat) {
// Already seen this feature.
return Ok(());
}
let fvs = match self.summary.features().get(&feat) {
Some(fvs) => fvs,
None => return Err(RequirementError::MissingFeature(feat)),
};
for fv in fvs {
if let FeatureValue::Feature(dep_feat) = fv {
if *dep_feat == feat {
return Err(RequirementError::Cycle(feat));
}
}
self.require_value(fv)?;
}
Ok(())
}
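// Worked example (added; the manifest below is hypothetical): given
//
//     [features]
//     default = ["serde"]
//     serde = ["dep:serde", "serde/derive"]
//
// requiring "default" recursively requires "serde", which registers the
// optional `serde` dependency (via `require_dependency` for `dep:serde`)
// and forwards its "derive" feature (via `require_dep_feature`).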
fn require_value(&mut self, fv: &FeatureValue) -> Result<(), RequirementError> {
match fv {
FeatureValue::Feature(feat) => self.require_feature(*feat)?,
FeatureValue::Dep { dep_name } => self.require_dependency(*dep_name),
FeatureValue::DepFeature {
dep_name,
dep_feature,
// Weak features are always activated in the dependency
// resolver. They will be narrowed inside the new feature
// resolver.
weak,
} => self.require_dep_feature(*dep_name, *dep_feature, *weak)?,
};
Ok(())
}
}
impl RequirementError {
fn into_activate_error(self, parent: Option<PackageId>, summary: &Summary) -> ActivateError {
match self {
RequirementError::MissingFeature(feat) => {
let deps: Vec<_> = summary
.dependencies()
.iter()
.filter(|dep| dep.name_in_toml() == feat)
.collect();
if deps.is_empty() {
return match parent {
None => ActivateError::Fatal(anyhow::format_err!(
"Package `{}` does not have the feature `{}`",
summary.package_id(),
feat
)),
Some(p) => ActivateError::Conflict(
p,
| random_line_split |
|
dep_cache.rs | pub fn new(
registry: &'a mut dyn Registry,
replacements: &'a [(PackageIdSpec, Dependency)],
version_prefs: &'a VersionPreferences,
minimal_versions: bool,
max_rust_version: Option<PartialVersion>,
) -> Self {
RegistryQueryer {
registry,
replacements,
version_prefs,
minimal_versions,
max_rust_version,
registry_cache: HashMap::new(),
summary_cache: HashMap::new(),
used_replacements: HashMap::new(),
}
}
pub fn reset_pending(&mut self) -> bool {
let mut all_ready = true;
self.registry_cache.retain(|_, r| {
if !r.is_ready() {
all_ready = false;
}
r.is_ready()
});
self.summary_cache.retain(|_, (_, r)| {
if !*r {
all_ready = false;
}
*r
});
all_ready
}
pub fn used_replacement_for(&self, p: PackageId) -> Option<(PackageId, PackageId)> {
self.used_replacements.get(&p).map(|r| (p, r.package_id()))
}
pub fn replacement_summary(&self, p: PackageId) -> Option<&Summary> {
self.used_replacements.get(&p)
}
/// Queries the `registry` to return a list of candidates for `dep`.
///
/// This method is the location where overrides are taken into account. If
/// any candidates are returned which match an override then the override is
/// applied by performing a second query for what the override should
/// return.
pub fn query(
&mut self,
dep: &Dependency,
first_minimal_version: bool,
) -> Poll<CargoResult<Rc<Vec<Summary>>>> {
let registry_cache_key = (dep.clone(), first_minimal_version);
if let Some(out) = self.registry_cache.get(&registry_cache_key).cloned() {
return out.map(Result::Ok);
}
let mut ret = Vec::new();
let ready = self.registry.query(dep, QueryKind::Exact, &mut |s| {
if self.max_rust_version.is_none() || s.rust_version() <= self.max_rust_version {
ret.push(s);
}
})?;
if ready.is_pending() {
self.registry_cache
.insert((dep.clone(), first_minimal_version), Poll::Pending);
return Poll::Pending;
}
for summary in ret.iter() {
let mut potential_matches = self
.replacements
.iter()
.filter(|&&(ref spec, _)| spec.matches(summary.package_id()));
let &(ref spec, ref dep) = match potential_matches.next() {
None => continue,
Some(replacement) => replacement,
};
debug!(
"found an override for {} {}",
dep.package_name(),
dep.version_req()
);
let mut summaries = match self.registry.query_vec(dep, QueryKind::Exact)? {
Poll::Ready(s) => s.into_iter(),
Poll::Pending => {
self.registry_cache
.insert((dep.clone(), first_minimal_version), Poll::Pending);
return Poll::Pending;
}
};
let s = summaries.next().ok_or_else(|| {
anyhow::format_err!(
"no matching package for override `{}` found\n\
location searched: {}\n\
version required: {}",
spec,
dep.source_id(),
dep.version_req()
)
})?;
let summaries = summaries.collect::<Vec<_>>();
if !summaries.is_empty() {
let bullets = summaries
.iter()
.map(|s| format!(" * {}", s.package_id()))
.collect::<Vec<_>>();
return Poll::Ready(Err(anyhow::anyhow!(
"the replacement specification `{}` matched \
multiple packages:\n * {}\n{}",
spec,
s.package_id(),
bullets.join("\n")
)));
}
// The dependency should be hard-coded to have the same name and an
// exact version requirement, so both of these assertions should
// never fail.
assert_eq!(s.version(), summary.version());
assert_eq!(s.name(), summary.name());
let replace = if s.source_id() == summary.source_id() {
debug!("Preventing\n{:?}\nfrom replacing\n{:?}", summary, s);
None
} else {
Some(s)
};
let matched_spec = spec.clone();
// Make sure no duplicates
if let Some(&(ref spec, _)) = potential_matches.next() {
return Poll::Ready(Err(anyhow::anyhow!(
"overlapping replacement specifications found:\n\n \
* {}\n * {}\n\nboth specifications match: {}",
matched_spec,
spec,
summary.package_id()
)));
}
for dep in summary.dependencies() {
debug!("\t{} => {}", dep.package_name(), dep.version_req());
}
if let Some(r) = replace {
self.used_replacements.insert(summary.package_id(), r);
}
}
// When we attempt versions for a package we'll want to do so in a sorted fashion to pick
// the "best candidates" first. VersionPreferences implements this notion.
let ordering = if first_minimal_version || self.minimal_versions {
VersionOrdering::MinimumVersionsFirst
} else {
VersionOrdering::MaximumVersionsFirst
};
let first_version = first_minimal_version;
self.version_prefs
.sort_summaries(&mut ret, ordering, first_version);
let out = Poll::Ready(Rc::new(ret));
self.registry_cache.insert(registry_cache_key, out.clone());
out.map(Result::Ok)
}
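// Sketch (added, not original code): the Poll contract means callers retry
// once pending registry downloads finish; `wait_for_registry` is a
// hypothetical stand-in for that wait step.
//
//     let summaries = loop {
//         match queryer.query(&dep, false) {
//             Poll::Ready(res) => break res?,
//             Poll::Pending => wait_for_registry(),
//         }
//     };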
/// Find out what dependencies will be added by activating `candidate`,
/// with features described in `opts`. Then look up in the `registry`
/// the candidates that will fulfil each of these dependencies, as it is the
/// next obvious question.
pub fn build_deps(
&mut self,
cx: &Context,
parent: Option<PackageId>,
candidate: &Summary,
opts: &ResolveOpts,
first_minimal_version: bool,
) -> ActivateResult<Rc<(HashSet<InternedString>, Rc<Vec<DepInfo>>)>> {
// if we have calculated a result before, then we can just return it,
// as it is a "pure" query of its arguments.
if let Some(out) = self
.summary_cache
.get(&(parent, candidate.clone(), opts.clone()))
{
return Ok(out.0.clone());
}
// First, figure out our set of dependencies based on the requested set
// of features. This also calculates what features we're going to enable
// for our own dependencies.
let (used_features, deps) = resolve_features(parent, candidate, opts)?;
// Next, transform all dependencies into a list of possible candidates
// which can satisfy that dependency.
let mut all_ready = true;
let mut deps = deps
.into_iter()
.filter_map(
|(dep, features)| match self.query(&dep, first_minimal_version) {
Poll::Ready(Ok(candidates)) => Some(Ok((dep, candidates, features))),
Poll::Pending => {
all_ready = false;
// we can ignore Pending deps, resolve will be repeatedly called
// until there are none to ignore
None
}
Poll::Ready(Err(e)) => Some(Err(e).with_context(|| {
format!(
"failed to get `{}` as a dependency of {}",
dep.package_name(),
describe_path_in_context(cx, &candidate.package_id()),
)
})),
},
)
.collect::<CargoResult<Vec<DepInfo>>>()?;
// Attempt to resolve dependencies with fewer candidates before trying
// dependencies with more candidates. This way if the dependency with
// only one candidate can't be resolved we don't have to do a bunch of
// work before we figure that out.
deps.sort_by_key(|&(_, ref a, _)| a.len());
let out = Rc::new((used_features, Rc::new(deps)));
// If we succeed we add the result to the cache so we can use it again next time.
// We don't cache the failure cases as they don't impl Clone.
self.summary_cache.insert(
(parent, candidate.clone(), opts.clone()),
(out.clone(), all_ready),
);
Ok(out)
}
}
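// Illustration (added): `build_deps` memoizes on (parent, candidate, opts),
// so activating the same summary under a different parent or feature
// selection computes a separate entry:
//
//     let a = queryer.build_deps(&cx, Some(parent_a), &summary, &opts, false)?;
//     let b = queryer.build_deps(&cx, Some(parent_b), &summary, &opts, false)?; // distinct key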
/// Returns the features we ended up using and
/// all dependencies and the features we want from each of them.
pub fn resolve_features<'b>(
parent: Option<PackageId>,
s: &'b Summary,
opts: &'b ResolveOpts,
) -> ActivateResult<(HashSet<InternedString>, Vec<(Dependency, FeaturesSet)>)> {
// First, filter by dev-dependencies.
let deps = s.dependencies();
let deps = deps.iter().filter(|d| d.is_transitive() || opts.dev_deps);
let reqs = build_requirements(parent, s, opts)?;
let mut ret = Vec::new();
let default_dep = BTreeSet::new();
let mut valid_dep_names = HashSet::new();
// Next, collect all actually enabled dependencies and their features.
for dep in deps {
// Skip optional dependencies, but not those enabled through a
// feature
if dep.is_optional() && !reqs.deps.contains_key(&dep.name_in_toml()) {
continue;
}
valid_dep_names.insert(dep.name_in_toml());
// So we want this dependency. Move the features we want from
// `feature_deps` to `ret` and register ourselves as using this
// name.
let mut base = reqs
.deps
.get(&dep.name_in_toml())
.unwrap_or(&default_dep)
.clone();
base.extend(dep.features().iter());
ret.push((dep.clone(), Rc::new(base)));
}
// This is a special case for command-line `--features
// dep_name/feat_name` where `dep_name` does not exist. All other
// validation is done either in `build_requirements` or
// `build_feature_map`.
if parent.is_none() {
for dep_name in reqs.deps.keys() {
if !valid_dep_names.contains(dep_name) {
let e = RequirementError::MissingDependency(*dep_name);
return Err(e.into_activate_error(parent, s));
}
}
}
Ok((reqs.into_features(), ret))
}
/// Takes requested features for a single package from the input `ResolveOpts` and
/// recurses to find all requested features, dependencies and requested
/// dependency features in a `Requirements` object, returning it to the resolver.
fn build_requirements<'a, 'b: 'a>(
parent: Option<PackageId>,
s: &'a Summary,
opts: &'b ResolveOpts,
) -> ActivateResult<Requirements<'a>> {
let mut reqs = Requirements::new(s);
let handle_default = |uses_default_features, reqs: &mut Requirements<'_>| {
if uses_default_features && s.features().contains_key("default") {
if let Err(e) = reqs.require_feature(InternedString::new("default")) {
return Err(e.into_activate_error(parent, s));
}
}
Ok(())
};
match &opts.features {
RequestedFeatures::CliFeatures(CliFeatures {
features,
all_features,
uses_default_features,
}) => {
if *all_features {
for key in s.features().keys() {
if let Err(e) = reqs.require_feature(*key) {
return Err(e.into_activate_error(parent, s));
}
}
}
for fv in features.iter() {
if let Err(e) = reqs.require_value(fv) {
return Err(e.into_activate_error(parent, s));
}
}
handle_default(*uses_default_features, &mut reqs)?;
}
RequestedFeatures::DepFeatures {
features,
uses_default_features,
} => {
for feature in features.iter() {
if let Err(e) = reqs.require_feature(*feature) {
return Err(e.into_activate_error(parent, s));
}
}
handle_default(*uses_default_features, &mut reqs)?;
}
}
Ok(reqs)
}
/// Set of feature and dependency requirements for a package.
#[derive(Debug)]
struct Requirements<'a> {
summary: &'a Summary,
/// The deps map is a mapping of dependency name to list of features enabled.
///
/// The resolver will activate all of these dependencies, with the given
/// features enabled.
deps: HashMap<InternedString, BTreeSet<InternedString>>,
/// The set of features enabled on this package which is later used when
/// compiling to instruct the code what features were enabled.
features: HashSet<InternedString>,
}
/// An error for a requirement.
///
/// This will later be converted to an `ActivateError` depending on whether or
/// not this is a dependency or a root package.
enum RequirementError {
/// The package does not have the requested feature.
MissingFeature(InternedString),
/// The package does not have the requested dependency.
MissingDependency(InternedString),
/// A feature has a direct cycle to itself.
///
/// Note that cycles through multiple features are allowed (but perhaps
/// they shouldn't be?).
Cycle(InternedString),
}
impl Requirements<'_> {
fn new(summary: &Summary) -> Requirements<'_> {
Requirements {
summary,
deps: HashMap::new(),
features: HashSet::new(),
}
}
fn into_features(self) -> HashSet<InternedString> {
self.features
}
fn require_dep_feature(
&mut self,
package: InternedString,
feat: InternedString,
weak: bool,
) -> Result<(), RequirementError> {
// If `package` is indeed an optional dependency then we activate the
// feature named `package`, but otherwise if `package` is a required
// dependency then there's no feature associated with it.
if !weak
&& self
.summary
.dependencies()
.iter()
.any(|dep| dep.name_in_toml() == package && dep.is_optional())
{
// This optional dependency may not have an implicit feature of
// the same name if the `dep:` syntax is used to avoid creating
// that implicit feature.
if self.summary.features().contains_key(&package) {
self.require_feature(package)?;
}
}
self.deps.entry(package).or_default().insert(feat);
Ok(())
}
fn require_dependency(&mut self, pkg: InternedString) {
self.deps.entry(pkg).or_default();
}
fn require_feature(&mut self, feat: InternedString) -> Result<(), RequirementError> {
if !self.features.insert(feat) {
// Already seen this feature.
return Ok(());
}
let fvs = match self.summary.features().get(&feat) {
Some(fvs) => fvs,
None => return Err(RequirementError::MissingFeature(feat)),
};
for fv in fvs {
if let FeatureValue::Feature(dep_feat) = fv {
if *dep_feat == feat {
return Err(RequirementError::Cycle(feat));
}
}
self.require_value(fv)?;
}
Ok(())
}
fn require_value(&mut self, fv: &FeatureValue) -> Result<(), RequirementError> {
match fv {
FeatureValue::Feature(feat) => self.require_feature(*feat)?,
FeatureValue::Dep { dep_name } => self.require_dependency(*dep_name),
FeatureValue::DepFeature {
dep_name,
dep_feature,
// Weak features are always activated in the dependency
// resolver. They will be narrowed inside the new feature
// resolver.
weak,
} => self.require_dep_feature(*dep_name, *dep_feature, *weak)?,
};
Ok(())
}
}
impl RequirementError {
fn into_activate_error(self, parent: Option<PackageId>, summary: &Summary) -> ActivateError | {
match self {
RequirementError::MissingFeature(feat) => {
let deps: Vec<_> = summary
.dependencies()
.iter()
.filter(|dep| dep.name_in_toml() == feat)
.collect();
if deps.is_empty() {
return match parent {
None => ActivateError::Fatal(anyhow::format_err!(
"Package `{}` does not have the feature `{}`",
summary.package_id(),
feat
)),
Some(p) => ActivateError::Conflict(
p,
ConflictReason::MissingFeatures(feat.to_string()),
),
}; | identifier_body |
|
dep_cache.rs | (Rc<(HashSet<InternedString>, Rc<Vec<DepInfo>>)>, bool),
>,
/// all the cases we ended up using a supplied replacement
used_replacements: HashMap<PackageId, Summary>,
}
impl<'a> RegistryQueryer<'a> {
pub fn new(
registry: &'a mut dyn Registry,
replacements: &'a [(PackageIdSpec, Dependency)],
version_prefs: &'a VersionPreferences,
minimal_versions: bool,
max_rust_version: Option<PartialVersion>,
) -> Self {
RegistryQueryer {
registry,
replacements,
version_prefs,
minimal_versions,
max_rust_version,
registry_cache: HashMap::new(),
summary_cache: HashMap::new(),
used_replacements: HashMap::new(),
}
}
pub fn reset_pending(&mut self) -> bool {
let mut all_ready = true;
self.registry_cache.retain(|_, r| {
if !r.is_ready() {
all_ready = false;
}
r.is_ready()
});
self.summary_cache.retain(|_, (_, r)| {
if !*r {
all_ready = false;
}
*r
});
all_ready
}
pub fn used_replacement_for(&self, p: PackageId) -> Option<(PackageId, PackageId)> {
self.used_replacements.get(&p).map(|r| (p, r.package_id()))
}
pub fn replacement_summary(&self, p: PackageId) -> Option<&Summary> {
self.used_replacements.get(&p)
}
/// Queries the `registry` to return a list of candidates for `dep`.
///
/// This method is the location where overrides are taken into account. If
/// any candidates are returned which match an override then the override is
/// applied by performing a second query for what the override should
/// return.
pub fn query(
&mut self,
dep: &Dependency,
first_minimal_version: bool,
) -> Poll<CargoResult<Rc<Vec<Summary>>>> {
let registry_cache_key = (dep.clone(), first_minimal_version);
if let Some(out) = self.registry_cache.get(&registry_cache_key).cloned() {
return out.map(Result::Ok);
}
let mut ret = Vec::new();
let ready = self.registry.query(dep, QueryKind::Exact, &mut |s| {
if self.max_rust_version.is_none() || s.rust_version() <= self.max_rust_version {
ret.push(s);
}
})?;
if ready.is_pending() {
self.registry_cache
.insert((dep.clone(), first_minimal_version), Poll::Pending);
return Poll::Pending;
}
for summary in ret.iter() {
let mut potential_matches = self
.replacements
.iter()
.filter(|&&(ref spec, _)| spec.matches(summary.package_id()));
let &(ref spec, ref dep) = match potential_matches.next() {
None => continue,
Some(replacement) => replacement,
};
debug!(
"found an override for {} {}",
dep.package_name(),
dep.version_req()
);
let mut summaries = match self.registry.query_vec(dep, QueryKind::Exact)? {
Poll::Ready(s) => s.into_iter(),
Poll::Pending => {
self.registry_cache
.insert((dep.clone(), first_minimal_version), Poll::Pending);
return Poll::Pending;
}
};
let s = summaries.next().ok_or_else(|| {
anyhow::format_err!(
"no matching package for override `{}` found\n\
location searched: {}\n\
version required: {}",
spec,
dep.source_id(),
dep.version_req()
)
})?;
let summaries = summaries.collect::<Vec<_>>();
if !summaries.is_empty() {
let bullets = summaries
.iter()
.map(|s| format!(" * {}", s.package_id()))
.collect::<Vec<_>>();
return Poll::Ready(Err(anyhow::anyhow!(
"the replacement specification `{}` matched \
multiple packages:\n * {}\n{}",
spec,
s.package_id(),
bullets.join("\n")
)));
}
// The dependency should be hard-coded to have the same name and an
// exact version requirement, so both of these assertions should
// never fail.
assert_eq!(s.version(), summary.version());
assert_eq!(s.name(), summary.name());
let replace = if s.source_id() == summary.source_id() {
debug!("Preventing\n{:?}\nfrom replacing\n{:?}", summary, s);
None
} else {
Some(s)
};
let matched_spec = spec.clone();
// Make sure no duplicates
if let Some(&(ref spec, _)) = potential_matches.next() {
return Poll::Ready(Err(anyhow::anyhow!(
"overlapping replacement specifications found:\n\n \
* {}\n * {}\n\nboth specifications match: {}",
matched_spec,
spec,
summary.package_id()
)));
}
for dep in summary.dependencies() {
debug!("\t{} => {}", dep.package_name(), dep.version_req());
}
if let Some(r) = replace {
self.used_replacements.insert(summary.package_id(), r);
}
}
// When we attempt versions for a package we'll want to do so in a sorted fashion to pick
// the "best candidates" first. VersionPreferences implements this notion.
let ordering = if first_minimal_version || self.minimal_versions {
VersionOrdering::MinimumVersionsFirst
} else {
VersionOrdering::MaximumVersionsFirst
};
let first_version = first_minimal_version;
self.version_prefs
.sort_summaries(&mut ret, ordering, first_version);
let out = Poll::Ready(Rc::new(ret));
self.registry_cache.insert(registry_cache_key, out.clone());
out.map(Result::Ok)
}
/// Find out what dependencies will be added by activating `candidate`,
/// with features described in `opts`. Then look up in the `registry`
/// the candidates that will fulfil each of these dependencies, as it is the
/// next obvious question.
pub fn build_deps(
&mut self,
cx: &Context,
parent: Option<PackageId>,
candidate: &Summary,
opts: &ResolveOpts,
first_minimal_version: bool,
) -> ActivateResult<Rc<(HashSet<InternedString>, Rc<Vec<DepInfo>>)>> {
// if we have calculated a result before, then we can just return it,
// as it is a "pure" query of its arguments.
if let Some(out) = self
.summary_cache
.get(&(parent, candidate.clone(), opts.clone()))
{
return Ok(out.0.clone());
}
// First, figure out our set of dependencies based on the requested set
// of features. This also calculates what features we're going to enable
// for our own dependencies.
let (used_features, deps) = resolve_features(parent, candidate, opts)?;
// Next, transform all dependencies into a list of possible candidates
// which can satisfy that dependency.
let mut all_ready = true;
let mut deps = deps
.into_iter()
.filter_map(
|(dep, features)| match self.query(&dep, first_minimal_version) {
Poll::Ready(Ok(candidates)) => Some(Ok((dep, candidates, features))),
Poll::Pending => {
all_ready = false;
// we can ignore Pending deps, resolve will be repeatedly called
// until there are none to ignore
None
}
Poll::Ready(Err(e)) => Some(Err(e).with_context(|| {
format!(
"failed to get `{}` as a dependency of {}",
dep.package_name(),
describe_path_in_context(cx, &candidate.package_id()),
)
})),
},
)
.collect::<CargoResult<Vec<DepInfo>>>()?;
// Attempt to resolve dependencies with fewer candidates before trying
// dependencies with more candidates. This way if the dependency with
// only one candidate can't be resolved we don't have to do a bunch of
// work before we figure that out.
deps.sort_by_key(|&(_, ref a, _)| a.len());
let out = Rc::new((used_features, Rc::new(deps)));
// If we succeed we add the result to the cache so we can use it again next time.
// We don't cache the failure cases as they don't impl Clone.
self.summary_cache.insert(
(parent, candidate.clone(), opts.clone()),
(out.clone(), all_ready),
);
Ok(out)
}
}
/// Returns the features we ended up using and
/// all dependencies and the features we want from each of them.
pub fn resolve_features<'b>(
parent: Option<PackageId>,
s: &'b Summary,
opts: &'b ResolveOpts,
) -> ActivateResult<(HashSet<InternedString>, Vec<(Dependency, FeaturesSet)>)> {
// First, filter by dev-dependencies.
let deps = s.dependencies();
let deps = deps.iter().filter(|d| d.is_transitive() || opts.dev_deps);
let reqs = build_requirements(parent, s, opts)?;
let mut ret = Vec::new();
let default_dep = BTreeSet::new();
let mut valid_dep_names = HashSet::new();
// Next, collect all actually enabled dependencies and their features.
for dep in deps {
// Skip optional dependencies, but not those enabled through a
// feature
if dep.is_optional() && !reqs.deps.contains_key(&dep.name_in_toml()) {
continue;
}
valid_dep_names.insert(dep.name_in_toml());
// So we want this dependency. Move the features we want from
// `feature_deps` to `ret` and register ourselves as using this
// name.
let mut base = reqs
.deps
.get(&dep.name_in_toml())
.unwrap_or(&default_dep)
.clone();
base.extend(dep.features().iter());
ret.push((dep.clone(), Rc::new(base)));
}
// This is a special case for command-line `--features
// dep_name/feat_name` where `dep_name` does not exist. All other
// validation is done either in `build_requirements` or
// `build_feature_map`.
if parent.is_none() {
for dep_name in reqs.deps.keys() {
if !valid_dep_names.contains(dep_name) {
let e = RequirementError::MissingDependency(*dep_name);
return Err(e.into_activate_error(parent, s));
}
}
}
Ok((reqs.into_features(), ret))
}
/// Takes requested features for a single package from the input `ResolveOpts` and
/// recurses to find all requested features, dependencies and requested
/// dependency features in a `Requirements` object, returning it to the resolver.
fn build_requirements<'a, 'b: 'a>(
parent: Option<PackageId>,
s: &'a Summary,
opts: &'b ResolveOpts,
) -> ActivateResult<Requirements<'a>> {
let mut reqs = Requirements::new(s);
let handle_default = |uses_default_features, reqs: &mut Requirements<'_>| {
if uses_default_features && s.features().contains_key("default") {
if let Err(e) = reqs.require_feature(InternedString::new("default")) {
return Err(e.into_activate_error(parent, s));
}
}
Ok(())
};
match &opts.features {
RequestedFeatures::CliFeatures(CliFeatures {
features,
all_features,
uses_default_features,
}) => {
if *all_features {
for key in s.features().keys() {
if let Err(e) = reqs.require_feature(*key) {
return Err(e.into_activate_error(parent, s));
}
}
}
for fv in features.iter() {
if let Err(e) = reqs.require_value(fv) {
return Err(e.into_activate_error(parent, s));
}
}
handle_default(*uses_default_features, &mut reqs)?;
}
RequestedFeatures::DepFeatures {
features,
uses_default_features,
} => {
for feature in features.iter() {
if let Err(e) = reqs.require_feature(*feature) {
return Err(e.into_activate_error(parent, s));
}
}
handle_default(*uses_default_features, &mut reqs)?;
}
}
Ok(reqs)
}
/// Set of feature and dependency requirements for a package.
#[derive(Debug)]
struct Requirements<'a> {
summary: &'a Summary,
/// The deps map is a mapping of dependency name to list of features enabled.
///
/// The resolver will activate all of these dependencies, with the given
/// features enabled.
deps: HashMap<InternedString, BTreeSet<InternedString>>,
/// The set of features enabled on this package which is later used when
/// compiling to instruct the code what features were enabled.
features: HashSet<InternedString>,
}
/// An error for a requirement.
///
/// This will later be converted to an `ActivateError` depending on whether or
/// not this is a dependency or a root package.
enum RequirementError {
/// The package does not have the requested feature.
MissingFeature(InternedString),
/// The package does not have the requested dependency.
MissingDependency(InternedString),
/// A feature has a direct cycle to itself.
///
/// Note that cycles through multiple features are allowed (but perhaps
/// they shouldn't be?).
Cycle(InternedString),
}
impl Requirements<'_> {
fn new(summary: &Summary) -> Requirements<'_> {
Requirements {
summary,
deps: HashMap::new(),
features: HashSet::new(),
}
}
fn | (self) -> HashSet<InternedString> {
self.features
}
fn require_dep_feature(
&mut self,
package: InternedString,
feat: InternedString,
weak: bool,
) -> Result<(), RequirementError> {
// If `package` is indeed an optional dependency then we activate the
// feature named `package`, but otherwise if `package` is a required
// dependency then there's no feature associated with it.
if !weak
&& self
.summary
.dependencies()
.iter()
.any(|dep| dep.name_in_toml() == package && dep.is_optional())
{
// This optional dependency may not have an implicit feature of
// the same name if the `dep:` syntax is used to avoid creating
// that implicit feature.
if self.summary.features().contains_key(&package) {
self.require_feature(package)?;
}
}
self.deps.entry(package).or_default().insert(feat);
Ok(())
}
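// Note with example (added): a weak requirement such as "serde?/derive"
// reaches this method with `weak == true`, so the implicit `serde` feature
// is not activated here; only the ("serde" -> {"derive"}) entry is recorded
// in `deps`, to be narrowed later by the new feature resolver.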
fn require_dependency(&mut self, pkg: InternedString) {
self.deps.entry(pkg).or_default();
}
fn require_feature(&mut self, feat: InternedString) -> Result<(), RequirementError> {
if !self.features.insert(feat) {
// Already seen this feature.
return Ok(());
}
let fvs = match self.summary.features().get(&feat) {
Some(fvs) => fvs,
None => return Err(RequirementError::MissingFeature(feat)),
};
for fv in fvs {
if let FeatureValue::Feature(dep_feat) = fv {
if *dep_feat == feat {
return Err(RequirementError::Cycle(feat));
}
}
self.require_value(fv)?;
}
Ok(())
}
fn require_value(&mut self, fv: &FeatureValue) -> Result<(), RequirementError> {
match fv {
FeatureValue::Feature(feat) => self.require_feature(*feat)?,
FeatureValue::Dep { dep_name } => self.require_dependency(*dep_name),
FeatureValue::DepFeature {
dep_name,
dep_feature,
// Weak features are always activated in the dependency
// resolver. They will be narrowed inside the new feature
// resolver.
weak,
} => self.require_dep_feature(*dep_name, *dep_feature, *weak)?,
};
Ok(())
}
}
impl RequirementError {
fn into_activate_error(self, parent: Option<PackageId>, summary: &Summary) -> ActivateError {
match self {
RequirementError::MissingFeature(feat) => {
let deps: Vec<_> = summary
.dependencies()
.iter()
.filter(|dep| dep.name_in_toml() == feat)
.collect();
if deps.is_empty() {
return match parent {
None => ActivateError::Fatal(anyhow::format_err!(
"Package `{}` does not have the feature `{}`",
summary.package_id(),
feat
)),
Some(p) => ActivateError::Conflict(
p,
| into_features | identifier_name |
benchmark.rs | #![feature(duration_as_u128)]
//! Executes all mjtests in the /exec/big folder.
use compiler_cli::optimization_arg;
use compiler_shared::timing::{AsciiDisp, CompilerMeasurements, SingleMeasurement};
use humantime::format_duration;
use optimization;
use regex::Regex;
use runner_integration_tests::{compiler_call, Backend, CompilerCall, CompilerPhase};
use stats::OnlineStats;
use std::{
collections::HashMap,
ffi::OsStr,
fmt,
fs::{self, File, OpenOptions},
io::{self, BufReader},
path::PathBuf,
process::Command,
time::{Duration, SystemTime},
};
use structopt::StructOpt;
fn test_folder() -> PathBuf {
PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("../mjtest-rs/tests")
}
#[derive(Debug, Clone)]
struct BigTest {
minijava: PathBuf,
stdin: Option<PathBuf>,
}
fn big_tests() -> Vec<BigTest> {
let dirpath = test_folder().join("exec/big");
log::info!("test directory is {}", dirpath.display());
let dirlisting = fs::read_dir(dirpath).unwrap();
let mut big_tests = vec![];
for entry in dirlisting {
let path = entry.unwrap().path();
if path.extension() == Some(OsStr::new("java")) {
let test = BigTest {
stdin: {
let mut stdin = path.clone();
let stem = path.file_stem().unwrap();
// remove extension
stdin.pop();
stdin.push(stem);
// set new extension
// TODO: support multiple input files
stdin.set_extension("0.inputc");
log::debug!("looking for stdin file at {}", stdin.display());
if stdin.is_file() {
Some(stdin)
} else {
None
}
},
minijava: path,
};
log::debug!("Found test: {:?}", test);
big_tests.push(test);
}
}
big_tests
}
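// Layout sketch (added, derived from the lookup above): for a test at
// `tests/exec/big/Foo.java`, stdin is read from `tests/exec/big/Foo.0.inputc`
// if that file exists.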
fn profile_compiler(
test: &BigTest,
optimizations: optimization::Level,
backend: Backend,
) -> Option<(PathBuf, CompilerMeasurements)> {
let outpath = test.minijava.with_extension("benchmark.out");
let mut cmd = compiler_call(
CompilerCall::RawCompiler(CompilerPhase::Binary {
backend,
// TODO: use temp dir, don't trash
output: outpath.clone(),
assembly: None,
optimizations,
}),
&test.minijava,
);
let measurement_path = "measurement.json";
cmd.env("MEASURE_JSON", measurement_path);
// TODO: run and benchmark the binary
//if let Some(stdin_path) = test.stdin {
//    cmd.stdin(Stdio::piped());
//    let mut stdin_reader = File::open(&stdin_path).expect("failed to open stdin file");
//    io::copy(&mut stdin_reader, stdin).expect("failed to write to stdin of binary");
//}
log::debug!("calling compiler as: {:?}", cmd);
match cmd.status() {
Ok(status) if status.success() => (),
Ok(status) => {
log::error!("compiler failed with non-zero exit code: {:?}", status);
return None;
}
Err(msg) => {
log::error!("compiler crash {:?}", msg);
return None;
}
}
let stats_file = File::open(measurement_path).unwrap();
let stats_reader = BufReader::new(stats_file);
let profile = serde_json::from_reader(stats_reader).unwrap();
log::debug!("Stats:\n{}", AsciiDisp(&profile));
Some((outpath, profile))
}
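// Measurement contract (added note; the JSON shape is an assumption): the
// compiler child process writes its timings to the path in MEASURE_JSON,
// e.g. something like
//
//     [{"label": "parsing", "indent": 0, "duration": {...}}, ...]
//
// which is deserialized into `CompilerMeasurements` above.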
#[derive(StructOpt)]
#[structopt(name = "benchmark")]
/// Small utility to benchmark each step of the compiler pipeline
pub struct Opts {
/// Number of invocations per test file
#[structopt(short = "s", long = "samples", default_value = "2")]
samples: usize,
/// Only test filenames matching the given regex are benchmarked
#[structopt(short = "o", long = "only", default_value = "")]
filter: Regex,
/// Optimization level that should be applied
#[structopt(long = "--optimization", short = "-O", default_value = "aggressive")]
opt_level: optimization_arg::Arg,
#[structopt(long = "--backend", short = "-b")]
backend: Backend,
}
#[derive(serde_derive::Serialize, serde_derive::Deserialize)]
struct ReferenceBenchmark {
mean: f64,
timestamp: SystemTime,
}
#[derive(serde_derive::Serialize, serde_derive::Deserialize)]
struct ReferenceFormat {
// TODO: in contrast to the other code in this file this does
// not support multiple benchmarks with identical names
measurements: HashMap<String, ReferenceBenchmark>,
}
impl ReferenceFormat {
fn new() -> Self {
Self {
measurements: HashMap::new(),
}
}
}
fn main() {
env_logger::init();
let opts = Opts::from_args();
for big_test in &big_tests() {
if !opts.filter.is_match(&big_test.minijava.to_string_lossy()) {
log::info!("skipping {}", big_test.minijava.display());
continue;
}
let mut bench = Benchmark::new(big_test.minijava.clone());
let mut out = None;
for _ in 0..opts.samples {
if let Some((outbinary, timings)) =
profile_compiler(big_test, opts.opt_level.clone().into(), opts.backend)
{
bench.add(&timings);
out = Some(outbinary);
}
}
let title = format!(
"BENCHMARK {}",
big_test.minijava.file_stem().unwrap().to_string_lossy()
);
bench.load_reference_from_disk();
println!("{}\n{}\n", title, "=".repeat(title.len()));
println!("{}\n", bench);
bench.write_to_disk();
if let (Ok(cmd_str), Some(binary_path)) = (
if big_test.stdin.is_some() {
std::env::var("COMPILED_PROGRAM_BENCHMARK_WITH_STDIN")
} else {
std::env::var("COMPILED_PROGRAM_BENCHMARK")
},
out,
) {
let cmd_str = cmd_str.replace("BINARY_PATH", binary_path.as_path().to_str().unwrap());
let cmd_str = if let Some(stdin_file) = &big_test.stdin {
cmd_str.replace(
"INPUT_PATH",
&stdin_file.as_path().to_str().unwrap().to_owned(),
)
} else {
cmd_str
};
let pieces = shell_words::split(&cmd_str).expect("invalid program benchmark command");
let (prog, args) = pieces.split_at(1);
let mut cmd = Command::new(&prog[0]);
cmd.args(args);
log::debug!("Benchmarking generated binary using: {:?}", cmd);
match cmd.status() {
Ok(status) if status.success() => {}
Ok(status) => {
log::error!(
"binary benchmark failed with non-zero exit code: {:?}",
status
);
}
Err(msg) => {
log::error!("binary benchmark crash {:?}", msg);
}
}
}
}
}
#[derive(Debug, Clone)]
pub struct BenchmarkEntry {
label: String,
indent: usize,
stats: OnlineStats,
}
pub struct Benchmark {
file: PathBuf,
measurements: Vec<BenchmarkEntry>,
reference: Option<ReferenceFormat>,
}
impl Benchmark {
pub fn new(file: PathBuf) -> Self {
Self {
measurements: Vec::new(),
reference: None,
file,
}
}
pub fn add(&mut self, measurements: &[SingleMeasurement]) {
if self.measurements.is_empty() {
for measurement in measurements {
self.measurements.push(BenchmarkEntry {
label: measurement.label.clone(),
indent: measurement.indent,
stats: OnlineStats::from_slice(&[measurement.duration.as_millis()]),
});
}
return;
}
if !self.is_compatible(measurements) {
panic!("measurements incomplete");
}
for (i, item) in measurements.iter().enumerate() {
self.measurements[i].stats.add(item.duration.as_millis());
}
}
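// Usage sketch (added): repeated samples with an identical label/indent
// sequence accumulate into the per-entry OnlineStats:
//
//     let mut bench = Benchmark::new(path);
//     bench.add(&timings_run_1);
//     bench.add(&timings_run_2); // panics if the label sequence differs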
fn is_compatible(&self, measurements: &[SingleMeasurement]) -> bool {
if measurements.len() != self.measurements.len() {
return false;
}
for (this, other) in self.measurements.iter().zip(measurements.iter()) {
if this.label != other.label || this.indent != other.indent {
return false;
}
}
true
}
fn filename(&self) -> PathBuf {
self.file.with_extension("benchmark.json")
}
fn write_to_disk(&self) {
let mut diskformat = ReferenceFormat::new();
let now = SystemTime::now();
for measurement in self.measurements.iter() {
diskformat.measurements.insert(
measurement.label.clone(),
ReferenceBenchmark {
mean: measurement.stats.mean(),
timestamp: now,
},
);
}
match OpenOptions::new()
.write(true)
.truncate(true)
.create(true)
.open(self.filename())
{
Ok(outfile) => |
Err(msg) => {
log::debug!(
"could not open file for reference benchmark of {}: {:?}",
self.file.display(),
msg
);
}
}
}
fn load_reference_from_disk(&mut self) {
if let Ok(res) = self._load_reference() {
self.reference = Some(res);
} else {
log::debug!(
"could not find reference benchmark for {}",
self.file.display()
);
}
}
fn _load_reference(&mut self) -> io::Result<ReferenceFormat> {
let file = File::open(self.filename())?;
let reader = BufReader::new(file);
// Read the JSON contents of the file as an instance of `User`.
let u = serde_json::from_reader(reader)?;
Ok(u)
}
}
impl fmt::Display for Benchmark {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let min_label_width = 50;
let now = SystemTime::now();
for timing in &self.measurements {
let indent = " ".repeat(timing.indent);
let (change, timestamp) = if let Some(previous_invocation) = &self.reference {
if let Some(previous_result) = previous_invocation.measurements.get(&timing.label) {
let change = (timing.stats.mean() / previous_result.mean * 100.0) - 100.0;
let reference_date = now.duration_since(previous_result.timestamp).unwrap();
// remove sub-second precision
let reference_date = Duration::from_secs(reference_date.as_secs());
(
format!("{: >+8.3}%", change),
format_duration(reference_date).to_string(),
)
} else {
("n/a".to_string(), "".to_string())
}
} else {
("".to_string(), "".to_string())
};
writeln!(
f,
"{nesting}{label: <label_width$} \
{ms: >ms_width$.5} +/- {stddev: >stddev_width$.5}ms \
{samples} samples \
{change} {timestamp} ago",
label = timing.label,
ms = timing.stats.mean(),
stddev = timing.stats.stddev(),
samples = timing.stats.len(),
nesting = indent,
label_width = min_label_width - indent.len(),
ms_width = 20,
stddev_width = 10,
change = change,
timestamp = timestamp
)?;
}
Ok(())
}
}
| {
if let Err(msg) = serde_json::ser::to_writer(&outfile, &diskformat) {
log::debug!(
"could not write file for reference benchmark of {}: {:?}",
self.file.display(),
msg
);
}
} | conditional_block |
benchmark.rs | #![feature(duration_as_u128)]
//! Executes all mjtests in the /exec/big folder.
use compiler_cli::optimization_arg;
use compiler_shared::timing::{AsciiDisp, CompilerMeasurements, SingleMeasurement};
use humantime::format_duration;
use optimization;
use regex::Regex;
use runner_integration_tests::{compiler_call, Backend, CompilerCall, CompilerPhase};
use stats::OnlineStats;
use std::{
collections::HashMap,
ffi::OsStr,
fmt,
fs::{self, File, OpenOptions},
io::{self, BufReader},
path::PathBuf,
process::Command,
time::{Duration, SystemTime},
};
use structopt::StructOpt;
fn test_folder() -> PathBuf {
PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("../mjtest-rs/tests")
}
#[derive(Debug, Clone)]
struct BigTest {
minijava: PathBuf,
stdin: Option<PathBuf>,
}
fn big_tests() -> Vec<BigTest> {
let dirpath = test_folder().join("exec/big");
log::info!("test directory is {}", dirpath.display());
let dirlisting = fs::read_dir(dirpath).unwrap();
let mut big_tests = vec![];
for entry in dirlisting {
let path = entry.unwrap().path();
if path.extension() == Some(OsStr::new("java")) {
let test = BigTest {
stdin: {
let mut stdin = path.clone();
let stem = path.file_stem().unwrap();
// remove extension
stdin.pop();
stdin.push(stem);
// set new extension
// TODO: support multiple input files
stdin.set_extension("0.inputc");
log::debug!("looking for stdin file at {}", stdin.display());
if stdin.is_file() {
Some(stdin)
} else {
None
}
},
minijava: path,
};
log::debug!("Found test: {:?}", test);
big_tests.push(test);
}
}
big_tests
}
fn profile_compiler(
test: &BigTest,
optimizations: optimization::Level,
backend: Backend,
) -> Option<(PathBuf, CompilerMeasurements)> {
let outpath = test.minijava.with_extension("benchmark.out");
let mut cmd = compiler_call(
CompilerCall::RawCompiler(CompilerPhase::Binary {
backend,
// TODO: use temp dir, don't trash
output: outpath.clone(),
assembly: None,
optimizations,
}),
&test.minijava,
);
let measurement_path = "measurement.json";
cmd.env("MEASURE_JSON", measurement_path);
// TODO: run and benchmark the binary
//if let Some(stdin_path) = test.stdin {
//    cmd.stdin(Stdio::piped());
//    let mut stdin_reader = File::open(&stdin_path).expect("failed to open stdin file");
//    io::copy(&mut stdin_reader, stdin).expect("failed to write to stdin of binary");
//}
log::debug!("calling compiler as: {:?}", cmd);
match cmd.status() {
Ok(status) if status.success() => (),
Ok(status) => {
log::error!("compiler failed with non-zero exit code: {:?}", status);
return None;
}
Err(msg) => {
log::error!("compiler crash {:?}", msg);
return None;
}
}
let stats_file = File::open(measurement_path).unwrap();
let stats_reader = BufReader::new(stats_file);
let profile = serde_json::from_reader(stats_reader).unwrap();
log::debug!("Stats:\n{}", AsciiDisp(&profile));
Some((outpath, profile))
}
#[derive(StructOpt)]
#[structopt(name = "benchmark")]
/// Small utility to benchmark each step of the compiler pipeline
pub struct Opts {
/// Number of invocations per test file
#[structopt(short = "s", long = "samples", default_value = "2")]
samples: usize,
/// Only test filenames matching the given regex are benchmarked
#[structopt(short = "o", long = "only", default_value = "")]
filter: Regex,
/// Optimization level that should be applied
#[structopt(long = "--optimization", short = "-O", default_value = "aggressive")]
opt_level: optimization_arg::Arg,
#[structopt(long = "--backend", short = "-b")]
backend: Backend,
}
#[derive(serde_derive::Serialize, serde_derive::Deserialize)]
struct ReferenceBenchmark {
mean: f64,
timestamp: SystemTime,
}
#[derive(serde_derive::Serialize, serde_derive::Deserialize)]
struct ReferenceFormat {
// TODO: in contrast to the other code in this file this does
// not support multiple benchmarks with identical names
measurements: HashMap<String, ReferenceBenchmark>,
}
impl ReferenceFormat {
fn new() -> Self |
}
fn main() {
env_logger::init();
let opts = Opts::from_args();
for big_test in &big_tests() {
if !opts.filter.is_match(&big_test.minijava.to_string_lossy()) {
log::info!("skipping {}", big_test.minijava.display());
continue;
}
let mut bench = Benchmark::new(big_test.minijava.clone());
let mut out = None;
for _ in 0..opts.samples {
if let Some((outbinary, timings)) =
profile_compiler(big_test, opts.opt_level.clone().into(), opts.backend)
{
bench.add(&timings);
out = Some(outbinary);
}
}
let title = format!(
"BENCHMARK {}",
big_test.minijava.file_stem().unwrap().to_string_lossy()
);
bench.load_reference_from_disk();
println!("{}\n{}\n", title, "=".repeat(title.len()));
println!("{}\n", bench);
bench.write_to_disk();
if let (Ok(cmd_str), Some(binary_path)) = (
if big_test.stdin.is_some() {
std::env::var("COMPILED_PROGRAM_BENCHMARK_WITH_STDIN")
} else {
std::env::var("COMPILED_PROGRAM_BENCHMARK")
},
out,
) {
let cmd_str = cmd_str.replace("BINARY_PATH", binary_path.as_path().to_str().unwrap());
let cmd_str = if let Some(stdin_file) = &big_test.stdin {
cmd_str.replace(
"INPUT_PATH",
&stdin_file.as_path().to_str().unwrap().to_owned(),
)
} else {
cmd_str
};
let pieces = shell_words::split(&cmd_str).expect("invalid program benchmark command");
let (prog, args) = pieces.split_at(1);
let mut cmd = Command::new(&prog[0]);
cmd.args(args);
log::debug!("Benchmarking generated binary using: {:?}", cmd);
match cmd.status() {
Ok(status) if status.success() => {}
Ok(status) => {
log::error!(
"binary benchmark failed with non-zero exit code: {:?}",
status
);
}
Err(msg) => {
log::error!("binary benchmark crash {:?}", msg);
}
}
}
}
}
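// Example invocation (added; the hyperfine command is hypothetical -- any
// shell command using the BINARY_PATH/INPUT_PATH placeholders works):
//
//     COMPILED_PROGRAM_BENCHMARK='hyperfine BINARY_PATH' \
//     COMPILED_PROGRAM_BENCHMARK_WITH_STDIN='hyperfine "BINARY_PATH < INPUT_PATH"' \
//     benchmark --samples 5 --only big -O aggressive --backend <backend>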
#[derive(Debug, Clone)]
pub struct BenchmarkEntry {
label: String,
indent: usize,
stats: OnlineStats,
}
pub struct Benchmark {
file: PathBuf,
measurements: Vec<BenchmarkEntry>,
reference: Option<ReferenceFormat>,
}
impl Benchmark {
pub fn new(file: PathBuf) -> Self {
Self {
measurements: Vec::new(),
reference: None,
file,
}
}
pub fn add(&mut self, measurements: &[SingleMeasurement]) {
if self.measurements.is_empty() {
for measurement in measurements {
self.measurements.push(BenchmarkEntry {
label: measurement.label.clone(),
indent: measurement.indent,
stats: OnlineStats::from_slice(&[measurement.duration.as_millis()]),
});
}
return;
}
if !self.is_compatible(measurements) {
panic!("measurements incomplete");
}
for (i, item) in measurements.iter().enumerate() {
self.measurements[i].stats.add(item.duration.as_millis());
}
}
fn is_compatible(&self, measurements: &[SingleMeasurement]) -> bool {
if measurements.len() != self.measurements.len() {
return false;
}
for (this, other) in self.measurements.iter().zip(measurements.iter()) {
if this.label != other.label || this.indent != other.indent {
return false;
}
}
true
}
fn filename(&self) -> PathBuf {
self.file.with_extension("benchmark.json")
}
fn write_to_disk(&self) {
let mut diskformat = ReferenceFormat::new();
let now = SystemTime::now();
for measurement in self.measurements.iter() {
diskformat.measurements.insert(
measurement.label.clone(),
ReferenceBenchmark {
mean: measurement.stats.mean(),
timestamp: now,
},
);
}
match OpenOptions::new()
.write(true)
.truncate(true)
.create(true)
.open(self.filename())
{
Ok(outfile) => {
if let Err(msg) = serde_json::ser::to_writer(&outfile, &diskformat) {
log::debug!(
"could not write file for reference benchmark of {}: {:?}",
self.file.display(),
msg
);
}
}
Err(msg) => {
log::debug!(
"could not open file for reference benchmark of {}: {:?}",
self.file.display(),
msg
);
}
}
}
fn load_reference_from_disk(&mut self) {
if let Ok(res) = self._load_reference() {
self.reference = Some(res);
} else {
log::debug!(
"could not find reference benchmark for {}",
self.file.display()
);
}
}
fn _load_reference(&mut self) -> io::Result<ReferenceFormat> {
let file = File::open(self.filename())?;
let reader = BufReader::new(file);
// Read the JSON contents of the file as an instance of `User`.
let u = serde_json::from_reader(reader)?;
Ok(u)
}
}
impl fmt::Display for Benchmark {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let min_label_width = 50;
let now = SystemTime::now();
for timing in &self.measurements {
let indent = " ".repeat(timing.indent);
let (change, timestamp) = if let Some(previous_invocation) = &self.reference {
if let Some(previous_result) = previous_invocation.measurements.get(&timing.label) {
let change = (timing.stats.mean() / previous_result.mean * 100.0) - 100.0;
let reference_date = now.duration_since(previous_result.timestamp).unwrap();
// remove sub-second precision
let reference_date = Duration::from_secs(reference_date.as_secs());
(
format!("{: >+8.3}%", change),
format_duration(reference_date).to_string(),
)
} else {
("n/a".to_string(), "".to_string())
}
} else {
("".to_string(), "".to_string())
};
writeln!(
f,
"{nesting}{label: <label_width$} \
{ms: >ms_width$.5} +/- {stddev: >stddev_width$.5}ms \
{samples} samples \
{change} {timestamp} ago",
label = timing.label,
ms = timing.stats.mean(),
stddev = timing.stats.stddev(),
samples = timing.stats.len(),
nesting = indent,
label_width = min_label_width - indent.len(),
ms_width = 20,
stddev_width = 10,
change = change,
timestamp = timestamp
)?;
}
Ok(())
}
}
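// Example rendered line (added; values invented for illustration):
//
//     parsing                                     12.34567 +/-    0.12345ms 10 samples   +1.234% 2h 5m ago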
| {
Self {
measurements: HashMap::new(),
}
} | identifier_body |
benchmark.rs | #![feature(duration_as_u128)]
//! Executes all mjtests in the /exec/big folder.
use compiler_cli::optimization_arg;
use compiler_shared::timing::{AsciiDisp, CompilerMeasurements, SingleMeasurement};
use humantime::format_duration;
use optimization;
use regex::Regex;
use runner_integration_tests::{compiler_call, Backend, CompilerCall, CompilerPhase};
use stats::OnlineStats;
use std::{
collections::HashMap,
ffi::OsStr,
fmt,
fs::{self, File, OpenOptions},
io::{self, BufReader},
path::PathBuf,
process::Command,
time::{Duration, SystemTime},
};
use structopt::StructOpt;
fn test_folder() -> PathBuf {
PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("../mjtest-rs/tests")
}
#[derive(Debug, Clone)]
struct BigTest {
minijava: PathBuf,
stdin: Option<PathBuf>,
}
fn big_tests() -> Vec<BigTest> {
let dirpath = test_folder().join("exec/big");
log::info!("test directory is {}", dirpath.display());
let dirlisting = fs::read_dir(dirpath).unwrap();
let mut big_tests = vec![];
for entry in dirlisting {
let path = entry.unwrap().path();
if path.extension() == Some(OsStr::new("java")) {
let test = BigTest {
stdin: {
let mut stdin = path.clone();
let stem = path.file_stem().unwrap();
// remove extension
stdin.pop();
stdin.push(stem);
// set new extension
// TODO: support multiple input files
stdin.set_extension("0.inputc");
log::debug!("looking for stdin file at {}", stdin.display());
if stdin.is_file() {
Some(stdin)
} else {
None
}
},
minijava: path,
};
log::debug!("Found test: {:?}", test);
big_tests.push(test);
}
}
big_tests
}
fn profile_compiler(
test: &BigTest,
optimizations: optimization::Level,
backend: Backend,
) -> Option<(PathBuf, CompilerMeasurements)> {
let outpath = test.minijava.with_extension("benchmark.out");
let mut cmd = compiler_call(
CompilerCall::RawCompiler(CompilerPhase::Binary {
backend,
// TODO: use temp dir, don't trash
output: outpath.clone(),
assembly: None,
optimizations,
}),
&test.minijava,
);
let measurement_path = "measurement.json";
cmd.env("MEASURE_JSON", measurement_path);
// TODO: run and benchmark the binary
//if let Some(stdin_path) = test.stdin {
//    cmd.stdin(Stdio::piped());
//    let mut stdin_reader = File::open(&stdin_path).expect("failed to open stdin file");
//    io::copy(&mut stdin_reader, stdin).expect("failed to write to stdin of binary");
//}
log::debug!("calling compiler as: {:?}", cmd);
match cmd.status() {
Ok(status) if status.success() => (),
Ok(status) => {
log::error!("compiler failed with non-zero exit code: {:?}", status);
return None;
}
Err(msg) => {
log::error!("compiler crash {:?}", msg);
return None;
}
}
let stats_file = File::open(measurement_path).unwrap();
let stats_reader = BufReader::new(stats_file);
let profile = serde_json::from_reader(stats_reader).unwrap();
log::debug!("Stats:\n{}", AsciiDisp(&profile));
Some((outpath, profile))
}
#[derive(StructOpt)]
#[structopt(name = "benchmark")]
/// Small utility to benchmark each step of the compiler pipeline
pub struct Opts {
/// Number of invocations per test file
#[structopt(short = "s", long = "samples", default_value = "2")]
samples: usize,
/// Only test filenames matching the given regex are benchmarked
#[structopt(short = "o", long = "only", default_value = "")]
filter: Regex,
/// Optimization level that should be applied
#[structopt(long = "--optimization", short = "-O", default_value = "aggressive")]
opt_level: optimization_arg::Arg,
#[structopt(long = "--backend", short = "-b")]
backend: Backend,
}
#[derive(serde_derive::Serialize, serde_derive::Deserialize)]
struct ReferenceBenchmark {
mean: f64,
timestamp: SystemTime,
}
#[derive(serde_derive::Serialize, serde_derive::Deserialize)]
struct ReferenceFormat {
// TODO: in contrast to the other code in this file this does
// not support multiple benchmarks with identical names
measurements: HashMap<String, ReferenceBenchmark>,
}
impl ReferenceFormat {
fn new() -> Self {
Self {
measurements: HashMap::new(),
}
}
}
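// On-disk shape (added, illustrative; follows from the serde derives above --
// serde encodes SystemTime as seconds/nanos since the Unix epoch):
//
//     {"measurements": {"parsing": {"mean": 12.3,
//       "timestamp": {"secs_since_epoch": 1546300800, "nanos_since_epoch": 0}}}}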
fn main() {
env_logger::init();
let opts = Opts::from_args();
for big_test in &big_tests() {
if !opts.filter.is_match(&big_test.minijava.to_string_lossy()) {
log::info!("skipping {}", big_test.minijava.display());
continue;
}
let mut bench = Benchmark::new(big_test.minijava.clone());
let mut out = None;
for _ in 0..opts.samples {
if let Some((outbinary, timings)) =
profile_compiler(big_test, opts.opt_level.clone().into(), opts.backend)
{
bench.add(&timings);
out = Some(outbinary);
}
}
let title = format!(
"BENCHMARK {}",
big_test.minijava.file_stem().unwrap().to_string_lossy()
);
bench.load_reference_from_disk();
println!("{}\n{}\n", title, "=".repeat(title.len()));
println!("{}\n", bench);
bench.write_to_disk();
if let (Ok(cmd_str), Some(binary_path)) = (
if big_test.stdin.is_some() {
std::env::var("COMPILED_PROGRAM_BENCHMARK_WITH_STDIN")
} else {
std::env::var("COMPILED_PROGRAM_BENCHMARK")
},
out,
) {
let cmd_str = cmd_str.replace("BINARY_PATH", binary_path.as_path().to_str().unwrap());
let cmd_str = if let Some(stdin_file) = &big_test.stdin {
cmd_str.replace(
"INPUT_PATH",
&stdin_file.as_path().to_str().unwrap().to_owned(),
)
} else {
cmd_str
};
let pieces = shell_words::split(&cmd_str).expect("invalid program benchmark command");
let (prog, args) = pieces.split_at(1);
let mut cmd = Command::new(&prog[0]);
cmd.args(args);
log::debug!("Benchmarking generated binary using: {:?}", cmd);
match cmd.status() {
Ok(status) if status.success() => {}
Ok(status) => {
log::error!(
"binary benchmark failed with non-zero exit code: {:?}",
status
);
}
Err(msg) => {
log::error!("binary benchmark crash {:?}", msg);
}
}
}
}
}
#[derive(Debug, Clone)]
pub struct BenchmarkEntry {
label: String,
indent: usize,
stats: OnlineStats,
}
pub struct Benchmark {
file: PathBuf,
measurements: Vec<BenchmarkEntry>,
reference: Option<ReferenceFormat>,
}
impl Benchmark {
pub fn | (file: PathBuf) -> Self {
Self {
measurements: Vec::new(),
reference: None,
file,
}
}
pub fn add(&mut self, measurements: &[SingleMeasurement]) {
if self.measurements.is_empty() {
for measurement in measurements {
self.measurements.push(BenchmarkEntry {
label: measurement.label.clone(),
indent: measurement.indent,
stats: OnlineStats::from_slice(&[measurement.duration.as_millis()]),
});
}
return;
}
if !self.is_compatible(measurements) {
panic!("measurements incomplete");
}
for (i, item) in measurements.iter().enumerate() {
self.measurements[i].stats.add(item.duration.as_millis());
}
}
fn is_compatible(&self, measurements: &[SingleMeasurement]) -> bool {
if measurements.len() != self.measurements.len() {
return false;
}
for (this, other) in self.measurements.iter().zip(measurements.iter()) {
if this.label != other.label || this.indent != other.indent {
return false;
}
}
true
}
fn filename(&self) -> PathBuf {
self.file.with_extension("benchmark.json")
}
fn write_to_disk(&self) {
let mut diskformat = ReferenceFormat::new();
let now = SystemTime::now();
for measurement in self.measurements.iter() {
diskformat.measurements.insert(
measurement.label.clone(),
ReferenceBenchmark {
mean: measurement.stats.mean(),
timestamp: now,
},
);
}
match OpenOptions::new()
.write(true)
.truncate(true)
.create(true)
.open(self.filename())
{
Ok(outfile) => {
if let Err(msg) = serde_json::ser::to_writer(&outfile, &diskformat) {
log::debug!(
"could not write file for reference benchmark of {}: {:?}",
self.file.display(),
msg
);
}
}
Err(msg) => {
log::debug!(
"could not open file for reference benchmark of {}: {:?}",
self.file.display(),
msg
);
}
}
}
fn load_reference_from_disk(&mut self) {
if let Ok(res) = self._load_reference() {
self.reference = Some(res);
} else {
log::debug!(
"could not find reference benchmark for {}",
self.file.display()
);
}
}
fn _load_reference(&mut self) -> io::Result<ReferenceFormat> {
let file = File::open(self.filename())?;
let reader = BufReader::new(file);
// Read the JSON contents of the file as an instance of `User`.
let u = serde_json::from_reader(reader)?;
Ok(u)
}
}
impl fmt::Display for Benchmark {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let min_label_width = 50;
let now = SystemTime::now();
for timing in &self.measurements {
let indent = " ".repeat(timing.indent);
let (change, timestamp) = if let Some(previous_invocation) = &self.reference {
if let Some(previous_result) = previous_invocation.measurements.get(&timing.label) {
let change = (timing.stats.mean() / previous_result.mean * 100.0) - 100.0;
let reference_date = now.duration_since(previous_result.timestamp).unwrap();
// remove sub-second precision
let reference_date = Duration::from_secs(reference_date.as_secs());
(
format!("{: >+8.3}%", change),
format_duration(reference_date).to_string(),
)
} else {
("n/a".to_string(), "".to_string())
}
} else {
("".to_string(), "".to_string())
};
writeln!(
f,
"{nesting}{label: <label_width$} \
{ms: >ms_width$.5} +/- {stddev: >stddev_width$.5}ms \
{samples} samples \
{change} {timestamp} ago",
label = timing.label,
ms = timing.stats.mean(),
stddev = timing.stats.stddev(),
samples = timing.stats.len(),
nesting = indent,
label_width = min_label_width - indent.len(),
ms_width = 20,
stddev_width = 10,
change = change,
timestamp = timestamp
)?;
}
Ok(())
}
}
| new | identifier_name |
benchmark.rs | #![feature(duration_as_u128)]
//! Executes all mjtests in the /exec/big folder.
use compiler_cli::optimization_arg;
use compiler_shared::timing::{AsciiDisp, CompilerMeasurements, SingleMeasurement};
use humantime::format_duration;
use optimization;
use regex::Regex;
use runner_integration_tests::{compiler_call, Backend, CompilerCall, CompilerPhase};
use stats::OnlineStats;
use std::{
collections::HashMap,
ffi::OsStr,
fmt,
fs::{self, File, OpenOptions},
io::{self, BufReader},
path::PathBuf,
process::Command,
time::{Duration, SystemTime},
};
use structopt::StructOpt;
fn test_folder() -> PathBuf {
PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("../mjtest-rs/tests")
}
#[derive(Debug, Clone)]
struct BigTest {
minijava: PathBuf,
stdin: Option<PathBuf>,
}
fn big_tests() -> Vec<BigTest> {
let dirpath = test_folder().join("exec/big");
log::info!("test directory is {}", dirpath.display());
let dirlisting = fs::read_dir(dirpath).unwrap();
let mut big_tests = vec![];
for entry in dirlisting {
let path = entry.unwrap().path();
if path.extension() == Some(OsStr::new("java")) {
let test = BigTest {
stdin: {
let mut stdin = path.clone();
let stem = path.file_stem().unwrap();
// remove extension
stdin.pop();
stdin.push(stem);
// set new extension
// TODO: support multiple input files
stdin.set_extension("0.inputc");
log::debug!("looking for stdin file at {}", stdin.display());
if stdin.is_file() {
Some(stdin)
} else {
None
}
},
minijava: path,
};
log::debug!("Found test: {:?}", test);
big_tests.push(test);
}
}
big_tests
}
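// Naming convention used above: a test Foo.java may ship its stdin next to it
// as Foo.0.inputc; only that first input file is picked up for now (see the
// TODO about multiple input files).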
fn profile_compiler(
test: &BigTest,
optimizations: optimization::Level,
backend: Backend,
) -> Option<(PathBuf, CompilerMeasurements)> {
let outpath = test.minijava.with_extension("benchmark.out");
let mut cmd = compiler_call(
CompilerCall::RawCompiler(CompilerPhase::Binary {
backend,
// TODO: use temp dir, don't trash
output: outpath.clone(),
assembly: None,
optimizations,
}),
&test.minijava,
);
let measurement_path = "measurement.json";
cmd.env("MEASURE_JSON", measurement_path);
// TODO: run and benchmark the binary
//if let Some(stdin_path) = test.stdin {
//cmd.stdin(Stdio::piped());
    //let mut stdin_reader = File::open(&stdin_path).expect("failed to open stdin file");
    //io::copy(&mut stdin_reader, stdin).expect("failed to write to stdin of binary");
//}
log::debug!("calling compiler as: {:?}", cmd);
match cmd.status() {
Ok(status) if status.success() => (),
Ok(status) => {
log::error!("compiler failed with non-zero exit code: {:?}", status);
return None;
}
Err(msg) => {
log::error!("compiler crash {:?}", msg);
return None;
}
}
let stats_file = File::open(measurement_path).unwrap();
let stats_reader = BufReader::new(stats_file);
let profile = serde_json::from_reader(stats_reader).unwrap();
log::debug!("Stats:\n{}", AsciiDisp(&profile));
Some((outpath, profile))
}
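// Rough sketch (editor's assumption, inferred from the fields this file reads:
// label, indent, duration) of what the compiler writes to measurement.json;
// the authoritative serde layout is `CompilerMeasurements` in
// `compiler_shared::timing`:
//
//   [
//     { "label": "parse",  "indent": 0, "duration": { "secs": 1, "nanos": 0 } },
//     { "label": "lexing", "indent": 1, "duration": { "secs": 0, "nanos": 250000000 } }
//   ]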
#[derive(StructOpt)]
#[structopt(name = "benchmark")]
/// Small utility to benchmark each step of the compiler pipeline
pub struct Opts {
    /// Number of invocations per test file
#[structopt(short = "s", long = "samples", default_value = "2")]
samples: usize,
/// Only test filenames matching the given regex are benchmarked
#[structopt(short = "o", long = "only", default_value = "")]
filter: Regex,
/// Optimization level that should be applied
#[structopt(long = "--optimization", short = "-O", default_value = "aggressive")]
opt_level: optimization_arg::Arg,
#[structopt(long = "--backend", short = "-b")]
backend: Backend,
}
#[derive(serde_derive::Serialize, serde_derive::Deserialize)]
struct ReferenceBenchmark {
mean: f64,
timestamp: SystemTime,
}
#[derive(serde_derive::Serialize, serde_derive::Deserialize)]
struct ReferenceFormat {
// TODO: in contrast to the other code in this file this does
// not support multiple benchmarks with identical names
measurements: HashMap<String, ReferenceBenchmark>,
}
impl ReferenceFormat {
fn new() -> Self {
Self {
measurements: HashMap::new(), | fn main() {
env_logger::init();
let opts = Opts::from_args();
for big_test in &big_tests() {
        if !opts.filter.is_match(&big_test.minijava.to_string_lossy()) {
log::info!("skipping {}", big_test.minijava.display());
continue;
}
let mut bench = Benchmark::new(big_test.minijava.clone());
let mut out = None;
for _ in 0..opts.samples {
if let Some((outbinary, timings)) =
profile_compiler(big_test, opts.opt_level.clone().into(), opts.backend)
{
bench.add(&timings);
out = Some(outbinary);
}
}
let title = format!(
"BENCHMARK {}",
big_test.minijava.file_stem().unwrap().to_string_lossy()
);
bench.load_reference_from_disk();
println!("{}\n{}\n", title, "=".repeat(title.len()));
println!("{}\n", bench);
bench.write_to_disk();
if let (Ok(cmd_str), Some(binary_path)) = (
if big_test.stdin.is_some() {
std::env::var("COMPILED_PROGRAM_BENCHMARK_WITH_STDIN")
} else {
std::env::var("COMPILED_PROGRAM_BENCHMARK")
},
out,
) {
let cmd_str = cmd_str.replace("BINARY_PATH", binary_path.as_path().to_str().unwrap());
let cmd_str = if let Some(stdin_file) = &big_test.stdin {
cmd_str.replace(
"INPUT_PATH",
&stdin_file.as_path().to_str().unwrap().to_owned(),
)
} else {
cmd_str
};
let pieces = shell_words::split(&cmd_str).expect("invalid program benchmark command");
let (prog, args) = pieces.split_at(1);
let mut cmd = Command::new(&prog[0]);
cmd.args(args);
log::debug!("Benchmarking generated binary using: {:?}", cmd);
match cmd.status() {
Ok(status) if status.success() => {}
Ok(status) => {
log::error!(
"binary benchmark failed with non-zero exit code: {:?}",
status
);
}
Err(msg) => {
log::error!("binary benchmark crash {:?}", msg);
}
}
}
}
}
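// The two environment variables read above are command templates: the literal
// tokens BINARY_PATH and INPUT_PATH are substituted before the string is split
// with shell_words and spawned. Hypothetical example values (any runner works):
//
//   COMPILED_PROGRAM_BENCHMARK="perf stat BINARY_PATH"
//   COMPILED_PROGRAM_BENCHMARK_WITH_STDIN="hyperfine 'BINARY_PATH < INPUT_PATH'"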
#[derive(Debug, Clone)]
pub struct BenchmarkEntry {
label: String,
indent: usize,
stats: OnlineStats,
}
pub struct Benchmark {
file: PathBuf,
measurements: Vec<BenchmarkEntry>,
reference: Option<ReferenceFormat>,
}
impl Benchmark {
pub fn new(file: PathBuf) -> Self {
Self {
measurements: Vec::new(),
reference: None,
file,
}
}
pub fn add(&mut self, measurements: &[SingleMeasurement]) {
if self.measurements.is_empty() {
for measurement in measurements {
self.measurements.push(BenchmarkEntry {
label: measurement.label.clone(),
indent: measurement.indent,
stats: OnlineStats::from_slice(&[measurement.duration.as_millis()]),
});
}
return;
}
        if !self.is_compatible(measurements) {
            panic!("incompatible measurement set: labels changed between samples");
        }
for (i, item) in measurements.iter().enumerate() {
self.measurements[i].stats.add(item.duration.as_millis());
}
}
fn is_compatible(&self, measurements: &[SingleMeasurement]) -> bool {
        if measurements.len() != self.measurements.len() {
return false;
}
for (this, other) in self.measurements.iter().zip(measurements.iter()) {
            if this.label != other.label || this.indent != other.indent {
return false;
}
}
true
}
fn filename(&self) -> PathBuf {
self.file.with_extension("benchmark.json")
}
fn write_to_disk(&self) {
let mut diskformat = ReferenceFormat::new();
let now = SystemTime::now();
for measurement in self.measurements.iter() {
diskformat.measurements.insert(
measurement.label.clone(),
ReferenceBenchmark {
mean: measurement.stats.mean(),
timestamp: now,
},
);
}
match OpenOptions::new()
.write(true)
.truncate(true)
.create(true)
.open(self.filename())
{
Ok(outfile) => {
if let Err(msg) = serde_json::ser::to_writer(&outfile, &diskformat) {
log::debug!(
"could not write file for reference benchmark of {}: {:?}",
self.file.display(),
msg
);
}
}
Err(msg) => {
log::debug!(
"could not open file for reference benchmark of {}: {:?}",
self.file.display(),
msg
);
}
}
}
fn load_reference_from_disk(&mut self) {
if let Ok(res) = self._load_reference() {
self.reference = Some(res);
} else {
log::debug!(
"could not find reference benchmark for {}",
self.file.display()
);
}
}
fn _load_reference(&mut self) -> io::Result<ReferenceFormat> {
let file = File::open(self.filename())?;
let reader = BufReader::new(file);
        // Read the JSON contents of the file as an instance of `ReferenceFormat`.
let u = serde_json::from_reader(reader)?;
Ok(u)
}
}
impl fmt::Display for Benchmark {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let min_label_width = 50;
let now = SystemTime::now();
for timing in &self.measurements {
let indent = " ".repeat(timing.indent);
let (change, timestamp) = if let Some(previous_invocation) = &self.reference {
if let Some(previous_result) = previous_invocation.measurements.get(&timing.label) {
let change = (timing.stats.mean() / previous_result.mean * 100.0) - 100.0;
let reference_date = now.duration_since(previous_result.timestamp).unwrap();
                // drop sub-second precision for a tidier display
let reference_date = Duration::from_secs(reference_date.as_secs());
(
format!("{: >+8.3}%", change),
format_duration(reference_date).to_string(),
)
} else {
("n/a".to_string(), "".to_string())
}
} else {
("".to_string(), "".to_string())
};
writeln!(
f,
"{nesting}{label: <label_width$} \
{ms: >ms_width$.5} +/- {stddev: >stddev_width$.5}ms \
{samples} samples \
{change} {timestamp} ago",
label = timing.label,
ms = timing.stats.mean(),
stddev = timing.stats.stddev(),
samples = timing.stats.len(),
nesting = indent,
label_width = min_label_width - indent.len(),
ms_width = 20,
stddev_width = 10,
change = change,
timestamp = timestamp
)?;
}
Ok(())
}
} | }
}
}
| random_line_split |
lib.rs | //! Monie-in-the-middle http(s) proxy library
//!
//! Observe and manipulate requests by implementing `monie::Mitm`.
//!
//! Here is a skeleton to help get started:
//!
//! ```
//! use futures::future::Future;
//! use hyper::{Body, Chunk, Request, Response, Server};
//! use monie::{Mitm, MitmProxyService};
//!
//! struct MyMitm;
//!
//! impl Mitm for MyMitm {
//! fn new(_: http::uri::Uri) -> MyMitm { MyMitm {} }
//! fn request_headers(&self, req: Request<Body>) -> Request<Body> { req }
//! fn response_headers(&self, res: Response<Body>) -> Response<Body> { res }
//! fn request_body_chunk(&self, chunk: Chunk) -> Chunk { chunk }
//! fn response_body_chunk(&self, chunk: Chunk) -> Chunk { chunk }
//! }
//!
//! fn main() {
//! let addr = ([127, 0, 0, 1], 8000).into();
//! let svc = MitmProxyService::<MyMitm>::new();
//! let server = Server::bind(&addr)
//! .serve(svc)
//! .map_err(|e| eprintln!("server error: {}", e));
//! println!("noop mitm proxy listening on http://{}", addr);
//! hyper::rt::run(server);
//! }
//! ```
//!
//! Other examples can be found at
//! <https://github.com/nlevitt/monie/tree/master/examples>.
#![deny(warnings)]
#![deny(missing_docs)]
#![deny(missing_debug_implementations)]
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
pub mod certauth;
use std::error::Error;
use std::sync::Arc;
use bytes::Bytes;
use futures::future::{self, Future, FutureResult};
use futures::stream::Stream;
use http::method::Method;
use http::uri::{Authority, Uri};
use hyper::client::pool::Pooled;
use hyper::client::{HttpConnector, PoolClient};
use hyper::server::conn::Http;
use hyper::service::{service_fn, NewService, Service};
use hyper::upgrade::Upgraded;
use hyper::{Body, Chunk, Client, Request, Response};
use hyper_rustls::HttpsConnector;
use tokio_rustls::{Accept, TlsAcceptor, TlsStream};
/// Represents the interception of a single request. Users of the library must
/// implement this trait. With it you can observe and manipulate the request
/// and response payload and headers.
pub trait Mitm {
/// Create a new instance of this `Mitm` implementation. The argument `uri`
/// is the uri being proxied. Implementations may do with this what they
/// wish (log it, stash it, ignore it, etc).
fn new(uri: Uri) -> Self;
/// Observe and manipulate the request headers. The `req` argument contains
/// the original request headers received from the proxy client. The
/// request headers returned by this function are sent to the remote
/// server.
fn request_headers(&self, req: Request<Body>) -> Request<Body>;
/// Observe and manipulate a chunk of the request payload. This function
/// may be called zero or more times, depending on the size of the request
/// payload. It will not be called at all in the common case of a GET
/// request with no payload. The `chunk` argument contains an original
/// chunk of the request payload as received from the proxy client. The
/// return value of this function is sent to the remote server.
fn request_body_chunk(&self, chunk: Chunk) -> Chunk;
/// Observe and manipulate the response headers. The `res` argument
/// contains the original response headers received from the remote server.
/// The response headers returned by this function are sent to the proxy
/// client.
fn response_headers(&self, res: Response<Body>) -> Response<Body>;
/// Observe and manipulate a chunk of the response payload. This function
/// may be called zero or more times, depending on the size of the payload.
/// The `chunk` argument represents an unaltered chunk of the response
/// payload as received from the remote server. The return value of this
    /// function is sent to the proxy client.
fn response_body_chunk(&self, chunk: Chunk) -> Chunk;
}
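// --- Editor's sketch (not part of the original crate): a minimal non-noop
// `Mitm`. It stamps each response with an extra header and uppercases response
// body chunks; requests pass through untouched. Assumes the hyper 0.12-era
// `Chunk`/`HeaderMap` APIs used elsewhere in this file.
#[allow(dead_code)] // illustration only; nothing references it
struct ShoutingMitm;

impl Mitm for ShoutingMitm {
    fn new(_uri: Uri) -> ShoutingMitm {
        ShoutingMitm
    }
    fn request_headers(&self, req: Request<Body>) -> Request<Body> {
        req
    }
    fn request_body_chunk(&self, chunk: Chunk) -> Chunk {
        chunk
    }
    fn response_headers(&self, mut res: Response<Body>) -> Response<Body> {
        // Mark the response so it is easy to see the proxy touched it.
        res.headers_mut()
            .insert("x-proxied-by", "monie".parse().unwrap());
        res
    }
    fn response_body_chunk(&self, chunk: Chunk) -> Chunk {
        // `Chunk` derefs to `[u8]`; rebuild one from the uppercased bytes.
        Chunk::from(chunk.to_ascii_uppercase())
    }
}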
lazy_static! {
static ref CLIENT: Client<HttpsConnector<HttpConnector>, Body> =
Client::builder().build(HttpsConnector::new(4));
static ref HTTP: Http = Http::new();
}
/// The `hyper::service::Service` that does the proxying and calls your `Mitm`
/// implementation.
#[derive(Debug)]
pub struct MitmProxyService<T: Mitm + Sync> {
_phantom: std::marker::PhantomData<T>,
}
impl<T: Mitm + Sync> MitmProxyService<T> {
/// Creates a new `MitmProxyService`.
#[inline]
pub fn new() -> Self |
}
impl<T: Mitm + Sync + Send +'static> NewService for MitmProxyService<T> {
type ReqBody = Body;
type ResBody = Body;
type Error = std::io::Error;
type Service = MitmProxyService<T>;
type InitError = std::io::Error;
type Future = FutureResult<Self::Service, Self::InitError>;
fn new_service(&self) -> Self::Future {
future::ok(MitmProxyService::new())
}
}
impl<T: Mitm + Sync + Send +'static> Service for MitmProxyService<T> {
type ReqBody = Body;
type ResBody = Body;
type Error = std::io::Error;
type Future =
Box<dyn Future<Item = Response<Body>, Error = std::io::Error> + Send>;
fn call(&mut self, req: Request<Body>) -> Self::Future {
info!("MitmProxyService::call() handling {:?}", req);
if *req.method() == Method::CONNECT {
Box::new(proxy_connect::<T>(req))
} else {
Box::new(proxy_request::<T>(req))
}
}
}
/// Obtains a connection to the scheme://authority of `uri` from the connection
/// pool.
fn obtain_connection(
uri: Uri,
) -> impl Future<Item = Pooled<PoolClient<Body>>, Error = std::io::Error> {
let key1 = Arc::new(format!(
"{}://{}",
uri.scheme_part().unwrap(),
uri.authority_part().unwrap()
));
let key2 = Arc::clone(&key1);
let result = CLIENT.connection_for(uri, key1).map_err(move |e| {
std::io::Error::new(
std::io::ErrorKind::Other,
format!("error obtaining connection to {}: {:?}", key2, e),
)
});
result
}
/// Obtains a connection to the remote server and proxies the request, calling
/// the `Mitm` implementation functions, which may manipulate the request and
/// response. Returns a future that resolves to the response or error.
///
/// This function is called for plain http requests, and for https requests
/// received "inside" the fake, tapped `CONNECT` tunnel.
fn proxy_request<T: Mitm + Sync + Send + 'static>(
req: Request<Body>,
) -> impl Future<Item = Response<Body>, Error = std::io::Error> {
obtain_connection(req.uri().to_owned())
.map(|mut connection| {
let mitm1 = Arc::new(T::new(req.uri().to_owned()));
let mitm2 = Arc::clone(&mitm1);
let req = mitm1.request_headers(req);
let (parts, body) = req.into_parts();
let body = Body::wrap_stream(
body.map(move |chunk| mitm1.request_body_chunk(chunk)),
);
let req = Request::from_parts(parts, body);
info!("proxy_request() sending request {:?}", req);
connection
.send_request_retryable(req)
.map(|response| {
let response = mitm2.response_headers(response);
let (parts, body) = response.into_parts();
let body =
Body::wrap_stream(body.map(move |chunk| {
mitm2.response_body_chunk(chunk)
}));
Response::from_parts(parts, body)
})
.map_err(|(e, _f)| {
std::io::Error::new(std::io::ErrorKind::Other, e)
})
})
.flatten()
.or_else(|e| {
info!("proxy_request() returning 502 ({})", e);
future::ok(
Response::builder().status(502).body(Body::empty()).unwrap(),
)
})
}
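// Note on the wiring above: one `Mitm` instance is created per proxied request
// and shared through two `Arc` handles (`mitm1` moves into the request body
// stream, `mitm2` into the response hooks), so all four callbacks observe the
// same interception state.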
/// Handles a CONNECT request. Tries to obtain an https connection to the
/// remote server. If that fails, returns 502 Bad Gateway. Otherwise returns
/// 200 OK, then attempts to establish a TLS connection with the proxy client,
/// masquerading as the remote server.
fn proxy_connect<T: Mitm + Sync + Send + 'static>(
connect_req: Request<Body>,
) -> impl Future<Item = Response<Body>, Error = std::io::Error> {
info!("proxy_connect() impersonating {:?}", connect_req.uri());
let authority =
Authority::from_shared(Bytes::from(connect_req.uri().to_string()))
.unwrap();
let tls_cfg = certauth::tls_config(&authority);
let uri = http::uri::Builder::new()
.scheme("https")
.authority(authority)
.path_and_query("/")
.build()
.unwrap();
obtain_connection(uri)
.map(move |_pooled| {
let inner = connect_req.into_body().on_upgrade().map_err(|e| {
info!("proxy_connect() on_upgrade error: {:?}", e);
std::io::Error::new(std::io::ErrorKind::Other, e)
})
.and_then(|upgraded: Upgraded| -> Accept<Upgraded> {
TlsAcceptor::from(tls_cfg).accept(upgraded)
})
.map(move |stream: TlsStream<Upgraded, rustls::ServerSession>| {
info!("proxy_connect() tls connection established with proxy \
client: {:?}", stream);
service_inner_requests::<T>(stream)
})
.map_err(|e: std::io::Error| {
error!("proxy_connect() error from somewhere: {}", e);
})
.flatten();
hyper::rt::spawn(inner);
Response::builder().status(200).body(Body::empty()).unwrap()
})
.or_else(|e| {
info!("proxy_connect() returning 502, failed to connect: {:?}", e);
future::ok(
Response::builder().status(502).body(Body::empty()).unwrap(),
)
})
}
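// The CONNECT sequence above, step by step: (1) dial the real origin first so
// an unreachable host fails fast with 502; (2) answer 200 OK, prompting the
// proxy client to begin a TLS handshake over the upgraded socket; (3) complete
// that handshake with a certificate minted for the requested authority via
// certauth::tls_config; (4) hand the decrypted stream to
// service_inner_requests.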
/// Called by `proxy_connect()` once the TLS session has been established with
/// the proxy client. Proxies requests received on the TLS connection.
fn service_inner_requests<T: Mitm + Sync + Send + 'static>(
stream: TlsStream<Upgraded, rustls::ServerSession>,
) -> impl Future<Item = (), Error = ()> {
let svc = service_fn(move |req: Request<Body>| {
// "host" header is required for http 1.1
// XXX but we could fall back on authority
let authority = req.headers().get("host").unwrap().to_str().unwrap();
let uri = http::uri::Builder::new()
.scheme("https")
.authority(authority)
.path_and_query(&req.uri().to_string() as &str)
.build()
.unwrap();
let (mut parts, body) = req.into_parts();
parts.uri = uri;
let req = Request::from_parts(parts, body);
proxy_request::<T>(req)
});
HTTP.serve_connection(stream, svc)
.map_err(|e: hyper::Error| {
if match e.source() {
Some(source) => source
.to_string()
.find("Connection reset by peer")
.is_some(),
None => false,
} {
info!(
"service_inner_requests() serve_connection: \
client closed connection"
);
} else {
error!("service_inner_requests() serve_connection: {}", e);
};
})
}
| {
MitmProxyService::<T> {
_phantom: std::marker::PhantomData,
}
} | identifier_body |
lib.rs | //! Monie-in-the-middle http(s) proxy library
//!
//! Observe and manipulate requests by implementing `monie::Mitm`.
//!
//! Here is a skeleton to help get started:
//!
//! ```
//! use futures::future::Future;
//! use hyper::{Body, Chunk, Request, Response, Server};
//! use monie::{Mitm, MitmProxyService};
//!
//! struct MyMitm;
//!
//! impl Mitm for MyMitm {
//! fn new(_: http::uri::Uri) -> MyMitm { MyMitm {} }
//! fn request_headers(&self, req: Request<Body>) -> Request<Body> { req }
//! fn response_headers(&self, res: Response<Body>) -> Response<Body> { res }
//! fn request_body_chunk(&self, chunk: Chunk) -> Chunk { chunk }
//! fn response_body_chunk(&self, chunk: Chunk) -> Chunk { chunk }
//! }
//!
//! fn main() {
//! let addr = ([127, 0, 0, 1], 8000).into();
//! let svc = MitmProxyService::<MyMitm>::new();
//! let server = Server::bind(&addr)
//! .serve(svc)
//! .map_err(|e| eprintln!("server error: {}", e));
//! println!("noop mitm proxy listening on http://{}", addr);
//! hyper::rt::run(server);
//! }
//! ```
//!
//! Other examples can be found at
//! <https://github.com/nlevitt/monie/tree/master/examples>.
#![deny(warnings)]
#![deny(missing_docs)]
#![deny(missing_debug_implementations)]
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
pub mod certauth;
use std::error::Error;
use std::sync::Arc;
use bytes::Bytes;
use futures::future::{self, Future, FutureResult};
use futures::stream::Stream;
use http::method::Method;
use http::uri::{Authority, Uri};
use hyper::client::pool::Pooled;
use hyper::client::{HttpConnector, PoolClient};
use hyper::server::conn::Http;
use hyper::service::{service_fn, NewService, Service};
use hyper::upgrade::Upgraded;
use hyper::{Body, Chunk, Client, Request, Response};
use hyper_rustls::HttpsConnector;
use tokio_rustls::{Accept, TlsAcceptor, TlsStream};
/// Represents the interception of a single request. Users of the library must
/// implement this trait. With it you can observe and manipulate the request
/// and response payload and headers.
pub trait Mitm {
/// Create a new instance of this `Mitm` implementation. The argument `uri`
/// is the uri being proxied. Implementations may do with this what they
/// wish (log it, stash it, ignore it, etc).
fn new(uri: Uri) -> Self;
/// Observe and manipulate the request headers. The `req` argument contains
/// the original request headers received from the proxy client. The
/// request headers returned by this function are sent to the remote
/// server.
fn request_headers(&self, req: Request<Body>) -> Request<Body>;
/// Observe and manipulate a chunk of the request payload. This function
/// may be called zero or more times, depending on the size of the request
/// payload. It will not be called at all in the common case of a GET
/// request with no payload. The `chunk` argument contains an original
/// chunk of the request payload as received from the proxy client. The
/// return value of this function is sent to the remote server.
fn request_body_chunk(&self, chunk: Chunk) -> Chunk;
/// Observe and manipulate the response headers. The `res` argument
/// contains the original response headers received from the remote server.
/// The response headers returned by this function are sent to the proxy
/// client.
fn response_headers(&self, res: Response<Body>) -> Response<Body>;
/// Observe and manipulate a chunk of the response payload. This function
/// may be called zero or more times, depending on the size of the payload.
/// The `chunk` argument represents an unaltered chunk of the response
/// payload as received from the remote server. The return value of this
    /// function is sent to the proxy client.
fn response_body_chunk(&self, chunk: Chunk) -> Chunk;
}
lazy_static! {
static ref CLIENT: Client<HttpsConnector<HttpConnector>, Body> =
Client::builder().build(HttpsConnector::new(4));
static ref HTTP: Http = Http::new();
}
/// The `hyper::service::Service` that does the proxying and calls your `Mitm`
/// implementation.
#[derive(Debug)]
pub struct MitmProxyService<T: Mitm + Sync> {
_phantom: std::marker::PhantomData<T>,
}
impl<T: Mitm + Sync> MitmProxyService<T> {
/// Creates a new `MitmProxyService`.
#[inline]
pub fn new() -> Self {
MitmProxyService::<T> {
_phantom: std::marker::PhantomData,
}
}
}
impl<T: Mitm + Sync + Send +'static> NewService for MitmProxyService<T> {
type ReqBody = Body;
type ResBody = Body;
type Error = std::io::Error;
type Service = MitmProxyService<T>;
type InitError = std::io::Error;
type Future = FutureResult<Self::Service, Self::InitError>;
fn new_service(&self) -> Self::Future {
future::ok(MitmProxyService::new())
}
}
impl<T: Mitm + Sync + Send +'static> Service for MitmProxyService<T> {
type ReqBody = Body;
type ResBody = Body;
type Error = std::io::Error;
type Future =
Box<dyn Future<Item = Response<Body>, Error = std::io::Error> + Send>;
fn call(&mut self, req: Request<Body>) -> Self::Future {
info!("MitmProxyService::call() handling {:?}", req);
if *req.method() == Method::CONNECT {
Box::new(proxy_connect::<T>(req))
} else {
Box::new(proxy_request::<T>(req))
}
}
}
/// Obtains a connection to the scheme://authority of `uri` from the connection
/// pool.
fn obtain_connection(
uri: Uri,
) -> impl Future<Item = Pooled<PoolClient<Body>>, Error = std::io::Error> {
let key1 = Arc::new(format!(
"{}://{}",
uri.scheme_part().unwrap(),
uri.authority_part().unwrap()
));
let key2 = Arc::clone(&key1);
let result = CLIENT.connection_for(uri, key1).map_err(move |e| {
std::io::Error::new(
std::io::ErrorKind::Other,
format!("error obtaining connection to {}: {:?}", key2, e),
)
});
result
}
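// Pooling note: connections are keyed by scheme://authority, so e.g. requests
// for https://example.com/a and https://example.com/b share the key
// "https://example.com" and can reuse one connection.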
/// Obtains a connection to the remote server and proxies the request, calling
/// the `Mitm` implementation functions, which may manipulate the request and
/// response. Returns a future that resolves to the response or error.
///
/// This function is called for plain http requests, and for https requests
/// received "inside" the fake, tapped `CONNECT` tunnel.
fn proxy_request<T: Mitm + Sync + Send + 'static>(
req: Request<Body>,
) -> impl Future<Item = Response<Body>, Error = std::io::Error> {
obtain_connection(req.uri().to_owned())
.map(|mut connection| {
let mitm1 = Arc::new(T::new(req.uri().to_owned()));
let mitm2 = Arc::clone(&mitm1);
let req = mitm1.request_headers(req);
let (parts, body) = req.into_parts();
let body = Body::wrap_stream(
body.map(move |chunk| mitm1.request_body_chunk(chunk)),
);
let req = Request::from_parts(parts, body);
info!("proxy_request() sending request {:?}", req);
connection
.send_request_retryable(req)
.map(|response| {
let response = mitm2.response_headers(response);
let (parts, body) = response.into_parts();
let body =
Body::wrap_stream(body.map(move |chunk| {
mitm2.response_body_chunk(chunk)
}));
Response::from_parts(parts, body)
})
.map_err(|(e, _f)| {
std::io::Error::new(std::io::ErrorKind::Other, e)
})
})
.flatten()
.or_else(|e| {
info!("proxy_request() returning 502 ({})", e);
future::ok(
Response::builder().status(502).body(Body::empty()).unwrap(),
)
})
}
/// Handles a CONNECT request. Tries to obtain an https connection to the
/// remote server. If that fails, returns 502 Bad Gateway. Otherwise returns
/// 200 OK, then attempts to establish a TLS connection with the proxy client,
/// masquerading as the remote server.
fn proxy_connect<T: Mitm + Sync + Send + 'static>(
connect_req: Request<Body>,
) -> impl Future<Item = Response<Body>, Error = std::io::Error> {
info!("proxy_connect() impersonating {:?}", connect_req.uri());
let authority =
Authority::from_shared(Bytes::from(connect_req.uri().to_string()))
.unwrap();
let tls_cfg = certauth::tls_config(&authority);
let uri = http::uri::Builder::new()
.scheme("https")
.authority(authority)
.path_and_query("/")
.build()
.unwrap();
obtain_connection(uri)
.map(move |_pooled| {
let inner = connect_req.into_body().on_upgrade().map_err(|e| {
info!("proxy_connect() on_upgrade error: {:?}", e);
std::io::Error::new(std::io::ErrorKind::Other, e)
})
.and_then(|upgraded: Upgraded| -> Accept<Upgraded> {
TlsAcceptor::from(tls_cfg).accept(upgraded)
})
.map(move |stream: TlsStream<Upgraded, rustls::ServerSession>| {
info!("proxy_connect() tls connection established with proxy \
client: {:?}", stream);
service_inner_requests::<T>(stream)
})
.map_err(|e: std::io::Error| {
error!("proxy_connect() error from somewhere: {}", e);
})
.flatten();
hyper::rt::spawn(inner);
Response::builder().status(200).body(Body::empty()).unwrap()
})
.or_else(|e| {
info!("proxy_connect() returning 502, failed to connect: {:?}", e);
future::ok(
Response::builder().status(502).body(Body::empty()).unwrap(),
)
})
}
/// Called by `proxy_connect()` once the TLS session has been established with
/// the proxy client. Proxies requests received on the TLS connection.
fn service_inner_requests<T: Mitm + Sync + Send + 'static>(
stream: TlsStream<Upgraded, rustls::ServerSession>,
) -> impl Future<Item = (), Error = ()> {
let svc = service_fn(move |req: Request<Body>| {
// "host" header is required for http 1.1
// XXX but we could fall back on authority
let authority = req.headers().get("host").unwrap().to_str().unwrap();
let uri = http::uri::Builder::new()
.scheme("https")
.authority(authority)
.path_and_query(&req.uri().to_string() as &str)
.build()
.unwrap();
let (mut parts, body) = req.into_parts();
parts.uri = uri;
let req = Request::from_parts(parts, body);
proxy_request::<T>(req)
});
HTTP.serve_connection(stream, svc)
.map_err(|e: hyper::Error| {
if match e.source() {
Some(source) => source
.to_string()
.find("Connection reset by peer")
.is_some(),
None => false,
} | else {
error!("service_inner_requests() serve_connection: {}", e);
};
})
}
| {
info!(
"service_inner_requests() serve_connection: \
client closed connection"
);
} | conditional_block |
lib.rs | //! Monie-in-the-middle http(s) proxy library
//!
//! Observe and manipulate requests by implementing `monie::Mitm`.
//!
//! Here is a skeleton to help get started:
//!
//! ```
//! use futures::future::Future;
//! use hyper::{Body, Chunk, Request, Response, Server};
//! use monie::{Mitm, MitmProxyService};
//!
//! struct MyMitm;
//!
//! impl Mitm for MyMitm {
//! fn new(_: http::uri::Uri) -> MyMitm { MyMitm {} }
//! fn request_headers(&self, req: Request<Body>) -> Request<Body> { req }
//! fn response_headers(&self, res: Response<Body>) -> Response<Body> { res }
//! fn request_body_chunk(&self, chunk: Chunk) -> Chunk { chunk }
//! fn response_body_chunk(&self, chunk: Chunk) -> Chunk { chunk }
//! }
//!
//! fn main() {
//! let addr = ([127, 0, 0, 1], 8000).into();
//! let svc = MitmProxyService::<MyMitm>::new();
//! let server = Server::bind(&addr)
//! .serve(svc)
//! .map_err(|e| eprintln!("server error: {}", e));
//! println!("noop mitm proxy listening on http://{}", addr);
//! hyper::rt::run(server);
//! }
//! ```
//!
//! Other examples can be found at
//! <https://github.com/nlevitt/monie/tree/master/examples>.
#![deny(warnings)]
#![deny(missing_docs)]
#![deny(missing_debug_implementations)]
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
pub mod certauth;
use std::error::Error;
use std::sync::Arc;
use bytes::Bytes;
use futures::future::{self, Future, FutureResult};
use futures::stream::Stream;
use http::method::Method;
use http::uri::{Authority, Uri};
use hyper::client::pool::Pooled;
use hyper::client::{HttpConnector, PoolClient};
use hyper::server::conn::Http;
use hyper::service::{service_fn, NewService, Service};
use hyper::upgrade::Upgraded;
use hyper::{Body, Chunk, Client, Request, Response};
use hyper_rustls::HttpsConnector;
use tokio_rustls::{Accept, TlsAcceptor, TlsStream};
/// Represents the interception of a single request. Users of the library must
/// implement this trait. With it you can observe and manipulate the request
/// and response payload and headers.
pub trait Mitm {
/// Create a new instance of this `Mitm` implementation. The argument `uri`
/// is the uri being proxied. Implementations may do with this what they
/// wish (log it, stash it, ignore it, etc).
fn new(uri: Uri) -> Self;
/// Observe and manipulate the request headers. The `req` argument contains
/// the original request headers received from the proxy client. The
/// request headers returned by this function are sent to the remote
/// server.
fn request_headers(&self, req: Request<Body>) -> Request<Body>;
/// Observe and manipulate a chunk of the request payload. This function
/// may be called zero or more times, depending on the size of the request
/// payload. It will not be called at all in the common case of a GET
/// request with no payload. The `chunk` argument contains an original
/// chunk of the request payload as received from the proxy client. The
/// return value of this function is sent to the remote server.
fn request_body_chunk(&self, chunk: Chunk) -> Chunk;
/// Observe and manipulate the response headers. The `res` argument
/// contains the original response headers received from the remote server.
/// The response headers returned by this function are sent to the proxy
/// client.
fn response_headers(&self, res: Response<Body>) -> Response<Body>;
/// Observe and manipulate a chunk of the response payload. This function
/// may be called zero or more times, depending on the size of the payload.
/// The `chunk` argument represents an unaltered chunk of the response
/// payload as received from the remote server. The return value of this
    /// function is sent to the proxy client.
fn response_body_chunk(&self, chunk: Chunk) -> Chunk;
}
lazy_static! {
static ref CLIENT: Client<HttpsConnector<HttpConnector>, Body> =
Client::builder().build(HttpsConnector::new(4));
static ref HTTP: Http = Http::new();
}
/// The `hyper::service::Service` that does the proxying and calls your `Mitm`
/// implementation.
#[derive(Debug)]
pub struct MitmProxyService<T: Mitm + Sync> {
_phantom: std::marker::PhantomData<T>,
}
impl<T: Mitm + Sync> MitmProxyService<T> {
/// Creates a new `MitmProxyService`.
#[inline]
pub fn new() -> Self {
MitmProxyService::<T> {
_phantom: std::marker::PhantomData,
}
}
}
impl<T: Mitm + Sync + Send +'static> NewService for MitmProxyService<T> {
type ReqBody = Body;
type ResBody = Body;
type Error = std::io::Error;
type Service = MitmProxyService<T>;
type InitError = std::io::Error;
type Future = FutureResult<Self::Service, Self::InitError>;
fn new_service(&self) -> Self::Future {
future::ok(MitmProxyService::new())
}
}
impl<T: Mitm + Sync + Send +'static> Service for MitmProxyService<T> {
type ReqBody = Body;
type ResBody = Body;
type Error = std::io::Error;
type Future =
Box<dyn Future<Item = Response<Body>, Error = std::io::Error> + Send>;
fn call(&mut self, req: Request<Body>) -> Self::Future {
info!("MitmProxyService::call() handling {:?}", req);
if *req.method() == Method::CONNECT {
Box::new(proxy_connect::<T>(req))
} else {
Box::new(proxy_request::<T>(req))
}
}
}
/// Obtains a connection to the scheme://authority of `uri` from the connection
/// pool.
fn obtain_connection(
uri: Uri,
) -> impl Future<Item = Pooled<PoolClient<Body>>, Error = std::io::Error> {
let key1 = Arc::new(format!(
"{}://{}",
uri.scheme_part().unwrap(),
uri.authority_part().unwrap()
));
let key2 = Arc::clone(&key1);
let result = CLIENT.connection_for(uri, key1).map_err(move |e| {
std::io::Error::new(
std::io::ErrorKind::Other,
format!("error obtaining connection to {}: {:?}", key2, e),
)
});
result
}
/// Obtains a connection to the remote server and proxies the request, calling
/// the `Mitm` implementation functions, which may manipulate the request and
/// response. Returns a future that resolves to the response or error.
///
/// This function is called for plain http requests, and for https requests
/// received "inside" the fake, tapped `CONNECT` tunnel.
fn proxy_request<T: Mitm + Sync + Send + 'static>(
req: Request<Body>,
) -> impl Future<Item = Response<Body>, Error = std::io::Error> {
obtain_connection(req.uri().to_owned())
.map(|mut connection| {
let mitm1 = Arc::new(T::new(req.uri().to_owned()));
let mitm2 = Arc::clone(&mitm1);
let req = mitm1.request_headers(req);
let (parts, body) = req.into_parts();
let body = Body::wrap_stream(
body.map(move |chunk| mitm1.request_body_chunk(chunk)),
);
let req = Request::from_parts(parts, body);
info!("proxy_request() sending request {:?}", req);
connection
.send_request_retryable(req)
.map(|response| {
let response = mitm2.response_headers(response);
let (parts, body) = response.into_parts();
let body =
Body::wrap_stream(body.map(move |chunk| {
mitm2.response_body_chunk(chunk)
}));
Response::from_parts(parts, body)
})
.map_err(|(e, _f)| {
std::io::Error::new(std::io::ErrorKind::Other, e)
})
})
.flatten()
.or_else(|e| {
info!("proxy_request() returning 502 ({})", e);
future::ok(
Response::builder().status(502).body(Body::empty()).unwrap(),
)
})
}
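// Failure handling above: connection and send errors are all collapsed into an
// io::Error and surface to the proxy client as a bare 502 response instead of
// tearing down the client side of the proxy.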
/// Handles a CONNECT request. Tries to obtain an https connection to the
/// remote server. If that fails, returns 502 Bad Gateway. Otherwise returns
/// 200 OK, then attempts to establish a TLS connection with the proxy client,
/// masquerading as the remote server.
fn | <T: Mitm + Sync + Send +'static>(
connect_req: Request<Body>,
) -> impl Future<Item = Response<Body>, Error = std::io::Error> {
info!("proxy_connect() impersonating {:?}", connect_req.uri());
let authority =
Authority::from_shared(Bytes::from(connect_req.uri().to_string()))
.unwrap();
let tls_cfg = certauth::tls_config(&authority);
let uri = http::uri::Builder::new()
.scheme("https")
.authority(authority)
.path_and_query("/")
.build()
.unwrap();
obtain_connection(uri)
.map(move |_pooled| {
let inner = connect_req.into_body().on_upgrade().map_err(|e| {
info!("proxy_connect() on_upgrade error: {:?}", e);
std::io::Error::new(std::io::ErrorKind::Other, e)
})
.and_then(|upgraded: Upgraded| -> Accept<Upgraded> {
TlsAcceptor::from(tls_cfg).accept(upgraded)
})
.map(move |stream: TlsStream<Upgraded, rustls::ServerSession>| {
info!("proxy_connect() tls connection established with proxy \
client: {:?}", stream);
service_inner_requests::<T>(stream)
})
.map_err(|e: std::io::Error| {
error!("proxy_connect() error from somewhere: {}", e);
})
.flatten();
hyper::rt::spawn(inner);
Response::builder().status(200).body(Body::empty()).unwrap()
})
.or_else(|e| {
info!("proxy_connect() returning 502, failed to connect: {:?}", e);
future::ok(
Response::builder().status(502).body(Body::empty()).unwrap(),
)
})
}
/// Called by `proxy_connect()` once the TLS session has been established with
/// the proxy client. Proxies requests received on the TLS connection.
fn service_inner_requests<T: Mitm + Sync + Send + 'static>(
stream: TlsStream<Upgraded, rustls::ServerSession>,
) -> impl Future<Item = (), Error = ()> {
let svc = service_fn(move |req: Request<Body>| {
// "host" header is required for http 1.1
// XXX but we could fall back on authority
let authority = req.headers().get("host").unwrap().to_str().unwrap();
let uri = http::uri::Builder::new()
.scheme("https")
.authority(authority)
.path_and_query(&req.uri().to_string() as &str)
.build()
.unwrap();
let (mut parts, body) = req.into_parts();
parts.uri = uri;
let req = Request::from_parts(parts, body);
proxy_request::<T>(req)
});
HTTP.serve_connection(stream, svc)
.map_err(|e: hyper::Error| {
if match e.source() {
Some(source) => source
.to_string()
.find("Connection reset by peer")
.is_some(),
None => false,
} {
info!(
"service_inner_requests() serve_connection: \
client closed connection"
);
} else {
error!("service_inner_requests() serve_connection: {}", e);
};
})
}
| proxy_connect | identifier_name |
lib.rs | //! Monie-in-the-middle http(s) proxy library
//!
//! Observe and manipulate requests by implementing `monie::Mitm`.
//!
//! Here is a skeleton to help get started:
//!
//! ```
//! use futures::future::Future;
//! use hyper::{Body, Chunk, Request, Response, Server};
//! use monie::{Mitm, MitmProxyService};
//!
//! struct MyMitm;
//!
//! impl Mitm for MyMitm {
//! fn new(_: http::uri::Uri) -> MyMitm { MyMitm {} }
//! fn request_headers(&self, req: Request<Body>) -> Request<Body> { req }
//! fn response_headers(&self, res: Response<Body>) -> Response<Body> { res }
//! fn request_body_chunk(&self, chunk: Chunk) -> Chunk { chunk }
//! fn response_body_chunk(&self, chunk: Chunk) -> Chunk { chunk }
//! }
//!
//! fn main() {
//! let addr = ([127, 0, 0, 1], 8000).into();
//! let svc = MitmProxyService::<MyMitm>::new();
//! let server = Server::bind(&addr)
//! .serve(svc)
//! .map_err(|e| eprintln!("server error: {}", e));
//! println!("noop mitm proxy listening on http://{}", addr);
//! hyper::rt::run(server);
//! }
//! ```
//!
//! Other examples can be found at
//! <https://github.com/nlevitt/monie/tree/master/examples>.
#![deny(warnings)]
#![deny(missing_docs)]
#![deny(missing_debug_implementations)]
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
pub mod certauth;
use std::error::Error;
use std::sync::Arc;
use bytes::Bytes;
use futures::future::{self, Future, FutureResult};
use futures::stream::Stream;
use http::method::Method;
use http::uri::{Authority, Uri};
use hyper::client::pool::Pooled;
use hyper::client::{HttpConnector, PoolClient};
use hyper::server::conn::Http;
use hyper::service::{service_fn, NewService, Service};
use hyper::upgrade::Upgraded;
use hyper::{Body, Chunk, Client, Request, Response};
use hyper_rustls::HttpsConnector;
use tokio_rustls::{Accept, TlsAcceptor, TlsStream};
/// Represents the interception of a single request. Users of the library must
/// implement this trait. With it you can observe and manipulate the request
/// and response payload and headers.
pub trait Mitm {
/// Create a new instance of this `Mitm` implementation. The argument `uri`
/// is the uri being proxied. Implementations may do with this what they
/// wish (log it, stash it, ignore it, etc).
fn new(uri: Uri) -> Self;
/// Observe and manipulate the request headers. The `req` argument contains
/// the original request headers received from the proxy client. The
/// request headers returned by this function are sent to the remote
/// server.
fn request_headers(&self, req: Request<Body>) -> Request<Body>;
/// Observe and manipulate a chunk of the request payload. This function
/// may be called zero or more times, depending on the size of the request
/// payload. It will not be called at all in the common case of a GET
/// request with no payload. The `chunk` argument contains an original
/// chunk of the request payload as received from the proxy client. The
/// return value of this function is sent to the remote server.
fn request_body_chunk(&self, chunk: Chunk) -> Chunk;
/// Observe and manipulate the response headers. The `res` argument
/// contains the original response headers received from the remote server.
/// The response headers returned by this function are sent to the proxy
/// client.
fn response_headers(&self, res: Response<Body>) -> Response<Body>;
/// Observe and manipulate a chunk of the response payload. This function
/// may be called zero or more times, depending on the size of the payload.
/// The `chunk` argument represents an unaltered chunk of the response
/// payload as received from the remote server. The return value of this
    /// function is sent to the proxy client.
fn response_body_chunk(&self, chunk: Chunk) -> Chunk;
}
lazy_static! {
static ref CLIENT: Client<HttpsConnector<HttpConnector>, Body> =
Client::builder().build(HttpsConnector::new(4));
static ref HTTP: Http = Http::new();
}
/// The `hyper::service::Service` that does the proxying and calls your `Mitm`
/// implementation.
#[derive(Debug)]
pub struct MitmProxyService<T: Mitm + Sync> {
_phantom: std::marker::PhantomData<T>,
}
impl<T: Mitm + Sync> MitmProxyService<T> {
/// Creates a new `MitmProxyService`.
#[inline]
pub fn new() -> Self {
MitmProxyService::<T> {
_phantom: std::marker::PhantomData,
}
}
}
impl<T: Mitm + Sync + Send +'static> NewService for MitmProxyService<T> {
type ReqBody = Body;
type ResBody = Body;
type Error = std::io::Error;
type Service = MitmProxyService<T>;
type InitError = std::io::Error;
type Future = FutureResult<Self::Service, Self::InitError>;
fn new_service(&self) -> Self::Future {
future::ok(MitmProxyService::new())
}
}
impl<T: Mitm + Sync + Send +'static> Service for MitmProxyService<T> {
type ReqBody = Body;
type ResBody = Body;
type Error = std::io::Error;
type Future =
Box<dyn Future<Item = Response<Body>, Error = std::io::Error> + Send>;
fn call(&mut self, req: Request<Body>) -> Self::Future {
info!("MitmProxyService::call() handling {:?}", req);
if *req.method() == Method::CONNECT {
Box::new(proxy_connect::<T>(req))
} else {
Box::new(proxy_request::<T>(req))
}
}
}
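// Dispatch rule: CONNECT requests (https tunnels) take the certificate-forging
// path in proxy_connect; anything else is an ordinary forward-proxy request
// and is sent straight through proxy_request.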
/// Obtains a connection to the scheme://authority of `uri` from the connection
/// pool.
fn obtain_connection(
uri: Uri,
) -> impl Future<Item = Pooled<PoolClient<Body>>, Error = std::io::Error> {
let key1 = Arc::new(format!(
"{}://{}",
uri.scheme_part().unwrap(),
uri.authority_part().unwrap()
));
let key2 = Arc::clone(&key1);
let result = CLIENT.connection_for(uri, key1).map_err(move |e| {
std::io::Error::new(
std::io::ErrorKind::Other,
format!("error obtaining connection to {}: {:?}", key2, e),
)
});
result
}
/// Obtains a connection to the remote server and proxies the request, calling
/// the `Mitm` implementation functions, which may manipulate the request and
/// reponse. Returns a future that resolves to the response or error. | /// received "inside" the fake, tapped `CONNECT` tunnel.
fn proxy_request<T: Mitm + Sync + Send +'static>(
req: Request<Body>,
) -> impl Future<Item = Response<Body>, Error = std::io::Error> {
obtain_connection(req.uri().to_owned())
.map(|mut connection| {
let mitm1 = Arc::new(T::new(req.uri().to_owned()));
let mitm2 = Arc::clone(&mitm1);
let req = mitm1.request_headers(req);
let (parts, body) = req.into_parts();
let body = Body::wrap_stream(
body.map(move |chunk| mitm1.request_body_chunk(chunk)),
);
let req = Request::from_parts(parts, body);
info!("proxy_request() sending request {:?}", req);
connection
.send_request_retryable(req)
.map(|response| {
let response = mitm2.response_headers(response);
let (parts, body) = response.into_parts();
let body =
Body::wrap_stream(body.map(move |chunk| {
mitm2.response_body_chunk(chunk)
}));
Response::from_parts(parts, body)
})
.map_err(|(e, _f)| {
std::io::Error::new(std::io::ErrorKind::Other, e)
})
})
.flatten()
.or_else(|e| {
info!("proxy_request() returning 502 ({})", e);
future::ok(
Response::builder().status(502).body(Body::empty()).unwrap(),
)
})
}
/// Handles a CONNECT request. Tries to obtain an https connection to the
/// remote server. If that fails, returns 502 Bad Gateway. Otherwise returns
/// 200 OK, then attempts to establish a TLS connection with the proxy client,
/// masquerading as the remote server.
fn proxy_connect<T: Mitm + Sync + Send +'static>(
connect_req: Request<Body>,
) -> impl Future<Item = Response<Body>, Error = std::io::Error> {
info!("proxy_connect() impersonating {:?}", connect_req.uri());
let authority =
Authority::from_shared(Bytes::from(connect_req.uri().to_string()))
.unwrap();
let tls_cfg = certauth::tls_config(&authority);
let uri = http::uri::Builder::new()
.scheme("https")
.authority(authority)
.path_and_query("/")
.build()
.unwrap();
obtain_connection(uri)
.map(move |_pooled| {
let inner = connect_req.into_body().on_upgrade().map_err(|e| {
info!("proxy_connect() on_upgrade error: {:?}", e);
std::io::Error::new(std::io::ErrorKind::Other, e)
})
.and_then(|upgraded: Upgraded| -> Accept<Upgraded> {
TlsAcceptor::from(tls_cfg).accept(upgraded)
})
.map(move |stream: TlsStream<Upgraded, rustls::ServerSession>| {
info!("proxy_connect() tls connection established with proxy \
client: {:?}", stream);
service_inner_requests::<T>(stream)
})
.map_err(|e: std::io::Error| {
error!("proxy_connect() error from somewhere: {}", e);
})
.flatten();
hyper::rt::spawn(inner);
Response::builder().status(200).body(Body::empty()).unwrap()
})
.or_else(|e| {
info!("proxy_connect() returning 502, failed to connect: {:?}", e);
future::ok(
Response::builder().status(502).body(Body::empty()).unwrap(),
)
})
}
/// Called by `proxy_connect()` once the TLS session has been established with
/// the proxy client. Proxies requests received on the TLS connection.
fn service_inner_requests<T: Mitm + Sync + Send +'static>(
stream: TlsStream<Upgraded, rustls::ServerSession>,
) -> impl Future<Item = (), Error = ()> {
let svc = service_fn(move |req: Request<Body>| {
// "host" header is required for http 1.1
// XXX but we could fall back on authority
let authority = req.headers().get("host").unwrap().to_str().unwrap();
let uri = http::uri::Builder::new()
.scheme("https")
.authority(authority)
.path_and_query(&req.uri().to_string() as &str)
.build()
.unwrap();
let (mut parts, body) = req.into_parts();
parts.uri = uri;
let req = Request::from_parts(parts, body);
proxy_request::<T>(req)
});
HTTP.serve_connection(stream, svc)
.map_err(|e: hyper::Error| {
if match e.source() {
Some(source) => source
.to_string()
.find("Connection reset by peer")
.is_some(),
None => false,
} {
info!(
"service_inner_requests() serve_connection: \
client closed connection"
);
} else {
error!("service_inner_requests() serve_connection: {}", e);
};
})
} | ///
/// This function is called for plain http requests, and for https requests | random_line_split |
block.rs | use crate::lookup_table::LookupTable;
use crate::properties::Block;
use Block::*;
impl From<char> for Block {
#[inline]
fn from(c: char) -> Self {
if c < ROW0_LIMIT {
return ROW0_TABLE.get_or(&(c as u8), No_Block);
}
if c < PLANE0_LIMIT {
return PLANE0_TABLE.get_or(&(c as u16), No_Block);
}
return SUPPLEMENTARY_TABLE.get_or(&(c as u32), No_Block);
}
}
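// Editor's sketch of a spot-check for the tiered lookup (hypothetical test,
// not in the original file): 'A' (U+0041) should resolve through the u8 table,
// 'Ω' (U+03A9) through the u16 table, and '😀' (U+1F600) through the u32
// table. Plain `match` is used so no extra derives on `Block` are assumed.
#[test]
fn spot_check_tiers() {
    match Block::from('A') {
        Basic => {}
        _ => panic!("expected Basic for 'A'"),
    }
    match Block::from('Ω') {
        Greek => {}
        _ => panic!("expected Greek for 'Ω'"),
    }
    match Block::from('😀') {
        Emoticons => {}
        _ => panic!("expected Emoticons for '😀'"),
    }
}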
#[test]
fn validate_tables() |
const ROW0_TABLE: LookupTable<u8, Block> = lookup_table![
(0x00, 0x7F, Basic),
];
const ROW0_LIMIT: char = '\u{80}';
const PLANE0_TABLE: LookupTable<u16, Block> = lookup_table![
(0x0080, 0x024F, Latin),
(0x0250, 0x02AF, IPA),
(0x02B0, 0x02FF, Spacing),
(0x0300, 0x036F, Combining),
(0x0370, 0x03FF, Greek),
(0x0400, 0x052F, Cyrillic),
(0x0530, 0x058F, Armenian),
(0x0590, 0x05FF, Hebrew),
(0x0600, 0x06FF, Arabic),
(0x0700, 0x074F, Syriac),
(0x0750, 0x077F, Arabic),
(0x0780, 0x07BF, Thaana),
(0x07C0, 0x07FF, NKo),
(0x0800, 0x083F, Samaritan),
(0x0840, 0x085F, Mandaic),
(0x0860, 0x086F, Syriac),
(0x08A0, 0x08FF, Arabic),
(0x0900, 0x097F, Devanagari),
(0x0980, 0x09FF, Bengali),
(0x0A00, 0x0A7F, Gurmukhi),
(0x0A80, 0x0AFF, Gujarati),
(0x0B00, 0x0B7F, Oriya),
(0x0B80, 0x0BFF, Tamil),
(0x0C00, 0x0C7F, Telugu),
(0x0C80, 0x0CFF, Kannada),
(0x0D00, 0x0D7F, Malayalam),
(0x0D80, 0x0DFF, Sinhala),
(0x0E00, 0x0E7F, Thai),
(0x0E80, 0x0EFF, Lao),
(0x0F00, 0x0FFF, Tibetan),
(0x1000, 0x109F, Myanmar),
(0x10A0, 0x10FF, Georgian),
(0x1100, 0x11FF, Hangul),
(0x1200, 0x139F, Ethiopic),
(0x13A0, 0x13FF, Cherokee),
(0x1400, 0x167F, Unified),
(0x1680, 0x169F, Ogham),
(0x16A0, 0x16FF, Runic),
(0x1700, 0x171F, Tagalog),
(0x1720, 0x173F, Hanunoo),
(0x1740, 0x175F, Buhid),
(0x1760, 0x177F, Tagbanwa),
(0x1780, 0x17FF, Khmer),
(0x1800, 0x18AF, Mongolian),
(0x18B0, 0x18FF, Unified),
(0x1900, 0x194F, Limbu),
(0x1950, 0x197F, Tai),
(0x1980, 0x19DF, New),
(0x19E0, 0x19FF, Khmer),
(0x1A00, 0x1A1F, Buginese),
(0x1A20, 0x1AAF, Tai),
(0x1AB0, 0x1AFF, Combining),
(0x1B00, 0x1B7F, Balinese),
(0x1B80, 0x1BBF, Sundanese),
(0x1BC0, 0x1BFF, Batak),
(0x1C00, 0x1C4F, Lepcha),
(0x1C50, 0x1C7F, Ol),
(0x1C80, 0x1C8F, Cyrillic),
(0x1C90, 0x1CBF, Georgian),
(0x1CC0, 0x1CCF, Sundanese),
(0x1CD0, 0x1CFF, Vedic),
(0x1D00, 0x1DBF, Phonetic),
(0x1DC0, 0x1DFF, Combining),
(0x1E00, 0x1EFF, Latin),
(0x1F00, 0x1FFF, Greek),
(0x2000, 0x206F, General),
(0x2070, 0x209F, Superscripts),
(0x20A0, 0x20CF, Currency),
(0x20D0, 0x20FF, Combining),
(0x2100, 0x214F, Letterlike),
(0x2150, 0x218F, Number),
(0x2190, 0x21FF, Arrows),
(0x2200, 0x22FF, Mathematical),
(0x2300, 0x23FF, Miscellaneous),
(0x2400, 0x243F, Control),
(0x2440, 0x245F, Optical),
(0x2460, 0x24FF, Enclosed),
(0x2500, 0x257F, Box),
(0x2580, 0x259F, Block),
(0x25A0, 0x25FF, Geometric),
(0x2600, 0x26FF, Miscellaneous),
(0x2700, 0x27BF, Dingbats),
(0x27C0, 0x27EF, Miscellaneous),
(0x27F0, 0x27FF, Supplemental),
(0x2800, 0x28FF, Braille),
(0x2900, 0x297F, Supplemental),
(0x2980, 0x29FF, Miscellaneous),
(0x2A00, 0x2AFF, Supplemental),
(0x2B00, 0x2BFF, Miscellaneous),
(0x2C00, 0x2C5F, Glagolitic),
(0x2C60, 0x2C7F, Latin),
(0x2C80, 0x2CFF, Coptic),
(0x2D00, 0x2D2F, Georgian),
(0x2D30, 0x2D7F, Tifinagh),
(0x2D80, 0x2DDF, Ethiopic),
(0x2DE0, 0x2DFF, Cyrillic),
(0x2E00, 0x2E7F, Supplemental),
(0x2E80, 0x2EFF, CJK),
(0x2F00, 0x2FDF, Kangxi),
(0x2FF0, 0x2FFF, Ideographic),
(0x3000, 0x303F, CJK),
(0x3040, 0x309F, Hiragana),
(0x30A0, 0x30FF, Katakana),
(0x3100, 0x312F, Bopomofo),
(0x3130, 0x318F, Hangul),
(0x3190, 0x319F, Kanbun),
(0x31A0, 0x31BF, Bopomofo),
(0x31C0, 0x31EF, CJK),
(0x31F0, 0x31FF, Katakana),
(0x3200, 0x32FF, Enclosed),
(0x3300, 0x4DBF, CJK),
(0x4DC0, 0x4DFF, Yijing),
(0x4E00, 0x9FFF, CJK),
(0xA000, 0xA4CF, Yi),
(0xA4D0, 0xA4FF, Lisu),
(0xA500, 0xA63F, Vai),
(0xA640, 0xA69F, Cyrillic),
(0xA6A0, 0xA6FF, Bamum),
(0xA700, 0xA71F, Modifier),
(0xA720, 0xA7FF, Latin),
(0xA800, 0xA82F, Syloti),
(0xA830, 0xA83F, Common),
(0xA840, 0xA87F, Phags),
(0xA880, 0xA8DF, Saurashtra),
(0xA8E0, 0xA8FF, Devanagari),
(0xA900, 0xA92F, Kayah),
(0xA930, 0xA95F, Rejang),
(0xA960, 0xA97F, Hangul),
(0xA980, 0xA9DF, Javanese),
(0xA9E0, 0xA9FF, Myanmar),
(0xAA00, 0xAA5F, Cham),
(0xAA60, 0xAA7F, Myanmar),
(0xAA80, 0xAADF, Tai),
(0xAAE0, 0xAAFF, Meetei),
(0xAB00, 0xAB2F, Ethiopic),
(0xAB30, 0xAB6F, Latin),
(0xAB70, 0xABBF, Cherokee),
(0xABC0, 0xABFF, Meetei),
(0xAC00, 0xD7FF, Hangul),
(0xD800, 0xDBFF, High),
(0xDC00, 0xDFFF, Low),
(0xE000, 0xF8FF, Private),
(0xF900, 0xFAFF, CJK),
(0xFB00, 0xFB4F, Alphabetic),
(0xFB50, 0xFDFF, Arabic),
(0xFE00, 0xFE0F, Variation),
(0xFE10, 0xFE1F, Vertical),
(0xFE20, 0xFE2F, Combining),
(0xFE30, 0xFE4F, CJK),
(0xFE50, 0xFE6F, Small),
(0xFE70, 0xFEFF, Arabic),
(0xFF00, 0xFFEF, Halfwidth),
(0xFFF0, 0xFFFF, Specials),
];
const PLANE0_LIMIT: char = '\u{10000}';
const SUPPLEMENTARY_TABLE: LookupTable<u32, Block> = lookup_table![
(0x010000, 0x0100FF, Linear),
(0x010100, 0x01013F, Aegean),
(0x010140, 0x0101CF, Ancient),
(0x0101D0, 0x0101FF, Phaistos),
(0x010280, 0x01029F, Lycian),
(0x0102A0, 0x0102DF, Carian),
(0x0102E0, 0x0102FF, Coptic),
(0x010300, 0x01032F, Old),
(0x010330, 0x01034F, Gothic),
(0x010350, 0x01037F, Old),
(0x010380, 0x01039F, Ugaritic),
(0x0103A0, 0x0103DF, Old),
(0x010400, 0x01044F, Deseret),
(0x010450, 0x01047F, Shavian),
(0x010480, 0x0104AF, Osmanya),
(0x0104B0, 0x0104FF, Osage),
(0x010500, 0x01052F, Elbasan),
(0x010530, 0x01056F, Caucasian),
(0x010600, 0x01077F, Linear),
(0x010800, 0x01083F, Cypriot),
(0x010840, 0x01085F, Imperial),
(0x010860, 0x01087F, Palmyrene),
(0x010880, 0x0108AF, Nabataean),
(0x0108E0, 0x0108FF, Hatran),
(0x010900, 0x01091F, Phoenician),
(0x010920, 0x01093F, Lydian),
(0x010980, 0x0109FF, Meroitic),
(0x010A00, 0x010A5F, Kharoshthi),
(0x010A60, 0x010A9F, Old),
(0x010AC0, 0x010AFF, Manichaean),
(0x010B00, 0x010B3F, Avestan),
(0x010B40, 0x010B7F, Inscriptional),
(0x010B80, 0x010BAF, Psalter),
(0x010C00, 0x010C4F, Old),
(0x010C80, 0x010CFF, Old),
(0x010D00, 0x010D3F, Hanifi),
(0x010E60, 0x010E7F, Rumi),
(0x010F00, 0x010F2F, Old),
(0x010F30, 0x010F6F, Sogdian),
(0x010FE0, 0x010FFF, Elymaic),
(0x011000, 0x01107F, Brahmi),
(0x011080, 0x0110CF, Kaithi),
(0x0110D0, 0x0110FF, Sora),
(0x011100, 0x01114F, Chakma),
(0x011150, 0x01117F, Mahajani),
(0x011180, 0x0111DF, Sharada),
(0x0111E0, 0x0111FF, Sinhala),
(0x011200, 0x01124F, Khojki),
(0x011280, 0x0112AF, Multani),
(0x0112B0, 0x0112FF, Khudawadi),
(0x011300, 0x01137F, Grantha),
(0x011400, 0x01147F, Newa),
(0x011480, 0x0114DF, Tirhuta),
(0x011580, 0x0115FF, Siddham),
(0x011600, 0x01165F, Modi),
(0x011660, 0x01167F, Mongolian),
(0x011680, 0x0116CF, Takri),
(0x011700, 0x01173F, Ahom),
(0x011800, 0x01184F, Dogra),
(0x0118A0, 0x0118FF, Warang),
(0x0119A0, 0x0119FF, Nandinagari),
(0x011A00, 0x011A4F, Zanabazar),
(0x011A50, 0x011AAF, Soyombo),
(0x011AC0, 0x011AFF, Pau),
(0x011C00, 0x011C6F, Bhaiksuki),
(0x011C70, 0x011CBF, Marchen),
(0x011D00, 0x011D5F, Masaram),
(0x011D60, 0x011DAF, Gunjala),
(0x011EE0, 0x011EFF, Makasar),
(0x011FC0, 0x011FFF, Tamil),
(0x012000, 0x01247F, Cuneiform),
(0x012480, 0x01254F, Early),
(0x013000, 0x01343F, Egyptian),
(0x014400, 0x01467F, Anatolian),
(0x016800, 0x016A3F, Bamum),
(0x016A40, 0x016A6F, Mro),
(0x016AD0, 0x016AFF, Bassa),
(0x016B00, 0x016B8F, Pahawh),
(0x016E40, 0x016E9F, Medefaidrin),
(0x016F00, 0x016F9F, Miao),
(0x016FE0, 0x016FFF, Ideographic),
(0x017000, 0x018AFF, Tangut),
(0x01B000, 0x01B12F, Kana),
(0x01B130, 0x01B16F, Small),
(0x01B170, 0x01B2FF, Nushu),
(0x01BC00, 0x01BC9F, Duployan),
(0x01BCA0, 0x01BCAF, Shorthand),
(0x01D000, 0x01D0FF, Byzantine),
(0x01D100, 0x01D1FF, Musical),
(0x01D200, 0x01D24F, Ancient),
(0x01D2E0, 0x01D2FF, Mayan),
(0x01D300, 0x01D35F, Tai),
(0x01D360, 0x01D37F, Counting),
(0x01D400, 0x01D7FF, Mathematical),
(0x01D800, 0x01DAAF, Sutton),
(0x01E000, 0x01E02F, Glagolitic),
(0x01E100, 0x01E14F, Nyiakeng),
(0x01E2C0, 0x01E2FF, Wancho),
(0x01E800, 0x01E8DF, Mende),
(0x01E900, 0x01E95F, Adlam),
(0x01EC70, 0x01ECBF, Indic),
(0x01ED00, 0x01ED4F, Ottoman),
(0x01EE00, 0x01EEFF, Arabic),
(0x01F000, 0x01F02F, Mahjong),
(0x01F030, 0x01F09F, Domino),
(0x01F0A0, 0x01F0FF, Playing),
(0x01F100, 0x01F2FF, Enclosed),
(0x01F300, 0x01F5FF, Miscellaneous),
(0x01F600, 0x01F64F, Emoticons),
(0x01F650, 0x01F67F, Ornamental),
(0x01F680, 0x01F6FF, Transport),
(0x01F700, 0x01F77F, Alchemical),
(0x01F780, 0x01F7FF, Geometric),
(0x01F800, 0x01F9FF, Supplemental),
(0x01FA00, 0x01FA6F, Chess),
(0x01FA70, 0x01FAFF, Symbols),
(0x020000, 0x02A6DF, CJK),
(0x02A700, 0x02EBEF, CJK),
(0x02F800, 0x02FA1F, CJK),
(0x0E0000, 0x0E007F, Tags),
(0x0E0100, 0x0E01EF, Variation),
(0x0F0000, 0x10FFFF, Supplementary),
];
| {
use std::convert::TryInto;
ROW0_TABLE.validate();
if let Ok(x) = (ROW0_LIMIT as u32).try_into() { assert!(!ROW0_TABLE.contains(&x)); }
PLANE0_TABLE.validate();
if let Ok(x) = (PLANE0_LIMIT as u32).try_into() { assert!(!PLANE0_TABLE.contains(&x)); }
SUPPLEMENTARY_TABLE.validate();
} | identifier_body |
block.rs | use crate::lookup_table::LookupTable;
use crate::properties::Block;
use Block::*;
impl From<char> for Block {
#[inline]
fn from(c: char) -> Self {
if c < ROW0_LIMIT {
return ROW0_TABLE.get_or(&(c as u8), No_Block);
}
if c < PLANE0_LIMIT {
return PLANE0_TABLE.get_or(&(c as u16), No_Block);
}
        SUPPLEMENTARY_TABLE.get_or(&(c as u32), No_Block)
}
}
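// Worked example (added for illustration; not part of the original file): how the
// tiered dispatch above resolves a few sample scalars. Assumes `Block` derives
// PartialEq + Debug and uses the (truncated) variant names from the tables below.
#[cfg(test)]
mod tier_dispatch_example {
    use super::*;

    #[test]
    fn each_tier_answers_for_its_range() {
        // 'a' (U+0061) sits below ROW0_LIMIT, so the u8-keyed ASCII table answers.
        assert_eq!(Block::from('a'), Basic);
        // U+3042 HIRAGANA LETTER A is in Plane 0 but at or above ROW0_LIMIT.
        assert_eq!(Block::from('\u{3042}'), Hiragana);
        // U+1F600 GRINNING FACE lives in a supplementary plane.
        assert_eq!(Block::from('\u{1F600}'), Emoticons);
    }
}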
#[test]
fn validate_tables() {
use std::convert::TryInto;
ROW0_TABLE.validate();
if let Ok(x) = (ROW0_LIMIT as u32).try_into() { assert!(!ROW0_TABLE.contains(&x)); }
PLANE0_TABLE.validate();
if let Ok(x) = (PLANE0_LIMIT as u32).try_into() { assert!(!PLANE0_TABLE.contains(&x)); }
SUPPLEMENTARY_TABLE.validate();
}
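// What the test above pins down (reading from the code): `validate` presumably
// checks each table's internal invariants, and the `contains` assertions verify
// that each tier's limit lies just past the narrower table's last range, so no
// scalar is ever routed to a table whose key type cannot hold it.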
const ROW0_TABLE: LookupTable<u8, Block> = lookup_table![
(0x00, 0x7F, Basic),
];
const ROW0_LIMIT: char = '\u{80}';
const PLANE0_TABLE: LookupTable<u16, Block> = lookup_table![
(0x0080, 0x024F, Latin),
(0x0250, 0x02AF, IPA),
(0x02B0, 0x02FF, Spacing),
(0x0300, 0x036F, Combining),
(0x0370, 0x03FF, Greek),
(0x0400, 0x052F, Cyrillic),
(0x0530, 0x058F, Armenian),
(0x0590, 0x05FF, Hebrew),
(0x0600, 0x06FF, Arabic),
(0x0700, 0x074F, Syriac),
(0x0750, 0x077F, Arabic),
(0x0780, 0x07BF, Thaana),
(0x07C0, 0x07FF, NKo),
(0x0800, 0x083F, Samaritan),
(0x0840, 0x085F, Mandaic),
(0x0860, 0x086F, Syriac),
(0x08A0, 0x08FF, Arabic),
(0x0900, 0x097F, Devanagari),
(0x0980, 0x09FF, Bengali),
(0x0A00, 0x0A7F, Gurmukhi),
(0x0A80, 0x0AFF, Gujarati),
(0x0B00, 0x0B7F, Oriya),
(0x0B80, 0x0BFF, Tamil),
(0x0C00, 0x0C7F, Telugu),
(0x0C80, 0x0CFF, Kannada),
(0x0D00, 0x0D7F, Malayalam),
(0x0D80, 0x0DFF, Sinhala),
(0x0E00, 0x0E7F, Thai),
(0x0E80, 0x0EFF, Lao),
(0x0F00, 0x0FFF, Tibetan),
(0x1000, 0x109F, Myanmar),
(0x10A0, 0x10FF, Georgian),
(0x1100, 0x11FF, Hangul),
(0x1200, 0x139F, Ethiopic),
(0x13A0, 0x13FF, Cherokee),
(0x1400, 0x167F, Unified),
(0x1680, 0x169F, Ogham),
(0x16A0, 0x16FF, Runic),
(0x1700, 0x171F, Tagalog),
(0x1720, 0x173F, Hanunoo),
(0x1740, 0x175F, Buhid),
(0x1760, 0x177F, Tagbanwa),
(0x1780, 0x17FF, Khmer),
(0x1800, 0x18AF, Mongolian),
(0x18B0, 0x18FF, Unified),
(0x1900, 0x194F, Limbu),
(0x1950, 0x197F, Tai),
(0x1980, 0x19DF, New),
(0x19E0, 0x19FF, Khmer),
(0x1A00, 0x1A1F, Buginese),
(0x1A20, 0x1AAF, Tai),
(0x1AB0, 0x1AFF, Combining),
(0x1B00, 0x1B7F, Balinese),
(0x1B80, 0x1BBF, Sundanese),
(0x1BC0, 0x1BFF, Batak),
(0x1C00, 0x1C4F, Lepcha),
(0x1C50, 0x1C7F, Ol), | (0x1CC0, 0x1CCF, Sundanese),
(0x1CD0, 0x1CFF, Vedic),
(0x1D00, 0x1DBF, Phonetic),
(0x1DC0, 0x1DFF, Combining),
(0x1E00, 0x1EFF, Latin),
(0x1F00, 0x1FFF, Greek),
(0x2000, 0x206F, General),
(0x2070, 0x209F, Superscripts),
(0x20A0, 0x20CF, Currency),
(0x20D0, 0x20FF, Combining),
(0x2100, 0x214F, Letterlike),
(0x2150, 0x218F, Number),
(0x2190, 0x21FF, Arrows),
(0x2200, 0x22FF, Mathematical),
(0x2300, 0x23FF, Miscellaneous),
(0x2400, 0x243F, Control),
(0x2440, 0x245F, Optical),
(0x2460, 0x24FF, Enclosed),
(0x2500, 0x257F, Box),
(0x2580, 0x259F, Block),
(0x25A0, 0x25FF, Geometric),
(0x2600, 0x26FF, Miscellaneous),
(0x2700, 0x27BF, Dingbats),
(0x27C0, 0x27EF, Miscellaneous),
(0x27F0, 0x27FF, Supplemental),
(0x2800, 0x28FF, Braille),
(0x2900, 0x297F, Supplemental),
(0x2980, 0x29FF, Miscellaneous),
(0x2A00, 0x2AFF, Supplemental),
(0x2B00, 0x2BFF, Miscellaneous),
(0x2C00, 0x2C5F, Glagolitic),
(0x2C60, 0x2C7F, Latin),
(0x2C80, 0x2CFF, Coptic),
(0x2D00, 0x2D2F, Georgian),
(0x2D30, 0x2D7F, Tifinagh),
(0x2D80, 0x2DDF, Ethiopic),
(0x2DE0, 0x2DFF, Cyrillic),
(0x2E00, 0x2E7F, Supplemental),
(0x2E80, 0x2EFF, CJK),
(0x2F00, 0x2FDF, Kangxi),
(0x2FF0, 0x2FFF, Ideographic),
(0x3000, 0x303F, CJK),
(0x3040, 0x309F, Hiragana),
(0x30A0, 0x30FF, Katakana),
(0x3100, 0x312F, Bopomofo),
(0x3130, 0x318F, Hangul),
(0x3190, 0x319F, Kanbun),
(0x31A0, 0x31BF, Bopomofo),
(0x31C0, 0x31EF, CJK),
(0x31F0, 0x31FF, Katakana),
(0x3200, 0x32FF, Enclosed),
(0x3300, 0x4DBF, CJK),
(0x4DC0, 0x4DFF, Yijing),
(0x4E00, 0x9FFF, CJK),
(0xA000, 0xA4CF, Yi),
(0xA4D0, 0xA4FF, Lisu),
(0xA500, 0xA63F, Vai),
(0xA640, 0xA69F, Cyrillic),
(0xA6A0, 0xA6FF, Bamum),
(0xA700, 0xA71F, Modifier),
(0xA720, 0xA7FF, Latin),
(0xA800, 0xA82F, Syloti),
(0xA830, 0xA83F, Common),
(0xA840, 0xA87F, Phags),
(0xA880, 0xA8DF, Saurashtra),
(0xA8E0, 0xA8FF, Devanagari),
(0xA900, 0xA92F, Kayah),
(0xA930, 0xA95F, Rejang),
(0xA960, 0xA97F, Hangul),
(0xA980, 0xA9DF, Javanese),
(0xA9E0, 0xA9FF, Myanmar),
(0xAA00, 0xAA5F, Cham),
(0xAA60, 0xAA7F, Myanmar),
(0xAA80, 0xAADF, Tai),
(0xAAE0, 0xAAFF, Meetei),
(0xAB00, 0xAB2F, Ethiopic),
(0xAB30, 0xAB6F, Latin),
(0xAB70, 0xABBF, Cherokee),
(0xABC0, 0xABFF, Meetei),
(0xAC00, 0xD7FF, Hangul),
(0xD800, 0xDBFF, High),
(0xDC00, 0xDFFF, Low),
(0xE000, 0xF8FF, Private),
(0xF900, 0xFAFF, CJK),
(0xFB00, 0xFB4F, Alphabetic),
(0xFB50, 0xFDFF, Arabic),
(0xFE00, 0xFE0F, Variation),
(0xFE10, 0xFE1F, Vertical),
(0xFE20, 0xFE2F, Combining),
(0xFE30, 0xFE4F, CJK),
(0xFE50, 0xFE6F, Small),
(0xFE70, 0xFEFF, Arabic),
(0xFF00, 0xFFEF, Halfwidth),
(0xFFF0, 0xFFFF, Specials),
];
const PLANE0_LIMIT: char = '\u{10000}';
const SUPPLEMENTARY_TABLE: LookupTable<u32, Block> = lookup_table![
(0x010000, 0x0100FF, Linear),
(0x010100, 0x01013F, Aegean),
(0x010140, 0x0101CF, Ancient),
(0x0101D0, 0x0101FF, Phaistos),
(0x010280, 0x01029F, Lycian),
(0x0102A0, 0x0102DF, Carian),
(0x0102E0, 0x0102FF, Coptic),
(0x010300, 0x01032F, Old),
(0x010330, 0x01034F, Gothic),
(0x010350, 0x01037F, Old),
(0x010380, 0x01039F, Ugaritic),
(0x0103A0, 0x0103DF, Old),
(0x010400, 0x01044F, Deseret),
(0x010450, 0x01047F, Shavian),
(0x010480, 0x0104AF, Osmanya),
(0x0104B0, 0x0104FF, Osage),
(0x010500, 0x01052F, Elbasan),
(0x010530, 0x01056F, Caucasian),
(0x010600, 0x01077F, Linear),
(0x010800, 0x01083F, Cypriot),
(0x010840, 0x01085F, Imperial),
(0x010860, 0x01087F, Palmyrene),
(0x010880, 0x0108AF, Nabataean),
(0x0108E0, 0x0108FF, Hatran),
(0x010900, 0x01091F, Phoenician),
(0x010920, 0x01093F, Lydian),
(0x010980, 0x0109FF, Meroitic),
(0x010A00, 0x010A5F, Kharoshthi),
(0x010A60, 0x010A9F, Old),
(0x010AC0, 0x010AFF, Manichaean),
(0x010B00, 0x010B3F, Avestan),
(0x010B40, 0x010B7F, Inscriptional),
(0x010B80, 0x010BAF, Psalter),
(0x010C00, 0x010C4F, Old),
(0x010C80, 0x010CFF, Old),
(0x010D00, 0x010D3F, Hanifi),
(0x010E60, 0x010E7F, Rumi),
(0x010F00, 0x010F2F, Old),
(0x010F30, 0x010F6F, Sogdian),
(0x010FE0, 0x010FFF, Elymaic),
(0x011000, 0x01107F, Brahmi),
(0x011080, 0x0110CF, Kaithi),
(0x0110D0, 0x0110FF, Sora),
(0x011100, 0x01114F, Chakma),
(0x011150, 0x01117F, Mahajani),
(0x011180, 0x0111DF, Sharada),
(0x0111E0, 0x0111FF, Sinhala),
(0x011200, 0x01124F, Khojki),
(0x011280, 0x0112AF, Multani),
(0x0112B0, 0x0112FF, Khudawadi),
(0x011300, 0x01137F, Grantha),
(0x011400, 0x01147F, Newa),
(0x011480, 0x0114DF, Tirhuta),
(0x011580, 0x0115FF, Siddham),
(0x011600, 0x01165F, Modi),
(0x011660, 0x01167F, Mongolian),
(0x011680, 0x0116CF, Takri),
(0x011700, 0x01173F, Ahom),
(0x011800, 0x01184F, Dogra),
(0x0118A0, 0x0118FF, Warang),
(0x0119A0, 0x0119FF, Nandinagari),
(0x011A00, 0x011A4F, Zanabazar),
(0x011A50, 0x011AAF, Soyombo),
(0x011AC0, 0x011AFF, Pau),
(0x011C00, 0x011C6F, Bhaiksuki),
(0x011C70, 0x011CBF, Marchen),
(0x011D00, 0x011D5F, Masaram),
(0x011D60, 0x011DAF, Gunjala),
(0x011EE0, 0x011EFF, Makasar),
(0x011FC0, 0x011FFF, Tamil),
(0x012000, 0x01247F, Cuneiform),
(0x012480, 0x01254F, Early),
(0x013000, 0x01343F, Egyptian),
(0x014400, 0x01467F, Anatolian),
(0x016800, 0x016A3F, Bamum),
(0x016A40, 0x016A6F, Mro),
(0x016AD0, 0x016AFF, Bassa),
(0x016B00, 0x016B8F, Pahawh),
(0x016E40, 0x016E9F, Medefaidrin),
(0x016F00, 0x016F9F, Miao),
(0x016FE0, 0x016FFF, Ideographic),
(0x017000, 0x018AFF, Tangut),
(0x01B000, 0x01B12F, Kana),
(0x01B130, 0x01B16F, Small),
(0x01B170, 0x01B2FF, Nushu),
(0x01BC00, 0x01BC9F, Duployan),
(0x01BCA0, 0x01BCAF, Shorthand),
(0x01D000, 0x01D0FF, Byzantine),
(0x01D100, 0x01D1FF, Musical),
(0x01D200, 0x01D24F, Ancient),
(0x01D2E0, 0x01D2FF, Mayan),
(0x01D300, 0x01D35F, Tai),
(0x01D360, 0x01D37F, Counting),
(0x01D400, 0x01D7FF, Mathematical),
(0x01D800, 0x01DAAF, Sutton),
(0x01E000, 0x01E02F, Glagolitic),
(0x01E100, 0x01E14F, Nyiakeng),
(0x01E2C0, 0x01E2FF, Wancho),
(0x01E800, 0x01E8DF, Mende),
(0x01E900, 0x01E95F, Adlam),
(0x01EC70, 0x01ECBF, Indic),
(0x01ED00, 0x01ED4F, Ottoman),
(0x01EE00, 0x01EEFF, Arabic),
(0x01F000, 0x01F02F, Mahjong),
(0x01F030, 0x01F09F, Domino),
(0x01F0A0, 0x01F0FF, Playing),
(0x01F100, 0x01F2FF, Enclosed),
(0x01F300, 0x01F5FF, Miscellaneous),
(0x01F600, 0x01F64F, Emoticons),
(0x01F650, 0x01F67F, Ornamental),
(0x01F680, 0x01F6FF, Transport),
(0x01F700, 0x01F77F, Alchemical),
(0x01F780, 0x01F7FF, Geometric),
(0x01F800, 0x01F9FF, Supplemental),
(0x01FA00, 0x01FA6F, Chess),
(0x01FA70, 0x01FAFF, Symbols),
(0x020000, 0x02A6DF, CJK),
(0x02A700, 0x02EBEF, CJK),
(0x02F800, 0x02FA1F, CJK),
(0x0E0000, 0x0E007F, Tags),
(0x0E0100, 0x0E01EF, Variation),
(0x0F0000, 0x10FFFF, Supplementary),
]; | (0x1C80, 0x1C8F, Cyrillic),
(0x1C90, 0x1CBF, Georgian), | random_line_split |
block.rs | use crate::lookup_table::LookupTable;
use crate::properties::Block;
use Block::*;
impl From<char> for Block {
#[inline]
fn from(c: char) -> Self {
if c < ROW0_LIMIT {
return ROW0_TABLE.get_or(&(c as u8), No_Block);
}
if c < PLANE0_LIMIT {
return PLANE0_TABLE.get_or(&(c as u16), No_Block);
}
        SUPPLEMENTARY_TABLE.get_or(&(c as u32), No_Block)
}
}
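// Layout note: each tier's key type matches the widest code point it can hold
// (u8 for U+0000..=U+007F, u16 for the rest of Plane 0, u32 beyond), so the
// hot ASCII path stays on a tiny table and wider keys are only paid for where
// they are actually needed.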
#[test]
fn validate_tables() {
use std::convert::TryInto;
ROW0_TABLE.validate();
if let Ok(x) = (ROW0_LIMIT as u32).try_into() { assert!(!ROW0_TABLE.contains(&x)); }
PLANE0_TABLE.validate();
if let Ok(x) = (PLANE0_LIMIT as u32).try_into() |
SUPPLEMENTARY_TABLE.validate();
}
const ROW0_TABLE: LookupTable<u8, Block> = lookup_table![
(0x00, 0x7F, Basic),
];
const ROW0_LIMIT: char = '\u{80}';
const PLANE0_TABLE: LookupTable<u16, Block> = lookup_table![
(0x0080, 0x024F, Latin),
(0x0250, 0x02AF, IPA),
(0x02B0, 0x02FF, Spacing),
(0x0300, 0x036F, Combining),
(0x0370, 0x03FF, Greek),
(0x0400, 0x052F, Cyrillic),
(0x0530, 0x058F, Armenian),
(0x0590, 0x05FF, Hebrew),
(0x0600, 0x06FF, Arabic),
(0x0700, 0x074F, Syriac),
(0x0750, 0x077F, Arabic),
(0x0780, 0x07BF, Thaana),
(0x07C0, 0x07FF, NKo),
(0x0800, 0x083F, Samaritan),
(0x0840, 0x085F, Mandaic),
(0x0860, 0x086F, Syriac),
(0x08A0, 0x08FF, Arabic),
(0x0900, 0x097F, Devanagari),
(0x0980, 0x09FF, Bengali),
(0x0A00, 0x0A7F, Gurmukhi),
(0x0A80, 0x0AFF, Gujarati),
(0x0B00, 0x0B7F, Oriya),
(0x0B80, 0x0BFF, Tamil),
(0x0C00, 0x0C7F, Telugu),
(0x0C80, 0x0CFF, Kannada),
(0x0D00, 0x0D7F, Malayalam),
(0x0D80, 0x0DFF, Sinhala),
(0x0E00, 0x0E7F, Thai),
(0x0E80, 0x0EFF, Lao),
(0x0F00, 0x0FFF, Tibetan),
(0x1000, 0x109F, Myanmar),
(0x10A0, 0x10FF, Georgian),
(0x1100, 0x11FF, Hangul),
(0x1200, 0x139F, Ethiopic),
(0x13A0, 0x13FF, Cherokee),
(0x1400, 0x167F, Unified),
(0x1680, 0x169F, Ogham),
(0x16A0, 0x16FF, Runic),
(0x1700, 0x171F, Tagalog),
(0x1720, 0x173F, Hanunoo),
(0x1740, 0x175F, Buhid),
(0x1760, 0x177F, Tagbanwa),
(0x1780, 0x17FF, Khmer),
(0x1800, 0x18AF, Mongolian),
(0x18B0, 0x18FF, Unified),
(0x1900, 0x194F, Limbu),
(0x1950, 0x197F, Tai),
(0x1980, 0x19DF, New),
(0x19E0, 0x19FF, Khmer),
(0x1A00, 0x1A1F, Buginese),
(0x1A20, 0x1AAF, Tai),
(0x1AB0, 0x1AFF, Combining),
(0x1B00, 0x1B7F, Balinese),
(0x1B80, 0x1BBF, Sundanese),
(0x1BC0, 0x1BFF, Batak),
(0x1C00, 0x1C4F, Lepcha),
(0x1C50, 0x1C7F, Ol),
(0x1C80, 0x1C8F, Cyrillic),
(0x1C90, 0x1CBF, Georgian),
(0x1CC0, 0x1CCF, Sundanese),
(0x1CD0, 0x1CFF, Vedic),
(0x1D00, 0x1DBF, Phonetic),
(0x1DC0, 0x1DFF, Combining),
(0x1E00, 0x1EFF, Latin),
(0x1F00, 0x1FFF, Greek),
(0x2000, 0x206F, General),
(0x2070, 0x209F, Superscripts),
(0x20A0, 0x20CF, Currency),
(0x20D0, 0x20FF, Combining),
(0x2100, 0x214F, Letterlike),
(0x2150, 0x218F, Number),
(0x2190, 0x21FF, Arrows),
(0x2200, 0x22FF, Mathematical),
(0x2300, 0x23FF, Miscellaneous),
(0x2400, 0x243F, Control),
(0x2440, 0x245F, Optical),
(0x2460, 0x24FF, Enclosed),
(0x2500, 0x257F, Box),
(0x2580, 0x259F, Block),
(0x25A0, 0x25FF, Geometric),
(0x2600, 0x26FF, Miscellaneous),
(0x2700, 0x27BF, Dingbats),
(0x27C0, 0x27EF, Miscellaneous),
(0x27F0, 0x27FF, Supplemental),
(0x2800, 0x28FF, Braille),
(0x2900, 0x297F, Supplemental),
(0x2980, 0x29FF, Miscellaneous),
(0x2A00, 0x2AFF, Supplemental),
(0x2B00, 0x2BFF, Miscellaneous),
(0x2C00, 0x2C5F, Glagolitic),
(0x2C60, 0x2C7F, Latin),
(0x2C80, 0x2CFF, Coptic),
(0x2D00, 0x2D2F, Georgian),
(0x2D30, 0x2D7F, Tifinagh),
(0x2D80, 0x2DDF, Ethiopic),
(0x2DE0, 0x2DFF, Cyrillic),
(0x2E00, 0x2E7F, Supplemental),
(0x2E80, 0x2EFF, CJK),
(0x2F00, 0x2FDF, Kangxi),
(0x2FF0, 0x2FFF, Ideographic),
(0x3000, 0x303F, CJK),
(0x3040, 0x309F, Hiragana),
(0x30A0, 0x30FF, Katakana),
(0x3100, 0x312F, Bopomofo),
(0x3130, 0x318F, Hangul),
(0x3190, 0x319F, Kanbun),
(0x31A0, 0x31BF, Bopomofo),
(0x31C0, 0x31EF, CJK),
(0x31F0, 0x31FF, Katakana),
(0x3200, 0x32FF, Enclosed),
(0x3300, 0x4DBF, CJK),
(0x4DC0, 0x4DFF, Yijing),
(0x4E00, 0x9FFF, CJK),
(0xA000, 0xA4CF, Yi),
(0xA4D0, 0xA4FF, Lisu),
(0xA500, 0xA63F, Vai),
(0xA640, 0xA69F, Cyrillic),
(0xA6A0, 0xA6FF, Bamum),
(0xA700, 0xA71F, Modifier),
(0xA720, 0xA7FF, Latin),
(0xA800, 0xA82F, Syloti),
(0xA830, 0xA83F, Common),
(0xA840, 0xA87F, Phags),
(0xA880, 0xA8DF, Saurashtra),
(0xA8E0, 0xA8FF, Devanagari),
(0xA900, 0xA92F, Kayah),
(0xA930, 0xA95F, Rejang),
(0xA960, 0xA97F, Hangul),
(0xA980, 0xA9DF, Javanese),
(0xA9E0, 0xA9FF, Myanmar),
(0xAA00, 0xAA5F, Cham),
(0xAA60, 0xAA7F, Myanmar),
(0xAA80, 0xAADF, Tai),
(0xAAE0, 0xAAFF, Meetei),
(0xAB00, 0xAB2F, Ethiopic),
(0xAB30, 0xAB6F, Latin),
(0xAB70, 0xABBF, Cherokee),
(0xABC0, 0xABFF, Meetei),
(0xAC00, 0xD7FF, Hangul),
(0xD800, 0xDBFF, High),
(0xDC00, 0xDFFF, Low),
(0xE000, 0xF8FF, Private),
(0xF900, 0xFAFF, CJK),
(0xFB00, 0xFB4F, Alphabetic),
(0xFB50, 0xFDFF, Arabic),
(0xFE00, 0xFE0F, Variation),
(0xFE10, 0xFE1F, Vertical),
(0xFE20, 0xFE2F, Combining),
(0xFE30, 0xFE4F, CJK),
(0xFE50, 0xFE6F, Small),
(0xFE70, 0xFEFF, Arabic),
(0xFF00, 0xFFEF, Halfwidth),
(0xFFF0, 0xFFFF, Specials),
];
const PLANE0_LIMIT: char = '\u{10000}';
const SUPPLEMENTARY_TABLE: LookupTable<u32, Block> = lookup_table![
(0x010000, 0x0100FF, Linear),
(0x010100, 0x01013F, Aegean),
(0x010140, 0x0101CF, Ancient),
(0x0101D0, 0x0101FF, Phaistos),
(0x010280, 0x01029F, Lycian),
(0x0102A0, 0x0102DF, Carian),
(0x0102E0, 0x0102FF, Coptic),
(0x010300, 0x01032F, Old),
(0x010330, 0x01034F, Gothic),
(0x010350, 0x01037F, Old),
(0x010380, 0x01039F, Ugaritic),
(0x0103A0, 0x0103DF, Old),
(0x010400, 0x01044F, Deseret),
(0x010450, 0x01047F, Shavian),
(0x010480, 0x0104AF, Osmanya),
(0x0104B0, 0x0104FF, Osage),
(0x010500, 0x01052F, Elbasan),
(0x010530, 0x01056F, Caucasian),
(0x010600, 0x01077F, Linear),
(0x010800, 0x01083F, Cypriot),
(0x010840, 0x01085F, Imperial),
(0x010860, 0x01087F, Palmyrene),
(0x010880, 0x0108AF, Nabataean),
(0x0108E0, 0x0108FF, Hatran),
(0x010900, 0x01091F, Phoenician),
(0x010920, 0x01093F, Lydian),
(0x010980, 0x0109FF, Meroitic),
(0x010A00, 0x010A5F, Kharoshthi),
(0x010A60, 0x010A9F, Old),
(0x010AC0, 0x010AFF, Manichaean),
(0x010B00, 0x010B3F, Avestan),
(0x010B40, 0x010B7F, Inscriptional),
(0x010B80, 0x010BAF, Psalter),
(0x010C00, 0x010C4F, Old),
(0x010C80, 0x010CFF, Old),
(0x010D00, 0x010D3F, Hanifi),
(0x010E60, 0x010E7F, Rumi),
(0x010F00, 0x010F2F, Old),
(0x010F30, 0x010F6F, Sogdian),
(0x010FE0, 0x010FFF, Elymaic),
(0x011000, 0x01107F, Brahmi),
(0x011080, 0x0110CF, Kaithi),
(0x0110D0, 0x0110FF, Sora),
(0x011100, 0x01114F, Chakma),
(0x011150, 0x01117F, Mahajani),
(0x011180, 0x0111DF, Sharada),
(0x0111E0, 0x0111FF, Sinhala),
(0x011200, 0x01124F, Khojki),
(0x011280, 0x0112AF, Multani),
(0x0112B0, 0x0112FF, Khudawadi),
(0x011300, 0x01137F, Grantha),
(0x011400, 0x01147F, Newa),
(0x011480, 0x0114DF, Tirhuta),
(0x011580, 0x0115FF, Siddham),
(0x011600, 0x01165F, Modi),
(0x011660, 0x01167F, Mongolian),
(0x011680, 0x0116CF, Takri),
(0x011700, 0x01173F, Ahom),
(0x011800, 0x01184F, Dogra),
(0x0118A0, 0x0118FF, Warang),
(0x0119A0, 0x0119FF, Nandinagari),
(0x011A00, 0x011A4F, Zanabazar),
(0x011A50, 0x011AAF, Soyombo),
(0x011AC0, 0x011AFF, Pau),
(0x011C00, 0x011C6F, Bhaiksuki),
(0x011C70, 0x011CBF, Marchen),
(0x011D00, 0x011D5F, Masaram),
(0x011D60, 0x011DAF, Gunjala),
(0x011EE0, 0x011EFF, Makasar),
(0x011FC0, 0x011FFF, Tamil),
(0x012000, 0x01247F, Cuneiform),
(0x012480, 0x01254F, Early),
(0x013000, 0x01343F, Egyptian),
(0x014400, 0x01467F, Anatolian),
(0x016800, 0x016A3F, Bamum),
(0x016A40, 0x016A6F, Mro),
(0x016AD0, 0x016AFF, Bassa),
(0x016B00, 0x016B8F, Pahawh),
(0x016E40, 0x016E9F, Medefaidrin),
(0x016F00, 0x016F9F, Miao),
(0x016FE0, 0x016FFF, Ideographic),
(0x017000, 0x018AFF, Tangut),
(0x01B000, 0x01B12F, Kana),
(0x01B130, 0x01B16F, Small),
(0x01B170, 0x01B2FF, Nushu),
(0x01BC00, 0x01BC9F, Duployan),
(0x01BCA0, 0x01BCAF, Shorthand),
(0x01D000, 0x01D0FF, Byzantine),
(0x01D100, 0x01D1FF, Musical),
(0x01D200, 0x01D24F, Ancient),
(0x01D2E0, 0x01D2FF, Mayan),
(0x01D300, 0x01D35F, Tai),
(0x01D360, 0x01D37F, Counting),
(0x01D400, 0x01D7FF, Mathematical),
(0x01D800, 0x01DAAF, Sutton),
(0x01E000, 0x01E02F, Glagolitic),
(0x01E100, 0x01E14F, Nyiakeng),
(0x01E2C0, 0x01E2FF, Wancho),
(0x01E800, 0x01E8DF, Mende),
(0x01E900, 0x01E95F, Adlam),
(0x01EC70, 0x01ECBF, Indic),
(0x01ED00, 0x01ED4F, Ottoman),
(0x01EE00, 0x01EEFF, Arabic),
(0x01F000, 0x01F02F, Mahjong),
(0x01F030, 0x01F09F, Domino),
(0x01F0A0, 0x01F0FF, Playing),
(0x01F100, 0x01F2FF, Enclosed),
(0x01F300, 0x01F5FF, Miscellaneous),
(0x01F600, 0x01F64F, Emoticons),
(0x01F650, 0x01F67F, Ornamental),
(0x01F680, 0x01F6FF, Transport),
(0x01F700, 0x01F77F, Alchemical),
(0x01F780, 0x01F7FF, Geometric),
(0x01F800, 0x01F9FF, Supplemental),
(0x01FA00, 0x01FA6F, Chess),
(0x01FA70, 0x01FAFF, Symbols),
(0x020000, 0x02A6DF, CJK),
(0x02A700, 0x02EBEF, CJK),
(0x02F800, 0x02FA1F, CJK),
(0x0E0000, 0x0E007F, Tags),
(0x0E0100, 0x0E01EF, Variation),
(0x0F0000, 0x10FFFF, Supplementary),
];
| { assert!(!PLANE0_TABLE.contains(&x)); } | conditional_block |
block.rs | use crate::lookup_table::LookupTable;
use crate::properties::Block;
use Block::*;
impl From<char> for Block {
#[inline]
fn from(c: char) -> Self {
if c < ROW0_LIMIT {
return ROW0_TABLE.get_or(&(c as u8), No_Block);
}
if c < PLANE0_LIMIT {
return PLANE0_TABLE.get_or(&(c as u16), No_Block);
}
        SUPPLEMENTARY_TABLE.get_or(&(c as u32), No_Block)
}
}
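// Fallback note: `get_or` yields No_Block for scalars that fall in the gaps
// between ranges (e.g. U+2FE0..=U+2FEF, between the Kangxi and Ideographic
// entries below), so the `From` impl is total over `char` with no panic path.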
#[test]
fn | () {
use std::convert::TryInto;
ROW0_TABLE.validate();
if let Ok(x) = (ROW0_LIMIT as u32).try_into() { assert!(!ROW0_TABLE.contains(&x)); }
PLANE0_TABLE.validate();
if let Ok(x) = (PLANE0_LIMIT as u32).try_into() { assert!(!PLANE0_TABLE.contains(&x)); }
SUPPLEMENTARY_TABLE.validate();
}
const ROW0_TABLE: LookupTable<u8, Block> = lookup_table![
(0x00, 0x7F, Basic),
];
const ROW0_LIMIT: char = '\u{80}';
const PLANE0_TABLE: LookupTable<u16, Block> = lookup_table![
(0x0080, 0x024F, Latin),
(0x0250, 0x02AF, IPA),
(0x02B0, 0x02FF, Spacing),
(0x0300, 0x036F, Combining),
(0x0370, 0x03FF, Greek),
(0x0400, 0x052F, Cyrillic),
(0x0530, 0x058F, Armenian),
(0x0590, 0x05FF, Hebrew),
(0x0600, 0x06FF, Arabic),
(0x0700, 0x074F, Syriac),
(0x0750, 0x077F, Arabic),
(0x0780, 0x07BF, Thaana),
(0x07C0, 0x07FF, NKo),
(0x0800, 0x083F, Samaritan),
(0x0840, 0x085F, Mandaic),
(0x0860, 0x086F, Syriac),
(0x08A0, 0x08FF, Arabic),
(0x0900, 0x097F, Devanagari),
(0x0980, 0x09FF, Bengali),
(0x0A00, 0x0A7F, Gurmukhi),
(0x0A80, 0x0AFF, Gujarati),
(0x0B00, 0x0B7F, Oriya),
(0x0B80, 0x0BFF, Tamil),
(0x0C00, 0x0C7F, Telugu),
(0x0C80, 0x0CFF, Kannada),
(0x0D00, 0x0D7F, Malayalam),
(0x0D80, 0x0DFF, Sinhala),
(0x0E00, 0x0E7F, Thai),
(0x0E80, 0x0EFF, Lao),
(0x0F00, 0x0FFF, Tibetan),
(0x1000, 0x109F, Myanmar),
(0x10A0, 0x10FF, Georgian),
(0x1100, 0x11FF, Hangul),
(0x1200, 0x139F, Ethiopic),
(0x13A0, 0x13FF, Cherokee),
(0x1400, 0x167F, Unified),
(0x1680, 0x169F, Ogham),
(0x16A0, 0x16FF, Runic),
(0x1700, 0x171F, Tagalog),
(0x1720, 0x173F, Hanunoo),
(0x1740, 0x175F, Buhid),
(0x1760, 0x177F, Tagbanwa),
(0x1780, 0x17FF, Khmer),
(0x1800, 0x18AF, Mongolian),
(0x18B0, 0x18FF, Unified),
(0x1900, 0x194F, Limbu),
(0x1950, 0x197F, Tai),
(0x1980, 0x19DF, New),
(0x19E0, 0x19FF, Khmer),
(0x1A00, 0x1A1F, Buginese),
(0x1A20, 0x1AAF, Tai),
(0x1AB0, 0x1AFF, Combining),
(0x1B00, 0x1B7F, Balinese),
(0x1B80, 0x1BBF, Sundanese),
(0x1BC0, 0x1BFF, Batak),
(0x1C00, 0x1C4F, Lepcha),
(0x1C50, 0x1C7F, Ol),
(0x1C80, 0x1C8F, Cyrillic),
(0x1C90, 0x1CBF, Georgian),
(0x1CC0, 0x1CCF, Sundanese),
(0x1CD0, 0x1CFF, Vedic),
(0x1D00, 0x1DBF, Phonetic),
(0x1DC0, 0x1DFF, Combining),
(0x1E00, 0x1EFF, Latin),
(0x1F00, 0x1FFF, Greek),
(0x2000, 0x206F, General),
(0x2070, 0x209F, Superscripts),
(0x20A0, 0x20CF, Currency),
(0x20D0, 0x20FF, Combining),
(0x2100, 0x214F, Letterlike),
(0x2150, 0x218F, Number),
(0x2190, 0x21FF, Arrows),
(0x2200, 0x22FF, Mathematical),
(0x2300, 0x23FF, Miscellaneous),
(0x2400, 0x243F, Control),
(0x2440, 0x245F, Optical),
(0x2460, 0x24FF, Enclosed),
(0x2500, 0x257F, Box),
(0x2580, 0x259F, Block),
(0x25A0, 0x25FF, Geometric),
(0x2600, 0x26FF, Miscellaneous),
(0x2700, 0x27BF, Dingbats),
(0x27C0, 0x27EF, Miscellaneous),
(0x27F0, 0x27FF, Supplemental),
(0x2800, 0x28FF, Braille),
(0x2900, 0x297F, Supplemental),
(0x2980, 0x29FF, Miscellaneous),
(0x2A00, 0x2AFF, Supplemental),
(0x2B00, 0x2BFF, Miscellaneous),
(0x2C00, 0x2C5F, Glagolitic),
(0x2C60, 0x2C7F, Latin),
(0x2C80, 0x2CFF, Coptic),
(0x2D00, 0x2D2F, Georgian),
(0x2D30, 0x2D7F, Tifinagh),
(0x2D80, 0x2DDF, Ethiopic),
(0x2DE0, 0x2DFF, Cyrillic),
(0x2E00, 0x2E7F, Supplemental),
(0x2E80, 0x2EFF, CJK),
(0x2F00, 0x2FDF, Kangxi),
(0x2FF0, 0x2FFF, Ideographic),
(0x3000, 0x303F, CJK),
(0x3040, 0x309F, Hiragana),
(0x30A0, 0x30FF, Katakana),
(0x3100, 0x312F, Bopomofo),
(0x3130, 0x318F, Hangul),
(0x3190, 0x319F, Kanbun),
(0x31A0, 0x31BF, Bopomofo),
(0x31C0, 0x31EF, CJK),
(0x31F0, 0x31FF, Katakana),
(0x3200, 0x32FF, Enclosed),
(0x3300, 0x4DBF, CJK),
(0x4DC0, 0x4DFF, Yijing),
(0x4E00, 0x9FFF, CJK),
(0xA000, 0xA4CF, Yi),
(0xA4D0, 0xA4FF, Lisu),
(0xA500, 0xA63F, Vai),
(0xA640, 0xA69F, Cyrillic),
(0xA6A0, 0xA6FF, Bamum),
(0xA700, 0xA71F, Modifier),
(0xA720, 0xA7FF, Latin),
(0xA800, 0xA82F, Syloti),
(0xA830, 0xA83F, Common),
(0xA840, 0xA87F, Phags),
(0xA880, 0xA8DF, Saurashtra),
(0xA8E0, 0xA8FF, Devanagari),
(0xA900, 0xA92F, Kayah),
(0xA930, 0xA95F, Rejang),
(0xA960, 0xA97F, Hangul),
(0xA980, 0xA9DF, Javanese),
(0xA9E0, 0xA9FF, Myanmar),
(0xAA00, 0xAA5F, Cham),
(0xAA60, 0xAA7F, Myanmar),
(0xAA80, 0xAADF, Tai),
(0xAAE0, 0xAAFF, Meetei),
(0xAB00, 0xAB2F, Ethiopic),
(0xAB30, 0xAB6F, Latin),
(0xAB70, 0xABBF, Cherokee),
(0xABC0, 0xABFF, Meetei),
(0xAC00, 0xD7FF, Hangul),
(0xD800, 0xDBFF, High),
(0xDC00, 0xDFFF, Low),
(0xE000, 0xF8FF, Private),
(0xF900, 0xFAFF, CJK),
(0xFB00, 0xFB4F, Alphabetic),
(0xFB50, 0xFDFF, Arabic),
(0xFE00, 0xFE0F, Variation),
(0xFE10, 0xFE1F, Vertical),
(0xFE20, 0xFE2F, Combining),
(0xFE30, 0xFE4F, CJK),
(0xFE50, 0xFE6F, Small),
(0xFE70, 0xFEFF, Arabic),
(0xFF00, 0xFFEF, Halfwidth),
(0xFFF0, 0xFFFF, Specials),
];
const PLANE0_LIMIT: char = '\u{10000}';
const SUPPLEMENTARY_TABLE: LookupTable<u32, Block> = lookup_table![
(0x010000, 0x0100FF, Linear),
(0x010100, 0x01013F, Aegean),
(0x010140, 0x0101CF, Ancient),
(0x0101D0, 0x0101FF, Phaistos),
(0x010280, 0x01029F, Lycian),
(0x0102A0, 0x0102DF, Carian),
(0x0102E0, 0x0102FF, Coptic),
(0x010300, 0x01032F, Old),
(0x010330, 0x01034F, Gothic),
(0x010350, 0x01037F, Old),
(0x010380, 0x01039F, Ugaritic),
(0x0103A0, 0x0103DF, Old),
(0x010400, 0x01044F, Deseret),
(0x010450, 0x01047F, Shavian),
(0x010480, 0x0104AF, Osmanya),
(0x0104B0, 0x0104FF, Osage),
(0x010500, 0x01052F, Elbasan),
(0x010530, 0x01056F, Caucasian),
(0x010600, 0x01077F, Linear),
(0x010800, 0x01083F, Cypriot),
(0x010840, 0x01085F, Imperial),
(0x010860, 0x01087F, Palmyrene),
(0x010880, 0x0108AF, Nabataean),
(0x0108E0, 0x0108FF, Hatran),
(0x010900, 0x01091F, Phoenician),
(0x010920, 0x01093F, Lydian),
(0x010980, 0x0109FF, Meroitic),
(0x010A00, 0x010A5F, Kharoshthi),
(0x010A60, 0x010A9F, Old),
(0x010AC0, 0x010AFF, Manichaean),
(0x010B00, 0x010B3F, Avestan),
(0x010B40, 0x010B7F, Inscriptional),
(0x010B80, 0x010BAF, Psalter),
(0x010C00, 0x010C4F, Old),
(0x010C80, 0x010CFF, Old),
(0x010D00, 0x010D3F, Hanifi),
(0x010E60, 0x010E7F, Rumi),
(0x010F00, 0x010F2F, Old),
(0x010F30, 0x010F6F, Sogdian),
(0x010FE0, 0x010FFF, Elymaic),
(0x011000, 0x01107F, Brahmi),
(0x011080, 0x0110CF, Kaithi),
(0x0110D0, 0x0110FF, Sora),
(0x011100, 0x01114F, Chakma),
(0x011150, 0x01117F, Mahajani),
(0x011180, 0x0111DF, Sharada),
(0x0111E0, 0x0111FF, Sinhala),
(0x011200, 0x01124F, Khojki),
(0x011280, 0x0112AF, Multani),
(0x0112B0, 0x0112FF, Khudawadi),
(0x011300, 0x01137F, Grantha),
(0x011400, 0x01147F, Newa),
(0x011480, 0x0114DF, Tirhuta),
(0x011580, 0x0115FF, Siddham),
(0x011600, 0x01165F, Modi),
(0x011660, 0x01167F, Mongolian),
(0x011680, 0x0116CF, Takri),
(0x011700, 0x01173F, Ahom),
(0x011800, 0x01184F, Dogra),
(0x0118A0, 0x0118FF, Warang),
(0x0119A0, 0x0119FF, Nandinagari),
(0x011A00, 0x011A4F, Zanabazar),
(0x011A50, 0x011AAF, Soyombo),
(0x011AC0, 0x011AFF, Pau),
(0x011C00, 0x011C6F, Bhaiksuki),
(0x011C70, 0x011CBF, Marchen),
(0x011D00, 0x011D5F, Masaram),
(0x011D60, 0x011DAF, Gunjala),
(0x011EE0, 0x011EFF, Makasar),
(0x011FC0, 0x011FFF, Tamil),
(0x012000, 0x01247F, Cuneiform),
(0x012480, 0x01254F, Early),
(0x013000, 0x01343F, Egyptian),
(0x014400, 0x01467F, Anatolian),
(0x016800, 0x016A3F, Bamum),
(0x016A40, 0x016A6F, Mro),
(0x016AD0, 0x016AFF, Bassa),
(0x016B00, 0x016B8F, Pahawh),
(0x016E40, 0x016E9F, Medefaidrin),
(0x016F00, 0x016F9F, Miao),
(0x016FE0, 0x016FFF, Ideographic),
(0x017000, 0x018AFF, Tangut),
(0x01B000, 0x01B12F, Kana),
(0x01B130, 0x01B16F, Small),
(0x01B170, 0x01B2FF, Nushu),
(0x01BC00, 0x01BC9F, Duployan),
(0x01BCA0, 0x01BCAF, Shorthand),
(0x01D000, 0x01D0FF, Byzantine),
(0x01D100, 0x01D1FF, Musical),
(0x01D200, 0x01D24F, Ancient),
(0x01D2E0, 0x01D2FF, Mayan),
(0x01D300, 0x01D35F, Tai),
(0x01D360, 0x01D37F, Counting),
(0x01D400, 0x01D7FF, Mathematical),
(0x01D800, 0x01DAAF, Sutton),
(0x01E000, 0x01E02F, Glagolitic),
(0x01E100, 0x01E14F, Nyiakeng),
(0x01E2C0, 0x01E2FF, Wancho),
(0x01E800, 0x01E8DF, Mende),
(0x01E900, 0x01E95F, Adlam),
(0x01EC70, 0x01ECBF, Indic),
(0x01ED00, 0x01ED4F, Ottoman),
(0x01EE00, 0x01EEFF, Arabic),
(0x01F000, 0x01F02F, Mahjong),
(0x01F030, 0x01F09F, Domino),
(0x01F0A0, 0x01F0FF, Playing),
(0x01F100, 0x01F2FF, Enclosed),
(0x01F300, 0x01F5FF, Miscellaneous),
(0x01F600, 0x01F64F, Emoticons),
(0x01F650, 0x01F67F, Ornamental),
(0x01F680, 0x01F6FF, Transport),
(0x01F700, 0x01F77F, Alchemical),
(0x01F780, 0x01F7FF, Geometric),
(0x01F800, 0x01F9FF, Supplemental),
(0x01FA00, 0x01FA6F, Chess),
(0x01FA70, 0x01FAFF, Symbols),
(0x020000, 0x02A6DF, CJK),
(0x02A700, 0x02EBEF, CJK),
(0x02F800, 0x02FA1F, CJK),
(0x0E0000, 0x0E007F, Tags),
(0x0E0100, 0x0E01EF, Variation),
(0x0F0000, 0x10FFFF, Supplementary),
];
| validate_tables | identifier_name |
mod.rs | // Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use super::*;
mod notification_stream;
use crate::types::PeerError;
use notification_stream::NotificationStream;
/// Processes incoming commands from the control stream and dispatches them to the control command
/// handler. This is started only when we have a connection and either a target or
/// controller SDP profile record for the current peer.
async fn process_control_stream(peer: Arc<RwLock<RemotePeer>>) {
let connection = {
let peer_guard = peer.read();
match peer_guard.control_channel.connection() {
Some(connection) => connection.clone(),
None => return,
}
};
let command_stream = connection.take_command_stream();
    // Limit to 16 concurrent commands: the AVCTP transaction label is only 4 bits wide, so at
    // most 16 transactions can be outstanding per AVCTP connection at any one time.
match command_stream
.map(Ok)
.try_for_each_concurrent(16, |command| async {
let fut = peer.read().command_handler.handle_command(command.unwrap());
let result: Result<(), PeerError> = fut.await;
result
})
.await
{
Ok(_) => fx_log_info!("Peer command stream closed"),
Err(e) => fx_log_err!("Peer command returned error {:?}", e),
}
// Command stream closed/errored. Disconnect the peer.
{
peer.write().reset_connection(false);
}
}
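// Lifecycle note (derived from this function and state_watcher later in this
// module): when the command stream closes or errors, reset_connection(false)
// flips the control channel back to Disconnected, which wakes state_watcher so
// it can abort the sibling processing tasks.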
/// Handles notifications received from the peer over the subscribed notification streams and
/// dispatches them back to the controller listeners.
fn handle_notification(
notif: &NotificationEventId,
peer: &Arc<RwLock<RemotePeer>>,
data: &[u8],
) -> Result<bool, Error> {
fx_vlog!(tag: "avrcp", 2, "received notification for {:?} {:?}", notif, data);
let preamble = VendorDependentPreamble::decode(data).map_err(|e| Error::PacketError(e))?;
let data = &data[preamble.encoded_len()..];
if data.len() < preamble.parameter_length as usize {
return Err(Error::UnexpectedResponse);
}
match notif {
NotificationEventId::EventPlaybackStatusChanged => {
let response = PlaybackStatusChangedNotificationResponse::decode(data)
.map_err(|e| Error::PacketError(e))?;
peer.write().handle_new_controller_notification_event(
ControllerEvent::PlaybackStatusChanged(response.playback_status()),
);
Ok(false)
}
NotificationEventId::EventTrackChanged => {
let response = TrackChangedNotificationResponse::decode(data)
.map_err(|e| Error::PacketError(e))?;
peer.write().handle_new_controller_notification_event(ControllerEvent::TrackIdChanged(
response.identifier(),
));
Ok(false)
}
NotificationEventId::EventPlaybackPosChanged => {
let response = PlaybackPosChangedNotificationResponse::decode(data)
.map_err(|e| Error::PacketError(e))?;
peer.write().handle_new_controller_notification_event(
ControllerEvent::PlaybackPosChanged(response.position()),
);
Ok(false)
}
NotificationEventId::EventVolumeChanged => {
let response = VolumeChangedNotificationResponse::decode(data)
.map_err(|e| Error::PacketError(e))?;
peer.write().handle_new_controller_notification_event(ControllerEvent::VolumeChanged(
response.volume(),
));
Ok(false)
}
_ => Ok(true),
}
}
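// Return-value contract (inferred from pump_notifications below): Ok(false)
// keeps the notification loop pumping; Ok(true), or a propagated error other
// than CommandNotSupported, makes the caller tear the polling loop down.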
/// Starts a task to attempt an outgoing L2CAP connection to remote's AVRCP control channel.
/// The control channel should be in `Connecting` state before spawning this task.
/// TODO(BT-2747): Fix a race where an incoming connection can come in while we are making an
/// outgoing connection. Properly handle the case where we attempt to connect to the remote
/// at the same time it makes an incoming connection, as the spec prescribes.
fn start_make_connection_task(peer: Arc<RwLock<RemotePeer>>) {
let peer = peer.clone();
fasync::spawn(async move {
let (peer_id, profile_service) = {
let peer_guard = peer.read();
// early return if we are not in `Connecting`
match peer_guard.control_channel {
PeerChannel::Connecting => {}
_ => return,
}
(peer_guard.peer_id.clone(), peer_guard.profile_svc.clone())
};
match profile_service.connect_to_device(&peer_id, PSM_AVCTP as u16).await {
Ok(socket) => {
let mut peer_guard = peer.write();
match peer_guard.control_channel {
PeerChannel::Connecting => match AvcPeer::new(socket) {
Ok(peer) => {
peer_guard.set_control_connection(peer);
}
Err(e) => {
peer_guard.reset_connection(false);
fx_log_err!("Unable to make peer from socket {}: {:?}", peer_id, e);
}
},
_ => {
fx_log_info!(
"incoming connection established while making outgoing {:?}",
peer_id
);
// an incoming l2cap connection was made while we were making an
// outgoing one. Drop both connections per spec.
peer_guard.reset_connection(false);
}
};
}
Err(e) => {
fx_log_err!("connect_to_device error {}: {:?}", peer_id, e);
let mut peer_guard = peer.write();
if let PeerChannel::Connecting = peer_guard.control_channel {
peer_guard.reset_connection(false);
}
}
}
})
}
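// State sketch (inferred): PeerChannel moves Disconnected -> Connecting on an
// outgoing attempt, then Connected on success; a failure, or an incoming
// connection racing the outgoing one, resets it to Disconnected.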
/// Checks which notifications the peer supports and registers for them.
/// This is started on a remote peer when we have a connection and target profile descriptor.
async fn pump_notifications(peer: Arc<RwLock<RemotePeer>>) {
// events we support when speaking to a peer that supports the target profile.
const SUPPORTED_NOTIFICATIONS: [NotificationEventId; 4] = [
NotificationEventId::EventPlaybackStatusChanged,
NotificationEventId::EventTrackChanged,
NotificationEventId::EventPlaybackPosChanged,
NotificationEventId::EventVolumeChanged,
];
let supported_notifications: Vec<NotificationEventId> =
SUPPORTED_NOTIFICATIONS.iter().cloned().collect();
    // Look up which notifications the peer supports first. Consider refreshing this from
    // time to time.
let remote_supported_notifications = match get_supported_events_internal(peer.clone()).await {
Ok(x) => x,
Err(_) => return,
};
let supported_notifications: Vec<NotificationEventId> = remote_supported_notifications
.into_iter()
.filter(|k| supported_notifications.contains(k))
.collect();
let mut notification_streams = SelectAll::new();
for notif in supported_notifications {
fx_vlog!(tag: "avrcp", 2, "creating notification stream for {:#?}", notif);
let stream =
NotificationStream::new(peer.clone(), notif, 1).map_ok(move |data| (notif, data));
notification_streams.push(stream);
}
pin_mut!(notification_streams);
loop {
if futures::select! {
event_result = notification_streams.select_next_some() => {
match event_result {
Ok((notif, data)) => {
handle_notification(¬if, &peer, &data[..])
.unwrap_or_else(|e| { fx_log_err!("Error decoding packet from peer {:?}", e); true} )
},
Err(Error::CommandNotSupported) => false,
Err(_) => true,
}
}
complete => { true }
} {
break;
}
}
fx_vlog!(tag: "avrcp", 2, "stopping notifications for {:#?}", peer.read().peer_id);
}
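// The SelectAll above multiplexes one NotificationStream per mutually supported
// event. The loop stops pumping when a stream yields a hard error or an event
// we do not recognize, or when every stream completes; CommandNotSupported
// alone is tolerated and the remaining streams keep running.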
/// Starts a task to poll notifications on the remote peer. Aborted when the peer connection is
/// reset.
fn start_notifications_processing_task(peer: Arc<RwLock<RemotePeer>>) -> AbortHandle {
let (handle, registration) = AbortHandle::new_pair();
fasync::spawn(
Abortable::new(
async move {
pump_notifications(peer).await;
},
registration,
)
.map(|_| ()),
);
handle
}
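// The spawn helpers here and below share one pattern; a generic sketch of it,
// kept as a comment since it is illustrative and not part of the original file:
//
// fn spawn_abortable(fut: impl Future<Output = ()> + Send + 'static) -> AbortHandle {
//     let (handle, registration) = AbortHandle::new_pair();
//     fasync::spawn(Abortable::new(fut, registration).map(|_| ()));
//     handle
// }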
/// Starts a task to poll control messages from the peer. Aborted when the peer connection is
/// reset. Started when we have a connection to the remote peer and we have any type of valid SDP
/// profile from the peer.
fn start_control_stream_processing_task(peer: Arc<RwLock<RemotePeer>>) -> AbortHandle {
let (handle, registration) = AbortHandle::new_pair();
fasync::spawn(
Abortable::new(
async move {
process_control_stream(peer).await;
},
registration, |
/// State observer task around a remote peer. Takes a change stream from the remote peer that wakes
/// the task whenever some state has changed on the peer. Swaps tasks such as making outgoing
/// connections, processing the incoming control messages, and registering for notifications on the
/// remote peer.
pub(super) async fn state_watcher(peer: Arc<RwLock<RemotePeer>>) {
fx_vlog!(tag: "avrcp", 2, "state_watcher starting");
let mut change_stream = peer.read().state_change_listener.take_change_stream();
let peer_weak = Arc::downgrade(&peer);
drop(peer);
let mut channel_processor_abort_handle: Option<AbortHandle> = None;
let mut notification_poll_abort_handle: Option<AbortHandle> = None;
while let Some(_) = change_stream.next().await {
fx_vlog!(tag: "avrcp", 2, "state_watcher command received");
if let Some(peer) = peer_weak.upgrade() {
let mut peer_guard = peer.write();
fx_vlog!(tag: "avrcp", 2, "make_connection control channel {:?}", peer_guard.control_channel);
match peer_guard.control_channel {
PeerChannel::Connecting => {}
PeerChannel::Disconnected => {
if let Some(ref abort_handle) = channel_processor_abort_handle {
abort_handle.abort();
channel_processor_abort_handle = None;
}
if let Some(ref abort_handle) = notification_poll_abort_handle {
abort_handle.abort();
notification_poll_abort_handle = None;
}
// Have we discovered service profile data on the peer?
if (peer_guard.target_descriptor.is_some()
|| peer_guard.controller_descriptor.is_some())
&& peer_guard.attempt_connection
{
fx_vlog!(tag: "avrcp", 2, "make_connection {:?}", peer_guard.peer_id);
peer_guard.attempt_connection = false;
peer_guard.control_channel = PeerChannel::Connecting;
start_make_connection_task(peer.clone());
}
}
PeerChannel::Connected(_) => {
// Have we discovered service profile data on the peer?
if (peer_guard.target_descriptor.is_some()
|| peer_guard.controller_descriptor.is_some())
&& channel_processor_abort_handle.is_none()
{
channel_processor_abort_handle =
Some(start_control_stream_processing_task(peer.clone()));
}
if peer_guard.target_descriptor.is_some()
&& notification_poll_abort_handle.is_none()
{
notification_poll_abort_handle =
Some(start_notifications_processing_task(peer.clone()));
}
}
}
} else {
break;
}
}
fx_vlog!(tag: "avrcp", 2, "state_watcher shutting down. aborting processors");
// Stop processing state changes entirely on the peer.
if let Some(ref abort_handle) = channel_processor_abort_handle {
abort_handle.abort();
}
if let Some(ref abort_handle) = notification_poll_abort_handle {
abort_handle.abort();
}
} | )
.map(|_| ()),
);
handle
} | random_line_split |
mod.rs | // Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use super::*;
mod notification_stream;
use crate::types::PeerError;
use notification_stream::NotificationStream;
/// Processes incoming commands from the control stream and dispatches them to the control command
/// handler. This is started only when we have a connection and either a target or
/// controller SDP profile record for the current peer.
async fn process_control_stream(peer: Arc<RwLock<RemotePeer>>) {
let connection = {
let peer_guard = peer.read();
match peer_guard.control_channel.connection() {
Some(connection) => connection.clone(),
None => return,
}
};
let command_stream = connection.take_command_stream();
    // Limit to 16 concurrent commands: the AVCTP transaction label is only 4 bits wide, so at
    // most 16 transactions can be outstanding per AVCTP connection at any one time.
match command_stream
.map(Ok)
.try_for_each_concurrent(16, |command| async {
let fut = peer.read().command_handler.handle_command(command.unwrap());
let result: Result<(), PeerError> = fut.await;
result
})
.await
{
Ok(_) => fx_log_info!("Peer command stream closed"),
Err(e) => fx_log_err!("Peer command returned error {:?}", e),
}
// Command stream closed/errored. Disconnect the peer.
{
peer.write().reset_connection(false);
}
}
/// Handles notifications received from the peer over the subscribed notification streams and
/// dispatches them back to the controller listeners.
fn handle_notification(
notif: &NotificationEventId,
peer: &Arc<RwLock<RemotePeer>>,
data: &[u8],
) -> Result<bool, Error> | NotificationEventId::EventTrackChanged => {
let response = TrackChangedNotificationResponse::decode(data)
.map_err(|e| Error::PacketError(e))?;
peer.write().handle_new_controller_notification_event(ControllerEvent::TrackIdChanged(
response.identifier(),
));
Ok(false)
}
NotificationEventId::EventPlaybackPosChanged => {
let response = PlaybackPosChangedNotificationResponse::decode(data)
.map_err(|e| Error::PacketError(e))?;
peer.write().handle_new_controller_notification_event(
ControllerEvent::PlaybackPosChanged(response.position()),
);
Ok(false)
}
NotificationEventId::EventVolumeChanged => {
let response = VolumeChangedNotificationResponse::decode(data)
.map_err(|e| Error::PacketError(e))?;
peer.write().handle_new_controller_notification_event(ControllerEvent::VolumeChanged(
response.volume(),
));
Ok(false)
}
_ => Ok(true),
}
}
/// Starts a task to attempt an outgoing L2CAP connection to remote's AVRCP control channel.
/// The control channel should be in `Connecting` state before spawning this task.
/// TODO(BT-2747): Fix a race where an incoming connection can come in while we are making an
/// outgoing connection. Properly handle the case where we attempt to connect to the remote
/// at the same time it makes an incoming connection, as the spec prescribes.
fn start_make_connection_task(peer: Arc<RwLock<RemotePeer>>) {
let peer = peer.clone();
fasync::spawn(async move {
let (peer_id, profile_service) = {
let peer_guard = peer.read();
// early return if we are not in `Connecting`
match peer_guard.control_channel {
PeerChannel::Connecting => {}
_ => return,
}
(peer_guard.peer_id.clone(), peer_guard.profile_svc.clone())
};
match profile_service.connect_to_device(&peer_id, PSM_AVCTP as u16).await {
Ok(socket) => {
let mut peer_guard = peer.write();
match peer_guard.control_channel {
PeerChannel::Connecting => match AvcPeer::new(socket) {
Ok(peer) => {
peer_guard.set_control_connection(peer);
}
Err(e) => {
peer_guard.reset_connection(false);
fx_log_err!("Unable to make peer from socket {}: {:?}", peer_id, e);
}
},
_ => {
fx_log_info!(
"incoming connection established while making outgoing {:?}",
peer_id
);
// an incoming l2cap connection was made while we were making an
// outgoing one. Drop both connections per spec.
peer_guard.reset_connection(false);
}
};
}
Err(e) => {
fx_log_err!("connect_to_device error {}: {:?}", peer_id, e);
let mut peer_guard = peer.write();
if let PeerChannel::Connecting = peer_guard.control_channel {
peer_guard.reset_connection(false);
}
}
}
})
}
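// Protocol note: PSM_AVCTP is the L2CAP Protocol/Service Multiplexer assigned
// to AVCTP (0x0017 in the Bluetooth assigned numbers); AVRCP control traffic
// rides on an L2CAP channel opened against that PSM.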
/// Checks which notifications the peer supports and registers for them.
/// This is started on a remote peer when we have a connection and target profile descriptor.
async fn pump_notifications(peer: Arc<RwLock<RemotePeer>>) {
// events we support when speaking to a peer that supports the target profile.
const SUPPORTED_NOTIFICATIONS: [NotificationEventId; 4] = [
NotificationEventId::EventPlaybackStatusChanged,
NotificationEventId::EventTrackChanged,
NotificationEventId::EventPlaybackPosChanged,
NotificationEventId::EventVolumeChanged,
];
let supported_notifications: Vec<NotificationEventId> =
SUPPORTED_NOTIFICATIONS.iter().cloned().collect();
    // Look up which notifications the peer supports first. Consider refreshing this from
    // time to time.
let remote_supported_notifications = match get_supported_events_internal(peer.clone()).await {
Ok(x) => x,
Err(_) => return,
};
let supported_notifications: Vec<NotificationEventId> = remote_supported_notifications
.into_iter()
.filter(|k| supported_notifications.contains(k))
.collect();
let mut notification_streams = SelectAll::new();
for notif in supported_notifications {
fx_vlog!(tag: "avrcp", 2, "creating notification stream for {:#?}", notif);
let stream =
NotificationStream::new(peer.clone(), notif, 1).map_ok(move |data| (notif, data));
notification_streams.push(stream);
}
pin_mut!(notification_streams);
loop {
if futures::select! {
event_result = notification_streams.select_next_some() => {
match event_result {
Ok((notif, data)) => {
handle_notification(¬if, &peer, &data[..])
.unwrap_or_else(|e| { fx_log_err!("Error decoding packet from peer {:?}", e); true} )
},
Err(Error::CommandNotSupported) => false,
Err(_) => true,
}
}
complete => { true }
} {
break;
}
}
fx_vlog!(tag: "avrcp", 2, "stopping notifications for {:#?}", peer.read().peer_id);
}
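// Intersection sketch (hypothetical values, for illustration): if the remote
// advertises {PlaybackStatus, Volume, BattStatus} and we support the four
// events listed above, the filter keeps {PlaybackStatus, Volume}: only events
// both sides understand get a NotificationStream.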
/// Starts a task to poll notifications on the remote peer. Aborted when the peer connection is
/// reset.
fn start_notifications_processing_task(peer: Arc<RwLock<RemotePeer>>) -> AbortHandle {
let (handle, registration) = AbortHandle::new_pair();
fasync::spawn(
Abortable::new(
async move {
pump_notifications(peer).await;
},
registration,
)
.map(|_| ()),
);
handle
}
/// Starts a task to poll control messages from the peer. Aborted when the peer connection is
/// reset. Started when we have a connection to the remote peer and we have any type of valid SDP
/// profile from the peer.
fn start_control_stream_processing_task(peer: Arc<RwLock<RemotePeer>>) -> AbortHandle {
let (handle, registration) = AbortHandle::new_pair();
fasync::spawn(
Abortable::new(
async move {
process_control_stream(peer).await;
},
registration,
)
.map(|_| ()),
);
handle
}
/// State observer task around a remote peer. Takes a change stream from the remote peer that wakes
/// the task whenever some state has changed on the peer. Swaps tasks such as making outgoing
/// connections, processing the incoming control messages, and registering for notifications on the
/// remote peer.
pub(super) async fn state_watcher(peer: Arc<RwLock<RemotePeer>>) {
fx_vlog!(tag: "avrcp", 2, "state_watcher starting");
let mut change_stream = peer.read().state_change_listener.take_change_stream();
let peer_weak = Arc::downgrade(&peer);
drop(peer);
let mut channel_processor_abort_handle: Option<AbortHandle> = None;
let mut notification_poll_abort_handle: Option<AbortHandle> = None;
while let Some(_) = change_stream.next().await {
fx_vlog!(tag: "avrcp", 2, "state_watcher command received");
if let Some(peer) = peer_weak.upgrade() {
let mut peer_guard = peer.write();
fx_vlog!(tag: "avrcp", 2, "make_connection control channel {:?}", peer_guard.control_channel);
match peer_guard.control_channel {
PeerChannel::Connecting => {}
PeerChannel::Disconnected => {
if let Some(ref abort_handle) = channel_processor_abort_handle {
abort_handle.abort();
channel_processor_abort_handle = None;
}
if let Some(ref abort_handle) = notification_poll_abort_handle {
abort_handle.abort();
notification_poll_abort_handle = None;
}
// Have we discovered service profile data on the peer?
if (peer_guard.target_descriptor.is_some()
|| peer_guard.controller_descriptor.is_some())
&& peer_guard.attempt_connection
{
fx_vlog!(tag: "avrcp", 2, "make_connection {:?}", peer_guard.peer_id);
peer_guard.attempt_connection = false;
peer_guard.control_channel = PeerChannel::Connecting;
start_make_connection_task(peer.clone());
}
}
PeerChannel::Connected(_) => {
// Have we discovered service profile data on the peer?
if (peer_guard.target_descriptor.is_some()
|| peer_guard.controller_descriptor.is_some())
&& channel_processor_abort_handle.is_none()
{
channel_processor_abort_handle =
Some(start_control_stream_processing_task(peer.clone()));
}
if peer_guard.target_descriptor.is_some()
&& notification_poll_abort_handle.is_none()
{
notification_poll_abort_handle =
Some(start_notifications_processing_task(peer.clone()));
}
}
}
} else {
break;
}
}
fx_vlog!(tag: "avrcp", 2, "state_watcher shutting down. aborting processors");
// Stop processing state changes entirely on the peer.
if let Some(ref abort_handle) = channel_processor_abort_handle {
abort_handle.abort();
}
if let Some(ref abort_handle) = notification_poll_abort_handle {
abort_handle.abort();
}
}
| {
fx_vlog!(tag: "avrcp", 2, "received notification for {:?} {:?}", notif, data);
let preamble = VendorDependentPreamble::decode(data).map_err(|e| Error::PacketError(e))?;
let data = &data[preamble.encoded_len()..];
if data.len() < preamble.parameter_length as usize {
return Err(Error::UnexpectedResponse);
}
match notif {
NotificationEventId::EventPlaybackStatusChanged => {
let response = PlaybackStatusChangedNotificationResponse::decode(data)
.map_err(|e| Error::PacketError(e))?;
peer.write().handle_new_controller_notification_event(
ControllerEvent::PlaybackStatusChanged(response.playback_status()),
);
Ok(false)
} | identifier_body |
mod.rs | // Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use super::*;
mod notification_stream;
use crate::types::PeerError;
use notification_stream::NotificationStream;
/// Processes incoming commands from the control stream and dispatches them to the control command
/// handler. This is started only when we have a connection and either a target or
/// controller SDP profile record for the current peer.
async fn process_control_stream(peer: Arc<RwLock<RemotePeer>>) {
let connection = {
let peer_guard = peer.read();
match peer_guard.control_channel.connection() {
Some(connection) => connection.clone(),
None => return,
}
};
let command_stream = connection.take_command_stream();
    // Limit to 16 concurrent commands: the AVCTP transaction label is only 4 bits wide, so at
    // most 16 transactions can be outstanding per AVCTP connection at any one time.
match command_stream
.map(Ok)
.try_for_each_concurrent(16, |command| async {
let fut = peer.read().command_handler.handle_command(command.unwrap());
let result: Result<(), PeerError> = fut.await;
result
})
.await
{
Ok(_) => fx_log_info!("Peer command stream closed"),
Err(e) => fx_log_err!("Peer command returned error {:?}", e),
}
// Command stream closed/errored. Disconnect the peer.
{
peer.write().reset_connection(false);
}
}
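// Stream plumbing note: `.map(Ok)` rewraps each item so try_for_each_concurrent
// can drive up to 16 handler futures at once, and the inner `command.unwrap()`
// then re-surfaces the original Result-typed item from the AVCTP command stream.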
/// Handles notifications received from the peer over the subscribed notification streams and
/// dispatches them back to the controller listeners.
fn handle_notification(
notif: &NotificationEventId,
peer: &Arc<RwLock<RemotePeer>>,
data: &[u8],
) -> Result<bool, Error> {
fx_vlog!(tag: "avrcp", 2, "received notification for {:?} {:?}", notif, data);
let preamble = VendorDependentPreamble::decode(data).map_err(|e| Error::PacketError(e))?;
let data = &data[preamble.encoded_len()..];
if data.len() < preamble.parameter_length as usize {
return Err(Error::UnexpectedResponse);
}
match notif {
NotificationEventId::EventPlaybackStatusChanged => {
let response = PlaybackStatusChangedNotificationResponse::decode(data)
.map_err(|e| Error::PacketError(e))?;
peer.write().handle_new_controller_notification_event(
ControllerEvent::PlaybackStatusChanged(response.playback_status()),
);
Ok(false)
}
NotificationEventId::EventTrackChanged => {
let response = TrackChangedNotificationResponse::decode(data)
.map_err(|e| Error::PacketError(e))?;
peer.write().handle_new_controller_notification_event(ControllerEvent::TrackIdChanged(
response.identifier(),
));
Ok(false)
}
NotificationEventId::EventPlaybackPosChanged => {
let response = PlaybackPosChangedNotificationResponse::decode(data)
.map_err(|e| Error::PacketError(e))?;
peer.write().handle_new_controller_notification_event(
ControllerEvent::PlaybackPosChanged(response.position()),
);
Ok(false)
}
NotificationEventId::EventVolumeChanged => {
let response = VolumeChangedNotificationResponse::decode(data)
.map_err(|e| Error::PacketError(e))?;
peer.write().handle_new_controller_notification_event(ControllerEvent::VolumeChanged(
response.volume(),
));
Ok(false)
}
_ => Ok(true),
}
}
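// Parsing note: the preamble's parameter_length is checked against the buffer
// that remains after the preamble before any event-specific decode runs, so a
// truncated vendor-dependent packet fails fast with UnexpectedResponse instead
// of panicking inside a decoder.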
/// Starts a task to attempt an outgoing L2CAP connection to remote's AVRCP control channel.
/// The control channel should be in `Connecting` state before spawning this task.
/// TODO(BT-2747): Fix a race where an incoming connection can come in while we are making an
/// outgoing connection. Properly handle the case where we attempt to connect to the remote
/// at the same time it makes an incoming connection, as the spec prescribes.
fn start_make_connection_task(peer: Arc<RwLock<RemotePeer>>) {
let peer = peer.clone();
fasync::spawn(async move {
let (peer_id, profile_service) = {
let peer_guard = peer.read();
// early return if we are not in `Connecting`
match peer_guard.control_channel {
PeerChannel::Connecting => {}
_ => return,
}
(peer_guard.peer_id.clone(), peer_guard.profile_svc.clone())
};
match profile_service.connect_to_device(&peer_id, PSM_AVCTP as u16).await {
Ok(socket) => {
let mut peer_guard = peer.write();
match peer_guard.control_channel {
PeerChannel::Connecting => match AvcPeer::new(socket) {
Ok(peer) => {
peer_guard.set_control_connection(peer);
}
Err(e) => {
peer_guard.reset_connection(false);
fx_log_err!("Unable to make peer from socket {}: {:?}", peer_id, e);
}
},
_ => {
fx_log_info!(
"incoming connection established while making outgoing {:?}",
peer_id
);
// an incoming l2cap connection was made while we were making an
// outgoing one. Drop both connections per spec.
peer_guard.reset_connection(false);
}
};
}
Err(e) => {
fx_log_err!("connect_to_device error {}: {:?}", peer_id, e);
let mut peer_guard = peer.write();
if let PeerChannel::Connecting = peer_guard.control_channel {
peer_guard.reset_connection(false);
}
}
}
})
}
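// Concurrency note: after awaiting connect_to_device, the task re-checks
// control_channel under the write lock. Only a peer still in Connecting adopts
// the new socket; anything else means an incoming connection won the race, and
// both links are dropped per the spec's simultaneous-connection rule.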
/// Checks which notifications the peer supports and registers for them.
/// This is started on a remote peer when we have a connection and target profile descriptor.
async fn pump_notifications(peer: Arc<RwLock<RemotePeer>>) {
// events we support when speaking to a peer that supports the target profile.
const SUPPORTED_NOTIFICATIONS: [NotificationEventId; 4] = [
NotificationEventId::EventPlaybackStatusChanged,
NotificationEventId::EventTrackChanged,
NotificationEventId::EventPlaybackPosChanged,
NotificationEventId::EventVolumeChanged,
];
let supported_notifications: Vec<NotificationEventId> =
SUPPORTED_NOTIFICATIONS.iter().cloned().collect();
    // Look up which notifications the peer supports first. Consider refreshing this from
    // time to time.
let remote_supported_notifications = match get_supported_events_internal(peer.clone()).await {
Ok(x) => x,
Err(_) => return,
};
let supported_notifications: Vec<NotificationEventId> = remote_supported_notifications
.into_iter()
.filter(|k| supported_notifications.contains(k))
.collect();
let mut notification_streams = SelectAll::new();
for notif in supported_notifications {
fx_vlog!(tag: "avrcp", 2, "creating notification stream for {:#?}", notif);
let stream =
NotificationStream::new(peer.clone(), notif, 1).map_ok(move |data| (notif, data));
notification_streams.push(stream);
}
pin_mut!(notification_streams);
loop {
if futures::select! {
event_result = notification_streams.select_next_some() => {
match event_result {
Ok((notif, data)) => {
handle_notification(¬if, &peer, &data[..])
.unwrap_or_else(|e| { fx_log_err!("Error decoding packet from peer {:?}", e); true} )
},
Err(Error::CommandNotSupported) => false,
Err(_) => true,
}
}
complete => { true }
} {
break;
}
}
fx_vlog!(tag: "avrcp", 2, "stopping notifications for {:#?}", peer.read().peer_id);
}
/// Starts a task to poll notifications on the remote peer. Aborted when the peer connection is
/// reset.
fn start_notifications_processing_task(peer: Arc<RwLock<RemotePeer>>) -> AbortHandle {
let (handle, registration) = AbortHandle::new_pair();
fasync::spawn(
Abortable::new(
async move {
pump_notifications(peer).await;
},
registration,
)
.map(|_| ()),
);
handle
}
/// Starts a task to poll control messages from the peer. Aborted when the peer connection is
/// reset. Started when we have a connection to the remote peer and we have any type of valid SDP
/// profile from the peer.
fn start_control_stream_processing_task(peer: Arc<RwLock<RemotePeer>>) -> AbortHandle {
let (handle, registration) = AbortHandle::new_pair();
fasync::spawn(
Abortable::new(
async move {
process_control_stream(peer).await;
},
registration,
)
.map(|_| ()),
);
handle
}
/// State observer task around a remote peer. Takes a change stream from the remote peer that wakes
/// the task whenever some state has changed on the peer. Swaps tasks such as making outgoing
/// connections, processing the incoming control messages, and registering for notifications on the
/// remote peer.
pub(super) async fn | (peer: Arc<RwLock<RemotePeer>>) {
fx_vlog!(tag: "avrcp", 2, "state_watcher starting");
let mut change_stream = peer.read().state_change_listener.take_change_stream();
let peer_weak = Arc::downgrade(&peer);
drop(peer);
let mut channel_processor_abort_handle: Option<AbortHandle> = None;
let mut notification_poll_abort_handle: Option<AbortHandle> = None;
while let Some(_) = change_stream.next().await {
fx_vlog!(tag: "avrcp", 2, "state_watcher command received");
if let Some(peer) = peer_weak.upgrade() {
let mut peer_guard = peer.write();
fx_vlog!(tag: "avrcp", 2, "make_connection control channel {:?}", peer_guard.control_channel);
match peer_guard.control_channel {
PeerChannel::Connecting => {}
PeerChannel::Disconnected => {
if let Some(ref abort_handle) = channel_processor_abort_handle {
abort_handle.abort();
channel_processor_abort_handle = None;
}
if let Some(ref abort_handle) = notification_poll_abort_handle {
abort_handle.abort();
notification_poll_abort_handle = None;
}
// Have we discovered service profile data on the peer?
if (peer_guard.target_descriptor.is_some()
|| peer_guard.controller_descriptor.is_some())
&& peer_guard.attempt_connection
{
fx_vlog!(tag: "avrcp", 2, "make_connection {:?}", peer_guard.peer_id);
peer_guard.attempt_connection = false;
peer_guard.control_channel = PeerChannel::Connecting;
start_make_connection_task(peer.clone());
}
}
PeerChannel::Connected(_) => {
// Have we discovered service profile data on the peer?
if (peer_guard.target_descriptor.is_some()
|| peer_guard.controller_descriptor.is_some())
&& channel_processor_abort_handle.is_none()
{
channel_processor_abort_handle =
Some(start_control_stream_processing_task(peer.clone()));
}
if peer_guard.target_descriptor.is_some()
&& notification_poll_abort_handle.is_none()
{
notification_poll_abort_handle =
Some(start_notifications_processing_task(peer.clone()));
}
}
}
} else {
break;
}
}
fx_vlog!(tag: "avrcp", 2, "state_watcher shutting down. aborting processors");
// Stop processing state changes entirely on the peer.
if let Some(ref abort_handle) = channel_processor_abort_handle {
abort_handle.abort();
}
if let Some(ref abort_handle) = notification_poll_abort_handle {
abort_handle.abort();
}
}
| state_watcher | identifier_name |
global.rs | // Copyright 2021 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Values that should be shared across all modules, without necessarily
//! having to pass them all over the place, but aren't consensus values.
//! They should be used sparingly.
use crate::consensus::{
graph_weight, header_version, HeaderDifficultyInfo, BASE_EDGE_BITS, BLOCK_TIME_SEC,
C32_GRAPH_WEIGHT, COINBASE_MATURITY, CUT_THROUGH_HORIZON, DAY_HEIGHT, DEFAULT_MIN_EDGE_BITS,
DMA_WINDOW, GRIN_BASE, INITIAL_DIFFICULTY, KERNEL_WEIGHT, MAX_BLOCK_WEIGHT, OUTPUT_WEIGHT,
PROOFSIZE, SECOND_POW_EDGE_BITS, STATE_SYNC_THRESHOLD,
};
use crate::core::block::{Block, HeaderVersion};
use crate::genesis;
use crate::pow::{
self, new_cuckaroo_ctx, new_cuckarood_ctx, new_cuckaroom_ctx, new_cuckarooz_ctx,
new_cuckatoo_ctx, no_cuckaroo_ctx, PoWContext, Proof,
};
use crate::ser::ProtocolVersion;
use std::cell::Cell;
use util::OneTime;
/// An enum collecting sets of parameters used throughout the
/// code wherever mining is needed. This should allow for
/// different sets of parameters for different purposes,
/// e.g. CI, User testing, production values
/// Define these here, as they should be developer-set, not really tweakable
/// by users
/// The default "local" protocol version for this node.
/// We negotiate compatible versions with each peer via Hand/Shake.
/// Note: We also use a specific (possibly different) protocol version
/// for both the backend database and MMR data files.
/// This defines the p2p layer protocol version for this node.
pub const PROTOCOL_VERSION: ProtocolVersion = ProtocolVersion(1_000);
/// Automated testing edge_bits
pub const AUTOMATED_TESTING_MIN_EDGE_BITS: u8 = 10;
/// Automated testing proof size
pub const AUTOMATED_TESTING_PROOF_SIZE: usize = 8;
/// User testing edge_bits
pub const USER_TESTING_MIN_EDGE_BITS: u8 = 15;
/// User testing proof size
pub const USER_TESTING_PROOF_SIZE: usize = 42;
/// Automated testing coinbase maturity
pub const AUTOMATED_TESTING_COINBASE_MATURITY: u64 = 3;
/// User testing coinbase maturity
pub const USER_TESTING_COINBASE_MATURITY: u64 = 3;
/// Testing cut through horizon in blocks
pub const AUTOMATED_TESTING_CUT_THROUGH_HORIZON: u32 = 20;
/// Testing cut through horizon in blocks
pub const USER_TESTING_CUT_THROUGH_HORIZON: u32 = 70;
/// Testing state sync threshold in blocks
pub const TESTING_STATE_SYNC_THRESHOLD: u32 = 20;
/// Testing initial block difficulty
pub const TESTING_INITIAL_DIFFICULTY: u64 = 1;
/// Testing max_block_weight (artificially low, just enough to support a few txs).
pub const TESTING_MAX_BLOCK_WEIGHT: u64 = 250;
/// Default unit of fee per tx weight, making each output cost about a Grincent
pub const DEFAULT_ACCEPT_FEE_BASE: u64 = GRIN_BASE / 100 / 20; // 500_000
/// default Future Time Limit (FTL) of 5 minutes
pub const DEFAULT_FUTURE_TIME_LIMIT: u64 = 5 * 60;
/// If a peer's last updated difficulty is 2 hours ago and its difficulty is lower than ours,
/// we're sure this peer is a stuck node, and we will kick out such stuck peers.
pub const STUCK_PEER_KICK_TIME: i64 = 2 * 3600 * 1000;
/// If a peer's last seen time is 2 weeks ago we will forget such defunct peers.
const PEER_EXPIRATION_DAYS: i64 = 7 * 2;
/// Constant that expresses defunct peer timeout in seconds to be used in checks.
pub const PEER_EXPIRATION_REMOVE_TIME: i64 = PEER_EXPIRATION_DAYS * 24 * 3600;
/// Trigger compaction check on average every day for all nodes.
/// Randomized per node - roll the dice on every block to decide.
/// Will compact the txhashset to remove pruned data.
/// Will also remove old blocks and associated data from the database.
/// For a node configured as "archival_mode = true" only the txhashset will be compacted.
pub const COMPACTION_CHECK: u64 = DAY_HEIGHT;
/// Number of blocks to reuse a txhashset zip for (automated testing and user testing).
pub const TESTING_TXHASHSET_ARCHIVE_INTERVAL: u64 = 10;
/// Number of blocks to reuse a txhashset zip for.
pub const TXHASHSET_ARCHIVE_INTERVAL: u64 = 12 * 60;
/// Types of chain a server can run with; dictates the genesis block
/// and mining parameters used.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq)]
pub enum ChainTypes {
/// For CI testing
AutomatedTesting,
/// For User testing
UserTesting,
/// Protocol testing network
Testnet,
/// Main production network
Mainnet,
}
impl ChainTypes {
/// Short name representing the chain type ("test", "main", etc.)
pub fn shortname(&self) -> String {
match *self {
ChainTypes::AutomatedTesting => "auto".to_owned(),
ChainTypes::UserTesting => "user".to_owned(),
ChainTypes::Testnet => "test".to_owned(),
ChainTypes::Mainnet => "main".to_owned(),
}
}
}
impl Default for ChainTypes {
fn default() -> ChainTypes {
ChainTypes::Mainnet
}
}
lazy_static! {
/// Global chain_type that must be initialized once on node startup.
/// This is accessed via get_chain_type() which allows the global value
/// to be overridden on a per-thread basis (for testing).
pub static ref GLOBAL_CHAIN_TYPE: OneTime<ChainTypes> = OneTime::new();
/// Global accept fee base that must be initialized once on node startup.
/// This is accessed via get_accept_fee_base() which allows the global value
/// to be overridden on a per-thread basis (for testing).
pub static ref GLOBAL_ACCEPT_FEE_BASE: OneTime<u64> = OneTime::new();
/// Global future time limit that must be initialized once on node startup.
/// This is accessed via get_future_time_limit() which allows the global value
/// to be overridden on a per-thread basis (for testing).
pub static ref GLOBAL_FUTURE_TIME_LIMIT: OneTime<u64> = OneTime::new();
/// Global feature flag for NRD kernel support.
/// If enabled NRD kernels are treated as valid after HF3 (based on header version).
/// If disabled NRD kernels are invalid regardless of header version or block height.
pub static ref GLOBAL_NRD_FEATURE_ENABLED: OneTime<bool> = OneTime::new();
}
thread_local! {
/// Mainnet|Testnet|UserTesting|AutomatedTesting
pub static CHAIN_TYPE: Cell<Option<ChainTypes>> = Cell::new(None);
/// minimum transaction fee per unit of transaction weight for mempool acceptance
pub static ACCEPT_FEE_BASE: Cell<Option<u64>> = Cell::new(None);
/// maximum number of seconds into future for timestamp of block to be acceptable
pub static FUTURE_TIME_LIMIT: Cell<Option<u64>> = Cell::new(None);
/// Local feature flag for NRD kernel support.
pub static NRD_FEATURE_ENABLED: Cell<Option<bool>> = Cell::new(None);
}
/// One time initialization of the global chain_type.
/// Will panic if we attempt to re-initialize this (via OneTime).
pub fn init_global_chain_type(new_type: ChainTypes) {
GLOBAL_CHAIN_TYPE.init(new_type)
}
/// Set the global chain_type using an override
pub fn set_global_chain_type(new_type: ChainTypes) {
GLOBAL_CHAIN_TYPE.set(new_type, true);
}
/// Set the chain type on a per-thread basis via thread_local storage.
pub fn set_local_chain_type(new_type: ChainTypes) {
CHAIN_TYPE.with(|chain_type| chain_type.set(Some(new_type)))
}
/// Get the chain type via thread_local, falling back to the global chain_type.
pub fn get_chain_type() -> ChainTypes {
CHAIN_TYPE.with(|chain_type| match chain_type.get() {
None => {
if !GLOBAL_CHAIN_TYPE.is_init() {
panic!("GLOBAL_CHAIN_TYPE and CHAIN_TYPE unset. Consider set_local_chain_type() in tests.");
}
let chain_type = GLOBAL_CHAIN_TYPE.borrow();
set_local_chain_type(chain_type);
chain_type
}
Some(chain_type) => chain_type,
})
}
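// Illustrative sketch (hypothetical test): the thread-local override is what lets
// tests run without ever initializing GLOBAL_CHAIN_TYPE -- once the local cell is
// set, the lookup above never reaches the global.
#[cfg(test)]
mod chain_type_lookup_sketch {
    use super::*;
    #[test]
    fn local_chain_type_shadows_the_global() {
        set_local_chain_type(ChainTypes::AutomatedTesting);
        assert_eq!(get_chain_type(), ChainTypes::AutomatedTesting);
    }
}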
/// Return genesis block for the active chain type
pub fn get_genesis_block() -> Block {
match get_chain_type() {
ChainTypes::Mainnet => genesis::genesis_main(),
ChainTypes::Testnet => genesis::genesis_test(),
_ => genesis::genesis_dev(),
}
}
/// One time initialization of the global future time limit
/// Will panic if we attempt to re-initialize this (via OneTime).
pub fn init_global_future_time_limit(new_ftl: u64) {
GLOBAL_FUTURE_TIME_LIMIT.init(new_ftl)
}
/// The global future time limit may be reset again using the override
pub fn set_global_future_time_limit(new_ftl: u64) {
GLOBAL_FUTURE_TIME_LIMIT.set(new_ftl, true)
}
/// One time initialization of the global accept fee base
/// Will panic if we attempt to re-initialize this (via OneTime).
pub fn init_global_accept_fee_base(new_base: u64) {
GLOBAL_ACCEPT_FEE_BASE.init(new_base)
}
/// The global accept fee base may be reset using override.
pub fn set_global_accept_fee_base(new_base: u64) {
GLOBAL_ACCEPT_FEE_BASE.set(new_base, true)
}
/// Set the accept fee base on a per-thread basis via thread_local storage.
pub fn set_local_accept_fee_base(new_base: u64) {
ACCEPT_FEE_BASE.with(|base| base.set(Some(new_base)))
}
/// Accept Fee Base
/// Look at thread-local config first. If not set, fall back to global config.
/// Default to grin-cent/20 if global config unset.
pub fn get_accept_fee_base() -> u64 {
ACCEPT_FEE_BASE.with(|base| match base.get() {
None => {
let base = if GLOBAL_ACCEPT_FEE_BASE.is_init() {
GLOBAL_ACCEPT_FEE_BASE.borrow()
} else {
DEFAULT_ACCEPT_FEE_BASE
};
set_local_accept_fee_base(base);
base
}
Some(base) => base,
})
}
/// Set the future time limit on a per-thread basis via thread_local storage.
pub fn set_local_future_time_limit(new_ftl: u64) {
FUTURE_TIME_LIMIT.with(|ftl| ftl.set(Some(new_ftl)))
}
/// Future Time Limit (FTL)
/// Look at thread-local config first. If not set, fall back to global config.
/// Default to DEFAULT_FUTURE_TIME_LIMIT if global config unset.
pub fn get_future_time_limit() -> u64 {
FUTURE_TIME_LIMIT.with(|ftl| match ftl.get() {
None => {
let ftl = if GLOBAL_FUTURE_TIME_LIMIT.is_init() {
GLOBAL_FUTURE_TIME_LIMIT.borrow()
} else {
DEFAULT_FUTURE_TIME_LIMIT
};
set_local_future_time_limit(ftl);
ftl
}
Some(ftl) => ftl,
})
}
/// One time initialization of the global NRD feature flag.
/// Will panic if we attempt to re-initialize this (via OneTime).
pub fn init_global_nrd_enabled(enabled: bool) |
/// Set the global NRD feature flag using override.
pub fn set_global_nrd_enabled(enabled: bool) {
GLOBAL_NRD_FEATURE_ENABLED.set(enabled, true)
}
/// Explicitly enable the local NRD feature flag.
pub fn set_local_nrd_enabled(enabled: bool) {
NRD_FEATURE_ENABLED.with(|flag| flag.set(Some(enabled)))
}
/// Is the NRD feature flag enabled?
/// Look at thread-local config first. If not set, fall back to global config.
/// Default to false if global config unset.
pub fn is_nrd_enabled() -> bool {
NRD_FEATURE_ENABLED.with(|flag| match flag.get() {
None => {
if GLOBAL_NRD_FEATURE_ENABLED.is_init() {
let global_flag = GLOBAL_NRD_FEATURE_ENABLED.borrow();
flag.set(Some(global_flag));
global_flag
} else {
// Global config unset, default to false.
false
}
}
Some(flag) => flag,
})
}
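// Illustrative sketch (hypothetical test): the local NRD flag wins over whatever
// the global OneTime holds, mirroring the lookup order in is_nrd_enabled().
#[cfg(test)]
mod nrd_flag_sketch {
    use super::*;
    #[test]
    fn local_nrd_flag_takes_precedence() {
        set_local_nrd_enabled(true);
        assert!(is_nrd_enabled());
        set_local_nrd_enabled(false);
        assert!(!is_nrd_enabled());
    }
}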
/// Return either a cuckaroo* context or a cuckatoo context
/// Single change point
pub fn create_pow_context<T>(
height: u64,
edge_bits: u8,
proof_size: usize,
max_sols: u32,
) -> Result<Box<dyn PoWContext>, pow::Error> {
let chain_type = get_chain_type();
if chain_type == ChainTypes::Mainnet || chain_type == ChainTypes::Testnet {
// Mainnet and Testnet have Cuckatoo31+ for AF and Cuckaroo{,d,m,z}29 for AR
if edge_bits > 29 {
new_cuckatoo_ctx(edge_bits, proof_size, max_sols)
} else {
match header_version(height) {
HeaderVersion(1) => new_cuckaroo_ctx(edge_bits, proof_size),
HeaderVersion(2) => new_cuckarood_ctx(edge_bits, proof_size),
HeaderVersion(3) => new_cuckaroom_ctx(edge_bits, proof_size),
HeaderVersion(4) => new_cuckarooz_ctx(edge_bits, proof_size),
_ => no_cuckaroo_ctx(),
}
}
} else {
// Everything else is Cuckatoo only
new_cuckatoo_ctx(edge_bits, proof_size, max_sols)
}
}
/// The minimum acceptable edge_bits
pub fn min_edge_bits() -> u8 {
match get_chain_type() {
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_MIN_EDGE_BITS,
ChainTypes::UserTesting => USER_TESTING_MIN_EDGE_BITS,
_ => DEFAULT_MIN_EDGE_BITS,
}
}
/// Reference edge_bits used to compute factor on higher Cuck(at)oo graph sizes,
/// while the min_edge_bits can be changed on a soft fork, changing
/// base_edge_bits is a hard fork.
pub fn base_edge_bits() -> u8 {
match get_chain_type() {
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_MIN_EDGE_BITS,
ChainTypes::UserTesting => USER_TESTING_MIN_EDGE_BITS,
_ => BASE_EDGE_BITS,
}
}
/// The proofsize
pub fn proofsize() -> usize {
match get_chain_type() {
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_PROOF_SIZE,
ChainTypes::UserTesting => USER_TESTING_PROOF_SIZE,
_ => PROOFSIZE,
}
}
/// Coinbase maturity for coinbases to be spent
pub fn coinbase_maturity() -> u64 {
match get_chain_type() {
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_COINBASE_MATURITY,
ChainTypes::UserTesting => USER_TESTING_COINBASE_MATURITY,
_ => COINBASE_MATURITY,
}
}
/// Initial mining difficulty
pub fn initial_block_difficulty() -> u64 {
match get_chain_type() {
ChainTypes::AutomatedTesting => TESTING_INITIAL_DIFFICULTY,
ChainTypes::UserTesting => TESTING_INITIAL_DIFFICULTY,
ChainTypes::Testnet => INITIAL_DIFFICULTY,
ChainTypes::Mainnet => INITIAL_DIFFICULTY,
}
}
/// Initial mining secondary scale
pub fn initial_graph_weight() -> u32 {
match get_chain_type() {
ChainTypes::AutomatedTesting => graph_weight(0, AUTOMATED_TESTING_MIN_EDGE_BITS) as u32,
ChainTypes::UserTesting => graph_weight(0, USER_TESTING_MIN_EDGE_BITS) as u32,
ChainTypes::Testnet => graph_weight(0, SECOND_POW_EDGE_BITS) as u32,
ChainTypes::Mainnet => graph_weight(0, SECOND_POW_EDGE_BITS) as u32,
}
}
/// Minimum valid graph weight post HF4
pub fn min_wtema_graph_weight() -> u64 {
match get_chain_type() {
ChainTypes::AutomatedTesting => graph_weight(0, AUTOMATED_TESTING_MIN_EDGE_BITS),
ChainTypes::UserTesting => graph_weight(0, USER_TESTING_MIN_EDGE_BITS),
ChainTypes::Testnet => graph_weight(0, SECOND_POW_EDGE_BITS),
ChainTypes::Mainnet => C32_GRAPH_WEIGHT,
}
}
/// Maximum allowed block weight.
pub fn max_block_weight() -> u64 {
match get_chain_type() {
ChainTypes::AutomatedTesting => TESTING_MAX_BLOCK_WEIGHT,
ChainTypes::UserTesting => TESTING_MAX_BLOCK_WEIGHT,
ChainTypes::Testnet => MAX_BLOCK_WEIGHT,
ChainTypes::Mainnet => MAX_BLOCK_WEIGHT,
}
}
/// Maximum allowed transaction weight (1 weight unit ~= 32 bytes)
pub fn max_tx_weight() -> u64 {
let coinbase_weight = OUTPUT_WEIGHT + KERNEL_WEIGHT;
max_block_weight().saturating_sub(coinbase_weight) as u64
}
/// Horizon at which we can cut-through and do full local pruning
pub fn cut_through_horizon() -> u32 {
match get_chain_type() {
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_CUT_THROUGH_HORIZON,
ChainTypes::UserTesting => USER_TESTING_CUT_THROUGH_HORIZON,
_ => CUT_THROUGH_HORIZON,
}
}
/// Threshold at which we can request a txhashset (and full blocks from)
pub fn state_sync_threshold() -> u32 {
match get_chain_type() {
ChainTypes::AutomatedTesting => TESTING_STATE_SYNC_THRESHOLD,
ChainTypes::UserTesting => TESTING_STATE_SYNC_THRESHOLD,
_ => STATE_SYNC_THRESHOLD,
}
}
/// Number of blocks to reuse a txhashset zip for.
pub fn txhashset_archive_interval() -> u64 {
match get_chain_type() {
ChainTypes::AutomatedTesting => TESTING_TXHASHSET_ARCHIVE_INTERVAL,
ChainTypes::UserTesting => TESTING_TXHASHSET_ARCHIVE_INTERVAL,
_ => TXHASHSET_ARCHIVE_INTERVAL,
}
}
/// Are we in production mode?
/// Production defined as a live public network, testnet[n] or mainnet.
pub fn is_production_mode() -> bool {
match get_chain_type() {
ChainTypes::Testnet => true,
ChainTypes::Mainnet => true,
_ => false,
}
}
/// Are we in testnet?
/// Note: We do not have a corresponding is_mainnet() as we want any tests to be
/// as close as possible to the "mainnet" configuration.
/// We want to avoid missing any mainnet only code paths.
pub fn is_testnet() -> bool {
match get_chain_type() {
ChainTypes::Testnet => true,
_ => false,
}
}
/// Converts an iterator of block difficulty data to a more manageable
/// vector, padding it if needed (which will only be needed for the first few
/// blocks after genesis)
pub fn difficulty_data_to_vector<T>(cursor: T) -> Vec<HeaderDifficultyInfo>
where
T: IntoIterator<Item = HeaderDifficultyInfo>,
{
// Convert iterator to vector, so we can append to it if necessary
let needed_block_count = DMA_WINDOW as usize + 1;
let mut last_n: Vec<HeaderDifficultyInfo> =
cursor.into_iter().take(needed_block_count).collect();
// Only needed just after blockchain launch... basically ensures there's
// always enough data by simulating perfectly timed pre-genesis
// blocks at the genesis difficulty as needed.
let n = last_n.len();
if needed_block_count > n {
let last_ts_delta = if n > 1 {
last_n[0].timestamp - last_n[1].timestamp
} else {
BLOCK_TIME_SEC
};
let last_diff = last_n[0].difficulty;
// fill in simulated blocks with values from the previous real block
let mut last_ts = last_n.last().unwrap().timestamp;
for _ in n..needed_block_count {
last_ts = last_ts.saturating_sub(last_ts_delta);
last_n.push(HeaderDifficultyInfo::from_ts_diff(last_ts, last_diff));
}
}
last_n.reverse();
last_n
}
/// Calculates the size of a header (in bytes) given a number of edge bits in the PoW
#[inline]
pub fn header_size_bytes(edge_bits: u8) -> usize {
let size = 2 + 2 * 8 + 5 * 32 + 32 + 2 * 8;
let proof_size = 8 + 4 + 8 + 1 + Proof::pack_len(edge_bits);
size + proof_size
}
#[cfg(test)]
mod test {
use super::*;
use crate::core::Block;
use crate::genesis::*;
use crate::pow::mine_genesis_block;
use crate::ser::{BinWriter, Writeable};
fn test_header_len(genesis: Block) {
let mut raw = Vec::<u8>::with_capacity(1_024);
let mut writer = BinWriter::new(&mut raw, ProtocolVersion::local());
genesis.header.write(&mut writer).unwrap();
assert_eq!(raw.len(), header_size_bytes(genesis.header.pow.edge_bits()));
}
#[test]
fn automated_testing_header_len() {
set_local_chain_type(ChainTypes::AutomatedTesting);
test_header_len(mine_genesis_block().unwrap());
}
#[test]
fn user_testing_header_len() {
set_local_chain_type(ChainTypes::UserTesting);
test_header_len(mine_genesis_block().unwrap());
}
#[test]
fn testnet_header_len() {
set_local_chain_type(ChainTypes::Testnet);
test_header_len(genesis_test());
}
#[test]
fn mainnet_header_len() {
set_local_chain_type(ChainTypes::Mainnet);
test_header_len(genesis_main());
}
}
| {
GLOBAL_NRD_FEATURE_ENABLED.init(enabled)
} | identifier_body |
global.rs | // Copyright 2021 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Values that should be shared across all modules, without necessarily
//! having to pass them all over the place, but aren't consensus values.
//! They should be used sparingly.
use crate::consensus::{
graph_weight, header_version, HeaderDifficultyInfo, BASE_EDGE_BITS, BLOCK_TIME_SEC,
C32_GRAPH_WEIGHT, COINBASE_MATURITY, CUT_THROUGH_HORIZON, DAY_HEIGHT, DEFAULT_MIN_EDGE_BITS,
DMA_WINDOW, GRIN_BASE, INITIAL_DIFFICULTY, KERNEL_WEIGHT, MAX_BLOCK_WEIGHT, OUTPUT_WEIGHT,
PROOFSIZE, SECOND_POW_EDGE_BITS, STATE_SYNC_THRESHOLD,
};
use crate::core::block::{Block, HeaderVersion};
use crate::genesis;
use crate::pow::{
self, new_cuckaroo_ctx, new_cuckarood_ctx, new_cuckaroom_ctx, new_cuckarooz_ctx,
new_cuckatoo_ctx, no_cuckaroo_ctx, PoWContext, Proof,
};
use crate::ser::ProtocolVersion;
use std::cell::Cell;
use util::OneTime;
/// An enum collecting sets of parameters used throughout the
/// code wherever mining is needed. This should allow for
/// different sets of parameters for different purposes,
/// e.g. CI, User testing, production values
/// Define these here, as they should be developer-set, not really tweakable
/// by users
/// The default "local" protocol version for this node.
/// We negotiate compatible versions with each peer via Hand/Shake.
/// Note: We also use a specific (possibly different) protocol version
/// for both the backend database and MMR data files.
/// This defines the p2p layer protocol version for this node.
pub const PROTOCOL_VERSION: ProtocolVersion = ProtocolVersion(1_000);
/// Automated testing edge_bits
pub const AUTOMATED_TESTING_MIN_EDGE_BITS: u8 = 10;
/// Automated testing proof size
pub const AUTOMATED_TESTING_PROOF_SIZE: usize = 8;
/// User testing edge_bits
pub const USER_TESTING_MIN_EDGE_BITS: u8 = 15;
/// User testing proof size
pub const USER_TESTING_PROOF_SIZE: usize = 42;
/// Automated testing coinbase maturity
pub const AUTOMATED_TESTING_COINBASE_MATURITY: u64 = 3;
/// User testing coinbase maturity
pub const USER_TESTING_COINBASE_MATURITY: u64 = 3;
/// Testing cut through horizon in blocks
pub const AUTOMATED_TESTING_CUT_THROUGH_HORIZON: u32 = 20;
/// Testing cut through horizon in blocks
pub const USER_TESTING_CUT_THROUGH_HORIZON: u32 = 70;
/// Testing state sync threshold in blocks
pub const TESTING_STATE_SYNC_THRESHOLD: u32 = 20;
/// Testing initial block difficulty
pub const TESTING_INITIAL_DIFFICULTY: u64 = 1;
/// Testing max_block_weight (artificially low, just enough to support a few txs).
pub const TESTING_MAX_BLOCK_WEIGHT: u64 = 250;
/// Default unit of fee per tx weight, making each output cost about a Grincent
pub const DEFAULT_ACCEPT_FEE_BASE: u64 = GRIN_BASE / 100 / 20; // 500_000
/// default Future Time Limit (FTL) of 5 minutes
pub const DEFAULT_FUTURE_TIME_LIMIT: u64 = 5 * 60;
/// If a peer's last updated difficulty is 2 hours ago and its difficulty is lower than ours,
/// we're sure this peer is a stuck node, and we will kick out such stuck peers.
pub const STUCK_PEER_KICK_TIME: i64 = 2 * 3600 * 1000;
/// If a peer's last seen time is 2 weeks ago we will forget such defunct peers.
const PEER_EXPIRATION_DAYS: i64 = 7 * 2;
/// Constant that expresses defunct peer timeout in seconds to be used in checks.
pub const PEER_EXPIRATION_REMOVE_TIME: i64 = PEER_EXPIRATION_DAYS * 24 * 3600;
/// Trigger compaction check on average every day for all nodes.
/// Randomized per node - roll the dice on every block to decide.
/// Will compact the txhashset to remove pruned data.
/// Will also remove old blocks and associated data from the database.
/// For a node configured as "archival_mode = true" only the txhashset will be compacted.
pub const COMPACTION_CHECK: u64 = DAY_HEIGHT;
/// Number of blocks to reuse a txhashset zip for (automated testing and user testing).
pub const TESTING_TXHASHSET_ARCHIVE_INTERVAL: u64 = 10;
/// Number of blocks to reuse a txhashset zip for.
pub const TXHASHSET_ARCHIVE_INTERVAL: u64 = 12 * 60;
/// Types of chain a server can run with; dictates the genesis block
/// and mining parameters used.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq)]
pub enum ChainTypes {
/// For CI testing
AutomatedTesting,
/// For User testing
UserTesting,
/// Protocol testing network
Testnet,
/// Main production network
Mainnet,
}
impl ChainTypes {
/// Short name representing the chain type ("test", "main", etc.)
pub fn shortname(&self) -> String {
match *self {
ChainTypes::AutomatedTesting => "auto".to_owned(),
ChainTypes::UserTesting => "user".to_owned(),
ChainTypes::Testnet => "test".to_owned(),
ChainTypes::Mainnet => "main".to_owned(),
}
}
}
impl Default for ChainTypes {
fn default() -> ChainTypes {
ChainTypes::Mainnet
}
}
lazy_static! {
/// Global chain_type that must be initialized once on node startup.
/// This is accessed via get_chain_type() which allows the global value
/// to be overridden on a per-thread basis (for testing).
pub static ref GLOBAL_CHAIN_TYPE: OneTime<ChainTypes> = OneTime::new();
/// Global accept fee base that must be initialized once on node startup.
/// This is accessed via get_accept_fee_base() which allows the global value
/// to be overridden on a per-thread basis (for testing).
pub static ref GLOBAL_ACCEPT_FEE_BASE: OneTime<u64> = OneTime::new();
/// Global future time limit that must be initialized once on node startup.
/// This is accessed via get_future_time_limit() which allows the global value
/// to be overridden on a per-thread basis (for testing).
pub static ref GLOBAL_FUTURE_TIME_LIMIT: OneTime<u64> = OneTime::new();
/// Global feature flag for NRD kernel support.
/// If enabled NRD kernels are treated as valid after HF3 (based on header version).
/// If disabled NRD kernels are invalid regardless of header version or block height.
pub static ref GLOBAL_NRD_FEATURE_ENABLED: OneTime<bool> = OneTime::new();
}
thread_local! {
/// Mainnet|Testnet|UserTesting|AutomatedTesting
pub static CHAIN_TYPE: Cell<Option<ChainTypes>> = Cell::new(None);
/// minimum transaction fee per unit of transaction weight for mempool acceptance
pub static ACCEPT_FEE_BASE: Cell<Option<u64>> = Cell::new(None);
/// maximum number of seconds into future for timestamp of block to be acceptable
pub static FUTURE_TIME_LIMIT: Cell<Option<u64>> = Cell::new(None);
/// Local feature flag for NRD kernel support.
pub static NRD_FEATURE_ENABLED: Cell<Option<bool>> = Cell::new(None);
}
/// One time initialization of the global chain_type.
/// Will panic if we attempt to re-initialize this (via OneTime).
pub fn init_global_chain_type(new_type: ChainTypes) {
GLOBAL_CHAIN_TYPE.init(new_type)
}
/// Set the global chain_type using an override
pub fn set_global_chain_type(new_type: ChainTypes) {
GLOBAL_CHAIN_TYPE.set(new_type, true);
}
/// Set the chain type on a per-thread basis via thread_local storage.
pub fn set_local_chain_type(new_type: ChainTypes) {
CHAIN_TYPE.with(|chain_type| chain_type.set(Some(new_type)))
}
/// Get the chain type via thread_local, falling back to the global chain_type.
pub fn | () -> ChainTypes {
CHAIN_TYPE.with(|chain_type| match chain_type.get() {
None => {
if !GLOBAL_CHAIN_TYPE.is_init() {
panic!("GLOBAL_CHAIN_TYPE and CHAIN_TYPE unset. Consider set_local_chain_type() in tests.");
}
let chain_type = GLOBAL_CHAIN_TYPE.borrow();
set_local_chain_type(chain_type);
chain_type
}
Some(chain_type) => chain_type,
})
}
/// Return genesis block for the active chain type
pub fn get_genesis_block() -> Block {
match get_chain_type() {
ChainTypes::Mainnet => genesis::genesis_main(),
ChainTypes::Testnet => genesis::genesis_test(),
_ => genesis::genesis_dev(),
}
}
/// One time initialization of the global future time limit
/// Will panic if we attempt to re-initialize this (via OneTime).
pub fn init_global_future_time_limit(new_ftl: u64) {
GLOBAL_FUTURE_TIME_LIMIT.init(new_ftl)
}
/// The global future time limit may be reset again using the override
pub fn set_global_future_time_limit(new_ftl: u64) {
GLOBAL_FUTURE_TIME_LIMIT.set(new_ftl, true)
}
/// One time initialization of the global accept fee base
/// Will panic if we attempt to re-initialize this (via OneTime).
pub fn init_global_accept_fee_base(new_base: u64) {
GLOBAL_ACCEPT_FEE_BASE.init(new_base)
}
/// The global accept fee base may be reset using override.
pub fn set_global_accept_fee_base(new_base: u64) {
GLOBAL_ACCEPT_FEE_BASE.set(new_base, true)
}
/// Set the accept fee base on a per-thread basis via thread_local storage.
pub fn set_local_accept_fee_base(new_base: u64) {
ACCEPT_FEE_BASE.with(|base| base.set(Some(new_base)))
}
/// Accept Fee Base
/// Look at thread-local config first. If not set, fall back to global config.
/// Default to grin-cent/20 if global config unset.
pub fn get_accept_fee_base() -> u64 {
ACCEPT_FEE_BASE.with(|base| match base.get() {
None => {
let base = if GLOBAL_ACCEPT_FEE_BASE.is_init() {
GLOBAL_ACCEPT_FEE_BASE.borrow()
} else {
DEFAULT_ACCEPT_FEE_BASE
};
set_local_accept_fee_base(base);
base
}
Some(base) => base,
})
}
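// Illustrative sketch (hypothetical test): a per-thread fee base set via
// set_local_accept_fee_base() is what get_accept_fee_base() returns on that
// thread, regardless of the global OneTime.
#[cfg(test)]
mod accept_fee_base_sketch {
    use super::*;
    #[test]
    fn local_fee_base_takes_precedence() {
        set_local_accept_fee_base(1_000);
        assert_eq!(get_accept_fee_base(), 1_000);
    }
}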
/// Set the future time limit on a per-thread basis via thread_local storage.
pub fn set_local_future_time_limit(new_ftl: u64) {
FUTURE_TIME_LIMIT.with(|ftl| ftl.set(Some(new_ftl)))
}
/// Future Time Limit (FTL)
/// Look at thread-local config first. If not set, fall back to global config.
/// Default to DEFAULT_FUTURE_TIME_LIMIT if global config unset.
pub fn get_future_time_limit() -> u64 {
FUTURE_TIME_LIMIT.with(|ftl| match ftl.get() {
None => {
let ftl = if GLOBAL_FUTURE_TIME_LIMIT.is_init() {
GLOBAL_FUTURE_TIME_LIMIT.borrow()
} else {
DEFAULT_FUTURE_TIME_LIMIT
};
set_local_future_time_limit(ftl);
ftl
}
Some(ftl) => ftl,
})
}
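// Illustrative sketch (hypothetical test): the FTL follows the same
// thread-local-first pattern as the accept fee base.
#[cfg(test)]
mod future_time_limit_sketch {
    use super::*;
    #[test]
    fn local_ftl_takes_precedence() {
        set_local_future_time_limit(42);
        assert_eq!(get_future_time_limit(), 42);
    }
}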
/// One time initialization of the global NRD feature flag.
/// Will panic if we attempt to re-initialize this (via OneTime).
pub fn init_global_nrd_enabled(enabled: bool) {
GLOBAL_NRD_FEATURE_ENABLED.init(enabled)
}
/// Set the global NRD feature flag using override.
pub fn set_global_nrd_enabled(enabled: bool) {
GLOBAL_NRD_FEATURE_ENABLED.set(enabled, true)
}
/// Explicitly enable the local NRD feature flag.
pub fn set_local_nrd_enabled(enabled: bool) {
NRD_FEATURE_ENABLED.with(|flag| flag.set(Some(enabled)))
}
/// Is the NRD feature flag enabled?
/// Look at thread-local config first. If not set, fall back to global config.
/// Default to false if global config unset.
pub fn is_nrd_enabled() -> bool {
NRD_FEATURE_ENABLED.with(|flag| match flag.get() {
None => {
if GLOBAL_NRD_FEATURE_ENABLED.is_init() {
let global_flag = GLOBAL_NRD_FEATURE_ENABLED.borrow();
flag.set(Some(global_flag));
global_flag
} else {
// Global config unset, default to false.
false
}
}
Some(flag) => flag,
})
}
/// Return either a cuckaroo* context or a cuckatoo context
/// Single change point
pub fn create_pow_context<T>(
height: u64,
edge_bits: u8,
proof_size: usize,
max_sols: u32,
) -> Result<Box<dyn PoWContext>, pow::Error> {
let chain_type = get_chain_type();
if chain_type == ChainTypes::Mainnet || chain_type == ChainTypes::Testnet {
// Mainnet and Testnet have Cuckatoo31+ for AF and Cuckaroo{,d,m,z}29 for AR
if edge_bits > 29 {
new_cuckatoo_ctx(edge_bits, proof_size, max_sols)
} else {
match header_version(height) {
HeaderVersion(1) => new_cuckaroo_ctx(edge_bits, proof_size),
HeaderVersion(2) => new_cuckarood_ctx(edge_bits, proof_size),
HeaderVersion(3) => new_cuckaroom_ctx(edge_bits, proof_size),
HeaderVersion(4) => new_cuckarooz_ctx(edge_bits, proof_size),
_ => no_cuckaroo_ctx(),
}
}
} else {
// Everything else is Cuckatoo only
new_cuckatoo_ctx(edge_bits, proof_size, max_sols)
}
}
/// The minimum acceptable edge_bits
pub fn min_edge_bits() -> u8 {
match get_chain_type() {
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_MIN_EDGE_BITS,
ChainTypes::UserTesting => USER_TESTING_MIN_EDGE_BITS,
_ => DEFAULT_MIN_EDGE_BITS,
}
}
/// Reference edge_bits used to compute factor on higher Cuck(at)oo graph sizes,
/// while the min_edge_bits can be changed on a soft fork, changing
/// base_edge_bits is a hard fork.
pub fn base_edge_bits() -> u8 {
match get_chain_type() {
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_MIN_EDGE_BITS,
ChainTypes::UserTesting => USER_TESTING_MIN_EDGE_BITS,
_ => BASE_EDGE_BITS,
}
}
/// The proofsize
pub fn proofsize() -> usize {
match get_chain_type() {
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_PROOF_SIZE,
ChainTypes::UserTesting => USER_TESTING_PROOF_SIZE,
_ => PROOFSIZE,
}
}
/// Coinbase maturity for coinbases to be spent
pub fn coinbase_maturity() -> u64 {
match get_chain_type() {
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_COINBASE_MATURITY,
ChainTypes::UserTesting => USER_TESTING_COINBASE_MATURITY,
_ => COINBASE_MATURITY,
}
}
/// Initial mining difficulty
pub fn initial_block_difficulty() -> u64 {
match get_chain_type() {
ChainTypes::AutomatedTesting => TESTING_INITIAL_DIFFICULTY,
ChainTypes::UserTesting => TESTING_INITIAL_DIFFICULTY,
ChainTypes::Testnet => INITIAL_DIFFICULTY,
ChainTypes::Mainnet => INITIAL_DIFFICULTY,
}
}
/// Initial mining secondary scale
pub fn initial_graph_weight() -> u32 {
match get_chain_type() {
ChainTypes::AutomatedTesting => graph_weight(0, AUTOMATED_TESTING_MIN_EDGE_BITS) as u32,
ChainTypes::UserTesting => graph_weight(0, USER_TESTING_MIN_EDGE_BITS) as u32,
ChainTypes::Testnet => graph_weight(0, SECOND_POW_EDGE_BITS) as u32,
ChainTypes::Mainnet => graph_weight(0, SECOND_POW_EDGE_BITS) as u32,
}
}
/// Minimum valid graph weight post HF4
pub fn min_wtema_graph_weight() -> u64 {
match get_chain_type() {
ChainTypes::AutomatedTesting => graph_weight(0, AUTOMATED_TESTING_MIN_EDGE_BITS),
ChainTypes::UserTesting => graph_weight(0, USER_TESTING_MIN_EDGE_BITS),
ChainTypes::Testnet => graph_weight(0, SECOND_POW_EDGE_BITS),
ChainTypes::Mainnet => C32_GRAPH_WEIGHT,
}
}
/// Maximum allowed block weight.
pub fn max_block_weight() -> u64 {
match get_chain_type() {
ChainTypes::AutomatedTesting => TESTING_MAX_BLOCK_WEIGHT,
ChainTypes::UserTesting => TESTING_MAX_BLOCK_WEIGHT,
ChainTypes::Testnet => MAX_BLOCK_WEIGHT,
ChainTypes::Mainnet => MAX_BLOCK_WEIGHT,
}
}
/// Maximum allowed transaction weight (1 weight unit ~= 32 bytes)
pub fn max_tx_weight() -> u64 {
let coinbase_weight = OUTPUT_WEIGHT + KERNEL_WEIGHT;
max_block_weight().saturating_sub(coinbase_weight) as u64
}
/// Horizon at which we can cut-through and do full local pruning
pub fn cut_through_horizon() -> u32 {
match get_chain_type() {
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_CUT_THROUGH_HORIZON,
ChainTypes::UserTesting => USER_TESTING_CUT_THROUGH_HORIZON,
_ => CUT_THROUGH_HORIZON,
}
}
/// Threshold at which we can request a txhashset (and full blocks from)
pub fn state_sync_threshold() -> u32 {
match get_chain_type() {
ChainTypes::AutomatedTesting => TESTING_STATE_SYNC_THRESHOLD,
ChainTypes::UserTesting => TESTING_STATE_SYNC_THRESHOLD,
_ => STATE_SYNC_THRESHOLD,
}
}
/// Number of blocks to reuse a txhashset zip for.
pub fn txhashset_archive_interval() -> u64 {
match get_chain_type() {
ChainTypes::AutomatedTesting => TESTING_TXHASHSET_ARCHIVE_INTERVAL,
ChainTypes::UserTesting => TESTING_TXHASHSET_ARCHIVE_INTERVAL,
_ => TXHASHSET_ARCHIVE_INTERVAL,
}
}
/// Are we in production mode?
/// Production defined as a live public network, testnet[n] or mainnet.
pub fn is_production_mode() -> bool {
match get_chain_type() {
ChainTypes::Testnet => true,
ChainTypes::Mainnet => true,
_ => false,
}
}
/// Are we in testnet?
/// Note: We do not have a corresponding is_mainnet() as we want any tests to be
/// as close as possible to the "mainnet" configuration.
/// We want to avoid missing any mainnet only code paths.
pub fn is_testnet() -> bool {
match get_chain_type() {
ChainTypes::Testnet => true,
_ => false,
}
}
/// Converts an iterator of block difficulty data to a more manageable
/// vector, padding it if needed (which will only be needed for the first few
/// blocks after genesis)
pub fn difficulty_data_to_vector<T>(cursor: T) -> Vec<HeaderDifficultyInfo>
where
T: IntoIterator<Item = HeaderDifficultyInfo>,
{
// Convert iterator to vector, so we can append to it if necessary
let needed_block_count = DMA_WINDOW as usize + 1;
let mut last_n: Vec<HeaderDifficultyInfo> =
cursor.into_iter().take(needed_block_count).collect();
// Only needed just after blockchain launch... basically ensures there's
// always enough data by simulating perfectly timed pre-genesis
// blocks at the genesis difficulty as needed.
let n = last_n.len();
if needed_block_count > n {
let last_ts_delta = if n > 1 {
last_n[0].timestamp - last_n[1].timestamp
} else {
BLOCK_TIME_SEC
};
let last_diff = last_n[0].difficulty;
// fill in simulated blocks with values from the previous real block
let mut last_ts = last_n.last().unwrap().timestamp;
for _ in n..needed_block_count {
last_ts = last_ts.saturating_sub(last_ts_delta);
last_n.push(HeaderDifficultyInfo::from_ts_diff(last_ts, last_diff));
}
}
last_n.reverse();
last_n
}
/// Calculates the size of a header (in bytes) given a number of edge bits in the PoW
#[inline]
pub fn header_size_bytes(edge_bits: u8) -> usize {
let size = 2 + 2 * 8 + 5 * 32 + 32 + 2 * 8;
let proof_size = 8 + 4 + 8 + 1 + Proof::pack_len(edge_bits);
size + proof_size
}
#[cfg(test)]
mod test {
use super::*;
use crate::core::Block;
use crate::genesis::*;
use crate::pow::mine_genesis_block;
use crate::ser::{BinWriter, Writeable};
fn test_header_len(genesis: Block) {
let mut raw = Vec::<u8>::with_capacity(1_024);
let mut writer = BinWriter::new(&mut raw, ProtocolVersion::local());
genesis.header.write(&mut writer).unwrap();
assert_eq!(raw.len(), header_size_bytes(genesis.header.pow.edge_bits()));
}
#[test]
fn automated_testing_header_len() {
set_local_chain_type(ChainTypes::AutomatedTesting);
test_header_len(mine_genesis_block().unwrap());
}
#[test]
fn user_testing_header_len() {
set_local_chain_type(ChainTypes::UserTesting);
test_header_len(mine_genesis_block().unwrap());
}
#[test]
fn testnet_header_len() {
set_local_chain_type(ChainTypes::Testnet);
test_header_len(genesis_test());
}
#[test]
fn mainnet_header_len() {
set_local_chain_type(ChainTypes::Mainnet);
test_header_len(genesis_main());
}
}
| get_chain_type | identifier_name |
global.rs | // Copyright 2021 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Values that should be shared across all modules, without necessarily
//! having to pass them all over the place, but aren't consensus values.
//! They should be used sparingly.
use crate::consensus::{
graph_weight, header_version, HeaderDifficultyInfo, BASE_EDGE_BITS, BLOCK_TIME_SEC,
C32_GRAPH_WEIGHT, COINBASE_MATURITY, CUT_THROUGH_HORIZON, DAY_HEIGHT, DEFAULT_MIN_EDGE_BITS,
DMA_WINDOW, GRIN_BASE, INITIAL_DIFFICULTY, KERNEL_WEIGHT, MAX_BLOCK_WEIGHT, OUTPUT_WEIGHT,
PROOFSIZE, SECOND_POW_EDGE_BITS, STATE_SYNC_THRESHOLD,
};
use crate::core::block::{Block, HeaderVersion};
use crate::genesis;
use crate::pow::{
self, new_cuckaroo_ctx, new_cuckarood_ctx, new_cuckaroom_ctx, new_cuckarooz_ctx,
new_cuckatoo_ctx, no_cuckaroo_ctx, PoWContext, Proof,
};
use crate::ser::ProtocolVersion;
use std::cell::Cell;
use util::OneTime;
/// An enum collecting sets of parameters used throughout the
/// code wherever mining is needed. This should allow for
/// different sets of parameters for different purposes,
/// e.g. CI, User testing, production values
/// Define these here, as they should be developer-set, not really tweakable
/// by users
/// The default "local" protocol version for this node.
/// We negotiate compatible versions with each peer via Hand/Shake.
/// Note: We also use a specific (possibly different) protocol version
/// for both the backend database and MMR data files.
/// This defines the p2p layer protocol version for this node.
pub const PROTOCOL_VERSION: ProtocolVersion = ProtocolVersion(1_000);
/// Automated testing edge_bits
pub const AUTOMATED_TESTING_MIN_EDGE_BITS: u8 = 10;
/// Automated testing proof size
pub const AUTOMATED_TESTING_PROOF_SIZE: usize = 8;
/// User testing edge_bits
pub const USER_TESTING_MIN_EDGE_BITS: u8 = 15;
/// User testing proof size
pub const USER_TESTING_PROOF_SIZE: usize = 42;
/// Automated testing coinbase maturity
pub const AUTOMATED_TESTING_COINBASE_MATURITY: u64 = 3;
/// User testing coinbase maturity
pub const USER_TESTING_COINBASE_MATURITY: u64 = 3;
/// Testing cut through horizon in blocks
pub const AUTOMATED_TESTING_CUT_THROUGH_HORIZON: u32 = 20;
/// Testing cut through horizon in blocks
pub const USER_TESTING_CUT_THROUGH_HORIZON: u32 = 70;
/// Testing state sync threshold in blocks
pub const TESTING_STATE_SYNC_THRESHOLD: u32 = 20;
/// Testing initial block difficulty
pub const TESTING_INITIAL_DIFFICULTY: u64 = 1;
/// Testing max_block_weight (artificially low, just enough to support a few txs).
pub const TESTING_MAX_BLOCK_WEIGHT: u64 = 250;
/// Default unit of fee per tx weight, making each output cost about a Grincent
pub const DEFAULT_ACCEPT_FEE_BASE: u64 = GRIN_BASE / 100 / 20; // 500_000
/// default Future Time Limit (FTL) of 5 minutes
pub const DEFAULT_FUTURE_TIME_LIMIT: u64 = 5 * 60;
/// If a peer's last updated difficulty is 2 hours ago and its difficulty is lower than ours,
/// we're sure this peer is a stuck node, and we will kick out such stuck peers.
pub const STUCK_PEER_KICK_TIME: i64 = 2 * 3600 * 1000;
/// If a peer's last seen time is 2 weeks ago we will forget such defunct peers.
const PEER_EXPIRATION_DAYS: i64 = 7 * 2;
/// Constant that expresses defunct peer timeout in seconds to be used in checks.
pub const PEER_EXPIRATION_REMOVE_TIME: i64 = PEER_EXPIRATION_DAYS * 24 * 3600;
/// Trigger compaction check on average every day for all nodes.
/// Randomized per node - roll the dice on every block to decide.
/// Will compact the txhashset to remove pruned data.
/// Will also remove old blocks and associated data from the database.
/// For a node configured as "archival_mode = true" only the txhashset will be compacted.
pub const COMPACTION_CHECK: u64 = DAY_HEIGHT;
/// Number of blocks to reuse a txhashset zip for (automated testing and user testing).
pub const TESTING_TXHASHSET_ARCHIVE_INTERVAL: u64 = 10;
/// Number of blocks to reuse a txhashset zip for.
pub const TXHASHSET_ARCHIVE_INTERVAL: u64 = 12 * 60;
/// Types of chain a server can run with; dictates the genesis block
/// and mining parameters used.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq)]
pub enum ChainTypes {
/// For CI testing
AutomatedTesting,
/// For User testing
UserTesting,
/// Protocol testing network
Testnet,
/// Main production network
Mainnet,
}
impl ChainTypes {
/// Short name representing the chain type ("test", "main", etc.)
pub fn shortname(&self) -> String {
match *self {
ChainTypes::AutomatedTesting => "auto".to_owned(),
ChainTypes::UserTesting => "user".to_owned(),
ChainTypes::Testnet => "test".to_owned(),
ChainTypes::Mainnet => "main".to_owned(),
}
}
}
impl Default for ChainTypes {
fn default() -> ChainTypes {
ChainTypes::Mainnet
}
}
lazy_static! {
/// Global chain_type that must be initialized once on node startup.
/// This is accessed via get_chain_type() which allows the global value
/// to be overridden on a per-thread basis (for testing).
pub static ref GLOBAL_CHAIN_TYPE: OneTime<ChainTypes> = OneTime::new();
/// Global accept fee base that must be initialized once on node startup.
/// This is accessed via get_accept_fee_base() which allows the global value
/// to be overridden on a per-thread basis (for testing).
pub static ref GLOBAL_ACCEPT_FEE_BASE: OneTime<u64> = OneTime::new();
/// Global future time limit that must be initialized once on node startup.
/// This is accessed via get_future_time_limit() which allows the global value
/// to be overridden on a per-thread basis (for testing).
pub static ref GLOBAL_FUTURE_TIME_LIMIT: OneTime<u64> = OneTime::new();
/// Global feature flag for NRD kernel support.
/// If enabled NRD kernels are treated as valid after HF3 (based on header version).
/// If disabled NRD kernels are invalid regardless of header version or block height.
pub static ref GLOBAL_NRD_FEATURE_ENABLED: OneTime<bool> = OneTime::new();
}
thread_local! {
/// Mainnet|Testnet|UserTesting|AutomatedTesting
pub static CHAIN_TYPE: Cell<Option<ChainTypes>> = Cell::new(None);
/// minimum transaction fee per unit of transaction weight for mempool acceptance
pub static ACCEPT_FEE_BASE: Cell<Option<u64>> = Cell::new(None);
/// maximum number of seconds into future for timestamp of block to be acceptable
pub static FUTURE_TIME_LIMIT: Cell<Option<u64>> = Cell::new(None);
/// Local feature flag for NRD kernel support.
pub static NRD_FEATURE_ENABLED: Cell<Option<bool>> = Cell::new(None);
}
/// One time initialization of the global chain_type.
/// Will panic if we attempt to re-initialize this (via OneTime).
pub fn init_global_chain_type(new_type: ChainTypes) {
GLOBAL_CHAIN_TYPE.init(new_type)
}
/// Set the global chain_type using an override
pub fn set_global_chain_type(new_type: ChainTypes) {
GLOBAL_CHAIN_TYPE.set(new_type, true);
}
/// Set the chain type on a per-thread basis via thread_local storage.
pub fn set_local_chain_type(new_type: ChainTypes) {
CHAIN_TYPE.with(|chain_type| chain_type.set(Some(new_type)))
}
/// Get the chain type via thread_local, falling back to the global chain_type.
pub fn get_chain_type() -> ChainTypes {
CHAIN_TYPE.with(|chain_type| match chain_type.get() {
None => {
if !GLOBAL_CHAIN_TYPE.is_init() {
panic!("GLOBAL_CHAIN_TYPE and CHAIN_TYPE unset. Consider set_local_chain_type() in tests.");
}
let chain_type = GLOBAL_CHAIN_TYPE.borrow();
set_local_chain_type(chain_type);
chain_type
}
Some(chain_type) => chain_type,
})
}
/// Return genesis block for the active chain type
pub fn get_genesis_block() -> Block {
match get_chain_type() {
ChainTypes::Mainnet => genesis::genesis_main(),
ChainTypes::Testnet => genesis::genesis_test(),
_ => genesis::genesis_dev(),
}
}
/// One time initialization of the global future time limit
/// Will panic if we attempt to re-initialize this (via OneTime).
pub fn init_global_future_time_limit(new_ftl: u64) {
GLOBAL_FUTURE_TIME_LIMIT.init(new_ftl)
}
/// The global future time limit may be reset again using the override
pub fn set_global_future_time_limit(new_ftl: u64) {
GLOBAL_FUTURE_TIME_LIMIT.set(new_ftl, true)
}
/// One time initialization of the global accept fee base
/// Will panic if we attempt to re-initialize this (via OneTime).
pub fn init_global_accept_fee_base(new_base: u64) {
GLOBAL_ACCEPT_FEE_BASE.init(new_base)
}
/// The global accept fee base may be reset using override.
pub fn set_global_accept_fee_base(new_base: u64) {
GLOBAL_ACCEPT_FEE_BASE.set(new_base, true)
}
/// Set the accept fee base on a per-thread basis via thread_local storage.
pub fn set_local_accept_fee_base(new_base: u64) {
ACCEPT_FEE_BASE.with(|base| base.set(Some(new_base)))
}
/// Accept Fee Base
/// Look at thread-local config first. If not set, fall back to global config.
/// Default to grin-cent/20 if global config unset.
pub fn get_accept_fee_base() -> u64 {
ACCEPT_FEE_BASE.with(|base| match base.get() {
None => {
let base = if GLOBAL_ACCEPT_FEE_BASE.is_init() {
GLOBAL_ACCEPT_FEE_BASE.borrow()
} else {
DEFAULT_ACCEPT_FEE_BASE
};
set_local_accept_fee_base(base);
base
}
Some(base) => base,
})
}
/// Set the future time limit on a per-thread basis via thread_local storage.
pub fn set_local_future_time_limit(new_ftl: u64) {
FUTURE_TIME_LIMIT.with(|ftl| ftl.set(Some(new_ftl)))
}
/// Future Time Limit (FTL)
/// Look at thread-local config first. If not set, fall back to global config.
/// Default to DEFAULT_FUTURE_TIME_LIMIT if global config unset.
pub fn get_future_time_limit() -> u64 {
FUTURE_TIME_LIMIT.with(|ftl| match ftl.get() {
None => {
let ftl = if GLOBAL_FUTURE_TIME_LIMIT.is_init() {
GLOBAL_FUTURE_TIME_LIMIT.borrow()
} else {
DEFAULT_FUTURE_TIME_LIMIT
};
set_local_future_time_limit(ftl);
ftl
}
Some(ftl) => ftl,
})
}
/// One time initialization of the global NRD feature flag.
/// Will panic if we attempt to re-initialize this (via OneTime).
pub fn init_global_nrd_enabled(enabled: bool) {
GLOBAL_NRD_FEATURE_ENABLED.init(enabled)
}
/// Set the global NRD feature flag using override.
pub fn set_global_nrd_enabled(enabled: bool) {
GLOBAL_NRD_FEATURE_ENABLED.set(enabled, true)
}
/// Explicitly enable the local NRD feature flag.
pub fn set_local_nrd_enabled(enabled: bool) {
NRD_FEATURE_ENABLED.with(|flag| flag.set(Some(enabled)))
}
/// Is the NRD feature flag enabled?
/// Look at thread-local config first. If not set, fall back to global config.
/// Default to false if global config unset.
pub fn is_nrd_enabled() -> bool {
NRD_FEATURE_ENABLED.with(|flag| match flag.get() {
None => {
if GLOBAL_NRD_FEATURE_ENABLED.is_init() {
let global_flag = GLOBAL_NRD_FEATURE_ENABLED.borrow();
flag.set(Some(global_flag));
global_flag
} else {
// Global config unset, default to false.
false
}
}
Some(flag) => flag,
})
}
/// Return either a cuckaroo* context or a cuckatoo context
/// Single change point
pub fn create_pow_context<T>(
height: u64,
edge_bits: u8,
proof_size: usize,
max_sols: u32,
) -> Result<Box<dyn PoWContext>, pow::Error> {
let chain_type = get_chain_type();
if chain_type == ChainTypes::Mainnet || chain_type == ChainTypes::Testnet {
// Mainnet and Testnet have Cuckatoo31+ for AF and Cuckaroo{,d,m,z}29 for AR
if edge_bits > 29 {
new_cuckatoo_ctx(edge_bits, proof_size, max_sols)
} else {
match header_version(height) {
HeaderVersion(1) => new_cuckaroo_ctx(edge_bits, proof_size),
HeaderVersion(2) => new_cuckarood_ctx(edge_bits, proof_size),
HeaderVersion(3) => new_cuckaroom_ctx(edge_bits, proof_size),
HeaderVersion(4) => new_cuckarooz_ctx(edge_bits, proof_size),
_ => no_cuckaroo_ctx(),
}
}
} else {
// Everything else is Cuckatoo only
new_cuckatoo_ctx(edge_bits, proof_size, max_sols)
}
}
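// Illustrative sketch (hypothetical test, assuming cuckatoo context creation
// accepts the tiny automated-testing parameters): on the testing chains every
// request routes to new_cuckatoo_ctx; the unused type parameter is satisfied
// with a turbofish.
#[cfg(test)]
mod pow_context_sketch {
    use super::*;
    #[test]
    fn builds_a_testing_cuckatoo_context() {
        set_local_chain_type(ChainTypes::AutomatedTesting);
        let ctx = create_pow_context::<u64>(
            0,
            AUTOMATED_TESTING_MIN_EDGE_BITS,
            AUTOMATED_TESTING_PROOF_SIZE,
            10,
        );
        assert!(ctx.is_ok());
    }
}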
/// The minimum acceptable edge_bits
pub fn min_edge_bits() -> u8 {
match get_chain_type() {
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_MIN_EDGE_BITS,
ChainTypes::UserTesting => USER_TESTING_MIN_EDGE_BITS,
_ => DEFAULT_MIN_EDGE_BITS,
}
}
/// Reference edge_bits used to compute factor on higher Cuck(at)oo graph sizes,
/// while the min_edge_bits can be changed on a soft fork, changing
/// base_edge_bits is a hard fork.
pub fn base_edge_bits() -> u8 {
match get_chain_type() {
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_MIN_EDGE_BITS,
ChainTypes::UserTesting => USER_TESTING_MIN_EDGE_BITS,
_ => BASE_EDGE_BITS,
}
}
/// The proofsize
pub fn proofsize() -> usize {
match get_chain_type() {
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_PROOF_SIZE,
ChainTypes::UserTesting => USER_TESTING_PROOF_SIZE,
_ => PROOFSIZE,
}
}
/// Coinbase maturity for coinbases to be spent
pub fn coinbase_maturity() -> u64 {
match get_chain_type() {
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_COINBASE_MATURITY,
ChainTypes::UserTesting => USER_TESTING_COINBASE_MATURITY,
_ => COINBASE_MATURITY,
}
}
/// Initial mining difficulty
pub fn initial_block_difficulty() -> u64 {
match get_chain_type() {
ChainTypes::AutomatedTesting => TESTING_INITIAL_DIFFICULTY,
ChainTypes::UserTesting => TESTING_INITIAL_DIFFICULTY,
ChainTypes::Testnet => INITIAL_DIFFICULTY,
ChainTypes::Mainnet => INITIAL_DIFFICULTY,
}
}
/// Initial mining secondary scale
pub fn initial_graph_weight() -> u32 {
match get_chain_type() {
ChainTypes::AutomatedTesting => graph_weight(0, AUTOMATED_TESTING_MIN_EDGE_BITS) as u32,
ChainTypes::UserTesting => graph_weight(0, USER_TESTING_MIN_EDGE_BITS) as u32,
ChainTypes::Testnet => graph_weight(0, SECOND_POW_EDGE_BITS) as u32,
ChainTypes::Mainnet => graph_weight(0, SECOND_POW_EDGE_BITS) as u32,
}
}
/// Minimum valid graph weight post HF4
pub fn min_wtema_graph_weight() -> u64 {
match get_chain_type() {
ChainTypes::AutomatedTesting => graph_weight(0, AUTOMATED_TESTING_MIN_EDGE_BITS),
ChainTypes::UserTesting => graph_weight(0, USER_TESTING_MIN_EDGE_BITS),
ChainTypes::Testnet => graph_weight(0, SECOND_POW_EDGE_BITS),
ChainTypes::Mainnet => C32_GRAPH_WEIGHT,
}
}
/// Maximum allowed block weight.
pub fn max_block_weight() -> u64 {
match get_chain_type() {
ChainTypes::AutomatedTesting => TESTING_MAX_BLOCK_WEIGHT,
ChainTypes::UserTesting => TESTING_MAX_BLOCK_WEIGHT,
ChainTypes::Testnet => MAX_BLOCK_WEIGHT,
ChainTypes::Mainnet => MAX_BLOCK_WEIGHT,
}
}
/// Maximum allowed transaction weight (1 weight unit ~= 32 bytes)
pub fn max_tx_weight() -> u64 {
let coinbase_weight = OUTPUT_WEIGHT + KERNEL_WEIGHT;
max_block_weight().saturating_sub(coinbase_weight) as u64
}
/// Horizon at which we can cut-through and do full local pruning
pub fn cut_through_horizon() -> u32 {
match get_chain_type() {
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_CUT_THROUGH_HORIZON,
ChainTypes::UserTesting => USER_TESTING_CUT_THROUGH_HORIZON,
_ => CUT_THROUGH_HORIZON,
}
}
/// Threshold at which we can request a txhashset (and full blocks from)
pub fn state_sync_threshold() -> u32 {
match get_chain_type() {
ChainTypes::AutomatedTesting => TESTING_STATE_SYNC_THRESHOLD,
ChainTypes::UserTesting => TESTING_STATE_SYNC_THRESHOLD,
_ => STATE_SYNC_THRESHOLD,
}
}
/// Number of blocks to reuse a txhashset zip for.
pub fn txhashset_archive_interval() -> u64 {
match get_chain_type() {
ChainTypes::AutomatedTesting => TESTING_TXHASHSET_ARCHIVE_INTERVAL,
ChainTypes::UserTesting => TESTING_TXHASHSET_ARCHIVE_INTERVAL,
_ => TXHASHSET_ARCHIVE_INTERVAL,
}
}
/// Are we in production mode?
/// Production defined as a live public network, testnet[n] or mainnet.
pub fn is_production_mode() -> bool {
match get_chain_type() {
ChainTypes::Testnet => true,
ChainTypes::Mainnet => true,
_ => false,
}
}
/// Are we in testnet?
/// Note: We do not have a corresponding is_mainnet() as we want any tests to be
/// as close as possible to the "mainnet" configuration.
/// We want to avoid missing any mainnet only code paths.
pub fn is_testnet() -> bool {
match get_chain_type() {
ChainTypes::Testnet => true,
_ => false,
}
}
/// Converts an iterator of block difficulty data to a more manageable
/// vector, padding it if needed (which will only be needed for the first few
/// blocks after genesis)
pub fn difficulty_data_to_vector<T>(cursor: T) -> Vec<HeaderDifficultyInfo>
where
T: IntoIterator<Item = HeaderDifficultyInfo>,
{
// Convert iterator to vector, so we can append to it if necessary
let needed_block_count = DMA_WINDOW as usize + 1;
let mut last_n: Vec<HeaderDifficultyInfo> =
cursor.into_iter().take(needed_block_count).collect();
// Only needed just after blockchain launch... basically ensures there's
// always enough data by simulating perfectly timed pre-genesis
// blocks at the genesis difficulty as needed.
let n = last_n.len();
if needed_block_count > n {
let last_ts_delta = if n > 1 {
last_n[0].timestamp - last_n[1].timestamp
} else {
BLOCK_TIME_SEC
};
let last_diff = last_n[0].difficulty;
// fill in simulated blocks with values from the previous real block
let mut last_ts = last_n.last().unwrap().timestamp;
for _ in n..needed_block_count {
last_ts = last_ts.saturating_sub(last_ts_delta);
last_n.push(HeaderDifficultyInfo::from_ts_diff(last_ts, last_diff));
}
}
last_n.reverse();
last_n
}
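// Illustrative sketch (hypothetical test; assumes a Difficulty type with a
// min() constructor is reachable at crate::pow::Difficulty -- adjust the path
// if your tree differs): a single input block is padded out to the full DMA window.
#[cfg(test)]
mod difficulty_padding_sketch {
    use super::*;
    use crate::pow::Difficulty;
    #[test]
    fn pads_window_from_a_single_block() {
        let one = vec![HeaderDifficultyInfo::from_ts_diff(60, Difficulty::min())];
        let padded = difficulty_data_to_vector(one);
        assert_eq!(padded.len(), DMA_WINDOW as usize + 1);
    }
}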
| size + proof_size
}
#[cfg(test)]
mod test {
use super::*;
use crate::core::Block;
use crate::genesis::*;
use crate::pow::mine_genesis_block;
use crate::ser::{BinWriter, Writeable};
fn test_header_len(genesis: Block) {
let mut raw = Vec::<u8>::with_capacity(1_024);
let mut writer = BinWriter::new(&mut raw, ProtocolVersion::local());
genesis.header.write(&mut writer).unwrap();
assert_eq!(raw.len(), header_size_bytes(genesis.header.pow.edge_bits()));
}
#[test]
fn automated_testing_header_len() {
set_local_chain_type(ChainTypes::AutomatedTesting);
test_header_len(mine_genesis_block().unwrap());
}
#[test]
fn user_testing_header_len() {
set_local_chain_type(ChainTypes::UserTesting);
test_header_len(mine_genesis_block().unwrap());
}
#[test]
fn testnet_header_len() {
set_local_chain_type(ChainTypes::Testnet);
test_header_len(genesis_test());
}
#[test]
fn mainnet_header_len() {
set_local_chain_type(ChainTypes::Mainnet);
test_header_len(genesis_main());
}
} | /// Calculates the size of a header (in bytes) given a number of edge bits in the PoW
#[inline]
pub fn header_size_bytes(edge_bits: u8) -> usize {
let size = 2 + 2 * 8 + 5 * 32 + 32 + 2 * 8;
let proof_size = 8 + 4 + 8 + 1 + Proof::pack_len(edge_bits); | random_line_split |
lib.rs | use http::{
Method,
StatusCode,
Uri,
Version,
};
use http::header::{
HeaderMap,
HeaderName,
HeaderValue,
InvalidHeaderName,
InvalidHeaderValue,
};
use http::method::InvalidMethod;
use http::uri::InvalidUriBytes;
use lazy_static::lazy_static;
use regex::bytes::Regex;
use std::io;
use std::io::{BufRead, BufWriter, Read, Write};
pub mod media_type;
static QUOTED_STRING_1G: &str =
r#""([\t!#-\[\]-~\x80-\xFF]|\\[\t!-~\x80-\xFF])*""#;
static TOKEN: &str = r"[!#$%&'*+.^_`|~0-9A-Za-z-]+";
#[derive(Debug)] | pub struct RequestHeader {
pub method: Method,
pub uri: Uri,
pub version: Version,
pub fields: HeaderMap,
}
#[derive(Debug)]
pub struct ResponseHeader {
pub status_code: StatusCode,
pub fields: HeaderMap,
}
#[derive(Debug)]
pub enum InvalidRequestHeader {
Format,
RequestLine(InvalidRequestLine),
HeaderField(InvalidHeaderField),
Io(io::Error),
}
impl From<InvalidRequestLine> for InvalidRequestHeader {
fn from(e: InvalidRequestLine) -> Self {
InvalidRequestHeader::RequestLine(e)
}
}
impl From<InvalidHeaderField> for InvalidRequestHeader {
fn from(e: InvalidHeaderField) -> Self {
InvalidRequestHeader::HeaderField(e)
}
}
impl From<io::Error> for InvalidRequestHeader {
fn from(e: io::Error) -> Self {
InvalidRequestHeader::Io(e)
}
}
const LINE_CAP: usize = 16384;
pub fn parse_request_header<B: BufRead>(mut stream: B)
-> Result<RequestHeader, InvalidRequestHeader>
{
// TODO: Why does removing the type from `line` here cause errors?
let next_line = |stream: &mut B, line: &mut Vec<u8>| {
line.clear();
let count = stream
.take(LINE_CAP as u64)
.read_until(b'\n', line)?;
match count {
0 => Err(InvalidRequestHeader::Format), // FIXME?
LINE_CAP => Err(InvalidRequestHeader::Format), // FIXME
_ => Ok(()),
}
};
let mut line = Vec::with_capacity(LINE_CAP);
next_line(&mut stream, &mut line)?;
if !line.ends_with(b"\r\n") {
return Err(InvalidRequestHeader::Format);
}
line.truncate(line.len() - 2);
let (method, uri, version) = parse_request_line(&line[..])?;
let mut fields = HeaderMap::new();
loop {
next_line(&mut stream, &mut line)?;
if !line.ends_with(b"\r\n") {
return Err(InvalidRequestHeader::Format);
}
line.truncate(line.len() - 2);
if line == b"" {
return Ok(RequestHeader { method, uri, version, fields });
}
let (name, value) = parse_header_field(&line)?;
// TODO: append is okay, right? No syntax issues because we haven't
// serialized anything yet.
fields.append(name, value); // TODO: we should care about result, right?
}
}
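// Minimal usage sketch (the request bytes are hypothetical; any BufRead
// works, so a byte slice is enough to exercise the parser):
fn _parse_request_header_sketch() {
let raw: &[u8] = b"GET /index.html HTTP/1.1\r\nHost: example.com\r\n\r\n";
let header = parse_request_header(raw).unwrap();
assert_eq!(header.method, Method::GET);
assert_eq!(header.uri.path(), "/index.html");
assert_eq!(header.fields["host"], "example.com");
}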
pub fn write_response_header<W: Write>(header: &ResponseHeader, stream: W)
-> io::Result<()>
{
let mut stream = BufWriter::new(stream);
// TODO: Is this the way you're supposed to format bytes?
stream.write_all(b"HTTP/1.1")?;
stream.write_all(b" ")?;
stream.write_all(header.status_code.as_str().as_bytes())?;
stream.write_all(b" ")?;
stream.write_all(
header
.status_code
.canonical_reason()
.unwrap_or("Unknown Reason")
.as_bytes()
)?;
stream.write_all(b"\r\n")?;
for key in header.fields.keys() {
let mut values = header.fields.get_all(key).into_iter().peekable();
stream.write_all(key.as_str().as_bytes())?;
stream.write_all(b": ")?;
match values.next() {
Some(v) => stream.write_all(v.as_bytes())?,
None => panic!("what?"),
}
if values.peek().is_some() {
let separate_fields = key == "set-cookie";
for v in values {
if separate_fields {
stream.write_all(b"\r\n")?;
stream.write_all(key.as_str().as_bytes())?;
stream.write_all(b": ")?;
} else {
stream.write_all(b",")?;
}
stream.write_all(v.as_bytes())?;
}
}
stream.write_all(b"\r\n")?;
}
stream.write_all(b"\r\n")?;
Ok(())
}
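// Usage sketch (status and field are hypothetical; a Vec<u8> serves as the
// Write sink, mirroring the unit tests below):
fn _write_response_header_sketch() -> io::Result<()> {
let mut out = Vec::new();
let mut fields = HeaderMap::new();
fields.append("content-type", HeaderValue::from_static("text/plain"));
let header = ResponseHeader { status_code: StatusCode::OK, fields };
write_response_header(&header, &mut out)?;
assert!(out.starts_with(b"HTTP/1.1 200 OK\r\n"));
Ok(())
}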
#[derive(Debug)]
pub enum InvalidRequestLine {
Format,
Method(InvalidMethod),
Uri(InvalidUriBytes),
Version,
}
impl From<InvalidMethod> for InvalidRequestLine {
fn from(e: InvalidMethod) -> Self {
InvalidRequestLine::Method(e)
}
}
impl From<InvalidUriBytes> for InvalidRequestLine {
fn from(e: InvalidUriBytes) -> Self {
InvalidRequestLine::Uri(e)
}
}
pub fn parse_request_line(s: &[u8])
-> Result<(Method, Uri, Version), InvalidRequestLine>
{
lazy_static! {
static ref R: Regex = Regex::new(
// method SP request-target SP HTTP-version
r"(?-u)^(\S+) (\S+) (\S+)$"
).unwrap();
}
let cap = R.captures(s).ok_or(InvalidRequestLine::Format)?;
Ok((
Method::from_bytes(&cap[1])?,
Uri::from_shared(cap[2].into())?,
match &cap[3] {
// rfc 7230 section A: "Any server that implements name-based
// virtual hosts ought to disable support for HTTP/0.9."
b"HTTP/1.0" => Version::HTTP_10,
b"HTTP/1.1" => Version::HTTP_11,
// We don't support HTTP 0.9 or 2.0. 2.0 support may be added later.
// FIXME: Can we respond to an invalid version with 505 HTTP
// Version Not Supported? If not, unsupported major versions need a
// different error than invalid versions.
// FIXME: We should probably accept requests with version 1.2 and
// higher. Check the spec.
_ => return Err(InvalidRequestLine::Version),
},
))
}
#[derive(Debug)]
pub enum InvalidHeaderField {
Format,
Name(InvalidHeaderName),
Value(InvalidHeaderValue),
}
impl From<InvalidHeaderName> for InvalidHeaderField {
fn from(e: InvalidHeaderName) -> Self {
InvalidHeaderField::Name(e)
}
}
impl From<InvalidHeaderValue> for InvalidHeaderField {
fn from(e: InvalidHeaderValue) -> Self {
InvalidHeaderField::Value(e)
}
}
pub fn parse_header_field(s: &[u8])
-> Result<(HeaderName, HeaderValue), InvalidHeaderField>
{
// TODO: support obs-fold e.g. within message/http
// (see rfc7230 section 3.2.4)
// rfc7230 section 3.2.4: Server MUST return 400 if there's whitespace
// between field name and colon.
// rfc7230 section 3.2.4: If obs-fold is used outside a message/http body,
// server MUST either return 400 or replace each such obs-fold with one or
// more SP chars.
lazy_static! {
static ref R: Regex = Regex::new(&(String::new()
// token ":" OWS *field-content OWS
+ r"(?-u)^(" + TOKEN + "):"
+ r"[\t ]*"
+ r"([!-~\x80-\xFF]([\t!-~\x80-\xFF]*[!-~\x80-\xFF])?)"
+ r"[\t ]*$"
)).unwrap();
}
let cap = R.captures(s).ok_or(InvalidHeaderField::Format)?;
Ok((
HeaderName::from_bytes(&cap[1])?,
// TODO: HeaderValue might not fully validate input.
HeaderValue::from_bytes(&cap[2])?,
))
}
#[cfg(test)]
mod test {
use crate::{
parse_request_header,
parse_request_line,
parse_header_field,
ResponseHeader,
write_response_header,
};
use http::header::{
HeaderMap,
HeaderValue,
};
use http::{
Method,
StatusCode,
Version,
};
#[test]
fn test_parse_request_header() {
let mut s = Vec::new();
// TODO: There's a better way to do this, right?
s.extend(
&b"POST http://foo.example.com/bar?qux=19&qux=xyz HTTP/1.1\r\n"[..]
);
s.extend(&b"Host: foo.example.com\r\n"[..]);
s.extend(&b"Content-Type: application/json\r\n"[..]);
s.extend(&b"\r\n"[..]);
let h = parse_request_header(&s[..]).unwrap();
assert_eq!(h.method, Method::POST);
assert_eq!(h.uri.scheme_str().unwrap(), "http");
assert_eq!(h.uri.host().unwrap(), "foo.example.com");
assert_eq!(h.uri.port_part(), None);
assert_eq!(h.uri.path(), "/bar");
assert_eq!(h.uri.query().unwrap(), "qux=19&qux=xyz");
assert_eq!(h.version, Version::HTTP_11);
assert_eq!(h.fields["host"], "foo.example.com");
assert_eq!(h.fields["content-type"], "application/json");
}
#[test]
fn test_write_response_header() {
let mut s = Vec::new();
let mut h = ResponseHeader {
status_code: StatusCode::from_u16(404).unwrap(),
fields: HeaderMap::new(),
};
write_response_header(&h, &mut s).unwrap();
assert_eq!(s, b"HTTP/1.1 404 Not Found\r\n\r\n");
h.fields.append("set-cookie", HeaderValue::from_static(
"FOO=\"some text\""
));
h.fields.append("Set-cookie", HeaderValue::from_static(
"BAR=\"some other text\""
));
h.fields.append("LOCATION", HeaderValue::from_static(
"http://example.com:3180/foo&bar"
));
h.fields.append("Content-Language", HeaderValue::from_static(
"en"
));
h.fields.append("Content-Language", HeaderValue::from_static(
"de"
));
s.clear();
write_response_header(&h, &mut s).unwrap();
assert!(s.starts_with(b"HTTP/1.1 404 Not Found\r\n"));
}
#[test]
fn test_parse_request_line() {
let s = b"OPTIONS * HTTP/1.1";
let (m, u, v) = parse_request_line(s).unwrap();
assert_eq!(m, Method::OPTIONS);
assert_eq!(u.path(), "*");
assert_eq!(v, Version::HTTP_11);
let s = b"POST http://foo.example.com/bar?qux=19&qux=xyz HTTP/1.0";
let (m, u, v) = parse_request_line(s).unwrap();
assert_eq!(m, Method::POST);
assert_eq!(u.scheme_str().unwrap(), "http");
assert_eq!(u.host().unwrap(), "foo.example.com");
assert_eq!(u.port_part(), None);
assert_eq!(u.path(), "/bar");
assert_eq!(u.query().unwrap(), "qux=19&qux=xyz");
assert_eq!(v, Version::HTTP_10);
}
#[test]
fn test_parse_header_field() {
let s = b"Content-Type: application/json; charset=\"\xAA\xBB\xCC\"";
let (h, v) = parse_header_field(s).unwrap();
assert_eq!(
h,
http::header::CONTENT_TYPE,
);
assert_eq!(
v,
HeaderValue::from_bytes(
&b"application/json; charset=\"\xAA\xBB\xCC\""[..]
).unwrap(),
);
}
} | random_line_split |
|
lib.rs | use http::{
Method,
StatusCode,
Uri,
Version,
};
use http::header::{
HeaderMap,
HeaderName,
HeaderValue,
InvalidHeaderName,
InvalidHeaderValue,
};
use http::method::InvalidMethod;
use http::uri::InvalidUriBytes;
use lazy_static::lazy_static;
use regex::bytes::Regex;
use std::io;
use std::io::{BufRead, BufWriter, Read, Write};
pub mod media_type;
static QUOTED_STRING_1G: &str =
r#""([\t!#-\[\]-~\x80-\xFF]|\\[\t!-~\x80-\xFF])*""#;
static TOKEN: &str = r"[!#$%&'*+.^_`|~0-9A-Za-z-]+";
#[derive(Debug)]
pub struct RequestHeader {
pub method: Method,
pub uri: Uri,
pub version: Version,
pub fields: HeaderMap,
}
#[derive(Debug)]
pub struct ResponseHeader {
pub status_code: StatusCode,
pub fields: HeaderMap,
}
#[derive(Debug)]
pub enum InvalidRequestHeader {
Format,
RequestLine(InvalidRequestLine),
HeaderField(InvalidHeaderField),
Io(io::Error),
}
impl From<InvalidRequestLine> for InvalidRequestHeader {
fn from(e: InvalidRequestLine) -> Self {
InvalidRequestHeader::RequestLine(e)
}
}
impl From<InvalidHeaderField> for InvalidRequestHeader {
fn from(e: InvalidHeaderField) -> Self {
InvalidRequestHeader::HeaderField(e)
}
}
impl From<io::Error> for InvalidRequestHeader {
fn from(e: io::Error) -> Self {
InvalidRequestHeader::Io(e)
}
}
const LINE_CAP: usize = 16384;
pub fn parse_request_header<B: BufRead>(mut stream: B)
-> Result<RequestHeader, InvalidRequestHeader>
{
// TODO: Why does removing the type from `line` here cause errors?
let next_line = |stream: &mut B, line: &mut Vec<u8>| {
line.clear();
let count = stream
.take(LINE_CAP as u64)
.read_until(b'\n', line)?;
match count {
0 => Err(InvalidRequestHeader::Format), // FIXME?
LINE_CAP => Err(InvalidRequestHeader::Format), // FIXME
_ => Ok(()),
}
};
let mut line = Vec::with_capacity(LINE_CAP);
next_line(&mut stream, &mut line)?;
if !line.ends_with(b"\r\n") {
return Err(InvalidRequestHeader::Format);
}
line.truncate(line.len() - 2);
let (method, uri, version) = parse_request_line(&line[..])?;
let mut fields = HeaderMap::new();
loop {
next_line(&mut stream, &mut line)?;
if !line.ends_with(b"\r\n") {
return Err(InvalidRequestHeader::Format);
}
line.truncate(line.len() - 2);
if line == b"" {
return Ok(RequestHeader { method, uri, version, fields });
}
let (name, value) = parse_header_field(&line)?;
// TODO: append is okay, right? No syntax issues because we haven't
// serialized anything yet.
fields.append(name, value); // TODO: we should care about result, right?
}
}
pub fn write_response_header<W: Write>(header: &ResponseHeader, stream: W)
-> io::Result<()>
{
let mut stream = BufWriter::new(stream);
// TODO: Is this the way you're supposed to format bytes?
stream.write_all(b"HTTP/1.1")?;
stream.write_all(b" ")?;
stream.write_all(header.status_code.as_str().as_bytes())?;
stream.write_all(b" ")?;
stream.write_all(
header
.status_code
.canonical_reason()
.unwrap_or("Unknown Reason")
.as_bytes()
)?;
stream.write_all(b"\r\n")?;
for key in header.fields.keys() {
let mut values = header.fields.get_all(key).into_iter().peekable();
stream.write_all(key.as_str().as_bytes())?;
stream.write_all(b": ")?;
match values.next() {
Some(v) => stream.write_all(v.as_bytes())?,
None => panic!("what?"),
}
if values.peek().is_some() {
let separate_fields = key == "set-cookie";
for v in values {
if separate_fields {
stream.write_all(b"\r\n")?;
stream.write_all(key.as_str().as_bytes())?;
stream.write_all(b": ")?;
} else {
stream.write_all(b",")?;
}
stream.write_all(v.as_bytes())?;
}
}
stream.write_all(b"\r\n")?;
}
stream.write_all(b"\r\n")?;
Ok(())
}
#[derive(Debug)]
pub enum InvalidRequestLine {
Format,
Method(InvalidMethod),
Uri(InvalidUriBytes),
Version,
}
impl From<InvalidMethod> for InvalidRequestLine {
fn from(e: InvalidMethod) -> Self {
InvalidRequestLine::Method(e)
}
}
impl From<InvalidUriBytes> for InvalidRequestLine {
fn from(e: InvalidUriBytes) -> Self {
InvalidRequestLine::Uri(e)
}
}
pub fn parse_request_line(s: &[u8])
-> Result<(Method, Uri, Version), InvalidRequestLine>
{
lazy_static! {
static ref R: Regex = Regex::new(
// method SP request-target SP HTTP-version
r"(?-u)^(\S+) (\S+) (\S+)$"
).unwrap();
}
let cap = R.captures(s).ok_or(InvalidRequestLine::Format)?;
Ok((
Method::from_bytes(&cap[1])?,
Uri::from_shared(cap[2].into())?,
match &cap[3] {
// rfc 7230 section A: "Any server that implements name-based
// virtual hosts ought to disable support for HTTP/0.9."
b"HTTP/1.0" => Version::HTTP_10,
b"HTTP/1.1" => Version::HTTP_11,
// We don't support HTTP 0.9 or 2.0. 2.0 support may be added later.
// FIXME: Can we respond to an invalid version with 505 HTTP
// Version Not Supported? If not, unsupported major versions need a
// different error than invalid versions.
// FIXME: We should probably accept requests with version 1.2 and
// higher. Check the spec.
_ => return Err(InvalidRequestLine::Version),
},
))
}
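// Behaviour sketch for the version arm above (hypothetical request line):
// anything other than HTTP/1.0 or HTTP/1.1 is rejected.
fn _reject_unsupported_version_sketch() {
match parse_request_line(b"GET / HTTP/2.0") {
Err(InvalidRequestLine::Version) => {}
other => panic!("expected a version error, got {:?}", other),
}
}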
#[derive(Debug)]
pub enum InvalidHeaderField {
Format,
Name(InvalidHeaderName),
Value(InvalidHeaderValue),
}
impl From<InvalidHeaderName> for InvalidHeaderField {
fn from(e: InvalidHeaderName) -> Self {
InvalidHeaderField::Name(e)
}
}
impl From<InvalidHeaderValue> for InvalidHeaderField {
fn from(e: InvalidHeaderValue) -> Self {
InvalidHeaderField::Value(e)
}
}
pub fn parse_header_field(s: &[u8])
-> Result<(HeaderName, HeaderValue), InvalidHeaderField>
{
// TODO: support obs-fold e.g. within message/http
// (see rfc7230 section 3.2.4)
// rfc7230 section 3.2.4: Server MUST return 400 if there's whitespace
// between field name and colon.
// rfc7230 section 3.2.4: If obs-fold is used outside a message/http body,
// server MUST either return 400 or replace each such obs-fold with one or
// more SP chars.
lazy_static! {
static ref R: Regex = Regex::new(&(String::new()
// token ":" OWS *field-content OWS
+ r"(?-u)^(" + TOKEN + "):"
+ r"[\t ]*"
+ r"([!-~\x80-\xFF]([\t!-~\x80-\xFF]*[!-~\x80-\xFF])?)"
+ r"[\t ]*$"
)).unwrap();
}
let cap = R.captures(s).ok_or(InvalidHeaderField::Format)?;
Ok((
HeaderName::from_bytes(&cap[1])?,
// TODO: HeaderValue might not fully validate input.
HeaderValue::from_bytes(&cap[2])?,
))
}
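// Whitespace-trimming sketch (hypothetical field): the OWS parts of the
// regex strip tabs and spaces around the value before it is turned into a
// HeaderValue.
fn _parse_header_field_sketch() {
let (name, value) = parse_header_field(b"Accept: \ttext/html \t").unwrap();
assert_eq!(name, http::header::ACCEPT);
assert_eq!(value, "text/html");
}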
#[cfg(test)]
mod test {
use crate::{
parse_request_header,
parse_request_line,
parse_header_field,
ResponseHeader,
write_response_header,
};
use http::header::{
HeaderMap,
HeaderValue,
};
use http::{
Method,
StatusCode,
Version,
};
#[test]
fn test_parse_request_header() {
let mut s = Vec::new();
// TODO: There's a better way to do this, right?
s.extend(
&b"POST http://foo.example.com/bar?qux=19&qux=xyz HTTP/1.1\r\n"[..]
);
s.extend(&b"Host: foo.example.com\r\n"[..]);
s.extend(&b"Content-Type: application/json\r\n"[..]);
s.extend(&b"\r\n"[..]);
let h = parse_request_header(&s[..]).unwrap();
assert_eq!(h.method, Method::POST);
assert_eq!(h.uri.scheme_str().unwrap(), "http");
assert_eq!(h.uri.host().unwrap(), "foo.example.com");
assert_eq!(h.uri.port_part(), None);
assert_eq!(h.uri.path(), "/bar");
assert_eq!(h.uri.query().unwrap(), "qux=19&qux=xyz");
assert_eq!(h.version, Version::HTTP_11);
assert_eq!(h.fields["host"], "foo.example.com");
assert_eq!(h.fields["content-type"], "application/json");
}
#[test]
fn test_write_response_header() {
let mut s = Vec::new();
let mut h = ResponseHeader {
status_code: StatusCode::from_u16(404).unwrap(),
fields: HeaderMap::new(),
};
write_response_header(&h, &mut s).unwrap();
assert_eq!(s, b"HTTP/1.1 404 Not Found\r\n\r\n");
h.fields.append("set-cookie", HeaderValue::from_static(
"FOO=\"some text\""
));
h.fields.append("Set-cookie", HeaderValue::from_static(
"BAR=\"some other text\""
));
h.fields.append("LOCATION", HeaderValue::from_static(
"http://example.com:3180/foo&bar"
));
h.fields.append("Content-Language", HeaderValue::from_static(
"en"
));
h.fields.append("Content-Language", HeaderValue::from_static(
"de"
));
s.clear();
write_response_header(&h, &mut s).unwrap();
assert!(s.starts_with(b"HTTP/1.1 404 Not Found\r\n"));
}
#[test]
fn test_parse_request_line() |
#[test]
fn test_parse_header_field() {
let s = b"Content-Type: application/json; charset=\"\xAA\xBB\xCC\"";
let (h, v) = parse_header_field(s).unwrap();
assert_eq!(
h,
http::header::CONTENT_TYPE,
);
assert_eq!(
v,
HeaderValue::from_bytes(
&b"application/json; charset=\"\xAA\xBB\xCC\""[..]
).unwrap(),
);
}
}
| {
let s = b"OPTIONS * HTTP/1.1";
let (m, u, v) = parse_request_line(s).unwrap();
assert_eq!(m, Method::OPTIONS);
assert_eq!(u.path(), "*");
assert_eq!(v, Version::HTTP_11);
let s = b"POST http://foo.example.com/bar?qux=19&qux=xyz HTTP/1.0";
let (m, u, v) = parse_request_line(s).unwrap();
assert_eq!(m, Method::POST);
assert_eq!(u.scheme_str().unwrap(), "http");
assert_eq!(u.host().unwrap(), "foo.example.com");
assert_eq!(u.port_part(), None);
assert_eq!(u.path(), "/bar");
assert_eq!(u.query().unwrap(), "qux=19&qux=xyz");
assert_eq!(v, Version::HTTP_10);
} | identifier_body |
lib.rs | use http::{
Method,
StatusCode,
Uri,
Version,
};
use http::header::{
HeaderMap,
HeaderName,
HeaderValue,
InvalidHeaderName,
InvalidHeaderValue,
};
use http::method::InvalidMethod;
use http::uri::InvalidUriBytes;
use lazy_static::lazy_static;
use regex::bytes::Regex;
use std::io;
use std::io::{BufRead, BufWriter, Read, Write};
pub mod media_type;
static QUOTED_STRING_1G: &str =
r#""([\t!#-\[\]-~\x80-\xFF]|\\[\t!-~\x80-\xFF])*""#;
static TOKEN: &str = r"[!#$%&'*+.^_`|~0-9A-Za-z-]+";
#[derive(Debug)]
pub struct RequestHeader {
pub method: Method,
pub uri: Uri,
pub version: Version,
pub fields: HeaderMap,
}
#[derive(Debug)]
pub struct ResponseHeader {
pub status_code: StatusCode,
pub fields: HeaderMap,
}
#[derive(Debug)]
pub enum InvalidRequestHeader {
Format,
RequestLine(InvalidRequestLine),
HeaderField(InvalidHeaderField),
Io(io::Error),
}
impl From<InvalidRequestLine> for InvalidRequestHeader {
fn from(e: InvalidRequestLine) -> Self {
InvalidRequestHeader::RequestLine(e)
}
}
impl From<InvalidHeaderField> for InvalidRequestHeader {
fn | (e: InvalidHeaderField) -> Self {
InvalidRequestHeader::HeaderField(e)
}
}
impl From<io::Error> for InvalidRequestHeader {
fn from(e: io::Error) -> Self {
InvalidRequestHeader::Io(e)
}
}
const LINE_CAP: usize = 16384;
pub fn parse_request_header<B: BufRead>(mut stream: B)
-> Result<RequestHeader, InvalidRequestHeader>
{
// TODO: Why does removing the type from `line` here cause errors?
let next_line = |stream: &mut B, line: &mut Vec<u8>| {
line.clear();
let count = stream
.take(LINE_CAP as u64)
.read_until(b'\n', line)?;
match count {
0 => Err(InvalidRequestHeader::Format), // FIXME?
LINE_CAP => Err(InvalidRequestHeader::Format), // FIXME
_ => Ok(()),
}
};
let mut line = Vec::with_capacity(LINE_CAP);
next_line(&mut stream, &mut line)?;
if !line.ends_with(b"\r\n") {
return Err(InvalidRequestHeader::Format);
}
line.truncate(line.len() - 2);
let (method, uri, version) = parse_request_line(&line[..])?;
let mut fields = HeaderMap::new();
loop {
next_line(&mut stream, &mut line)?;
if !line.ends_with(b"\r\n") {
return Err(InvalidRequestHeader::Format);
}
line.truncate(line.len() - 2);
if line == b"" {
return Ok(RequestHeader { method, uri, version, fields });
}
let (name, value) = parse_header_field(&line)?;
// TODO: append is okay, right? No syntax issues because we haven't
// serialized anything yet.
fields.append(name, value); // TODO: we should care about result, right?
}
}
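// Sketch of the LINE_CAP guard above (hypothetical oversized request): once
// read_until returns LINE_CAP bytes, the line is treated as malformed rather
// than being read without bound.
fn _oversized_line_sketch() {
let mut raw = vec![b'A'; LINE_CAP + 10];
raw.extend_from_slice(b" / HTTP/1.1\r\n\r\n");
assert!(parse_request_header(&raw[..]).is_err());
}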
pub fn write_response_header<W: Write>(header: &ResponseHeader, stream: W)
-> io::Result<()>
{
let mut stream = BufWriter::new(stream);
// TODO: Is this the way you're supposed to format bytes?
stream.write_all(b"HTTP/1.1")?;
stream.write_all(b" ")?;
stream.write_all(header.status_code.as_str().as_bytes())?;
stream.write_all(b" ")?;
stream.write_all(
header
.status_code
.canonical_reason()
.unwrap_or("Unknown Reason")
.as_bytes()
)?;
stream.write_all(b"\r\n")?;
for key in header.fields.keys() {
let mut values = header.fields.get_all(key).into_iter().peekable();
stream.write_all(key.as_str().as_bytes())?;
stream.write_all(b": ")?;
match values.next() {
Some(v) => stream.write_all(v.as_bytes())?,
None => panic!("what?"),
}
if values.peek().is_some() {
let separate_fields = key == "set-cookie";
for v in values {
if separate_fields {
stream.write_all(b"\r\n")?;
stream.write_all(key.as_str().as_bytes())?;
stream.write_all(b": ")?;
} else {
stream.write_all(b",")?;
}
stream.write_all(v.as_bytes())?;
}
}
stream.write_all(b"\r\n")?;
}
stream.write_all(b"\r\n")?;
Ok(())
}
#[derive(Debug)]
pub enum InvalidRequestLine {
Format,
Method(InvalidMethod),
Uri(InvalidUriBytes),
Version,
}
impl From<InvalidMethod> for InvalidRequestLine {
fn from(e: InvalidMethod) -> Self {
InvalidRequestLine::Method(e)
}
}
impl From<InvalidUriBytes> for InvalidRequestLine {
fn from(e: InvalidUriBytes) -> Self {
InvalidRequestLine::Uri(e)
}
}
pub fn parse_request_line(s: &[u8])
-> Result<(Method, Uri, Version), InvalidRequestLine>
{
lazy_static! {
static ref R: Regex = Regex::new(
// method SP request-target SP HTTP-version
r"(?-u)^(\S+) (\S+) (\S+)$"
).unwrap();
}
let cap = R.captures(s).ok_or(InvalidRequestLine::Format)?;
Ok((
Method::from_bytes(&cap[1])?,
Uri::from_shared(cap[2].into())?,
match &cap[3] {
// rfc 7230 section A: "Any server that implements name-based
// virtual hosts ought to disable support for HTTP/0.9."
b"HTTP/1.0" => Version::HTTP_10,
b"HTTP/1.1" => Version::HTTP_11,
// We don't support HTTP 0.9 or 2.0. 2.0 support may be added later.
// FIXME: Can we respond to an invalid version with 505 HTTP
// Version Not Supported? If not, unsupported major versions need a
// different error than invalid versions.
// FIXME: We should probably accept requests with version 1.2 and
// higher. Check the spec.
_ => return Err(InvalidRequestLine::Version),
},
))
}
#[derive(Debug)]
pub enum InvalidHeaderField {
Format,
Name(InvalidHeaderName),
Value(InvalidHeaderValue),
}
impl From<InvalidHeaderName> for InvalidHeaderField {
fn from(e: InvalidHeaderName) -> Self {
InvalidHeaderField::Name(e)
}
}
impl From<InvalidHeaderValue> for InvalidHeaderField {
fn from(e: InvalidHeaderValue) -> Self {
InvalidHeaderField::Value(e)
}
}
pub fn parse_header_field(s: &[u8])
-> Result<(HeaderName, HeaderValue), InvalidHeaderField>
{
// TODO: support obs-fold e.g. within message/http
// (see rfc7230 section 3.2.4)
// rfc7230 section 3.2.4: Server MUST return 400 if there's whitespace
// between field name and colon.
// rfc7230 section 3.2.4: If obs-fold is used outside a message/http body,
// server MUST either return 400 or replace each such obs-fold with one or
// more SP chars.
lazy_static! {
static ref R: Regex = Regex::new(&(String::new()
// token ":" OWS *field-content OWS
+ r"(?-u)^(" + TOKEN + "):"
+ r"[\t ]*"
+ r"([!-~\x80-\xFF]([\t!-~\x80-\xFF]*[!-~\x80-\xFF])?)"
+ r"[\t ]*$"
)).unwrap();
}
let cap = R.captures(s).ok_or(InvalidHeaderField::Format)?;
Ok((
HeaderName::from_bytes(&cap[1])?,
// TODO: HeaderValue might not fully validate input.
HeaderValue::from_bytes(&cap[2])?,
))
}
#[cfg(test)]
mod test {
use crate::{
parse_request_header,
parse_request_line,
parse_header_field,
ResponseHeader,
write_response_header,
};
use http::header::{
HeaderMap,
HeaderValue,
};
use http::{
Method,
StatusCode,
Version,
};
#[test]
fn test_parse_request_header() {
let mut s = Vec::new();
// TODO: There's a better way to do this, right?
s.extend(
&b"POST http://foo.example.com/bar?qux=19&qux=xyz HTTP/1.1\r\n"[..]
);
s.extend(&b"Host: foo.example.com\r\n"[..]);
s.extend(&b"Content-Type: application/json\r\n"[..]);
s.extend(&b"\r\n"[..]);
let h = parse_request_header(&s[..]).unwrap();
assert_eq!(h.method, Method::POST);
assert_eq!(h.uri.scheme_str().unwrap(), "http");
assert_eq!(h.uri.host().unwrap(), "foo.example.com");
assert_eq!(h.uri.port_part(), None);
assert_eq!(h.uri.path(), "/bar");
assert_eq!(h.uri.query().unwrap(), "qux=19&qux=xyz");
assert_eq!(h.version, Version::HTTP_11);
assert_eq!(h.fields["host"], "foo.example.com");
assert_eq!(h.fields["content-type"], "application/json");
}
#[test]
fn test_write_response_header() {
let mut s = Vec::new();
let mut h = ResponseHeader {
status_code: StatusCode::from_u16(404).unwrap(),
fields: HeaderMap::new(),
};
write_response_header(&h, &mut s).unwrap();
assert_eq!(s, b"HTTP/1.1 404 Not Found\r\n\r\n");
h.fields.append("set-cookie", HeaderValue::from_static(
"FOO=\"some text\""
));
h.fields.append("Set-cookie", HeaderValue::from_static(
"BAR=\"some other text\""
));
h.fields.append("LOCATION", HeaderValue::from_static(
"http://example.com:3180/foo&bar"
));
h.fields.append("Content-Language", HeaderValue::from_static(
"en"
));
h.fields.append("Content-Language", HeaderValue::from_static(
"de"
));
s.clear();
write_response_header(&h, &mut s).unwrap();
assert!(s.starts_with(b"HTTP/1.1 404 Not Found\r\n"));
}
#[test]
fn test_parse_request_line() {
let s = b"OPTIONS * HTTP/1.1";
let (m, u, v) = parse_request_line(s).unwrap();
assert_eq!(m, Method::OPTIONS);
assert_eq!(u.path(), "*");
assert_eq!(v, Version::HTTP_11);
let s = b"POST http://foo.example.com/bar?qux=19&qux=xyz HTTP/1.0";
let (m, u, v) = parse_request_line(s).unwrap();
assert_eq!(m, Method::POST);
assert_eq!(u.scheme_str().unwrap(), "http");
assert_eq!(u.host().unwrap(), "foo.example.com");
assert_eq!(u.port_part(), None);
assert_eq!(u.path(), "/bar");
assert_eq!(u.query().unwrap(), "qux=19&qux=xyz");
assert_eq!(v, Version::HTTP_10);
}
#[test]
fn test_parse_header_field() {
let s = b"Content-Type: application/json; charset=\"\xAA\xBB\xCC\"";
let (h, v) = parse_header_field(s).unwrap();
assert_eq!(
h,
http::header::CONTENT_TYPE,
);
assert_eq!(
v,
HeaderValue::from_bytes(
&b"application/json; charset=\"\xAA\xBB\xCC\""[..]
).unwrap(),
);
}
}
| from | identifier_name |
custom_widget.rs | that in this case, we use `piston_window` to draw our widget; however, in practice you may
//! use any backend you wish.
//!
//! For more information, please see the `Widget` trait documentation.
//!
#[macro_use] extern crate conrod;
extern crate find_folder;
extern crate piston_window;
extern crate vecmath;
/// The module in which we'll implement our own custom circular button.
mod circular_button {
use conrod::{
default_x_dimension,
default_y_dimension,
CharacterCache,
Circle,
Color,
Colorable,
CommonBuilder,
Dimension,
Dimensions,
FontSize,
IndexSlot,
Labelable,
Mouse,
Point,
Positionable,
Scalar,
Text,
Theme,
UpdateArgs,
Widget,
WidgetKind,
Ui,
};
/// The type upon which we'll implement the `Widget` trait.
pub struct CircularButton<'a, F> {
/// An object that handles some of the dirty work of rendering a GUI. We don't
/// really have to worry about it.
common: CommonBuilder,
/// Optional label string for the button.
maybe_label: Option<&'a str>,
/// Optional callback for when the button is pressed. If you want the button to
/// do anything, this callback must exist.
maybe_react: Option<F>,
/// See the Style struct below.
style: Style,
/// Whether the button is currently enabled, i.e. whether it responds to
/// user input.
enabled: bool
}
/// Represents the unique styling for our CircularButton widget.
#[derive(Clone, Debug, PartialEq)]
pub struct Style {
/// Color of the button.
pub maybe_color: Option<Color>,
/// Radius of the button.
pub maybe_radius: Option<Scalar>,
/// Color of the button's label.
pub maybe_label_color: Option<Color>,
/// Font size of the button's label.
pub maybe_label_font_size: Option<u32>,
}
/// Represents the unique, cached state for our CircularButton widget.
#[derive(Clone, Debug, PartialEq)]
pub struct State {
/// The current interaction state. See the Interaction enum below. See also
/// get_new_interaction below, where we define all the logic for transitioning between
/// interaction states.
interaction: Interaction,
/// An index to use for our **Circle** primitive graphics widget.
circle_idx: IndexSlot,
/// An index to use for our **Text** primitive graphics widget (for the label).
text_idx: IndexSlot,
}
/// A `&'static str` that can be used to uniquely identify our widget type.
pub const KIND: WidgetKind = "CircularButton";
/// A type to keep track of interaction between updates.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum Interaction {
Normal,
Highlighted,
Clicked,
}
impl Interaction {
/// Alter the widget color depending on the current interaction.
fn color(&self, color: Color) -> Color {
match *self {
// The base color as defined in the Style struct, or a default provided
// by the current Theme if the Style has no color.
Interaction::Normal => color,
// The Color object (from Elmesque) can calculate a highlighted version
// of itself. We don't have to use it, though. We could specify any color
// we want.
Interaction::Highlighted => color.highlighted(),
// Ditto for clicked.
Interaction::Clicked => color.clicked(),
}
}
}
/// Check the current interaction with the button. Takes into account whether the mouse is
/// over the button and the previous interaction state.
fn get_new_interaction(is_over: bool, prev: Interaction, mouse: Mouse) -> Interaction {
use conrod::MouseButtonPosition::{Down, Up};
use self::Interaction::{Normal, Highlighted, Clicked};
match (is_over, prev, mouse.left.position) {
// LMB is down over the button. But the button wasn't Highlighted last
// update. This means the user clicked somewhere outside the button and
// moved over the button holding LMB down. We do nothing in this case.
(true, Normal, Down) => Normal,
// LMB is down over the button. The button was either Highlighted or Clicked
// last update. If it was highlighted before, that means the user clicked
// just now, and we transition to the Clicked state. If it was clicked
// before, that means the user is still holding LMB down from a previous
// click, in which case the state remains Clicked.
(true, _, Down) => Clicked,
// LMB is up. The mouse is hovering over the button. Regardless of what the
// state was last update, the state should definitely be Highlighted now.
(true, _, Up) => Highlighted,
// LMB is down, the mouse is not over the button, but the previous state was
// Clicked. That means the user clicked the button and then moved the mouse
// outside the button while holding LMB down. The button stays Clicked.
(false, Clicked, Down) => Clicked,
// If none of the above applies, then nothing interesting is happening with
// this button.
_ => Normal,
}
}
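// Reading the table as a sketch: a click-and-release over the button walks
// Normal -> Highlighted -> Clicked -> Highlighted; that final Clicked ->
// Highlighted edge is exactly what `update` below treats as an activation.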
/// Return whether or not a given point is over a circle at a given point on a
/// Cartesian plane. We use this to determine whether the mouse is over the button.
pub fn is_over_circ(circ_center: Point, mouse_point: Point, dim: Dimensions) -> bool {
// Offset vector from the center of the circle to the mouse.
let offset = ::vecmath::vec2_sub(mouse_point, circ_center);
// If the length of the offset vector is less than or equal to the circle's
// radius, then the mouse is inside the circle. We assume that dim is a square
// bounding box around the circle, thus 2 * radius == dim[0] == dim[1].
::vecmath::vec2_len(offset) <= dim[0] / 2.0
}
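// Numeric sketch (hypothetical values): with a 100x100 bounding box the
// radius is 50, so a 3-4-5 offset of [30, 40] sits exactly on the edge and
// still counts as "over".
fn _is_over_circ_sketch() {
assert!(is_over_circ([0.0, 0.0], [30.0, 40.0], [100.0, 100.0]));
assert!(!is_over_circ([0.0, 0.0], [31.0, 40.0], [100.0, 100.0]));
}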
impl<'a, F> CircularButton<'a, F> {
/// Create a button context to be built upon.
pub fn new() -> CircularButton<'a, F> {
CircularButton {
common: CommonBuilder::new(),
maybe_react: None,
maybe_label: None,
style: Style::new(),
enabled: true,
}
}
/// Set the reaction for the Button. The reaction will be triggered upon release
/// of the button. Like other Conrod configs, this returns self for chainability.
pub fn react(mut self, reaction: F) -> Self {
self.maybe_react = Some(reaction);
self
}
/// If true, will allow user inputs. If false, will disallow user inputs. Like
/// other Conrod configs, this returns self for chainability. Allow dead code
/// because we never call this in the example.
#[allow(dead_code)]
pub fn enabled(mut self, flag: bool) -> Self {
self.enabled = flag;
self
}
}
/// A custom Conrod widget must implement the Widget trait. See the **Widget** trait
/// documentation for more details.
impl<'a, F> Widget for CircularButton<'a, F>
where F: FnMut()
{
/// The State struct that we defined above.
type State = State;
/// The Style struct that we defined above.
type Style = Style;
fn common(&self) -> &CommonBuilder {
&self.common
}
fn common_mut(&mut self) -> &mut CommonBuilder {
&mut self.common
}
fn unique_kind(&self) -> &'static str {
KIND
}
fn init_state(&self) -> State {
State {
interaction: Interaction::Normal,
circle_idx: IndexSlot::new(),
text_idx: IndexSlot::new(),
}
}
fn style(&self) -> Style {
self.style.clone()
}
/// Default width of the widget.
///
/// This method is optional.
/// | //
// Defaults can come from several places. Here, we define how certain defaults take
// precedence over others.
//
// Most commonly, defaults are to be retrieved from the `Theme`, however in some cases
// some other logic may need to be considered.
default_x_dimension(self, ui).unwrap_or(Dimension::Absolute(64.0))
}
/// Default height of the widget.
///
/// This method is optional.
///
/// The default implementation is the same as below, but unwraps to an absolute scalar of
/// `0.0` instead of `64.0`.
fn default_y_dimension<C: CharacterCache>(&self, ui: &Ui<C>) -> Dimension {
default_y_dimension(self, ui).unwrap_or(Dimension::Absolute(64.0))
}
/// Update the state of the button. The state may or may not have changed since
/// the last update. (E.g. it may have changed because the user moused over the
/// button.) If the state has changed, return the new state. Else, return None.
fn update<C: CharacterCache>(mut self, args: UpdateArgs<Self, C>) {
let UpdateArgs { idx, state, rect, mut ui, style, .. } = args;
let (xy, dim) = rect.xy_dim();
let maybe_mouse = ui.input().maybe_mouse.map(|mouse| mouse.relative_to(xy));
// Check whether or not a new interaction has occurred.
let new_interaction = match (self.enabled, maybe_mouse) {
(false, _) | (true, None) => Interaction::Normal,
(true, Some(mouse)) => {
// Conrod does us a favor by transforming mouse.xy into this widget's
// local coordinate system. Because mouse.xy is in local coords,
// we must also pass the circle center in local coords. Thus we pass
// [0.0, 0.0] as the center.
//
// See above where we define is_over_circ.
let is_over = is_over_circ([0.0, 0.0], mouse.xy, dim);
// See above where we define get_new_interaction.
get_new_interaction(is_over, state.view().interaction, mouse)
},
};
// If the mouse was released over the button, react. state.interaction is the
// button's state as of a moment ago. new_interaction is the updated state as
// of right now. So this if statement is saying: If the button was clicked a
// moment ago, and it's now highlighted, then the button has been activated.
if let (Interaction::Clicked, Interaction::Highlighted) =
(state.view().interaction, new_interaction)
{
// Recall that our CircularButton struct includes maybe_react, which
// stores either a reaction function or None. If maybe_react is Some, call
// the function.
if let Some(ref mut react) = self.maybe_react {
react();
}
}
// Here we check to see whether or not our button should capture the mouse.
//
// Widgets can "capture" user input. If the button captures the mouse, then mouse
// events will only be seen by the button. Other widgets will not see mouse events
// until the button uncaptures the mouse.
match (state.view().interaction, new_interaction) {
// If the user has pressed the button we capture the mouse.
(Interaction::Highlighted, Interaction::Clicked) => {
ui.capture_mouse();
},
// If the user releases the button, we uncapture the mouse.
(Interaction::Clicked, Interaction::Highlighted) |
(Interaction::Clicked, Interaction::Normal) => {
ui.uncapture_mouse();
},
_ => (),
}
// Whenever we call `state.update` (as below), a flag is set within our `State`
// indicating that there has been some mutation and that our widget requires a
// re-draw. Thus, we only want to call `state.update` if there has been some change in
// order to only re-draw when absolutely required.
//
// You can see how we do this below - we check if the state has changed before calling
// `state.update`.
// If the interaction has changed, set the new interaction.
if state.view().interaction != new_interaction {
state.update(|state| state.interaction = new_interaction);
}
// Finally, we'll describe how we want our widget drawn by simply instantiating the
// necessary primitive graphics widgets.
//
// Conrod will automatically determine whether or not any changes have occurred and
// whether or not any widgets need to be re-drawn.
//
// The primitive graphics widgets are special in that their unique state is used within
// conrod's backend to do the actual drawing. This allows us to build up more complex
// widgets by using these simple primitives with our familiar layout, coloring, etc
// methods.
//
// If you notice that conrod is missing some sort of primitive graphics that you
// require, please file an issue or open a PR so we can add it! :)
// First, we'll draw the **Circle** with a radius that is half our given width.
let radius = rect.w() / 2.0;
let color = new_interaction.color(style.color(ui.theme()));
let circle_idx = state.view().circle_idx.get(&mut ui);
Circle::fill(radius)
.middle_of(idx)
.graphics_for(idx)
.color(color)
.set(circle_idx, &mut ui);
// Now we'll instantiate our label using the **Text** widget.
let label_color = style.label_color(ui.theme());
let font_size = style.label_font_size(ui.theme());
let text_idx = state.view().text_idx.get(&mut ui);
if let Some(ref label) = self.maybe_label {
Text::new(label)
.middle_of(idx)
.font_size(font_size)
.graphics_for(idx)
.color(label_color)
.set(text_idx, &mut ui);
}
}
}
impl Style {
/// Construct the default Style.
pub fn new() -> Style {
Style {
maybe_color: None,
maybe_radius: None,
maybe_label_color: None,
maybe_label_font_size: None,
}
}
/// Get the Color for an Element.
pub fn color(&self, theme: &Theme) -> Color {
self.maybe_color.or(theme.widget_style::<Self>(KIND).map(|default| {
default.style.maybe_color.unwrap_or(theme.shape_color)
})).unwrap_or(theme.shape_color)
}
/// Get the label Color for an Element.
pub fn label_color(&self, theme: &Theme) -> Color {
self.maybe_label_color.or(theme.widget_style::<Self>(KIND).map(|default| {
default.style.maybe_label_color.unwrap_or(theme.label_color)
})).unwrap_or(theme.label_color)
}
/// Get the label font size for an Element.
pub fn label_font_size(&self, theme: &Theme) -> FontSize {
self.maybe_label_font_size.or(theme.widget_style::<Self>(KIND).map(|default| {
default.style.maybe_label_font_size.unwrap_or(theme.font_size_medium)
})).unwrap_or(theme.font_size_medium)
}
}
/// Provide the chainable color() configuration method.
impl<'a, F> Colorable for CircularButton<'a, F> {
fn color(mut self, color: Color) -> Self {
self.style.maybe_color = Some(color);
self
}
}
/// Provide the chainable label(), label_color(), and label_font_size()
/// configuration methods.
impl<'a, F> Labelable<'a> for CircularButton<'a, F> {
fn label(mut self, text: &'a str) -> Self {
self.maybe_label = Some(text);
self
}
fn label_color(mut self, color: Color) -> Self {
self.style.maybe_label_color = Some(color);
self
}
fn label_font_size(mut self, size: FontSize) -> Self {
self.style.maybe_label_font_size = Some(size);
self
}
}
}
fn main() {
use piston_window::{EventLoop, Glyphs, PistonWindow, OpenGL, UpdateEvent, WindowSettings};
use conrod::{Colorable, Labelable, Positionable, Sizeable, Widget};
use circular_button::CircularButton;
// PistonWindow has two type parameters, but the default type is
// PistonWindow<T = (), W: Window = GlutinWindow>. To change the Piston backend,
// specify a different type in the let binding, e.g.
// let window: PistonWindow<(), Sdl2Window>.
let window: PistonWindow = WindowSettings::new("Control Panel", [1200, 800])
.opengl(OpenGL::V3_2)
.exit_on_esc(true)
.build().unwrap();
// Conrod's main object.
let mut ui = {
// Load a font. `Glyphs` is provided to us via piston_window and gfx, though you may use
// any type that implements `CharacterCache`.
let assets = find_folder::Search::ParentsThenKids(3, 3)
.for_folder("assets").unwrap();
let font_path = assets.join("fonts/NotoSans/NotoSans-Regular.ttf");
let glyph_cache = Glyphs::new(&font_path, window.factory.borrow().clone()).unwrap();
conrod::Ui::new(glyph_cache, conrod::Theme::default())
};
for e in window.ups(60) {
// Pass each `Event` to the `Ui`.
ui.handle_event(e.event.as_ref().unwrap());
e.update(|_| ui.set_widgets(|ui| {
// Sets a color to clear the background with before the Ui draws our widget.
conrod::Split::new(BACKGROUND).color(conrod::color::dark_red()).set(ui);
// Create an instance of our custom widget.
CircularButton::new()
.color(conrod::color::rgb(0.0, 0.3, 0.1))
.middle_of(BACKGROUND)
.dimensions(256.0, 256.0)
.label_color(conrod::color::white())
.label("Circular Button")
// This is called when the user clicks the button.
.react(|| println!("Click"))
// Add the widget to the conrod::Ui. This schedules the widget to be
| /// The default implementation is the same as below, but unwraps to an absolute scalar of
/// `0.0` instead of `64.0`.
fn default_x_dimension<C: CharacterCache>(&self, ui: &Ui<C>) -> Dimension {
// If no width was given via the `Sizeable` (a trait implemented for all widgets)
// methods, some default width must be chosen. | random_line_split |
custom_widget.rs | in this case, we use `piston_window` to draw our widget; however, in practice you may
//! use any backend you wish.
//!
//! For more information, please see the `Widget` trait documentation.
//!
#[macro_use] extern crate conrod;
extern crate find_folder;
extern crate piston_window;
extern crate vecmath;
/// The module in which we'll implement our own custom circular button.
mod circular_button {
use conrod::{
default_x_dimension,
default_y_dimension,
CharacterCache,
Circle,
Color,
Colorable,
CommonBuilder,
Dimension,
Dimensions,
FontSize,
IndexSlot,
Labelable,
Mouse,
Point,
Positionable,
Scalar,
Text,
Theme,
UpdateArgs,
Widget,
WidgetKind,
Ui,
};
/// The type upon which we'll implement the `Widget` trait.
pub struct CircularButton<'a, F> {
/// An object that handles some of the dirty work of rendering a GUI. We don't
/// really have to worry about it.
common: CommonBuilder,
/// Optional label string for the button.
maybe_label: Option<&'a str>,
/// Optional callback for when the button is pressed. If you want the button to
/// do anything, this callback must exist.
maybe_react: Option<F>,
/// See the Style struct below.
style: Style,
/// Whether the button is currently enabled, i.e. whether it responds to
/// user input.
enabled: bool
}
/// Represents the unique styling for our CircularButton widget.
#[derive(Clone, Debug, PartialEq)]
pub struct Style {
/// Color of the button.
pub maybe_color: Option<Color>,
/// Radius of the button.
pub maybe_radius: Option<Scalar>,
/// Color of the button's label.
pub maybe_label_color: Option<Color>,
/// Font size of the button's label.
pub maybe_label_font_size: Option<u32>,
}
/// Represents the unique, cached state for our CircularButton widget.
#[derive(Clone, Debug, PartialEq)]
pub struct State {
/// The current interaction state. See the Interaction enum below. See also
/// get_new_interaction below, where we define all the logic for transitioning between
/// interaction states.
interaction: Interaction,
/// An index to use for our **Circle** primitive graphics widget.
circle_idx: IndexSlot,
/// An index to use for our **Text** primitive graphics widget (for the label).
text_idx: IndexSlot,
}
/// A `&'static str` that can be used to uniquely identify our widget type.
pub const KIND: WidgetKind = "CircularButton";
/// A type to keep track of interaction between updates.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum Interaction {
Normal,
Highlighted,
Clicked,
}
impl Interaction {
/// Alter the widget color depending on the current interaction.
fn color(&self, color: Color) -> Color {
match *self {
// The base color as defined in the Style struct, or a default provided
// by the current Theme if the Style has no color.
Interaction::Normal => color,
// The Color object (from Elmesque) can calculate a highlighted version
// of itself. We don't have to use it, though. We could specify any color
// we want.
Interaction::Highlighted => color.highlighted(),
// Ditto for clicked.
Interaction::Clicked => color.clicked(),
}
}
}
/// Check the current interaction with the button. Takes into account whether the mouse is
/// over the button and the previous interaction state.
fn get_new_interaction(is_over: bool, prev: Interaction, mouse: Mouse) -> Interaction {
use conrod::MouseButtonPosition::{Down, Up};
use self::Interaction::{Normal, Highlighted, Clicked};
match (is_over, prev, mouse.left.position) {
// LMB is down over the button. But the button wasn't Highlighted last
// update. This means the user clicked somewhere outside the button and
// moved over the button holding LMB down. We do nothing in this case.
(true, Normal, Down) => Normal,
// LMB is down over the button. The button was either Highlighted or Clicked
// last update. If it was highlighted before, that means the user clicked
// just now, and we transition to the Clicked state. If it was clicked
// before, that means the user is still holding LMB down from a previous
// click, in which case the state remains Clicked.
(true, _, Down) => Clicked,
// LMB is up. The mouse is hovering over the button. Regardless of what the
// state was last update, the state should definitely be Highlighted now.
(true, _, Up) => Highlighted,
// LMB is down, the mouse is not over the button, but the previous state was
// Clicked. That means the user clicked the button and then moved the mouse
// outside the button while holding LMB down. The button stays Clicked.
(false, Clicked, Down) => Clicked,
// If none of the above applies, then nothing interesting is happening with
// this button.
_ => Normal,
}
}
/// Return whether or not a given point is over a circle at a given point on a
/// Cartesian plane. We use this to determine whether the mouse is over the button.
pub fn is_over_circ(circ_center: Point, mouse_point: Point, dim: Dimensions) -> bool {
// Offset vector from the center of the circle to the mouse.
let offset = ::vecmath::vec2_sub(mouse_point, circ_center);
// If the length of the offset vector is less than or equal to the circle's
// radius, then the mouse is inside the circle. We assume that dim is a square
// bounding box around the circle, thus 2 * radius == dim[0] == dim[1].
::vecmath::vec2_len(offset) <= dim[0] / 2.0
}
impl<'a, F> CircularButton<'a, F> {
/// Create a button context to be built upon.
pub fn new() -> CircularButton<'a, F> {
CircularButton {
common: CommonBuilder::new(),
maybe_react: None,
maybe_label: None,
style: Style::new(),
enabled: true,
}
}
/// Set the reaction for the Button. The reaction will be triggered upon release
/// of the button. Like other Conrod configs, this returns self for chainability.
pub fn react(mut self, reaction: F) -> Self {
self.maybe_react = Some(reaction);
self
}
/// If true, will allow user inputs. If false, will disallow user inputs. Like
/// other Conrod configs, this returns self for chainability. Allow dead code
/// because we never call this in the example.
#[allow(dead_code)]
pub fn enabled(mut self, flag: bool) -> Self {
self.enabled = flag;
self
}
}
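// Builder sketch (hypothetical label and closure): each configuration method
// returns `self`, so a button is assembled in a single chained expression.
fn _builder_sketch() {
let _button = CircularButton::new()
.label("Press me")
.react(|| println!("pressed"))
.enabled(true);
}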
/// A custom Conrod widget must implement the Widget trait. See the **Widget** trait
/// documentation for more details.
impl<'a, F> Widget for CircularButton<'a, F>
where F: FnMut()
{
/// The State struct that we defined above.
type State = State;
/// The Style struct that we defined above.
type Style = Style;
fn common(&self) -> &CommonBuilder {
&self.common
}
fn common_mut(&mut self) -> &mut CommonBuilder {
&mut self.common
}
fn unique_kind(&self) -> &'static str {
KIND
}
fn init_state(&self) -> State {
State {
interaction: Interaction::Normal,
circle_idx: IndexSlot::new(),
text_idx: IndexSlot::new(),
}
}
fn style(&self) -> Style {
self.style.clone()
}
/// Default width of the widget.
///
/// This method is optional.
///
/// The default implementation is the same as below, but unwraps to an absolute scalar of
/// `0.0` instead of `64.0`.
fn default_x_dimension<C: CharacterCache>(&self, ui: &Ui<C>) -> Dimension {
// If no width was given via the `Sizeable` (a trait implemented for all widgets)
// methods, some default width must be chosen.
//
// Defaults can come from several places. Here, we define how certain defaults take
// precedence over others.
//
// Most commonly, defaults are to be retrieved from the `Theme`, however in some cases
// some other logic may need to be considered.
default_x_dimension(self, ui).unwrap_or(Dimension::Absolute(64.0))
}
/// Default height of the widget.
///
/// This method is optional.
///
/// The default implementation is the same as below, but unwraps to an absolute scalar of
/// `0.0` instead of `64.0`.
fn default_y_dimension<C: CharacterCache>(&self, ui: &Ui<C>) -> Dimension {
default_y_dimension(self, ui).unwrap_or(Dimension::Absolute(64.0))
}
/// Update the state of the button. The state may or may not have changed since
/// the last update. (E.g. it may have changed because the user moused over the
/// button.) If the state has changed, return the new state. Else, return None.
fn update<C: CharacterCache>(mut self, args: UpdateArgs<Self, C>) {
let UpdateArgs { idx, state, rect, mut ui, style, .. } = args;
let (xy, dim) = rect.xy_dim();
let maybe_mouse = ui.input().maybe_mouse.map(|mouse| mouse.relative_to(xy));
// Check whether or not a new interaction has occurred.
let new_interaction = match (self.enabled, maybe_mouse) {
(false, _) | (true, None) => Interaction::Normal,
(true, Some(mouse)) => {
// Conrod does us a favor by transforming mouse.xy into this widget's
// local coordinate system. Because mouse.xy is in local coords,
// we must also pass the circle center in local coords. Thus we pass
// [0.0, 0.0] as the center.
//
// See above where we define is_over_circ.
let is_over = is_over_circ([0.0, 0.0], mouse.xy, dim);
// See above where we define get_new_interaction.
get_new_interaction(is_over, state.view().interaction, mouse)
},
};
// If the mouse was released over the button, react. state.interaction is the
// button's state as of a moment ago. new_interaction is the updated state as
// of right now. So this if statement is saying: If the button was clicked a
// moment ago, and it's now highlighted, then the button has been activated.
if let (Interaction::Clicked, Interaction::Highlighted) =
(state.view().interaction, new_interaction)
{
// Recall that our CircularButton struct includes maybe_react, which
// stores either a reaction function or None. If maybe_react is Some, call
// the function.
if let Some(ref mut react) = self.maybe_react |
}
// Here we check to see whether or not our button should capture the mouse.
//
// Widgets can "capture" user input. If the button captures the mouse, then mouse
// events will only be seen by the button. Other widgets will not see mouse events
// until the button uncaptures the mouse.
match (state.view().interaction, new_interaction) {
// If the user has pressed the button we capture the mouse.
(Interaction::Highlighted, Interaction::Clicked) => {
ui.capture_mouse();
},
// If the user releases the button, we uncapture the mouse.
(Interaction::Clicked, Interaction::Highlighted) |
(Interaction::Clicked, Interaction::Normal) => {
ui.uncapture_mouse();
},
_ => (),
}
// Whenever we call `state.update` (as below), a flag is set within our `State`
// indicating that there has been some mutation and that our widget requires a
// re-draw. Thus, we only want to call `state.update` if there has been some change in
// order to only re-draw when absolutely required.
//
// You can see how we do this below - we check if the state has changed before calling
// `state.update`.
// If the interaction has changed, set the new interaction.
if state.view().interaction != new_interaction {
state.update(|state| state.interaction = new_interaction);
}
// Finally, we'll describe how we want our widget drawn by simply instantiating the
// necessary primitive graphics widgets.
//
// Conrod will automatically determine whether or not any changes have occurred and
// whether or not any widgets need to be re-drawn.
//
// The primitive graphics widgets are special in that their unique state is used within
// conrod's backend to do the actual drawing. This allows us to build up more complex
// widgets by using these simple primitives with our familiar layout, coloring, etc
// methods.
//
// If you notice that conrod is missing some sort of primitive graphics that you
// require, please file an issue or open a PR so we can add it! :)
// First, we'll draw the **Circle** with a radius that is half our given width.
let radius = rect.w() / 2.0;
let color = new_interaction.color(style.color(ui.theme()));
let circle_idx = state.view().circle_idx.get(&mut ui);
Circle::fill(radius)
.middle_of(idx)
.graphics_for(idx)
.color(color)
.set(circle_idx, &mut ui);
custom_widget.rs
//! In this case, we use `piston_window` to draw our widget, however in practice you may
//! use any backend you wish.
//!
//! For more information, please see the `Widget` trait documentation.
//!
#[macro_use] extern crate conrod;
extern crate find_folder;
extern crate piston_window;
extern crate vecmath;
/// The module in which we'll implement our own custom circular button.
mod circular_button {
use conrod::{
default_x_dimension,
default_y_dimension,
CharacterCache,
Circle,
Color,
Colorable,
CommonBuilder,
Dimension,
Dimensions,
FontSize,
IndexSlot,
Labelable,
Mouse,
Point,
Positionable,
Scalar,
Text,
Theme,
UpdateArgs,
Widget,
WidgetKind,
Ui,
};
/// The type upon which we'll implement the `Widget` trait.
pub struct CircularButton<'a, F> {
/// An object that handles some of the dirty work of rendering a GUI. We don't
/// really have to worry about it.
common: CommonBuilder,
/// Optional label string for the button.
maybe_label: Option<&'a str>,
/// Optional callback for when the button is pressed. If you want the button to
/// do anything, this callback must exist.
maybe_react: Option<F>,
/// See the Style struct below.
style: Style,
/// Whether the button is currently enabled, i.e. whether it responds to
/// user input.
enabled: bool
}
/// Represents the unique styling for our CircularButton widget.
#[derive(Clone, Debug, PartialEq)]
pub struct Style {
/// Color of the button.
pub maybe_color: Option<Color>,
/// Radius of the button.
pub maybe_radius: Option<Scalar>,
/// Color of the button's label.
pub maybe_label_color: Option<Color>,
/// Font size of the button's label.
pub maybe_label_font_size: Option<FontSize>,
}
/// Represents the unique, cached state for our CircularButton widget.
#[derive(Clone, Debug, PartialEq)]
pub struct State {
/// The current interaction state. See the Interaction enum below. See also
/// get_new_interaction below, where we define all the logic for transitioning between
/// interaction states.
interaction: Interaction,
/// An index to use for our **Circle** primitive graphics widget.
circle_idx: IndexSlot,
/// An index to use for our **Text** primitive graphics widget (for the label).
text_idx: IndexSlot,
}
/// A `&'static str` that can be used to uniquely identify our widget type.
pub const KIND: WidgetKind = "CircularButton";
/// A type to keep track of interaction between updates.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum Interaction {
Normal,
Highlighted,
Clicked,
}
impl Interaction {
/// Alter the widget color depending on the current interaction.
fn color(&self, color: Color) -> Color {
match *self {
// The base color as defined in the Style struct, or a default provided
// by the current Theme if the Style has no color.
Interaction::Normal => color,
// The Color object (from Elmesque) can calculate a highlighted version
// of itself. We don't have to use it, though. We could specify any color
// we want.
Interaction::Highlighted => color.highlighted(),
// Ditto for clicked.
Interaction::Clicked => color.clicked(),
}
}
}
/// Check the current interaction with the button. Takes into account whether the mouse is
/// over the button and the previous interaction state.
fn get_new_interaction(is_over: bool, prev: Interaction, mouse: Mouse) -> Interaction {
use conrod::MouseButtonPosition::{Down, Up};
use self::Interaction::{Normal, Highlighted, Clicked};
match (is_over, prev, mouse.left.position) {
// LMB is down over the button. But the button wasn't Highlighted last
// update. This means the user clicked somewhere outside the button and
// moved over the button holding LMB down. We do nothing in this case.
(true, Normal, Down) => Normal,
// LMB is down over the button. The button was either Highlighted or Clicked
// last update. If it was highlighted before, that means the user clicked
// just now, and we transition to the Clicked state. If it was clicked
// before, that means the user is still holding LMB down from a previous
// click, in which case the state remains Clicked.
(true, _, Down) => Clicked,
// LMB is up. The mouse is hovering over the button. Regardless of what the
// state was last update, the state should definitely be Highlighted now.
(true, _, Up) => Highlighted,
// LMB is down, the mouse is not over the button, but the previous state was
// Clicked. That means the user clicked the button and then moved the mouse
// outside the button while holding LMB down. The button stays Clicked.
(false, Clicked, Down) => Clicked,
// If none of the above applies, then nothing interesting is happening with
// this button.
_ => Normal,
}
}
/// Return whether or not a given point is over a circle at a given point on a
/// Cartesian plane. We use this to determine whether the mouse is over the button.
pub fn is_over_circ(circ_center: Point, mouse_point: Point, dim: Dimensions) -> bool {
// Offset vector from the center of the circle to the mouse.
let offset = ::vecmath::vec2_sub(mouse_point, circ_center);
// If the length of the offset vector is less than or equal to the circle's
// radius, then the mouse is inside the circle. We assume that dim is a square
// bounding box around the circle, thus 2 * radius == dim[0] == dim[1].
::vecmath::vec2_len(offset) <= dim[0] / 2.0
}
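// A quick sanity check of the containment test above, as a unit test. The
// numbers are illustrative: with a 64x64 bounding box the radius is 32, so a
// point 30 units from the center is inside and one 40 units away is not.
#[cfg(test)]
mod is_over_circ_tests {
    use super::is_over_circ;
    #[test]
    fn detects_points_inside_and_outside_the_circle() {
        assert!(is_over_circ([0.0, 0.0], [30.0, 0.0], [64.0, 64.0]));
        assert!(!is_over_circ([0.0, 0.0], [40.0, 0.0], [64.0, 64.0]));
    }
}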
impl<'a, F> CircularButton<'a, F> {
/// Create a button context to be built upon.
pub fn new() -> CircularButton<'a, F> {
CircularButton {
common: CommonBuilder::new(),
maybe_react: None,
maybe_label: None,
style: Style::new(),
enabled: true,
}
}
/// Set the reaction for the Button. The reaction will be triggered upon release
/// of the button. Like other Conrod configs, this returns self for chainability.
pub fn react(mut self, reaction: F) -> Self {
self.maybe_react = Some(reaction);
self
}
/// If true, will allow user inputs. If false, will disallow user inputs. Like
/// other Conrod configs, this returns self for chainability. Allow dead code
/// because we never call this in the example.
#[allow(dead_code)]
pub fn enabled(mut self, flag: bool) -> Self {
self.enabled = flag;
self
}
}
/// A custom Conrod widget must implement the Widget trait. See the **Widget** trait
/// documentation for more details.
impl<'a, F> Widget for CircularButton<'a, F>
where F: FnMut()
{
/// The State struct that we defined above.
type State = State;
/// The Style struct that we defined above.
type Style = Style;
fn common(&self) -> &CommonBuilder {
&self.common
}
fn common_mut(&mut self) -> &mut CommonBuilder {
&mut self.common
}
fn unique_kind(&self) -> &'static str {
KIND
}
fn init_state(&self) -> State {
State {
interaction: Interaction::Normal,
circle_idx: IndexSlot::new(),
text_idx: IndexSlot::new(),
}
}
fn style(&self) -> Style {
self.style.clone()
}
/// Default width of the widget.
///
/// This method is optional.
///
/// The default implementation is the same as below, but unwraps to an absolute scalar of
/// `0.0` instead of `64.0`.
fn default_x_dimension<C: CharacterCache>(&self, ui: &Ui<C>) -> Dimension {
// If no width was given via the `Sizeable` (a trait implemented for all widgets)
// methods, some default width must be chosen.
//
// Defaults can come from several places. Here, we define how certain defaults take
// precedence over others.
//
// Most commonly, defaults are to be retrieved from the `Theme`, however in some cases
// some other logic may need to be considered.
default_x_dimension(self, ui).unwrap_or(Dimension::Absolute(64.0))
}
/// Default height of the widget.
///
/// This method is optional.
///
/// The default implementation is the same as below, but unwraps to an absolute scalar of
/// `0.0` instead of `64.0`.
fn default_y_dimension<C: CharacterCache>(&self, ui: &Ui<C>) -> Dimension {
default_y_dimension(self, ui).unwrap_or(Dimension::Absolute(64.0))
}
/// Update the state of the button. The state may or may not have changed since
/// the last update. (E.g. it may have changed because the user moused over the
/// button.) Any change is recorded via `state.update` rather than a return value.
fn update<C: CharacterCache>(mut self, args: UpdateArgs<Self, C>) {
let UpdateArgs { idx, state, rect, mut ui, style, .. } = args;
let (xy, dim) = rect.xy_dim();
let maybe_mouse = ui.input().maybe_mouse.map(|mouse| mouse.relative_to(xy));
// Check whether or not a new interaction has occurred.
let new_interaction = match (self.enabled, maybe_mouse) {
(false, _) | (true, None) => Interaction::Normal,
(true, Some(mouse)) => {
// Conrod does us a favor by transforming mouse.xy into this widget's
// local coordinate system. Because mouse.xy is in local coords,
// we must also pass the circle center in local coords. Thus we pass
// [0.0, 0.0] as the center.
//
// See above where we define is_over_circ.
let is_over = is_over_circ([0.0, 0.0], mouse.xy, dim);
// See above where we define get_new_interaction.
get_new_interaction(is_over, state.view().interaction, mouse)
},
};
// If the mouse was released over the button, react. state.interaction is the
// button's state as of a moment ago. new_interaction is the updated state as
// of right now. So this if statement is saying: If the button was clicked a
// moment ago, and it's now highlighted, then the button has been activated.
if let (Interaction::Clicked, Interaction::Highlighted) =
(state.view().interaction, new_interaction)
{
// Recall that our CircularButton struct includes maybe_react, which
// stores either a reaction function or None. If maybe_react is Some, call
// the function.
if let Some(ref mut react) = self.maybe_react {
react();
}
}
// Here we check to see whether or not our button should capture the mouse.
//
// Widgets can "capture" user input. If the button captures the mouse, then mouse
// events will only be seen by the button. Other widgets will not see mouse events
// until the button uncaptures the mouse.
match (state.view().interaction, new_interaction) {
// If the user has pressed the button we capture the mouse.
(Interaction::Highlighted, Interaction::Clicked) => {
ui.capture_mouse();
},
// If the user releases the button, we uncapture the mouse.
(Interaction::Clicked, Interaction::Highlighted) |
(Interaction::Clicked, Interaction::Normal) => {
ui.uncapture_mouse();
},
_ => (),
}
// Whenever we call `state.update` (as below), a flag is set within our `State`
// indicating that there has been some mutation and that our widget requires a
// re-draw. Thus, we only want to call `state.update` if there has been some change in
// order to only re-draw when absolutely required.
//
// You can see how we do this below - we check if the state has changed before calling
// `state.update`.
// If the interaction has changed, set the new interaction.
if state.view().interaction != new_interaction {
state.update(|state| state.interaction = new_interaction);
}
// Finally, we'll describe how we want our widget drawn by simply instantiating the
// necessary primitive graphics widgets.
//
// Conrod will automatically determine whether or not any changes have occurred and
// whether or not any widgets need to be re-drawn.
//
// The primitive graphics widgets are special in that their unique state is used within
// conrod's backend to do the actual drawing. This allows us to build up more complex
// widgets by using these simple primitives with our familiar layout, coloring, etc
// methods.
//
// If you notice that conrod is missing some sort of primitive graphics that you
// require, please file an issue or open a PR so we can add it! :)
// First, we'll draw the **Circle** with a radius that is half our given width.
let radius = rect.w() / 2.0;
let color = new_interaction.color(style.color(ui.theme()));
let circle_idx = state.view().circle_idx.get(&mut ui);
Circle::fill(radius)
.middle_of(idx)
.graphics_for(idx)
.color(color)
.set(circle_idx, &mut ui);
// Now we'll instantiate our label using the **Text** widget.
let label_color = style.label_color(ui.theme());
let font_size = style.label_font_size(ui.theme());
let text_idx = state.view().text_idx.get(&mut ui);
if let Some(ref label) = self.maybe_label {
Text::new(label)
.middle_of(idx)
.font_size(font_size)
.graphics_for(idx)
.color(label_color)
.set(text_idx, &mut ui);
}
}
}
impl Style {
/// Construct the default Style.
pub fn new() -> Style {
Style {
maybe_color: None,
maybe_radius: None,
maybe_label_color: None,
maybe_label_font_size: None,
}
}
/// Get the Color for an Element.
pub fn color(&self, theme: &Theme) -> Color {
self.maybe_color.or(theme.widget_style::<Self>(KIND).map(|default| {
default.style.maybe_color.unwrap_or(theme.shape_color)
})).unwrap_or(theme.shape_color)
}
/// Get the label Color for an Element.
pub fn label_color(&self, theme: &Theme) -> Color {
self.maybe_label_color.or(theme.widget_style::<Self>(KIND).map(|default| {
default.style.maybe_label_color.unwrap_or(theme.label_color)
})).unwrap_or(theme.label_color)
}
/// Get the label font size for an Element.
pub fn label_font_size(&self, theme: &Theme) -> FontSize {
self.maybe_label_font_size.or(theme.widget_style::<Self>(KIND).map(|default| {
default.style.maybe_label_font_size.unwrap_or(theme.font_size_medium)
})).unwrap_or(theme.font_size_medium)
}
}
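// Note the resolution order shared by the three getters above: a value set
// explicitly on this widget instance wins, then the Theme's per-widget default
// for KIND (if any), and finally the Theme's global fallback field.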
/// Provide the chainable color() configuration method.
impl<'a, F> Colorable for CircularButton<'a, F> {
fn color(mut self, color: Color) -> Self {
self.style.maybe_color = Some(color);
self
}
}
/// Provide the chainable label(), label_color(), and label_font_size()
/// configuration methods.
impl<'a, F> Labelable<'a> for CircularButton<'a, F> {
fn label(mut self, text: &'a str) -> Self {
self.maybe_label = Some(text);
self
}
fn label_color(mut self, color: Color) -> Self {
self.style.maybe_label_color = Some(color);
self
}
fn label_font_size(mut self, size: FontSize) -> Self {
self.style.maybe_label_font_size = Some(size);
self
}
}
}
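// The ids `BACKGROUND` and `BUTTON` used in `main` below are not defined
// anywhere in this listing; this sketch assumes they come from conrod's
// `widget_ids!` macro (hence the #[macro_use] import above). The exact macro
// syntax may differ between conrod versions.
widget_ids! {
    BACKGROUND,
    BUTTON
}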
fn main() {
use piston_window::{EventLoop, Glyphs, PistonWindow, OpenGL, UpdateEvent, WindowSettings};
use conrod::{Colorable, Labelable, Positionable, Sizeable, Widget};
use circular_button::CircularButton;
// PistonWindow has two type parameters, but the default type is
// PistonWindow<T = (), W: Window = GlutinWindow>. To change the Piston backend,
// specify a different type in the let binding, e.g.
// let window: PistonWindow<(), Sdl2Window>.
let window: PistonWindow = WindowSettings::new("Control Panel", [1200, 800])
.opengl(OpenGL::V3_2)
.exit_on_esc(true)
.build().unwrap();
// Conrod's main object.
let mut ui = {
// Load a font. `Glyphs` is provided to us via piston_window and gfx, though you may use
// any type that implements `CharacterCache`.
let assets = find_folder::Search::ParentsThenKids(3, 3)
.for_folder("assets").unwrap();
let font_path = assets.join("fonts/NotoSans/NotoSans-Regular.ttf");
let glyph_cache = Glyphs::new(&font_path, window.factory.borrow().clone()).unwrap();
conrod::Ui::new(glyph_cache, conrod::Theme::default())
};
for e in window.ups(60) {
// Pass each `Event` to the `Ui`.
ui.handle_event(e.event.as_ref().unwrap());
e.update(|_| ui.set_widgets(|ui| {
// Sets a color to clear the background with before the Ui draws our widget.
conrod::Split::new(BACKGROUND).color(conrod::color::dark_red()).set(ui);
// Create an instance of our custom widget.
CircularButton::new()
.color(conrod::color::rgb(0.0, 0.3, 0.1))
.middle_of(BACKGROUND)
.dimensions(256.0, 256.0)
.label_color(conrod::color::white())
.label("Circular Button")
// This is called when the user clicks the button.
.react(|| println!("Click"))
// Add the widget to the conrod::Ui. This schedules the widget to be
// drawn when we next call Ui::draw.
// (The tail of this example was truncated in the source; the final
// `set` call and closing braces below are a reconstruction, using the
// `BUTTON` id sketched above `main`.)
.set(BUTTON, ui);
}));
}
}
wire.rs
//! Creating and consuming data in wire format.
use super::name::ToDname;
use super::net::{Ipv4Addr, Ipv6Addr};
use core::fmt;
use octseq::builder::{OctetsBuilder, Truncate};
use octseq::parse::{Parser, ShortInput};
//------------ Composer ------------------------------------------------------
pub trait Composer:
OctetsBuilder + AsRef<[u8]> + AsMut<[u8]> + Truncate
{
/// Appends a domain name using name compression if supported.
///
/// Domain name compression attempts to lower the size of a DNS message
/// by avoiding repeated domain name suffixes. Instead of
/// adding the full suffix, a pointer to the location of the previous
/// occurrence is added. Since that occurrence may itself contain a
/// compressed suffix, doing name compression isn’t cheap and therefore
/// optional. However, in order to be able to opt in, we need to know
/// if we are dealing with a domain name that ought to be compressed.
///
/// The trait provides a default implementation which simply appends the
/// name uncompressed.
fn append_compressed_dname<N: ToDname + ?Sized>(
&mut self,
name: &N,
) -> Result<(), Self::AppendError> {
name.compose(self)
}
fn can_compress(&self) -> bool {
false
}
}
#[cfg(feature = "std")]
impl Composer for std::vec::Vec<u8> {}
impl<const N: usize> Composer for octseq::array::Array<N> {}
#[cfg(feature = "bytes")]
impl Composer for bytes::BytesMut {}
#[cfg(feature = "smallvec")]
impl<A: smallvec::Array<Item = u8>> Composer for smallvec::SmallVec<A> {}
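// All of the impls above are empty because `Composer` only adds defaulted
// methods on top of its supertraits; any buffer type satisfying those bounds
// can opt in with a one-line impl, e.g. `impl Composer for MyBuf {}` (where
// `MyBuf` is a hypothetical buffer type).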
//------------ Compose -------------------------------------------------------
/// An extension trait to add composing to foreign types.
///
/// This trait can be used to add the `compose` method to a foreign type. For
/// local types, the method should be added directly to the type instead.
///
/// The trait can only be used for types that have a fixed-size wire
/// representation.
pub trait Compose {
/// The length in octets of the wire representation of a value.
///
/// Because all wire format lengths are limited to 16 bits, this is a
/// `u16` rather than a `usize`.
const COMPOSE_LEN: u16 = 0;
/// Appends the wire format representation of the value to the target.
fn compose<Target: OctetsBuilder + ?Sized>(
&self,
target: &mut Target,
) -> Result<(), Target::AppendError>;
}
impl<'a, T: Compose + ?Sized> Compose for &'a T {
const COMPOSE_LEN: u16 = T::COMPOSE_LEN;
fn compose<Target: OctetsBuilder + ?Sized>(
&self,
target: &mut Target,
) -> Result<(), Target::AppendError> {
(*self).compose(target)
}
}
impl Compose for i8 {
const COMPOSE_LEN: u16 = 1;
fn compose<Target: OctetsBuilder + ?Sized>(
&self,
target: &mut Target,
) -> Result<(), Target::AppendError> {
target.append_slice(&[*self as u8])
}
}
impl Compose for u8 {
const COMPOSE_LEN: u16 = 1;
fn compose<Target: OctetsBuilder + ?Sized>(
&self,
target: &mut Target,
) -> Result<(), Target::AppendError> {
target.append_slice(&[*self])
}
}
macro_rules! compose_to_be_bytes {
( $type:ident ) => {
impl Compose for $type {
const COMPOSE_LEN: u16 = ($type::BITS >> 3) as u16;
fn compose<Target: OctetsBuilder + ?Sized>(
&self,
target: &mut Target,
) -> Result<(), Target::AppendError> {
target.append_slice(&self.to_be_bytes())
}
}
};
}
compose_to_be_bytes!(i16);
compose_to_be_bytes!(u16);
compose_to_be_bytes!(i32);
compose_to_be_bytes!(u32);
compose_to_be_bytes!(i64);
compose_to_be_bytes!(u64);
compose_to_be_bytes!(i128);
compose_to_be_bytes!(u128);
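// Each invocation above expands to an impl mirroring the hand-written i8/u8
// ones: COMPOSE_LEN is the type's byte width and the body appends
// `self.to_be_bytes()`, so multi-octet integers land in big-endian (network)
// order on the wire.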
impl Compose for Ipv4Addr {
const COMPOSE_LEN: u16 = 4;
fn compose<Target: OctetsBuilder + ?Sized>(
&self,
target: &mut Target,
) -> Result<(), Target::AppendError> {
target.append_slice(&self.octets())
}
}
impl Compose for Ipv6Addr {
const COMPOSE_LEN: u16 = 16;
fn compose<Target: OctetsBuilder + ?Sized>(
&self,
target: &mut Target,
) -> Result<(), Target::AppendError> {
target.append_slice(&self.octets())
}
}
// No impl for [u8; const N: usize] because we can’t guarantee a correct
// COMPOSE_LEN -- it may be longer than a u16 can hold.
//------------ Parse ------------------------------------------------------
/// An extension trait to add parsing to foreign types.
///
/// This trait can be used to add the `parse` method to a foreign type. For
/// local types, the method should be added directly to the type instead.
pub trait Parse<'a, Octs: ?Sized>: Sized {
/// Extracts a value from the beginning of `parser`.
///
/// If parsing fails and an error is returned, the parser’s position
/// should be considered to be undefined. If it is supposed to be reused
/// in this case, you should store the position before attempting to parse
/// and seek to that position again before continuing.
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError>;
}
impl<'a, Octs: AsRef<[u8]> + ?Sized> Parse<'a, Octs> for i8 {
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError> {
parser.parse_i8().map_err(Into::into)
}
}
impl<'a, Octs: AsRef<[u8]> + ?Sized> Parse<'a, Octs> for u8 {
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError> {
parser.parse_u8().map_err(Into::into)
}
}
impl<'a, Octs: AsRef<[u8]> + ?Sized> Parse<'a, Octs> for i16 {
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError> {
parser.parse_i16_be().map_err(Into::into)
}
}
impl<'a, Octs: AsRef<[u8]> + ?Sized> Parse<'a, Octs> for u16 {
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError> {
parser.parse_u16_be().map_err(Into::into)
}
}
impl<'a, Octs: AsRef<[u8]> + ?Sized> Parse<'a, Octs> for i32 {
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError> {
parser.parse_i32_be().map_err(Into::into)
}
}
impl<'a, Octs: AsRef<[u8]> + ?Sized> Parse<'a, Octs> for u32 {
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError> {
parser.parse_u32_be().map_err(Into::into)
}
}
impl<'a, Octs: AsRef<[u8]> + ?Sized> Parse<'a, Octs> for u64 {
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError> {
parser.parse_u64_be().map_err(Into::into)
}
}
impl<'a, Octs: AsRef<[u8]> + ?Sized> Parse<'a, Octs> for i64 {
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError> {
parser.parse_i64_be().map_err(Into::into)
}
}
impl<'a, Octs: AsRef<[u8]> + ?Sized> Parse<'a, Octs> for Ipv4Addr {
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError> {
Ok(Self::new(
u8::parse(parser)?,
u8::parse(parser)?,
u8::parse(parser)?,
u8::parse(parser)?,
))
}
}
impl<'a, Octs: AsRef<[u8]> + ?Sized> Parse<'a, Octs> for Ipv6Addr {
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError> {
let mut buf = [0u8; 16];
parser.parse_buf(&mut buf)?;
Ok(buf.into())
}
}
impl<'a, Octs: AsRef<[u8]> + ?Sized, const N: usize> Parse<'a, Octs>
for [u8; N]
{
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError> {
let mut res = [0u8; N];
parser.parse_buf(&mut res)?;
Ok(res)
}
}
//============ Helpful Functions =============================================
/// Parses something from a `Vec<u8>`.
///
/// The actual parsing happens in the provided closure. Returns an error if
/// the closure returns an error or if there is unparsed data left over after
/// the closure returns. Otherwise returns whatever the closure returned.
#[cfg(feature = "std")]
pub fn parse_slice<F, T>(data: &[u8], op: F) -> Result<T, ParseError>
where
F: FnOnce(&mut Parser<[u8]>) -> Result<T, ParseError>,
{
let mut parser = Parser::from_ref(data);
let res = op(&mut parser)?;
if parser.remaining() > 0 {
Err(ParseError::form_error("trailing data"))
} else {
Ok(res)
}
}
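// A short usage sketch (values invented for illustration): parse a u16 and an
// Ipv4Addr, and fail if any bytes remain afterwards.
//
//     let data = [0x00, 0x35, 192, 0, 2, 1];
//     let (port, addr) = parse_slice(&data, |parser| {
//         Ok((u16::parse(parser)?, Ipv4Addr::parse(parser)?))
//     }).unwrap();
//     assert_eq!(port, 53);
//     assert_eq!(addr, Ipv4Addr::new(192, 0, 2, 1));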
/// Composes something into a `Vec<u8>`.
///
/// The actual composing happens in the provided closure.
/// This function is mostly useful in testing, as it lets you construct the
/// vec directly inside an assertion.
#[cfg(feature = "std")]
pub fn compose_vec(
op: impl FnOnce(
&mut std::vec::Vec<u8>,
) -> Result<(), core::convert::Infallible>,
) -> std::vec::Vec<u8> {
let mut res = std::vec::Vec::new();
octseq::builder::infallible(op(&mut res));
res
}
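// And the inverse direction (again a sketch): compose a u16 followed by an
// Ipv4Addr into network byte order.
//
//     let bytes = compose_vec(|target| {
//         53u16.compose(target)?;
//         Ipv4Addr::new(192, 0, 2, 1).compose(target)
//     });
//     assert_eq!(bytes, [0x00, 0x35, 192, 0, 2, 1]);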
//============ Error Types ===================================================
//------------ ParseError ----------------------------------------------------
/// An error happened while parsing data.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum ParseError {
/// An attempt was made to go beyond the end of the parser.
ShortInput,
/// A formatting error occurred.
Form(FormError),
}
impl ParseError {
/// Creates a new parse error as a form error with the given message.
pub fn form_error(msg: &'static str) -> Self {
FormError::new(msg).into()
}
}
//--- From
impl From<ShortInput> for ParseError {
fn from(_: ShortInput) -> Self {
ParseError::ShortInput
}
}
impl From<FormError> for ParseError {
fn from(err: FormError) -> Self {
ParseError::Form(err)
}
}
//--- Display and Error
impl fmt::Display for ParseError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
ParseError::ShortInput => f.write_str("unexpected end of input"),
ParseError::Form(ref err) => err.fmt(f),
}
}
}
#[cfg(feature = "std")]
impl std::error::Error for ParseError {}
//------------ FormError -----------------------------------------------------
/// A formatting error occurred.
///
/// This is a generic error for all kinds of error cases that result in data
/// not being accepted. For diagnostics, the error is being given a static
/// string describing the error.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct FormError(&'static str);
impl FormError {
/// Creates a new form error value with the given diagnostics string.
pub fn new(msg: &'static str) -> Self {
FormError(msg)
}
}
//--- Display and Error
impl fmt::Display for FormError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(self.0)
}
}
#[cfg(feature = "std")]
impl std::error::Error for FormError {}
request.rs
extern crate base64;
extern crate md5;
use std::collections::HashMap;
use std::io::{Read, Write};
use bucket::Bucket;
use chrono::{DateTime, Utc};
use command::Command;
use hmac::Mac;
use reqwest::async;
use reqwest::header::{self, HeaderMap, HeaderName, HeaderValue};
use sha2::{Digest, Sha256};
use url::Url;
use futures::prelude::*;
use tokio::runtime::current_thread::Runtime;
use signing;
use error::{S3Error, S3Result};
use reqwest::async::Response;
use EMPTY_PAYLOAD_SHA;
use LONG_DATE;
/// Collection of HTTP headers sent to the S3 service, in key/value format.
pub type Headers = HashMap<String, String>;
/// Collection of HTTP query parameters sent to the S3 service, in key/value
/// format.
pub type Query = HashMap<String, String>;
// Temporary structure for making a request
pub struct Request<'a> {
pub bucket: &'a Bucket,
pub path: &'a str,
pub command: Command<'a>,
pub datetime: DateTime<Utc>,
pub async: bool,
}
impl<'a> Request<'a> {
pub fn new<'b>(bucket: &'b Bucket, path: &'b str, command: Command<'b>) -> Request<'b> {
Request {
bucket,
path,
command,
datetime: Utc::now(),
async: false,
}
}
fn url(&self) -> Url {
let mut url_str = match self.command {
Command::GetBucketLocation => {
format!("{}://{}", self.bucket.scheme(), self.bucket.self_host())
}
_ => format!("{}://{}", self.bucket.scheme(), self.bucket.host()),
};
match self.command {
Command::GetBucketLocation => {}
_ => {
url_str.push_str("/");
url_str.push_str(&self.bucket.name());
}
}
if !self.path.starts_with('/') {
url_str.push_str("/");
}
match self.command {
Command::GetBucketLocation => url_str.push_str(self.path),
_ => url_str.push_str(&signing::uri_encode(self.path, false)),
};
// Since every part of this URL is either pre-encoded or statically
// generated, there's really no way this should fail.
let mut url = Url::parse(&url_str).expect("static URL parsing");
for (key, value) in &self.bucket.extra_query {
url.query_pairs_mut().append_pair(key, value);
}
if let Command::ListBucket {
prefix,
delimiter,
continuation_token,
} = self.command.clone()
{
let mut query_pairs = url.query_pairs_mut();
delimiter.map(|d| query_pairs.append_pair("delimiter", &d.clone()));
query_pairs.append_pair("prefix", &prefix);
query_pairs.append_pair("list-type", "2");
if let Some(token) = continuation_token {
query_pairs.append_pair("continuation-token", &token);
}
}
match self.command {
Command::PutObjectTagging { .. }
| Command::GetObjectTagging
| Command::DeleteObjectTagging => {
url.query_pairs_mut().append_pair("tagging", "");
}
_ => {}
}
// println!("{}", url);
url
}
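// As a sketch (hypothetical bucket and region), a GetObject request for path
// "/my key.txt" produces a URL like
// `https://s3.eu-west-1.amazonaws.com/my-bucket/my%20key.txt`, while
// GetBucketLocation targets the bucket's own host and skips the URI encoding.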
fn content_length(&self) -> usize {
match self.command {
Command::PutObject { content, .. } => content.len(),
Command::PutObjectTagging { tags } => tags.len(),
_ => 0,
}
}
fn content_type(&self) -> String {
match self.command {
Command::PutObject { content_type, .. } => content_type.into(),
_ => "text/plain".into(),
}
}
fn sha256(&self) -> String {
match self.command {
Command::PutObject { content, .. } => {
let mut sha = Sha256::default();
sha.input(content);
hex::encode(sha.result().as_slice())
}
Command::PutObjectTagging { tags } => {
let mut sha = Sha256::default();
sha.input(tags.as_bytes());
hex::encode(sha.result().as_slice())
}
_ => EMPTY_PAYLOAD_SHA.into(),
}
}
fn long_date(&self) -> String {
self.datetime.format(LONG_DATE).to_string()
}
fn canonical_request(&self, headers: &HeaderMap) -> String {
signing::canonical_request(
self.command.http_verb().as_str(),
&self.url(),
headers,
&self.sha256(),
)
}
fn string_to_sign(&self, request: &str) -> String {
signing::string_to_sign(&self.datetime, &self.bucket.region(), request)
}
fn signing_key(&self) -> S3Result<Vec<u8>> {
Ok(signing::signing_key(
&self.datetime,
&self.bucket.secret_key(),
&self.bucket.region(),
"s3",
)?)
}
fn authorization(&self, headers: &HeaderMap) -> S3Result<String> {
let canonical_request = self.canonical_request(headers);
let string_to_sign = self.string_to_sign(&canonical_request);
let mut hmac = signing::HmacSha256::new_varkey(&self.signing_key()?)?;
hmac.input(string_to_sign.as_bytes());
let signature = hex::encode(hmac.result().code());
let signed_header = signing::signed_header_string(headers);
Ok(signing::authorization_header(
&self.bucket.access_key(),
&self.datetime,
&self.bucket.region(),
&signed_header,
&signature,
))
}
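// Taken together, the helpers above implement the AWS Signature Version 4
// chain: hash the canonical request, fold it into a scoped string-to-sign,
// derive a date/region/service-scoped signing key, then HMAC-SHA256 the
// string-to-sign with that key and hex-encode the result for the
// Authorization header.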
fn headers(&self) -> S3Result<HeaderMap> {
// Generate this once, but it's used in more than one place.
let sha256 = self.sha256();
// Start with extra_headers, that way our headers replace anything with
// the same name.
let mut headers = self
.bucket
.extra_headers
.iter()
.map(|(k, v)| Ok((k.parse::<HeaderName>()?, v.parse::<HeaderValue>()?)))
.collect::<Result<HeaderMap, S3Error>>()?;
match self.command {
Command::GetBucketLocation => {
headers.insert(header::HOST, self.bucket.self_host().parse()?)
}
_ => headers.insert(header::HOST, self.bucket.host().parse()?),
};
headers.insert(
header::CONTENT_LENGTH,
self.content_length().to_string().parse()?,
);
headers.insert(header::CONTENT_TYPE, self.content_type().parse()?);
headers.insert("X-Amz-Content-Sha256", sha256.parse()?);
headers.insert("X-Amz-Date", self.long_date().parse()?);
if let Some(token) = self.bucket.credentials().token.as_ref() {
headers.insert("X-Amz-Security-Token", token.parse()?);
}
if let Command::PutObjectTagging { tags } = self.command {
let digest = md5::compute(tags);
let hash = base64::encode(digest.as_ref());
headers.insert("Content-MD5", hash.parse()?);
} else if let Command::PutObject { content, .. } = self.command {
let digest = md5::compute(content);
let hash = base64::encode(digest.as_ref());
headers.insert("Content-MD5", hash.parse()?);
} else if let Command::GetObject {} = self.command {
headers.insert(
header::ACCEPT,
HeaderValue::from_str("application/octet-stream")?,
);
// headers.insert(header::ACCEPT_CHARSET, HeaderValue::from_str("UTF-8")?);
}
// This must be last, as it signs the other headers
let authorization = self.authorization(&headers)?;
headers.insert(header::AUTHORIZATION, authorization.parse()?);
// The format of RFC2822 is somewhat malleable, so including it in
// signed headers can cause signature mismatches. We do include the
// X-Amz-Date header, so requests are still properly limited to a date
// range and can't be reused in e.g. replay attacks. Adding this header
// after the generation of the Authorization header leaves it out of
// the signed headers.
headers.insert(header::DATE, self.datetime.to_rfc2822().parse()?);
Ok(headers)
}
pub fn response_data(&self) -> S3Result<(Vec<u8>, u16)> {
let response_data = self.response_data_future().then(|result| match result {
Ok((response_data, status_code)) => Ok((response_data, status_code)),
Err(e) => Err(e),
});
let mut runtime = Runtime::new().unwrap();
runtime.block_on(response_data)
}
pub fn response_data_to_writer<T: Write>(&self, writer: &mut T) -> S3Result<u16> {
let status_code_future =
self.response_data_to_writer_future(writer)
.then(|result| match result {
Ok(status_code) => Ok(status_code),
Err(_) => Err(S3Error::from("ReqwestFuture")),
});
let mut runtime = Runtime::new().unwrap();
runtime.block_on(status_code_future)
}
pub fn response_future(&self) -> impl Future<Item = Response, Error = S3Error> {
let client = if cfg!(feature = "no-verify-ssl") {
async::Client::builder()
.danger_accept_invalid_certs(true)
.danger_accept_invalid_hostnames(true)
.build()
.expect("Could not build dangerous client!")
} else {
async::Client::new()
};
// Build headers
let headers = self.headers().expect("Could not get headers!");
// Get owned content to pass to reqwest
let content = if let Command::PutObject { content, .. } = self.command {
Vec::from(content)
} else if let Command::PutObjectTagging { tags } = self.command {
Vec::from(tags)
} else {
Vec::new()
};
let request = client
.request(self.command.http_verb(), self.url().as_str())
.headers(headers.to_owned())
.body(content.to_owned());
request.send().map_err(S3Error::from)
}
pub fn response_data_future(&self) -> impl Future<Item = (Vec<u8>, u16), Error = S3Error> {
self.response_future()
.and_then(|response| {
// println!("{:?}", response.headers());
let status_code = response.status().as_u16();
Ok((response.into_body().collect(), status_code))
})
.and_then(|(body_future, status_code)| {
body_future
.and_then(move |body| {
let mut entire_body = body
.iter()
.fold(vec![], |mut acc, slice| { acc.extend_from_slice(slice); acc });
entire_body.shrink_to_fit();
Ok((entire_body, status_code))
})
.map_err(S3Error::from)
})
}
pub fn response_data_to_writer_future<'b, T: Write>(
&self,
writer: &'b mut T,
) -> impl Future<Item = u16> + 'b {
let future_response = self.response_data_future();
future_response.and_then(move |(body, status_code)| {
writer
.write_all(body.as_slice())
.expect("Could not write to writer");
Ok(status_code)
})
}
}
#[cfg(test)]
mod tests {
use bucket::Bucket;
use command::Command;
use credentials::Credentials;
use error::S3Result;
use request::Request;
// Fake keys - otherwise using Credentials::default will use actual user
// credentials if they exist.
fn fake_credentials() -> Credentials {
const ACCESS_KEY: &'static str = "AKIAIOSFODNN7EXAMPLE";
const SECRET_KEY: &'static str = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY";
Credentials::new(Some(ACCESS_KEY.into()), Some(SECRET_KEY.into()), None, None)
}
#[test]
fn url_uses_https_by_default() -> S3Result<()> {
let region = "custom-region".parse()?;
let bucket = Bucket::new("my-first-bucket", region, fake_credentials())?;
let path = "/my-first/path";
let request = Request::new(&bucket, path, Command::GetObject);
assert_eq!(request.url().scheme(), "https");
let headers = request.headers().unwrap();
let host = headers.get("Host").unwrap();
assert_eq!(*host, "custom-region".to_string());
Ok(())
}
#[test]
fn url_uses_scheme_from_custom_region_if_defined() -> S3Result<()> {
let region = "http://custom-region".parse()?;
let bucket = Bucket::new("my-second-bucket", region, fake_credentials())?;
let path = "/my-second/path";
let request = Request::new(&bucket, path, Command::GetObject);
assert_eq!(request.url().scheme(), "http");
let headers = request.headers().unwrap();
let host = headers.get("Host").unwrap();
assert_eq!(*host, "custom-region".to_string());
Ok(())
}
}
request.rs | extern crate base64;
extern crate md5;
use std::collections::HashMap;
use std::io::{Read, Write};
use bucket::Bucket;
use chrono::{DateTime, Utc};
use command::Command;
use hmac::Mac;
use reqwest::async;
use reqwest::header::{self, HeaderMap, HeaderName, HeaderValue};
use sha2::{Digest, Sha256};
use url::Url;
use futures::prelude::*;
use tokio::runtime::current_thread::Runtime;
use signing;
use error::{S3Error, S3Result};
use reqwest::async::Response;
use EMPTY_PAYLOAD_SHA;
use LONG_DATE;
/// Collection of HTTP headers sent to S3 service, in key/value format.
pub type Headers = HashMap<String, String>;
/// Collection of HTTP query parameters sent to S3 service, in key/value
/// format.
pub type Query = HashMap<String, String>;
// Temporary structure for making a request
pub struct Request<'a> {
pub bucket: &'a Bucket,
pub path: &'a str,
pub command: Command<'a>,
pub datetime: DateTime<Utc>,
pub async: bool,
}
impl<'a> Request<'a> {
pub fn new<'b>(bucket: &'b Bucket, path: &'b str, command: Command<'b>) -> Request<'b> {
Request {
bucket,
path,
command,
datetime: Utc::now(),
async: false,
}
}
fn url(&self) -> Url {
let mut url_str = match self.command {
Command::GetBucketLocation => {
format!("{}://{}", self.bucket.scheme(), self.bucket.self_host())
}
_ => format!("{}://{}", self.bucket.scheme(), self.bucket.host()),
};
match self.command {
Command::GetBucketLocation => {}
_ => {
url_str.push_str("/");
url_str.push_str(&self.bucket.name());
}
}
if!self.path.starts_with('/') {
url_str.push_str("/");
}
match self.command {
Command::GetBucketLocation => url_str.push_str(self.path),
_ => url_str.push_str(&signing::uri_encode(self.path, false)),
};
// Since every part of this URL is either pre-encoded or statically
// generated, there's really no way this should fail.
let mut url = Url::parse(&url_str).expect("static URL parsing");
for (key, value) in &self.bucket.extra_query {
url.query_pairs_mut().append_pair(key, value);
}
if let Command::ListBucket {
prefix,
delimiter,
continuation_token,
} = self.command.clone()
{
let mut query_pairs = url.query_pairs_mut();
delimiter.map(|d| query_pairs.append_pair("delimiter", &d.clone()));
query_pairs.append_pair("prefix", &prefix);
query_pairs.append_pair("list-type", "2");
if let Some(token) = continuation_token {
query_pairs.append_pair("continuation-token", &token);
}
}
match self.command {
Command::PutObjectTagging { .. }
| Command::GetObjectTagging
| Command::DeleteObjectTagging => {
url.query_pairs_mut().append_pair("tagging", "");
}
_ => {}
}
// println!("{}", url);
url
}
fn content_length(&self) -> usize {
match self.command {
Command::PutObject { content, .. } => content.len(),
Command::PutObjectTagging { tags } => tags.len(),
_ => 0,
}
}
fn content_type(&self) -> String {
match self.command {
Command::PutObject { content_type, .. } => content_type.into(),
_ => "text/plain".into(),
}
}
fn sha256(&self) -> String {
match self.command {
Command::PutObject { content, .. } => {
let mut sha = Sha256::default();
sha.input(content);
hex::encode(sha.result().as_slice())
}
Command::PutObjectTagging { tags } => {
let mut sha = Sha256::default();
sha.input(tags.as_bytes());
hex::encode(sha.result().as_slice())
}
_ => EMPTY_PAYLOAD_SHA.into(),
}
}
fn long_date(&self) -> String {
self.datetime.format(LONG_DATE).to_string()
}
fn canonical_request(&self, headers: &HeaderMap) -> String {
signing::canonical_request(
self.command.http_verb().as_str(),
&self.url(),
headers,
&self.sha256(),
)
}
fn string_to_sign(&self, request: &str) -> String {
signing::string_to_sign(&self.datetime, &self.bucket.region(), request)
}
fn signing_key(&self) -> S3Result<Vec<u8>> {
Ok(signing::signing_key(
&self.datetime,
&self.bucket.secret_key(),
&self.bucket.region(),
"s3",
)?)
}
fn authorization(&self, headers: &HeaderMap) -> S3Result<String> {
let canonical_request = self.canonical_request(headers);
let string_to_sign = self.string_to_sign(&canonical_request);
let mut hmac = signing::HmacSha256::new_varkey(&self.signing_key()?)?;
hmac.input(string_to_sign.as_bytes());
let signature = hex::encode(hmac.result().code());
let signed_header = signing::signed_header_string(headers);
Ok(signing::authorization_header(
&self.bucket.access_key(),
&self.datetime,
&self.bucket.region(),
&signed_header,
&signature,
))
}
fn headers(&self) -> S3Result<HeaderMap> {
// Generate this once, but it's used in more than one place.
let sha256 = self.sha256();
// Start with extra_headers, that way our headers replace anything with
// the same name.
let mut headers = self
.bucket
.extra_headers
.iter()
.map(|(k, v)| Ok((k.parse::<HeaderName>()?, v.parse::<HeaderValue>()?)))
.collect::<Result<HeaderMap, S3Error>>()?;
match self.command {
Command::GetBucketLocation => {
headers.insert(header::HOST, self.bucket.self_host().parse()?)
}
_ => headers.insert(header::HOST, self.bucket.host().parse()?),
};
headers.insert(
header::CONTENT_LENGTH,
self.content_length().to_string().parse()?,
);
headers.insert(header::CONTENT_TYPE, self.content_type().parse()?);
headers.insert("X-Amz-Content-Sha256", sha256.parse()?);
headers.insert("X-Amz-Date", self.long_date().parse()?);
if let Some(token) = self.bucket.credentials().token.as_ref() {
headers.insert("X-Amz-Security-Token", token.parse()?);
}
if let Command::PutObjectTagging { tags } = self.command {
let digest = md5::compute(tags);
let hash = base64::encode(digest.as_ref());
headers.insert("Content-MD5", hash.parse()?);
} else if let Command::PutObject { content, .. } = self.command {
let digest = md5::compute(content);
let hash = base64::encode(digest.as_ref());
headers.insert("Content-MD5", hash.parse()?);
} else if let Command::GetObject {} = self.command {
headers.insert(
header::ACCEPT,
HeaderValue::from_str("application/octet-stream")?,
);
// headers.insert(header::ACCEPT_CHARSET, HeaderValue::from_str("UTF-8")?);
}
// This must be last, as it signs the other headers
let authorization = self.authorization(&headers)?;
headers.insert(header::AUTHORIZATION, authorization.parse()?);
// The format of RFC 2822 dates is somewhat malleable, so including the
// Date header in the signed headers can cause signature mismatches. We do
// include the X-Amz-Date header, so requests are still properly limited to
// a date range and can't be reused later in e.g. replay attacks. Adding
// this header after the generation of the Authorization header leaves it
// out of the signed headers.
headers.insert(header::DATE, self.datetime.to_rfc2822().parse()?);
Ok(headers)
}
pub fn response_data(&self) -> S3Result<(Vec<u8>, u16)> {
let response_data = self.response_data_future().then(|result| match result {
Ok((response_data, status_code)) => Ok((response_data, status_code)),
Err(e) => Err(e),
});
let mut runtime = Runtime::new().unwrap();
runtime.block_on(response_data)
}
pub fn response_data_to_writer<T: Write>(&self, writer: &mut T) -> S3Result<u16> {
let status_code_future =
self.response_data_to_writer_future(writer)
.then(|result| match result {
Ok(status_code) => Ok(status_code),
Err(_) => Err(S3Error::from("ReqwestFuture")),
});
let mut runtime = Runtime::new().unwrap();
runtime.block_on(status_code_future)
}
pub fn response_future(&self) -> impl Future<Item = Response, Error = S3Error> {
let client = if cfg!(feature = "no-verify-ssl") {
async::Client::builder()
.danger_accept_invalid_certs(true)
.danger_accept_invalid_hostnames(true)
.build()
.expect("Could not build dangerous client!")
} else {
async::Client::new()
};
// Build headers
let headers = self.headers().expect("Could not get headers!");
// Get owned content to pass to reqwest
let content = if let Command::PutObject { content, .. } = self.command {
Vec::from(content)
} else if let Command::PutObjectTagging { tags } = self.command {
Vec::from(tags)
} else {
Vec::new()
};
let request = client
.request(self.command.http_verb(), self.url().as_str())
.headers(headers.to_owned())
.body(content.to_owned());
request.send().map_err(S3Error::from)
}
pub fn response_data_future(&self) -> impl Future<Item = (Vec<u8>, u16), Error = S3Error> {
self.response_future()
.and_then(|response| {
// println!("{:?}", response.headers());
let status_code = response.status().as_u16();
Ok((response.into_body().collect(), status_code))
})
.and_then(|(body_future, status_code)| {
body_future
.and_then(move |body| {
let mut entire_body = body
.iter()
.fold(vec![], |mut acc, slice| {acc.extend_from_slice(slice); acc});
entire_body.shrink_to_fit();
Ok((entire_body, status_code))
})
.map_err(S3Error::from)
})
}
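/// Streams the response body into `writer` and resolves to the HTTP
/// status code.
///
/// A rough usage sketch (illustrative; mirrors how the blocking
/// `response_data_to_writer` above drives the future on a current-thread
/// tokio runtime):
/// ```ignore
/// let mut buf: Vec<u8> = Vec::new();
/// let fut = request.response_data_to_writer_future(&mut buf);
/// let status = Runtime::new().unwrap().block_on(fut).unwrap();
/// ```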
pub fn response_data_to_writer_future<'b, T: Write>(
&self,
writer: &'b mut T,
) -> impl Future<Item = u16> + 'b {
let future_response = self.response_data_future();
future_response.and_then(move |(body, status_code)| {
writer
.write_all(body.as_slice())
.expect("Could not write to writer");
Ok(status_code)
})
}
}
#[cfg(test)]
mod tests {
use bucket::Bucket;
use command::Command;
use credentials::Credentials;
use error::S3Result;
use request::Request;
// Fake keys - otherwise using Credentials::default will use actual user
// credentials if they exist.
fn fake_credentials() -> Credentials {
const ACCESS_KEY: &'static str = "AKIAIOSFODNN7EXAMPLE";
const SECRET_KEY: &'static str = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY";
Credentials::new(Some(ACCESS_KEY.into()), Some(SECRET_KEY.into()), None, None)
}
#[test]
fn url_uses_https_by_default() -> S3Result<()> {
let region = "custom-region".parse()?;
let bucket = Bucket::new("my-first-bucket", region, fake_credentials())?;
let path = "/my-first/path";
let request = Request::new(&bucket, path, Command::GetObject);
assert_eq!(request.url().scheme(), "https");
let headers = request.headers().unwrap();
let host = headers.get("Host").unwrap();
assert_eq!(*host, "custom-region".to_string());
Ok(())
}
#[test]
fn url_uses_scheme_from_custom_region_if_defined() -> S3Result<()> {
let region = "http://custom-region".parse()?;
let bucket = Bucket::new("my-second-bucket", region, fake_credentials())?;
let path = "/my-second/path";
let request = Request::new(&bucket, path, Command::GetObject);
assert_eq!(request.url().scheme(), "http");
let headers = request.headers().unwrap();
let host = headers.get("Host").unwrap();
assert_eq!(*host, "custom-region".to_string());
Ok(())
}
}
qutex.rs | //! A queue-backed exclusive data lock.
//!
//
// * It is unclear how many of the unsafe methods within actually need to
// remain unsafe.
use crossbeam::queue::SegQueue;
use futures::sync::oneshot::{self, Canceled, Receiver, Sender};
use futures::{Future, Poll};
use std::cell::UnsafeCell;
use std::ops::{Deref, DerefMut};
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::SeqCst;
use std::sync::Arc;
/// Allows access to the data contained within a lock just like a mutex guard.
#[derive(Debug)]
pub struct Guard<T> {
qutex: Qutex<T>,
}
impl<T> Guard<T> {
/// Releases the lock held by a `Guard` and returns the original `Qutex`.
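///
/// A minimal sketch (mirrors the `explicit_unlock` test at the bottom of
/// this file):
/// ```ignore
/// let qutex = Qutex::from(0i32);
/// let guard = qutex.clone().lock().wait().unwrap();
/// let qutex = Guard::unlock(guard); // the lock is released here
/// ```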
pub fn unlock(guard: Guard<T>) -> Qutex<T> {
let qutex = unsafe { ::std::ptr::read(&guard.qutex) };
::std::mem::forget(guard);
unsafe { qutex.direct_unlock() }
qutex
}
}
impl<T> Deref for Guard<T> {
type Target = T;
fn deref(&self) -> &T {
unsafe { &*self.qutex.inner.cell.get() }
}
}
impl<T> DerefMut for Guard<T> {
fn deref_mut(&mut self) -> &mut T {
unsafe { &mut *self.qutex.inner.cell.get() }
}
}
impl<T> Drop for Guard<T> {
fn drop(&mut self) {
// unsafe { self.qutex.direct_unlock().expect("Error dropping Guard") };
unsafe { self.qutex.direct_unlock() }
}
}
/// A future which resolves to a `Guard`.
#[must_use = "futures do nothing unless polled"]
#[derive(Debug)]
pub struct FutureGuard<T> {
qutex: Option<Qutex<T>>,
rx: Receiver<()>,
}
impl<T> FutureGuard<T> {
/// Returns a new `FutureGuard`.
fn new(qutex: Qutex<T>, rx: Receiver<()>) -> FutureGuard<T> {
FutureGuard {
qutex: Some(qutex),
rx: rx,
}
}
/// Blocks the current thread until this future resolves.
#[inline]
pub fn wait(self) -> Result<Guard<T>, Canceled> {
<Self as Future>::wait(self)
}
}
impl<T> Future for FutureGuard<T> {
type Item = Guard<T>;
type Error = Canceled;
#[inline]
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
if self.qutex.is_some() {
unsafe { self.qutex.as_ref().unwrap().process_queue() }
match self.rx.poll() {
Ok(status) => Ok(status.map(|_| Guard {
qutex: self.qutex.take().unwrap(),
})),
Err(e) => Err(e.into()),
}
} else {
panic!("FutureGuard::poll: Task already completed.");
}
}
}
impl<T> Drop for FutureGuard<T> {
/// Gracefully unlock if this guard has a lock acquired but has not yet
/// been polled to completion.
fn drop(&mut self) {
if let Some(qutex) = self.qutex.take() {
self.rx.close();
match self.rx.try_recv() {
Ok(status) => {
if status.is_some() {
unsafe {
qutex.direct_unlock();
}
}
}
Err(_) => (),
}
}
}
}
/// A request to lock the qutex for exclusive access.
#[derive(Debug)]
pub struct Request {
tx: Sender<()>,
}
impl Request {
/// Returns a new `Request`.
pub fn new(tx: Sender<()>) -> Request {
Request { tx: tx }
}
}
#[derive(Debug)]
struct Inner<T> {
// TODO: Convert to `AtomicBool` if no additional states are needed:
state: AtomicUsize,
cell: UnsafeCell<T>,
queue: SegQueue<Request>,
}
impl<T> From<T> for Inner<T> {
#[inline]
fn from(val: T) -> Inner<T> {
Inner {
state: AtomicUsize::new(0),
cell: UnsafeCell::new(val),
queue: SegQueue::new(),
}
}
}
unsafe impl<T: Send> Send for Inner<T> {}
unsafe impl<T: Send> Sync for Inner<T> {}
/// A lock-free-queue-backed exclusive data lock.
#[derive(Debug)]
pub struct Qutex<T> {
inner: Arc<Inner<T>>,
}
impl<T> Qutex<T> {
/// Creates and returns a new `Qutex`.
#[inline]
pub fn new(val: T) -> Qutex<T> {
Qutex {
inner: Arc::new(Inner::from(val)),
}
}
/// Returns a new `FutureGuard` which can be used as a future and will
/// resolve into a `Guard`.
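///
/// For example (a sketch based on the tests below; `wait` blocks the
/// current thread, so this assumes a non-async caller):
/// ```ignore
/// let val = Qutex::new(5i32);
/// let guard = val.clone().lock().wait().unwrap();
/// assert_eq!(*guard, 5);
/// ```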
pub fn lock(self) -> FutureGuard<T> {
let (tx, rx) = oneshot::channel();
unsafe {
self.push_request(Request::new(tx));
}
FutureGuard::new(self, rx)
}
/// Pushes a lock request onto the queue.
///
//
// TODO: Evaluate unsafe-ness.
//
#[inline]
pub unsafe fn push_request(&self, req: Request) {
self.inner.queue.push(req);
}
/// Returns a mutable reference to the inner value if there are currently
/// no other copies of this `Qutex`.
///
/// Since this call borrows the inner lock mutably, no actual locking needs to
/// take place---the mutable borrow statically guarantees no locks exist.
///
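/// For instance (illustrative):
/// ```ignore
/// let mut q = Qutex::new(vec![1, 2, 3]);
/// // No other clones exist, so we get direct access without locking:
/// q.get_mut().unwrap().push(4);
/// ```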
#[inline]
pub fn get_mut(&mut self) -> Option<&mut T> {
Arc::get_mut(&mut self.inner).map(|inn| unsafe { &mut *inn.cell.get() })
}
/// Returns a reference to the inner value.
///
#[inline]
pub fn as_ptr(&self) -> *const T {
self.inner.cell.get()
}
/// Returns a mutable reference to the inner value.
///
#[inline]
pub fn as_mut_ptr(&self) -> *mut T {
self.inner.cell.get()
}
/// Pops the next lock request in the queue if this (the caller's) lock is
/// unlocked.
//
// TODO:
// * This is currently public due to 'derivers' (aka. sub-types). Evaluate.
// * Consider removing unsafe qualifier.
// * Return proper error type.
// * [performance] Determine whether or not `compare_exchange_weak` should be used instead.
// * [performance] Consider failure ordering.
//
pub unsafe fn process_queue(&self) {
match self.inner.state.compare_exchange(0, 1, SeqCst, SeqCst) {
// Unlocked:
Ok(0) => {
loop {
if let Some(req) = self.inner.queue.pop() {
// If there is a send error, a requester has dropped
// its receiver so just go to the next.
if req.tx.send(()).is_err() {
continue;
} else {
break;
}
} else {
self.inner.state.store(0, SeqCst);
break;
}
}
}
// Already locked, leave it alone:
Err(1) => (),
// Already locked, leave it alone:
//
// TODO: Remove this option. Should be unreachable.
//
Ok(1) => unreachable!(),
// Something else:
Ok(n) => panic!("Qutex::process_queue: inner.state: {}.", n),
Err(n) => panic!("Qutex::process_queue: error: {}.", n),
}
}
/// Unlocks this (the caller's) lock and wakes up the next task in the
/// queue.
//
// TODO:
// * Evaluate unsafe-ness.
// * Return proper error type
// pub unsafe fn direct_unlock(&self) -> Result<(), ()> {
pub unsafe fn direct_unlock(&self) {
// TODO: Consider using `Ordering::Release`.
self.inner.state.store(0, SeqCst);
self.process_queue()
}
}
impl<T> From<T> for Qutex<T> {
#[inline]
fn from(val: T) -> Qutex<T> {
Qutex::new(val)
}
}
// Avoids needing `T: Clone`.
impl<T> Clone for Qutex<T> {
#[inline]
fn clone(&self) -> Qutex<T> {
Qutex {
inner: self.inner.clone(),
}
}
}
#[cfg(test)]
// Woefully incomplete:
mod tests {
use super::*;
use futures::Future;
#[test]
fn simple() {
let val = Qutex::from(999i32);
println!("Reading val...");
{
let future_guard = val.clone().lock();
let guard = future_guard.wait().unwrap();
println!("val: {}", *guard);
}
println!("Storing new val...");
{
let future_guard = val.clone().lock();
let mut guard = future_guard.wait().unwrap();
*guard = 5;
}
println!("Reading val...");
{
let future_guard = val.clone().lock();
let guard = future_guard.wait().unwrap();
println!("val: {}", *guard);
}
}
#[test]
fn concurrent() {
use std::thread;
let thread_count = 20;
let mut threads = Vec::with_capacity(thread_count);
let start_val = 0i32;
let qutex = Qutex::new(start_val);
for i in 0..thread_count {
let future_guard = qutex.clone().lock();
let future_write = future_guard.and_then(|mut guard| {
*guard += 1;
Ok(())
});
threads.push(
thread::Builder::new()
.name(format!("test_thread_{}", i))
.spawn(|| future_write.wait().unwrap())
.unwrap(),
);
}
for i in 0..thread_count {
let future_guard = qutex.clone().lock();
threads.push(
thread::Builder::new()
.name(format!("test_thread_{}", i + thread_count))
.spawn(|| {
let mut guard = future_guard.wait().unwrap();
*guard -= 1;
})
.unwrap(),
)
}
for thread in threads {
thread.join().unwrap();
}
let guard = qutex.clone().lock().wait().unwrap();
assert_eq!(*guard, start_val);
}
#[test]
fn future_guard_drop() {
let lock = Qutex::from(true);
let _future_guard_0 = lock.clone().lock();
let _future_guard_1 = lock.clone().lock();
let _future_guard_2 = lock.clone().lock();
// TODO: FINISH ME
}
#[test]
fn explicit_unlock() {
let lock = Qutex::from(true);
let mut guard_0 = lock.clone().lock().wait().unwrap();
*guard_0 = false;
let _ = Guard::unlock(guard_0);
// Will deadlock if this doesn't work:
let guard_1 = lock.clone().lock().wait().unwrap();
assert!(*guard_1 == false);
}
}
entity.rs | use anyhow::{anyhow, Context, Error, Result};
use log::{debug, trace};
use serde::Deserialize;
use shellexpand;
use std::{collections::HashMap, convert::TryFrom, env, fs, path::PathBuf, thread};
use toml;
use crate::output::utils::run_cmd;
const DEFAULT_PAGE_SIZE: usize = 10;
#[derive(Debug, Default, Clone, PartialEq, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct Account {
// TODO: rename with `from`
pub name: Option<String>,
pub downloads_dir: Option<PathBuf>,
pub signature_delimiter: Option<String>,
pub signature: Option<String>,
pub default_page_size: Option<usize>,
pub watch_cmds: Option<Vec<String>>,
pub default: Option<bool>,
pub email: String,
pub imap_host: String,
pub imap_port: u16,
pub imap_starttls: Option<bool>,
pub imap_insecure: Option<bool>,
pub imap_login: String,
pub imap_passwd_cmd: String,
pub smtp_host: String,
pub smtp_port: u16,
pub smtp_starttls: Option<bool>,
pub smtp_insecure: Option<bool>,
pub smtp_login: String,
pub smtp_passwd_cmd: String,
}
pub type AccountsMap = HashMap<String, Account>;
/// Represents the whole config file.
#[derive(Debug, Default, Clone, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct Config {
// TODO: rename with `from`
pub name: String,
pub downloads_dir: Option<PathBuf>,
pub notify_cmd: Option<String>,
/// Option to override the default signature delimiter "`-- \n`".
pub signature_delimiter: Option<String>,
pub signature: Option<String>,
pub default_page_size: Option<usize>,
pub watch_cmds: Option<Vec<String>>,
#[serde(flatten)]
pub accounts: HashMap<String, Account>,
}
impl Config {
fn path_from_xdg() -> Result<PathBuf> {
let path = env::var("XDG_CONFIG_HOME").context("cannot find `XDG_CONFIG_HOME` env var")?;
let mut path = PathBuf::from(path);
path.push("himalaya");
path.push("config.toml");
Ok(path)
}
fn path_from_xdg_alt() -> Result<PathBuf> {
let home_var = if cfg!(target_family = "windows") {
"USERPROFILE"
} else {
"HOME"
};
let mut path: PathBuf = env::var(home_var)
.context(format!("cannot find `{}` env var", home_var))?
.into();
path.push(".config");
path.push("himalaya");
path.push("config.toml");
Ok(path)
}
fn path_from_home() -> Result<PathBuf> {
let home_var = if cfg!(target_family = "windows") {
"USERPROFILE"
} else {
"HOME"
};
let mut path: PathBuf = env::var(home_var)
.context(format!("cannot find `{}` env var", home_var))?
.into();
path.push(".himalayarc");
Ok(path)
}
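/// Resolves the configuration file path by trying, in order:
/// `$XDG_CONFIG_HOME/himalaya/config.toml`, then
/// `$HOME/.config/himalaya/config.toml` (`%USERPROFILE%` on Windows),
/// then `~/.himalayarc`. Note that each fallback fires when the relevant
/// env var is missing, not when the file itself is missing.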
pub fn path() -> Result<PathBuf> {
let path = Self::path_from_xdg()
.or_else(|_| Self::path_from_xdg_alt())
.or_else(|_| Self::path_from_home())
.context("cannot find config path")?;
Ok(path)
}
/// Returns the account by the given name.
/// If `name` is `None`, then the default account is returned.
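///
/// A usage sketch (the account name is illustrative):
/// ```ignore
/// let account = config.find_account_by_name(Some("personal"))?;
/// let default = config.find_account_by_name(None)?; // first account with `default = true`
/// ```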
pub fn find_account_by_name(&self, name: Option<&str>) -> Result<&Account> {
match name {
Some("") | None => self
.accounts
.iter()
.find(|(_, account)| account.default.unwrap_or(false))
.map(|(_, account)| account)
.ok_or_else(|| anyhow!("cannot find default account")),
Some(name) => self
.accounts
.get(name)
.ok_or_else(|| anyhow!(format!("cannot find account `{}`", name))),
}
}
/// Returns the path to the given filename in the download directory.
/// You can imagine this as:
/// ```skip
/// Account-specific-download-dir-path + Attachment-Filename
/// ```
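///
/// For example (paths are illustrative), with `downloads-dir = "~/Downloads"`
/// set on the account this returns `~/Downloads/report.pdf` after shell
/// expansion:
/// ```ignore
/// let path = config.downloads_filepath(&account, "report.pdf");
/// ```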
pub fn downloads_filepath(&self, account: &Account, filename: &str) -> PathBuf {
account
.downloads_dir
.as_ref()
.and_then(|dir| dir.to_str())
.and_then(|dir| shellexpand::full(dir).ok())
.map(|dir| PathBuf::from(dir.to_string()))
.unwrap_or(
self.downloads_dir
.as_ref()
.and_then(|dir| dir.to_str())
.and_then(|dir| shellexpand::full(dir).ok())
.map(|dir| PathBuf::from(dir.to_string()))
.unwrap_or(env::temp_dir()),
)
.join(filename)
}
/// This little helper function uses the name and email of the account to
/// create a valid address for the headers of a message.
///
/// # Hint
/// If the name includes some special characters like a whitespace, comma or semicolon, then
/// the name will be automatically wrapped between two `"`.
///
/// # Example
/// ```
/// use himalaya::config::model::{Account, Config};
///
/// fn main() {
/// let config = Config::default();
///
/// let normal_account = Account::new(Some("Acc1"), "[email protected]");
/// // notice the semicolon in the name!
/// let special_account = Account::new(Some("TL;DR"), "[email protected]");
///
/// // -- Expected outputs --
/// let expected_normal = Account {
/// name: Some("Acc1".to_string()),
/// email: "[email protected]".to_string(),
/// .. Account::default()
/// };
///
/// let expected_special = Account {
/// name: Some("\"TL;DR\"".to_string()),
/// email: "[email protected]".to_string(),
/// .. Account::default()
/// };
///
/// assert_eq!(config.address(&normal_account), "Acc1 <[email protected]>");
/// assert_eq!(config.address(&special_account), "\"TL;DR\" <[email protected]>");
/// }
/// ```
pub fn address(&self, account: &Account) -> String {
// NOTE: the original body was elided here; this is a reconstruction
// based on the doc-test above and the commented-out tests at the bottom
// of this file (special characters such as `;`, `,`, `:` or `.` trigger
// quoting, while plain whitespace does not).
let name = account.name.as_ref().unwrap_or(&self.name);
let has_special_chars = "()<>[]:;@.,".chars().any(|c| name.contains(c));
if has_special_chars {
format!("\"{}\" <{}>", name, account.email)
} else {
format!("{} <{}>", name, account.email)
}
}
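/// Runs the user-defined `notify-cmd` with the given subject and sender,
/// falling back to `notify-send` when none is configured.
///
/// A usage sketch (values are illustrative):
/// ```ignore
/// config.run_notify_cmd("Re: invoice", "[email protected]")?;
/// ```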
pub fn run_notify_cmd<S: AsRef<str>>(&self, subject: S, sender: S) -> Result<()> {
let subject = subject.as_ref();
let sender = sender.as_ref();
let default_cmd = format!(r#"notify-send "📫 {}" "{}""#, sender, subject);
let cmd = self
.notify_cmd
.as_ref()
.map(|cmd| format!(r#"{} {:?} {:?}"#, cmd, subject, sender))
.unwrap_or(default_cmd);
run_cmd(&cmd).context("cannot run notify cmd")?;
Ok(())
}
/// Returns the signature of the given account in combination with the signature delimiter.
/// If the account doesn't have a signature, then the global signature is used.
///
/// # Example
/// ```
/// use himalaya::config::model::{Config, Account};
///
/// fn main() {
/// let config = Config {
/// signature: Some("Global signature".to_string()),
/// .. Config::default()
/// };
///
/// // a config without a global signature
/// let config_no_global = Config::default();
///
/// let account1 = Account::new_with_signature(Some("Account Name"), "[email protected]", Some("Cya"));
/// let account2 = Account::new(Some("Bruh"), "[email protected]");
///
/// // Hint: Don't forget the default signature delimiter: '\n-- \n'
/// assert_eq!(config.signature(&account1), Some("\n-- \nCya".to_string()));
/// assert_eq!(config.signature(&account2), Some("\n-- \nGlobal signature".to_string()));
///
/// assert_eq!(config_no_global.signature(&account2), None);
/// }
/// ```
pub fn signature(&self, account: &Account) -> Option<String> {
let default_sig_delim = String::from("-- \n");
let sig_delim = account
.signature_delimiter
.as_ref()
.or_else(|| self.signature_delimiter.as_ref())
.unwrap_or(&default_sig_delim);
let sig = account
.signature
.as_ref()
.or_else(|| self.signature.as_ref());
sig.and_then(|sig| shellexpand::full(sig).ok())
.map(|sig| sig.to_string())
.and_then(|sig| fs::read_to_string(sig).ok())
.or_else(|| sig.map(|sig| sig.to_owned()))
.map(|sig| format!("\n{}{}", sig_delim, sig))
}
pub fn default_page_size(&self, account: &Account) -> usize {
account
.default_page_size
.as_ref()
.or_else(|| self.default_page_size.as_ref())
.or(Some(&DEFAULT_PAGE_SIZE))
.unwrap()
.to_owned()
}
pub fn exec_watch_cmds(&self, account: &Account) -> Result<()> {
let cmds = account
.watch_cmds
.as_ref()
.or_else(|| self.watch_cmds.as_ref())
.map(|cmds| cmds.to_owned())
.unwrap_or_default();
thread::spawn(move || {
debug!("batch execution of {} cmd(s)", cmds.len());
cmds.iter().for_each(|cmd| {
debug!("running command {:?}…", cmd);
let res = run_cmd(cmd);
debug!("{:?}", res);
})
});
Ok(())
}
}
impl TryFrom<Option<&str>> for Config {
type Error = Error;
fn try_from(path: Option<&str>) -> Result<Self, Self::Error> {
debug!("init config from `{:?}`", path);
        let path = match path {
            Some(path) => path.into(),
            None => Config::path()?,
        };
let content = fs::read_to_string(path).context("cannot read config file")?;
let config = toml::from_str(&content).context("cannot parse config file")?;
trace!("{:#?}", config);
Ok(config)
}
}
// FIXME: tests
// #[cfg(test)]
// mod tests {
// use crate::domain::{account::entity::Account, config::entity::Config};
// // a quick way to get a config instance for testing
// fn get_config() -> Config {
// Config {
// name: String::from("Config Name"),
// ..Config::default()
// }
// }
// #[test]
// fn test_find_account_by_name() {
// let mut config = get_config();
// let account1 = Account::new(None, "[email protected]");
// let account2 = Account::new(Some("Two"), "[email protected]");
// // add some accounts
// config.accounts.insert("One".to_string(), account1.clone());
// config.accounts.insert("Two".to_string(), account2.clone());
// let ret1 = config.find_account_by_name(Some("One")).unwrap();
// let ret2 = config.find_account_by_name(Some("Two")).unwrap();
// assert_eq!(*ret1, account1);
// assert_eq!(*ret2, account2);
// }
// #[test]
// fn test_address() {
// let config = get_config();
// let account1 = Account::new(None, "[email protected]");
// let account2 = Account::new(Some("Two"), "[email protected]");
// let account3 = Account::new(Some("TL;DR"), "[email protected]");
// let account4 = Account::new(Some("TL,DR"), "[email protected]");
// let account5 = Account::new(Some("TL:DR"), "[email protected]");
// let account6 = Account::new(Some("TL.DR"), "[email protected]");
// assert_eq!(&config.address(&account1), "Config Name <[email protected]>");
// assert_eq!(&config.address(&account2), "Two <[email protected]>");
// assert_eq!(&config.address(&account3), "\"TL;DR\" <[email protected]>");
// assert_eq!(&config.address(&account4), "\"TL,DR\" <[email protected]>");
// assert_eq!(&config.address(&account5), "\"TL:DR\" <[email protected]>");
// assert_eq!(&config.address(&account6), "\"TL.DR\" <[email protected]>");
// }
// }
| {
let name = account.name.as_ref().unwrap_or(&self.name);
let has_special_chars = "()<>[]:;@.,".contains(|special_char| name.contains(special_char));
if name.is_empty() {
format!("{}", account.email)
} else if has_special_chars {
// so the name has special characters => Wrap it with '"'
format!("\"{}\" <{}>", name, account.email)
} else {
format!("{} <{}>", name, account.email)
}
} | identifier_body |
entity.rs | use anyhow::{anyhow, Context, Error, Result};
use log::{debug, trace};
use serde::Deserialize;
use shellexpand;
use std::{collections::HashMap, convert::TryFrom, env, fs, path::PathBuf, thread};
use toml;
use crate::output::utils::run_cmd;
const DEFAULT_PAGE_SIZE: usize = 10;
#[derive(Debug, Default, Clone, PartialEq, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct Account {
// TODO: rename with `from`
pub name: Option<String>,
pub downloads_dir: Option<PathBuf>,
pub signature_delimiter: Option<String>,
pub signature: Option<String>,
pub default_page_size: Option<usize>,
pub watch_cmds: Option<Vec<String>>,
pub default: Option<bool>,
pub email: String,
pub imap_host: String,
pub imap_port: u16,
pub imap_starttls: Option<bool>,
pub imap_insecure: Option<bool>,
pub imap_login: String,
pub imap_passwd_cmd: String,
pub smtp_host: String,
pub smtp_port: u16,
pub smtp_starttls: Option<bool>,
pub smtp_insecure: Option<bool>,
pub smtp_login: String,
pub smtp_passwd_cmd: String,
}
pub type AccountsMap = HashMap<String, Account>;
/// Represents the whole config file.
#[derive(Debug, Default, Clone, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct Config {
// TODO: rename with `from`
pub name: String,
pub downloads_dir: Option<PathBuf>,
pub notify_cmd: Option<String>,
    /// Option to override the default signature delimiter "`-- \n`".
pub signature_delimiter: Option<String>,
pub signature: Option<String>,
pub default_page_size: Option<usize>,
pub watch_cmds: Option<Vec<String>>,
#[serde(flatten)]
pub accounts: HashMap<String, Account>,
}
impl Config {
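    /// Returns the config path taken from the `XDG_CONFIG_HOME` env var:
    /// `$XDG_CONFIG_HOME/himalaya/config.toml`.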
fn path_from_xdg() -> Result<PathBuf> {
let path = env::var("XDG_CONFIG_HOME").context("cannot find `XDG_CONFIG_HOME` env var")?;
let mut path = PathBuf::from(path);
path.push("himalaya");
path.push("config.toml");
Ok(path)
}
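    /// XDG-style fallback for systems without `XDG_CONFIG_HOME`:
    /// `$HOME/.config/himalaya/config.toml` (`%USERPROFILE%` on Windows).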
fn path_from_xdg_alt() -> Result<PathBuf> {
let home_var = if cfg!(target_family = "windows") {
"USERPROFILE"
} else {
"HOME"
};
let mut path: PathBuf = env::var(home_var)
.context(format!("cannot find `{}` env var", home_var))?
.into();
path.push(".config");
path.push("himalaya");
path.push("config.toml");
Ok(path)
}
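    /// Last-resort fallback: `$HOME/.himalayarc` (`%USERPROFILE%` on Windows).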
fn path_from_home() -> Result<PathBuf> {
let home_var = if cfg!(target_family = "windows") {
"USERPROFILE"
} else {
"HOME"
};
let mut path: PathBuf = env::var(home_var)
.context(format!("cannot find `{}` env var", home_var))?
.into();
path.push(".himalayarc");
Ok(path)
}
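    /// Tries the candidate locations above in order and returns the first
    /// path whose environment variables resolve.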
pub fn path() -> Result<PathBuf> {
let path = Self::path_from_xdg()
.or_else(|_| Self::path_from_xdg_alt())
.or_else(|_| Self::path_from_home())
.context("cannot find config path")?;
Ok(path)
}
/// Returns the account by the given name.
/// If `name` is `None`, then the default account is returned.
pub fn find_account_by_name(&self, name: Option<&str>) -> Result<&Account> {
match name {
Some("") | None => self
.accounts
.iter()
.find(|(_, account)| account.default.unwrap_or(false))
.map(|(_, account)| account)
.ok_or_else(|| anyhow!("cannot find default account")),
Some(name) => self
.accounts
.get(name)
.ok_or_else(|| anyhow!(format!("cannot find account `{}`", name))),
}
}
/// Returns the path to the given filename in the download directory.
/// You can imagine this as:
/// ```skip
    /// Account-specific-download-dir-path + Attachment-Filename
/// ```
pub fn downloads_filepath(&self, account: &Account, filename: &str) -> PathBuf {
account
.downloads_dir
.as_ref()
.and_then(|dir| dir.to_str())
.and_then(|dir| shellexpand::full(dir).ok())
.map(|dir| PathBuf::from(dir.to_string()))
.unwrap_or(
self.downloads_dir
.as_ref()
.and_then(|dir| dir.to_str())
.and_then(|dir| shellexpand::full(dir).ok())
.map(|dir| PathBuf::from(dir.to_string()))
.unwrap_or(env::temp_dir()),
)
.join(filename)
}
    /// This is a little helper function which uses the name and email
    /// of the account to create a valid address for the headers
    /// of a msg.
///
/// # Hint
    /// If the name includes a special character like a comma, semicolon, colon,
    /// period, an `@` or any kind of bracket, then the name will be automatically
    /// wrapped between two `"`.
///
    /// # Example
/// ```
/// use himalaya::config::model::{Account, Config};
///
/// fn main() {
/// let config = Config::default();
///
/// let normal_account = Account::new(Some("Acc1"), "[email protected]");
/// // notice the semicolon in the name!
/// let special_account = Account::new(Some("TL;DR"), "[email protected]");
///
    /// // -- Expected outputs --
/// let expected_normal = Account {
/// name: Some("Acc1".to_string()),
/// email: "[email protected]".to_string(),
/// .. Account::default()
/// };
///
/// let expected_special = Account {
/// name: Some("\"TL;DR\"".to_string()),
/// email: "[email protected]".to_string(),
/// .. Account::default()
/// };
///
/// assert_eq!(config.address(&normal_account), "Acc1 <[email protected]>");
/// assert_eq!(config.address(&special_account), "\"TL;DR\" <[email protected]>");
/// }
/// ```
pub fn address(&self, account: &Account) -> String {
let name = account.name.as_ref().unwrap_or(&self.name);
let has_special_chars = "()<>[]:;@.,".contains(|special_char| name.contains(special_char));
if name.is_empty() {
format!("{}", account.email)
} else if has_special_chars {
// so the name has special characters => Wrap it with '"'
format!("\"{}\" <{}>", name, account.email)
} else {
format!("{} <{}>", name, account.email)
}
}
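    /// Runs the user-defined `notify-cmd` with the subject and sender
    /// appended as arguments, falling back to a plain `notify-send` call.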
pub fn run_notify_cmd<S: AsRef<str>>(&self, subject: S, sender: S) -> Result<()> {
let subject = subject.as_ref();
let sender = sender.as_ref();
let default_cmd = format!(r#"notify-send "📫 {}" "{}""#, sender, subject);
let cmd = self
.notify_cmd
.as_ref()
.map(|cmd| format!(r#"{} {:?} {:?}"#, cmd, subject, sender))
.unwrap_or(default_cmd);
run_cmd(&cmd).context("cannot run notify cmd")?;
Ok(())
}
    /// Returns the signature of the given account in combination with the signature delimiter.
/// If the account doesn't have a signature, then the global signature is used.
///
/// # Example
/// ```
/// use himalaya::config::model::{Config, Account};
///
/// fn main() {
/// let config = Config {
/// signature: Some("Global signature".to_string()),
/// .. Config::default()
/// };
///
/// // a config without a global signature
/// let config_no_global = Config::default();
///
/// let account1 = Account::new_with_signature(Some("Account Name"), "[email protected]", Some("Cya"));
/// let account2 = Account::new(Some("Bruh"), "[email protected]");
///
/// // Hint: Don't forget the default signature delimiter: '\n-- \n'
/// assert_eq!(config.signature(&account1), Some("\n-- \nCya".to_string()));
/// assert_eq!(config.signature(&account2), Some("\n-- \nGlobal signature".to_string()));
///
/// assert_eq!(config_no_global.signature(&account2), None);
/// }
/// ```
pub fn signature(&self, account: &Account) -> Option<String> {
let default_sig_delim = String::from("-- \n");
let sig_delim = account
.signature_delimiter
.as_ref()
.or_else(|| self.signature_delimiter.as_ref())
.unwrap_or(&default_sig_delim);
let sig = account
.signature
.as_ref()
.or_else(|| self.signature.as_ref());
sig.and_then(|sig| shellexpand::full(sig).ok())
.map(|sig| sig.to_string())
.and_then(|sig| fs::read_to_string(sig).ok())
.or_else(|| sig.map(|sig| sig.to_owned()))
.map(|sig| format!("\n{}{}", sig_delim, sig))
}
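    /// Returns the account's page size, falling back to the global one and
    /// finally to `DEFAULT_PAGE_SIZE`.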
pub fn default_page_size(&self, account: &Account) -> usize {
account
.default_page_size
.as_ref()
.or_else(|| self.default_page_size.as_ref())
.or(Some(&DEFAULT_PAGE_SIZE))
.unwrap()
.to_owned()
}
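    /// Spawns a background thread that runs the account (or global) watch
    /// commands sequentially, logging each result.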
pub fn exec_watch_cmds(&self, account: &Account) -> Result<()> {
let cmds = account
.watch_cmds
.as_ref()
.or_else(|| self.watch_cmds.as_ref())
.map(|cmds| cmds.to_owned())
.unwrap_or_default();
thread::spawn(move || {
debug!("batch execution of {} cmd(s)", cmds.len());
cmds.iter().for_each(|cmd| {
debug!("running command {:?}…", cmd);
let res = run_cmd(cmd);
debug!("{:?}", res);
})
});
Ok(())
}
}
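/// Builds the config from an explicit path, or from `Config::path()` when
/// `None` is given.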
impl TryFrom<Option<&str>> for Config {
type Error = Error;
fn try_from(path: Option<&str>) -> Result<Self, Self::Error> {
debug!("init config from `{:?}`", path);
        let path = match path {
            Some(path) => path.into(),
            None => Config::path()?,
        };
let content = fs::read_to_string(path).context("cannot read config file")?;
let config = toml::from_str(&content).context("cannot parse config file")?;
trace!("{:#?}", config);
Ok(config)
} | // #[cfg(test)]
// mod tests {
// use crate::domain::{account::entity::Account, config::entity::Config};
// // a quick way to get a config instance for testing
// fn get_config() -> Config {
// Config {
// name: String::from("Config Name"),
// ..Config::default()
// }
// }
// #[test]
// fn test_find_account_by_name() {
// let mut config = get_config();
// let account1 = Account::new(None, "[email protected]");
// let account2 = Account::new(Some("Two"), "[email protected]");
// // add some accounts
// config.accounts.insert("One".to_string(), account1.clone());
// config.accounts.insert("Two".to_string(), account2.clone());
// let ret1 = config.find_account_by_name(Some("One")).unwrap();
// let ret2 = config.find_account_by_name(Some("Two")).unwrap();
// assert_eq!(*ret1, account1);
// assert_eq!(*ret2, account2);
// }
// #[test]
// fn test_address() {
// let config = get_config();
// let account1 = Account::new(None, "[email protected]");
// let account2 = Account::new(Some("Two"), "[email protected]");
// let account3 = Account::new(Some("TL;DR"), "[email protected]");
// let account4 = Account::new(Some("TL,DR"), "[email protected]");
// let account5 = Account::new(Some("TL:DR"), "[email protected]");
// let account6 = Account::new(Some("TL.DR"), "[email protected]");
// assert_eq!(&config.address(&account1), "Config Name <[email protected]>");
// assert_eq!(&config.address(&account2), "Two <[email protected]>");
// assert_eq!(&config.address(&account3), "\"TL;DR\" <[email protected]>");
// assert_eq!(&config.address(&account4), "\"TL,DR\" <[email protected]>");
// assert_eq!(&config.address(&account5), "\"TL:DR\" <[email protected]>");
// assert_eq!(&config.address(&account6), "\"TL.DR\" <[email protected]>");
// }
// } | }
// FIXME: tests | random_line_split |
entity.rs | use anyhow::{anyhow, Context, Error, Result};
use log::{debug, trace};
use serde::Deserialize;
use shellexpand;
use std::{collections::HashMap, convert::TryFrom, env, fs, path::PathBuf, thread};
use toml;
use crate::output::utils::run_cmd;
const DEFAULT_PAGE_SIZE: usize = 10;
#[derive(Debug, Default, Clone, PartialEq, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct Account {
// TODO: rename with `from`
pub name: Option<String>,
pub downloads_dir: Option<PathBuf>,
pub signature_delimiter: Option<String>,
pub signature: Option<String>,
pub default_page_size: Option<usize>,
pub watch_cmds: Option<Vec<String>>,
pub default: Option<bool>,
pub email: String,
pub imap_host: String,
pub imap_port: u16,
pub imap_starttls: Option<bool>,
pub imap_insecure: Option<bool>,
pub imap_login: String,
pub imap_passwd_cmd: String,
pub smtp_host: String,
pub smtp_port: u16,
pub smtp_starttls: Option<bool>,
pub smtp_insecure: Option<bool>,
pub smtp_login: String,
pub smtp_passwd_cmd: String,
}
pub type AccountsMap = HashMap<String, Account>;
/// Represents the whole config file.
#[derive(Debug, Default, Clone, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct Config {
// TODO: rename with `from`
pub name: String,
pub downloads_dir: Option<PathBuf>,
pub notify_cmd: Option<String>,
    /// Option to override the default signature delimiter "`-- \n`".
pub signature_delimiter: Option<String>,
pub signature: Option<String>,
pub default_page_size: Option<usize>,
pub watch_cmds: Option<Vec<String>>,
#[serde(flatten)]
pub accounts: HashMap<String, Account>,
}
impl Config {
fn path_from_xdg() -> Result<PathBuf> {
let path = env::var("XDG_CONFIG_HOME").context("cannot find `XDG_CONFIG_HOME` env var")?;
let mut path = PathBuf::from(path);
path.push("himalaya");
path.push("config.toml");
Ok(path)
}
fn path_from_xdg_alt() -> Result<PathBuf> {
let home_var = if cfg!(target_family = "windows") {
"USERPROFILE"
} else {
"HOME"
};
let mut path: PathBuf = env::var(home_var)
.context(format!("cannot find `{}` env var", home_var))?
.into();
path.push(".config");
path.push("himalaya");
path.push("config.toml");
Ok(path)
}
fn | () -> Result<PathBuf> {
let home_var = if cfg!(target_family = "windows") {
"USERPROFILE"
} else {
"HOME"
};
let mut path: PathBuf = env::var(home_var)
.context(format!("cannot find `{}` env var", home_var))?
.into();
path.push(".himalayarc");
Ok(path)
}
pub fn path() -> Result<PathBuf> {
let path = Self::path_from_xdg()
.or_else(|_| Self::path_from_xdg_alt())
.or_else(|_| Self::path_from_home())
.context("cannot find config path")?;
Ok(path)
}
/// Returns the account by the given name.
/// If `name` is `None`, then the default account is returned.
pub fn find_account_by_name(&self, name: Option<&str>) -> Result<&Account> {
match name {
Some("") | None => self
.accounts
.iter()
.find(|(_, account)| account.default.unwrap_or(false))
.map(|(_, account)| account)
.ok_or_else(|| anyhow!("cannot find default account")),
Some(name) => self
.accounts
.get(name)
.ok_or_else(|| anyhow!(format!("cannot find account `{}`", name))),
}
}
/// Returns the path to the given filename in the download directory.
/// You can imagine this as:
/// ```skip
    /// Account-specific-download-dir-path + Attachment-Filename
/// ```
pub fn downloads_filepath(&self, account: &Account, filename: &str) -> PathBuf {
account
.downloads_dir
.as_ref()
.and_then(|dir| dir.to_str())
.and_then(|dir| shellexpand::full(dir).ok())
.map(|dir| PathBuf::from(dir.to_string()))
.unwrap_or(
self.downloads_dir
.as_ref()
.and_then(|dir| dir.to_str())
.and_then(|dir| shellexpand::full(dir).ok())
.map(|dir| PathBuf::from(dir.to_string()))
.unwrap_or(env::temp_dir()),
)
.join(filename)
}
    /// This is a little helper function which uses the name and email
    /// of the account to create a valid address for the headers
    /// of a msg.
///
/// # Hint
    /// If the name includes a special character like a comma, semicolon, colon,
    /// period, an `@` or any kind of bracket, then the name will be automatically
    /// wrapped between two `"`.
///
    /// # Example
/// ```
/// use himalaya::config::model::{Account, Config};
///
/// fn main() {
/// let config = Config::default();
///
/// let normal_account = Account::new(Some("Acc1"), "[email protected]");
/// // notice the semicolon in the name!
/// let special_account = Account::new(Some("TL;DR"), "[email protected]");
///
    /// // -- Expected outputs --
/// let expected_normal = Account {
/// name: Some("Acc1".to_string()),
/// email: "[email protected]".to_string(),
/// .. Account::default()
/// };
///
/// let expected_special = Account {
/// name: Some("\"TL;DR\"".to_string()),
/// email: "[email protected]".to_string(),
/// .. Account::default()
/// };
///
/// assert_eq!(config.address(&normal_account), "Acc1 <[email protected]>");
/// assert_eq!(config.address(&special_account), "\"TL;DR\" <[email protected]>");
/// }
/// ```
pub fn address(&self, account: &Account) -> String {
let name = account.name.as_ref().unwrap_or(&self.name);
let has_special_chars = "()<>[]:;@.,".contains(|special_char| name.contains(special_char));
if name.is_empty() {
format!("{}", account.email)
} else if has_special_chars {
// so the name has special characters => Wrap it with '"'
format!("\"{}\" <{}>", name, account.email)
} else {
format!("{} <{}>", name, account.email)
}
}
pub fn run_notify_cmd<S: AsRef<str>>(&self, subject: S, sender: S) -> Result<()> {
let subject = subject.as_ref();
let sender = sender.as_ref();
let default_cmd = format!(r#"notify-send "📫 {}" "{}""#, sender, subject);
let cmd = self
.notify_cmd
.as_ref()
.map(|cmd| format!(r#"{} {:?} {:?}"#, cmd, subject, sender))
.unwrap_or(default_cmd);
run_cmd(&cmd).context("cannot run notify cmd")?;
Ok(())
}
    /// Returns the signature of the given account in combination with the signature delimiter.
/// If the account doesn't have a signature, then the global signature is used.
///
/// # Example
/// ```
/// use himalaya::config::model::{Config, Account};
///
/// fn main() {
/// let config = Config {
/// signature: Some("Global signature".to_string()),
/// .. Config::default()
/// };
///
/// // a config without a global signature
/// let config_no_global = Config::default();
///
/// let account1 = Account::new_with_signature(Some("Account Name"), "[email protected]", Some("Cya"));
/// let account2 = Account::new(Some("Bruh"), "[email protected]");
///
/// // Hint: Don't forget the default signature delimiter: '\n-- \n'
/// assert_eq!(config.signature(&account1), Some("\n-- \nCya".to_string()));
/// assert_eq!(config.signature(&account2), Some("\n-- \nGlobal signature".to_string()));
///
/// assert_eq!(config_no_global.signature(&account2), None);
/// }
/// ```
pub fn signature(&self, account: &Account) -> Option<String> {
let default_sig_delim = String::from("-- \n");
let sig_delim = account
.signature_delimiter
.as_ref()
.or_else(|| self.signature_delimiter.as_ref())
.unwrap_or(&default_sig_delim);
let sig = account
.signature
.as_ref()
.or_else(|| self.signature.as_ref());
sig.and_then(|sig| shellexpand::full(sig).ok())
.map(|sig| sig.to_string())
.and_then(|sig| fs::read_to_string(sig).ok())
.or_else(|| sig.map(|sig| sig.to_owned()))
.map(|sig| format!("\n{}{}", sig_delim, sig))
}
pub fn default_page_size(&self, account: &Account) -> usize {
account
.default_page_size
.as_ref()
.or_else(|| self.default_page_size.as_ref())
.or(Some(&DEFAULT_PAGE_SIZE))
.unwrap()
.to_owned()
}
pub fn exec_watch_cmds(&self, account: &Account) -> Result<()> {
let cmds = account
.watch_cmds
.as_ref()
.or_else(|| self.watch_cmds.as_ref())
.map(|cmds| cmds.to_owned())
.unwrap_or_default();
thread::spawn(move || {
debug!("batch execution of {} cmd(s)", cmds.len());
cmds.iter().for_each(|cmd| {
debug!("running command {:?}…", cmd);
let res = run_cmd(cmd);
debug!("{:?}", res);
})
});
Ok(())
}
}
impl TryFrom<Option<&str>> for Config {
type Error = Error;
fn try_from(path: Option<&str>) -> Result<Self, Self::Error> {
debug!("init config from `{:?}`", path);
        let path = match path {
            Some(path) => path.into(),
            None => Config::path()?,
        };
let content = fs::read_to_string(path).context("cannot read config file")?;
let config = toml::from_str(&content).context("cannot parse config file")?;
trace!("{:#?}", config);
Ok(config)
}
}
// FIXME: tests
// #[cfg(test)]
// mod tests {
// use crate::domain::{account::entity::Account, config::entity::Config};
// // a quick way to get a config instance for testing
// fn get_config() -> Config {
// Config {
// name: String::from("Config Name"),
// ..Config::default()
// }
// }
// #[test]
// fn test_find_account_by_name() {
// let mut config = get_config();
// let account1 = Account::new(None, "[email protected]");
// let account2 = Account::new(Some("Two"), "[email protected]");
// // add some accounts
// config.accounts.insert("One".to_string(), account1.clone());
// config.accounts.insert("Two".to_string(), account2.clone());
// let ret1 = config.find_account_by_name(Some("One")).unwrap();
// let ret2 = config.find_account_by_name(Some("Two")).unwrap();
// assert_eq!(*ret1, account1);
// assert_eq!(*ret2, account2);
// }
// #[test]
// fn test_address() {
// let config = get_config();
// let account1 = Account::new(None, "[email protected]");
// let account2 = Account::new(Some("Two"), "[email protected]");
// let account3 = Account::new(Some("TL;DR"), "[email protected]");
// let account4 = Account::new(Some("TL,DR"), "[email protected]");
// let account5 = Account::new(Some("TL:DR"), "[email protected]");
// let account6 = Account::new(Some("TL.DR"), "[email protected]");
// assert_eq!(&config.address(&account1), "Config Name <[email protected]>");
// assert_eq!(&config.address(&account2), "Two <[email protected]>");
// assert_eq!(&config.address(&account3), "\"TL;DR\" <[email protected]>");
// assert_eq!(&config.address(&account4), "\"TL,DR\" <[email protected]>");
// assert_eq!(&config.address(&account5), "\"TL:DR\" <[email protected]>");
// assert_eq!(&config.address(&account6), "\"TL.DR\" <[email protected]>");
// }
// }
| path_from_home | identifier_name |
lib.rs | #![forbid(unsafe_code)]
#![warn(rust_2018_idioms)]
use anyhow::{anyhow, Context as _};
use cargo_metadata as cm;
use duct::cmd;
use indoc::indoc;
use itertools::Itertools as _;
use quote::quote;
use std::{
collections::{BTreeMap, HashMap, HashSet},
env, fs,
ops::Range,
path::{Path, PathBuf},
};
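/// Regenerates the re-export crate's `lib.rs`: workspace members under the
/// crates dir are mirrored as a module tree whose leaves re-export one
/// library crate each, together with its crate-level docs.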
pub fn gen_doc_rust(crates_dir: Option<&Path>, manifest_path: Option<&Path>) -> anyhow::Result<()> {
let metadata = &cargo_metadata(manifest_path)?;
let resolve = metadata.resolve.as_ref().expect("should be present");
let resolve_root = resolve
.root
.as_ref()
.with_context(|| "this is a virtual manifest")?;
let extern_crate_names = {
let explicit_names_in_toml = metadata[resolve_root]
.dependencies
.iter()
.flat_map(|d| &d.rename)
.collect::<HashSet<_>>();
        let cm::Node { deps, .. } = resolve
            .nodes
            .iter()
            .find(|cm::Node { id, .. }| id == resolve_root)
            .unwrap();
        deps.iter()
            .filter(|cm::NodeDep { dep_kinds, .. }| {
                dep_kinds
                    .iter()
                    .any(|cm::DepKindInfo { kind, .. }| *kind != cm::DependencyKind::Build)
            })
            .flat_map(|cm::NodeDep { name, pkg, .. }| {
let extern_crate_name = if explicit_names_in_toml.contains(name) {
name
} else {
&metadata[pkg].lib_or_proc_macro()?.name
};
Some((pkg, extern_crate_name))
})
.collect::<HashMap<_, _>>()
};
let lib = metadata[resolve_root]
.lib_or_proc_macro()
.with_context(|| format!("missing `lib|proc-macro` target in `{}`", resolve_root))?;
let crates_dir = &crates_dir
.map(ToOwned::to_owned)
.unwrap_or_else(|| metadata.workspace_root.join("crates"));
let library_crates = metadata
.workspace_members
.iter()
.flat_map(|ws_member| {
let ws_member = &metadata[ws_member];
let target = ws_member.lib_or_proc_macro()?;
let src_path = dunce::canonicalize(&target.src_path).ok()?;
let path1 = ws_member.manifest_path.strip_prefix(crates_dir).ok()?;
let path2 = src_path.strip_prefix(crates_dir).ok()?;
let common_path = itertools::zip(path1, path2)
.filter(|(s1, s2)| s1 == s2)
.map(|(segment, _)| segment.to_str().expect("this is from a JSON"))
.collect();
Some((common_path, ws_member, target))
})
.collect::<Vec<(Vec<_>, _, _)>>();
let mut tree = Tree::default();
for (path, package, target) in &library_crates {
tree.insert(path, package, target);
}
let generated = &mut indoc! {r"
// This file is automatically generated by `cargo-online-judge-verification-helper-helper`.
//! Re-exports the library crates for rustdoc.
//!
//! This crate itself is not intended to be used directly.
"}
.to_owned();
tree.expand(&|id| &extern_crate_names[id], generated)?;
fs::write(&lib.src_path, apply_rustfmt(generated)?)?;
eprintln!("{:>12} {}", "Wrote", lib.src_path.display());
return Ok(());
enum Tree<'cm> {
Leaf(&'cm cm::PackageId, &'cm cm::Target),
Joint(BTreeMap<&'cm str, Self>),
}
impl<'cm> Tree<'cm> {
fn | (
&mut self,
path: &[&'cm str],
package: &'cm cm::Package,
target: &'cm cm::Target,
) {
match (self, path) {
(Self::Joint(joint), []) => {
joint.insert(&target.name, Self::Leaf(&package.id, target));
}
                (Self::Joint(joint), [segment, path @ ..]) => {
joint
.entry(segment)
.or_default()
.insert(path, package, target);
}
_ => panic!(),
}
}
fn expand(
&self,
extern_crate_name: &impl Fn(&cm::PackageId) -> &'cm str,
out: &mut String,
) -> anyhow::Result<()> {
match self {
Self::Leaf(package_id, target) => {
let doc = target.read_crate_level_doc()?;
                    if !doc.is_empty() {
*out += "#![doc=";
*out += "e!(#doc).to_string();
*out += "]";
}
*out += "pub use ::";
*out += extern_crate_name(package_id);
*out += "::*;"
}
Self::Joint(joint) => {
for (segment, node) in joint {
*out += "pub mod ";
*out += segment;
*out += "{";
node.expand(extern_crate_name, out)?;
*out += "}";
}
}
}
Ok(())
}
}
impl Default for Tree<'_> {
fn default() -> Self {
Self::Joint(BTreeMap::new())
}
}
fn apply_rustfmt(code: &str) -> anyhow::Result<String> {
let rustfmt_exe = PathBuf::from(
env::var_os("CARGO").with_context(|| "missing `$CARGO` environment variable")?,
)
.with_file_name("rustfmt")
.with_extension(env::consts::EXE_EXTENSION);
let tempdir = tempfile::Builder::new()
.prefix("qryxip-competitive-programming-library-xtask-")
.tempdir()?;
let path = tempdir.path().join("lib.rs");
fs::write(&path, code)?;
cmd!(rustfmt_exe, "--edition", "2018", &path).run()?;
let code = fs::read_to_string(path)?;
tempdir.close()?;
Ok(code)
}
}
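/// Writes one `md/<package>.md` front-matter stub per library crate so that
/// online-judge-verification-helper can attach the crate-level docs to the
/// corresponding source file.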
pub fn gen_doc_oj_verify(
md_dir: Option<&Path>,
manifest_path: Option<&Path>,
) -> anyhow::Result<()> {
let metadata = &cargo_metadata(manifest_path)?;
let library_crates = metadata
.workspace_members
.iter()
.flat_map(|ws_member| {
let ws_member = &metadata[ws_member];
let target = ws_member.lib_or_proc_macro()?;
Some((ws_member, target))
})
.collect::<Vec<_>>();
let md_dir = &md_dir
.map(ToOwned::to_owned)
.unwrap_or_else(|| metadata.workspace_root.join("md"));
fs::create_dir_all(md_dir)
.with_context(|| format!("could not create `{}`", md_dir.display()))?;
for (package, target) in library_crates {
let markdown = format!(
"---\n\
title: \"{} (<code>{}</code>)\"\n\
documentation_of: //{}\n\
---\n\
{}",
package.name,
target.name,
target
.src_path
.strip_prefix(&metadata.workspace_root)
.unwrap_or(&target.src_path)
.display(),
modify_doc_for_oj_verify(&target.read_crate_level_doc()?),
);
let markdown_path = &md_dir.join(&package.name).with_extension("md");
fs::write(markdown_path, markdown)
.with_context(|| format!("could not write `{}`", markdown_path.display()))?;
eprintln!("{:>12} {}", "Wrote", markdown_path.display());
}
return Ok(());
/// Inserts `rust` in code blocks without language specification.
fn modify_doc_for_oj_verify(doc: &str) -> String {
use pulldown_cmark::{CodeBlockKind, Event, Options, Parser, Tag};
let mut doc = match doc {
"" => "\n".to_owned(),
doc => doc
.lines()
.map(|s| s.strip_prefix(' ').unwrap_or(s).to_owned() + "\n")
.join(""),
};
#[allow(clippy::redundant_clone)]
        for (_, Range { start, .. }) in Parser::new_ext(&doc.clone(), Options::all())
.into_offset_iter()
.filter(|(event, _)| {
matches!(
event,
Event::Start(Tag::CodeBlock(CodeBlockKind::Fenced(kind))) if kind.is_empty()
)
})
.collect::<Vec<_>>()
.into_iter()
.rev()
{
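            // Walk past the fence's backticks, then tag the block as Rust.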
let mut pos = start;
while doc.as_bytes()[pos] == b'`' {
pos += 1;
}
doc.insert_str(pos, "rust");
}
doc
}
}
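/// Runs `cargo metadata`, trimming cargo's `error: ` prefix from failure
/// messages.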
fn cargo_metadata(manifest_path: Option<&Path>) -> anyhow::Result<cm::Metadata> {
let mut cmd = cm::MetadataCommand::new();
if let Some(manifest_path) = manifest_path {
cmd.manifest_path(manifest_path);
}
cmd.exec().map_err(|err| match err {
cm::Error::CargoMetadata { stderr } => {
anyhow!("{}", stderr.trim_start_matches("error: ").trim_end())
}
err => anyhow::Error::msg(err),
})
}
trait PackageExt {
fn lib_or_proc_macro(&self) -> Option<&cm::Target>;
}
impl PackageExt for cm::Package {
fn lib_or_proc_macro(&self) -> Option<&cm::Target> {
        self.targets.iter().find(|cm::Target { kind, .. }| {
[&["lib".to_owned()][..], &["proc-macro".to_owned()]].contains(&&**kind)
})
}
}
trait TargetExt {
fn read_crate_level_doc(&self) -> anyhow::Result<String>;
}
impl TargetExt for cm::Target {
fn read_crate_level_doc(&self) -> anyhow::Result<String> {
        let syn::File { attrs, .. } = syn::parse_file(&fs::read_to_string(&self.src_path)?)?;
Ok(attrs
.iter()
.flat_map(syn::Attribute::parse_meta)
.flat_map(|meta| match meta {
syn::Meta::NameValue(syn::MetaNameValue {
path,
lit: syn::Lit::Str(lit_str),
..
}) if path.is_ident("doc") => Some(lit_str.value()),
_ => None,
})
.join("\n"))
}
}
| insert | identifier_name |
lib.rs | #![forbid(unsafe_code)]
#![warn(rust_2018_idioms)]
use anyhow::{anyhow, Context as _};
use cargo_metadata as cm;
use duct::cmd;
use indoc::indoc;
use itertools::Itertools as _;
use quote::quote;
use std::{
collections::{BTreeMap, HashMap, HashSet},
env, fs,
ops::Range,
path::{Path, PathBuf},
};
pub fn gen_doc_rust(crates_dir: Option<&Path>, manifest_path: Option<&Path>) -> anyhow::Result<()> {
let metadata = &cargo_metadata(manifest_path)?;
let resolve = metadata.resolve.as_ref().expect("should be present");
let resolve_root = resolve
.root
.as_ref()
.with_context(|| "this is a virtual manifest")?;
let extern_crate_names = {
let explicit_names_in_toml = metadata[resolve_root]
.dependencies
.iter()
.flat_map(|d| &d.rename)
.collect::<HashSet<_>>();
        let cm::Node { deps, .. } = resolve
            .nodes
            .iter()
            .find(|cm::Node { id, .. }| id == resolve_root)
            .unwrap();
        deps.iter()
            .filter(|cm::NodeDep { dep_kinds, .. }| {
                dep_kinds
                    .iter()
                    .any(|cm::DepKindInfo { kind, .. }| *kind != cm::DependencyKind::Build)
            })
            .flat_map(|cm::NodeDep { name, pkg, .. }| {
let extern_crate_name = if explicit_names_in_toml.contains(name) {
name
} else {
&metadata[pkg].lib_or_proc_macro()?.name
};
Some((pkg, extern_crate_name))
})
.collect::<HashMap<_, _>>()
};
let lib = metadata[resolve_root]
.lib_or_proc_macro()
.with_context(|| format!("missing `lib|proc-macro` target in `{}`", resolve_root))?;
let crates_dir = &crates_dir
.map(ToOwned::to_owned)
.unwrap_or_else(|| metadata.workspace_root.join("crates"));
let library_crates = metadata
.workspace_members
.iter()
.flat_map(|ws_member| {
let ws_member = &metadata[ws_member];
let target = ws_member.lib_or_proc_macro()?;
let src_path = dunce::canonicalize(&target.src_path).ok()?;
let path1 = ws_member.manifest_path.strip_prefix(crates_dir).ok()?;
let path2 = src_path.strip_prefix(crates_dir).ok()?;
let common_path = itertools::zip(path1, path2)
.filter(|(s1, s2)| s1 == s2)
.map(|(segment, _)| segment.to_str().expect("this is from a JSON"))
.collect();
Some((common_path, ws_member, target))
})
.collect::<Vec<(Vec<_>, _, _)>>();
let mut tree = Tree::default();
for (path, package, target) in &library_crates {
tree.insert(path, package, target);
}
let generated = &mut indoc! {r"
// This file is automatically generated by `cargo-online-judge-verification-helper-helper`.
//! Re-exports the library crates for rustdoc.
//!
//! This crate itself is not intended to be used directly.
"}
.to_owned();
tree.expand(&|id| &extern_crate_names[id], generated)?;
fs::write(&lib.src_path, apply_rustfmt(generated)?)?;
eprintln!("{:>12} {}", "Wrote", lib.src_path.display());
return Ok(());
enum Tree<'cm> {
Leaf(&'cm cm::PackageId, &'cm cm::Target),
Joint(BTreeMap<&'cm str, Self>),
}
impl<'cm> Tree<'cm> {
fn insert(
&mut self,
path: &[&'cm str],
package: &'cm cm::Package,
target: &'cm cm::Target,
) {
match (self, path) {
(Self::Joint(joint), []) => {
joint.insert(&target.name, Self::Leaf(&package.id, target));
}
                (Self::Joint(joint), [segment, path @ ..]) => {
joint
.entry(segment)
.or_default()
.insert(path, package, target);
}
_ => panic!(),
}
}
fn expand(
&self,
extern_crate_name: &impl Fn(&cm::PackageId) -> &'cm str,
out: &mut String,
) -> anyhow::Result<()> {
match self {
Self::Leaf(package_id, target) => {
let doc = target.read_crate_level_doc()?;
                    if !doc.is_empty() {
*out += "#![doc=";
*out += "e!(#doc).to_string();
*out += "]";
}
*out += "pub use ::";
*out += extern_crate_name(package_id);
*out += "::*;"
}
Self::Joint(joint) => {
for (segment, node) in joint {
*out += "pub mod ";
*out += segment;
*out += "{";
node.expand(extern_crate_name, out)?;
*out += "}";
}
}
}
Ok(())
}
}
impl Default for Tree<'_> {
fn default() -> Self {
Self::Joint(BTreeMap::new())
}
} | env::var_os("CARGO").with_context(|| "missing `$CARGO` environment variable")?,
)
.with_file_name("rustfmt")
.with_extension(env::consts::EXE_EXTENSION);
let tempdir = tempfile::Builder::new()
.prefix("qryxip-competitive-programming-library-xtask-")
.tempdir()?;
let path = tempdir.path().join("lib.rs");
fs::write(&path, code)?;
cmd!(rustfmt_exe, "--edition", "2018", &path).run()?;
let code = fs::read_to_string(path)?;
tempdir.close()?;
Ok(code)
}
}
pub fn gen_doc_oj_verify(
md_dir: Option<&Path>,
manifest_path: Option<&Path>,
) -> anyhow::Result<()> {
let metadata = &cargo_metadata(manifest_path)?;
let library_crates = metadata
.workspace_members
.iter()
.flat_map(|ws_member| {
let ws_member = &metadata[ws_member];
let target = ws_member.lib_or_proc_macro()?;
Some((ws_member, target))
})
.collect::<Vec<_>>();
let md_dir = &md_dir
.map(ToOwned::to_owned)
.unwrap_or_else(|| metadata.workspace_root.join("md"));
fs::create_dir_all(md_dir)
.with_context(|| format!("could not create `{}`", md_dir.display()))?;
for (package, target) in library_crates {
let markdown = format!(
"---\n\
title: \"{} (<code>{}</code>)\"\n\
documentation_of: //{}\n\
---\n\
{}",
package.name,
target.name,
target
.src_path
.strip_prefix(&metadata.workspace_root)
.unwrap_or(&target.src_path)
.display(),
modify_doc_for_oj_verify(&target.read_crate_level_doc()?),
);
let markdown_path = &md_dir.join(&package.name).with_extension("md");
fs::write(markdown_path, markdown)
.with_context(|| format!("could not write `{}`", markdown_path.display()))?;
eprintln!("{:>12} {}", "Wrote", markdown_path.display());
}
return Ok(());
/// Inserts `rust` in code blocks without language specification.
fn modify_doc_for_oj_verify(doc: &str) -> String {
use pulldown_cmark::{CodeBlockKind, Event, Options, Parser, Tag};
let mut doc = match doc {
"" => "\n".to_owned(),
doc => doc
.lines()
.map(|s| s.strip_prefix(' ').unwrap_or(s).to_owned() + "\n")
.join(""),
};
#[allow(clippy::redundant_clone)]
        for (_, Range { start, .. }) in Parser::new_ext(&doc.clone(), Options::all())
.into_offset_iter()
.filter(|(event, _)| {
matches!(
event,
Event::Start(Tag::CodeBlock(CodeBlockKind::Fenced(kind))) if kind.is_empty()
)
})
.collect::<Vec<_>>()
.into_iter()
.rev()
{
let mut pos = start;
while doc.as_bytes()[pos] == b'`' {
pos += 1;
}
doc.insert_str(pos, "rust");
}
doc
}
}
fn cargo_metadata(manifest_path: Option<&Path>) -> anyhow::Result<cm::Metadata> {
let mut cmd = cm::MetadataCommand::new();
if let Some(manifest_path) = manifest_path {
cmd.manifest_path(manifest_path);
}
cmd.exec().map_err(|err| match err {
cm::Error::CargoMetadata { stderr } => {
anyhow!("{}", stderr.trim_start_matches("error: ").trim_end())
}
err => anyhow::Error::msg(err),
})
}
trait PackageExt {
fn lib_or_proc_macro(&self) -> Option<&cm::Target>;
}
impl PackageExt for cm::Package {
fn lib_or_proc_macro(&self) -> Option<&cm::Target> {
        self.targets.iter().find(|cm::Target { kind, .. }| {
[&["lib".to_owned()][..], &["proc-macro".to_owned()]].contains(&&**kind)
})
}
}
trait TargetExt {
fn read_crate_level_doc(&self) -> anyhow::Result<String>;
}
impl TargetExt for cm::Target {
fn read_crate_level_doc(&self) -> anyhow::Result<String> {
        let syn::File { attrs, .. } = syn::parse_file(&fs::read_to_string(&self.src_path)?)?;
Ok(attrs
.iter()
.flat_map(syn::Attribute::parse_meta)
.flat_map(|meta| match meta {
syn::Meta::NameValue(syn::MetaNameValue {
path,
lit: syn::Lit::Str(lit_str),
..
}) if path.is_ident("doc") => Some(lit_str.value()),
_ => None,
})
.join("\n"))
}
} |
fn apply_rustfmt(code: &str) -> anyhow::Result<String> {
let rustfmt_exe = PathBuf::from( | random_line_split |
lib.rs | #![forbid(unsafe_code)]
#![warn(rust_2018_idioms)]
use anyhow::{anyhow, Context as _};
use cargo_metadata as cm;
use duct::cmd;
use indoc::indoc;
use itertools::Itertools as _;
use quote::quote;
use std::{
collections::{BTreeMap, HashMap, HashSet},
env, fs,
ops::Range,
path::{Path, PathBuf},
};
pub fn gen_doc_rust(crates_dir: Option<&Path>, manifest_path: Option<&Path>) -> anyhow::Result<()> {
let metadata = &cargo_metadata(manifest_path)?;
let resolve = metadata.resolve.as_ref().expect("should be present");
let resolve_root = resolve
.root
.as_ref()
.with_context(|| "this is a virtual manifest")?;
let extern_crate_names = {
let explicit_names_in_toml = metadata[resolve_root]
.dependencies
.iter()
.flat_map(|d| &d.rename)
.collect::<HashSet<_>>();
        let cm::Node { deps, .. } = resolve
            .nodes
            .iter()
            .find(|cm::Node { id, .. }| id == resolve_root)
            .unwrap();
        deps.iter()
            .filter(|cm::NodeDep { dep_kinds, .. }| {
                dep_kinds
                    .iter()
                    .any(|cm::DepKindInfo { kind, .. }| *kind != cm::DependencyKind::Build)
            })
            .flat_map(|cm::NodeDep { name, pkg, .. }| {
let extern_crate_name = if explicit_names_in_toml.contains(name) {
name
} else {
&metadata[pkg].lib_or_proc_macro()?.name
};
Some((pkg, extern_crate_name))
})
.collect::<HashMap<_, _>>()
};
let lib = metadata[resolve_root]
.lib_or_proc_macro()
.with_context(|| format!("missing `lib|proc-macro` target in `{}`", resolve_root))?;
let crates_dir = &crates_dir
.map(ToOwned::to_owned)
.unwrap_or_else(|| metadata.workspace_root.join("crates"));
let library_crates = metadata
.workspace_members
.iter()
.flat_map(|ws_member| {
let ws_member = &metadata[ws_member];
let target = ws_member.lib_or_proc_macro()?;
let src_path = dunce::canonicalize(&target.src_path).ok()?;
let path1 = ws_member.manifest_path.strip_prefix(crates_dir).ok()?;
let path2 = src_path.strip_prefix(crates_dir).ok()?;
let common_path = itertools::zip(path1, path2)
.filter(|(s1, s2)| s1 == s2)
.map(|(segment, _)| segment.to_str().expect("this is from a JSON"))
.collect();
Some((common_path, ws_member, target))
})
.collect::<Vec<(Vec<_>, _, _)>>();
let mut tree = Tree::default();
for (path, package, target) in &library_crates {
tree.insert(path, package, target);
}
let generated = &mut indoc! {r"
// This file is automatically generated by `cargo-online-judge-verification-helper-helper`.
//! Re-exports the library crates for rustdoc.
//!
//! This crate itself is not intended to be used directly.
"}
.to_owned();
tree.expand(&|id| &extern_crate_names[id], generated)?;
fs::write(&lib.src_path, apply_rustfmt(generated)?)?;
eprintln!("{:>12} {}", "Wrote", lib.src_path.display());
return Ok(());
enum Tree<'cm> {
Leaf(&'cm cm::PackageId, &'cm cm::Target),
Joint(BTreeMap<&'cm str, Self>),
}
impl<'cm> Tree<'cm> {
fn insert(
&mut self,
path: &[&'cm str],
package: &'cm cm::Package,
target: &'cm cm::Target,
) {
match (self, path) {
(Self::Joint(joint), []) => |
                (Self::Joint(joint), [segment, path @ ..]) => {
joint
.entry(segment)
.or_default()
.insert(path, package, target);
}
_ => panic!(),
}
}
fn expand(
&self,
extern_crate_name: &impl Fn(&cm::PackageId) -> &'cm str,
out: &mut String,
) -> anyhow::Result<()> {
match self {
Self::Leaf(package_id, target) => {
let doc = target.read_crate_level_doc()?;
                    if !doc.is_empty() {
*out += "#![doc=";
*out += "e!(#doc).to_string();
*out += "]";
}
*out += "pub use ::";
*out += extern_crate_name(package_id);
*out += "::*;"
}
Self::Joint(joint) => {
for (segment, node) in joint {
*out += "pub mod ";
*out += segment;
*out += "{";
node.expand(extern_crate_name, out)?;
*out += "}";
}
}
}
Ok(())
}
}
impl Default for Tree<'_> {
fn default() -> Self {
Self::Joint(BTreeMap::new())
}
}
fn apply_rustfmt(code: &str) -> anyhow::Result<String> {
let rustfmt_exe = PathBuf::from(
env::var_os("CARGO").with_context(|| "missing `$CARGO` environment variable")?,
)
.with_file_name("rustfmt")
.with_extension(env::consts::EXE_EXTENSION);
let tempdir = tempfile::Builder::new()
.prefix("qryxip-competitive-programming-library-xtask-")
.tempdir()?;
let path = tempdir.path().join("lib.rs");
fs::write(&path, code)?;
cmd!(rustfmt_exe, "--edition", "2018", &path).run()?;
let code = fs::read_to_string(path)?;
tempdir.close()?;
Ok(code)
}
}
pub fn gen_doc_oj_verify(
md_dir: Option<&Path>,
manifest_path: Option<&Path>,
) -> anyhow::Result<()> {
let metadata = &cargo_metadata(manifest_path)?;
let library_crates = metadata
.workspace_members
.iter()
.flat_map(|ws_member| {
let ws_member = &metadata[ws_member];
let target = ws_member.lib_or_proc_macro()?;
Some((ws_member, target))
})
.collect::<Vec<_>>();
let md_dir = &md_dir
.map(ToOwned::to_owned)
.unwrap_or_else(|| metadata.workspace_root.join("md"));
fs::create_dir_all(md_dir)
.with_context(|| format!("could not create `{}`", md_dir.display()))?;
for (package, target) in library_crates {
let markdown = format!(
"---\n\
title: \"{} (<code>{}</code>)\"\n\
documentation_of: //{}\n\
---\n\
{}",
package.name,
target.name,
target
.src_path
.strip_prefix(&metadata.workspace_root)
.unwrap_or(&target.src_path)
.display(),
modify_doc_for_oj_verify(&target.read_crate_level_doc()?),
);
let markdown_path = &md_dir.join(&package.name).with_extension("md");
fs::write(markdown_path, markdown)
.with_context(|| format!("could not write `{}`", markdown_path.display()))?;
eprintln!("{:>12} {}", "Wrote", markdown_path.display());
}
return Ok(());
/// Inserts `rust` in code blocks without language specification.
fn modify_doc_for_oj_verify(doc: &str) -> String {
use pulldown_cmark::{CodeBlockKind, Event, Options, Parser, Tag};
let mut doc = match doc {
"" => "\n".to_owned(),
doc => doc
.lines()
.map(|s| s.strip_prefix(' ').unwrap_or(s).to_owned() + "\n")
.join(""),
};
#[allow(clippy::redundant_clone)]
        for (_, Range { start, .. }) in Parser::new_ext(&doc.clone(), Options::all())
.into_offset_iter()
.filter(|(event, _)| {
matches!(
event,
Event::Start(Tag::CodeBlock(CodeBlockKind::Fenced(kind))) if kind.is_empty()
)
})
.collect::<Vec<_>>()
.into_iter()
.rev()
{
let mut pos = start;
while doc.as_bytes()[pos] == b'`' {
pos += 1;
}
doc.insert_str(pos, "rust");
}
doc
}
}
fn cargo_metadata(manifest_path: Option<&Path>) -> anyhow::Result<cm::Metadata> {
let mut cmd = cm::MetadataCommand::new();
if let Some(manifest_path) = manifest_path {
cmd.manifest_path(manifest_path);
}
cmd.exec().map_err(|err| match err {
cm::Error::CargoMetadata { stderr } => {
anyhow!("{}", stderr.trim_start_matches("error: ").trim_end())
}
err => anyhow::Error::msg(err),
})
}
trait PackageExt {
fn lib_or_proc_macro(&self) -> Option<&cm::Target>;
}
impl PackageExt for cm::Package {
fn lib_or_proc_macro(&self) -> Option<&cm::Target> {
        self.targets.iter().find(|cm::Target { kind, .. }| {
[&["lib".to_owned()][..], &["proc-macro".to_owned()]].contains(&&**kind)
})
}
}
trait TargetExt {
fn read_crate_level_doc(&self) -> anyhow::Result<String>;
}
impl TargetExt for cm::Target {
fn read_crate_level_doc(&self) -> anyhow::Result<String> {
        let syn::File { attrs, .. } = syn::parse_file(&fs::read_to_string(&self.src_path)?)?;
Ok(attrs
.iter()
.flat_map(syn::Attribute::parse_meta)
.flat_map(|meta| match meta {
syn::Meta::NameValue(syn::MetaNameValue {
path,
lit: syn::Lit::Str(lit_str),
..
}) if path.is_ident("doc") => Some(lit_str.value()),
_ => None,
})
.join("\n"))
}
}
| {
joint.insert(&target.name, Self::Leaf(&package.id, target));
} | conditional_block |
lib.rs | #![forbid(unsafe_code)]
#![warn(rust_2018_idioms)]
use anyhow::{anyhow, Context as _};
use cargo_metadata as cm;
use duct::cmd;
use indoc::indoc;
use itertools::Itertools as _;
use quote::quote;
use std::{
collections::{BTreeMap, HashMap, HashSet},
env, fs,
ops::Range,
path::{Path, PathBuf},
};
pub fn gen_doc_rust(crates_dir: Option<&Path>, manifest_path: Option<&Path>) -> anyhow::Result<()> {
let metadata = &cargo_metadata(manifest_path)?;
let resolve = metadata.resolve.as_ref().expect("should be present");
let resolve_root = resolve
.root
.as_ref()
.with_context(|| "this is a virtual manifest")?;
let extern_crate_names = {
let explicit_names_in_toml = metadata[resolve_root]
.dependencies
.iter()
.flat_map(|d| &d.rename)
.collect::<HashSet<_>>();
        let cm::Node { deps, .. } = resolve
            .nodes
            .iter()
            .find(|cm::Node { id, .. }| id == resolve_root)
            .unwrap();
        deps.iter()
            .filter(|cm::NodeDep { dep_kinds, .. }| {
                dep_kinds
                    .iter()
                    .any(|cm::DepKindInfo { kind, .. }| *kind != cm::DependencyKind::Build)
            })
            .flat_map(|cm::NodeDep { name, pkg, .. }| {
let extern_crate_name = if explicit_names_in_toml.contains(name) {
name
} else {
&metadata[pkg].lib_or_proc_macro()?.name
};
Some((pkg, extern_crate_name))
})
.collect::<HashMap<_, _>>()
};
let lib = metadata[resolve_root]
.lib_or_proc_macro()
.with_context(|| format!("missing `lib|proc-macro` target in `{}`", resolve_root))?;
let crates_dir = &crates_dir
.map(ToOwned::to_owned)
.unwrap_or_else(|| metadata.workspace_root.join("crates"));
let library_crates = metadata
.workspace_members
.iter()
.flat_map(|ws_member| {
let ws_member = &metadata[ws_member];
let target = ws_member.lib_or_proc_macro()?;
let src_path = dunce::canonicalize(&target.src_path).ok()?;
let path1 = ws_member.manifest_path.strip_prefix(crates_dir).ok()?;
let path2 = src_path.strip_prefix(crates_dir).ok()?;
let common_path = itertools::zip(path1, path2)
.filter(|(s1, s2)| s1 == s2)
.map(|(segment, _)| segment.to_str().expect("this is from a JSON"))
.collect();
Some((common_path, ws_member, target))
})
.collect::<Vec<(Vec<_>, _, _)>>();
let mut tree = Tree::default();
for (path, package, target) in &library_crates {
tree.insert(path, package, target);
}
let generated = &mut indoc! {r"
// This file is automatically generated by `cargo-online-judge-verification-helper-helper`.
//! Re-exports the library crates for rustdoc.
//!
//! This crate itself is not intended to be used directly.
"}
.to_owned();
tree.expand(&|id| &extern_crate_names[id], generated)?;
fs::write(&lib.src_path, apply_rustfmt(generated)?)?;
eprintln!("{:>12} {}", "Wrote", lib.src_path.display());
return Ok(());
enum Tree<'cm> {
Leaf(&'cm cm::PackageId, &'cm cm::Target),
Joint(BTreeMap<&'cm str, Self>),
}
impl<'cm> Tree<'cm> {
fn insert(
&mut self,
path: &[&'cm str],
package: &'cm cm::Package,
target: &'cm cm::Target,
) {
match (self, path) {
(Self::Joint(joint), []) => {
joint.insert(&target.name, Self::Leaf(&package.id, target));
}
                (Self::Joint(joint), [segment, path @ ..]) => {
joint
.entry(segment)
.or_default()
.insert(path, package, target);
}
_ => panic!(),
}
}
fn expand(
&self,
extern_crate_name: &impl Fn(&cm::PackageId) -> &'cm str,
out: &mut String,
) -> anyhow::Result<()> {
match self {
Self::Leaf(package_id, target) => {
let doc = target.read_crate_level_doc()?;
                    if !doc.is_empty() {
*out += "#![doc=";
*out += "e!(#doc).to_string();
*out += "]";
}
*out += "pub use ::";
*out += extern_crate_name(package_id);
*out += "::*;"
}
Self::Joint(joint) => {
for (segment, node) in joint {
*out += "pub mod ";
*out += segment;
*out += "{";
node.expand(extern_crate_name, out)?;
*out += "}";
}
}
}
Ok(())
}
}
impl Default for Tree<'_> {
fn default() -> Self {
Self::Joint(BTreeMap::new())
}
}
fn apply_rustfmt(code: &str) -> anyhow::Result<String> {
let rustfmt_exe = PathBuf::from(
env::var_os("CARGO").with_context(|| "missing `$CARGO` environment variable")?,
)
.with_file_name("rustfmt")
.with_extension(env::consts::EXE_EXTENSION);
let tempdir = tempfile::Builder::new()
.prefix("qryxip-competitive-programming-library-xtask-")
.tempdir()?;
let path = tempdir.path().join("lib.rs");
fs::write(&path, code)?;
cmd!(rustfmt_exe, "--edition", "2018", &path).run()?;
let code = fs::read_to_string(path)?;
tempdir.close()?;
Ok(code)
}
}
pub fn gen_doc_oj_verify(
md_dir: Option<&Path>,
manifest_path: Option<&Path>,
) -> anyhow::Result<()> | for (package, target) in library_crates {
let markdown = format!(
"---\n\
title: \"{} (<code>{}</code>)\"\n\
documentation_of: //{}\n\
---\n\
{}",
package.name,
target.name,
target
.src_path
.strip_prefix(&metadata.workspace_root)
.unwrap_or(&target.src_path)
.display(),
modify_doc_for_oj_verify(&target.read_crate_level_doc()?),
);
let markdown_path = &md_dir.join(&package.name).with_extension("md");
fs::write(markdown_path, markdown)
.with_context(|| format!("could not write `{}`", markdown_path.display()))?;
eprintln!("{:>12} {}", "Wrote", markdown_path.display());
}
return Ok(());
/// Inserts `rust` in code blocks without language specification.
fn modify_doc_for_oj_verify(doc: &str) -> String {
use pulldown_cmark::{CodeBlockKind, Event, Options, Parser, Tag};
let mut doc = match doc {
"" => "\n".to_owned(),
doc => doc
.lines()
.map(|s| s.strip_prefix(' ').unwrap_or(s).to_owned() + "\n")
.join(""),
};
#[allow(clippy::redundant_clone)]
        for (_, Range { start, .. }) in Parser::new_ext(&doc.clone(), Options::all())
.into_offset_iter()
.filter(|(event, _)| {
matches!(
event,
Event::Start(Tag::CodeBlock(CodeBlockKind::Fenced(kind))) if kind.is_empty()
)
})
.collect::<Vec<_>>()
.into_iter()
.rev()
{
let mut pos = start;
while doc.as_bytes()[pos] == b'`' {
pos += 1;
}
doc.insert_str(pos, "rust");
}
doc
}
}
fn cargo_metadata(manifest_path: Option<&Path>) -> anyhow::Result<cm::Metadata> {
let mut cmd = cm::MetadataCommand::new();
if let Some(manifest_path) = manifest_path {
cmd.manifest_path(manifest_path);
}
cmd.exec().map_err(|err| match err {
cm::Error::CargoMetadata { stderr } => {
anyhow!("{}", stderr.trim_start_matches("error: ").trim_end())
}
err => anyhow::Error::msg(err),
})
}
trait PackageExt {
fn lib_or_proc_macro(&self) -> Option<&cm::Target>;
}
impl PackageExt for cm::Package {
fn lib_or_proc_macro(&self) -> Option<&cm::Target> {
        self.targets.iter().find(|cm::Target { kind, .. }| {
[&["lib".to_owned()][..], &["proc-macro".to_owned()]].contains(&&**kind)
})
}
}
trait TargetExt {
fn read_crate_level_doc(&self) -> anyhow::Result<String>;
}
impl TargetExt for cm::Target {
fn read_crate_level_doc(&self) -> anyhow::Result<String> {
        let syn::File { attrs, .. } = syn::parse_file(&fs::read_to_string(&self.src_path)?)?;
Ok(attrs
.iter()
.flat_map(syn::Attribute::parse_meta)
.flat_map(|meta| match meta {
syn::Meta::NameValue(syn::MetaNameValue {
path,
lit: syn::Lit::Str(lit_str),
..
}) if path.is_ident("doc") => Some(lit_str.value()),
_ => None,
})
.join("\n"))
}
}
| {
let metadata = &cargo_metadata(manifest_path)?;
let library_crates = metadata
.workspace_members
.iter()
.flat_map(|ws_member| {
let ws_member = &metadata[ws_member];
let target = ws_member.lib_or_proc_macro()?;
Some((ws_member, target))
})
.collect::<Vec<_>>();
let md_dir = &md_dir
.map(ToOwned::to_owned)
.unwrap_or_else(|| metadata.workspace_root.join("md"));
fs::create_dir_all(md_dir)
.with_context(|| format!("could not create `{}`", md_dir.display()))?;
| identifier_body |
lib.rs | // use std::borrow::Cow;
use std::cmp::Ordering;
use std::rc::Rc;
use std::result::Result;
use std::vec::Vec;
#[derive(Debug)]
enum MastError {
InvalidNode,
StoreError(std::io::Error),
}
#[derive(Debug,Clone)]
struct Node {
key: Vec<i32>,
value: Vec<i32>,
link: Vec<Option<Link>>,
dirty: bool,
}
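// Invariant: `link.len() == key.len() + 1`; `link[i]` leads to the entries
// sorting below `key[i]`, and the last link to those above the last key.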
/*
// TODO
impl Clone for Node {
fn clone(&self) -> Node {
panic!("why are you doing this")
}
}
impl ToOwned for Node {
type Owned = Node;
fn to_owned(&self) -> Self::Owned {
return *(self.clone());
}
}*/
#[derive(Clone, Debug)]
enum Link {
// Empty,
MutableNode(Node, Option<Rc<Node>>),
SharedNode(Rc<Node>),
// Node(Cow<'a, Node<'a>>),
Stored(String),
}
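// A link is either an in-memory mutable node (optionally shadowing the shared
// node it was forked from), a shared immutable node behind an `Rc`, or a
// `Stored` reference to a persisted node (unimplemented below).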
pub struct Mast<'a> {
size: u64,
height: u8,
root_link: Link,
branch_factor: u16,
grow_after_size: u64,
shrink_below_size: u64,
key_order: fn(&i32, &i32) -> i8,
key_layer: fn(&i32, u16) -> u8,
_a: std::marker::PhantomData<&'a u32>,
// marshal:
// unmarshal:
// store: InMemoryNodeStore<'a>,
}
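// `key_order` is a three-way comparator (-1/0/1); `key_layer` derives a fixed
// layer from the key itself, so `insert` and `get` can both recompute where a
// key lives without consulting the tree.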
const default_branch_factor: u16 = 16;
fn default_order(a: &i32, b: &i32) -> i8 {
if *a < *b {
return -1;
} else if *a > *b {
return 1;
} else {
return 0;
}
}
fn default_layer(v: &i32, branch_factor: u16) -> u8 {
let mut layer = 0;
let mut v = *v;
if branch_factor == 16 | else {
        while v != 0 && v % branch_factor as i32 == 0 {
v /= branch_factor as i32;
layer += 1;
}
}
return layer;
}
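// The layer counts how many times `branch_factor` divides the key: e.g.
// default_layer(&3, 16) == 0, default_layer(&16, 16) == 1,
// default_layer(&256, 16) == 2 (the elided fast path for 16 presumably
// computes the same via trailing zeros).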
impl<'a> Mast<'a> {
pub fn newInMemory() -> Mast<'a> {
return Mast {
size: 0,
height: 0,
root_link: Link::MutableNode(Node::new(default_branch_factor as usize), None),
branch_factor: default_branch_factor,
grow_after_size: default_branch_factor as u64,
shrink_below_size: 1,
key_order: default_order,
key_layer: default_layer,
_a: std::marker::PhantomData,
// store: InMemoryNodeStore::new(),
};
}
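    // Inserts at the layer dictated by the key (capped at the current root
    // height); once the tree outgrows `grow_after_size` and some key belongs
    // above the root, a new root is grown and both thresholds scale up.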
fn insert(&mut self, key: i32, value: i32) -> Result<InsertResult, MastError> {
let key_layer = (self.key_layer)(&key, self.branch_factor);
let target_layer = std::cmp::min(key_layer, self.height);
let distance = self.height - target_layer;
let root = load_mut(&mut self.root_link)?;
let res = root.insert(key, value, distance, self.key_order)?;
match res {
InsertResult::Inserted => self.size += 1,
_ => return Ok(res),
};
if self.size > self.grow_after_size
&& root.can_grow(self.height, self.key_layer, self.branch_factor)
{
self.root_link = root
.grow(self.height, self.key_layer, self.branch_factor)
.unwrap();
self.height += 1;
self.shrink_below_size *= self.branch_factor as u64;
self.grow_after_size *= self.branch_factor as u64;
};
Ok(res)
}
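    // Descends from the root; the key can only match once `distance` has
    // been walked down to the key's own layer.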
fn get(&self, key: &i32) -> Result<Option<&i32>, MastError> {
let mut distance =
self.height - std::cmp::min((self.key_layer)(key, self.branch_factor), self.height);
        // `distance` is unsigned, so it can never be negative here; the
        // subtraction above would already have panicked on underflow.
let mut node = load(&self.root_link)?;
loop {
let (equal, i) = get_index_for_key(key, &node.key, self.key_order);
if distance == 0 {
if equal {
return Ok(Some(&node.value[i]));
} else {
return Ok(None);
}
} else {
distance -= 1
}
match node.link[i] {
None => return Ok(None),
Some(ref link) => node = load(link)?,
}
}
}
}
fn load(link: &Link) -> Result<&Node, MastError> {
match link {
// Link::Empty => Ok(Cow::Borrowed(&Node::empty()).to_mut()),
Link::MutableNode(ref node, _) => Ok(node),
Link::SharedNode(ref rc) => Ok(rc),
Link::Stored(_) => unimplemented!("Link::Stored"),
}
}
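// Copy-on-write access: a `SharedNode` is cloned out of its `Rc` via
// `Rc::make_mut` and the link is rewritten to a `MutableNode` that keeps a
// handle to the shared original it was forked from.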
fn load_mut(link: &mut Link) -> Result<&mut Node, MastError> {
match link {
// Link::Empty => Ok(Cow::Borrowed(&Node::empty()).to_mut()),
Link::MutableNode(ref mut node, _) => Ok(node),
Link::SharedNode(ref mut rc) => {
let mutable = Rc::make_mut(rc).to_owned();
*link = Link::MutableNode(mutable, Some(rc.clone()));
if let Link::MutableNode(ref mut scopey, _) = link {
Ok(scopey)
} else {
panic!("asdf")
}
}
Link::Stored(_) => unimplemented!("Link::Stored"),
}
}
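// `load_mut` implements copy-on-write: a shared node is cloned out of its
// `Rc` before being handed out mutably, and the original `Rc` is kept next to
// the mutable copy. The same pattern in miniature, using only std (standalone
// sketch, not this crate's types):
//
//     use std::rc::Rc;
//     let mut shared = Rc::new(vec![1, 2, 3]);
//     let frozen = shared.clone();           // a second owner forces a real clone
//     Rc::make_mut(&mut shared).push(4);     // clones, then mutates the copy
//     assert_eq!(*shared, vec![1, 2, 3, 4]);
//     assert_eq!(*frozen, vec![1, 2, 3]);    // the shared original is untouched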
// struct NodeAndSlot<'a>(&'a mut Node<'a>, usize);
/*
struct FindOptions<'a> {
mast: &'a mut Mast<'a>,
target_layer: u8,
current_height: u8,
create_missing_nodes: bool,
node_path: Vec<&'a mut Node>,
link_path: Vec<usize>,
}
*/
impl Node {
fn new(branch_factor: usize) -> Node {
let mut link = Vec::with_capacity(branch_factor + 1);
link.push(None);
Node {
key: Vec::with_capacity(branch_factor),
value: Vec::with_capacity(branch_factor),
link,
dirty: false,
}
}
/*
fn follow(
&'a mut self,
index: usize,
create_ok: bool,
m: &'a mut Mast<'a>,
) -> std::result::Result<&'a mut Node<'a>, std::io::Error> {
if let Some(ref mut links) = self.link {
return Ok(m.load(&mut links[index])?);
        } else if !create_ok {
return Ok(self);
}
return Ok(&mut Node::empty());
}*/
fn insert(
&mut self,
key: i32,
value: i32,
distance: u8,
key_order: fn(&i32, &i32) -> i8,
) -> Result<InsertResult, MastError> {
let (equal, i) = get_index_for_key(&key, &self.key, key_order);
        if distance != 0 {
let mut z = self.link.get_mut(i).unwrap();
let child = match &mut z {
Some(ref mut link) => load_mut(link)?,
None => {
*z = Some(Link::MutableNode(Node::new(self.key.capacity()), None));
match &mut z {
Some(ref mut link) => load_mut(link)?,
None => panic!("can't load just-set link"),
}
}
};
let res = child.insert(key, value, distance - 1, key_order)?;
match res {
InsertResult::NoChange => (),
_ => self.dirty = true,
};
return Ok(res);
}
if equal {
if value == self.value[i] {
return Ok(InsertResult::NoChange);
}
self.value[i] = value;
self.dirty = true;
return Ok(InsertResult::Updated);
}
let (left_link, right_link) = match self.link.get_mut(i).unwrap() {
Some(ref mut link) => {
let child = load_mut(link)?;
split(child, &key, key_order)?
}
None => (None, None),
};
self.key.insert(i, key);
self.value.insert(i, value);
self.link[i] = right_link;
self.link.insert(i, left_link);
self.dirty = true;
return Ok(InsertResult::Inserted);
}
fn can_grow(
&self,
current_height: u8,
key_layer: fn(&i32, u16) -> u8,
branch_factor: u16,
) -> bool {
for key in &self.key {
if key_layer(key, branch_factor) > current_height {
return true;
}
}
return false;
}
fn grow(
&mut self,
current_height: u8,
key_layer: fn(&i32, u16) -> u8,
branch_factor: u16,
) -> Option<Link> {
let mut new_parent = Node::new(self.key.capacity());
        if !self.is_empty() {
for i in 0..self.key.len() {
let key = &self.key[i];
let layer = key_layer(key, branch_factor);
if layer <= current_height {
continue;
}
let new_left = self.extract(i);
new_parent.key.push(self.key[0]);
new_parent.value.push(self.value[0]);
new_parent.link.insert(new_parent.link.len() - 1, new_left);
}
}
let new_right = self.extract(self.key.len());
*new_parent.link.last_mut().unwrap() = new_right;
return new_parent.to_link();
}
fn extract(&mut self, end: usize) -> Option<Link> {
let mut node = Node::new(self.key.capacity());
node.key = self.key.drain(..end).collect();
node.key.reserve(self.key.capacity());
node.value = self.value.drain(..end).collect();
node.value.reserve(self.key.capacity());
node.link = self.link.drain(..=end).collect();
node.link.reserve(self.key.capacity() + 1);
self.link.insert(0, None);
return node.to_link();
}
fn to_link(self) -> Option<Link> {
if self.is_empty() {
return None;
}
return Some(Link::MutableNode(self, None));
}
fn is_empty(&self) -> bool {
return self.key.len() == 0
&& self.value.len() == 0
&& self.link.len() == 1
&& self.link[0].is_none();
}
}
#[derive(Debug)]
enum InsertResult {
Updated,
Inserted,
NoChange,
}
fn split(
node: &mut Node,
key: &i32,
key_order: fn(&i32, &i32) -> i8,
) -> Result<(Option<Link>, Option<Link>), MastError> {
let (equal, i) = get_index_for_key(key, &node.key, key_order);
if equal {
panic!("split not expecting existing key")
}
let mut left_node = Node::new(node.key.capacity());
let mut right_node = Node::new(node.key.capacity());
    let (left, right) = node.key.split_at(i);
    left_node.key.extend_from_slice(left);
    right_node.key.extend_from_slice(right);
    let (left, right) = node.value.split_at(i);
    left_node.value.extend_from_slice(left);
    // Values follow their keys: the right node receives the right half.
    right_node.value.extend_from_slice(right);
    let (left, right) = node.link.split_at(i + 1);
    left_node.link.remove(0);
    left_node.link.extend_from_slice(left);
    right_node.link.extend_from_slice(right);
// repartition left and right subtrees based on new key
if let Some(ref mut cur_left_max_link) = left_node.link[i] {
let left_max = load_mut(cur_left_max_link)?;
let (left_max_link, too_big_link) = split(left_max, key, key_order)?;
left_node.link[i] = left_max_link;
right_node.link[0] = too_big_link;
};
if let Some(ref mut cur_right_min_link) = right_node.link[0] {
let right_min = load_mut(cur_right_min_link)?;
let (too_small_link, right_min_link) = split(right_min, key, key_order)?;
if too_small_link.is_some() {
panic!("bad news!")
}
right_node.link[0] = right_min_link
};
return Ok((left_node.to_link(), right_node.to_link()));
}
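// `split` partitions a subtree around a key that is about to be inserted in a
// node above it: keys less than `key` end up reachable from the returned left
// link, keys greater from the right link, recursing down the boundary path.
// For example (illustrative values), splitting a node holding [1, 2, 8, 9]
// at key 5 yields a left node with [1, 2] and a right node with [8, 9].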
fn get_index_for_key(key: &i32, keys: &Vec<i32>, key_order: fn(&i32, &i32) -> i8) -> (bool, usize) {
match keys.binary_search_by(|x| {
let r = key_order(x, key);
if r < 0 {
Ordering::Less
} else if r > 0 {
Ordering::Greater
} else {
Ordering::Equal
}
}) {
Ok(n) => (true, n),
Err(n) => (false, n),
}
}
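// The `(found, index)` contract, with hypothetical values: for keys
// [10, 20, 30], searching 20 yields (true, 1), while searching 25 yields
// (false, 2) -- the slot where 25 would be inserted. The linear-scan version
// below is kept as a reference implementation.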
fn bad_get_index_for_key(
key: &i32,
keys: &Vec<i32>,
key_order: fn(&i32, &i32) -> i8,
) -> (bool, usize) {
let mut cmp: i8 = 1;
let mut i: usize = 0;
while i < keys.len() {
cmp = (key_order)(&keys[i], key);
if cmp >= 0 {
break;
};
i += 1
}
return (cmp == 0, i);
}
/*
fn findNode<'a>(key: i32, options: &mut FindOptions<'a>) -> std::result::Result<(), MastError> {
let mut cmp: i8 = 1;
let mut i: usize = 0;
let keyOrder = options.mast.keyOrder;
let mut node = options.node_path.last().unwrap();
unimplemented!();
while i < node.key.len() {
cmp = (keyOrder)(node.key[i], key);
if cmp >= 0 {
break;
}
i += 1
}
if cmp == 0 || options.current_height == options.target_layer {
return Ok(());
};
let child_link = match node.link {
None => return Err(MastError::InvalidNode),
Some(ref mut link) => link.get_mut(i).unwrap(),
};
let child = load(child_link)?;
options.current_height -= 1;
options.node_path.push(child);
options.link_path.push(i);
return findNode(key, options);
}*/
/*
trait NodeStore<'a> {
fn load(&mut self, link: &'a mut Link) -> Result<&'a mut Node, std::io::Error>;
}
struct InMemoryNodeStore<'a> {
map: std::collections::HashMap<String, Node>,
}
impl<'a> InMemoryNodeStore<'a> {
fn new() -> InMemoryNodeStore<'a> {
InMemoryNodeStore {
map: std::collections::HashMap::new(),
}
}
}
impl<'a> NodeStore<'a> for InMemoryNodeStore<'a> {
fn load(&mut self, link: &'a mut Link) -> Result<&'a mut Node, std::io::Error> {
match link {
// Link::Empty => Ok(Cow::Borrowed(&Node::empty()).to_mut()),
Link::MutableNode(ref mut cow) => Ok(cow.to_mut()),
}
}
}
*/
#[test]
fn test_insert_accessibility() -> std::result::Result<(), MastError> {
let mut t = Mast::newInMemory();
let n = 16 * 16 + 2;
for i in 0..n {
t.insert(i, i)?;
for i in 0..=i {
let v = t.get(&i)?;
assert_eq!(v, Some(&i))
}
}
Ok(())
}
#[test]
fn test_bench_insert() -> std::result::Result<(), MastError> {
let mut t = Mast::newInMemory();
let parts = 4;
let mut n = 16 * 16 * 16;
let mut i = 0;
let mut start = std::time::Instant::now();
for p in 0..parts {
while i < n {
t.insert(i, i)?;
i += 1;
}
let end = std::time::Instant::now();
let diff = end - start;
println!(
"part {}/{}: height:{}, {}/s ({}ns/op) size:{}",
p + 1,
parts,
//diff.as_micros(), // {}μs,
t.height,
1_000_000_000 / (diff.as_nanos() / t.size as u128),
diff.as_nanos() / t.size as u128,
t.size,
);
n *= 16;
start = end;
}
Ok(())
}
#[test]
fn test_int_layer() {
assert_eq!(default_layer(&-528, 16), 1);
assert_eq!(default_layer(&-513, 16), 0);
assert_eq!(default_layer(&-512, 16), 2);
assert_eq!(default_layer(&-256, 16), 2);
assert_eq!(default_layer(&-16, 16), 1);
assert_eq!(default_layer(&-1, 16), 0);
assert_eq!(default_layer(&0, 16), 0);
assert_eq!(default_layer(&1, 16), 0);
assert_eq!(default_layer(&16, 16), 1);
assert_eq!(default_layer(&32, 16), 1);
}
offset.rs
use std::alloc::{GlobalAlloc, System, Layout};
use std::borrow::Borrow;
use std::cmp;
use std::convert::TryInto;
use std::fmt;
use std::marker::PhantomData;
use std::mem::{self, ManuallyDrop};
use std::num::NonZeroU64;
use std::ptr::NonNull;
use std::hint::unreachable_unchecked;
use thiserror::Error;
use leint::Le;
use owned::{IntoOwned, Take};
use crate::pointee::Pointee;
use crate::refs::Ref;
use crate::blob::*;
use crate::load::*;
use crate::save::*;
use crate::scalar::*;
use crate::ptr::*;
use crate::pile::*;
use crate::heap::*;
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct Offset<'pile, 'version> {
marker: PhantomData<(
fn(&'pile ()) -> &'pile (),
&'version (),
)>,
raw: Le<NonZeroU64>,
}
unsafe impl Persist for Offset<'_, '_> {}
impl fmt::Debug for Offset<'_, '_> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.get().fmt(f)
}
}
#[derive(Debug, Error)]
#[error("invalid offset")]
#[non_exhaustive]
pub struct ValidateOffsetBlobError;
impl Scalar for Offset<'_, '_> {
const BLOB_LAYOUT: BlobLayout = BlobLayout::new_nonzero(mem::size_of::<Self>());
type ScalarBlobError = ValidateOffsetBlobError;
fn validate_blob<'a>(blob: Blob<'a, Self>) -> Result<ValidBlob<'a, Self>, Self::ScalarBlobError> {
let raw = u64::from_le_bytes(blob.as_bytes().try_into().unwrap());
if raw & 0b1 == 0b1 && (raw >> 1) < Offset::MAX as u64 {
unsafe { Ok(blob.assume_valid()) }
} else {
Err(ValidateOffsetBlobError)
}
}
fn decode_blob<'a>(blob: ValidBlob<'a, Self>) -> Self {
blob.as_value().clone()
}
fn try_deref_blob<'a>(blob: ValidBlob<'a, Self>) -> Result<&'a Self, ValidBlob<'a, Self>> {
Ok(blob.as_value())
}
fn encode_blob<W: WriteBlob>(&self, dst: W) -> Result<W::Ok, W::Error> {
todo!()
}
}
impl AsPtrImpl<Self> for Offset<'_, '_> {
fn as_ptr_impl(this: &Self) -> &Self {
this
}
}
impl<'p, 'v> PersistPtr for Offset<'p, 'v> {
    type Zone = !;
type BlobZone = TryPile<'p, 'v>;
}
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct OffsetMut<'p, 'v, A = System> {
marker: PhantomData<A>,
inner: Offset<'p, 'v>,
}
unsafe impl Persist for OffsetMut<'_, '_> {}
impl fmt::Debug for OffsetMut<'_, '_> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.kind().fmt(f)
}
}
unsafe impl ValidateBlob for OffsetMut<'_, '_> {
type BlobError = ValidateOffsetBlobError;
    fn try_blob_layout(_: ()) -> Result<BlobLayout, !> {
Ok(BlobLayout::new_nonzero(mem::size_of::<Self>()))
}
fn validate_blob<'a>(blob: Blob<'a, Self>, ignore_padding: bool) -> Result<ValidBlob<'a, Self>, Self::BlobError> {
let mut fields = blob.validate_fields(ignore_padding);
fields.validate_blob::<Offset>()?;
unsafe { Ok(fields.finish()) }
}
}
impl Load for OffsetMut<'_, '_> {
    type Ptr = !;
fn decode_blob(blob: ValidBlob<Self>, _: &<Self::Ptr as Ptr>::BlobZone) -> Self {
blob.as_value().clone()
}
fn try_deref_blob<'a>(blob: ValidBlob<'a, Self>, _: &()) -> Result<&'a Self, ValidBlob<'a, Self>> {
Ok(blob.as_value())
}
}
impl AsPtrImpl<Self> for OffsetMut<'_, '_> {
fn as_ptr_impl(this: &Self) -> &Self {
this
}
}
/*
impl<'p, 'v, A> Borrow<OffsetMut<'p, 'v, A>> for Offset<'p, 'v> {
#[inline(always)]
fn borrow(&self) -> &OffsetMut<'p, 'v, A> {
self.as_ref()
}
}
impl<'p, 'v, A> AsRef<OffsetMut<'p, 'v, A>> for Offset<'p, 'v> {
#[inline(always)]
fn as_ref(&self) -> &OffsetMut<'p, 'v, A> {
// SAFETY: #[repr(transparent)]
unsafe { &*(self as *const Self as *const _) }
}
}
*/
impl<'p, 'v> From<Offset<'p, 'v>> for usize {
fn from(offset: Offset<'p, 'v>) -> usize {
offset.get()
}
}
impl<'p, 'v> From<Offset<'p, 'v>> for OffsetMut<'p, 'v> {
fn from(inner: Offset<'p, 'v>) -> Self {
Self {
marker: PhantomData,
inner,
}
}
}
impl cmp::PartialEq<usize> for Offset<'_, '_> {
fn eq(&self, other: &usize) -> bool {
self.get() == *other
}
}
impl cmp::PartialEq<Offset<'_, '_>> for usize {
fn eq(&self, other: &Offset<'_, '_>) -> bool {
*self == other.get()
}
}
impl<'p, 'v> Offset<'p, 'v> {
/// The largest `Offset`.
pub const MAX: usize = (1 << 62) - 1;
/// Creates a new `Offset`.
///
/// Returns `None` if the offset is out of range:
///
/// ```
/// use hoard::offset::Offset;
///
/// assert!(Offset::new(Offset::MAX + 1)
/// .is_none());
/// ```
///
/// # Examples
///
/// Zero is a valid offset:
///
/// ```
/// use hoard::offset::Offset;
///
/// Offset::new(0).unwrap();
/// ```
pub fn new(offset: usize) -> Option<Self> {
if offset <= Self::MAX {
let offset = offset as u64;
Some(offset.checked_shl(1).map(|offset|
Self {
marker: PhantomData,
raw: NonZeroU64::new(offset | 1).unwrap().into(),
}
).unwrap())
} else {
None
}
}
/// Casts the `Offset` to a different lifetime.
///
/// This is *safe* because an offset by itself has no guarantees associated with it.
#[inline(always)]
pub fn cast<'p2, 'v2>(&self) -> Offset<'p2, 'v2> {
Offset {
marker: PhantomData,
raw: self.raw,
}
}
/// Gets the offset as a `usize`.
///
/// # Examples
///
/// ```
/// use hoard::offset::Offset;
///
/// assert_eq!(Offset::new(0).unwrap().get(), 0);
/// assert_eq!(Offset::new(1).unwrap().get(), 1);
/// ```
#[inline(always)]
pub fn get(&self) -> usize {
(self.raw.get().get() >> 1) as usize
}
/// Creates a dangling `Offset`.
///
/// # Examples
///
/// ```
/// use hoard::offset::Offset;
///
/// assert_eq!(Offset::dangling().get(), Offset::MAX);
/// ```
#[inline(always)]
pub fn dangling() -> Self {
Self::new(Self::MAX).unwrap()
}
/// Erases the lifetime of an `Offset`.
    pub fn to_static(&self) -> Offset<'static, 'static> {
Offset {
marker: PhantomData,
raw: self.raw,
}
}
}
/// Enum for the kinds of `OffsetMut`.
#[derive(Debug)]
pub enum Kind<'p, 'v> {
/// An unmodified `Offset`.
Offset(Offset<'p, 'v>),
/// A pointer to something in the heap.
Ptr(HeapPtr),
}
impl<'p, 'v, A> OffsetMut<'p, 'v, A> {
/// Create an `OffsetMut` from a pointer.
///
/// Returns `None` if the alignment is incorrect.
#[inline]
    pub fn from_ptr(ptr: NonNull<u16>) -> Option<Self> {
        let raw = ptr.as_ptr() as usize as u64;
        // `kind()` reads the low bit: odd means a clean `Offset`, even means a
        // heap pointer, so a properly aligned (even) pointer is the valid case.
        if raw & 1 == 0 {
            unsafe { Some(mem::transmute(raw)) }
        } else {
            None
        }
    }
/// Creates an `OffsetMut` from a pointer without checking the alignment.
///
/// # Safety
///
/// The pointer must be properly aligned.
#[inline]
pub unsafe fn from_ptr_unchecked(ptr: NonNull<u16>) -> Self {
match Self::from_ptr(ptr) {
Some(this) => this,
None => {
unreachable_unchecked()
}
}
}
/// Returns the kind of offset.
pub fn kind(&self) -> Kind<'p, 'v> {
if self.inner.raw.get().get() & 1 == 1 {
Kind::Offset(self.inner)
} else {
Kind::Ptr(unsafe { mem::transmute(self.inner) })
}
}
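    // The discriminant is the low bit of the word: `Offset::new` stores
    // `(offset << 1) | 1`, so clean offsets are always odd, while heap
    // pointers (aligned to at least 2 bytes) are always even. The encoding in
    // plain arithmetic (illustrative only):
    //
    //     let raw = (5u64 << 1) | 1; // offset 5 encodes as 11
    //     assert_eq!(raw & 1, 1);    // odd => Kind::Offset
    //     assert_eq!(raw >> 1, 5);   // matches `Offset::get`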
/// Gets the `Offset` from a clean `OffsetMut`.
#[inline(always)]
pub fn get_offset(&self) -> Option<Offset<'p, 'v>> {
match self.kind() {
Kind::Offset(offset) => Some(offset),
Kind::Ptr(_) => None,
}
}
/// Gets the pointer from a dirty `OffsetMut`.
#[inline(always)]
pub fn get_ptr(&self) -> Option<HeapPtr> {
match self.kind() {
Kind::Ptr(ptr) => Some(ptr),
Kind::Offset(_) => None,
}
}
}
/*
impl<'p, 'v, A> AsPtr<OffsetMut<'p, 'v, A>> for HeapPtr {
#[inline(always)]
fn as_ptr(&self) -> &OffsetMut<'p, 'v, A> {
static_assertions::assert_eq_size!(OffsetMut, HeapPtr);
unsafe {
&*(self as *const _ as *const _)
}
}
}
*/
impl<'p, 'v> Ptr for OffsetMut<'p, 'v> {
type Zone = TryPile<'p, 'v>;
type BlobZone = TryPile<'p, 'v>;
type Persist = Offset<'p, 'v>;
    unsafe fn dealloc<T: ?Sized + Pointee>(&self, metadata: T::Metadata) {
match self.kind() {
Kind::Offset(_) => {},
Kind::Ptr(heap_ptr) => heap_ptr.dealloc::<T>(metadata),
}
}
    unsafe fn try_get_dirty_unchecked<T: ?Sized + Pointee>(&self, metadata: T::Metadata) -> Result<&T, Self::Persist> {
match self.kind() {
Kind::Ptr(ptr) => {
todo!()
},
Kind::Offset(offset) => Err(offset),
}
}
}
impl<'p, 'v> Default for OffsetMut<'p, 'v> {
fn default() -> Self {
Offset::dangling().into()
}
}
#[derive(Debug, Default)]
pub struct ShallowDumper<'p, 'v> {
marker: PhantomData<OffsetMut<'p, 'v>>,
written: Vec<u8>,
initial_offset: usize,
}
impl<'p, 'v> Saver for ShallowDumper<'p, 'v> {
type SrcPtr = OffsetMut<'p, 'v>;
type DstPtr = Offset<'p, 'v>;
    type Error = !;
    fn try_save_raw<R, T: ?Sized + ValidateBlob>(&self,
ptr: &Offset<'p, 'v>,
_metadata: T::Metadata,
_f: impl FnOnce(ValidBlob<T>, &<Self::SrcPtr as Ptr>::BlobZone) -> R,
) -> Result<Result<<Self::DstPtr as Ptr>::Persist, R>,
Self::Error>
{
Ok(Ok(*ptr))
}
fn finish_save<T>(&mut self, value_poll: &T) -> Result<Offset<'p, 'v>, Self::Error>
where T: EncodeBlob
{
let offset = self.initial_offset
.checked_add(self.written.len())
.and_then(Offset::new)
.expect("overflow");
let written = mem::replace(&mut self.written, vec![]);
self.written = value_poll.encode_blob(written).into_ok();
Ok(offset)
}
}
impl<'p, 'v> ShallowDumper<'p, 'v> {
pub fn new(initial_offset: usize) -> Self {
Self {
marker: PhantomData,
written: vec![],
initial_offset,
}
}
pub fn from_buf(buf: impl Into<Vec<u8>>) -> Self {
Self {
marker: PhantomData,
initial_offset: 0,
written: buf.into(),
}
}
    pub fn save<T: ?Sized>(mut self, value: &T) -> (Vec<u8>, Offset<'p, 'v>)
where T: SavePtr<OffsetMut<'p, 'v>, Offset<'p, 'v>>
{
let mut encoder = value.init_save_ptr();
encoder.save_poll(&mut self).into_ok();
let offset = self.finish_save(&encoder).into_ok();
(self.written, offset)
}
}
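// `ShallowDumper` lays values out contiguously: each `finish_save` appends the
// encoding to `written` and returns the offset where it began. The bookkeeping
// in miniature (plain arithmetic mirroring `finish_save`, illustrative only):
//
//     let initial = 0usize;
//     let mut written: Vec<u8> = vec![];
//     let first = initial + written.len();  // 0: offset of the first value
//     written.extend_from_slice(&[42]);     // pretend-encoding of 42u8
//     let second = initial + written.len(); // 1: the next value starts here
//     assert_eq!((first, second), (0, 1));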
#[cfg(test)]
mod tests {
use super::*;
use crate::bag::Bag;
#[test]
fn test_shallow_dumper() {
        let (_buf, _offset) = ShallowDumper::new(0).save(&42u8);
//assert_eq!(offset, 0);
//assert_eq!(buf, &[42]);
/*
let own = OffsetMut::alloc(42u8);
let (buf, offset) = ShallowDumper::new(0).save(&own);
assert_eq!(offset, 1);
assert_eq!(buf, &[42, 1,0,0,0,0,0,0,0]);
let own2 = OffsetMut::alloc(own);
let (buf, offset) = ShallowDumper::new(0).save(&own2);
assert_eq!(offset, 9);
assert_eq!(buf,
&[42,
1,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,
]);
*/
}
}
offset.rs | use std::alloc::{GlobalAlloc, System, Layout};
use std::borrow::Borrow;
use std::cmp;
use std::convert::TryInto;
use std::fmt;
use std::marker::PhantomData;
use std::mem::{self, ManuallyDrop};
use std::num::NonZeroU64;
use std::ptr::NonNull;
use std::hint::unreachable_unchecked;
use thiserror::Error;
use leint::Le;
use owned::{IntoOwned, Take};
use crate::pointee::Pointee;
use crate::refs::Ref;
use crate::blob::*;
use crate::load::*;
use crate::save::*;
use crate::scalar::*;
use crate::ptr::*;
use crate::pile::*;
use crate::heap::*;
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct Offset<'pile,'version> {
marker: PhantomData<(
fn(&'pile ()) -> &'pile (),
&'version (),
)>,
raw: Le<NonZeroU64>,
}
unsafe impl Persist for Offset<'_, '_> {}
impl fmt::Debug for Offset<'_, '_> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.get().fmt(f)
}
}
#[derive(Debug, Error)]
#[error("invalid offset")]
#[non_exhaustive]
pub struct ValidateOffsetBlobError;
impl Scalar for Offset<'_, '_> {
const BLOB_LAYOUT: BlobLayout = BlobLayout::new_nonzero(mem::size_of::<Self>());
type ScalarBlobError = ValidateOffsetBlobError;
fn validate_blob<'a>(blob: Blob<'a, Self>) -> Result<ValidBlob<'a, Self>, Self::ScalarBlobError> {
let raw = u64::from_le_bytes(blob.as_bytes().try_into().unwrap());
if raw & 0b1 == 0b1 && (raw >> 1) < Offset::MAX as u64 {
unsafe { Ok(blob.assume_valid()) }
} else {
Err(ValidateOffsetBlobError)
}
}
fn decode_blob<'a>(blob: ValidBlob<'a, Self>) -> Self {
blob.as_value().clone()
}
fn try_deref_blob<'a>(blob: ValidBlob<'a, Self>) -> Result<&'a Self, ValidBlob<'a, Self>> {
Ok(blob.as_value())
}
fn encode_blob<W: WriteBlob>(&self, dst: W) -> Result<W::Ok, W::Error> {
todo!()
}
}
impl AsPtrImpl<Self> for Offset<'_, '_> {
fn as_ptr_impl(this: &Self) -> &Self {
this
}
}
impl<'p, 'v> PersistPtr for Offset<'p, 'v> {
type Zone =!;
type BlobZone = TryPile<'p, 'v>;
}
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct OffsetMut<'p, 'v, A = System> {
marker: PhantomData<A>,
inner: Offset<'p, 'v>,
}
unsafe impl Persist for OffsetMut<'_, '_> {}
impl fmt::Debug for OffsetMut<'_, '_> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.kind().fmt(f)
}
}
unsafe impl ValidateBlob for OffsetMut<'_, '_> {
type BlobError = ValidateOffsetBlobError;
fn try_blob_layout(_: ()) -> Result<BlobLayout,!> {
Ok(BlobLayout::new_nonzero(mem::size_of::<Self>()))
}
fn validate_blob<'a>(blob: Blob<'a, Self>, ignore_padding: bool) -> Result<ValidBlob<'a, Self>, Self::BlobError> {
let mut fields = blob.validate_fields(ignore_padding);
fields.validate_blob::<Offset>()?;
unsafe { Ok(fields.finish()) }
}
}
impl Load for OffsetMut<'_, '_> {
type Ptr =!;
fn decode_blob(blob: ValidBlob<Self>, _: &<Self::Ptr as Ptr>::BlobZone) -> Self {
blob.as_value().clone()
}
fn try_deref_blob<'a>(blob: ValidBlob<'a, Self>, _: &()) -> Result<&'a Self, ValidBlob<'a, Self>> {
Ok(blob.as_value())
}
}
impl AsPtrImpl<Self> for OffsetMut<'_, '_> {
fn as_ptr_impl(this: &Self) -> &Self {
this
}
}
/*
impl<'p, 'v, A> Borrow<OffsetMut<'p, 'v, A>> for Offset<'p, 'v> {
#[inline(always)]
fn borrow(&self) -> &OffsetMut<'p, 'v, A> {
self.as_ref()
}
}
impl<'p, 'v, A> AsRef<OffsetMut<'p, 'v, A>> for Offset<'p, 'v> {
#[inline(always)]
fn as_ref(&self) -> &OffsetMut<'p, 'v, A> {
// SAFETY: #[repr(transparent)]
unsafe { &*(self as *const Self as *const _) }
}
}
*/
impl<'p, 'v> From<Offset<'p, 'v>> for usize {
fn from(offset: Offset<'p, 'v>) -> usize {
offset.get()
}
}
impl<'p, 'v> From<Offset<'p, 'v>> for OffsetMut<'p, 'v> {
fn from(inner: Offset<'p, 'v>) -> Self {
Self {
marker: PhantomData,
inner,
}
}
}
impl cmp::PartialEq<usize> for Offset<'_, '_> {
fn eq(&self, other: &usize) -> bool {
self.get() == *other
}
}
impl cmp::PartialEq<Offset<'_, '_>> for usize {
fn eq(&self, other: &Offset<'_, '_>) -> bool {
*self == other.get()
}
}
impl<'p, 'v> Offset<'p, 'v> {
/// The largest `Offset`.
pub const MAX: usize = (1 << 62) - 1;
/// Creates a new `Offset`.
///
/// Returns `None` if the offset is out of range:
///
/// ```
/// use hoard::offset::Offset;
///
/// assert!(Offset::new(Offset::MAX + 1)
/// .is_none());
/// ```
///
/// # Examples
///
/// Zero is a valid offset:
///
/// ```
/// use hoard::offset::Offset;
///
/// Offset::new(0).unwrap();
/// ```
pub fn new(offset: usize) -> Option<Self> {
if offset <= Self::MAX {
let offset = offset as u64;
Some(offset.checked_shl(1).map(|offset|
Self {
marker: PhantomData,
raw: NonZeroU64::new(offset | 1).unwrap().into(),
}
).unwrap())
} else {
None
}
}
/// Casts the `Offset` to a different lifetime.
///
/// This is *safe* because an offset by itself has no guarantees associated with it.
#[inline(always)]
pub fn cast<'p2, 'v2>(&self) -> Offset<'p2, 'v2> {
Offset {
marker: PhantomData,
raw: self.raw,
}
}
/// Gets the offset as a `usize`.
///
/// # Examples
///
/// ```
/// use hoard::offset::Offset;
///
/// assert_eq!(Offset::new(0).unwrap().get(), 0);
/// assert_eq!(Offset::new(1).unwrap().get(), 1);
/// ```
#[inline(always)]
pub fn get(&self) -> usize {
(self.raw.get().get() >> 1) as usize
}
/// Creates a dangling `Offset`.
///
/// # Examples
///
/// ```
/// use hoard::offset::Offset;
///
/// assert_eq!(Offset::dangling().get(), Offset::MAX);
/// ```
#[inline(always)]
pub fn dangling() -> Self {
Self::new(Self::MAX).unwrap()
}
/// Erases the lifetime of an `Offset`.
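///
/// Erasing the lifetimes preserves the stored value:
///
/// ```
/// use hoard::offset::Offset;
///
/// let offset = Offset::new(42).unwrap();
/// assert_eq!(offset.to_static().get(), 42);
/// ```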
pub fn to_static(&self) -> Offset<'static, 'static> {
Offset {
marker: PhantomData,
raw: self.raw,
}
}
}
/// Enum for the kinds of `OffsetMut`.
#[derive(Debug)]
pub enum Kind<'p, 'v> {
/// An unmodified `Offset`.
Offset(Offset<'p, 'v>),
/// A pointer to something in the heap.
Ptr(HeapPtr),
}
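// `kind()` below discriminates the variants by the low bit of the shared word:
// set means a clean `Offset`, clear means a dirty in-memory `HeapPtr`.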
impl<'p, 'v, A> OffsetMut<'p, 'v, A> {
/// Creates an `OffsetMut` from a pointer.
///
/// Returns `None` if the alignment is incorrect.
#[inline]
pub fn from_ptr(ptr: NonNull<u16>) -> Option<Self> {
let raw = ptr.as_ptr() as usize as u64;
// A heap pointer must have its low bit clear: bit 1 is the tag that
// `kind()` reserves for clean offsets, and a `*mut u16` with the low bit
// set would be misaligned anyway.
if raw & 1 == 0 {
unsafe { Some(mem::transmute(raw)) }
} else {
None
}
}
/// Creates an `OffsetMut` from a pointer without checking the alignment.
///
/// # Safety
///
/// The pointer must be properly aligned.
#[inline]
pub unsafe fn from_ptr_unchecked(ptr: NonNull<u16>) -> Self {
match Self::from_ptr(ptr) {
Some(this) => this,
None => {
unreachable_unchecked()
}
}
}
/// Returns the kind of offset.
pub fn kind(&self) -> Kind<'p, 'v> {
if self.inner.raw.get().get() & 1 == 1 {
Kind::Offset(self.inner)
} else |
}
/// Gets the `Offset` from a clean `OffsetMut`.
#[inline(always)]
pub fn get_offset(&self) -> Option<Offset<'p, 'v>> {
match self.kind() {
Kind::Offset(offset) => Some(offset),
Kind::Ptr(_) => None,
}
}
/// Gets the pointer from a dirty `OffsetMut`.
#[inline(always)]
pub fn get_ptr(&self) -> Option<HeapPtr> {
match self.kind() {
Kind::Ptr(ptr) => Some(ptr),
Kind::Offset(_) => None,
}
}
}
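// Typical discrimination pattern (sketch; `p` is any `OffsetMut`):
//
//     match p.kind() {
//         Kind::Offset(clean) => { /* resolve against the backing pile */ }
//         Kind::Ptr(dirty) => { /* follow the heap allocation */ }
//     }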
/*
impl<'p, 'v, A> AsPtr<OffsetMut<'p, 'v, A>> for HeapPtr {
#[inline(always)]
fn as_ptr(&self) -> &OffsetMut<'p, 'v, A> {
static_assertions::assert_eq_size!(OffsetMut, HeapPtr);
unsafe {
&*(self as *const _ as *const _)
}
}
}
*/
impl<'p, 'v> Ptr for OffsetMut<'p, 'v> {
type Zone = TryPile<'p, 'v>;
type BlobZone = TryPile<'p, 'v>;
type Persist = Offset<'p, 'v>;
unsafe fn dealloc<T: ?Sized + Pointee>(&self, metadata: T::Metadata) {
match self.kind() {
Kind::Offset(_) => {},
Kind::Ptr(heap_ptr) => heap_ptr.dealloc::<T>(metadata),
}
}
unsafe fn try_get_dirty_unchecked<T: ?Sized + Pointee>(&self, metadata: T::Metadata) -> Result<&T, Self::Persist> {
match self.kind() {
Kind::Ptr(ptr) => {
todo!()
},
Kind::Offset(offset) => Err(offset),
}
}
}
impl<'p, 'v> Default for OffsetMut<'p, 'v> {
fn default() -> Self {
Offset::dangling().into()
}
}
#[derive(Debug, Default)]
pub struct ShallowDumper<'p, 'v> {
marker: PhantomData<OffsetMut<'p, 'v>>,
written: Vec<u8>,
initial_offset: usize,
}
impl<'p, 'v> Saver for ShallowDumper<'p, 'v> {
type SrcPtr = OffsetMut<'p, 'v>;
type DstPtr = Offset<'p, 'v>;
type Error = !;
fn try_save_raw<R, T: ?Sized + ValidateBlob>(&self,
ptr: &Offset<'p, 'v>,
_metadata: T::Metadata,
_f: impl FnOnce(ValidBlob<T>, &<Self::SrcPtr as Ptr>::BlobZone) -> R,
) -> Result<Result<<Self::DstPtr as Ptr>::Persist, R>,
Self::Error>
{
Ok(Ok(*ptr))
}
fn finish_save<T>(&mut self, value_poll: &T) -> Result<Offset<'p, 'v>, Self::Error>
where T: EncodeBlob
{
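// The value is appended after everything already buffered, so its final
// position is the starting offset plus the number of bytes written so far.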
let offset = self.initial_offset
.checked_add(self.written.len())
.and_then(Offset::new)
.expect("overflow");
let written = mem::replace(&mut self.written, vec![]);
self.written = value_poll.encode_blob(written).into_ok();
Ok(offset)
}
}
impl<'p, 'v> ShallowDumper<'p, 'v> {
pub fn new(initial_offset: usize) -> Self {
Self {
marker: PhantomData,
written: vec![],
initial_offset,
}
}
pub fn from_buf(buf: impl Into<Vec<u8>>) -> Self {
Self {
marker: PhantomData,
initial_offset: 0,
written: buf.into(),
}
}
pub fn save<T: ?Sized>(mut self, value: &T) -> (Vec<u8>, Offset<'p, 'v>)
where T: SavePtr<OffsetMut<'p, 'v>, Offset<'p, 'v>>
{
let mut encoder = value.init_save_ptr();
encoder.save_poll(&mut self).into_ok();
let offset = self.finish_save(&encoder).into_ok();
(self.written, offset)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::bag::Bag;
#[test]
fn test_shallow_dumper() {
let (buf, offset) = ShallowDumper::new(0).save(&42u8);
//assert_eq!(offset, 0);
//assert_eq!(buf, &[42]);
/*
let own = OffsetMut::alloc(42u8);
let (buf, offset) = ShallowDumper::new(0).save(&own);
assert_eq!(offset, 1);
assert_eq!(buf, &[42, 1,0,0,0,0,0,0,0]);
let own2 = OffsetMut::alloc(own);
let (buf, offset) = ShallowDumper::new(0).save(&own2);
assert_eq!(offset, 9);
assert_eq!(buf,
&[42,
1,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,
]);
*/
}
}
| {
Kind::Ptr(unsafe { mem::transmute(self.inner) })
} | conditional_block |
offset.rs | use std::alloc::{GlobalAlloc, System, Layout};
use std::borrow::Borrow;
use std::cmp;
use std::convert::TryInto;
use std::fmt;
use std::marker::PhantomData;
use std::mem::{self, ManuallyDrop};
use std::num::NonZeroU64;
use std::ptr::NonNull;
use std::hint::unreachable_unchecked;
use thiserror::Error;
use leint::Le;
use owned::{IntoOwned, Take};
use crate::pointee::Pointee;
use crate::refs::Ref;
use crate::blob::*;
use crate::load::*;
use crate::save::*;
use crate::scalar::*;
use crate::ptr::*;
use crate::pile::*;
use crate::heap::*;
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct Offset<'pile, 'version> {
marker: PhantomData<(
fn(&'pile ()) -> &'pile (),
&'version (),
)>,
raw: Le<NonZeroU64>,
}
unsafe impl Persist for Offset<'_, '_> {}
impl fmt::Debug for Offset<'_, '_> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.get().fmt(f)
}
}
#[derive(Debug, Error)]
#[error("invalid offset")]
#[non_exhaustive]
pub struct ValidateOffsetBlobError;
impl Scalar for Offset<'_, '_> {
const BLOB_LAYOUT: BlobLayout = BlobLayout::new_nonzero(mem::size_of::<Self>());
type ScalarBlobError = ValidateOffsetBlobError;
fn validate_blob<'a>(blob: Blob<'a, Self>) -> Result<ValidBlob<'a, Self>, Self::ScalarBlobError> {
let raw = u64::from_le_bytes(blob.as_bytes().try_into().unwrap());
if raw & 0b1 == 0b1 && (raw >> 1) < Offset::MAX as u64 {
unsafe { Ok(blob.assume_valid()) }
} else {
Err(ValidateOffsetBlobError)
}
}
fn decode_blob<'a>(blob: ValidBlob<'a, Self>) -> Self {
blob.as_value().clone()
}
fn try_deref_blob<'a>(blob: ValidBlob<'a, Self>) -> Result<&'a Self, ValidBlob<'a, Self>> {
Ok(blob.as_value())
}
fn encode_blob<W: WriteBlob>(&self, dst: W) -> Result<W::Ok, W::Error> {
todo!()
}
}
impl AsPtrImpl<Self> for Offset<'_, '_> {
fn as_ptr_impl(this: &Self) -> &Self {
this
}
}
impl<'p, 'v> PersistPtr for Offset<'p, 'v> {
type Zone = !;
type BlobZone = TryPile<'p, 'v>;
}
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct OffsetMut<'p, 'v, A = System> {
marker: PhantomData<A>,
inner: Offset<'p, 'v>,
}
unsafe impl Persist for OffsetMut<'_, '_> {}
impl fmt::Debug for OffsetMut<'_, '_> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.kind().fmt(f)
}
}
unsafe impl ValidateBlob for OffsetMut<'_, '_> {
type BlobError = ValidateOffsetBlobError;
fn try_blob_layout(_: ()) -> Result<BlobLayout, !> {
Ok(BlobLayout::new_nonzero(mem::size_of::<Self>()))
}
fn validate_blob<'a>(blob: Blob<'a, Self>, ignore_padding: bool) -> Result<ValidBlob<'a, Self>, Self::BlobError> {
let mut fields = blob.validate_fields(ignore_padding);
fields.validate_blob::<Offset>()?;
unsafe { Ok(fields.finish()) }
}
}
impl Load for OffsetMut<'_, '_> {
type Ptr = !;
fn decode_blob(blob: ValidBlob<Self>, _: &<Self::Ptr as Ptr>::BlobZone) -> Self {
blob.as_value().clone()
}
fn try_deref_blob<'a>(blob: ValidBlob<'a, Self>, _: &()) -> Result<&'a Self, ValidBlob<'a, Self>> {
Ok(blob.as_value())
}
}
impl AsPtrImpl<Self> for OffsetMut<'_, '_> {
fn as_ptr_impl(this: &Self) -> &Self {
this
}
}
/*
impl<'p, 'v, A> Borrow<OffsetMut<'p, 'v, A>> for Offset<'p, 'v> {
#[inline(always)]
fn borrow(&self) -> &OffsetMut<'p, 'v, A> {
self.as_ref()
}
}
impl<'p, 'v, A> AsRef<OffsetMut<'p, 'v, A>> for Offset<'p, 'v> {
#[inline(always)] | // SAFETY: #[repr(transparent)]
unsafe { &*(self as *const Self as *const _) }
}
}
*/
impl<'p, 'v> From<Offset<'p, 'v>> for usize {
fn from(offset: Offset<'p, 'v>) -> usize {
offset.get()
}
}
impl<'p, 'v> From<Offset<'p, 'v>> for OffsetMut<'p, 'v> {
fn from(inner: Offset<'p, 'v>) -> Self {
Self {
marker: PhantomData,
inner,
}
}
}
impl cmp::PartialEq<usize> for Offset<'_, '_> {
fn eq(&self, other: &usize) -> bool {
self.get() == *other
}
}
impl cmp::PartialEq<Offset<'_, '_>> for usize {
fn eq(&self, other: &Offset<'_, '_>) -> bool {
*self == other.get()
}
}
impl<'p, 'v> Offset<'p, 'v> {
/// The largest `Offset`.
pub const MAX: usize = (1 << 62) - 1;
/// Creates a new `Offset`.
///
/// Returns `None` if the offset is out of range:
///
/// ```
/// use hoard::offset::Offset;
///
/// assert!(Offset::new(Offset::MAX + 1)
/// .is_none());
/// ```
///
/// # Examples
///
/// Zero is a valid offset:
///
/// ```
/// use hoard::offset::Offset;
///
/// Offset::new(0).unwrap();
/// ```
pub fn new(offset: usize) -> Option<Self> {
if offset <= Self::MAX {
let offset = offset as u64;
Some(offset.checked_shl(1).map(|offset|
Self {
marker: PhantomData,
raw: NonZeroU64::new(offset | 1).unwrap().into(),
}
).unwrap())
} else {
None
}
}
/// Casts the `Offset` to a different lifetime.
///
/// This is *safe* because an offset by itself has no guarantees associated with it.
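///
/// A minimal sketch of re-casting the lifetimes:
///
/// ```
/// use hoard::offset::Offset;
///
/// fn recast<'p2, 'v2>(offset: &Offset<'_, '_>) -> Offset<'p2, 'v2> {
///     offset.cast()
/// }
/// ```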
#[inline(always)]
pub fn cast<'p2, 'v2>(&self) -> Offset<'p2, 'v2> {
Offset {
marker: PhantomData,
raw: self.raw,
}
}
/// Gets the offset as a `usize`.
///
/// # Examples
///
/// ```
/// use hoard::offset::Offset;
///
/// assert_eq!(Offset::new(0).unwrap().get(), 0);
/// assert_eq!(Offset::new(1).unwrap().get(), 1);
/// ```
#[inline(always)]
pub fn get(&self) -> usize {
(self.raw.get().get() >> 1) as usize
}
/// Creates a dangling `Offset`.
///
/// # Examples
///
/// ```
/// use hoard::offset::Offset;
///
/// assert_eq!(Offset::dangling().get(), Offset::MAX);
/// ```
#[inline(always)]
pub fn dangling() -> Self {
Self::new(Self::MAX).unwrap()
}
/// Erases the lifetime of an `Offset`.
pub fn to_static(&self) -> Offset<'static, 'static> {
Offset {
marker: PhantomData,
raw: self.raw,
}
}
}
/// Enum for the kinds of `OffsetMut`.
#[derive(Debug)]
pub enum Kind<'p, 'v> {
/// An unmodified `Offset`.
Offset(Offset<'p, 'v>),
/// A pointer to something in the heap.
Ptr(HeapPtr),
}
impl<'p, 'v, A> OffsetMut<'p, 'v, A> {
/// Creates an `OffsetMut` from a pointer.
///
/// Returns `None` if the alignment is incorrect.
#[inline]
pub fn from_ptr(ptr: NonNull<u16>) -> Option<Self> {
let raw = ptr.as_ptr() as usize as u64;
// A heap pointer must have its low bit clear: bit 1 is the tag that
// `kind()` reserves for clean offsets, and a `*mut u16` with the low bit
// set would be misaligned anyway.
if raw & 1 == 0 {
unsafe { Some(mem::transmute(raw)) }
} else {
None
}
}
/// Creates an `OffsetMut` from a pointer without checking the alignment.
///
/// # Safety
///
/// The pointer must be properly aligned.
#[inline]
pub unsafe fn from_ptr_unchecked(ptr: NonNull<u16>) -> Self {
match Self::from_ptr(ptr) {
Some(this) => this,
None => {
unreachable_unchecked()
}
}
}
/// Returns the kind of offset.
pub fn kind(&self) -> Kind<'p, 'v> {
if self.inner.raw.get().get() & 1 == 1 {
Kind::Offset(self.inner)
} else {
Kind::Ptr(unsafe { mem::transmute(self.inner) })
}
}
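// SAFETY of the transmute above: with the low bit clear the word holds an
// untagged pointer, and `HeapPtr` has the same size as this wrapper (see the
// `assert_eq_size!` in the commented-out `AsPtr` impl below).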
/// Gets the `Offset` from a clean `OffsetMut`.
#[inline(always)]
pub fn get_offset(&self) -> Option<Offset<'p, 'v>> {
match self.kind() {
Kind::Offset(offset) => Some(offset),
Kind::Ptr(_) => None,
}
}
/// Gets the pointer from a dirty `OffsetMut`.
#[inline(always)]
pub fn get_ptr(&self) -> Option<HeapPtr> {
match self.kind() {
Kind::Ptr(ptr) => Some(ptr),
Kind::Offset(_) => None,
}
}
}
/*
impl<'p, 'v, A> AsPtr<OffsetMut<'p, 'v, A>> for HeapPtr {
#[inline(always)]
fn as_ptr(&self) -> &OffsetMut<'p, 'v, A> {
static_assertions::assert_eq_size!(OffsetMut, HeapPtr);
unsafe {
&*(self as *const _ as *const _)
}
}
}
*/
impl<'p, 'v> Ptr for OffsetMut<'p, 'v> {
type Zone = TryPile<'p, 'v>;
type BlobZone = TryPile<'p, 'v>;
type Persist = Offset<'p, 'v>;
unsafe fn dealloc<T: ?Sized + Pointee>(&self, metadata: T::Metadata) {
match self.kind() {
Kind::Offset(_) => {},
Kind::Ptr(heap_ptr) => heap_ptr.dealloc::<T>(metadata),
}
}
unsafe fn try_get_dirty_unchecked<T: ?Sized + Pointee>(&self, metadata: T::Metadata) -> Result<&T, Self::Persist> {
match self.kind() {
Kind::Ptr(ptr) => {
todo!()
},
Kind::Offset(offset) => Err(offset),
}
}
}
impl<'p, 'v> Default for OffsetMut<'p, 'v> {
fn default() -> Self {
Offset::dangling().into()
}
}
#[derive(Debug, Default)]
pub struct ShallowDumper<'p, 'v> {
marker: PhantomData<OffsetMut<'p, 'v>>,
written: Vec<u8>,
initial_offset: usize,
}
impl<'p, 'v> Saver for ShallowDumper<'p, 'v> {
type SrcPtr = OffsetMut<'p, 'v>;
type DstPtr = Offset<'p, 'v>;
type Error = !;
fn try_save_raw<R, T: ?Sized + ValidateBlob>(&self,
ptr: &Offset<'p, 'v>,
_metadata: T::Metadata,
_f: impl FnOnce(ValidBlob<T>, &<Self::SrcPtr as Ptr>::BlobZone) -> R,
) -> Result<Result<<Self::DstPtr as Ptr>::Persist, R>,
Self::Error>
{
Ok(Ok(*ptr))
}
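// A shallow dump never chases clean pointers: an offset into the source pile
// is handed back unchanged as the destination pointer.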
fn finish_save<T>(&mut self, value_poll: &T) -> Result<Offset<'p, 'v>, Self::Error>
where T: EncodeBlob
{
let offset = self.initial_offset
.checked_add(self.written.len())
.and_then(Offset::new)
.expect("overflow");
let written = mem::replace(&mut self.written, vec![]);
self.written = value_poll.encode_blob(written).into_ok();
Ok(offset)
}
}
impl<'p, 'v> ShallowDumper<'p, 'v> {
pub fn new(initial_offset: usize) -> Self {
Self {
marker: PhantomData,
written: vec![],
initial_offset,
}
}
pub fn from_buf(buf: impl Into<Vec<u8>>) -> Self {
Self {
marker: PhantomData,
initial_offset: 0,
written: buf.into(),
}
}
pub fn save<T: ?Sized>(mut self, value: &T) -> (Vec<u8>, Offset<'p, 'v>)
where T: SavePtr<OffsetMut<'p, 'v>, Offset<'p, 'v>>
{
let mut encoder = value.init_save_ptr();
encoder.save_poll(&mut self).into_ok();
let offset = self.finish_save(&encoder).into_ok();
(self.written, offset)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::bag::Bag;
#[test]
fn test_shallow_dumper() {
let (buf, offset) = ShallowDumper::new(0).save(&42u8);
//assert_eq!(offset, 0);
//assert_eq!(buf, &[42]);
/*
let own = OffsetMut::alloc(42u8);
let (buf, offset) = ShallowDumper::new(0).save(&own);
assert_eq!(offset, 1);
assert_eq!(buf, &[42, 1,0,0,0,0,0,0,0]);
let own2 = OffsetMut::alloc(own);
let (buf, offset) = ShallowDumper::new(0).save(&own2);
assert_eq!(offset, 9);
assert_eq!(buf,
&[42,
1,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,
]);
*/
}
} | fn as_ref(&self) -> &OffsetMut<'p, 'v, A> { | random_line_split |
offset.rs | use std::alloc::{GlobalAlloc, System, Layout};
use std::borrow::Borrow;
use std::cmp;
use std::convert::TryInto;
use std::fmt;
use std::marker::PhantomData;
use std::mem::{self, ManuallyDrop};
use std::num::NonZeroU64;
use std::ptr::NonNull;
use std::hint::unreachable_unchecked;
use thiserror::Error;
use leint::Le;
use owned::{IntoOwned, Take};
use crate::pointee::Pointee;
use crate::refs::Ref;
use crate::blob::*;
use crate::load::*;
use crate::save::*;
use crate::scalar::*;
use crate::ptr::*;
use crate::pile::*;
use crate::heap::*;
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct Offset<'pile, 'version> {
marker: PhantomData<(
fn(&'pile ()) -> &'pile (),
&'version (),
)>,
raw: Le<NonZeroU64>,
}
unsafe impl Persist for Offset<'_, '_> {}
impl fmt::Debug for Offset<'_, '_> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.get().fmt(f)
}
}
#[derive(Debug, Error)]
#[error("invalid offset")]
#[non_exhaustive]
pub struct ValidateOffsetBlobError;
impl Scalar for Offset<'_, '_> {
const BLOB_LAYOUT: BlobLayout = BlobLayout::new_nonzero(mem::size_of::<Self>());
type ScalarBlobError = ValidateOffsetBlobError;
fn validate_blob<'a>(blob: Blob<'a, Self>) -> Result<ValidBlob<'a, Self>, Self::ScalarBlobError> |
fn decode_blob<'a>(blob: ValidBlob<'a, Self>) -> Self {
blob.as_value().clone()
}
fn try_deref_blob<'a>(blob: ValidBlob<'a, Self>) -> Result<&'a Self, ValidBlob<'a, Self>> {
Ok(blob.as_value())
}
fn encode_blob<W: WriteBlob>(&self, dst: W) -> Result<W::Ok, W::Error> {
todo!()
}
}
impl AsPtrImpl<Self> for Offset<'_, '_> {
fn as_ptr_impl(this: &Self) -> &Self {
this
}
}
impl<'p, 'v> PersistPtr for Offset<'p, 'v> {
type Zone = !;
type BlobZone = TryPile<'p, 'v>;
}
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct OffsetMut<'p, 'v, A = System> {
marker: PhantomData<A>,
inner: Offset<'p, 'v>,
}
unsafe impl Persist for OffsetMut<'_, '_> {}
impl fmt::Debug for OffsetMut<'_, '_> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.kind().fmt(f)
}
}
unsafe impl ValidateBlob for OffsetMut<'_, '_> {
type BlobError = ValidateOffsetBlobError;
fn try_blob_layout(_: ()) -> Result<BlobLayout, !> {
Ok(BlobLayout::new_nonzero(mem::size_of::<Self>()))
}
fn validate_blob<'a>(blob: Blob<'a, Self>, ignore_padding: bool) -> Result<ValidBlob<'a, Self>, Self::BlobError> {
let mut fields = blob.validate_fields(ignore_padding);
fields.validate_blob::<Offset>()?;
unsafe { Ok(fields.finish()) }
}
}
impl Load for OffsetMut<'_, '_> {
type Ptr = !;
fn decode_blob(blob: ValidBlob<Self>, _: &<Self::Ptr as Ptr>::BlobZone) -> Self {
blob.as_value().clone()
}
fn try_deref_blob<'a>(blob: ValidBlob<'a, Self>, _: &()) -> Result<&'a Self, ValidBlob<'a, Self>> {
Ok(blob.as_value())
}
}
impl AsPtrImpl<Self> for OffsetMut<'_, '_> {
fn as_ptr_impl(this: &Self) -> &Self {
this
}
}
/*
impl<'p, 'v, A> Borrow<OffsetMut<'p, 'v, A>> for Offset<'p, 'v> {
#[inline(always)]
fn borrow(&self) -> &OffsetMut<'p, 'v, A> {
self.as_ref()
}
}
impl<'p, 'v, A> AsRef<OffsetMut<'p, 'v, A>> for Offset<'p, 'v> {
#[inline(always)]
fn as_ref(&self) -> &OffsetMut<'p, 'v, A> {
// SAFETY: #[repr(transparent)]
unsafe { &*(self as *const Self as *const _) }
}
}
*/
impl<'p, 'v> From<Offset<'p, 'v>> for usize {
fn from(offset: Offset<'p, 'v>) -> usize {
offset.get()
}
}
impl<'p, 'v> From<Offset<'p, 'v>> for OffsetMut<'p, 'v> {
fn from(inner: Offset<'p, 'v>) -> Self {
Self {
marker: PhantomData,
inner,
}
}
}
impl cmp::PartialEq<usize> for Offset<'_, '_> {
fn eq(&self, other: &usize) -> bool {
self.get() == *other
}
}
impl cmp::PartialEq<Offset<'_, '_>> for usize {
fn eq(&self, other: &Offset<'_, '_>) -> bool {
*self == other.get()
}
}
impl<'p, 'v> Offset<'p, 'v> {
/// The largest `Offset`.
pub const MAX: usize = (1 << 62) - 1;
/// Creates a new `Offset`.
///
/// Returns `None` if the offset is out of range:
///
/// ```
/// use hoard::offset::Offset;
///
/// assert!(Offset::new(Offset::MAX + 1)
/// .is_none());
/// ```
///
/// # Examples
///
/// Zero is a valid offset:
///
/// ```
/// use hoard::offset::Offset;
///
/// Offset::new(0).unwrap();
/// ```
pub fn new(offset: usize) -> Option<Self> {
if offset <= Self::MAX {
let offset = offset as u64;
Some(offset.checked_shl(1).map(|offset|
Self {
marker: PhantomData,
raw: NonZeroU64::new(offset | 1).unwrap().into(),
}
).unwrap())
} else {
None
}
}
/// Casts the `Offset` to a different lifetime.
///
/// This is *safe* because an offset by itself has no guarantees associated with it.
#[inline(always)]
pub fn cast<'p2, 'v2>(&self) -> Offset<'p2, 'v2> {
Offset {
marker: PhantomData,
raw: self.raw,
}
}
/// Gets the offset as a `usize`.
///
/// # Examples
///
/// ```
/// use hoard::offset::Offset;
///
/// assert_eq!(Offset::new(0).unwrap().get(), 0);
/// assert_eq!(Offset::new(1).unwrap().get(), 1);
/// ```
#[inline(always)]
pub fn get(&self) -> usize {
(self.raw.get().get() >> 1) as usize
}
/// Creates a dangling `Offset`.
///
/// # Examples
///
/// ```
/// use hoard::offset::Offset;
///
/// assert_eq!(Offset::dangling().get(), Offset::MAX);
/// ```
#[inline(always)]
pub fn dangling() -> Self {
Self::new(Self::MAX).unwrap()
}
/// Erases the lifetime of an `Offset`.
pub fn to_static(&self) -> Offset<'static, 'static> {
Offset {
marker: PhantomData,
raw: self.raw,
}
}
}
/// Enum for the kinds of `OffsetMut`.
#[derive(Debug)]
pub enum Kind<'p, 'v> {
/// An unmodified `Offset`.
Offset(Offset<'p, 'v>),
/// A pointer to something in the heap.
Ptr(HeapPtr),
}
impl<'p, 'v, A> OffsetMut<'p, 'v, A> {
/// Creates an `OffsetMut` from a pointer.
///
/// Returns `None` if the alignment is incorrect.
#[inline]
pub fn from_ptr(ptr: NonNull<u16>) -> Option<Self> {
let raw = ptr.as_ptr() as usize as u64;
// A heap pointer must have its low bit clear: bit 1 is the tag that
// `kind()` reserves for clean offsets, and a `*mut u16` with the low bit
// set would be misaligned anyway.
if raw & 1 == 0 {
unsafe { Some(mem::transmute(raw)) }
} else {
None
}
}
/// Creates an `OffsetMut` from a pointer without checking the alignment.
///
/// # Safety
///
/// The pointer must be properly aligned.
#[inline]
pub unsafe fn from_ptr_unchecked(ptr: NonNull<u16>) -> Self {
match Self::from_ptr(ptr) {
Some(this) => this,
None => {
unreachable_unchecked()
}
}
}
/// Returns the kind of offset.
pub fn kind(&self) -> Kind<'p, 'v> {
if self.inner.raw.get().get() & 1 == 1 {
Kind::Offset(self.inner)
} else {
Kind::Ptr(unsafe { mem::transmute(self.inner) })
}
}
/// Gets the `Offset` from a clean `OffsetMut`.
#[inline(always)]
pub fn get_offset(&self) -> Option<Offset<'p, 'v>> {
match self.kind() {
Kind::Offset(offset) => Some(offset),
Kind::Ptr(_) => None,
}
}
/// Gets the pointer from a dirty `OffsetMut`.
#[inline(always)]
pub fn get_ptr(&self) -> Option<HeapPtr> {
match self.kind() {
Kind::Ptr(ptr) => Some(ptr),
Kind::Offset(_) => None,
}
}
}
/*
impl<'p, 'v, A> AsPtr<OffsetMut<'p, 'v, A>> for HeapPtr {
#[inline(always)]
fn as_ptr(&self) -> &OffsetMut<'p, 'v, A> {
static_assertions::assert_eq_size!(OffsetMut, HeapPtr);
unsafe {
&*(self as *const _ as *const _)
}
}
}
*/
impl<'p, 'v> Ptr for OffsetMut<'p, 'v> {
type Zone = TryPile<'p, 'v>;
type BlobZone = TryPile<'p, 'v>;
type Persist = Offset<'p, 'v>;
unsafe fn dealloc<T: ?Sized + Pointee>(&self, metadata: T::Metadata) {
match self.kind() {
Kind::Offset(_) => {},
Kind::Ptr(heap_ptr) => heap_ptr.dealloc::<T>(metadata),
}
}
unsafe fn try_get_dirty_unchecked<T: ?Sized + Pointee>(&self, metadata: T::Metadata) -> Result<&T, Self::Persist> {
match self.kind() {
Kind::Ptr(ptr) => {
todo!()
},
Kind::Offset(offset) => Err(offset),
}
}
}
impl<'p, 'v> Default for OffsetMut<'p, 'v> {
fn default() -> Self {
Offset::dangling().into()
}
}
#[derive(Debug, Default)]
pub struct ShallowDumper<'p, 'v> {
marker: PhantomData<OffsetMut<'p, 'v>>,
written: Vec<u8>,
initial_offset: usize,
}
impl<'p, 'v> Saver for ShallowDumper<'p, 'v> {
type SrcPtr = OffsetMut<'p, 'v>;
type DstPtr = Offset<'p, 'v>;
type Error = !;
fn try_save_raw<R, T: ?Sized + ValidateBlob>(&self,
ptr: &Offset<'p, 'v>,
_metadata: T::Metadata,
_f: impl FnOnce(ValidBlob<T>, &<Self::SrcPtr as Ptr>::BlobZone) -> R,
) -> Result<Result<<Self::DstPtr as Ptr>::Persist, R>,
Self::Error>
{
Ok(Ok(*ptr))
}
fn finish_save<T>(&mut self, value_poll: &T) -> Result<Offset<'p, 'v>, Self::Error>
where T: EncodeBlob
{
let offset = self.initial_offset
.checked_add(self.written.len())
.and_then(Offset::new)
.expect("overflow");
let written = mem::replace(&mut self.written, vec![]);
self.written = value_poll.encode_blob(written).into_ok();
Ok(offset)
}
}
impl<'p, 'v> ShallowDumper<'p, 'v> {
pub fn new(initial_offset: usize) -> Self {
Self {
marker: PhantomData,
written: vec![],
initial_offset,
}
}
pub fn from_buf(buf: impl Into<Vec<u8>>) -> Self {
Self {
marker: PhantomData,
initial_offset: 0,
written: buf.into(),
}
}
pub fn save<T: ?Sized>(mut self, value: &T) -> (Vec<u8>, Offset<'p, 'v>)
where T: SavePtr<OffsetMut<'p, 'v>, Offset<'p, 'v>>
{
let mut encoder = value.init_save_ptr();
encoder.save_poll(&mut self).into_ok();
let offset = self.finish_save(&encoder).into_ok();
(self.written, offset)
}
}
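// Chained use (sketch; `a` and `b` stand for any two saveable values):
// dumping into the buffer returned by a first dump places the second value
// after the first.
//
//     let (buf, _off_a) = ShallowDumper::new(0).save(&a);
//     let (buf, off_b) = ShallowDumper::from_buf(buf).save(&b);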
#[cfg(test)]
mod tests {
use super::*;
use crate::bag::Bag;
#[test]
fn test_shallow_dumper() {
let (buf, offset) = ShallowDumper::new(0).save(&42u8);
//assert_eq!(offset, 0);
//assert_eq!(buf, &[42]);
/*
let own = OffsetMut::alloc(42u8);
let (buf, offset) = ShallowDumper::new(0).save(&own);
assert_eq!(offset, 1);
assert_eq!(buf, &[42, 1,0,0,0,0,0,0,0]);
let own2 = OffsetMut::alloc(own);
let (buf, offset) = ShallowDumper::new(0).save(&own2);
assert_eq!(offset, 9);
assert_eq!(buf,
&[42,
1,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,
]);
*/
}
}
| {
let raw = u64::from_le_bytes(blob.as_bytes().try_into().unwrap());
if raw & 0b1 == 0b1 && (raw >> 1) < Offset::MAX as u64 {
unsafe { Ok(blob.assume_valid()) }
} else {
Err(ValidateOffsetBlobError)
}
} | identifier_body |
glyph.rs | use super::math::*;
use crate::config::font::{Font, FontDescription};
use crate::config::ui_config::Delta;
use crate::config::Config;
use crate::cursor;
use alacritty_terminal::ansi::CursorStyle;
use alacritty_terminal::term::CursorKey;
use crossfont::{FontDesc, FontKey, Rasterize, Rasterizer, Size, Slant, Style, Weight};
use fnv::FnvHasher;
use log::*;
use std::collections::HashMap;
use std::hash::BuildHasherDefault;
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub struct GlyphKey {
pub key: crossfont::GlyphKey,
pub wide: bool,
pub zero_width: bool,
}
#[derive(Debug)]
pub struct RasterizedGlyph {
pub rasterized: crossfont::RasterizedGlyph,
pub wide: bool,
pub zero_width: bool,
}
/// `LoadGlyph` allows for copying a rasterized glyph into graphics memory.
pub trait LoadGlyph {
/// Load the rasterized glyph into GPU memory.
fn load_glyph(&mut self, rasterized: &RasterizedGlyph) -> AtlasGlyph;
/// Clear any state accumulated from previous loaded glyphs.
///
/// This can, for instance, be used to reset the texture Atlas.
fn clear(&mut self, cell_size: Vec2<i32>, cell_offset: Vec2<i32>);
}
#[derive(Copy, Debug, Clone)]
pub struct GridAtlasGlyph {
pub atlas_index: usize,
pub line: u16,
pub column: u16,
pub colored: bool,
}
#[derive(Copy, Debug, Clone)]
pub struct QuadAtlasGlyph {
pub atlas_index: usize,
pub uv_bot: f32,
pub uv_left: f32,
pub uv_width: f32,
pub uv_height: f32,
pub top: i16,
pub left: i16,
pub width: i16,
pub height: i16,
pub colored: bool,
}
#[derive(Copy, Debug, Clone)]
pub enum AtlasGlyph {
Grid(GridAtlasGlyph),
Quad(QuadAtlasGlyph),
}
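// Inferred from the fields: a grid glyph is addressed by a fixed (line, column)
// cell in a uniform grid atlas, while a quad glyph carries an explicit UV
// rectangle plus geometry for glyphs that do not fit a grid cell.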
/// Naïve glyph cache.
///
/// Currently only keyed by `char`, and thus not possible to hold different
/// representations of the same code point.
pub struct GlyphCache {
/// Cache of buffered glyphs.
pub cache: HashMap<GlyphKey, AtlasGlyph, BuildHasherDefault<FnvHasher>>,
/// Cache of buffered cursor glyphs.
pub cursor_cache: HashMap<CursorKey, AtlasGlyph, BuildHasherDefault<FnvHasher>>,
/// Rasterizer for loading new glyphs.
rasterizer: Rasterizer,
/// Regular font.
pub font_key: FontKey,
/// Bold font.
pub bold_key: FontKey,
/// Italic font.
pub italic_key: FontKey,
/// Bold italic font.
pub bold_italic_key: FontKey,
/// Font size.
pub font_size: crossfont::Size,
/// Glyph offset.
glyph_offset: Delta<i8>,
/// Font metrics.
pub metrics: crossfont::Metrics,
/// Cell size
pub cell_size: Vec2<i32>,
}
impl GlyphCache {
pub fn new<L>(
mut rasterizer: Rasterizer,
config: &Config,
font: &Font,
loader: &mut L,
) -> Result<GlyphCache, crossfont::Error>
where
L: LoadGlyph,
{
let (regular, bold, italic, bold_italic) = Self::compute_font_keys(font, &mut rasterizer)?;
// Need to load at least one glyph for the face before calling metrics.
// The glyph requested here ('m' at the time of writing) has no special
// meaning.
rasterizer.get_glyph(crossfont::GlyphKey { font_key: regular, c: 'm', size: font.size })?;
let metrics = rasterizer.metrics(regular, font.size)?;
let (cell_width, cell_height) = Self::compute_cell_size(config, &metrics);
let cell_size = Vec2::new(cell_width as i32, cell_height as i32);
let mut cache = Self {
cache: HashMap::default(),
cursor_cache: HashMap::default(),
rasterizer,
font_size: font.size,
font_key: regular,
bold_key: bold,
italic_key: italic,
bold_italic_key: bold_italic,
glyph_offset: font.glyph_offset,
metrics,
cell_size,
};
cache.clear_cache_with_common_glyphs(loader, config);
Ok(cache)
}
/// Computes font keys for (Regular, Bold, Italic, Bold Italic).
fn compute_font_keys(
font: &Font,
rasterizer: &mut Rasterizer,
) -> Result<(FontKey, FontKey, FontKey, FontKey), crossfont::Error> {
let size = font.size;
// Load regular font.
let regular_desc = Self::make_desc(&font.normal(), Slant::Normal, Weight::Normal);
let regular = Self::load_regular_font(rasterizer, ®ular_desc, size)?;
// Helper to load a description if it is not the `regular_desc`.
let mut load_or_regular = |desc: FontDesc| {
if desc == regular_desc { | else {
rasterizer.load_font(&desc, size).unwrap_or_else(|_| regular)
}
};
// Load bold font.
let bold_desc = Self::make_desc(&font.bold(), Slant::Normal, Weight::Bold);
let bold = load_or_regular(bold_desc);
// Load italic font.
let italic_desc = Self::make_desc(&font.italic(), Slant::Italic, Weight::Normal);
let italic = load_or_regular(italic_desc);
// Load bold italic font.
let bold_italic_desc = Self::make_desc(&font.bold_italic(), Slant::Italic, Weight::Bold);
let bold_italic = load_or_regular(bold_italic_desc);
Ok((regular, bold, italic, bold_italic))
}
fn load_regular_font(
rasterizer: &mut Rasterizer,
description: &FontDesc,
size: Size,
) -> Result<FontKey, crossfont::Error> {
match rasterizer.load_font(description, size) {
Ok(font) => Ok(font),
Err(err) => {
error!("{}", err);
let fallback_desc =
Self::make_desc(&Font::default().normal(), Slant::Normal, Weight::Normal);
rasterizer.load_font(&fallback_desc, size)
},
}
}
fn make_desc(desc: &FontDescription, slant: Slant, weight: Weight) -> FontDesc {
let style = if let Some(ref spec) = desc.style {
Style::Specific(spec.to_owned())
} else {
Style::Description { slant, weight }
};
FontDesc::new(desc.family.clone(), style)
}
fn rasterize_glyph(
glyph_key: GlyphKey,
rasterizer: &mut Rasterizer,
glyph_offset: Delta<i8>,
metrics: &crossfont::Metrics,
) -> RasterizedGlyph {
let mut rasterized =
rasterizer.get_glyph(glyph_key.key).unwrap_or_else(|_| Default::default());
rasterized.left += i32::from(glyph_offset.x);
rasterized.top += i32::from(glyph_offset.y);
rasterized.top -= metrics.descent as i32;
RasterizedGlyph { wide: glyph_key.wide, zero_width: glyph_key.zero_width, rasterized }
}
pub fn get<L>(&mut self, glyph_key: GlyphKey, loader: &mut L) -> &AtlasGlyph
where
L: LoadGlyph,
{
let glyph_offset = self.glyph_offset;
let rasterizer = &mut self.rasterizer;
let metrics = &self.metrics;
self.cache.entry(glyph_key).or_insert_with(|| {
let rasterized = Self::rasterize_glyph(glyph_key, rasterizer, glyph_offset, metrics);
loader.load_glyph(&rasterized)
})
}
/// Clear currently cached data in both GL and the registry.
pub fn clear_glyph_cache<L: LoadGlyph>(&mut self, config: &Config, loader: &mut L) {
let (cell_width, cell_height) = Self::compute_cell_size(config, &self.metrics);
self.cell_size = Vec2::new(cell_width as i32, cell_height as i32);
self.cache = HashMap::default();
self.cursor_cache = HashMap::default();
self.clear_cache_with_common_glyphs(loader, config);
}
pub fn update_font_size<L: LoadGlyph>(
&mut self,
config: &Config,
font: &Font,
dpr: f64,
loader: &mut L,
) -> Result<(), crossfont::Error> {
// Update dpi scaling.
self.rasterizer.update_dpr(dpr as f32);
// Recompute font keys.
let (regular, bold, italic, bold_italic) =
Self::compute_font_keys(font, &mut self.rasterizer)?;
self.rasterizer.get_glyph(crossfont::GlyphKey {
font_key: regular,
c: 'm',
size: font.size,
})?;
let metrics = self.rasterizer.metrics(regular, font.size)?;
info!("Font size changed to {:?} with DPR of {}", font.size, dpr);
self.font_size = font.size;
self.font_key = regular;
self.bold_key = bold;
self.italic_key = italic;
self.bold_italic_key = bold_italic;
self.metrics = metrics;
self.clear_glyph_cache(config, loader);
Ok(())
}
pub fn font_metrics(&self) -> crossfont::Metrics {
self.metrics
}
/// Prefetch glyphs that are almost guaranteed to be loaded anyway.
fn clear_cache_with_common_glyphs<L: LoadGlyph>(&mut self, loader: &mut L, config: &Config) {
let glyph_offset = self.glyph_offset;
let metrics = &self.metrics;
let font_size = self.font_size;
let rasterizer = &mut self.rasterizer;
let cell_size = self.cell_size;
let mut atlas_cell_size = self.cell_size;
let mut atlas_cell_offset = Vec2 { x: 0, y: 0 };
type Glyphs = Vec<(GlyphKey, RasterizedGlyph)>;
let glyphs: Glyphs = [self.font_key, self.bold_key, self.italic_key, self.bold_italic_key]
.iter()
.flat_map(|font| {
(32u8..=126u8)
.map(|c| {
let glyph_key = GlyphKey {
wide: false,
zero_width: false,
key: crossfont::GlyphKey {
font_key: *font,
c: c as char,
size: font_size,
},
};
let glyph =
Self::rasterize_glyph(glyph_key, rasterizer, glyph_offset, metrics);
atlas_cell_size.x = std::cmp::max(
atlas_cell_size.x,
glyph.rasterized.left + glyph.rasterized.width,
);
atlas_cell_size.y = std::cmp::max(atlas_cell_size.y, glyph.rasterized.top);
atlas_cell_offset.x =
std::cmp::max(atlas_cell_offset.x, -glyph.rasterized.left);
atlas_cell_offset.y = std::cmp::max(
atlas_cell_offset.y,
glyph.rasterized.height - glyph.rasterized.top,
);
debug!(
"precomp: '{}' left={} top={} w={} h={} off={:?} atlas_cell={:?} \
offset={:?}",
glyph.rasterized.c,
glyph.rasterized.left,
glyph.rasterized.top,
glyph.rasterized.width,
glyph.rasterized.height,
glyph_offset,
atlas_cell_size,
atlas_cell_offset,
);
(glyph_key, glyph)
})
.collect::<Glyphs>()
})
.collect();
info!("Max glyph size: {:?}", cell_size);
loader.clear(atlas_cell_size, atlas_cell_offset);
// Multipass grid render workaround for large font sizes
// Generate cursor glyphs first to ensure that they end up strictly
// in the first atlas/pass
for style in [
CursorStyle::Block,
CursorStyle::Beam,
CursorStyle::Underline,
CursorStyle::HollowBlock,
]
.iter()
{
let cursor_key = CursorKey { style: *style, is_wide: false };
let cursor_glyph = RasterizedGlyph {
wide: false,
zero_width: false,
rasterized: cursor::get_cursor_glyph(
cursor_key.style,
*metrics,
config.ui_config.font.offset.x,
config.ui_config.font.offset.y,
cursor_key.is_wide,
config.cursor.thickness(),
),
};
self.cursor_cache.entry(cursor_key).or_insert_with(|| loader.load_glyph(&cursor_glyph));
}
for glyph in glyphs {
self.cache.entry(glyph.0).or_insert_with(|| loader.load_glyph(&glyph.1));
}
}
/// Calculate font metrics without access to a glyph cache.
pub fn static_metrics(font: Font, dpr: f64) -> Result<crossfont::Metrics, crossfont::Error> {
let mut rasterizer = crossfont::Rasterizer::new(dpr as f32, font.use_thin_strokes())?;
let regular_desc = GlyphCache::make_desc(&font.normal(), Slant::Normal, Weight::Normal);
let regular = Self::load_regular_font(&mut rasterizer, ®ular_desc, font.size)?;
rasterizer.get_glyph(crossfont::GlyphKey { font_key: regular, c: 'm', size: font.size })?;
rasterizer.metrics(regular, font.size)
}
/// Calculate the cell dimensions based on font metrics.
///
/// This will return a tuple of the cell width and height.
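///
/// For example, with hypothetical metrics of `average_advance = 8.4` and
/// `line_height = 17.0` and a zero font offset, this returns `(8.0, 17.0)`:
/// each dimension is floored and clamped to at least 1.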
#[inline]
pub fn compute_cell_size(config: &Config, metrics: &crossfont::Metrics) -> (f32, f32) {
let offset_x = f64::from(config.ui_config.font.offset.x);
let offset_y = f64::from(config.ui_config.font.offset.y);
(
(metrics.average_advance + offset_x).floor().max(1.) as f32,
(metrics.line_height + offset_y).floor().max(1.) as f32,
)
}
}
|
regular
} | conditional_block |
glyph.rs | use super::math::*;
use crate::config::font::{Font, FontDescription};
use crate::config::ui_config::Delta;
use crate::config::Config;
use crate::cursor;
use alacritty_terminal::ansi::CursorStyle;
use alacritty_terminal::term::CursorKey;
use crossfont::{FontDesc, FontKey, Rasterize, Rasterizer, Size, Slant, Style, Weight};
use fnv::FnvHasher;
use log::*;
use std::collections::HashMap;
use std::hash::BuildHasherDefault;
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub struct GlyphKey {
pub key: crossfont::GlyphKey,
pub wide: bool,
pub zero_width: bool,
}
#[derive(Debug)]
pub struct | {
pub rasterized: crossfont::RasterizedGlyph,
pub wide: bool,
pub zero_width: bool,
}
/// `LoadGlyph` allows for copying a rasterized glyph into graphics memory.
pub trait LoadGlyph {
/// Load the rasterized glyph into GPU memory.
fn load_glyph(&mut self, rasterized: &RasterizedGlyph) -> AtlasGlyph;
/// Clear any state accumulated from previous loaded glyphs.
///
/// This can, for instance, be used to reset the texture Atlas.
fn clear(&mut self, cell_size: Vec2<i32>, cell_offset: Vec2<i32>);
}
#[derive(Copy, Debug, Clone)]
pub struct GridAtlasGlyph {
pub atlas_index: usize,
pub line: u16,
pub column: u16,
pub colored: bool,
}
#[derive(Copy, Debug, Clone)]
pub struct QuadAtlasGlyph {
pub atlas_index: usize,
pub uv_bot: f32,
pub uv_left: f32,
pub uv_width: f32,
pub uv_height: f32,
pub top: i16,
pub left: i16,
pub width: i16,
pub height: i16,
pub colored: bool,
}
#[derive(Copy, Debug, Clone)]
pub enum AtlasGlyph {
Grid(GridAtlasGlyph),
Quad(QuadAtlasGlyph),
}
/// Naïve glyph cache.
///
/// Currently only keyed by `char`, and thus not possible to hold different
/// representations of the same code point.
pub struct GlyphCache {
/// Cache of buffered glyphs.
pub cache: HashMap<GlyphKey, AtlasGlyph, BuildHasherDefault<FnvHasher>>,
/// Cache of buffered cursor glyphs.
pub cursor_cache: HashMap<CursorKey, AtlasGlyph, BuildHasherDefault<FnvHasher>>,
/// Rasterizer for loading new glyphs.
rasterizer: Rasterizer,
/// Regular font.
pub font_key: FontKey,
/// Bold font.
pub bold_key: FontKey,
/// Italic font.
pub italic_key: FontKey,
/// Bold italic font.
pub bold_italic_key: FontKey,
/// Font size.
pub font_size: crossfont::Size,
/// Glyph offset.
glyph_offset: Delta<i8>,
/// Font metrics.
pub metrics: crossfont::Metrics,
/// Cell size
pub cell_size: Vec2<i32>,
}
impl GlyphCache {
pub fn new<L>(
mut rasterizer: Rasterizer,
config: &Config,
font: &Font,
loader: &mut L,
) -> Result<GlyphCache, crossfont::Error>
where
L: LoadGlyph,
{
let (regular, bold, italic, bold_italic) = Self::compute_font_keys(font, &mut rasterizer)?;
// Need to load at least one glyph for the face before calling metrics.
// The glyph requested here ('m' at the time of writing) has no special
// meaning.
rasterizer.get_glyph(crossfont::GlyphKey { font_key: regular, c: 'm', size: font.size })?;
let metrics = rasterizer.metrics(regular, font.size)?;
let (cell_width, cell_height) = Self::compute_cell_size(config, &metrics);
let cell_size = Vec2::new(cell_width as i32, cell_height as i32);
let mut cache = Self {
cache: HashMap::default(),
cursor_cache: HashMap::default(),
rasterizer,
font_size: font.size,
font_key: regular,
bold_key: bold,
italic_key: italic,
bold_italic_key: bold_italic,
glyph_offset: font.glyph_offset,
metrics,
cell_size,
};
cache.clear_cache_with_common_glyphs(loader, config);
Ok(cache)
}
/// Computes font keys for (Regular, Bold, Italic, Bold Italic).
fn compute_font_keys(
font: &Font,
rasterizer: &mut Rasterizer,
) -> Result<(FontKey, FontKey, FontKey, FontKey), crossfont::Error> {
let size = font.size;
// Load regular font.
let regular_desc = Self::make_desc(&font.normal(), Slant::Normal, Weight::Normal);
let regular = Self::load_regular_font(rasterizer, ®ular_desc, size)?;
// Helper to load a description if it is not the `regular_desc`.
let mut load_or_regular = |desc: FontDesc| {
if desc == regular_desc {
regular
} else {
rasterizer.load_font(&desc, size).unwrap_or_else(|_| regular)
}
};
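// Variants that match the regular description, or that fail to load, fall
// back to the regular face, so a missing bold or italic never aborts startup.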
// Load bold font.
let bold_desc = Self::make_desc(&font.bold(), Slant::Normal, Weight::Bold);
let bold = load_or_regular(bold_desc);
// Load italic font.
let italic_desc = Self::make_desc(&font.italic(), Slant::Italic, Weight::Normal);
let italic = load_or_regular(italic_desc);
// Load bold italic font.
let bold_italic_desc = Self::make_desc(&font.bold_italic(), Slant::Italic, Weight::Bold);
let bold_italic = load_or_regular(bold_italic_desc);
Ok((regular, bold, italic, bold_italic))
}
fn load_regular_font(
rasterizer: &mut Rasterizer,
description: &FontDesc,
size: Size,
) -> Result<FontKey, crossfont::Error> {
match rasterizer.load_font(description, size) {
Ok(font) => Ok(font),
Err(err) => {
error!("{}", err);
let fallback_desc =
Self::make_desc(&Font::default().normal(), Slant::Normal, Weight::Normal);
rasterizer.load_font(&fallback_desc, size)
},
}
}
fn make_desc(desc: &FontDescription, slant: Slant, weight: Weight) -> FontDesc {
let style = if let Some(ref spec) = desc.style {
Style::Specific(spec.to_owned())
} else {
Style::Description { slant, weight }
};
FontDesc::new(desc.family.clone(), style)
}
fn rasterize_glyph(
glyph_key: GlyphKey,
rasterizer: &mut Rasterizer,
glyph_offset: Delta<i8>,
metrics: &crossfont::Metrics,
) -> RasterizedGlyph {
let mut rasterized =
rasterizer.get_glyph(glyph_key.key).unwrap_or_else(|_| Default::default());
rasterized.left += i32::from(glyph_offset.x);
rasterized.top += i32::from(glyph_offset.y);
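// Re-base the bearing: the rasterizer reports `top` relative to the baseline,
// so subtracting the (typically negative) descent expresses it relative to
// the bottom of the cell instead.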
rasterized.top -= metrics.descent as i32;
RasterizedGlyph { wide: glyph_key.wide, zero_width: glyph_key.zero_width, rasterized }
}
pub fn get<L>(&mut self, glyph_key: GlyphKey, loader: &mut L) -> &AtlasGlyph
where
L: LoadGlyph,
{
let glyph_offset = self.glyph_offset;
let rasterizer = &mut self.rasterizer;
let metrics = &self.metrics;
self.cache.entry(glyph_key).or_insert_with(|| {
let rasterized = Self::rasterize_glyph(glyph_key, rasterizer, glyph_offset, metrics);
loader.load_glyph(&rasterized)
})
}
/// Clear currently cached data in both GL and the registry.
pub fn clear_glyph_cache<L: LoadGlyph>(&mut self, config: &Config, loader: &mut L) {
let (cell_width, cell_height) = Self::compute_cell_size(config, &self.metrics);
self.cell_size = Vec2::new(cell_width as i32, cell_height as i32);
self.cache = HashMap::default();
self.cursor_cache = HashMap::default();
self.clear_cache_with_common_glyphs(loader, config);
}
pub fn update_font_size<L: LoadGlyph>(
&mut self,
config: &Config,
font: &Font,
dpr: f64,
loader: &mut L,
) -> Result<(), crossfont::Error> {
// Update dpi scaling.
self.rasterizer.update_dpr(dpr as f32);
// Recompute font keys.
let (regular, bold, italic, bold_italic) =
Self::compute_font_keys(font, &mut self.rasterizer)?;
self.rasterizer.get_glyph(crossfont::GlyphKey {
font_key: regular,
c: 'm',
size: font.size,
})?;
let metrics = self.rasterizer.metrics(regular, font.size)?;
info!("Font size changed to {:?} with DPR of {}", font.size, dpr);
self.font_size = font.size;
self.font_key = regular;
self.bold_key = bold;
self.italic_key = italic;
self.bold_italic_key = bold_italic;
self.metrics = metrics;
self.clear_glyph_cache(config, loader);
Ok(())
}
pub fn font_metrics(&self) -> crossfont::Metrics {
self.metrics
}
/// Prefetch glyphs that are almost guaranteed to be loaded anyway.
fn clear_cache_with_common_glyphs<L: LoadGlyph>(&mut self, loader: &mut L, config: &Config) {
let glyph_offset = self.glyph_offset;
let metrics = &self.metrics;
let font_size = self.font_size;
let rasterizer = &mut self.rasterizer;
let cell_size = self.cell_size;
let mut atlas_cell_size = self.cell_size;
let mut atlas_cell_offset = Vec2 { x: 0, y: 0 };
type Glyphs = Vec<(GlyphKey, RasterizedGlyph)>;
let glyphs: Glyphs = [self.font_key, self.bold_key, self.italic_key, self.bold_italic_key]
.iter()
.flat_map(|font| {
(32u8..=126u8)
.map(|c| {
let glyph_key = GlyphKey {
wide: false,
zero_width: false,
key: crossfont::GlyphKey {
font_key: *font,
c: c as char,
size: font_size,
},
};
let glyph =
Self::rasterize_glyph(glyph_key, rasterizer, glyph_offset, metrics);
atlas_cell_size.x = std::cmp::max(
atlas_cell_size.x,
glyph.rasterized.left + glyph.rasterized.width,
);
atlas_cell_size.y = std::cmp::max(atlas_cell_size.y, glyph.rasterized.top);
atlas_cell_offset.x =
std::cmp::max(atlas_cell_offset.x, -glyph.rasterized.left);
atlas_cell_offset.y = std::cmp::max(
atlas_cell_offset.y,
glyph.rasterized.height - glyph.rasterized.top,
);
debug!(
"precomp: '{}' left={} top={} w={} h={} off={:?} atlas_cell={:?} \
offset={:?}",
glyph.rasterized.c,
glyph.rasterized.left,
glyph.rasterized.top,
glyph.rasterized.width,
glyph.rasterized.height,
glyph_offset,
atlas_cell_size,
atlas_cell_offset,
);
(glyph_key, glyph)
})
.collect::<Glyphs>()
})
.collect();
info!("Max glyph size: {:?}", cell_size);
loader.clear(atlas_cell_size, atlas_cell_offset);
// Multipass grid render workaround for large font sizes
// Generate cursor glyphs first to ensure that they end up strictly
// in the first atlas/pass
for style in [
CursorStyle::Block,
CursorStyle::Beam,
CursorStyle::Underline,
CursorStyle::HollowBlock,
]
.iter()
{
let cursor_key = CursorKey { style: *style, is_wide: false };
let cursor_glyph = RasterizedGlyph {
wide: false,
zero_width: false,
rasterized: cursor::get_cursor_glyph(
cursor_key.style,
*metrics,
config.ui_config.font.offset.x,
config.ui_config.font.offset.y,
cursor_key.is_wide,
config.cursor.thickness(),
),
};
self.cursor_cache.entry(cursor_key).or_insert_with(|| loader.load_glyph(&cursor_glyph));
}
for glyph in glyphs {
self.cache.entry(glyph.0).or_insert_with(|| loader.load_glyph(&glyph.1));
}
}
/// Calculate font metrics without access to a glyph cache.
pub fn static_metrics(font: Font, dpr: f64) -> Result<crossfont::Metrics, crossfont::Error> {
let mut rasterizer = crossfont::Rasterizer::new(dpr as f32, font.use_thin_strokes())?;
let regular_desc = GlyphCache::make_desc(&font.normal(), Slant::Normal, Weight::Normal);
let regular = Self::load_regular_font(&mut rasterizer, ®ular_desc, font.size)?;
rasterizer.get_glyph(crossfont::GlyphKey { font_key: regular, c: 'm', size: font.size })?;
rasterizer.metrics(regular, font.size)
}
/// Calculate the cell dimensions based on font metrics.
///
/// This will return a tuple of the cell width and height.
#[inline]
pub fn compute_cell_size(config: &Config, metrics: &crossfont::Metrics) -> (f32, f32) {
let offset_x = f64::from(config.ui_config.font.offset.x);
let offset_y = f64::from(config.ui_config.font.offset.y);
(
(metrics.average_advance + offset_x).floor().max(1.) as f32,
(metrics.line_height + offset_y).floor().max(1.) as f32,
)
}
}
| RasterizedGlyph | identifier_name |
glyph.rs | use super::math::*;
use crate::config::font::{Font, FontDescription};
use crate::config::ui_config::Delta;
use crate::config::Config;
use crate::cursor;
use alacritty_terminal::ansi::CursorStyle;
use alacritty_terminal::term::CursorKey;
use crossfont::{FontDesc, FontKey, Rasterize, Rasterizer, Size, Slant, Style, Weight};
use fnv::FnvHasher;
use log::*;
use std::collections::HashMap;
use std::hash::BuildHasherDefault;
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub struct GlyphKey {
pub key: crossfont::GlyphKey,
pub wide: bool,
pub zero_width: bool,
}
#[derive(Debug)]
pub struct RasterizedGlyph {
pub rasterized: crossfont::RasterizedGlyph,
pub wide: bool,
pub zero_width: bool,
}
/// `LoadGlyph` allows for copying a rasterized glyph into graphics memory.
pub trait LoadGlyph {
/// Load the rasterized glyph into GPU memory.
fn load_glyph(&mut self, rasterized: &RasterizedGlyph) -> AtlasGlyph;
/// Clear any state accumulated from previous loaded glyphs.
///
/// This can, for instance, be used to reset the texture Atlas.
fn clear(&mut self, cell_size: Vec2<i32>, cell_offset: Vec2<i32>);
}
#[derive(Copy, Debug, Clone)]
pub struct GridAtlasGlyph {
pub atlas_index: usize,
pub line: u16,
pub column: u16,
pub colored: bool,
}
#[derive(Copy, Debug, Clone)]
pub struct QuadAtlasGlyph {
pub atlas_index: usize,
pub uv_bot: f32,
pub uv_left: f32,
pub uv_width: f32,
pub uv_height: f32,
pub top: i16,
pub left: i16,
pub width: i16,
pub height: i16,
pub colored: bool,
}
#[derive(Copy, Debug, Clone)]
pub enum AtlasGlyph {
Grid(GridAtlasGlyph),
Quad(QuadAtlasGlyph),
}
/// Naïve glyph cache.
///
/// Currently only keyed by `char`, and thus not possible to hold different
/// representations of the same code point.
pub struct GlyphCache {
/// Cache of buffered glyphs.
pub cache: HashMap<GlyphKey, AtlasGlyph, BuildHasherDefault<FnvHasher>>,
/// Cache of buffered cursor glyphs.
pub cursor_cache: HashMap<CursorKey, AtlasGlyph, BuildHasherDefault<FnvHasher>>,
/// Rasterizer for loading new glyphs.
rasterizer: Rasterizer,
/// Regular font.
pub font_key: FontKey,
/// Bold font.
pub bold_key: FontKey,
/// Italic font.
pub italic_key: FontKey,
/// Bold italic font.
pub bold_italic_key: FontKey,
/// Font size.
pub font_size: crossfont::Size,
/// Glyph offset.
glyph_offset: Delta<i8>,
/// Font metrics.
pub metrics: crossfont::Metrics,
/// Cell size
pub cell_size: Vec2<i32>,
}
impl GlyphCache {
pub fn new<L>(
mut rasterizer: Rasterizer,
config: &Config,
font: &Font,
loader: &mut L,
) -> Result<GlyphCache, crossfont::Error>
where
L: LoadGlyph,
{
let (regular, bold, italic, bold_italic) = Self::compute_font_keys(font, &mut rasterizer)?;
// Need to load at least one glyph for the face before calling metrics.
// The glyph requested here ('m' at the time of writing) has no special
// meaning.
rasterizer.get_glyph(crossfont::GlyphKey { font_key: regular, c: 'm', size: font.size })?;
let metrics = rasterizer.metrics(regular, font.size)?;
let (cell_width, cell_height) = Self::compute_cell_size(config, &metrics);
let cell_size = Vec2::new(cell_width as i32, cell_height as i32);
let mut cache = Self {
cache: HashMap::default(),
cursor_cache: HashMap::default(),
rasterizer,
font_size: font.size,
font_key: regular,
bold_key: bold,
italic_key: italic,
bold_italic_key: bold_italic,
glyph_offset: font.glyph_offset,
metrics,
cell_size,
};
cache.clear_cache_with_common_glyphs(loader, config);
Ok(cache)
}
/// Computes font keys for (Regular, Bold, Italic, Bold Italic).
fn compute_font_keys(
font: &Font,
rasterizer: &mut Rasterizer,
) -> Result<(FontKey, FontKey, FontKey, FontKey), crossfont::Error> {
let size = font.size;
// Load regular font.
let regular_desc = Self::make_desc(&font.normal(), Slant::Normal, Weight::Normal);
let regular = Self::load_regular_font(rasterizer, ®ular_desc, size)?;
// Helper to load a description if it is not the `regular_desc`.
let mut load_or_regular = |desc: FontDesc| {
if desc == regular_desc {
regular
} else {
rasterizer.load_font(&desc, size).unwrap_or_else(|_| regular)
}
};
// Load bold font.
let bold_desc = Self::make_desc(&font.bold(), Slant::Normal, Weight::Bold);
let bold = load_or_regular(bold_desc);
// Load italic font.
let italic_desc = Self::make_desc(&font.italic(), Slant::Italic, Weight::Normal);
let italic = load_or_regular(italic_desc);
// Load bold italic font.
let bold_italic_desc = Self::make_desc(&font.bold_italic(), Slant::Italic, Weight::Bold);
let bold_italic = load_or_regular(bold_italic_desc);
Ok((regular, bold, italic, bold_italic))
}
fn load_regular_font(
rasterizer: &mut Rasterizer,
description: &FontDesc,
size: Size,
) -> Result<FontKey, crossfont::Error> {
match rasterizer.load_font(description, size) {
Ok(font) => Ok(font),
Err(err) => {
error!("{}", err);
let fallback_desc =
Self::make_desc(&Font::default().normal(), Slant::Normal, Weight::Normal);
rasterizer.load_font(&fallback_desc, size)
},
}
}
fn make_desc(desc: &FontDescription, slant: Slant, weight: Weight) -> FontDesc {
let style = if let Some(ref spec) = desc.style {
Style::Specific(spec.to_owned())
} else {
Style::Description { slant, weight }
};
FontDesc::new(desc.family.clone(), style)
}
fn rasterize_glyph(
glyph_key: GlyphKey,
rasterizer: &mut Rasterizer,
glyph_offset: Delta<i8>,
metrics: &crossfont::Metrics,
) -> RasterizedGlyph {
let mut rasterized =
rasterizer.get_glyph(glyph_key.key).unwrap_or_else(|_| Default::default());
rasterized.left += i32::from(glyph_offset.x);
rasterized.top += i32::from(glyph_offset.y);
rasterized.top -= metrics.descent as i32;
RasterizedGlyph { wide: glyph_key.wide, zero_width: glyph_key.zero_width, rasterized }
}
pub fn get<L>(&mut self, glyph_key: GlyphKey, loader: &mut L) -> &AtlasGlyph
where
L: LoadGlyph,
{
let glyph_offset = self.glyph_offset;
let rasterizer = &mut self.rasterizer;
let metrics = &self.metrics;
self.cache.entry(glyph_key).or_insert_with(|| {
let rasterized = Self::rasterize_glyph(glyph_key, rasterizer, glyph_offset, metrics);
loader.load_glyph(&rasterized)
})
}
/// Clear currently cached data in both GL and the registry.
pub fn clear_glyph_cache<L: LoadGlyph>(&mut self, config: &Config, loader: &mut L) {
let (cell_width, cell_height) = Self::compute_cell_size(config, &self.metrics);
self.cell_size = Vec2::new(cell_width as i32, cell_height as i32);
self.cache = HashMap::default();
self.cursor_cache = HashMap::default();
self.clear_cache_with_common_glyphs(loader, config);
}
pub fn update_font_size<L: LoadGlyph>(
&mut self,
config: &Config,
font: &Font,
dpr: f64,
loader: &mut L,
) -> Result<(), crossfont::Error> {
// Update dpi scaling.
self.rasterizer.update_dpr(dpr as f32);
// Recompute font keys.
let (regular, bold, italic, bold_italic) =
Self::compute_font_keys(font, &mut self.rasterizer)?;
self.rasterizer.get_glyph(crossfont::GlyphKey {
font_key: regular,
c: 'm',
size: font.size,
})?;
let metrics = self.rasterizer.metrics(regular, font.size)?;
info!("Font size changed to {:?} with DPR of {}", font.size, dpr);
self.font_size = font.size;
self.font_key = regular;
self.bold_key = bold;
self.italic_key = italic;
self.bold_italic_key = bold_italic;
self.metrics = metrics;
self.clear_glyph_cache(config, loader);
Ok(())
}
pub fn font_metrics(&self) -> crossfont::Metrics {
self.metrics
}
/// Prefetch glyphs that are almost guaranteed to be loaded anyway.
fn clear_cache_with_common_glyphs<L: LoadGlyph>(&mut self, loader: &mut L, config: &Config) {
let glyph_offset = self.glyph_offset;
let metrics = &self.metrics;
let font_size = self.font_size;
let rasterizer = &mut self.rasterizer;
let cell_size = self.cell_size;
let mut atlas_cell_size = self.cell_size;
let mut atlas_cell_offset = Vec2 { x: 0, y: 0 };
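// While prefetching printable ASCII (32..=126) for all four faces, widen the
// atlas cell (and its origin offset) to the largest extents any rasterized
// glyph needs, so grid cells can hold every prefetched glyph.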
type Glyphs = Vec<(GlyphKey, RasterizedGlyph)>;
let glyphs: Glyphs = [self.font_key, self.bold_key, self.italic_key, self.bold_italic_key]
.iter()
.flat_map(|font| {
(32u8..=126u8)
.map(|c| {
let glyph_key = GlyphKey {
wide: false,
zero_width: false, | size: font_size,
},
};
let glyph =
Self::rasterize_glyph(glyph_key, rasterizer, glyph_offset, metrics);
atlas_cell_size.x = std::cmp::max(
atlas_cell_size.x,
glyph.rasterized.left + glyph.rasterized.width,
);
atlas_cell_size.y = std::cmp::max(atlas_cell_size.y, glyph.rasterized.top);
atlas_cell_offset.x =
std::cmp::max(atlas_cell_offset.x, -glyph.rasterized.left);
atlas_cell_offset.y = std::cmp::max(
atlas_cell_offset.y,
glyph.rasterized.height - glyph.rasterized.top,
);
debug!(
"precomp: '{}' left={} top={} w={} h={} off={:?} atlas_cell={:?} \
offset={:?}",
glyph.rasterized.c,
glyph.rasterized.left,
glyph.rasterized.top,
glyph.rasterized.width,
glyph.rasterized.height,
glyph_offset,
atlas_cell_size,
atlas_cell_offset,
);
(glyph_key, glyph)
})
.collect::<Glyphs>()
})
.collect();
info!("Max glyph size: {:?}", cell_size);
loader.clear(atlas_cell_size, atlas_cell_offset);
// Multipass grid render workaround for large font sizes
// Generate cursor glyphs first to ensure that they end up strictly
// in the first atlas/pass
for style in [
CursorStyle::Block,
CursorStyle::Beam,
CursorStyle::Underline,
CursorStyle::HollowBlock,
]
.iter()
{
let cursor_key = CursorKey { style: *style, is_wide: false };
let cursor_glyph = RasterizedGlyph {
wide: false,
zero_width: false,
rasterized: cursor::get_cursor_glyph(
cursor_key.style,
*metrics,
config.ui_config.font.offset.x,
config.ui_config.font.offset.y,
cursor_key.is_wide,
config.cursor.thickness(),
),
};
self.cursor_cache.entry(cursor_key).or_insert_with(|| loader.load_glyph(&cursor_glyph));
}
for glyph in glyphs {
self.cache.entry(glyph.0).or_insert_with(|| loader.load_glyph(&glyph.1));
}
}
/// Calculate font metrics without access to a glyph cache.
pub fn static_metrics(font: Font, dpr: f64) -> Result<crossfont::Metrics, crossfont::Error> {
let mut rasterizer = crossfont::Rasterizer::new(dpr as f32, font.use_thin_strokes())?;
let regular_desc = GlyphCache::make_desc(&font.normal(), Slant::Normal, Weight::Normal);
let regular = Self::load_regular_font(&mut rasterizer, &regular_desc, font.size)?;
rasterizer.get_glyph(crossfont::GlyphKey { font_key: regular, c: 'm', size: font.size })?;
rasterizer.metrics(regular, font.size)
}
/// Calculate the cell dimensions based on font metrics.
///
/// This will return a tuple of the cell width and height.
#[inline]
pub fn compute_cell_size(config: &Config, metrics: &crossfont::Metrics) -> (f32, f32) {
let offset_x = f64::from(config.ui_config.font.offset.x);
let offset_y = f64::from(config.ui_config.font.offset.y);
(
(metrics.average_advance + offset_x).floor().max(1.) as f32,
(metrics.line_height + offset_y).floor().max(1.) as f32,
)
}
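// Worked example (illustrative numbers, not from the original source): with
// average_advance = 9.6, line_height = 20.2 and zero offsets, this returns
// (9.0, 20.0) after the floor(); the max(1.) guard only matters for
// degenerate metrics or large negative offsets.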
} | key: crossfont::GlyphKey {
font_key: *font,
c: c as char, | random_line_split |
sourcemap.rs | sourcemap::SourceMap;
use swc_common::{comments::SingleThreadedComments, source_map::SourceMapGenConfig};
use swc_ecma_ast::EsVersion;
use swc_ecma_codegen::{self, text_writer::WriteJs, Emitter};
use swc_ecma_parser::{lexer::Lexer, Parser, Syntax};
use swc_ecma_testing::{exec_node_js, JsExecOptions};
static IGNORED_PASS_TESTS: &[&str] = &[
// Temporally ignored
"16c7073c546fdd58.js",
"369fd0a1e40030d8.js",
"3df03e7e138b7760.js",
"5333f04581124314.js",
"a157424306915066.js",
"ce5f3bc27d5ccaac.js",
"d4e81043d808dc31.js",
// Stack size (Stupid parens)
"6b5e7e125097d439.js",
"714be6d28082eaa7.js",
"882910de7dd1aef9.js",
"dd3c63403db5c06e.js",
// Wrong tests (variable name or value is different)
"0339fa95c78c11bd.js",
"0426f15dac46e92d.js",
"0b4d61559ccce0f9.js",
"0f88c334715d2489.js",
"1093d98f5fc0758d.js",
"15d9592709b947a0.js",
"2179895ec5cc6276.js",
"247a3a57e8176ebd.js",
"441a92357939904a.js",
"47f974d6fc52e3e4.js",
"4e1a0da46ca45afe.js",
"5829d742ab805866.js",
"589dc8ad3b9aa28f.js",
"598a5cedba92154d.js",
"72d79750e81ef03d.js",
"7788d3c1e1247da9.js",
"7b72d7b43bedc895.js",
"7dab6e55461806c9.js",
"82c827ccaecbe22b.js",
"87a9b0d1d80812cc.js",
"8c80f7ee04352eba.js",
"96f5d93be9a54573.js",
"988e362ed9ddcac5.js",
"9bcae7c7f00b4e3c.js",
"a8a03a88237c4e8f.js",
"ad06370e34811a6a.js",
"b0fdc038ee292aba.js",
"b62c6dd890bef675.js",
"cb211fadccb029c7.js",
"ce968fcdf3a1987c.js",
"db3c01738aaf0b92.js",
"e1387fe892984e2b.js",
"e71c1d5f0b6b833c.js",
"e8ea384458526db0.js",
// We don't implement Annex B fully.
"1c1e2a43fe5515b6.js",
"3dabeca76119d501.js",
"52aeec7b8da212a2.js",
"59ae0289778b80cd.js",
"a4d62a651f69d815.js",
"c06df922631aeabc.js",
// swc_common issue - `\r` should be treated as a newline
"be2c3fff6426873e.js",
"db66e1e8f3f1faef.js",
"a7b8ce1d4c0f0bc2.js",
"6498dcc494193cb4.js",
"6a240463b40550d2.js",
// TODO: (maybe) fix span of `,`
"641ac9060a206183.js",
"e4cef19dab44335a.js",
"a6806d6fedbf6759.js",
"2dc0ded5a1bff643.js",
"547fa50af16beca7.js",
"547fa50af16beca7.js",
"8c8a7a2941fb6d64.js",
"9e98dbfde77e3dfe.js",
"d9eb39b11bc766f4.js",
"f9888fa1a1e366e7.js",
"78cf02220fb0937c.js",
// TODO(kdy1): Non-ascii char count
"58cb05d17f7ec010.js",
"4d2c7020de650d40.js",
"dafb7abe5b9b44f5.js",
// Our one is better
"1efde9ddd9d6e6ce.module.js",
"d010d377bcfd5565.js",
"ce0aaec02d5d4465.js",
"edd1f39f90576180.js",
"290fdc5a2f826ead.js",
"e71a91c61343cdb1.js",
"409f30dc7efe75d5.js",
"03608b6e222ae700.js",
"e54c1a2fc15cd4b8.js",
"e08e181172bad2b1.js",
"cc793d44a11617e7.js",
"54e70df597a4f9a3.js",
"efef19e06f58fdd9.js",
"e0fc2148b455a6be.js",
"10857a84ed2962f1.js",
"d7c7ff252e84e81d.js",
"0aa6aab640155051.js",
"c80d9415dde647cd.js",
"09e84f25af85b836.js",
"ce8c443eb361e1a2.js",
"affd557fd820e1f2.js",
"ec99a663d6f3983d.js",
"01fd8e8a0a42307b.js",
"e01c7172cf204b92.js",
"12d5bedf1812952a.js",
"df20c9b7a7d534cb.js",
"c767fa4d683aa3ce.js",
"bf8ffad512a5f568.js",
"c8513472857eae9c.js",
"b86b0122e80c330e.js",
"aa7e721756949024.js",
"a830df7cf2e74c9f.js",
"845631d1a33b3409.js",
"066b76285ce79182.js",
"fe2d3b945530c806.js",
"bd28a7d19ac0d50b.js",
"06c7efc128ce74a0.js",
"075c7204d0b0af60.js",
"0827a8316cca777a.js",
"b9a0cb6df76a73d2.js",
"bf210a4f0cf9e352.js",
"6edc155d463535cb.js",
"b8f8dfc41df97add.js",
"b549d045fc8e93bf.js",
"e42f306327c0f578.js",
"9a9cb616daadf90a.js",
"d2ae1c7b6e55143f.js",
"a445a478b4ce0c58.js",
"0d137e8a97ffe083.js",
"b7a6a807ae6db312.js",
"bb8b546cf9db5996.js",
"50ac15a08f7c812f.js",
"a2cb5a14559c6a50.js",
"bbff5671643cc2ea.js",
"c2f12d66ce17d5ab.js",
"13045bfdda0434e0.js",
"10d6486502949e74.js",
"119e9dce4feae643.js",
"1223609b0f7a2129.js",
"177fef3d002eb873.js",
"19ffea7e9e887e08.js",
"1c6c67fcd71f2d08.js",
"1cdce2d337e64b4f.js",
"1f039e0eeb1bc271.js",
"227118dffd2c9935.js",
"250ced8c8e83b389.js",
"a2798917405b080b.js",
"ad6bf12aa7eda975.js",
"24fa28a37061a18f.js",
"252bb992a448270e.js",
"285648c16156804f.js",
"2d10fed2af94fbd1.js",
"3097f73926c93640.js",
"30aee1020fc69090.js",
"312f85fecc352681.js",
"317532451c2ce8ff.js",
"32b635a9667a9fb1.js",
"36224cf8215ad8e4.js",
"37e4a6eca1ece7e5.js",
"38284ea2d9914d86.js",
"3b57183c81070eec.js",
"3bbd75d597d54fe6.js",
"3c1e2ada0ac2b8e3.js",
"3e1a6f702041b599.js",
"3e3a99768a4a1502.js",
"3e69c5cc1a7ac103.js",
"3eac36e29398cdc5.js",
"3ff52d86c77678bd.js",
"43023cd549deee77.js",
"44af28febe2288cc.js",
"478ede4cfe7906d5.js",
"4869454dd215468e.js",
"48b6f8ce65d3b3ee.js",
"4c71e11fbbc56349.js",
"4d833cbc56caaaf9.js",
"4e7c58761e24d77c.js",
"4e7c58761e24d77c.js",
"5641ad33abcd1752.js",
"587400d1c019785a.js",
"58ed6ffb30191684.js",
"5b8d2b991d2c1f5b.js",
"5f730961df66e8e8.js",
"597108fd45a6e79b.js",
"60dcd48a3f6af44f.js",
"62d7c1ee4e1626c4.js",
"665f4940c7cf30c9.js",
"64cc57f82a54b7fb.js",
"66d2dbcb692491ec.module.js",
"697b3d30c1d06918.js",
"698a8cfb0705c277.js",
"69bbdc7c34ed23cc.js",
"6a323491fe75918a.js",
"6b76b8761a049c19.js",
"70bf2c409480ae10.js",
"74c5ebda713c8bd7.js",
"75172741c27c7703.js",
"753a8b016a700975.js",
"77c661b2fbe3dd3a.js",
"784a059faa166072.js",
"7855fbf5ea10e622.js",
"7cd7c68a6131f816.js",
"7df2a606ecc6cd84.js",
"7dfb625b91c5c879.js",
"7fdf990c6f42edcd.module.js",
"80d2351a5ae68524.js",
"84250e15785d8a9e.js",
"85263ecacc7a4dc5.js",
"8628cd459b39ffe8.js",
"870a0b8d891753e9.js",
"8d14286a8cc6ee9d.js",
"8d67ad04bfc356c9.js",
"8ecaef2617d8c6a7.js",
"918e105a2ff6c64a.js",
"92fd8e24864fde0a.js",
"94b8a654a87039b9.js",
"94cb828d5dcfd136.js",
"98df58b0c40fac90.js",
"9949a2e1a6844836.module.js",
"99cdfc40e20af6f5.js",
"9a666205cafd530f.js",
"a454d2e2ab3484e6.js",
"a54cca69085ad35a.js",
"a86a29773d1168d3.js",
"b205355de22689d1.js",
"b93d116fd0409637.js",
"c85bc4de504befc7.js",
"c8689b6da6fd227a.js",
"cda499c521ff60c7.js",
"d4b898b45172a637.js",
"e2ac0bea41202dc9.js",
"f01d9f3c7b2b2717.js",
"f15772354efa5ecf.js",
"f17ec9517a3339d9.js",
"fa5b398eeef697a6.js",
"fa9eaf58f51d6926.js",
"faa4a026e1e86145.js",
"fada2c7bbfabe14a.js",
"fb8db7a71f3755fc.js",
"fbde237f11796df9.js",
"fd5ea844fcc07d3d.js",
"6c5f0dd83c417a5a.js",
"78eb22badc114b6f.js",
"7afd38d79e6795a8.js",
"80950061e291542b.js",
"8a0fc8ea31727188.module.js",
"af97a3752e579223.js",
"bbffb851469a3f0e.js",
"bc302492d441d561.js",
"be2fd5888f434cbd.js",
"f3260491590325af.js",
];
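// Note: a few entries above appear twice (e.g. "547fa50af16beca7.js" and
// "4e7c58761e24d77c.js"); harmless for a `contains` lookup, but they could
// be deduplicated.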
#[testing::fixture("../swc_ecma_parser/tests/test262-parser/pass/*.js")]
fn identity(entry: PathBuf) {
let file_name = entry
.file_name()
.unwrap()
.to_str()
.expect("to_str() failed")
.to_string();
let input = read_to_string(&entry).unwrap();
let ignore = IGNORED_PASS_TESTS.contains(&&*file_name);
if ignore {
return;
}
let is_module = file_name.contains("module");
let msg = format!(
"\n\n========== Running codegen test {}\nSource:\n{}\n",
file_name, input
);
let mut wr = vec![];
::testing::run_test(false, |cm, handler| {
let fm = cm.load_file(&entry).expect("failed to load file");
eprintln!(
"{}\nPos: {:?} ~ {:?} (L{})",
msg,
fm.start_pos,
fm.end_pos,
fm.count_lines()
);
let (expected_code, expected_map, visualizer_url_for_expected) =
match get_expected(&fm.src, is_module) {
Some(v) => v,
None => return Ok(()),
};
println!("Expected code:\n{}", expected_code);
let expected_tokens = print_source_map(&expected_map);
let comments = SingleThreadedComments::default();
let lexer = Lexer::new(
Syntax::default(),
Default::default(),
(&*fm).into(),
Some(&comments),
);
let mut parser: Parser<Lexer> = Parser::new_from(lexer);
let mut src_map = vec![];
{
let mut wr = Box::new(swc_ecma_codegen::text_writer::JsWriter::new(
cm.clone(),
"\n",
&mut wr,
Some(&mut src_map),
)) as Box<dyn WriteJs>;
wr = Box::new(swc_ecma_codegen::text_writer::omit_trailing_semi(wr));
let mut emitter = Emitter {
cfg: swc_ecma_codegen::Config {
minify: true,
target: EsVersion::Es5,
ascii_only: true,
..Default::default()
},
cm: cm.clone(),
wr,
comments: None,
};
// Parse source
if is_module {
emitter
.emit_module(
&parser
.parse_module()
.map_err(|e| e.into_diagnostic(handler).emit())?,
)
.unwrap();
} else {
emitter
.emit_script(
&parser
.parse_script()
.map_err(|e| e.into_diagnostic(handler).emit())?,
)
.unwrap();
}
}
let actual_code = String::from_utf8(wr).unwrap();
let actual_map = cm.build_source_map_with_config(&src_map, None, SourceMapConfigImpl);
let visualizer_url_for_actual = visualizer_url(&actual_code, &actual_map);
let actual_tokens = print_source_map(&actual_map);
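// Compare the two token lists by set difference: tokens present in only one
// of the maps are printed below so that mismatches are easy to eyeball.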
let common_tokens = actual_tokens
.iter()
.filter(|a| expected_tokens.contains(&**a))
.map(|v| v.to_string())
.collect::<FxHashSet<_>>();
let actual_tokens_diff = actual_tokens
.iter()
.filter(|a| !common_tokens.contains(&**a))
.map(|v| v.to_string())
.collect::<Vec<_>>();
let expected_tokens_diff = expected_tokens
.iter()
.filter(|a| !common_tokens.contains(&**a))
.map(|v| v.to_string())
.collect::<Vec<_>>();
eprintln!("---- Actual -----");
for s in actual_tokens_diff {
eprintln!("{}", s);
}
eprintln!("---- Expected -----");
for s in expected_tokens_diff {
eprintln!("{}", s);
}
dbg!(&src_map);
if actual_code != expected_code {
// The generated code differs from the expected output, so the source
// maps aren't comparable; skip the map comparison for this test.
eprintln!("Actual code:\n{}", actual_code);
eprintln!("Expected code:\n{}", expected_code);
return Ok(());
}
eprintln!(
"----- Visualizer -----\nExpected: {}\nActual: {}",
visualizer_url_for_expected, visualizer_url_for_actual
);
assert_eq_same_map(&expected_map, &actual_map);
Ok(())
})
.expect("failed to run test");
}
fn get_expected(code: &str, is_module: bool) -> Option<(String, SourceMap, String)> {
let output = exec_node_js(
include_str!("./srcmap.mjs"),
JsExecOptions {
cache: true,
module: true,
args: vec![
code.to_string(),
if is_module {
"module".into()
} else {
"script".into()
},
],
},
)
.ok()?;
let v = serde_json::from_str::<serde_json::Map<String, serde_json::Value>>(&output).unwrap();
let code = v.get("code").unwrap().as_str().unwrap();
let map = v.get("map").unwrap().as_str().unwrap();
let map = SourceMap::from_slice(map.as_bytes()).expect("invalid sourcemap");
let visualizer_url = visualizer_url(code, &map);
Some((code.to_string(), map, visualizer_url))
}
fn print_source_map(map: &SourceMap) -> Vec<String> {
let mut v = map
.tokens()
.map(|t| {
format!(
"Token: {}:{} => {}:{}",
t.get_src_line(),
t.get_src_col(),
t.get_dst_line(),
t.get_dst_col()
)
})
.collect::<Vec<_>>();
v.sort();
v
}
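// Hedged usage sketch (not part of the original test; `demo_print_source_map`
// is a hypothetical helper). The VLQ mappings "AAAA" decode to a single token
// mapping generated 0:0 to source 0:0, so this prints "Token: 0:0 => 0:0".
#[allow(dead_code)]
fn demo_print_source_map() {
let raw = br#"{"version":3,"sources":["a.js"],"names":[],"mappings":"AAAA"}"#;
let map = SourceMap::from_slice(raw).expect("invalid sourcemap");
for line in print_source_map(&map) {
eprintln!("{}", line);
}
}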
fn assert_eq_same_map(expected: &SourceMap, actual: &SourceMap) {
for expected_token in expected.tokens() {
let actual_token = actual
.lookup_token(expected_token.get_dst_line(), expected_token.get_dst_col())
.unwrap_or_else(|| panic!("token not found: {:?}", expected_token));
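// Tokens that point at source 0:0 are skipped below; assumption: they act
// as sentinel/unattributed mappings that the two generators may place
// differently, so comparing them would cause spurious failures.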
if expected_token.get_src_line() == 0 && expected_token.get_src_col() == 0 {
continue;
}
assert_eq!(
expected_token.get_src_line(),
actual_token.get_src_line(),
"line mismatch at {}:{}",
expected_token.get_dst_line(),
expected_token.get_dst_col()
);
assert_eq!(
expected_token.get_src_col(),
actual_token.get_src_col(),
"col mismatch at {}:{}",
expected_token.get_dst_line(),
expected_token.get_dst_col()
);
}
}
/// Creates a url for https://evanw.github.io/source-map-visualization/
fn visualizer_url(code: &str, map: &SourceMap) -> String {
let map = {
let mut buf = vec![];
map.to_writer(&mut buf).unwrap();
String::from_utf8(buf).unwrap()
};
let code_len = format!("{}\0", code.len());
let map_len = format!("{}\0", map.len());
let hash = base64::encode(format!("{}{}{}{}", code_len, code, map_len, map));
format!("https://evanw.github.io/source-map-visualization/#{}", hash)
}
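// Payload layout sketch (inferred from the code above; the visualizer's
// format is an assumption here): base64("<code_len>\0<code><map_len>\0<map>"),
// e.g. for code "x" and a 70-byte map the pre-encoding bytes are
// "1\0x" followed by "70\0<map>".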
struct SourceMapConfigImpl;
impl SourceMapGenConfig for SourceMapConfigImpl {
fn file_name_to_source(&self, f: &swc_common::FileName) -> String {
f.to_string()
}
fn inline_sources_content(&self, _: &swc_common::FileName) -> bool | {
true
} | identifier_body |
|
sourcemap.rs | string, path::PathBuf};
use rustc_hash::FxHashSet;
use sourcemap::SourceMap;
use swc_common::{comments::SingleThreadedComments, source_map::SourceMapGenConfig};
use swc_ecma_ast::EsVersion;
use swc_ecma_codegen::{self, text_writer::WriteJs, Emitter};
use swc_ecma_parser::{lexer::Lexer, Parser, Syntax};
use swc_ecma_testing::{exec_node_js, JsExecOptions};
static IGNORED_PASS_TESTS: &[&str] = &[
// Temporally ignored
"16c7073c546fdd58.js",
"369fd0a1e40030d8.js",
"3df03e7e138b7760.js",
"5333f04581124314.js",
"a157424306915066.js",
"ce5f3bc27d5ccaac.js",
"d4e81043d808dc31.js",
// Stack size (Stupid parens)
"6b5e7e125097d439.js",
"714be6d28082eaa7.js",
"882910de7dd1aef9.js",
"dd3c63403db5c06e.js",
// Wrong tests (variable name or value is different)
"0339fa95c78c11bd.js",
"0426f15dac46e92d.js",
"0b4d61559ccce0f9.js",
"0f88c334715d2489.js",
"1093d98f5fc0758d.js",
"15d9592709b947a0.js",
"2179895ec5cc6276.js",
"247a3a57e8176ebd.js",
"441a92357939904a.js",
"47f974d6fc52e3e4.js",
"4e1a0da46ca45afe.js",
"5829d742ab805866.js",
"589dc8ad3b9aa28f.js",
"598a5cedba92154d.js",
"72d79750e81ef03d.js",
"7788d3c1e1247da9.js",
"7b72d7b43bedc895.js",
"7dab6e55461806c9.js",
"82c827ccaecbe22b.js",
"87a9b0d1d80812cc.js",
"8c80f7ee04352eba.js",
"96f5d93be9a54573.js",
"988e362ed9ddcac5.js",
"9bcae7c7f00b4e3c.js",
"a8a03a88237c4e8f.js",
"ad06370e34811a6a.js",
"b0fdc038ee292aba.js",
"b62c6dd890bef675.js",
"cb211fadccb029c7.js",
"ce968fcdf3a1987c.js",
"db3c01738aaf0b92.js",
"e1387fe892984e2b.js",
"e71c1d5f0b6b833c.js",
"e8ea384458526db0.js",
// We don't implement Annex B fully.
"1c1e2a43fe5515b6.js",
"3dabeca76119d501.js",
"52aeec7b8da212a2.js",
"59ae0289778b80cd.js",
"a4d62a651f69d815.js",
"c06df922631aeabc.js",
// swc_common issue - `\r` should be treated as a newline
"be2c3fff6426873e.js",
"db66e1e8f3f1faef.js",
"a7b8ce1d4c0f0bc2.js",
"6498dcc494193cb4.js",
"6a240463b40550d2.js",
// TODO: (maybe) fix span of `,`
"641ac9060a206183.js",
"e4cef19dab44335a.js",
"a6806d6fedbf6759.js",
"2dc0ded5a1bff643.js",
"547fa50af16beca7.js",
"547fa50af16beca7.js",
"8c8a7a2941fb6d64.js",
"9e98dbfde77e3dfe.js",
"d9eb39b11bc766f4.js",
"f9888fa1a1e366e7.js",
"78cf02220fb0937c.js",
// TODO(kdy1): Non-ascii char count
"58cb05d17f7ec010.js",
"4d2c7020de650d40.js",
"dafb7abe5b9b44f5.js",
// Our one is better
"1efde9ddd9d6e6ce.module.js",
"d010d377bcfd5565.js",
"ce0aaec02d5d4465.js",
"edd1f39f90576180.js",
"290fdc5a2f826ead.js",
"e71a91c61343cdb1.js",
"409f30dc7efe75d5.js",
"03608b6e222ae700.js",
"e54c1a2fc15cd4b8.js",
"e08e181172bad2b1.js",
"cc793d44a11617e7.js",
"54e70df597a4f9a3.js",
"efef19e06f58fdd9.js",
"e0fc2148b455a6be.js",
"10857a84ed2962f1.js",
"d7c7ff252e84e81d.js",
"0aa6aab640155051.js",
"c80d9415dde647cd.js",
"09e84f25af85b836.js",
"ce8c443eb361e1a2.js",
"affd557fd820e1f2.js",
"ec99a663d6f3983d.js",
"01fd8e8a0a42307b.js",
"e01c7172cf204b92.js",
"12d5bedf1812952a.js",
"df20c9b7a7d534cb.js",
"c767fa4d683aa3ce.js",
"bf8ffad512a5f568.js",
"c8513472857eae9c.js",
"b86b0122e80c330e.js",
"aa7e721756949024.js",
"a830df7cf2e74c9f.js",
"845631d1a33b3409.js",
"066b76285ce79182.js",
"fe2d3b945530c806.js",
"bd28a7d19ac0d50b.js",
"06c7efc128ce74a0.js",
"075c7204d0b0af60.js",
"0827a8316cca777a.js",
"b9a0cb6df76a73d2.js",
"bf210a4f0cf9e352.js",
"6edc155d463535cb.js",
"b8f8dfc41df97add.js",
"b549d045fc8e93bf.js",
"e42f306327c0f578.js",
"9a9cb616daadf90a.js",
"d2ae1c7b6e55143f.js",
"a445a478b4ce0c58.js",
"0d137e8a97ffe083.js",
"b7a6a807ae6db312.js",
"bb8b546cf9db5996.js",
"50ac15a08f7c812f.js",
"a2cb5a14559c6a50.js",
"bbff5671643cc2ea.js",
"c2f12d66ce17d5ab.js",
"13045bfdda0434e0.js",
"10d6486502949e74.js",
"119e9dce4feae643.js",
"1223609b0f7a2129.js",
"177fef3d002eb873.js",
"19ffea7e9e887e08.js",
"1c6c67fcd71f2d08.js",
"1cdce2d337e64b4f.js",
"1f039e0eeb1bc271.js",
"227118dffd2c9935.js",
"250ced8c8e83b389.js",
"a2798917405b080b.js",
"ad6bf12aa7eda975.js",
"24fa28a37061a18f.js",
"252bb992a448270e.js",
"285648c16156804f.js",
"2d10fed2af94fbd1.js",
"3097f73926c93640.js",
"30aee1020fc69090.js",
"312f85fecc352681.js",
"317532451c2ce8ff.js",
"32b635a9667a9fb1.js",
"36224cf8215ad8e4.js",
"37e4a6eca1ece7e5.js",
"38284ea2d9914d86.js",
"3b57183c81070eec.js",
"3bbd75d597d54fe6.js",
"3c1e2ada0ac2b8e3.js",
"3e1a6f702041b599.js",
"3e3a99768a4a1502.js",
"3e69c5cc1a7ac103.js",
"3eac36e29398cdc5.js",
"3ff52d86c77678bd.js",
"43023cd549deee77.js",
"44af28febe2288cc.js",
"478ede4cfe7906d5.js",
"4869454dd215468e.js",
"48b6f8ce65d3b3ee.js",
"4c71e11fbbc56349.js",
"4d833cbc56caaaf9.js",
"4e7c58761e24d77c.js",
"4e7c58761e24d77c.js",
"5641ad33abcd1752.js",
"587400d1c019785a.js",
"58ed6ffb30191684.js",
"5b8d2b991d2c1f5b.js",
"5f730961df66e8e8.js",
"597108fd45a6e79b.js",
"60dcd48a3f6af44f.js",
"62d7c1ee4e1626c4.js",
"665f4940c7cf30c9.js",
"64cc57f82a54b7fb.js",
"66d2dbcb692491ec.module.js",
"697b3d30c1d06918.js",
"698a8cfb0705c277.js",
"69bbdc7c34ed23cc.js",
"6a323491fe75918a.js",
"6b76b8761a049c19.js",
"70bf2c409480ae10.js",
"74c5ebda713c8bd7.js",
"75172741c27c7703.js",
"753a8b016a700975.js",
"77c661b2fbe3dd3a.js",
"784a059faa166072.js",
"7855fbf5ea10e622.js",
"7cd7c68a6131f816.js",
"7df2a606ecc6cd84.js",
"7dfb625b91c5c879.js",
"7fdf990c6f42edcd.module.js",
"80d2351a5ae68524.js",
"84250e15785d8a9e.js",
"85263ecacc7a4dc5.js",
"8628cd459b39ffe8.js",
"870a0b8d891753e9.js",
"8d14286a8cc6ee9d.js",
"8d67ad04bfc356c9.js",
"8ecaef2617d8c6a7.js",
"918e105a2ff6c64a.js",
"92fd8e24864fde0a.js",
"94b8a654a87039b9.js",
"94cb828d5dcfd136.js",
"98df58b0c40fac90.js",
"9949a2e1a6844836.module.js",
"99cdfc40e20af6f5.js",
"9a666205cafd530f.js",
"a454d2e2ab3484e6.js",
"a54cca69085ad35a.js",
"a86a29773d1168d3.js",
"b205355de22689d1.js",
"b93d116fd0409637.js",
"c85bc4de504befc7.js",
"c8689b6da6fd227a.js",
"cda499c521ff60c7.js",
"d4b898b45172a637.js",
"e2ac0bea41202dc9.js",
"f01d9f3c7b2b2717.js",
"f15772354efa5ecf.js",
"f17ec9517a3339d9.js",
"fa5b398eeef697a6.js",
"fa9eaf58f51d6926.js",
"faa4a026e1e86145.js",
"fada2c7bbfabe14a.js",
"fb8db7a71f3755fc.js",
"fbde237f11796df9.js",
"fd5ea844fcc07d3d.js",
"6c5f0dd83c417a5a.js",
"78eb22badc114b6f.js",
"7afd38d79e6795a8.js",
"80950061e291542b.js",
"8a0fc8ea31727188.module.js",
"af97a3752e579223.js",
"bbffb851469a3f0e.js",
"bc302492d441d561.js",
"be2fd5888f434cbd.js",
"f3260491590325af.js",
];
#[testing::fixture("../swc_ecma_parser/tests/test262-parser/pass/*.js")]
fn identity(entry: PathBuf) {
let file_name = entry
.file_name()
.unwrap()
.to_str()
.expect("to_str() failed")
.to_string();
let input = read_to_string(&entry).unwrap();
let ignore = IGNORED_PASS_TESTS.contains(&&*file_name);
if ignore {
return;
}
let is_module = file_name.contains("module");
let msg = format!(
"\n\n========== Running codegen test {}\nSource:\n{}\n",
file_name, input
);
let mut wr = vec![];
::testing::run_test(false, |cm, handler| {
let fm = cm.load_file(&entry).expect("failed to load file");
eprintln!(
"{}\nPos: {:?} ~ {:?} (L{})",
msg,
fm.start_pos,
fm.end_pos,
fm.count_lines()
);
let (expected_code, expected_map, visualizer_url_for_expected) =
match get_expected(&fm.src, is_module) {
Some(v) => v,
None => return Ok(()),
};
println!("Expected code:\n{}", expected_code);
let expected_tokens = print_source_map(&expected_map);
let comments = SingleThreadedComments::default();
let lexer = Lexer::new(
Syntax::default(),
Default::default(),
(&*fm).into(),
Some(&comments),
);
let mut parser: Parser<Lexer> = Parser::new_from(lexer);
let mut src_map = vec![];
{
let mut wr = Box::new(swc_ecma_codegen::text_writer::JsWriter::new(
cm.clone(),
"\n",
&mut wr,
Some(&mut src_map),
)) as Box<dyn WriteJs>;
wr = Box::new(swc_ecma_codegen::text_writer::omit_trailing_semi(wr));
let mut emitter = Emitter {
cfg: swc_ecma_codegen::Config {
minify: true,
target: EsVersion::Es5,
ascii_only: true,
..Default::default()
},
cm: cm.clone(),
wr,
comments: None,
};
// Parse source
if is_module {
emitter
.emit_module(
&parser
.parse_module()
.map_err(|e| e.into_diagnostic(handler).emit())?,
)
.unwrap();
} else {
emitter
.emit_script(
&parser
.parse_script()
.map_err(|e| e.into_diagnostic(handler).emit())?,
)
.unwrap();
}
}
let actual_code = String::from_utf8(wr).unwrap();
let actual_map = cm.build_source_map_with_config(&src_map, None, SourceMapConfigImpl);
let visualizer_url_for_actual = visualizer_url(&actual_code, &actual_map);
let actual_tokens = print_source_map(&actual_map);
let common_tokens = actual_tokens
.iter()
.filter(|a| expected_tokens.contains(&**a))
.map(|v| v.to_string())
.collect::<FxHashSet<_>>();
let actual_tokens_diff = actual_tokens
.iter()
.filter(|a| !common_tokens.contains(&**a))
.map(|v| v.to_string())
.collect::<Vec<_>>();
let expected_tokens_diff = expected_tokens
.iter()
.filter(|a| !common_tokens.contains(&**a))
.map(|v| v.to_string())
.collect::<Vec<_>>();
eprintln!("---- Actual -----");
for s in actual_tokens_diff {
eprintln!("{}", s);
}
eprintln!("---- Expected -----");
for s in expected_tokens_diff {
eprintln!("{}", s);
}
dbg!(&src_map);
if actual_code != expected_code {
// The generated code differs from the expected output, so the source
// maps aren't comparable; skip the map comparison for this test.
eprintln!("Actual code:\n{}", actual_code);
eprintln!("Expected code:\n{}", expected_code);
return Ok(());
}
eprintln!(
"----- Visualizer -----\nExpected: {}\nActual: {}",
visualizer_url_for_expected, visualizer_url_for_actual
);
assert_eq_same_map(&expected_map, &actual_map);
Ok(())
})
.expect("failed to run test");
}
fn get_expected(code: &str, is_module: bool) -> Option<(String, SourceMap, String)> {
let output = exec_node_js(
include_str!("./srcmap.mjs"),
JsExecOptions {
cache: true,
module: true,
args: vec![
code.to_string(),
if is_module {
"module".into()
} else {
"script".into()
},
],
},
)
.ok()?;
let v = serde_json::from_str::<serde_json::Map<String, serde_json::Value>>(&output).unwrap();
let code = v.get("code").unwrap().as_str().unwrap();
let map = v.get("map").unwrap().as_str().unwrap();
let map = SourceMap::from_slice(map.as_bytes()).expect("invalid sourcemap");
let visualizer_url = visualizer_url(code, &map);
Some((code.to_string(), map, visualizer_url))
}
fn print_source_map(map: &SourceMap) -> Vec<String> {
let mut v = map
.tokens()
.map(|t| {
format!(
"Token: {}:{} => {}:{}",
t.get_src_line(),
t.get_src_col(),
t.get_dst_line(),
t.get_dst_col()
)
})
.collect::<Vec<_>>();
v.sort();
v
}
fn assert_eq_same_map(expected: &SourceMap, actual: &SourceMap) {
for expected_token in expected.tokens() {
let actual_token = actual
.lookup_token(expected_token.get_dst_line(), expected_token.get_dst_col())
.unwrap_or_else(|| panic!("token not found: {:?}", expected_token));
if expected_token.get_src_line() == 0 && expected_token.get_src_col() == 0 {
continue;
}
assert_eq!(
expected_token.get_src_line(),
actual_token.get_src_line(),
"line mismatch at {}:{}",
expected_token.get_dst_line(),
expected_token.get_dst_col()
);
assert_eq!(
expected_token.get_src_col(),
actual_token.get_src_col(),
"col mismatch at {}:{}",
expected_token.get_dst_line(),
expected_token.get_dst_col()
);
}
}
/// Creates a url for https://evanw.github.io/source-map-visualization/
fn visualizer_url(code: &str, map: &SourceMap) -> String {
let map = {
let mut buf = vec![];
map.to_writer(&mut buf).unwrap();
String::from_utf8(buf).unwrap()
};
let code_len = format!("{}\0", code.len());
let map_len = format!("{}\0", map.len());
let hash = base64::encode(format!("{}{}{}{}", code_len, code, map_len, map));
format!("https://evanw.github.io/source-map-visualization/#{}", hash)
}
struct | ;
impl SourceMapGenConfig for SourceMapConfigImpl {
fn file_name_to_source(&self, f: &swc_common::FileName) -> String {
f.to_string()
}
fn inline_sources_content(&self, _: &swc_common::FileName | SourceMapConfigImpl | identifier_name |
sourcemap.rs | to_string, path::PathBuf};
use rustc_hash::FxHashSet;
use sourcemap::SourceMap;
use swc_common::{comments::SingleThreadedComments, source_map::SourceMapGenConfig};
use swc_ecma_ast::EsVersion;
use swc_ecma_codegen::{self, text_writer::WriteJs, Emitter};
use swc_ecma_parser::{lexer::Lexer, Parser, Syntax};
use swc_ecma_testing::{exec_node_js, JsExecOptions};
static IGNORED_PASS_TESTS: &[&str] = &[
// Temporally ignored
"16c7073c546fdd58.js",
"369fd0a1e40030d8.js",
"3df03e7e138b7760.js",
"5333f04581124314.js",
"a157424306915066.js",
"ce5f3bc27d5ccaac.js",
"d4e81043d808dc31.js",
// Stack size (Stupid parens)
"6b5e7e125097d439.js",
"714be6d28082eaa7.js",
"882910de7dd1aef9.js",
"dd3c63403db5c06e.js",
// Wrong tests (variable name or value is different)
"0339fa95c78c11bd.js",
"0426f15dac46e92d.js",
"0b4d61559ccce0f9.js",
"0f88c334715d2489.js",
"1093d98f5fc0758d.js",
"15d9592709b947a0.js",
"2179895ec5cc6276.js",
"247a3a57e8176ebd.js",
"441a92357939904a.js",
"47f974d6fc52e3e4.js",
"4e1a0da46ca45afe.js",
"5829d742ab805866.js",
"589dc8ad3b9aa28f.js",
"598a5cedba92154d.js",
"72d79750e81ef03d.js",
"7788d3c1e1247da9.js",
"7b72d7b43bedc895.js",
"7dab6e55461806c9.js",
"82c827ccaecbe22b.js",
"87a9b0d1d80812cc.js",
"8c80f7ee04352eba.js",
"96f5d93be9a54573.js",
"988e362ed9ddcac5.js",
"9bcae7c7f00b4e3c.js",
"a8a03a88237c4e8f.js",
"ad06370e34811a6a.js",
"b0fdc038ee292aba.js",
"b62c6dd890bef675.js",
"cb211fadccb029c7.js",
"ce968fcdf3a1987c.js",
"db3c01738aaf0b92.js",
"e1387fe892984e2b.js",
"e71c1d5f0b6b833c.js",
"e8ea384458526db0.js",
// We don't implement Annex B fully.
"1c1e2a43fe5515b6.js",
"3dabeca76119d501.js",
"52aeec7b8da212a2.js",
"59ae0289778b80cd.js",
"a4d62a651f69d815.js",
"c06df922631aeabc.js",
// swc_common issue - `\r` should be treated as a newline
"be2c3fff6426873e.js",
"db66e1e8f3f1faef.js",
"a7b8ce1d4c0f0bc2.js",
"6498dcc494193cb4.js",
"6a240463b40550d2.js",
// TODO: (maybe) fix span of `,`
"641ac9060a206183.js",
"e4cef19dab44335a.js",
"a6806d6fedbf6759.js",
"2dc0ded5a1bff643.js",
"547fa50af16beca7.js",
"547fa50af16beca7.js",
"8c8a7a2941fb6d64.js",
"9e98dbfde77e3dfe.js",
"d9eb39b11bc766f4.js",
"f9888fa1a1e366e7.js",
"78cf02220fb0937c.js",
// TODO(kdy1): Non-ascii char count
"58cb05d17f7ec010.js",
"4d2c7020de650d40.js",
"dafb7abe5b9b44f5.js",
// Our one is better
"1efde9ddd9d6e6ce.module.js",
"d010d377bcfd5565.js",
"ce0aaec02d5d4465.js",
"edd1f39f90576180.js",
"290fdc5a2f826ead.js",
"e71a91c61343cdb1.js",
"409f30dc7efe75d5.js",
"03608b6e222ae700.js",
"e54c1a2fc15cd4b8.js",
"e08e181172bad2b1.js",
"cc793d44a11617e7.js",
"54e70df597a4f9a3.js",
"efef19e06f58fdd9.js",
"e0fc2148b455a6be.js",
"10857a84ed2962f1.js",
"d7c7ff252e84e81d.js",
"0aa6aab640155051.js",
"c80d9415dde647cd.js",
"09e84f25af85b836.js",
"ce8c443eb361e1a2.js",
"affd557fd820e1f2.js",
"ec99a663d6f3983d.js",
"01fd8e8a0a42307b.js",
"e01c7172cf204b92.js",
"12d5bedf1812952a.js",
"df20c9b7a7d534cb.js",
"c767fa4d683aa3ce.js",
"bf8ffad512a5f568.js",
"c8513472857eae9c.js",
"b86b0122e80c330e.js",
"aa7e721756949024.js",
"a830df7cf2e74c9f.js",
"845631d1a33b3409.js",
"066b76285ce79182.js",
"fe2d3b945530c806.js",
"bd28a7d19ac0d50b.js",
"06c7efc128ce74a0.js",
"075c7204d0b0af60.js",
"0827a8316cca777a.js",
"b9a0cb6df76a73d2.js",
"bf210a4f0cf9e352.js",
"6edc155d463535cb.js",
"b8f8dfc41df97add.js",
"b549d045fc8e93bf.js",
"e42f306327c0f578.js",
"9a9cb616daadf90a.js",
"d2ae1c7b6e55143f.js",
"a445a478b4ce0c58.js",
"0d137e8a97ffe083.js",
"b7a6a807ae6db312.js",
"bb8b546cf9db5996.js",
"50ac15a08f7c812f.js",
"a2cb5a14559c6a50.js",
"bbff5671643cc2ea.js",
"c2f12d66ce17d5ab.js",
"13045bfdda0434e0.js",
"10d6486502949e74.js",
"119e9dce4feae643.js",
"1223609b0f7a2129.js",
"177fef3d002eb873.js",
"19ffea7e9e887e08.js",
"1c6c67fcd71f2d08.js",
"1cdce2d337e64b4f.js",
"1f039e0eeb1bc271.js",
"227118dffd2c9935.js",
"250ced8c8e83b389.js",
"a2798917405b080b.js",
"ad6bf12aa7eda975.js",
"24fa28a37061a18f.js",
"252bb992a448270e.js",
"285648c16156804f.js",
"2d10fed2af94fbd1.js",
"3097f73926c93640.js",
"30aee1020fc69090.js",
"312f85fecc352681.js",
"317532451c2ce8ff.js", | "38284ea2d9914d86.js",
"3b57183c81070eec.js",
"3bbd75d597d54fe6.js",
"3c1e2ada0ac2b8e3.js",
"3e1a6f702041b599.js",
"3e3a99768a4a1502.js",
"3e69c5cc1a7ac103.js",
"3eac36e29398cdc5.js",
"3ff52d86c77678bd.js",
"43023cd549deee77.js",
"44af28febe2288cc.js",
"478ede4cfe7906d5.js",
"4869454dd215468e.js",
"48b6f8ce65d3b3ee.js",
"4c71e11fbbc56349.js",
"4d833cbc56caaaf9.js",
"4e7c58761e24d77c.js",
"4e7c58761e24d77c.js",
"5641ad33abcd1752.js",
"587400d1c019785a.js",
"58ed6ffb30191684.js",
"5b8d2b991d2c1f5b.js",
"5f730961df66e8e8.js",
"597108fd45a6e79b.js",
"60dcd48a3f6af44f.js",
"62d7c1ee4e1626c4.js",
"665f4940c7cf30c9.js",
"64cc57f82a54b7fb.js",
"66d2dbcb692491ec.module.js",
"697b3d30c1d06918.js",
"698a8cfb0705c277.js",
"69bbdc7c34ed23cc.js",
"6a323491fe75918a.js",
"6b76b8761a049c19.js",
"70bf2c409480ae10.js",
"74c5ebda713c8bd7.js",
"75172741c27c7703.js",
"753a8b016a700975.js",
"77c661b2fbe3dd3a.js",
"784a059faa166072.js",
"7855fbf5ea10e622.js",
"7cd7c68a6131f816.js",
"7df2a606ecc6cd84.js",
"7dfb625b91c5c879.js",
"7fdf990c6f42edcd.module.js",
"80d2351a5ae68524.js",
"84250e15785d8a9e.js",
"85263ecacc7a4dc5.js",
"8628cd459b39ffe8.js",
"870a0b8d891753e9.js",
"8d14286a8cc6ee9d.js",
"8d67ad04bfc356c9.js",
"8ecaef2617d8c6a7.js",
"918e105a2ff6c64a.js",
"92fd8e24864fde0a.js",
"94b8a654a87039b9.js",
"94cb828d5dcfd136.js",
"98df58b0c40fac90.js",
"9949a2e1a6844836.module.js",
"99cdfc40e20af6f5.js",
"9a666205cafd530f.js",
"a454d2e2ab3484e6.js",
"a54cca69085ad35a.js",
"a86a29773d1168d3.js",
"b205355de22689d1.js",
"b93d116fd0409637.js",
"c85bc4de504befc7.js",
"c8689b6da6fd227a.js",
"cda499c521ff60c7.js",
"d4b898b45172a637.js",
"e2ac0bea41202dc9.js",
"f01d9f3c7b2b2717.js",
"f15772354efa5ecf.js",
"f17ec9517a3339d9.js",
"fa5b398eeef697a6.js",
"fa9eaf58f51d6926.js",
"faa4a026e1e86145.js",
"fada2c7bbfabe14a.js",
"fb8db7a71f3755fc.js",
"fbde237f11796df9.js",
"fd5ea844fcc07d3d.js",
"6c5f0dd83c417a5a.js",
"78eb22badc114b6f.js",
"7afd38d79e6795a8.js",
"80950061e291542b.js",
"8a0fc8ea31727188.module.js",
"af97a3752e579223.js",
"bbffb851469a3f0e.js",
"bc302492d441d561.js",
"be2fd5888f434cbd.js",
"f3260491590325af.js",
];
#[testing::fixture("../swc_ecma_parser/tests/test262-parser/pass/*.js")]
fn identity(entry: PathBuf) {
let file_name = entry
.file_name()
.unwrap()
.to_str()
.expect("to_str() failed")
.to_string();
let input = read_to_string(&entry).unwrap();
let ignore = IGNORED_PASS_TESTS.contains(&&*file_name);
if ignore {
return;
}
let is_module = file_name.contains("module");
let msg = format!(
"\n\n========== Running codegen test {}\nSource:\n{}\n",
file_name, input
);
let mut wr = vec![];
::testing::run_test(false, |cm, handler| {
let fm = cm.load_file(&entry).expect("failed to load file");
eprintln!(
"{}\nPos: {:?} ~ {:?} (L{})",
msg,
fm.start_pos,
fm.end_pos,
fm.count_lines()
);
let (expected_code, expected_map, visualizer_url_for_expected) =
match get_expected(&fm.src, is_module) {
Some(v) => v,
None => return Ok(()),
};
println!("Expected code:\n{}", expected_code);
let expected_tokens = print_source_map(&expected_map);
let comments = SingleThreadedComments::default();
let lexer = Lexer::new(
Syntax::default(),
Default::default(),
(&*fm).into(),
Some(&comments),
);
let mut parser: Parser<Lexer> = Parser::new_from(lexer);
let mut src_map = vec![];
{
let mut wr = Box::new(swc_ecma_codegen::text_writer::JsWriter::new(
cm.clone(),
"\n",
&mut wr,
Some(&mut src_map),
)) as Box<dyn WriteJs>;
wr = Box::new(swc_ecma_codegen::text_writer::omit_trailing_semi(wr));
let mut emitter = Emitter {
cfg: swc_ecma_codegen::Config {
minify: true,
target: EsVersion::Es5,
ascii_only: true,
..Default::default()
},
cm: cm.clone(),
wr,
comments: None,
};
// Parse source
if is_module {
emitter
.emit_module(
&parser
.parse_module()
.map_err(|e| e.into_diagnostic(handler).emit())?,
)
.unwrap();
} else {
emitter
.emit_script(
&parser
.parse_script()
.map_err(|e| e.into_diagnostic(handler).emit())?,
)
.unwrap();
}
}
let actual_code = String::from_utf8(wr).unwrap();
let actual_map = cm.build_source_map_with_config(&src_map, None, SourceMapConfigImpl);
let visualizer_url_for_actual = visualizer_url(&actual_code, &actual_map);
let actual_tokens = print_source_map(&actual_map);
let common_tokens = actual_tokens
.iter()
.filter(|a| expected_tokens.contains(&**a))
.map(|v| v.to_string())
.collect::<FxHashSet<_>>();
let actual_tokens_diff = actual_tokens
.iter()
.filter(|a| !common_tokens.contains(&**a))
.map(|v| v.to_string())
.collect::<Vec<_>>();
let expected_tokens_diff = expected_tokens
.iter()
.filter(|a| !common_tokens.contains(&**a))
.map(|v| v.to_string())
.collect::<Vec<_>>();
eprintln!("---- Actual -----");
for s in actual_tokens_diff {
eprintln!("{}", s);
}
eprintln!("---- Expected -----");
for s in expected_tokens_diff {
eprintln!("{}", s);
}
dbg!(&src_map);
if actual_code != expected_code {
// The generated code differs from the expected output, so the source
// maps aren't comparable; skip the map comparison for this test.
eprintln!("Actual code:\n{}", actual_code);
eprintln!("Expected code:\n{}", expected_code);
return Ok(());
}
eprintln!(
"----- Visualizer -----\nExpected: {}\nActual: {}",
visualizer_url_for_expected, visualizer_url_for_actual
);
assert_eq_same_map(&expected_map, &actual_map);
Ok(())
})
.expect("failed to run test");
}
fn get_expected(code: &str, is_module: bool) -> Option<(String, SourceMap, String)> {
let output = exec_node_js(
include_str!("./srcmap.mjs"),
JsExecOptions {
cache: true,
module: true,
args: vec![
code.to_string(),
if is_module {
"module".into()
} else {
"script".into()
},
],
},
)
.ok()?;
let v = serde_json::from_str::<serde_json::Map<String, serde_json::Value>>(&output).unwrap();
let code = v.get("code").unwrap().as_str().unwrap();
let map = v.get("map").unwrap().as_str().unwrap();
let map = SourceMap::from_slice(map.as_bytes()).expect("invalid sourcemap");
let visualizer_url = visualizer_url(code, &map);
Some((code.to_string(), map, visualizer_url))
}
fn print_source_map(map: &SourceMap) -> Vec<String> {
let mut v = map
.tokens()
.map(|t| {
format!(
"Token: {}:{} => {}:{}",
t.get_src_line(),
t.get_src_col(),
t.get_dst_line(),
t.get_dst_col()
)
})
.collect::<Vec<_>>();
v.sort();
v
}
fn assert_eq_same_map(expected: &SourceMap, actual: &SourceMap) {
for expected_token in expected.tokens() {
let actual_token = actual
.lookup_token(expected_token.get_dst_line(), expected_token.get_dst_col())
.unwrap_or_else(|| panic!("token not found: {:?}", expected_token));
if expected_token.get_src_line() == 0 && expected_token.get_src_col() == 0 {
continue;
}
assert_eq!(
expected_token.get_src_line(),
actual_token.get_src_line(),
"line mismatch at {}:{}",
expected_token.get_dst_line(),
expected_token.get_dst_col()
);
assert_eq!(
expected_token.get_src_col(),
actual_token.get_src_col(),
"col mismatch at {}:{}",
expected_token.get_dst_line(),
expected_token.get_dst_col()
);
}
}
/// Creates a url for https://evanw.github.io/source-map-visualization/
fn visualizer_url(code: &str, map: &SourceMap) -> String {
let map = {
let mut buf = vec![];
map.to_writer(&mut buf).unwrap();
String::from_utf8(buf).unwrap()
};
let code_len = format!("{}\0", code.len());
let map_len = format!("{}\0", map.len());
let hash = base64::encode(format!("{}{}{}{}", code_len, code, map_len, map));
format!("https://evanw.github.io/source-map-visualization/#{}", hash)
}
struct SourceMapConfigImpl;
impl SourceMapGenConfig for SourceMapConfigImpl {
fn file_name_to_source(&self, f: &swc_common::FileName) -> String {
f.to_string()
}
fn inline_sources_content(&self, _: &swc_common::FileName) | "32b635a9667a9fb1.js",
"36224cf8215ad8e4.js",
"37e4a6eca1ece7e5.js", | random_line_split |
audio.rs | use crate::config::Config;
use crate::test::InputSampleStream;
use failure::Error;
use num::{Complex, Zero};
use portaudio as pa;
use std::sync::mpsc::{Receiver, Sender};
const CHANNELS: i32 = 1;
const FRAMES: u32 = 256;
const INTERLEAVED: bool = true;
fn run<'c, T>(
mut modulator: Modulate<'c, T>,
rx_sender: Sender<f32>,
sample_rate: f32,
) -> Result<(), pa::Error>
where
T: Iterator<Item = Complex<f32>>,
{
let pa = pa::PortAudio::new()?;
println!("PortAudio");
println!("version: {}", pa.version());
println!("version text: {:?}", pa.version_text());
println!("host count: {}", pa.host_api_count()?);
let default_host = pa.default_host_api()?;
println!("default host: {:#?}", pa.host_api_info(default_host));
let def_input = pa.default_input_device()?;
let input_info = pa.device_info(def_input)?;
println!("Default input device info: {:#?}", &input_info);
// Construct the input stream parameters.
let latency = input_info.default_low_input_latency;
let input_params = pa::StreamParameters::<f32>::new(def_input, CHANNELS, INTERLEAVED, latency);
let def_output = pa.default_output_device()?;
let output_info = pa.device_info(def_output)?;
println!("Default output device info: {:#?}", &output_info);
// Construct the output stream parameters.
let latency = output_info.default_low_output_latency;
let output_params =
pa::StreamParameters::<f32>::new(def_output, CHANNELS, INTERLEAVED, latency);
// Check that the stream format is supported.
pa.is_duplex_format_supported(input_params, output_params, sample_rate as f64)?;
// Construct the settings with which we'll open our duplex stream.
let settings =
pa::DuplexStreamSettings::new(input_params, output_params, sample_rate as f64, FRAMES);
let mut stream = pa.open_blocking_stream(settings)?;
stream.start()?;
// We'll use this function to wait for read/write availability.
fn wait_for_stream<F>(f: F, name: &str) -> u32
where
F: Fn() -> Result<pa::StreamAvailable, pa::error::Error>,
{
loop {
match f() {
Ok(available) => match available {
pa::StreamAvailable::Frames(frames) => return frames as u32,
pa::StreamAvailable::InputOverflowed => println!("Input stream has overflowed"),
pa::StreamAvailable::OutputUnderflowed => {
println!("Output stream has underflowed")
}
},
Err(err) => panic!(
"An error occurred while waiting for the {} stream: {}",
name, err
),
}
}
};
// Now start the main read/write loop: microphone samples are forwarded into
// the rx channel, and modulated samples from `modulator` are written out to
// the audio device.
loop {
// How many frames are available on the input stream?
let in_frames = wait_for_stream(|| stream.read_available(), "Read");
// If there are frames available, let's take them and add them
// to our buffer.
if in_frames > 0 {
let input_samples = stream.read(in_frames)?;
for samp in input_samples {
rx_sender.send(*samp).unwrap();
}
}
// How many frames are available for writing on the output stream?
let out_frames = wait_for_stream(|| stream.write_available(), "Write");
// If there are frames available for writing and we have some
// to write, then write!
if out_frames > 0 {
// If we have more than enough frames for writing, take
// them from the start of the buffer. Otherwise if we
// have less, just take what we can for now.
let write_frames = out_frames;
let n_write_samples = write_frames as usize * CHANNELS as usize;
let mut flag = false;
stream.write(write_frames, |output| {
for i in 0..n_write_samples {
if let Some(samp) = modulator.next() {
output[i] = samp;
} else {
println!("Tx samples finished. Exiting");
flag = true;
break;
}
}
})?;
if flag {
break;
}
}
}
Ok(())
}
pub fn start_audio<'c>(
tx_receiver: Receiver<Complex<f32>>,
config: &'c Config,
) -> Result<(std::thread::JoinHandle<()>, AudioSampleStream<'c>), Error> {
// For the microphone
let (rx_sender, rx_receiver) = std::sync::mpsc::channel::<f32>();
let sample_rate = config.audio.sample_rate;
let config_c = config.clone();
let handle = std::thread::spawn(move || {
// Use the modulator to go from baseband to carrier frequency
let modulator = Modulate::new(tx_receiver.iter(), sample_rate, &config_c);
run(modulator, rx_sender, sample_rate).unwrap();
});
return Ok((
handle,
AudioSampleStream::new(rx_receiver, sample_rate as f32, config),
));
}
| /// Stream of samples from an audio device
pub struct AudioSampleStream<'c> {
channel: Receiver<f32>,
demod: Demodulate<'c>,
}
impl<'c> AudioSampleStream<'c> {
fn new(channel: Receiver<f32>, sample_rate: f32, config: &'c Config) -> Self {
Self {
channel,
demod: Demodulate::new(sample_rate, config),
}
}
}
impl<'c> InputSampleStream for AudioSampleStream<'c> {}
impl<'c> Iterator for AudioSampleStream<'c> {
type Item = Complex<f32>;
fn next(&mut self) -> Option<Complex<f32>> {
loop {
let in_samp = if let Ok(samp) = self.channel.recv() {
samp
} else {
return None;
};
let out = self.demod.push(in_samp);
if out.is_some() {
return out;
}
}
}
}
/// Upconvert signal from baseband to carrier frequency
struct Modulate<'c, T>
where
T: Iterator<Item = Complex<f32>>,
{
config: &'c Config,
/// Our source of baseband samples
src: T,
/// Number of carrier samples to skip per baseband sample
to_skip: u64,
/// Sample rate of the audio signal
sample_rate: f32,
/// Total number of (audio) samples transmitted so far
num_samps: u64,
/// If true, input is done and we'll always return None
done: bool,
/// The two samples we are currently in between sending
cur_samps: (Complex<f32>, Complex<f32>),
/// The current sample we are sending
cur_ewma: Complex<f32>,
}
impl<'c, T> Modulate<'c, T>
where
T: Iterator<Item = Complex<f32>>,
{
fn new(src: T, sample_rate: f32, config: &'c Config) -> Self {
// For now, sample_rate has to be a multiple of bandwidth. Can
// remove restriction later
let to_skip = sample_rate / config.audio.bandwidth;
assert!((to_skip.round() - to_skip).abs() <= 1e-3);
let to_skip = 1; //to_skip.round() as u64;
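// Note: `to_skip` is pinned to 1 here, so the skip logic in `next()` pulls a
// fresh baseband sample on every audio sample; the commented-out rounding
// shows the intended general case of one baseband sample per `to_skip`
// audio samples.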
Self {
config,
src,
to_skip,
sample_rate,
num_samps: 0,
done: false,
cur_samps: (Complex::zero(), Complex::zero()),
cur_ewma: Complex::zero(),
}
}
}
impl<'c, T> Iterator for Modulate<'c, T>
where
T: Iterator<Item = Complex<f32>>,
{
type Item = f32;
fn next(&mut self) -> Option<f32> {
if self.done {
return None;
}
// See if we need to update the current sample
if self.num_samps % self.to_skip == 0 {
if let Some(x) = self.src.next() {
self.cur_samps = (self.cur_samps.1, x);
} else {
self.done = true;
return None;
}
}
let pi2 = 2. * std::f32::consts::PI;
let e = Complex::new(
0.,
pi2 * self.num_samps as f32 / self.sample_rate * self.config.audio.center_frequency,
)
.exp();
self.num_samps += 1;
// Low-pass filter
// Frequency-domain sinc
//let f = (self.num_samps % self.to_skip) as f32 / self.to_skip as f32;
//let samp = prev * (1. - f) + cur * f;
// EWMA (equivalent to RC-filter)
// let alpha = 1. / (1. + self.to_skip as f32);
// self.cur_ewma = alpha * self.cur_samps.1 + (1. - alpha) * self.cur_ewma;
// let samp = self.cur_ewma;
let samp = self.cur_samps.1;
Some(e.re * samp.re + e.im * samp.im)
}
}
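// Hedged sketch of the EWMA low-pass filter referenced by the commented-out
// code above (`ewma_step` is illustrative, not part of the original). With
// alpha = 1 / (1 + to_skip) it behaves like a first-order RC filter on the
// baseband samples:
#[allow(dead_code)]
fn ewma_step(prev: Complex<f32>, input: Complex<f32>, to_skip: f32) -> Complex<f32> {
let alpha = 1.0 / (1.0 + to_skip);
input * alpha + prev * (1.0 - alpha)
}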
/// Convert to baseband from carrier frequency
struct Demodulate<'c> {
config: &'c Config,
/// Number of carrier samples to skip per baseband sample
to_skip: usize,
/// Sample rate of the audio signal
sample_rate: f32,
/// Total number of (audio) samples transmitted so far
num_samps: u64,
/// Average of the sample so far
samp_avg: Complex<f32>,
}
impl<'c> Demodulate<'c> {
fn new(sample_rate: f32, config: &'c Config) -> Self {
// For now, sample_rate has to be a multiple of bandwidth. Can
// remove restriction later
let to_skip = sample_rate / config.audio.bandwidth;
assert!((to_skip.round() - to_skip).abs() <= 1e-3);
let to_skip = to_skip.round() as usize;
Self {
config,
to_skip,
sample_rate,
num_samps: 0,
samp_avg: Complex::zero(),
}
}
/// Takes an audio sample, and if appropriate, returns a baseband
/// sample
fn push(&mut self, samp: f32) -> Option<Complex<f32>> {
// Add to the average
let pi2 = 2. * std::f32::consts::PI;
let e = Complex::new(
0.,
pi2 * self.num_samps as f32 / self.sample_rate * self.config.audio.center_frequency,
)
.exp();
self.samp_avg += samp * e;
// Should we output something?
let res = if self.num_samps % self.to_skip as u64 == 0 {
let res = Some(self.samp_avg / self.to_skip as f32);
self.samp_avg = Complex::zero();
res
} else {
None
};
self.num_samps += 1;
res
}
}
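// Worked form of the demodulation above (restating the code, no new claims):
// each real sample x[n] is mixed with e^(j*2*pi*fc*n/Fs), and roughly
// `to_skip` consecutive products are averaged, i.e. a mix-down followed by a
// boxcar low-pass/decimator:
//   y[k] ~= (1/to_skip) * sum_n x[n] * e^(j*2*pi*fc*n/Fs)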
#[cfg(test)]
mod tests {
use super::{Demodulate, Modulate};
use crate::config::Config;
use num::Complex;
use rand::Rng;
#[test]
fn mod_demod() {
let config = Config::default();
let sample_rate = 44100.;
// The samples we'll transmit
let mut rng = rand_pcg::Pcg32::new(1, 1);
let samples: Vec<_> = (0..100_000)
.map(|_| Complex::new(rng.gen(), rng.gen()))
.collect();
let modulate = Modulate::new(samples.clone().into_iter(), sample_rate, &config);
let mut demodulate = Demodulate::new(sample_rate, &config);
let mut pos = 0;
let mut channel = None;
for x in modulate {
if let Some(out) = demodulate.push(x) {
if channel.is_none() {
channel = Some(out / samples[pos]);
}
let channel = channel.unwrap();
//println!("{:?} {:?}", out.to_polar(), samples[pos].to_polar());
//println!("{} {}", out / samples[pos], channel);
assert!((out / samples[pos] - channel).norm() <= 1e-3);
pos += 1;
}
}
}
} | random_line_split |
|
audio.rs | use crate::config::Config;
use crate::test::InputSampleStream;
use failure::Error;
use num::{Complex, Zero};
use portaudio as pa;
use std::sync::mpsc::{Receiver, Sender};
const CHANNELS: i32 = 1;
const FRAMES: u32 = 256;
const INTERLEAVED: bool = true;
fn run<'c, T>(
mut modulator: Modulate<'c, T>,
rx_sender: Sender<f32>,
sample_rate: f32,
) -> Result<(), pa::Error>
where
T: Iterator<Item = Complex<f32>>,
{
let pa = pa::PortAudio::new()?;
println!("PortAudio");
println!("version: {}", pa.version());
println!("version text: {:?}", pa.version_text());
println!("host count: {}", pa.host_api_count()?);
let default_host = pa.default_host_api()?;
println!("default host: {:#?}", pa.host_api_info(default_host));
let def_input = pa.default_input_device()?;
let input_info = pa.device_info(def_input)?;
println!("Default input device info: {:#?}", &input_info);
// Construct the input stream parameters.
let latency = input_info.default_low_input_latency;
let input_params = pa::StreamParameters::<f32>::new(def_input, CHANNELS, INTERLEAVED, latency);
let def_output = pa.default_output_device()?;
let output_info = pa.device_info(def_output)?;
println!("Default output device info: {:#?}", &output_info);
// Construct the output stream parameters.
let latency = output_info.default_low_output_latency;
let output_params =
pa::StreamParameters::<f32>::new(def_output, CHANNELS, INTERLEAVED, latency);
// Check that the stream format is supported.
pa.is_duplex_format_supported(input_params, output_params, sample_rate as f64)?;
// Construct the settings with which we'll open our duplex stream.
let settings =
pa::DuplexStreamSettings::new(input_params, output_params, sample_rate as f64, FRAMES);
let mut stream = pa.open_blocking_stream(settings)?;
stream.start()?;
// We'll use this function to wait for read/write availability.
fn wait_for_stream<F>(f: F, name: &str) -> u32
where
F: Fn() -> Result<pa::StreamAvailable, pa::error::Error>,
{
loop {
match f() {
Ok(available) => match available {
pa::StreamAvailable::Frames(frames) => return frames as u32,
pa::StreamAvailable::InputOverflowed => println!("Input stream has overflowed"),
pa::StreamAvailable::OutputUnderflowed => {
println!("Output stream has underflowed")
}
},
Err(err) => panic!(
"An error occurred while waiting for the {} stream: {}",
name, err
),
}
}
};
// Now start the main read/write loop: microphone samples are forwarded into
// the rx channel, and modulated samples from `modulator` are written out to
// the audio device.
loop {
// How many frames are available on the input stream?
let in_frames = wait_for_stream(|| stream.read_available(), "Read");
// If there are frames available, let's take them and add them
// to our buffer.
if in_frames > 0 {
let input_samples = stream.read(in_frames)?;
for samp in input_samples {
rx_sender.send(*samp).unwrap();
}
}
// How many frames are available for writing on the output stream?
let out_frames = wait_for_stream(|| stream.write_available(), "Write");
// If there are frames available for writing and we have some
// to write, then write!
if out_frames > 0 {
// If we have more than enough frames for writing, take
// them from the start of the buffer. Otherwise if we
// have less, just take what we can for now.
let write_frames = out_frames;
let n_write_samples = write_frames as usize * CHANNELS as usize;
let mut flag = false;
stream.write(write_frames, |output| {
for i in 0..n_write_samples {
if let Some(samp) = modulator.next() {
output[i] = samp;
} else {
println!("Tx samples finished. Exiting");
flag = true;
break;
}
}
})?;
if flag |
}
}
Ok(())
}
pub fn start_audio<'c>(
tx_receiver: Receiver<Complex<f32>>,
config: &'c Config,
) -> Result<(std::thread::JoinHandle<()>, AudioSampleStream<'c>), Error> {
// For the microphone
let (rx_sender, rx_receiver) = std::sync::mpsc::channel::<f32>();
let sample_rate = config.audio.sample_rate;
let config_c = config.clone();
let handle = std::thread::spawn(move || {
// Use the modulator to go from baseband to carrier frequency
let modulator = Modulate::new(tx_receiver.iter(), sample_rate, &config_c);
run(modulator, rx_sender, sample_rate).unwrap();
});
return Ok((
handle,
AudioSampleStream::new(rx_receiver, sample_rate as f32, config),
));
}
/// Stream of samples from an audio device
pub struct AudioSampleStream<'c> {
channel: Receiver<f32>,
demod: Demodulate<'c>,
}
impl<'c> AudioSampleStream<'c> {
fn new(channel: Receiver<f32>, sample_rate: f32, config: &'c Config) -> Self {
Self {
channel,
demod: Demodulate::new(sample_rate, config),
}
}
}
impl<'c> InputSampleStream for AudioSampleStream<'c> {}
impl<'c> Iterator for AudioSampleStream<'c> {
type Item = Complex<f32>;
fn next(&mut self) -> Option<Complex<f32>> {
loop {
let in_samp = if let Ok(samp) = self.channel.recv() {
samp
} else {
return None;
};
let out = self.demod.push(in_samp);
if out.is_some() {
return out;
}
}
}
}
/// Upconvert signal from baseband to carrier frequency
struct Modulate<'c, T>
where
T: Iterator<Item = Complex<f32>>,
{
config: &'c Config,
/// Our source of baseband samples
src: T,
/// Number of carrier samples to skip per baseband sample
to_skip: u64,
/// Sample rate of the audio signal
sample_rate: f32,
/// Total number of (audio) samples transmitted so far
num_samps: u64,
/// If true, input is done and we'll always return None
done: bool,
/// The two samples we are currently in between sending
cur_samps: (Complex<f32>, Complex<f32>),
/// The current sample we are sending
cur_ewma: Complex<f32>,
}
impl<'c, T> Modulate<'c, T>
where
T: Iterator<Item = Complex<f32>>,
{
fn new(src: T, sample_rate: f32, config: &'c Config) -> Self {
// For now, sample_rate has to be a multiple of bandwidth. Can
// remove restriction later
let to_skip = sample_rate / config.audio.bandwidth;
assert!((to_skip.round() - to_skip).abs() <= 1e-3);
let to_skip = 1; //to_skip.round() as u64;
Self {
config,
src,
to_skip,
sample_rate,
num_samps: 0,
done: false,
cur_samps: (Complex::zero(), Complex::zero()),
cur_ewma: Complex::zero(),
}
}
}
impl<'c, T> Iterator for Modulate<'c, T>
where
T: Iterator<Item = Complex<f32>>,
{
type Item = f32;
fn next(&mut self) -> Option<f32> {
if self.done {
return None;
}
// See if we need to update the current sample
if self.num_samps % self.to_skip == 0 {
if let Some(x) = self.src.next() {
self.cur_samps = (self.cur_samps.1, x);
} else {
self.done = true;
return None;
}
}
let pi2 = 2. * std::f32::consts::PI;
let e = Complex::new(
0.,
pi2 * self.num_samps as f32 / self.sample_rate * self.config.audio.center_frequency,
)
.exp();
self.num_samps += 1;
// Low-pass filter
// Frequency-domain sinc
//let f = (self.num_samps % self.to_skip) as f32 / self.to_skip as f32;
//let samp = prev * (1. - f) + cur * f;
// EWMA (equivalent to RC-filter)
// let alpha = 1. / (1. + self.to_skip as f32);
// self.cur_ewma = alpha * self.cur_samps.1 + (1. - alpha) * self.cur_ewma;
// let samp = self.cur_ewma;
let samp = self.cur_samps.1;
Some(e.re * samp.re + e.im * samp.im)
}
}
/// Convert to baseband from carrier frequency
struct Demodulate<'c> {
config: &'c Config,
/// Number of carrier samples to skip per baseband sample
to_skip: usize,
/// Sample rate of the audio signal
sample_rate: f32,
/// Total number of (audio) samples transmitted so far
num_samps: u64,
/// Average of the sample so far
samp_avg: Complex<f32>,
}
impl<'c> Demodulate<'c> {
fn new(sample_rate: f32, config: &'c Config) -> Self {
// For now, sample_rate has to be a multiple of bandwidth. Can
// remove restriction later
let to_skip = sample_rate / config.audio.bandwidth;
assert!((to_skip.round() - to_skip).abs() <= 1e-3);
let to_skip = to_skip.round() as usize;
Self {
config,
to_skip,
sample_rate,
num_samps: 0,
samp_avg: Complex::zero(),
}
}
/// Takes an audio sample, and if appropriate, returns a baseband
/// sample
fn push(&mut self, samp: f32) -> Option<Complex<f32>> {
// Add to the average
let pi2 = 2. * std::f32::consts::PI;
let e = Complex::new(
0.,
pi2 * self.num_samps as f32 / self.sample_rate * self.config.audio.center_frequency,
)
.exp();
self.samp_avg += samp * e;
// Should we output something?
let res = if self.num_samps % self.to_skip as u64 == 0 {
let res = Some(self.samp_avg / self.to_skip as f32);
self.samp_avg = Complex::zero();
res
} else {
None
};
self.num_samps += 1;
res
}
}
#[cfg(test)]
mod tests {
use super::{Demodulate, Modulate};
use crate::config::Config;
use num::Complex;
use rand::Rng;
#[test]
fn mod_demod() {
let config = Config::default();
let sample_rate = 44100.;
// The samples we'll transmit
let mut rng = rand_pcg::Pcg32::new(1, 1);
let samples: Vec<_> = (0..100_000)
.map(|_| Complex::new(rng.gen(), rng.gen()))
.collect();
let modulate = Modulate::new(samples.clone().into_iter(), sample_rate, &config);
let mut demodulate = Demodulate::new(sample_rate, &config);
let mut pos = 0;
let mut channel = None;
for x in modulate {
if let Some(out) = demodulate.push(x) {
if channel.is_none() {
channel = Some(out / samples[pos]);
}
let channel = channel.unwrap();
//println!("{:?} {:?}", out.to_polar(), samples[pos].to_polar());
//println!("{} {}", out / samples[pos], channel);
assert!((out / samples[pos] - channel).norm() <= 1e-3);
pos += 1;
}
}
}
}
| {
break;
} | conditional_block |
audio.rs | use crate::config::Config;
use crate::test::InputSampleStream;
use failure::Error;
use num::{Complex, Zero};
use portaudio as pa;
use std::sync::mpsc::{Receiver, Sender};
const CHANNELS: i32 = 1;
const FRAMES: u32 = 256;
const INTERLEAVED: bool = true;
fn run<'c, T>(
mut modulator: Modulate<'c, T>,
rx_sender: Sender<f32>,
sample_rate: f32,
) -> Result<(), pa::Error>
where
T: Iterator<Item = Complex<f32>>,
{
let pa = pa::PortAudio::new()?;
println!("PortAudio");
println!("version: {}", pa.version());
println!("version text: {:?}", pa.version_text());
println!("host count: {}", pa.host_api_count()?);
let default_host = pa.default_host_api()?;
println!("default host: {:#?}", pa.host_api_info(default_host));
let def_input = pa.default_input_device()?;
let input_info = pa.device_info(def_input)?;
println!("Default input device info: {:#?}", &input_info);
// Construct the input stream parameters.
let latency = input_info.default_low_input_latency;
let input_params = pa::StreamParameters::<f32>::new(def_input, CHANNELS, INTERLEAVED, latency);
let def_output = pa.default_output_device()?;
let output_info = pa.device_info(def_output)?;
println!("Default output device info: {:#?}", &output_info);
// Construct the output stream parameters.
let latency = output_info.default_low_output_latency;
let output_params =
pa::StreamParameters::<f32>::new(def_output, CHANNELS, INTERLEAVED, latency);
// Check that the stream format is supported.
pa.is_duplex_format_supported(input_params, output_params, sample_rate as f64)?;
// Construct the settings with which we'll open our duplex stream.
let settings =
pa::DuplexStreamSettings::new(input_params, output_params, sample_rate as f64, FRAMES);
let mut stream = pa.open_blocking_stream(settings)?;
stream.start()?;
// We'll use this function to wait for read/write availability.
fn wait_for_stream<F>(f: F, name: &str) -> u32
where
F: Fn() -> Result<pa::StreamAvailable, pa::error::Error>,
{
loop {
match f() {
Ok(available) => match available {
pa::StreamAvailable::Frames(frames) => return frames as u32,
pa::StreamAvailable::InputOverflowed => println!("Input stream has overflowed"),
pa::StreamAvailable::OutputUnderflowed => {
println!("Output stream has underflowed")
}
},
Err(err) => panic!(
"An error occurred while waiting for the {} stream: {}",
name, err
),
}
}
};
// Now start the main read/write loop: microphone samples are forwarded into
// the rx channel, and modulated samples from `modulator` are written out to
// the audio device.
loop {
// How many frames are available on the input stream?
let in_frames = wait_for_stream(|| stream.read_available(), "Read");
// If there are frames available, let's take them and add them
// to our buffer.
if in_frames > 0 {
let input_samples = stream.read(in_frames)?;
for samp in input_samples {
rx_sender.send(*samp).unwrap();
}
}
// How many frames are available for writing on the output stream?
let out_frames = wait_for_stream(|| stream.write_available(), "Write");
// If there are frames available for writing and we have some
// to write, then write!
if out_frames > 0 {
// Write as many frames as the output stream will accept,
// pulling samples from the modulator as we go.
let write_frames = out_frames;
let n_write_samples = write_frames as usize * CHANNELS as usize;
let mut flag = false;
stream.write(write_frames, |output| {
for i in 0..n_write_samples {
if let Some(samp) = modulator.next() {
output[i] = samp;
} else {
println!("Tx samples finished. Exiting");
flag = true;
break;
}
}
})?;
if flag {
break;
}
}
}
Ok(())
}
pub fn start_audio<'c>(
tx_receiver: Receiver<Complex<f32>>,
config: &'c Config,
) -> Result<(std::thread::JoinHandle<()>, AudioSampleStream<'c>), Error> {
// For the microphone
let (rx_sender, rx_receiver) = std::sync::mpsc::channel::<f32>();
let sample_rate = config.audio.sample_rate;
let config_c = config.clone();
let handle = std::thread::spawn(move || {
// Use the modulator to go from baseband to carrier frequency
let modulator = Modulate::new(tx_receiver.iter(), sample_rate, &config_c);
run(modulator, rx_sender, sample_rate).unwrap();
});
Ok((
handle,
AudioSampleStream::new(rx_receiver, sample_rate as f32, config),
))
}
/// Stream of samples from an audio device
pub struct AudioSampleStream<'c> {
channel: Receiver<f32>,
demod: Demodulate<'c>,
}
impl<'c> AudioSampleStream<'c> {
fn new(channel: Receiver<f32>, sample_rate: f32, config: &'c Config) -> Self {
Self {
channel,
demod: Demodulate::new(sample_rate, config),
}
}
}
impl<'c> InputSampleStream for AudioSampleStream<'c> {}
impl<'c> Iterator for AudioSampleStream<'c> {
type Item = Complex<f32>;
fn next(&mut self) -> Option<Complex<f32>> {
loop {
let in_samp = if let Ok(samp) = self.channel.recv() {
samp
} else {
return None;
};
let out = self.demod.push(in_samp);
if out.is_some() {
return out;
}
}
}
}
/// Upconvert signal from baseband to carrier frequency
struct Modulate<'c, T>
where
T: Iterator<Item = Complex<f32>>,
{
config: &'c Config,
/// Our source of baseband samples
src: T,
/// Number of carrier samples to skip per baseband sample
to_skip: u64,
/// Sample rate of the audio signal
sample_rate: f32,
/// Total number of (audio) samples transmitted so far
num_samps: u64,
/// If true, input is done and we'll always return None
done: bool,
/// The two samples we are currently in between sending
cur_samps: (Complex<f32>, Complex<f32>),
/// The current sample we are sending
cur_ewma: Complex<f32>,
}
impl<'c, T> Modulate<'c, T>
where
T: Iterator<Item = Complex<f32>>,
{
fn new(src: T, sample_rate: f32, config: &'c Config) -> Self {
// For now, sample_rate must be an integer multiple of the bandwidth;
// this restriction could be lifted later.
let to_skip = sample_rate / config.audio.bandwidth;
assert!((to_skip.round() - to_skip).abs() <= 1e-3);
let to_skip = 1; // upsampling currently disabled; was: to_skip.round() as u64
Self {
config,
src,
to_skip,
sample_rate,
num_samps: 0,
done: false,
cur_samps: (Complex::zero(), Complex::zero()),
cur_ewma: Complex::zero(),
}
}
}
impl<'c, T> Iterator for Modulate<'c, T>
where
T: Iterator<Item = Complex<f32>>,
{
type Item = f32;
fn next(&mut self) -> Option<f32> {
if self.done {
return None;
}
// See if we need to update the current sample
if self.num_samps % self.to_skip == 0 {
if let Some(x) = self.src.next() {
self.cur_samps = (self.cur_samps.1, x);
} else {
self.done = true;
return None;
}
}
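// One sample of the complex carrier: e = exp(j*theta) with
// theta = 2*pi * f_c * n / f_s, where f_c is the configured center
// frequency and n counts audio samples emitted so far.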
let pi2 = 2. * std::f32::consts::PI;
let e = Complex::new(
0.,
pi2 * self.num_samps as f32 / self.sample_rate * self.config.audio.center_frequency,
)
.exp();
self.num_samps += 1;
// Low-pass filter
// Frequency-domain sinc
//let f = (self.num_samps % self.to_skip) as f32 / self.to_skip as f32;
//let samp = prev * (1. - f) + cur * f;
// EWMA (equivalent to RC-filter)
// let alpha = 1. / (1. + self.to_skip as f32);
// self.cur_ewma = alpha * self.cur_samps.1 + (1. - alpha) * self.cur_ewma;
// let samp = self.cur_ewma;
let samp = self.cur_samps.1;
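// Emit the real passband signal Re(samp * conj(e))
// = samp.re * cos(theta) + samp.im * sin(theta).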
Some(e.re * samp.re + e.im * samp.im)
}
}
/// Convert to baseband from carrier frequency
struct | <'c> {
config: &'c Config,
/// Number of carrier samples to skip per baseband sample
to_skip: usize,
/// Sample rate of the audio signal
sample_rate: f32,
/// Total number of (audio) samples received so far
num_samps: u64,
/// Average of the sample so far
samp_avg: Complex<f32>,
}
impl<'c> Demodulate<'c> {
fn new(sample_rate: f32, config: &'c Config) -> Self {
// For now, sample_rate must be an integer multiple of the bandwidth;
// this restriction could be lifted later.
let to_skip = sample_rate / config.audio.bandwidth;
assert!((to_skip.round() - to_skip).abs() <= 1e-3);
let to_skip = to_skip.round() as usize;
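// e.g. a 44.1 kHz sample rate with a (hypothetical) 4410 Hz bandwidth
// gives to_skip = 10: one baseband sample per ten audio samples.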
Self {
config,
to_skip,
sample_rate,
num_samps: 0,
samp_avg: Complex::zero(),
}
}
/// Takes an audio sample, and if appropriate, returns a baseband
/// sample
fn push(&mut self, samp: f32) -> Option<Complex<f32>> {
// Add to the average
let pi2 = 2. * std::f32::consts::PI;
let e = Complex::new(
0.,
pi2 * self.num_samps as f32 / self.sample_rate * self.config.audio.center_frequency,
)
.exp();
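// Multiplying the real input by e shifts the passband signal back down to
// baseband (plus an image at twice the carrier frequency); the block
// average over `to_skip` samples acts as a crude low-pass that suppresses
// the image.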
self.samp_avg += samp * e;
// Should we output something?
let res = if self.num_samps % self.to_skip as u64 == 0 {
let res = Some(self.samp_avg / self.to_skip as f32);
self.samp_avg = Complex::zero();
res
} else {
None
};
self.num_samps += 1;
res
}
}
#[cfg(test)]
mod tests {
use super::{Demodulate, Modulate};
use crate::config::Config;
use num::Complex;
use rand::Rng;
#[test]
fn mod_demod() {
let config = Config::default();
let sample_rate = 44100.;
// The samples we'll transmit
let mut rng = rand_pcg::Pcg32::new(1, 1);
let samples: Vec<_> = (0..100_000)
.map(|_| Complex::new(rng.gen(), rng.gen()))
.collect();
let modulate = Modulate::new(samples.clone().into_iter(), sample_rate, &config);
let mut demodulate = Demodulate::new(sample_rate, &config);
let mut pos = 0;
let mut channel = None;
for x in modulate {
if let Some(out) = demodulate.push(x) {
if channel.is_none() {
channel = Some(out / samples[pos]);
}
let channel = channel.unwrap();
//println!("{:?} {:?}", out.to_polar(), samples[pos].to_polar());
//println!("{} {}", out / samples[pos], channel);
assert!((out / samples[pos] - channel).norm() <= 1e-3);
pos += 1;
}
}
}
}
| Demodulate | identifier_name |
tests.rs | use quickcheck::{QuickCheck, StdGen, TestResult};
use snap::raw::{decompress_len, Decoder, Encoder};
use snap::Error;
#[cfg(feature = "cpp")]
use snappy_cpp as cpp;
// roundtrip is a macro that compresses the input, then decompresses the result
// and compares it with the original input. If they are not equal, then the
// test fails.
macro_rules! roundtrip {
($data:expr) => {{
let d = &$data[..];
assert_eq!(d, &*depress(&press(d)));
}};
}
// errored is a macro that tries to decompress the input and asserts that it
// resulted in an error. If decompression was successful, then the test fails.
macro_rules! errored {
($data:expr, $err:expr) => {
errored!($data, $err, false);
};
($data:expr, $err:expr, $bad_header:expr) => {{
let d = &$data[..];
let mut buf = if $bad_header {
assert_eq!($err, decompress_len(d).unwrap_err());
vec![0; 1024]
} else {
vec![0; decompress_len(d).unwrap()]
};
match Decoder::new().decompress(d, &mut buf) {
Err(ref err) if err == &$err => {}
Err(ref err) => panic!(
"expected decompression to fail with {:?}, \
but got {:?}",
$err, err
),
Ok(n) => {
panic!(
"\nexpected decompression to fail, but did not!
original (len == {:?})
----------------------
{:?}
decompressed (len == {:?})
--------------------------
{:?}
",
d.len(),
d,
n,
buf
);
}
}
}};
}
// testtrip is a macro that defines a test that compresses the input, then
// decompresses the result and compares it with the original input. If they are
// not equal, then the test fails. This test is performed both on the raw
// Snappy format and the framed Snappy format.
//
// If tests are compiled with the cpp feature, then this also tests that the
// C++ library compresses to the same bytes that the Rust library does.
macro_rules! testtrip {
($name:ident, $data:expr) => {
mod $name {
#[test]
fn roundtrip_raw() {
use super::{depress, press};
roundtrip!($data);
}
#[test]
fn roundtrip_frame() {
use super::{read_frame_depress, write_frame_press};
let d = &$data[..];
assert_eq!(d, &*read_frame_depress(&write_frame_press(d)));
}
#[test]
fn read_and_write_frame_encoder_match() {
use super::{read_frame_press, write_frame_press};
let d = &$data[..];
assert_eq!(read_frame_press(d), write_frame_press(d));
}
#[test]
#[cfg(feature = "cpp")]
fn cmpcpp() {
use super::{press, press_cpp};
let data = &$data[..];
let rust = press(data);
let cpp = press_cpp(data);
if rust == cpp {
return;
}
panic!(
"\ncompression results are not equal!
original (len == {:?})
----------------------
{:?}
rust (len == {:?})
------------------
{:?}
cpp (len == {:?})
-----------------
{:?}
",
data.len(),
data,
rust.len(),
rust,
cpp.len(),
cpp
);
}
}
};
}
// testerrored is a macro that defines a test that decompresses the input,
// and if the result is anything other than the error given, the test fails.
macro_rules! testerrored {
($name:ident, $data:expr, $err:expr) => {
testerrored!($name, $data, $err, false);
};
($name:ident, $data:expr, $err:expr, $bad_header:expr) => {
#[test]
fn $name() {
errored!($data, $err, $bad_header);
}
};
}
// Simple test cases.
testtrip!(empty, &[]);
testtrip!(one_zero, &[0]);
// Roundtrip all of the benchmark data.
testtrip!(data_html, include_bytes!("../data/html"));
testtrip!(data_urls, include_bytes!("../data/urls.10K"));
testtrip!(data_jpg, include_bytes!("../data/fireworks.jpeg"));
testtrip!(data_pdf, include_bytes!("../data/paper-100k.pdf"));
testtrip!(data_html4, include_bytes!("../data/html_x_4"));
testtrip!(data_txt1, include_bytes!("../data/alice29.txt"));
testtrip!(data_txt2, include_bytes!("../data/asyoulik.txt"));
testtrip!(data_txt3, include_bytes!("../data/lcet10.txt"));
testtrip!(data_txt4, include_bytes!("../data/plrabn12.txt"));
testtrip!(data_pb, include_bytes!("../data/geo.protodata"));
testtrip!(data_gaviota, include_bytes!("../data/kppkn.gtb"));
testtrip!(data_golden, include_bytes!("../data/Mark.Twain-Tom.Sawyer.txt"));
// Do it again, with the Snappy frame format.
// Roundtrip the golden data, starting with the compressed bytes.
#[test]
fn data_golden_rev() {
let data = include_bytes!("../data/Mark.Twain-Tom.Sawyer.txt.rawsnappy");
let data = &data[..];
assert_eq!(data, &*press(&depress(data)));
}
// Miscellaneous tests.
#[test]
fn small_copy() {
use std::iter::repeat;
for i in 0..32 {
let inner: String = repeat('b').take(i).collect();
roundtrip!(format!("aaaa{}aaaabbbb", inner).into_bytes());
}
}
#[test]
fn small_regular() {
let mut i = 1;
while i < 20_000 {
let mut buf = vec![0; i];
for (j, x) in buf.iter_mut().enumerate() {
*x = (j % 10) as u8 + b'a';
}
roundtrip!(buf);
i += 23;
}
}
// Test that triggered an out of bounds write.
#[test]
fn decompress_copy_close_to_end_1() {
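// Byte-by-byte, per the standard Snappy raw format:
//   27           -> varint header: decompressed length = 27
//   0b000010_00  -> literal tag: len = 2 + 1 = 3 (the bytes 1, 2, 3 follow)
//   0b000_000_10 -> copy tag (2-byte offset): len = 0 + 1 = 1, offset = 3
//                   (the bytes `3, 0`, little-endian), re-emitting the 1
//   0b010110_00  -> literal tag: len = 22 + 1 = 23 (the bytes 4..=26 follow)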
let buf = [
27,
0b000010_00,
1,
2,
3,
0b000_000_10,
3,
0,
0b010110_00,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
];
let decompressed = [
1, 2, 3, 1, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26,
];
assert_eq!(decompressed, &*depress(&buf));
}
#[test]
fn decompress_copy_close_to_end_2() {
let buf = [
28,
0b000010_00,
1,
2,
3,
0b000_000_10,
3,
0,
0b010111_00,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
];
let decompressed = [
1, 2, 3, 1, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27,
];
assert_eq!(decompressed, &*depress(&buf));
}
// The `read::FrameEncoder` code uses different code paths depending on buffer
// size, so let's test both. Also, very small buffers are a good stress test.
#[test]
fn read_frame_encoder_big_and_little_buffers() {
use snap::read;
use std::io::{BufReader, Read};
let bytes = &include_bytes!("../data/html")[..];
let mut big =
BufReader::with_capacity(1_000_000, read::FrameEncoder::new(bytes));
let mut big_out = vec![];
big.read_to_end(&mut big_out).unwrap();
// 5 bytes is small enough to break up headers, etc.
let mut little =
BufReader::with_capacity(5, read::FrameEncoder::new(bytes));
let mut little_out = vec![];
little.read_to_end(&mut little_out).unwrap();
assert_eq!(big_out, little_out);
}
// Tests decompression on malformed data.
// An empty buffer.
testerrored!(err_empty, &b""[..], Error::Empty);
// Decompress fewer bytes than the header reports.
testerrored!(
err_header_mismatch,
&b"\x05\x00a"[..],
Error::HeaderMismatch { expected_len: 5, got_len: 1 }
);
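// ("\x05" is a varint header claiming 5 decompressed bytes, but "\x00a" is a
// one-byte literal, so only 1 byte is actually produced.)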
// An invalid varint (final byte has continuation bit set).
testerrored!(err_varint1, &b"\xFF"[..], Error::Header, true);
// A varint that overflows u64.
testerrored!(
err_varint2,
&b"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00"[..],
Error::Header,
true
);
// A varint that fits in u64 but overflows u32.
testerrored!(
err_varint3,
&b"\x80\x80\x80\x80\x10"[..],
Error::TooBig { given: 4294967296, max: 4294967295 },
true
);
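// For reference, the length header is a little-endian base-128 varint: the
// low 7 bits of each byte are payload and the high bit is a continuation
// flag, so the bytes above decode to 0x10 << 28 = 2^32 = 4294967296, one past
// u32::MAX.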
// A literal whose length is too small.
// Since the literal length is 1, 'h' is read as a literal and 'i' is
// interpreted as a copy 1 operation missing its offset byte.
testerrored!(
err_lit,
&b"\x02\x00hi"[..],
Error::CopyRead { len: 1, src_len: 0 }
);
// A literal whose length is too big.
testerrored!(
err_lit_big1,
&b"\x02\xechi"[..],
Error::Literal { len: 60, src_len: 2, dst_len: 2 }
);
// A literal whose length is too big, requires 1 extra byte to be read, and
// src is too short to read that byte.
testerrored!(
err_lit_big2a,
&b"\x02\xf0hi"[..],
Error::Literal { len: 4, src_len: 2, dst_len: 2 }
);
// A literal whose length is too big, requires 1 extra byte to be read,
// src is too short to read the full literal.
testerrored!(
err_lit_big2b,
&b"\x02\xf0hi\x00\x00\x00"[..],
Error::Literal {
len: 105, // because 105 == 'h' as u8 + 1
src_len: 4,
dst_len: 2,
}
);
// A copy 1 operation that stops at the tag byte. This fails because there's
// no byte to read for the copy offset.
testerrored!(
err_copy1,
&b"\x02\x00a\x01"[..],
Error::CopyRead { len: 1, src_len: 0 }
);
// A copy 2 operation that stops at the tag byte and another copy 2 operation
// that stops after the first byte in the offset.
testerrored!(
err_copy2a,
&b"\x11\x00a\x3e"[..],
Error::CopyRead { len: 2, src_len: 0 }
);
testerrored!(
err_copy2b,
&b"\x11\x00a\x3e\x01"[..],
Error::CopyRead { len: 2, src_len: 1 }
);
// Same as copy 2, but for copy 4.
testerrored!(
err_copy3a,
&b"\x11\x00a\x3f"[..],
Error::CopyRead { len: 4, src_len: 0 }
);
testerrored!(
err_copy3b,
&b"\x11\x00a\x3f\x00"[..],
Error::CopyRead { len: 4, src_len: 1 }
);
testerrored!(
err_copy3c,
&b"\x11\x00a\x3f\x00\x00"[..],
Error::CopyRead { len: 4, src_len: 2 }
);
testerrored!(
err_copy3d,
&b"\x11\x00a\x3f\x00\x00\x00"[..],
Error::CopyRead { len: 4, src_len: 3 }
);
// A copy operation whose offset is zero.
testerrored!(
err_copy_offset_zero,
&b"\x11\x00a\x01\x00"[..],
Error::Offset { offset: 0, dst_pos: 1 }
);
// A copy operation whose offset is too big.
testerrored!(
err_copy_offset_big,
&b"\x11\x00a\x01\xFF"[..],
Error::Offset { offset: 255, dst_pos: 1 }
);
// A copy operation whose length is too big.
testerrored!(
err_copy_len_big,
&b"\x05\x00a\x1d\x01"[..],
Error::CopyWrite { len: 11, dst_len: 4 }
);
// Selected random inputs pulled from quickcheck failure witnesses.
testtrip!(
random1,
&[
0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0,
1, 1, 0, 0, 1, 2, 0, 0, 2, 1, 0, 0, 2, 2, 0, 0, 0, 6, 0, 0, 3, 1, 0,
0, 0, 7, 0, 0, 1, 3, 0, 0, 0, 8, 0, 0, 2, 3, 0, 0, 0, 9, 0, 0, 1, 4,
0, 0, 1, 0, 0, 3, 0, 0, 1, 0, 1, 0, 0, 0, 10, 0, 0, 0, 0, 2, 4, 0, 0,
2, 0, 0, 3, 0, 1, 0, 0, 1, 5, 0, 0, 6, 0, 0, 0, 0, 11, 0, 0, 1, 6, 0,
0, 1, 7, 0, 0, 0, 12, 0, 0, 3, 2, 0, 0, 0, 13, 0, 0, 2, 5, 0, 0, 0, 3,
3, 0, 0, 0, 1, 8, 0, 0, 1, 0, 1, 0, 0, 0, 4, 1, 0, 0, 0, 0, 14, 0, 0,
0, 1, 9, 0, 0, 0, 1, 10, 0, 0, 0, 0, 1, 11, 0, 0, 0, 1, 0, 2, 0, 0, 0,
1, 1, 1, 0, 0, 0, 0, 5, 1, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 2, 6, 0,
0, 0, 0, 0, 1, 12, 0, 0, 0, 0, 0, 3, 4, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0,
0, 1, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0,
]
);
testtrip!(
random2,
&[
10, 2, 14, 13, 0, 8, 2, 10, 2, 14, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
]
);
testtrip!(
random3,
&[0, 0, 0, 4, 1, 4, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,]
);
testtrip!(
random4,
&[
0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0,
1, 1, 0, 0, 1, 2, 0, 0, 1, 3, 0, 0, 1, 4, 0, 0, 2, 1, 0, 0, 0, 4, 0,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
]
);
// QuickCheck properties for testing that random data roundtrips.
// These properties tend to produce the inputs for the "random" tests above.
#[test]
fn qc_roundtrip() {
fn p(bytes: Vec<u8>) -> bool {
depress(&press(&bytes)) == bytes
}
QuickCheck::new()
.gen(StdGen::new(rand::thread_rng(), 10_000))
.tests(1_000)
.quickcheck(p as fn(_) -> _);
}
#[test]
fn qc_roundtrip_stream() {
fn p(bytes: Vec<u8>) -> TestResult {
if bytes.is_empty() |
TestResult::from_bool(
read_frame_depress(&write_frame_press(&bytes)) == bytes,
)
}
QuickCheck::new()
.gen(StdGen::new(rand::thread_rng(), 10_000))
.tests(1_000)
.quickcheck(p as fn(_) -> _);
}
#[test]
fn test_short_input() {
// Regression test for https://github.com/BurntSushi/rust-snappy/issues/42
use snap::read;
use std::io::Read;
let err =
read::FrameDecoder::new(&b"123"[..]).read_to_end(&mut Vec::new());
assert_eq!(err.unwrap_err().kind(), std::io::ErrorKind::UnexpectedEof);
}
#[test]
#[cfg(feature = "cpp")]
fn qc_cmpcpp() {
fn p(bytes: Vec<u8>) -> bool {
press(&bytes) == press_cpp(&bytes)
}
QuickCheck::new()
.gen(StdGen::new(rand::thread_rng(), 10_000))
.tests(10_000)
.quickcheck(p as fn(_) -> _);
}
// Regression tests.
// See: https://github.com/BurntSushi/rust-snappy/issues/3
#[cfg(target_pointer_width = "32")]
testerrored!(
err_lit_len_overflow1,
&b"\x11\x00\x00\xfc\xfe\xff\xff\xff"[..],
Error::Literal { len: std::u32::MAX as u64, src_len: 0, dst_len: 16 }
);
#[cfg(target_pointer_width = "32")]
testerrored!(
err_lit_len_overflow2,
&b"\x11\x00\x00\xfc\xff\xff\xff\xff"[..],
Error::Literal { len: std::u32::MAX as u64 + 1, src_len: 0, dst_len: 16 }
);
// Helper functions.
fn press(bytes: &[u8]) -> Vec<u8> {
Encoder::new().compress_vec(bytes).unwrap()
}
fn depress(bytes: &[u8]) -> Vec<u8> {
Decoder::new().decompress_vec(bytes).unwrap()
}
fn write_frame_press(bytes: &[u8]) -> Vec<u8> {
use snap::write;
use std::io::Write;
let mut wtr = write::FrameEncoder::new(vec![]);
wtr.write_all(bytes).unwrap();
wtr.into_inner().unwrap()
}
fn read_frame_depress(bytes: &[u8]) -> Vec<u8> {
use snap::read;
use std::io::Read;
let mut buf = vec![];
read::FrameDecoder::new(bytes).read_to_end(&mut buf).unwrap();
buf
}
fn read_frame_press(bytes: &[u8]) -> Vec<u8> {
use snap::read;
use std::io::Read;
let mut buf = vec![];
read::FrameEncoder::new(bytes).read_to_end(&mut buf).unwrap();
buf
}
#[cfg(feature = "cpp")]
fn press_cpp(bytes: &[u8]) -> Vec<u8> {
use snap::raw::max_compress_len;
let mut buf = vec![0; max_compress_len(bytes.len())];
let n = cpp::compress(bytes, &mut buf).unwrap();
buf.truncate(n);
buf
}
| {
return TestResult::discard();
} | conditional_block |
tests.rs | use quickcheck::{QuickCheck, StdGen, TestResult};
use snap::raw::{decompress_len, Decoder, Encoder};
use snap::Error;
#[cfg(feature = "cpp")]
use snappy_cpp as cpp;
// roundtrip is a macro that compresses the input, then decompresses the result
// and compares it with the original input. If they are not equal, then the
// test fails.
macro_rules! roundtrip {
($data:expr) => {{
let d = &$data[..];
assert_eq!(d, &*depress(&press(d)));
}};
}
// errored is a macro that tries to decompress the input and asserts that it
// resulted in an error. If decompression was successful, then the test fails.
macro_rules! errored {
($data:expr, $err:expr) => {
errored!($data, $err, false);
};
($data:expr, $err:expr, $bad_header:expr) => {{
let d = &$data[..];
let mut buf = if $bad_header {
assert_eq!($err, decompress_len(d).unwrap_err());
vec![0; 1024]
} else {
vec![0; decompress_len(d).unwrap()]
};
match Decoder::new().decompress(d, &mut buf) {
Err(ref err) if err == &$err => {}
Err(ref err) => panic!(
"expected decompression to fail with {:?}, \
but got {:?}",
$err, err
),
Ok(n) => {
panic!(
"\nexpected decompression to fail, but did not!
original (len == {:?})
----------------------
{:?}
decompressed (len == {:?})
--------------------------
{:?}
",
d.len(),
d,
n,
buf
);
}
}
}};
}
// testtrip is a macro that defines a test that compresses the input, then
// decompresses the result and compares it with the original input. If they are
// not equal, then the test fails. This test is performed both on the raw
// Snappy format and the framed Snappy format.
//
// If tests are compiled with the cpp feature, then this also tests that the
// C++ library compresses to the same bytes that the Rust library does.
macro_rules! testtrip {
($name:ident, $data:expr) => {
mod $name {
#[test]
fn roundtrip_raw() {
use super::{depress, press};
roundtrip!($data);
}
#[test]
fn roundtrip_frame() {
use super::{read_frame_depress, write_frame_press};
let d = &$data[..];
assert_eq!(d, &*read_frame_depress(&write_frame_press(d)));
}
#[test]
fn read_and_write_frame_encoder_match() {
use super::{read_frame_press, write_frame_press};
let d = &$data[..];
assert_eq!(read_frame_press(d), write_frame_press(d));
}
#[test]
#[cfg(feature = "cpp")]
fn cmpcpp() {
use super::{press, press_cpp};
let data = &$data[..];
let rust = press(data);
let cpp = press_cpp(data);
if rust == cpp {
return;
}
panic!(
"\ncompression results are not equal!
original (len == {:?})
----------------------
{:?}
rust (len == {:?})
------------------
{:?}
cpp (len == {:?})
-----------------
{:?}
",
data.len(),
data,
rust.len(),
rust,
cpp.len(),
cpp
);
}
}
};
}
// testerrored is a macro that defines a test that decompresses the input,
// and if the result is anything other than the error given, the test fails.
macro_rules! testerrored {
($name:ident, $data:expr, $err:expr) => {
testerrored!($name, $data, $err, false);
};
($name:ident, $data:expr, $err:expr, $bad_header:expr) => {
#[test]
fn $name() {
errored!($data, $err, $bad_header);
}
};
}
// Simple test cases.
testtrip!(empty, &[]);
testtrip!(one_zero, &[0]);
// Roundtrip all of the benchmark data.
testtrip!(data_html, include_bytes!("../data/html"));
testtrip!(data_urls, include_bytes!("../data/urls.10K"));
testtrip!(data_jpg, include_bytes!("../data/fireworks.jpeg"));
testtrip!(data_pdf, include_bytes!("../data/paper-100k.pdf"));
testtrip!(data_html4, include_bytes!("../data/html_x_4"));
testtrip!(data_txt1, include_bytes!("../data/alice29.txt"));
testtrip!(data_txt2, include_bytes!("../data/asyoulik.txt"));
testtrip!(data_txt3, include_bytes!("../data/lcet10.txt"));
testtrip!(data_txt4, include_bytes!("../data/plrabn12.txt"));
testtrip!(data_pb, include_bytes!("../data/geo.protodata"));
testtrip!(data_gaviota, include_bytes!("../data/kppkn.gtb"));
testtrip!(data_golden, include_bytes!("../data/Mark.Twain-Tom.Sawyer.txt"));
// Do it again, with the Snappy frame format.
// Roundtrip the golden data, starting with the compressed bytes.
#[test]
fn data_golden_rev() {
let data = include_bytes!("../data/Mark.Twain-Tom.Sawyer.txt.rawsnappy");
let data = &data[..];
assert_eq!(data, &*press(&depress(data)));
}
// Miscellaneous tests.
#[test]
fn small_copy() {
use std::iter::repeat;
for i in 0..32 {
let inner: String = repeat('b').take(i).collect();
roundtrip!(format!("aaaa{}aaaabbbb", inner).into_bytes());
}
}
#[test]
fn small_regular() {
let mut i = 1;
while i < 20_000 {
let mut buf = vec![0; i];
for (j, x) in buf.iter_mut().enumerate() {
*x = (j % 10) as u8 + b'a';
}
roundtrip!(buf);
i += 23;
}
}
// Test that triggered an out of bounds write.
#[test]
fn decompress_copy_close_to_end_1() {
let buf = [
27,
0b000010_00,
1,
2,
3,
0b000_000_10,
3,
0,
0b010110_00,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
];
let decompressed = [
1, 2, 3, 1, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26,
];
assert_eq!(decompressed, &*depress(&buf));
}
#[test]
fn decompress_copy_close_to_end_2() {
let buf = [
28,
0b000010_00,
1,
2,
3,
0b000_000_10,
3,
0,
0b010111_00,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
];
let decompressed = [
1, 2, 3, 1, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27,
];
assert_eq!(decompressed, &*depress(&buf));
}
// The `read::FrameEncoder` code uses different code paths depending on buffer
// size, so let's test both. Also, very small buffers are a good stress test.
#[test]
fn read_frame_encoder_big_and_little_buffers() {
use snap::read;
use std::io::{BufReader, Read};
let bytes = &include_bytes!("../data/html")[..];
let mut big =
BufReader::with_capacity(1_000_000, read::FrameEncoder::new(bytes));
let mut big_out = vec![];
big.read_to_end(&mut big_out).unwrap();
// 5 bytes is small enough to break up headers, etc.
let mut little =
BufReader::with_capacity(5, read::FrameEncoder::new(bytes));
let mut little_out = vec![];
little.read_to_end(&mut little_out).unwrap();
assert_eq!(big_out, little_out);
}
// Tests decompression on malformed data.
// An empty buffer.
testerrored!(err_empty, &b""[..], Error::Empty);
// Decompress fewer bytes than the header reports.
testerrored!(
err_header_mismatch,
&b"\x05\x00a"[..],
Error::HeaderMismatch { expected_len: 5, got_len: 1 }
);
// An invalid varint (final byte has continuation bit set).
testerrored!(err_varint1, &b"\xFF"[..], Error::Header, true);
// A varint that overflows u64.
testerrored!(
err_varint2,
&b"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00"[..],
Error::Header,
true
);
// A varint that fits in u64 but overflows u32.
testerrored!(
err_varint3,
&b"\x80\x80\x80\x80\x10"[..],
Error::TooBig { given: 4294967296, max: 4294967295 },
true
);
// A literal whose length is too small.
// Since the literal length is 1, 'h' is read as a literal and 'i' is
// interpreted as a copy 1 operation missing its offset byte.
testerrored!(
err_lit,
&b"\x02\x00hi"[..],
Error::CopyRead { len: 1, src_len: 0 }
);
// A literal whose length is too big.
testerrored!(
err_lit_big1,
&b"\x02\xechi"[..],
Error::Literal { len: 60, src_len: 2, dst_len: 2 }
);
// A literal whose length is too big, requires 1 extra byte to be read, and
// src is too short to read that byte.
testerrored!(
err_lit_big2a,
&b"\x02\xf0hi"[..],
Error::Literal { len: 4, src_len: 2, dst_len: 2 }
);
// A literal whose length is too big, requires 1 extra byte to be read,
// src is too short to read the full literal.
testerrored!(
err_lit_big2b,
&b"\x02\xf0hi\x00\x00\x00"[..],
Error::Literal {
len: 105, // because 105 == 'h' as u8 + 1
src_len: 4,
dst_len: 2,
}
);
// A copy 1 operation that stops at the tag byte. This fails because there's
// no byte to read for the copy offset.
testerrored!(
err_copy1,
&b"\x02\x00a\x01"[..],
Error::CopyRead { len: 1, src_len: 0 }
);
// A copy 2 operation that stops at the tag byte and another copy 2 operation
// that stops after the first byte in the offset.
testerrored!(
err_copy2a,
&b"\x11\x00a\x3e"[..],
Error::CopyRead { len: 2, src_len: 0 }
);
testerrored!(
err_copy2b,
&b"\x11\x00a\x3e\x01"[..],
Error::CopyRead { len: 2, src_len: 1 }
);
// Same as copy 2, but for copy 4.
testerrored!(
err_copy3a,
&b"\x11\x00a\x3f"[..],
Error::CopyRead { len: 4, src_len: 0 }
);
testerrored!(
err_copy3b,
&b"\x11\x00a\x3f\x00"[..],
Error::CopyRead { len: 4, src_len: 1 }
);
testerrored!(
err_copy3c,
&b"\x11\x00a\x3f\x00\x00"[..],
Error::CopyRead { len: 4, src_len: 2 }
);
testerrored!(
err_copy3d,
&b"\x11\x00a\x3f\x00\x00\x00"[..],
Error::CopyRead { len: 4, src_len: 3 }
);
// A copy operation whose offset is zero.
testerrored!(
err_copy_offset_zero,
&b"\x11\x00a\x01\x00"[..],
Error::Offset { offset: 0, dst_pos: 1 }
);
// A copy operation whose offset is too big.
testerrored!(
err_copy_offset_big,
&b"\x11\x00a\x01\xFF"[..],
Error::Offset { offset: 255, dst_pos: 1 }
);
// A copy operation whose length is too big.
testerrored!(
err_copy_len_big,
&b"\x05\x00a\x1d\x01"[..],
Error::CopyWrite { len: 11, dst_len: 4 }
);
// Selected random inputs pulled from quickcheck failure witnesses.
testtrip!(
random1,
&[
0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0,
1, 1, 0, 0, 1, 2, 0, 0, 2, 1, 0, 0, 2, 2, 0, 0, 0, 6, 0, 0, 3, 1, 0,
0, 0, 7, 0, 0, 1, 3, 0, 0, 0, 8, 0, 0, 2, 3, 0, 0, 0, 9, 0, 0, 1, 4,
0, 0, 1, 0, 0, 3, 0, 0, 1, 0, 1, 0, 0, 0, 10, 0, 0, 0, 0, 2, 4, 0, 0,
2, 0, 0, 3, 0, 1, 0, 0, 1, 5, 0, 0, 6, 0, 0, 0, 0, 11, 0, 0, 1, 6, 0,
0, 1, 7, 0, 0, 0, 12, 0, 0, 3, 2, 0, 0, 0, 13, 0, 0, 2, 5, 0, 0, 0, 3,
3, 0, 0, 0, 1, 8, 0, 0, 1, 0, 1, 0, 0, 0, 4, 1, 0, 0, 0, 0, 14, 0, 0,
0, 1, 9, 0, 0, 0, 1, 10, 0, 0, 0, 0, 1, 11, 0, 0, 0, 1, 0, 2, 0, 0, 0,
1, 1, 1, 0, 0, 0, 0, 5, 1, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 2, 6, 0,
0, 0, 0, 0, 1, 12, 0, 0, 0, 0, 0, 3, 4, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0,
0, 1, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0,
]
);
testtrip!(
random2,
&[
10, 2, 14, 13, 0, 8, 2, 10, 2, 14, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
]
);
testtrip!(
random3,
&[0, 0, 0, 4, 1, 4, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,]
);
testtrip!(
random4,
&[
0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0,
1, 1, 0, 0, 1, 2, 0, 0, 1, 3, 0, 0, 1, 4, 0, 0, 2, 1, 0, 0, 0, 4, 0,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
]
);
// QuickCheck properties for testing that random data roundtrips.
// These properties tend to produce the inputs for the "random" tests above.
#[test]
fn qc_roundtrip() {
fn p(bytes: Vec<u8>) -> bool {
depress(&press(&bytes)) == bytes
}
QuickCheck::new()
.gen(StdGen::new(rand::thread_rng(), 10_000))
.tests(1_000)
.quickcheck(p as fn(_) -> _);
}
#[test]
fn qc_roundtrip_stream() {
fn p(bytes: Vec<u8>) -> TestResult {
if bytes.is_empty() {
return TestResult::discard();
}
TestResult::from_bool(
read_frame_depress(&write_frame_press(&bytes)) == bytes,
)
}
QuickCheck::new()
.gen(StdGen::new(rand::thread_rng(), 10_000))
.tests(1_000)
.quickcheck(p as fn(_) -> _);
}
#[test]
fn test_short_input() {
// Regression test for https://github.com/BurntSushi/rust-snappy/issues/42
use snap::read;
use std::io::Read;
let err =
read::FrameDecoder::new(&b"123"[..]).read_to_end(&mut Vec::new());
assert_eq!(err.unwrap_err().kind(), std::io::ErrorKind::UnexpectedEof);
}
#[test]
#[cfg(feature = "cpp")]
fn qc_cmpcpp() {
fn p(bytes: Vec<u8>) -> bool {
press(&bytes) == press_cpp(&bytes)
}
QuickCheck::new()
.gen(StdGen::new(rand::thread_rng(), 10_000))
.tests(10_000)
.quickcheck(p as fn(_) -> _);
}
// Regression tests.
// See: https://github.com/BurntSushi/rust-snappy/issues/3
#[cfg(target_pointer_width = "32")]
testerrored!(
err_lit_len_overflow1,
&b"\x11\x00\x00\xfc\xfe\xff\xff\xff"[..],
Error::Literal { len: std::u32::MAX as u64, src_len: 0, dst_len: 16 }
);
#[cfg(target_pointer_width = "32")]
testerrored!(
err_lit_len_overflow2,
&b"\x11\x00\x00\xfc\xff\xff\xff\xff"[..],
Error::Literal { len: std::u32::MAX as u64 + 1, src_len: 0, dst_len: 16 }
);
// Helper functions.
fn press(bytes: &[u8]) -> Vec<u8> {
Encoder::new().compress_vec(bytes).unwrap()
}
fn | (bytes: &[u8]) -> Vec<u8> {
Decoder::new().decompress_vec(bytes).unwrap()
}
fn write_frame_press(bytes: &[u8]) -> Vec<u8> {
use snap::write;
use std::io::Write;
let mut wtr = write::FrameEncoder::new(vec![]);
wtr.write_all(bytes).unwrap();
wtr.into_inner().unwrap()
}
fn read_frame_depress(bytes: &[u8]) -> Vec<u8> {
use snap::read;
use std::io::Read;
let mut buf = vec![];
read::FrameDecoder::new(bytes).read_to_end(&mut buf).unwrap();
buf
}
fn read_frame_press(bytes: &[u8]) -> Vec<u8> {
use snap::read;
use std::io::Read;
let mut buf = vec![];
read::FrameEncoder::new(bytes).read_to_end(&mut buf).unwrap();
buf
}
#[cfg(feature = "cpp")]
fn press_cpp(bytes: &[u8]) -> Vec<u8> {
use snap::raw::max_compress_len;
let mut buf = vec![0; max_compress_len(bytes.len())];
let n = cpp::compress(bytes, &mut buf).unwrap();
buf.truncate(n);
buf
}
| depress | identifier_name |
tests.rs | use quickcheck::{QuickCheck, StdGen, TestResult};
use snap::raw::{decompress_len, Decoder, Encoder};
use snap::Error;
#[cfg(feature = "cpp")]
use snappy_cpp as cpp;
// roundtrip is a macro that compresses the input, then decompresses the result
// and compares it with the original input. If they are not equal, then the
// test fails.
macro_rules! roundtrip {
($data:expr) => {{
let d = &$data[..];
assert_eq!(d, &*depress(&press(d)));
}};
}
// errored is a macro that tries to decompress the input and asserts that it
// resulted in an error. If decompression was successful, then the test fails.
macro_rules! errored {
($data:expr, $err:expr) => {
errored!($data, $err, false);
};
($data:expr, $err:expr, $bad_header:expr) => {{
let d = &$data[..];
let mut buf = if $bad_header {
assert_eq!($err, decompress_len(d).unwrap_err());
vec![0; 1024]
} else {
vec![0; decompress_len(d).unwrap()]
};
match Decoder::new().decompress(d, &mut buf) {
Err(ref err) if err == &$err => {}
Err(ref err) => panic!(
"expected decompression to fail with {:?}, \
but got {:?}",
$err, err
),
Ok(n) => {
panic!(
"\nexpected decompression to fail, but did not!
original (len == {:?})
----------------------
{:?}
decompressed (len == {:?})
--------------------------
{:?}
",
d.len(),
d,
n,
buf
);
}
}
}};
}
// testtrip is a macro that defines a test that compresses the input, then
// decompresses the result and compares it with the original input. If they are
// not equal, then the test fails. This test is performed both on the raw
// Snappy format and the framed Snappy format.
//
// If tests are compiled with the cpp feature, then this also tests that the
// C++ library compresses to the same bytes that the Rust library does.
macro_rules! testtrip {
($name:ident, $data:expr) => {
mod $name {
#[test]
fn roundtrip_raw() {
use super::{depress, press};
roundtrip!($data);
}
#[test]
fn roundtrip_frame() {
use super::{read_frame_depress, write_frame_press};
let d = &$data[..];
assert_eq!(d, &*read_frame_depress(&write_frame_press(d)));
}
#[test]
fn read_and_write_frame_encoder_match() {
use super::{read_frame_press, write_frame_press};
let d = &$data[..];
assert_eq!(read_frame_press(d), write_frame_press(d));
}
#[test]
#[cfg(feature = "cpp")]
fn cmpcpp() {
use super::{press, press_cpp};
let data = &$data[..];
let rust = press(data);
let cpp = press_cpp(data);
if rust == cpp {
return;
}
panic!(
"\ncompression results are not equal!
original (len == {:?})
----------------------
{:?}
rust (len == {:?})
------------------
{:?}
cpp (len == {:?})
-----------------
{:?}
",
data.len(),
data,
rust.len(),
rust,
cpp.len(),
cpp
);
}
}
};
}
// testerrored is a macro that defines a test that decompresses the input,
// and if the result is anything other than the error given, the test fails.
macro_rules! testerrored {
($name:ident, $data:expr, $err:expr) => {
testerrored!($name, $data, $err, false);
};
($name:ident, $data:expr, $err:expr, $bad_header:expr) => {
#[test]
fn $name() {
errored!($data, $err, $bad_header);
}
};
}
// Simple test cases.
testtrip!(empty, &[]);
testtrip!(one_zero, &[0]);
// Roundtrip all of the benchmark data.
testtrip!(data_html, include_bytes!("../data/html"));
testtrip!(data_urls, include_bytes!("../data/urls.10K"));
testtrip!(data_jpg, include_bytes!("../data/fireworks.jpeg"));
testtrip!(data_pdf, include_bytes!("../data/paper-100k.pdf"));
testtrip!(data_html4, include_bytes!("../data/html_x_4"));
testtrip!(data_txt1, include_bytes!("../data/alice29.txt"));
testtrip!(data_txt2, include_bytes!("../data/asyoulik.txt"));
testtrip!(data_txt3, include_bytes!("../data/lcet10.txt"));
testtrip!(data_txt4, include_bytes!("../data/plrabn12.txt"));
testtrip!(data_pb, include_bytes!("../data/geo.protodata"));
testtrip!(data_gaviota, include_bytes!("../data/kppkn.gtb"));
testtrip!(data_golden, include_bytes!("../data/Mark.Twain-Tom.Sawyer.txt"));
// Do it again, with the Snappy frame format.
// Roundtrip the golden data, starting with the compressed bytes.
#[test]
fn data_golden_rev() |
// Miscellaneous tests.
#[test]
fn small_copy() {
use std::iter::repeat;
for i in 0..32 {
let inner: String = repeat('b').take(i).collect();
roundtrip!(format!("aaaa{}aaaabbbb", inner).into_bytes());
}
}
#[test]
fn small_regular() {
let mut i = 1;
while i < 20_000 {
let mut buf = vec![0; i];
for (j, x) in buf.iter_mut().enumerate() {
*x = (j % 10) as u8 + b'a';
}
roundtrip!(buf);
i += 23;
}
}
// Test that triggered an out of bounds write.
#[test]
fn decompress_copy_close_to_end_1() {
let buf = [
27,
0b000010_00,
1,
2,
3,
0b000_000_10,
3,
0,
0b010110_00,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
];
let decompressed = [
1, 2, 3, 1, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26,
];
assert_eq!(decompressed, &*depress(&buf));
}
#[test]
fn decompress_copy_close_to_end_2() {
let buf = [
28,
0b000010_00,
1,
2,
3,
0b000_000_10,
3,
0,
0b010111_00,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
];
let decompressed = [
1, 2, 3, 1, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27,
];
assert_eq!(decompressed, &*depress(&buf));
}
// The `read::FrameEncoder` code uses different code paths depending on buffer
// size, so let's test both. Also, very small buffers are a good stress test.
#[test]
fn read_frame_encoder_big_and_little_buffers() {
use snap::read;
use std::io::{BufReader, Read};
let bytes = &include_bytes!("../data/html")[..];
let mut big =
BufReader::with_capacity(1_000_000, read::FrameEncoder::new(bytes));
let mut big_out = vec![];
big.read_to_end(&mut big_out).unwrap();
// 5 bytes is small enough to break up headers, etc.
let mut little =
BufReader::with_capacity(5, read::FrameEncoder::new(bytes));
let mut little_out = vec![];
little.read_to_end(&mut little_out).unwrap();
assert_eq!(big_out, little_out);
}
// Tests decompression on malformed data.
// An empty buffer.
testerrored!(err_empty, &b""[..], Error::Empty);
// Decompress fewer bytes than the header reports.
testerrored!(
err_header_mismatch,
&b"\x05\x00a"[..],
Error::HeaderMismatch { expected_len: 5, got_len: 1 }
);
// An invalid varint (final byte has continuation bit set).
testerrored!(err_varint1, &b"\xFF"[..], Error::Header, true);
// A varint that overflows u64.
testerrored!(
err_varint2,
&b"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00"[..],
Error::Header,
true
);
// A varint that fits in u64 but overflows u32.
testerrored!(
err_varint3,
&b"\x80\x80\x80\x80\x10"[..],
Error::TooBig { given: 4294967296, max: 4294967295 },
true
);
// A literal whose length is too small.
// Since the literal length is 1, 'h' is read as a literal and 'i' is
// interpreted as a copy 1 operation missing its offset byte.
testerrored!(
err_lit,
&b"\x02\x00hi"[..],
Error::CopyRead { len: 1, src_len: 0 }
);
// A literal whose length is too big.
testerrored!(
err_lit_big1,
&b"\x02\xechi"[..],
Error::Literal { len: 60, src_len: 2, dst_len: 2 }
);
// A literal whose length is too big, requires 1 extra byte to be read, and
// src is too short to read that byte.
testerrored!(
err_lit_big2a,
&b"\x02\xf0hi"[..],
Error::Literal { len: 4, src_len: 2, dst_len: 2 }
);
// A literal whose length is too big, requires 1 extra byte to be read,
// src is too short to read the full literal.
testerrored!(
err_lit_big2b,
&b"\x02\xf0hi\x00\x00\x00"[..],
Error::Literal {
len: 105, // because 105 == 'h' as u8 + 1
src_len: 4,
dst_len: 2,
}
);
// A copy 1 operation that stops at the tag byte. This fails because there's
// no byte to read for the copy offset.
testerrored!(
err_copy1,
&b"\x02\x00a\x01"[..],
Error::CopyRead { len: 1, src_len: 0 }
);
// A copy 2 operation that stops at the tag byte and another copy 2 operation
// that stops after the first byte in the offset.
testerrored!(
err_copy2a,
&b"\x11\x00a\x3e"[..],
Error::CopyRead { len: 2, src_len: 0 }
);
testerrored!(
err_copy2b,
&b"\x11\x00a\x3e\x01"[..],
Error::CopyRead { len: 2, src_len: 1 }
);
// Same as copy 2, but for copy 4.
testerrored!(
err_copy3a,
&b"\x11\x00a\x3f"[..],
Error::CopyRead { len: 4, src_len: 0 }
);
testerrored!(
err_copy3b,
&b"\x11\x00a\x3f\x00"[..],
Error::CopyRead { len: 4, src_len: 1 }
);
testerrored!(
err_copy3c,
&b"\x11\x00a\x3f\x00\x00"[..],
Error::CopyRead { len: 4, src_len: 2 }
);
testerrored!(
err_copy3d,
&b"\x11\x00a\x3f\x00\x00\x00"[..],
Error::CopyRead { len: 4, src_len: 3 }
);
// A copy operation whose offset is zero.
testerrored!(
err_copy_offset_zero,
&b"\x11\x00a\x01\x00"[..],
Error::Offset { offset: 0, dst_pos: 1 }
);
// A copy operation whose offset is too big.
testerrored!(
err_copy_offset_big,
&b"\x11\x00a\x01\xFF"[..],
Error::Offset { offset: 255, dst_pos: 1 }
);
// A copy operation whose length is too big.
testerrored!(
err_copy_len_big,
&b"\x05\x00a\x1d\x01"[..],
Error::CopyWrite { len: 11, dst_len: 4 }
);
// Selected random inputs pulled from quickcheck failure witnesses.
testtrip!(
random1,
&[
0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0,
1, 1, 0, 0, 1, 2, 0, 0, 2, 1, 0, 0, 2, 2, 0, 0, 0, 6, 0, 0, 3, 1, 0,
0, 0, 7, 0, 0, 1, 3, 0, 0, 0, 8, 0, 0, 2, 3, 0, 0, 0, 9, 0, 0, 1, 4,
0, 0, 1, 0, 0, 3, 0, 0, 1, 0, 1, 0, 0, 0, 10, 0, 0, 0, 0, 2, 4, 0, 0,
2, 0, 0, 3, 0, 1, 0, 0, 1, 5, 0, 0, 6, 0, 0, 0, 0, 11, 0, 0, 1, 6, 0,
0, 1, 7, 0, 0, 0, 12, 0, 0, 3, 2, 0, 0, 0, 13, 0, 0, 2, 5, 0, 0, 0, 3,
3, 0, 0, 0, 1, 8, 0, 0, 1, 0, 1, 0, 0, 0, 4, 1, 0, 0, 0, 0, 14, 0, 0,
0, 1, 9, 0, 0, 0, 1, 10, 0, 0, 0, 0, 1, 11, 0, 0, 0, 1, 0, 2, 0, 0, 0,
1, 1, 1, 0, 0, 0, 0, 5, 1, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 2, 6, 0,
0, 0, 0, 0, 1, 12, 0, 0, 0, 0, 0, 3, 4, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0,
0, 1, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0,
]
);
testtrip!(
random2,
&[
10, 2, 14, 13, 0, 8, 2, 10, 2, 14, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
]
);
testtrip!(
random3,
&[0, 0, 0, 4, 1, 4, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,]
);
testtrip!(
random4,
&[
0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0,
1, 1, 0, 0, 1, 2, 0, 0, 1, 3, 0, 0, 1, 4, 0, 0, 2, 1, 0, 0, 0, 4, 0,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
]
);
// QuickCheck properties for testing that random data roundtrips.
// These properties tend to produce the inputs for the "random" tests above.
#[test]
fn qc_roundtrip() {
fn p(bytes: Vec<u8>) -> bool {
depress(&press(&bytes)) == bytes
}
QuickCheck::new()
.gen(StdGen::new(rand::thread_rng(), 10_000))
.tests(1_000)
.quickcheck(p as fn(_) -> _);
}
#[test]
fn qc_roundtrip_stream() {
fn p(bytes: Vec<u8>) -> TestResult {
if bytes.is_empty() {
return TestResult::discard();
}
TestResult::from_bool(
read_frame_depress(&write_frame_press(&bytes)) == bytes,
)
}
QuickCheck::new()
.gen(StdGen::new(rand::thread_rng(), 10_000))
.tests(1_000)
.quickcheck(p as fn(_) -> _);
}
#[test]
fn test_short_input() {
// Regression test for https://github.com/BurntSushi/rust-snappy/issues/42
use snap::read;
use std::io::Read;
let err =
read::FrameDecoder::new(&b"123"[..]).read_to_end(&mut Vec::new());
assert_eq!(err.unwrap_err().kind(), std::io::ErrorKind::UnexpectedEof);
}
#[test]
#[cfg(feature = "cpp")]
fn qc_cmpcpp() {
fn p(bytes: Vec<u8>) -> bool {
press(&bytes) == press_cpp(&bytes)
}
QuickCheck::new()
.gen(StdGen::new(rand::thread_rng(), 10_000))
.tests(10_000)
.quickcheck(p as fn(_) -> _);
}
// Regression tests.
// See: https://github.com/BurntSushi/rust-snappy/issues/3
#[cfg(target_pointer_width = "32")]
testerrored!(
err_lit_len_overflow1,
&b"\x11\x00\x00\xfc\xfe\xff\xff\xff"[..],
Error::Literal { len: std::u32::MAX as u64, src_len: 0, dst_len: 16 }
);
#[cfg(target_pointer_width = "32")]
testerrored!(
err_lit_len_overflow2,
&b"\x11\x00\x00\xfc\xff\xff\xff\xff"[..],
Error::Literal { len: std::u32::MAX as u64 + 1, src_len: 0, dst_len: 16 }
);
// Helper functions.
fn press(bytes: &[u8]) -> Vec<u8> {
Encoder::new().compress_vec(bytes).unwrap()
}
fn depress(bytes: &[u8]) -> Vec<u8> {
Decoder::new().decompress_vec(bytes).unwrap()
}
fn write_frame_press(bytes: &[u8]) -> Vec<u8> {
use snap::write;
use std::io::Write;
let mut wtr = write::FrameEncoder::new(vec![]);
wtr.write_all(bytes).unwrap();
wtr.into_inner().unwrap()
}
fn read_frame_depress(bytes: &[u8]) -> Vec<u8> {
use snap::read;
use std::io::Read;
let mut buf = vec![];
read::FrameDecoder::new(bytes).read_to_end(&mut buf).unwrap();
buf
}
fn read_frame_press(bytes: &[u8]) -> Vec<u8> {
use snap::read;
use std::io::Read;
let mut buf = vec![];
read::FrameEncoder::new(bytes).read_to_end(&mut buf).unwrap();
buf
}
#[cfg(feature = "cpp")]
fn press_cpp(bytes: &[u8]) -> Vec<u8> {
use snap::raw::max_compress_len;
let mut buf = vec![0; max_compress_len(bytes.len())];
let n = cpp::compress(bytes, &mut buf).unwrap();
buf.truncate(n);
buf
}
| {
let data = include_bytes!("../data/Mark.Twain-Tom.Sawyer.txt.rawsnappy");
let data = &data[..];
assert_eq!(data, &*press(&depress(data)));
} | identifier_body |
tests.rs | use quickcheck::{QuickCheck, StdGen, TestResult};
use snap::raw::{decompress_len, Decoder, Encoder};
use snap::Error;
#[cfg(feature = "cpp")]
use snappy_cpp as cpp;
// roundtrip is a macro that compresses the input, then decompresses the result
// and compares it with the original input. If they are not equal, then the
// test fails.
macro_rules! roundtrip {
($data:expr) => {{
let d = &$data[..];
assert_eq!(d, &*depress(&press(d)));
}};
}
// errored is a macro that tries to decompress the input and asserts that it
// resulted in an error. If decompression was successful, then the test fails.
macro_rules! errored {
($data:expr, $err:expr) => {
errored!($data, $err, false);
};
($data:expr, $err:expr, $bad_header:expr) => {{
let d = &$data[..];
let mut buf = if $bad_header {
assert_eq!($err, decompress_len(d).unwrap_err());
vec![0; 1024]
} else { | };
match Decoder::new().decompress(d, &mut buf) {
Err(ref err) if err == &$err => {}
Err(ref err) => panic!(
"expected decompression to fail with {:?}, \
but got {:?}",
$err, err
),
Ok(n) => {
panic!(
"\nexpected decompression to fail, but did not!
original (len == {:?})
----------------------
{:?}
decompressed (len == {:?})
--------------------------
{:?}
",
d.len(),
d,
n,
buf
);
}
}
}};
}
// testtrip is a macro that defines a test that compresses the input, then
// decompresses the result and compares it with the original input. If they are
// not equal, then the test fails. This test is performed both on the raw
// Snappy format and the framed Snappy format.
//
// If tests are compiled with the cpp feature, then this also tests that the
// C++ library compresses to the same bytes that the Rust library does.
macro_rules! testtrip {
($name:ident, $data:expr) => {
mod $name {
#[test]
fn roundtrip_raw() {
use super::{depress, press};
roundtrip!($data);
}
#[test]
fn roundtrip_frame() {
use super::{read_frame_depress, write_frame_press};
let d = &$data[..];
assert_eq!(d, &*read_frame_depress(&write_frame_press(d)));
}
#[test]
fn read_and_write_frame_encoder_match() {
use super::{read_frame_press, write_frame_press};
let d = &$data[..];
assert_eq!(read_frame_press(d), write_frame_press(d));
}
#[test]
#[cfg(feature = "cpp")]
fn cmpcpp() {
use super::{press, press_cpp};
let data = &$data[..];
let rust = press(data);
let cpp = press_cpp(data);
if rust == cpp {
return;
}
panic!(
"\ncompression results are not equal!
original (len == {:?})
----------------------
{:?}
rust (len == {:?})
------------------
{:?}
cpp (len == {:?})
-----------------
{:?}
",
data.len(),
data,
rust.len(),
rust,
cpp.len(),
cpp
);
}
}
};
}
// testerrored is a macro that defines a test that decompresses the input,
// and if the result is anything other than the error given, the test fails.
macro_rules! testerrored {
($name:ident, $data:expr, $err:expr) => {
testerrored!($name, $data, $err, false);
};
($name:ident, $data:expr, $err:expr, $bad_header:expr) => {
#[test]
fn $name() {
errored!($data, $err, $bad_header);
}
};
}
// Simple test cases.
testtrip!(empty, &[]);
testtrip!(one_zero, &[0]);
// Roundtrip all of the benchmark data.
testtrip!(data_html, include_bytes!("../data/html"));
testtrip!(data_urls, include_bytes!("../data/urls.10K"));
testtrip!(data_jpg, include_bytes!("../data/fireworks.jpeg"));
testtrip!(data_pdf, include_bytes!("../data/paper-100k.pdf"));
testtrip!(data_html4, include_bytes!("../data/html_x_4"));
testtrip!(data_txt1, include_bytes!("../data/alice29.txt"));
testtrip!(data_txt2, include_bytes!("../data/asyoulik.txt"));
testtrip!(data_txt3, include_bytes!("../data/lcet10.txt"));
testtrip!(data_txt4, include_bytes!("../data/plrabn12.txt"));
testtrip!(data_pb, include_bytes!("../data/geo.protodata"));
testtrip!(data_gaviota, include_bytes!("../data/kppkn.gtb"));
testtrip!(data_golden, include_bytes!("../data/Mark.Twain-Tom.Sawyer.txt"));
// Do it again, with the Snappy frame format.
// Roundtrip the golden data, starting with the compressed bytes.
#[test]
fn data_golden_rev() {
let data = include_bytes!("../data/Mark.Twain-Tom.Sawyer.txt.rawsnappy");
let data = &data[..];
assert_eq!(data, &*press(&depress(data)));
}
// Miscellaneous tests.
#[test]
fn small_copy() {
use std::iter::repeat;
for i in 0..32 {
let inner: String = repeat('b').take(i).collect();
roundtrip!(format!("aaaa{}aaaabbbb", inner).into_bytes());
}
}
#[test]
fn small_regular() {
let mut i = 1;
while i < 20_000 {
let mut buf = vec![0; i];
for (j, x) in buf.iter_mut().enumerate() {
*x = (j % 10) as u8 + b'a';
}
roundtrip!(buf);
i += 23;
}
}
// Test that triggered an out of bounds write.
#[test]
fn decompress_copy_close_to_end_1() {
let buf = [
27,
0b000010_00,
1,
2,
3,
0b000_000_10,
3,
0,
0b010110_00,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
];
let decompressed = [
1, 2, 3, 1, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26,
];
assert_eq!(decompressed, &*depress(&buf));
}
#[test]
fn decompress_copy_close_to_end_2() {
let buf = [
28,
0b000010_00,
1,
2,
3,
0b000_000_10,
3,
0,
0b010111_00,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
];
let decompressed = [
1, 2, 3, 1, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27,
];
assert_eq!(decompressed, &*depress(&buf));
}
// The `read::FrameEncoder` code uses different code paths depending on buffer
// size, so let's test both. Also, very small buffers are a good stress test.
#[test]
fn read_frame_encoder_big_and_little_buffers() {
use snap::read;
use std::io::{BufReader, Read};
let bytes = &include_bytes!("../data/html")[..];
let mut big =
BufReader::with_capacity(1_000_000, read::FrameEncoder::new(bytes));
let mut big_out = vec![];
big.read_to_end(&mut big_out).unwrap();
// 5 bytes is small enough to break up headers, etc.
let mut little =
BufReader::with_capacity(5, read::FrameEncoder::new(bytes));
let mut little_out = vec![];
little.read_to_end(&mut little_out).unwrap();
assert_eq!(big_out, little_out);
}
// Tests decompression on malformed data.
// An empty buffer.
testerrored!(err_empty, &b""[..], Error::Empty);
// Decompress fewer bytes than the header reports.
testerrored!(
err_header_mismatch,
&b"\x05\x00a"[..],
Error::HeaderMismatch { expected_len: 5, got_len: 1 }
);
// An invalid varint (final byte has continuation bit set).
testerrored!(err_varint1, &b"\xFF"[..], Error::Header, true);
// A varint that overflows u64.
testerrored!(
err_varint2,
&b"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00"[..],
Error::Header,
true
);
// A varint that fits in u64 but overflows u32.
testerrored!(
err_varint3,
&b"\x80\x80\x80\x80\x10"[..],
Error::TooBig { given: 4294967296, max: 4294967295 },
true
);
// A literal whose length is too small.
// Since the literal length is 1, 'h' is read as a literal and 'i' is
// interpreted as a copy 1 operation missing its offset byte.
testerrored!(
err_lit,
&b"\x02\x00hi"[..],
Error::CopyRead { len: 1, src_len: 0 }
);
// A literal whose length is too big.
testerrored!(
err_lit_big1,
&b"\x02\xechi"[..],
Error::Literal { len: 60, src_len: 2, dst_len: 2 }
);
// A literal whose length is too big, requires 1 extra byte to be read, and
// src is too short to read that byte.
testerrored!(
err_lit_big2a,
&b"\x02\xf0hi"[..],
Error::Literal { len: 4, src_len: 2, dst_len: 2 }
);
// A literal whose length is too big, requires 1 extra byte to be read,
// src is too short to read the full literal.
testerrored!(
err_lit_big2b,
&b"\x02\xf0hi\x00\x00\x00"[..],
Error::Literal {
len: 105, // because 105 == 'h' as u8 + 1
src_len: 4,
dst_len: 2,
}
);
// A copy 1 operation that stops at the tag byte. This fails because there's
// no byte to read for the copy offset.
testerrored!(
err_copy1,
&b"\x02\x00a\x01"[..],
Error::CopyRead { len: 1, src_len: 0 }
);
// A copy 2 operation that stops at the tag byte and another copy 2 operation
// that stops after the first byte in the offset.
testerrored!(
err_copy2a,
&b"\x11\x00a\x3e"[..],
Error::CopyRead { len: 2, src_len: 0 }
);
testerrored!(
err_copy2b,
&b"\x11\x00a\x3e\x01"[..],
Error::CopyRead { len: 2, src_len: 1 }
);
// Same as copy 2, but for copy 4.
testerrored!(
err_copy3a,
&b"\x11\x00a\x3f"[..],
Error::CopyRead { len: 4, src_len: 0 }
);
testerrored!(
err_copy3b,
&b"\x11\x00a\x3f\x00"[..],
Error::CopyRead { len: 4, src_len: 1 }
);
testerrored!(
err_copy3c,
&b"\x11\x00a\x3f\x00\x00"[..],
Error::CopyRead { len: 4, src_len: 2 }
);
testerrored!(
err_copy3d,
&b"\x11\x00a\x3f\x00\x00\x00"[..],
Error::CopyRead { len: 4, src_len: 3 }
);
// A copy operation whose offset is zero.
testerrored!(
err_copy_offset_zero,
&b"\x11\x00a\x01\x00"[..],
Error::Offset { offset: 0, dst_pos: 1 }
);
// A copy operation whose offset is too big.
testerrored!(
err_copy_offset_big,
&b"\x11\x00a\x01\xFF"[..],
Error::Offset { offset: 255, dst_pos: 1 }
);
// A copy operation whose length is too big.
testerrored!(
err_copy_len_big,
&b"\x05\x00a\x1d\x01"[..],
Error::CopyWrite { len: 11, dst_len: 4 }
);
// Selected random inputs pulled from quickcheck failure witnesses.
testtrip!(
random1,
&[
0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0,
1, 1, 0, 0, 1, 2, 0, 0, 2, 1, 0, 0, 2, 2, 0, 0, 0, 6, 0, 0, 3, 1, 0,
0, 0, 7, 0, 0, 1, 3, 0, 0, 0, 8, 0, 0, 2, 3, 0, 0, 0, 9, 0, 0, 1, 4,
0, 0, 1, 0, 0, 3, 0, 0, 1, 0, 1, 0, 0, 0, 10, 0, 0, 0, 0, 2, 4, 0, 0,
2, 0, 0, 3, 0, 1, 0, 0, 1, 5, 0, 0, 6, 0, 0, 0, 0, 11, 0, 0, 1, 6, 0,
0, 1, 7, 0, 0, 0, 12, 0, 0, 3, 2, 0, 0, 0, 13, 0, 0, 2, 5, 0, 0, 0, 3,
3, 0, 0, 0, 1, 8, 0, 0, 1, 0, 1, 0, 0, 0, 4, 1, 0, 0, 0, 0, 14, 0, 0,
0, 1, 9, 0, 0, 0, 1, 10, 0, 0, 0, 0, 1, 11, 0, 0, 0, 1, 0, 2, 0, 0, 0,
1, 1, 1, 0, 0, 0, 0, 5, 1, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 2, 6, 0,
0, 0, 0, 0, 1, 12, 0, 0, 0, 0, 0, 3, 4, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0,
0, 1, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0,
]
);
testtrip!(
random2,
&[
10, 2, 14, 13, 0, 8, 2, 10, 2, 14, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
]
);
testtrip!(
random3,
&[0, 0, 0, 4, 1, 4, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,]
);
testtrip!(
random4,
&[
0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0,
1, 1, 0, 0, 1, 2, 0, 0, 1, 3, 0, 0, 1, 4, 0, 0, 2, 1, 0, 0, 0, 4, 0,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
]
);
// QuickCheck properties for testing that random data roundtrips.
// These properties tend to produce the inputs for the "random" tests above.
#[test]
fn qc_roundtrip() {
fn p(bytes: Vec<u8>) -> bool {
depress(&press(&bytes)) == bytes
}
QuickCheck::new()
.gen(StdGen::new(rand::thread_rng(), 10_000))
.tests(1_000)
.quickcheck(p as fn(_) -> _);
}
#[test]
fn qc_roundtrip_stream() {
fn p(bytes: Vec<u8>) -> TestResult {
if bytes.is_empty() {
return TestResult::discard();
}
TestResult::from_bool(
read_frame_depress(&write_frame_press(&bytes)) == bytes,
)
}
QuickCheck::new()
.gen(StdGen::new(rand::thread_rng(), 10_000))
.tests(1_000)
.quickcheck(p as fn(_) -> _);
}
#[test]
fn test_short_input() {
// Regression test for https://github.com/BurntSushi/rust-snappy/issues/42
use snap::read;
use std::io::Read;
let err =
read::FrameDecoder::new(&b"123"[..]).read_to_end(&mut Vec::new());
assert_eq!(err.unwrap_err().kind(), std::io::ErrorKind::UnexpectedEof);
}
#[test]
#[cfg(feature = "cpp")]
fn qc_cmpcpp() {
fn p(bytes: Vec<u8>) -> bool {
press(&bytes) == press_cpp(&bytes)
}
QuickCheck::new()
.gen(StdGen::new(rand::thread_rng(), 10_000))
.tests(10_000)
.quickcheck(p as fn(_) -> _);
}
// Regression tests.
// See: https://github.com/BurntSushi/rust-snappy/issues/3
#[cfg(target_pointer_width = "32")]
testerrored!(
err_lit_len_overflow1,
&b"\x11\x00\x00\xfc\xfe\xff\xff\xff"[..],
Error::Literal { len: std::u32::MAX as u64, src_len: 0, dst_len: 16 }
);
#[cfg(target_pointer_width = "32")]
testerrored!(
err_lit_len_overflow2,
&b"\x11\x00\x00\xfc\xff\xff\xff\xff"[..],
Error::Literal { len: std::u32::MAX as u64 + 1, src_len: 0, dst_len: 16 }
);
// Helper functions.
fn press(bytes: &[u8]) -> Vec<u8> {
Encoder::new().compress_vec(bytes).unwrap()
}
fn depress(bytes: &[u8]) -> Vec<u8> {
Decoder::new().decompress_vec(bytes).unwrap()
}
fn write_frame_press(bytes: &[u8]) -> Vec<u8> {
use snap::write;
use std::io::Write;
let mut wtr = write::FrameEncoder::new(vec![]);
wtr.write_all(bytes).unwrap();
wtr.into_inner().unwrap()
}
fn read_frame_depress(bytes: &[u8]) -> Vec<u8> {
use snap::read;
use std::io::Read;
let mut buf = vec![];
read::FrameDecoder::new(bytes).read_to_end(&mut buf).unwrap();
buf
}
fn read_frame_press(bytes: &[u8]) -> Vec<u8> {
use snap::read;
use std::io::Read;
let mut buf = vec![];
read::FrameEncoder::new(bytes).read_to_end(&mut buf).unwrap();
buf
}
#[cfg(feature = "cpp")]
fn press_cpp(bytes: &[u8]) -> Vec<u8> {
use snap::raw::max_compress_len;
let mut buf = vec![0; max_compress_len(bytes.len())];
let n = cpp::compress(bytes, &mut buf).unwrap();
buf.truncate(n);
buf
}
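// For symmetry with `press_cpp`, a decompression helper through the C++
// bindings might look like the sketch below. It assumes the test-only `cpp`
// module exposes a `decompress(&[u8], &mut [u8]) -> Result<usize, _>` that
// mirrors `compress`; that signature is a guess, not a documented API.
#[cfg(feature = "cpp")]
#[allow(dead_code)]
fn depress_cpp(bytes: &[u8]) -> Vec<u8> {
    use snap::raw::decompress_len;
    let mut buf = vec![0; decompress_len(bytes).unwrap()];
    let n = cpp::decompress(bytes, &mut buf).unwrap();
    buf.truncate(n);
    buf
}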
// learn.rs
use super::events_from_chunks;
use clap::{App, Arg, ArgGroup, ArgMatches, SubCommand};
use futures::{try_join, Stream, StreamExt, TryStreamExt};
use nanoid::nanoid;
use serde::Serialize;
use serde_json;
use std::collections::HashMap;
use std::sync::Arc;
use tokio::io::{stdin, stdout, AsyncWrite};
use tokio::sync::mpsc;
use tokio_stream::wrappers::ReceiverStream;
use optic_diff_engine::streams;
use optic_diff_engine::{analyze_undocumented_bodies, EndpointCommand, SpecCommand};
use optic_diff_engine::{
BodyAnalysisLocation, HttpInteraction, SpecChunkEvent, SpecEvent, SpecIdGenerator,
SpecProjection, TrailObservationsResult,
};
pub const SUBCOMMAND_NAME: &'static str = "learn";
pub fn create_subcommand<'a, 'b>() -> App<'a, 'b> {
SubCommand::with_name(SUBCOMMAND_NAME)
.about("Learns about possible changes to the spec based on interactions or diffs")
.arg(
Arg::with_name("undocumented-bodies")
.long("undocumented-bodies")
.takes_value(false)
.help("Learn shapes of undocumented bodies from interactions piped to stdin"),
)
.arg(
Arg::with_name("shape-diffs")
.long("shape-diffs")
.takes_value(false)
.help("Learn updated shapes from shape diffs piped to stdin"),
)
.group(
ArgGroup::with_name("subject")
.args(&["undocumented-bodies", "shape-diffs"])
.multiple(false)
.required(true),
)
}
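// For reference, the resulting command-line surface looks roughly like this
// (the binary name is illustrative; exactly one of the two flags is required,
// since they form the non-multiple `subject` group above):
//
//     optic-engine learn --undocumented-bodies < interactions.jsonl
//     optic-engine learn --shape-diffs < shape-diffs.jsonl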
pub async fn main<'a>(
command_matches: &'a ArgMatches<'a>,
spec_chunks: Vec<SpecChunkEvent>,
input_queue_size: usize,
) {
let spec_events = events_from_chunks(spec_chunks).await;
if command_matches.is_present("undocumented-bodies") {
let stdin = stdin();
let interaction_lines = streams::http_interaction::json_lines(stdin);
let sink = stdout();
learn_undocumented_bodies(spec_events, input_queue_size, interaction_lines, sink).await;
} else if command_matches.is_present("shape-diffs") {
todo!("shape diffs learning is yet to be implemented");
} else {
unreachable!("subject is required");
}
}
async fn learn_undocumented_bodies<S:'static + AsyncWrite + Unpin + Send>(
spec_events: Vec<SpecEvent>,
input_queue_size: usize,
interaction_lines: impl Stream<Item = Result<String, std::io::Error>>,
sink: S,
) {
let spec_projection = Arc::new(SpecProjection::from(spec_events));
let (analysis_sender, analysis_receiver) = mpsc::channel(32);
let analyzing_bodies = async move {
let analyze_results = interaction_lines
.map(Ok)
.try_for_each_concurrent(input_queue_size, |interaction_json_result| {
let projection = spec_projection.clone();
let analysis_sender = analysis_sender.clone();
let analyze_task = tokio::spawn(async move {
let analyze_comp = tokio::task::spawn_blocking(move || {
let interaction_json =
interaction_json_result.expect("can read interaction json line from stdin");
let interaction: HttpInteraction =
serde_json::from_str(&interaction_json).expect("could not parse interaction json");
analyze_undocumented_bodies(&projection, interaction)
});
match analyze_comp.await {
Ok(results) => {
for result in results {
analysis_sender
.send(result)
.await
.expect("could not send analysis result to aggregation channel")
}
}
Err(err) => {
// skip a single interaction whose analysis task failed (e.g. panicked
// while deserializing), rather than aborting the whole run
eprintln!("interaction ignored: {}", err);
}
}
});
analyze_task
})
.await;
analyze_results
};
let aggregating_results = tokio::spawn(async move {
let mut analyses = ReceiverStream::new(analysis_receiver);
let mut id_generator = IdGenerator::default();
let mut observations_by_body_location = HashMap::new();
while let Some(analysis) = analyses.next().await {
let existing_observations = observations_by_body_location
.entry(analysis.body_location)
.or_insert_with(|| TrailObservationsResult::default());
existing_observations.union(analysis.trail_observations);
}
let mut endpoints_by_endpoint = HashMap::new();
for (body_location, observations) in observations_by_body_location {
let (root_shape_id, body_commands) = observations.into_commands(&mut id_generator);
let endpoint_body = EndpointBody::new(&body_location, root_shape_id, body_commands);
let (path_id, method) = match body_location {
BodyAnalysisLocation::Request {
path_id, method,..
} => (path_id, method),
BodyAnalysisLocation::Response {
path_id, method,..
} => (path_id, method),
};
let endpoint_bodies = endpoints_by_endpoint
.entry((path_id, method))
.or_insert_with_key(|(path_id, method)| {
EndpointBodies::new(path_id.clone(), method.clone())
});
endpoint_bodies.push(endpoint_body);
}
streams::write_to_json_lines(sink, endpoints_by_endpoint.values())
.await
.expect("could not write endpoint bodies to stdout");
});
try_join!(analyzing_bodies, aggregating_results).expect("essential worker task panicked");
}
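// The function above fans analysis work out to blocking tasks and fans the
// results back in over an mpsc channel. A reduced, self-contained sketch of
// that shape (illustrative only; the names here are not from the original):
#[allow(dead_code)]
async fn fan_in_demo() {
    use futures::StreamExt;
    let (tx, rx) = tokio::sync::mpsc::channel::<u32>(32);
    let producer = async move {
        for n in 0u32..4 {
            tx.send(n * 2).await.expect("receiver should be alive");
        }
        // `tx` drops here, closing the channel and ending the consumer loop.
    };
    let consumer = tokio::spawn(async move {
        let mut values = tokio_stream::wrappers::ReceiverStream::new(rx);
        let mut sum = 0;
        while let Some(v) = values.next().await {
            sum += v;
        }
        sum
    });
    let ((), sum) = futures::join!(producer, async { consumer.await.expect("consumer task") });
    assert_eq!(sum, 12);
}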
#[derive(Debug, Default)]
struct IdGenerator;
impl SpecIdGenerator for IdGenerator {
fn generate_id(&mut self, prefix: &str) -> String {
// NanoID @ 10 chars:
// - URL-safe,
// - 17 years for a 1% chance of at least one global collision assuming
// writing 1000 ids per hour (https://zelark.github.io/nano-id-cc/)
format!("{}{}", prefix, nanoid!(10))
}
}
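// A quick illustration of the generator's contract (this test is ours, not
// part of the original file): an id is the given prefix plus ten NanoID
// characters.
#[cfg(test)]
mod id_generator_tests {
    use super::*;
    #[test]
    fn ids_are_prefixed_and_ten_chars_long() {
        let mut ids = IdGenerator::default();
        let id = ids.generate_id("shape_");
        assert!(id.starts_with("shape_"));
        assert_eq!(id.len(), "shape_".len() + 10);
    }
}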
#[derive(Default, Debug, Serialize)]
#[serde(rename_all = "camelCase")]
struct EndpointBodies {
path_id: String,
method: String,
requests: Vec<EndpointRequestBody>,
responses: Vec<EndpointResponseBody>,
}
impl EndpointBodies {
pub fn new(path_id: String, method: String) -> Self {
Self {
path_id,
method,
requests: vec![],
responses: vec![],
}
}
pub fn push(&mut self, endpoint: EndpointBody) {
match endpoint {
EndpointBody::Request(endpoint_request) => {
self.requests.push(endpoint_request);
}
EndpointBody::Response(endpoint_response) => {
self.responses.push(endpoint_response);
}
}
}
}
#[derive(Debug)]
enum EndpointBody {
Request(EndpointRequestBody),
Response(EndpointResponseBody),
}
#[derive(Default, Debug, Serialize)]
#[serde(rename_all = "camelCase")]
struct EndpointRequestBody {
commands: Vec<SpecCommand>,
#[serde(skip)]
path_id: String,
#[serde(skip)]
method: String,
#[serde(flatten)]
body_descriptor: Option<EndpointBodyDescriptor>,
}
#[derive(Default, Debug, Serialize)]
#[serde(rename_all = "camelCase")]
struct EndpointResponseBody {
commands: Vec<SpecCommand>,
status_code: u16,
#[serde(skip)]
path_id: String,
#[serde(skip)]
method: String,
#[serde(flatten)]
body_descriptor: Option<EndpointBodyDescriptor>,
}
#[derive(Default, Debug, Serialize)]
#[serde(rename_all = "camelCase")]
struct EndpointBodyDescriptor {
content_type: String,
root_shape_id: String,
}
impl EndpointBody {
fn new(
body_location: &BodyAnalysisLocation,
root_shape_id: Option<String>,
body_commands: impl IntoIterator<Item = SpecCommand>,
) -> Self {
let body_descriptor = match root_shape_id {
Some(root_shape_id) => Some(EndpointBodyDescriptor {
content_type: body_location
.content_type()
.expect("root shape id implies a content type to be present")
.clone(),
root_shape_id,
}),
None => None,
};
let mut body = match body_location {
BodyAnalysisLocation::Request {
path_id, method,..
} => EndpointBody::Request(EndpointRequestBody {
body_descriptor,
path_id: path_id.clone(),
method: method.clone(),
commands: body_commands.into_iter().collect(),
}),
BodyAnalysisLocation::Response {
status_code,
path_id,
method,
..
} => EndpointBody::Response(EndpointResponseBody {
body_descriptor,
path_id: path_id.clone(),
method: method.clone(),
commands: body_commands.into_iter().collect(),
status_code: *status_code,
}),
};
body.append_endpoint_commands();
body
}
fn append_endpoint_commands(&mut self) {
let mut ids = IdGenerator::default();
match self {
EndpointBody::Request(request_body) => {
let request_id = ids.request();
request_body
.commands
.push(SpecCommand::from(EndpointCommand::add_request(
request_id.clone(),
request_body.path_id.clone(),
request_body.method.clone(),
)));
if let Some(body_descriptor) = &request_body.body_descriptor {
request_body
.commands
.push(SpecCommand::from(EndpointCommand::set_request_body_shape(
request_id,
body_descriptor.root_shape_id.clone(),
body_descriptor.content_type.clone(),
false,
)));
}
}
EndpointBody::Response(response_body) => {
let response_id = ids.response();
response_body.commands.push(SpecCommand::from(
EndpointCommand::add_response_by_path_and_method(
response_id.clone(),
response_body.path_id.clone(),
response_body.method.clone(),
response_body.status_code.clone(),
),
));
if let Some(body_descriptor) = &response_body.body_descriptor {
response_body
.commands
.push(SpecCommand::from(EndpointCommand::set_response_body_shape(
response_id,
body_descriptor.root_shape_id.clone(),
body_descriptor.content_type.clone(),
false,
)));
}
}
};
}
}
#[cfg(test)]
mod test {
use super::*;
use serde_json::json;
#[tokio::main]
#[test]
async fn can_learn_endpoint_bodies_from_interactions() {
let spec_events: Vec<SpecEvent> = serde_json::from_value(json!([
{"PathComponentAdded":{"pathId":"path_id_1","parentPathId":"root","name":"todos"}}
]))
.expect("initial spec events should be valid events");
// TODO: feed actual interactions and assert the output
let interaction_lines = streams::http_interaction::json_lines(tokio::io::empty());
let sink = tokio::io::sink();
learn_undocumented_bodies(spec_events, 1, interaction_lines, sink).await;
}
}
// lib.rs
//! was nonlinear. The problem is that these formats
//! are *non-linear color spaces*, which means that many operations that you may want
//! to perform on colors (addition, subtraction, multiplication, linear interpolation,
//! etc.) will work unexpectedly when performed in such a non-linear color space. As
//! such, the compression has to be reverted to restore linearity and make sure that
//! many operations on the colors are accurate.
//!
//! For example, this does not work:
//!
//! ```rust
//! // An alias for Rgb<Srgb>, which is what most pictures store.
//! use palette::Srgb;
//!
//! let orangeish = Srgb::new(1.0, 0.6, 0.0);
//! let blueish = Srgb::new(0.0, 0.2, 1.0);
//! // let whatever_it_becomes = orangeish + blueish;
//! ```
//!
//! Instead, they have to be made linear before adding:
//!
//! ```rust
//! // An alias for Rgb<Srgb>, which is what most pictures store.
//! use palette::{Pixel, Srgb};
//!
//! let orangeish = Srgb::new(1.0, 0.6, 0.0).into_linear();
//! let blueish = Srgb::new(0.0, 0.2, 1.0).into_linear();
//! let whatever_it_becomes = orangeish + blueish;
//!
//! // Encode the result back into sRGB and create a byte array
//! let pixel: [u8; 3] = Srgb::from_linear(whatever_it_becomes)
//! .into_format()
//! .into_raw();
//! ```
//!
//! But, even when colors *are* 'linear', there is yet more to explore.
//!
//! The most common way that colors are defined, especially for computer
//! storage, is in terms of so-called *tristimulus values*, meaning that
//! all colors are defined as a vector of three values which may represent
//! any color. The reason colors can generally be stored as only a three
//! dimensional vector, and not an *n* dimensional one, where *n* is some
//! number of possible frequencies of light, is because our eyes contain
//! only three types of cones. Each of these cones have different sensitivity
//! curves to different wavelengths of light, giving us three "dimensions"
//! of sensitivity to color. These cones are often called the S, M, and L
//! (for small, medium, and large) cones, and their sensitivity curves
//! *roughly* position them as most sensitive to "red", "green", and "blue"
//! parts of the spectrum. As such, we can choose only three values to
//! represent any possible color that a human is able to see. An interesting
//! consequence of this is that humans can see two different objects which
//! are emitting *completely different actual light spectra* as the *exact
//! same perceptual color* so long as those wavelengths, when transformed
//! by the sensitivity curves of our cones, end up resulting in the same
//! S, M, and L values sent to our brains.
//!
//! A **color space** (which simply refers to a set of standards by which
//! we map a set of arbitrary values to real-world colors) which uses
//! tristimulus values is often defined in terms of
//!
//! 1. Its **primaries**
//! 2. Its **reference white** or **white point**
//!
//! The **primaries** together represent the total *gamut* (i.e. displayable
//! range of colors) of that color space, while the **white point** defines
//! which concrete tristimulus value corresponds to a real, physical white
//! reflecting object being lit by a known light source and observed by the
//! 'standard observer' (i.e. a standardized model of human color perception).
//!
//! The informal "RGB" color space is such a tristimulus color space, since
//! it is defined by three values, but it is underspecified since we don't
//! know which primaries are being used (i.e. how exactly are the canonical
//! "red", "green", and "blue" defined?), nor its white point. In most cases,
//! when people talk about "RGB" or "Linear RGB" colors, what they are
//! *actually* talking about is the "Linear sRGB" color space, which uses the
//! primaries and white point defined in the sRGB standard, but which *does
//! not* have the (non-linear) sRGB *transfer function* applied.
//!
//! This library takes these things into account, and attempts to provide an
//! interface which will let those who don't care so much about the intricacies
//! of color still use colors correctly, while also allowing the advanced user
//! a high degree of flexibility in how they use it.
//!
//! # Transparency
//!
//! There are many cases where pixel transparency is important, but there are
//! also many cases where it becomes a dead weight, if it's always stored
//! together with the color, but not used. Palette has therefore adopted a
//! structure where the transparency component (alpha) is attachable using the
//! [`Alpha`](crate::Alpha) type, instead of having copies of each color
//! space.
//!
//! This approach comes with the extra benefit of allowing operations to
//! selectively affect the alpha component:
//!
//! ```rust
//! use palette::{LinSrgb, LinSrgba};
//!
//! let mut c1 = LinSrgba::new(1.0, 0.5, 0.5, 0.8);
//! let c2 = LinSrgb::new(0.5, 1.0, 1.0);
//!
//! c1.color = c1.color * c2; //Leave the alpha as it is
//! c1.blue += 0.2; //The color components can easily be accessed
//! c1 = c1 * 0.5; //Scale both the color and the alpha
//! ```
//!
//! # A Basic Workflow
//!
//! The overall workflow can be divided into three steps, where the first and
//! last may be taken care of by other parts of the application:
//!
//! ```text
//! Decoding -> Processing -> Encoding
//! ```
//!
//! ## 1. Decoding
//!
//! Find out what the source format is and convert it to a linear color space.
//! There may be a specification, such as when working with SVG or CSS.
//!
//! When working with RGB or gray scale (luma):
//!
//! * If you are asking your user to enter an RGB value, you are in a gray zone
//! where it depends on the context. It's usually safe to assume sRGB, but
//! sometimes it's already linear.
//!
//! * If you are decoding an image, there may be some meta data that gives you
//! the necessary details. Otherwise it's most commonly sRGB. Usually you
//! will end up with a slice or vector with RGB bytes, which can easily be
//! converted to Palette colors:
//!
//! ```rust
//! # let mut image_buffer: Vec<u8> = vec![];
//! use palette::{Srgb, Pixel};
//!
//! // This works for any (even non-RGB) color type that can have the
//! // buffer element type as component.
//! let color_buffer: &mut [Srgb<u8>] = Pixel::from_raw_slice_mut(&mut image_buffer);
//! ```
//!
//! * If you are getting your colors from the GPU, in a game or other graphical
//! application, or if they are otherwise generated by the application, then
//! chances are that they are already linear. Still, make sure to check that
//! they are not being encoded somewhere.
//!
//! When working with other colors:
//!
//! * For HSL, HSV, HWB: Check if they are based on any other color space than
//! sRGB, such as Adobe or Apple RGB.
//!
//! * For any of the CIE color spaces, check for a specification of white point
//! and light source. These are necessary for converting to RGB and other
//! colors, that depend on perception and "viewing devices". Common defaults
//! are the D65 light source and the sRGB white point. The Palette defaults
//! should take you far.
//!
//! ## 2. Processing
//!
//! When your color has been decoded into some Palette type, it's ready for
//! processing. This includes things like blending, hue shifting, darkening and
//! conversion to other formats. Just make sure that your non-linear RGB is
//! made linear first (`my_srgb.into_linear()`), to make the operations
//! available.
//!
//! Different color spaces have different capabilities, pros and cons. You may
//! have to experiment a bit (or look at the example programs) to find out what
//! gives the desired result.
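//!
//! As a small illustration, here is a blend-and-darken step using operations
//! documented on the `Mix` and `Shade` traits further down in this file:
//!
//! ```rust
//! use palette::{LinSrgb, Mix, Shade};
//!
//! let a = LinSrgb::new(0.8, 0.2, 0.2);
//! let b = LinSrgb::new(0.2, 0.2, 0.8);
//!
//! // Blend halfway between the two colors, then darken the result a little.
//! let result = a.mix(&b, 0.5).darken(0.1);
//! ```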
//!
//! ## 3. Encoding
//!
//! When the desired processing is done, it's time to encode the colors back
//! into some image format. The same rules apply as for the decoding, but with
//! the process reversed.
//!
//! # Working with Raw Data
//!
//! Oftentimes, pixel data is stored in a raw buffer such as a `[u8; 3]`. The
//! [`Pixel`](crate::encoding::pixel::Pixel) trait allows for easy interoperation between
//! Palette colors and other crates or systems. `from_raw` can be used to
//! convert into a Palette color, `into_format` converts from `Srgb<u8>` to
//! `Srgb<f32>`, and finally `into_raw` to convert from a Palette color back to
//! a `[u8;3]`.
//!
//! ```rust
//! use approx::assert_relative_eq;
//! use palette::{Srgb, Pixel};
//!
//! let buffer = [255, 0, 255];
//! let raw = Srgb::from_raw(&buffer);
//! assert_eq!(raw, &Srgb::<u8>::new(255u8, 0, 255));
//!
//! let raw_float: Srgb<f32> = raw.into_format();
//! assert_relative_eq!(raw_float, Srgb::new(1.0, 0.0, 1.0));
//!
//! let raw: [u8; 3] = Srgb::into_raw(raw_float.into_format());
//! assert_eq!(raw, buffer);
//! ```
// Keep the standard library when running tests, too
#![cfg_attr(all(not(feature = "std"), not(test)), no_std)]
#![doc(html_root_url = "https://docs.rs/palette/0.5.0/palette/")]
#![warn(missing_docs)]
#[cfg(any(feature = "std", test))]
extern crate core;
#[cfg_attr(test, macro_use)]
extern crate approx;
#[macro_use]
extern crate palette_derive;
#[cfg(feature = "phf")]
extern crate phf;
#[cfg(feature = "serializing")]
#[macro_use]
extern crate serde;
#[cfg(all(test, feature = "serializing"))]
extern crate serde_json;
use float::Float;
use luma::Luma;
pub use alpha::{Alpha, WithAlpha};
pub use blend::Blend;
#[cfg(feature = "std")]
pub use gradient::Gradient;
pub use hsl::{Hsl, Hsla};
pub use hsv::{Hsv, Hsva};
pub use hwb::{Hwb, Hwba};
pub use lab::{Lab, Laba};
pub use lch::{Lch, Lcha};
pub use luma::{GammaLuma, GammaLumaa, LinLuma, LinLumaa, SrgbLuma, SrgbLumaa};
pub use rgb::{GammaSrgb, GammaSrgba, LinSrgb, LinSrgba, Packed, RgbChannels, Srgb, Srgba};
pub use xyz::{Xyz, Xyza};
pub use yxy::{Yxy, Yxya};
pub use color_difference::ColorDifference;
pub use component::*;
pub use convert::{FromColor, IntoColor};
pub use encoding::pixel::Pixel;
pub use hues::{LabHue, RgbHue};
pub use matrix::Mat3;
pub use relative_contrast::{contrast_ratio, RelativeContrast};
//Helper macro for checking ranges and clamping.
#[cfg(test)]
macro_rules! assert_ranges {
(@make_tuple $first:pat, $next:ident,) => (($first, $next));
(@make_tuple $first:pat, $next:ident, $($rest:ident,)*) => (
assert_ranges!(@make_tuple ($first, $next), $($rest,)*)
);
(
$ty:ident < $($ty_params:ty),+ >;
limited {$($limited:ident: $limited_from:expr => $limited_to:expr),+}
limited_min {$($limited_min:ident: $limited_min_from:expr => $limited_min_to:expr),*}
unlimited {$($unlimited:ident: $unlimited_from:expr => $unlimited_to:expr),*}
) => (
{
use core::iter::repeat;
use crate::Limited;
{
print!("checking below limits... ");
$(
let from = $limited_from;
let to = $limited_to;
let diff = to - from;
let $limited = (1..11).map(|i| from - (i as f64 / 10.0) * diff);
)+
$(
let from = $limited_min_from;
let to = $limited_min_to;
let diff = to - from;
let $limited_min = (1..11).map(|i| from - (i as f64 / 10.0) * diff);
)*
$(
let from = $unlimited_from;
let to = $unlimited_to;
let diff = to - from;
let $unlimited = (1..11).map(|i| from - (i as f64 / 10.0) * diff);
)*
for assert_ranges!(@make_tuple (), $($limited,)+ $($limited_min,)* $($unlimited,)* ) in repeat(()) $(.zip($limited))+ $(.zip($limited_min))* $(.zip($unlimited))* {
let c: $ty<$($ty_params),+> = $ty {
$($limited: $limited.into(),)+
$($limited_min: $limited_min.into(),)*
$($unlimited: $unlimited.into(),)*
..$ty::default() //This prevents exhaustiveness checking
};
let clamped = c.clamp();
let expected: $ty<$($ty_params),+> = $ty {
$($limited: $limited_from.into(),)+
$($limited_min: $limited_min_from.into(),)*
$($unlimited: $unlimited.into(),)*
..$ty::default() //This prevents exhaustiveness checking
};
assert!(!c.is_valid());
assert_relative_eq!(clamped, expected);
}
println!("ok")
}
{
print!("checking within limits... ");
$(
let from = $limited_from;
let to = $limited_to;
let diff = to - from;
let $limited = (0..11).map(|i| from + (i as f64 / 10.0) * diff);
)+
$(
let from = $limited_min_from;
let to = $limited_min_to;
let diff = to - from;
let $limited_min = (0..11).map(|i| from + (i as f64 / 10.0) * diff);
)*
$(
let from = $unlimited_from;
let to = $unlimited_to;
let diff = to - from;
let $unlimited = (0..11).map(|i| from + (i as f64 / 10.0) * diff);
)*
for assert_ranges!(@make_tuple (), $($limited,)+ $($limited_min,)* $($unlimited,)* ) in repeat(()) $(.zip($limited))+ $(.zip($limited_min))* $(.zip($unlimited))* {
let c: $ty<$($ty_params),+> = $ty {
$($limited: $limited.into(),)+
$($limited_min: $limited_min.into(),)*
$($unlimited: $unlimited.into(),)*
..$ty::default() //This prevents exhaustiveness checking
};
let clamped = c.clamp();
assert!(c.is_valid());
assert_relative_eq!(clamped, c);
}
println!("ok")
}
{
print!("checking above limits... ");
$(
let from = $limited_from;
let to = $limited_to;
let diff = to - from;
let $limited = (1..11).map(|i| to + (i as f64 / 10.0) * diff);
)+
$(
let from = $limited_min_from;
let to = $limited_min_to;
let diff = to - from;
let $limited_min = (1..11).map(|i| to + (i as f64 / 10.0) * diff);
)*
$(
let from = $unlimited_from;
let to = $unlimited_to;
let diff = to - from;
let $unlimited = (1..11).map(|i| to + (i as f64 / 10.0) * diff);
)*
for assert_ranges!(@make_tuple (), $($limited,)+ $($limited_min,)* $($unlimited,)* ) in repeat(()) $(.zip($limited))+ $(.zip($limited_min))* $(.zip($unlimited))* {
let c: $ty<$($ty_params),+> = $ty {
$($limited: $limited.into(),)+
$($limited_min: $limited_min.into(),)*
$($unlimited: $unlimited.into(),)*
..$ty::default() //This prevents exhaustiveness checking
};
let clamped = c.clamp();
let expected: $ty<$($ty_params),+> = $ty {
$($limited: $limited_to.into(),)+
$($limited_min: $limited_min.into(),)*
$($unlimited: $unlimited.into(),)*
..$ty::default() //This prevents exhaustiveness checking
};
assert!(!c.is_valid());
assert_relative_eq!(clamped, expected);
}
println!("ok")
}
}
);
}
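// An invocation of the macro above typically looks like the following sketch
// (the color type and component ranges here are illustrative, not taken from
// a real test in this crate):
//
// assert_ranges! {
//     Hsl<crate::encoding::Srgb, f64>;
//     limited { saturation: 0.0 => 1.0, lightness: 0.0 => 1.0 }
//     limited_min {}
//     unlimited { hue: -360.0 => 360.0 }
// }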
#[macro_use]
mod macros;
pub mod blend;
#[cfg(feature = "std")]
pub mod gradient;
#[cfg(feature = "named")]
pub mod named;
#[cfg(feature = "random")]
mod random_sampling;
mod alpha;
mod hsl;
mod hsv;
mod hwb;
mod lab;
mod lch;
pub mod luma;
pub mod rgb;
mod xyz;
mod yxy;
mod hues;
pub mod chromatic_adaptation;
mod color_difference;
mod component;
pub mod convert;
pub mod encoding;
mod equality;
mod relative_contrast;
pub mod white_point;
pub mod float;
#[doc(hidden)]
pub mod matrix;
fn clamp<T: PartialOrd>(v: T, min: T, max: T) -> T {
if v < min {
min
} else if v > max {
max
} else {
v
}
}
/// A trait for clamping and checking if colors are within their ranges.
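///
/// A small sketch of the intended use, assuming (as elsewhere in this
/// documentation) that sRGB components are limited to the `0.0..=1.0` range:
///
/// ```
/// use palette::{Limited, Srgb};
///
/// let c = Srgb::new(1.2, 0.5, -0.3);
/// assert!(!c.is_valid());
/// assert!(c.clamp().is_valid());
/// ```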
pub trait Limited {
/// Check if the color's components are within the expected ranges.
fn is_valid(&self) -> bool;
/// Return a new color where the components has been clamped to the nearest
/// valid values.
fn clamp(&self) -> Self;
/// Clamp the color's components to the nearest valid values.
fn clamp_self(&mut self);
}
/// A trait for linear color interpolation.
///
/// ```
/// use approx::assert_relative_eq;
///
/// use palette::{LinSrgb, Mix};
///
/// let a = LinSrgb::new(0.0, 0.5, 1.0);
/// let b = LinSrgb::new(1.0, 0.5, 0.0);
///
/// assert_relative_eq!(a.mix(&b, 0.0), a);
/// assert_relative_eq!(a.mix(&b, 0.5), LinSrgb::new(0.5, 0.5, 0.5));
/// assert_relative_eq!(a.mix(&b, 1.0), b);
/// ```
pub trait Mix {
/// The type of the mixing factor.
type Scalar: Float;
/// Mix the color with an other color, by `factor`.
///
/// `factor` should be between `0.0` and `1.0`, where `0.0` will result in
/// the same color as `self` and `1.0` will result in the same color as
/// `other`.
fn mix(&self, other: &Self, factor: Self::Scalar) -> Self;
}
/// The `Shade` trait allows a color to be lightened or darkened.
///
/// ```
/// use approx::assert_relative_eq;
///
/// use palette::{LinSrgb, Shade};
///
/// let a = LinSrgb::new(0.4, 0.4, 0.4);
/// let b = LinSrgb::new(0.6, 0.6, 0.6);
///
/// assert_relative_eq!(a.lighten(0.1), b.darken(0.1));
/// ```
pub trait Shade: Sized {
/// The type of the lighten/darken amount.
type Scalar: Float;
/// Lighten the color by `amount`.
fn lighten(&self, amount: Self::Scalar) -> Self;
/// Darken the color by `amount`.
fn darken(&self, amount: Self::Scalar) -> Self {
self.lighten(-amount)
}
}
/// A trait for colors where a hue may be calculated.
///
/// ```
/// use approx::assert_relative_eq;
///
/// use palette::{GetHue, LinSrgb};
///
/// let red = LinSrgb::new(1.0f32, 0.0, 0.0);
/// let green = LinSrgb::new(0.0f32, 1.0, 0.0);
/// let blue = LinSrgb::new(0.0f32, 0.0, 1.0);
/// let gray = LinSrgb::new(0.5f32, 0.5, 0.5);
///
/// assert_relative_eq!(red.get_hue().unwrap(), 0.0.into());
/// assert_relative_eq!(green.get_hue().unwrap(), 120.0.into());
/// assert_relative_eq!(blue.get_hue().unwrap(), 240.0.into());
/// assert_eq!(gray.get_hue(), None);
/// ```
pub trait GetHue {
/// The kind of hue unit this color space uses.
///
/// The hue is most commonly calculated as an angle around a color circle
/// and may not always be uniform between color spaces. It's therefore not
/// recommended to take one type of hue and apply it to a color space that
/// expects an other.
type Hue;
/// Calculate a hue if possible.
///
/// Colors in the gray scale have no well-defined hue and should preferably
/// return `None`.
fn get_hue(&self) -> Option<Self::Hue>;
}
/// A trait for colors where the hue can be manipulated without conversion.
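///
/// For example (a sketch; `Hsl` stores its hue directly, so these operations
/// are cheap there):
///
/// ```
/// use palette::{Hsl, Hue};
///
/// let green = Hsl::new(120.0, 1.0, 0.5);
/// let red = green.with_hue(0.0);
/// let blue = green.shift_hue(120.0);
/// ```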
pub trait Hue: GetHue {
/// Return a new copy of `self`, but with a specific hue.
fn with_hue<H: Into<Self::Hue>>(&self, hue: H) -> Self;
/// Return a new copy of `self`, but with the hue shifted by `amount`.
fn shift_hue<H: Into<Self::Hue>>(&self, amount: H) -> Self;
}
/// A trait for colors where the saturation (or chroma) can be manipulated
/// without conversion.
///
/// ```
/// use approx::assert_relative_eq;
///
/// use palette::{Hsv, Saturate};
///
/// let a = Hsv::new(0.0, 0.25, 1.0);
/// let b = Hsv::new(0.0, 1.0, 1.0);
///
/// assert_relative_eq!(a.saturate(1.0), b.desaturate(0.5));
/// ```
pub trait Saturate: Sized {
/// The type of the (de)saturation factor.
type Scalar: Float;
/// Increase the saturation by `factor`.
fn saturate(&self, factor: Self::Scalar) -> Self;
/// Decrease the saturation by `factor`.
fn desaturate(&self, factor: Self::Scalar) -> Self {
self.saturate(-factor)
}
}
/// Perform a unary or binary operation on each component of a color.
pub trait ComponentWise {
/// The scalar type for color components.
type Scalar;
/// Perform a binary operation on this and another color.
fn component_wise<F: FnMut(Self::Scalar, Self::Scalar) -> Self::Scalar>(
&self,
other: &Self,
f: F,
) -> Self;
}
// lib.rs
//! electron gun was nonlinear. The problem is that these formats
//! are *non-linear color spaces*, which means that many operations that you may want
//! to perform on colors (addition, subtraction, multiplication, linear interpolation,
//! etc.) will work unexpectedly when performed in such a non-linear color space. As
//! such, the compression has to be reverted to restore linearity and make sure that
//! many operations on the colors are accurate.
//!
//! For example, this does not work:
//!
//! ```rust
//! // An alias for Rgb<Srgb>, which is what most pictures store.
//! use palette::Srgb;
//!
//! let orangeish = Srgb::new(1.0, 0.6, 0.0);
//! let blueish = Srgb::new(0.0, 0.2, 1.0);
//! // let whatever_it_becomes = orangeish + blueish;
//! ```
//!
//! Instead, they have to be made linear before adding:
//!
//! ```rust
//! // An alias for Rgb<Srgb>, which is what most pictures store.
//! use palette::{Pixel, Srgb};
//!
//! let orangeish = Srgb::new(1.0, 0.6, 0.0).into_linear();
//! let blueish = Srgb::new(0.0, 0.2, 1.0).into_linear();
//! let whatever_it_becomes = orangeish + blueish;
//!
//! // Encode the result back into sRGB and create a byte array
//! let pixel: [u8; 3] = Srgb::from_linear(whatever_it_becomes)
//! .into_format()
//! .into_raw();
//! ```
//!
//! But, even when colors *are* 'linear', there is yet more to explore.
//!
//! The most common way that colors are defined, especially for computer
//! storage, is in terms of so-called *tristimulus values*, meaning that
//! all colors are defined as a vector of three values which may represent
//! any color. The reason colors can generally be stored as only a three
//! dimensional vector, and not an *n* dimensional one, where *n* is some
//! number of possible frequencies of light, is because our eyes contain
//! only three types of cones. Each of these cones have different sensitivity
//! curves to different wavelengths of light, giving us three "dimensions"
//! of sensitivity to color. These cones are often called the S, M, and L
//! (for small, medium, and large) cones, and their sensitivity curves
//! *roughly* position them as most sensitive to "red", "green", and "blue"
//! parts of the spectrum. As such, we can choose only three values to
//! represent any possible color that a human is able to see. An interesting
//! consequence of this is that humans can see two different objects which
//! are emitting *completely different actual light spectra* as the *exact
//! same perceptual color* so long as those wavelengths, when transformed
//! by the sensitivity curves of our cones, end up resulting in the same
//! S, M, and L values sent to our brains.
//!
//! A **color space** (which simply refers to a set of standards by which
//! we map a set of arbitrary values to real-world colors) which uses
//! tristimulus values is often defined in terms of
//!
//! 1. Its **primaries**
//! 2. Its **reference white** or **white point**
//!
//! The **primaries** together represent the total *gamut* (i.e. displayable
//! range of colors) of that color space, while the **white point** defines
//! which concrete tristimulus value corresponds to a real, physical white
//! reflecting object being lit by a known light source and observed by the
//! 'standard observer' (i.e. a standardized model of human color perception).
//!
//! The informal "RGB" color space is such a tristimulus color space, since
//! it is defined by three values, but it is underspecified since we don't
//! know which primaries are being used (i.e. how exactly are the canonical
//! "red", "green", and "blue" defined?), nor its white point. In most cases,
//! when people talk about "RGB" or "Linear RGB" colors, what they are
//! *actually* talking about is the "Linear sRGB" color space, which uses the
//! primaries and white point defined in the sRGB standard, but which *does
//! not* have the (non-linear) sRGB *transfer function* applied.
//!
//! This library takes these things into account, and attempts to provide an
//! interface which will let those who don't care so much about the intricacies
//! of color still use colors correctly, while also allowing the advanced user
//! a high degree of flexibility in how they use it.
//!
//! # Transparency
//!
//! There are many cases where pixel transparency is important, but there are
//! also many cases where it becomes a dead weight, if it's always stored
//! together with the color, but not used. Palette has therefore adopted a
//! structure where the transparency component (alpha) is attachable using the
//! [`Alpha`](crate::Alpha) type, instead of having copies of each color
//! space.
//!
//! This approach comes with the extra benefit of allowing operations to
//! selectively affect the alpha component:
//!
//! ```rust
//! use palette::{LinSrgb, LinSrgba};
//!
//! let mut c1 = LinSrgba::new(1.0, 0.5, 0.5, 0.8);
//! let c2 = LinSrgb::new(0.5, 1.0, 1.0);
//!
//! c1.color = c1.color * c2; //Leave the alpha as it is
//! c1.blue += 0.2; //The color components can easily be accessed
//! c1 = c1 * 0.5; //Scale both the color and the alpha
//! ```
//!
//! # A Basic Workflow
//!
//! The overall workflow can be divided into three steps, where the first and
//! last may be taken care of by other parts of the application:
//!
//! ```text
//! Decoding -> Processing -> Encoding
//! ```
//!
//! ## 1. Decoding
//!
//! Find out what the source format is and convert it to a linear color space.
//! There may be a specification, such as when working with SVG or CSS.
//!
//! When working with RGB or gray scale (luma):
//!
//! * If you are asking your user to enter an RGB value, you are in a gray zone
//! where it depends on the context. It's usually safe to assume sRGB, but
//! sometimes it's already linear.
//!
//! * If you are decoding an image, there may be some meta data that gives you
//! the necessary details. Otherwise it's most commonly sRGB. Usually you
//! will end up with a slice or vector with RGB bytes, which can easily be
//! converted to Palette colors:
//!
//! ```rust
//! # let mut image_buffer: Vec<u8> = vec![];
//! use palette::{Srgb, Pixel};
//!
//! // This works for any (even non-RGB) color type that can have the
//! // buffer element type as component.
//! let color_buffer: &mut [Srgb<u8>] = Pixel::from_raw_slice_mut(&mut image_buffer);
//! ```
//!
//! * If you are getting your colors from the GPU, in a game or other graphical
//! application, or if they are otherwise generated by the application, then
//! chances are that they are already linear. Still, make sure to check that
//! they are not being encoded somewhere.
//!
//! When working with other colors:
//!
//! * For HSL, HSV, HWB: Check if they are based on any other color space than
//! sRGB, such as Adobe or Apple RGB.
//!
//! * For any of the CIE color spaces, check for a specification of white point
//! and light source. These are necessary for converting to RGB and other
//! colors, that depend on perception and "viewing devices". Common defaults
//! are the D65 light source and the sRGB white point. The Palette defaults
//! should take you far.
//!
//! ## 2. Processing
//!
//! When your color has been decoded into some Palette type, it's ready for
//! processing. This includes things like blending, hue shifting, darkening and
//! conversion to other formats. Just make sure that your non-linear RGB is
//! made linear first (`my_srgb.into_linear()`), to make the operations
//! available.
//!
//! Different color spaces have different capabilities, pros and cons. You may
//! have to experiment a bit (or look at the example programs) to find out what
//! gives the desired result.
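//!
//! For instance, softening a color in HSV (a sketch using the `Saturate` and
//! `Shade` traits, which are documented later in this file):
//!
//! ```rust
//! use palette::{Hsv, Saturate, Shade};
//!
//! let vivid = Hsv::new(120.0, 1.0, 0.8);
//! let softer = vivid.desaturate(0.3).lighten(0.1);
//! ```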
//!
//! ## 3. Encoding
//!
//! When the desired processing is done, it's time to encode the colors back
//! into some image format. The same rules apply as for the decoding, but with
//! the process reversed.
//!
//! # Working with Raw Data
//!
//! Oftentimes, pixel data is stored in a raw buffer such as a `[u8; 3]`. The
//! [`Pixel`](crate::encoding::pixel::Pixel) trait allows for easy interoperation between
//! Palette colors and other crates or systems. `from_raw` can be used to
//! convert into a Palette color, `into_format` converts from `Srgb<u8>` to
//! `Srgb<f32>`, and finally `into_raw` to convert from a Palette color back to
//! a `[u8;3]`.
//!
//! ```rust
//! use approx::assert_relative_eq;
//! use palette::{Srgb, Pixel};
//!
//! let buffer = [255, 0, 255];
//! let raw = Srgb::from_raw(&buffer);
//! assert_eq!(raw, &Srgb::<u8>::new(255u8, 0, 255));
//!
//! let raw_float: Srgb<f32> = raw.into_format();
//! assert_relative_eq!(raw_float, Srgb::new(1.0, 0.0, 1.0));
//!
//! let raw: [u8; 3] = Srgb::into_raw(raw_float.into_format());
//! assert_eq!(raw, buffer);
//! ```
// Keep the standard library when running tests, too
#![cfg_attr(all(not(feature = "std"), not(test)), no_std)]
#![doc(html_root_url = "https://docs.rs/palette/0.5.0/palette/")]
#![warn(missing_docs)]
#[cfg(any(feature = "std", test))]
extern crate core;
#[cfg_attr(test, macro_use)]
extern crate approx;
#[macro_use]
extern crate palette_derive;
#[cfg(feature = "phf")]
extern crate phf;
#[cfg(feature = "serializing")]
#[macro_use]
extern crate serde;
#[cfg(all(test, feature = "serializing"))]
extern crate serde_json;
use float::Float;
use luma::Luma;
pub use alpha::{Alpha, WithAlpha};
pub use blend::Blend;
#[cfg(feature = "std")]
pub use gradient::Gradient;
pub use hsl::{Hsl, Hsla};
pub use hsv::{Hsv, Hsva};
pub use hwb::{Hwb, Hwba};
pub use lab::{Lab, Laba};
pub use lch::{Lch, Lcha};
pub use luma::{GammaLuma, GammaLumaa, LinLuma, LinLumaa, SrgbLuma, SrgbLumaa};
pub use rgb::{GammaSrgb, GammaSrgba, LinSrgb, LinSrgba, Packed, RgbChannels, Srgb, Srgba};
pub use xyz::{Xyz, Xyza};
pub use yxy::{Yxy, Yxya};
pub use color_difference::ColorDifference;
pub use component::*;
pub use convert::{FromColor, IntoColor};
pub use encoding::pixel::Pixel;
pub use hues::{LabHue, RgbHue};
pub use matrix::Mat3;
pub use relative_contrast::{contrast_ratio, RelativeContrast};
//Helper macro for checking ranges and clamping.
#[cfg(test)]
macro_rules! assert_ranges {
(@make_tuple $first:pat, $next:ident,) => (($first, $next));
(@make_tuple $first:pat, $next:ident, $($rest:ident,)*) => (
assert_ranges!(@make_tuple ($first, $next), $($rest,)*)
);
(
$ty:ident < $($ty_params:ty),+ >;
limited {$($limited:ident: $limited_from:expr => $limited_to:expr),+}
limited_min {$($limited_min:ident: $limited_min_from:expr => $limited_min_to:expr),*}
unlimited {$($unlimited:ident: $unlimited_from:expr => $unlimited_to:expr),*}
) => (
{
use core::iter::repeat;
use crate::Limited;
{
print!("checking below limits... ");
$(
let from = $limited_from;
let to = $limited_to;
let diff = to - from;
let $limited = (1..11).map(|i| from - (i as f64 / 10.0) * diff);
)+
$(
let from = $limited_min_from;
let to = $limited_min_to;
let diff = to - from;
let $limited_min = (1..11).map(|i| from - (i as f64 / 10.0) * diff);
)*
$(
let from = $unlimited_from;
let to = $unlimited_to;
let diff = to - from;
let $unlimited = (1..11).map(|i| from - (i as f64 / 10.0) * diff);
)*
for assert_ranges!(@make_tuple (), $($limited,)+ $($limited_min,)* $($unlimited,)* ) in repeat(()) $(.zip($limited))+ $(.zip($limited_min))* $(.zip($unlimited))* {
let c: $ty<$($ty_params),+> = $ty {
$($limited: $limited.into(),)+
$($limited_min: $limited_min.into(),)*
$($unlimited: $unlimited.into(),)*
..$ty::default() //This prevents exhaustiveness checking
};
let clamped = c.clamp();
let expected: $ty<$($ty_params),+> = $ty {
$($limited: $limited_from.into(),)+
$($limited_min: $limited_min_from.into(),)*
$($unlimited: $unlimited.into(),)*
..$ty::default() //This prevents exhaustiveness checking
};
assert!(!c.is_valid());
assert_relative_eq!(clamped, expected);
}
println!("ok")
}
{
print!("checking within limits... ");
$(
let from = $limited_from;
let to = $limited_to;
let diff = to - from;
let $limited = (0..11).map(|i| from + (i as f64 / 10.0) * diff);
)+
$(
let from = $limited_min_from;
let to = $limited_min_to;
let diff = to - from;
let $limited_min = (0..11).map(|i| from + (i as f64 / 10.0) * diff);
)*
$(
let from = $unlimited_from;
let to = $unlimited_to;
let diff = to - from;
let $unlimited = (0..11).map(|i| from + (i as f64 / 10.0) * diff);
)*
for assert_ranges!(@make_tuple (), $($limited,)+ $($limited_min,)* $($unlimited,)* ) in repeat(()) $(.zip($limited))+ $(.zip($limited_min))* $(.zip($unlimited))* {
let c: $ty<$($ty_params),+> = $ty {
$($limited: $limited.into(),)+
$($limited_min: $limited_min.into(),)*
$($unlimited: $unlimited.into(),)*
..$ty::default() //This prevents exhaustiveness checking
};
let clamped = c.clamp();
assert!(c.is_valid());
assert_relative_eq!(clamped, c);
}
println!("ok")
}
{
print!("checking above limits... ");
$(
let from = $limited_from;
let to = $limited_to;
let diff = to - from;
let $limited = (1..11).map(|i| to + (i as f64 / 10.0) * diff);
)+
$(
let from = $limited_min_from;
let to = $limited_min_to;
let diff = to - from;
let $limited_min = (1..11).map(|i| to + (i as f64 / 10.0) * diff);
)*
$(
let from = $unlimited_from;
let to = $unlimited_to;
let diff = to - from;
let $unlimited = (1..11).map(|i| to + (i as f64 / 10.0) * diff);
)*
for assert_ranges!(@make_tuple (), $($limited,)+ $($limited_min,)* $($unlimited,)* ) in repeat(()) $(.zip($limited))+ $(.zip($limited_min))* $(.zip($unlimited))* {
let c: $ty<$($ty_params),+> = $ty {
$($limited: $limited.into(),)+
$($limited_min: $limited_min.into(),)*
$($unlimited: $unlimited.into(),)*
..$ty::default() //This prevents exhaustiveness checking
};
let clamped = c.clamp();
let expected: $ty<$($ty_params),+> = $ty {
$($limited: $limited_to.into(),)+
$($limited_min: $limited_min.into(),)*
$($unlimited: $unlimited.into(),)*
..$ty::default() //This prevents exhaustiveness checking
};
assert!(!c.is_valid());
assert_relative_eq!(clamped, expected);
}
println!("ok")
}
}
);
}
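// A sketch of how the macro above is invoked from a color type's test
// module (field names and ranges are illustrative, mirroring `Hsv`):
//
//     assert_ranges!{
//         Hsv<crate::encoding::Srgb, f64>;
//         limited {
//             saturation: 0.0 => 1.0,
//             value: 0.0 => 1.0
//         }
//         limited_min {}
//         unlimited {
//             hue: -360.0 => 360.0
//         }
//     }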
#[macro_use]
mod macros;
pub mod blend;
#[cfg(feature = "std")]
pub mod gradient;
#[cfg(feature = "named")]
pub mod named;
#[cfg(feature = "random")]
mod random_sampling;
mod alpha;
mod hsl;
mod hsv;
mod hwb;
mod lab;
mod lch;
pub mod luma;
pub mod rgb;
mod xyz;
mod yxy;
mod hues;
pub mod chromatic_adaptation;
mod color_difference;
mod component;
pub mod convert;
pub mod encoding;
mod equality;
mod relative_contrast;
pub mod white_point;
pub mod float;
#[doc(hidden)]
pub mod matrix;
fn clamp<T: PartialOrd>(v: T, min: T, max: T) -> T {
if v < min {
min
} else if v > max {
max
} else {
v
}
}
/// A trait for clamping and checking if colors are within their ranges.
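///
/// A usage sketch (illustrative):
///
/// ```
/// use palette::{Limited, Srgb};
///
/// let color = Srgb::new(1.2f32, -0.1, 0.5);
/// assert!(!color.is_valid());
///
/// let clamped = color.clamp();
/// assert!(clamped.is_valid());
/// ```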
pub trait Limited {
/// Check if the color's components are within the expected ranges.
fn is_valid(&self) -> bool;
/// Return a new color where the components have been clamped to the nearest
/// valid values.
fn clamp(&self) -> Self;
/// Clamp the color's components to the nearest valid values.
fn clamp_self(&mut self);
}
/// A trait for linear color interpolation.
///
/// ```
/// use approx::assert_relative_eq;
///
/// use palette::{LinSrgb, Mix};
///
/// let a = LinSrgb::new(0.0, 0.5, 1.0);
/// let b = LinSrgb::new(1.0, 0.5, 0.0);
///
/// assert_relative_eq!(a.mix(&b, 0.0), a);
/// assert_relative_eq!(a.mix(&b, 0.5), LinSrgb::new(0.5, 0.5, 0.5));
/// assert_relative_eq!(a.mix(&b, 1.0), b);
/// ```
pub trait Mix {
/// The type of the mixing factor.
type Scalar: Float;
/// Mix the color with another color, by `factor`.
///
/// `factor` should be between `0.0` and `1.0`, where `0.0` will result in
/// the same color as `self` and `1.0` will result in the same color as
/// `other`.
fn mix(&self, other: &Self, factor: Self::Scalar) -> Self;
}
/// The `Shade` trait allows a color to be lightened or darkened.
///
/// ```
/// use approx::assert_relative_eq;
///
/// use palette::{LinSrgb, Shade};
///
/// let a = LinSrgb::new(0.4, 0.4, 0.4);
/// let b = LinSrgb::new(0.6, 0.6, 0.6);
///
/// assert_relative_eq!(a.lighten(0.1), b.darken(0.1));
/// ```
pub trait Shade: Sized {
/// The type of the lighten/darken amount.
type Scalar: Float;
/// Lighten the color by `amount`.
fn lighten(&self, amount: Self::Scalar) -> Self;
/// Darken the color by `amount`.
fn darken(&self, amount: Self::Scalar) -> Self {
self.lighten(-amount)
}
}
/// A trait for colors where a hue may be calculated.
///
/// ```
/// use approx::assert_relative_eq;
///
/// use palette::{GetHue, LinSrgb};
///
/// let red = LinSrgb::new(1.0f32, 0.0, 0.0);
/// let green = LinSrgb::new(0.0f32, 1.0, 0.0);
/// let blue = LinSrgb::new(0.0f32, 0.0, 1.0);
/// let gray = LinSrgb::new(0.5f32, 0.5, 0.5);
///
/// assert_relative_eq!(red.get_hue().unwrap(), 0.0.into());
/// assert_relative_eq!(green.get_hue().unwrap(), 120.0.into());
/// assert_relative_eq!(blue.get_hue().unwrap(), 240.0.into());
/// assert_eq!(gray.get_hue(), None);
/// ```
pub trait GetHue {
/// The kind of hue unit this color space uses.
///
/// The hue is most commonly calculated as an angle around a color circle
/// and may not always be uniform between color spaces. It's therefore not
/// recommended to take one type of hue and apply it to a color space that
/// expects another.
type Hue;
/// Calculate a hue if possible.
///
/// Colors in the gray scale have no well-defined hue and should preferably
/// return `None`.
fn get_hue(&self) -> Option<Self::Hue>;
}
/// A trait for colors where the hue can be manipulated without conversion.
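///
/// A usage sketch (illustrative):
///
/// ```
/// use palette::{Hsv, Hue};
///
/// let green = Hsv::new(120.0, 1.0, 1.0);
/// let blue = green.with_hue(240.0);
/// let cyan = green.shift_hue(60.0);
/// ```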
pub trait Hue: GetHue {
/// Return a new copy of `self`, but with a specific hue.
fn with_hue<H: Into<Self::Hue>>(&self, hue: H) -> Self;
/// Return a new copy of `self`, but with the hue shifted by `amount`.
fn shift_hue<H: Into<Self::Hue>>(&self, amount: H) -> Self;
}
/// A trait for colors where the saturation (or chroma) can be manipulated
/// without conversion.
///
/// ```
/// use approx::assert_relative_eq;
///
/// use palette::{Hsv, Saturate};
///
/// let a = Hsv::new(0.0, 0.25, 1.0);
/// let b = Hsv::new(0.0, 1.0, 1.0);
///
/// assert_relative_eq!(a.saturate(1.0), b.desaturate(0.5));
/// ```
pub trait Saturate: Sized {
/// The type of the (de)saturation factor.
type Scalar: Float;
/// Increase the saturation by `factor`.
fn saturate(&self, factor: Self::Scalar) -> Self;
/// Decrease the saturation by `factor`.
fn desaturate(&self, factor: Self::Scalar) -> Self {
self.saturate(-factor)
}
}
/// Perform a unary or binary operation on each component of a color.
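///
/// A usage sketch (illustrative):
///
/// ```
/// use palette::{ComponentWise, LinSrgb};
///
/// let a = LinSrgb::new(0.1, 0.4, 0.7);
/// let b = LinSrgb::new(0.6, 0.1, 0.2);
///
/// // Per-channel maximum of the two colors.
/// let max = a.component_wise(&b, f32::max);
/// ```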
pub trait ComponentWise {
/// The scalar type for color components.
type Scalar;
/// Perform a binary operation on this and an other color.
fn component_wise<F: FnMut(Self::Scalar, Self::Scalar) -> Self::Scalar>(
&self,
other: &Self,
f: F,
) -> Self;
/// Perform a unary operation on this color.
fn component_wise_self<F: FnMut(Self::Scalar) -> Self::Scalar>(&self, f: F) -> Self;
}
/// A trait for infallible conversion from `f64`. The conversion may be lossy.
pub trait FromF64 {
/// Creates a value from an `f64` constant.
fn from_f64(c: f64) -> Self;
}
impl FromF64 for f32 {
#[inline]
fn from_f64(c: f64) -> Self {
c as f32
}
}
impl FromF64 for f64 {
#[inline]
fn from_f64(c: f64) -> Self {
c
}
}
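// Usage sketch for the helper below: `let half: f32 = from_f64(0.5);`
// picks the `FromF64` impl for `f32` above.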
/// A convenience function for converting a constant number to a float type
#[inline]
fn from_f64<T: FromF64>(c: f64) -> T {
T::from_f64(c)
}
list.rs
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License in the LICENSE-APACHE file or at:
// https://www.apache.org/licenses/LICENSE-2.0
//! Dynamic widgets
use std::iter;
use crate::draw::{DrawHandle, SizeHandle};
use crate::event::{Event, Handler, Manager, ManagerState, Response};
use crate::geom::Coord;
use crate::layout::{
self, AxisInfo, Margins, RowPositionSolver, RulesSetter, RulesSolver, SizeRules,
};
use crate::{AlignHints, Directional, Horizontal, Vertical};
use crate::{CoreData, Layout, TkAction, Widget, WidgetCore, WidgetId};
use kas::geom::Rect;
/// A generic row widget
///
/// See documentation of [`List`] type.
pub type Row<W> = List<Horizontal, W>;
/// A generic column widget
///
/// See documentation of [`List`] type.
pub type Column<W> = List<Vertical, W>;
/// A row of boxed widgets
///
/// This is parameterised over handler message type.
///
/// See documentation of [`List`] type.
pub type BoxRow<M> = BoxList<Horizontal, M>;
/// A column of boxed widgets
///
/// This is parameterised over handler message type.
///
/// See documentation of [`List`] type.
pub type BoxColumn<M> = BoxList<Vertical, M>;
/// A row/column of boxed widgets
///
/// This is parameterised over directionality and handler message type.
///
/// See documentation of [`List`] type.
pub type BoxList<D, M> = List<D, Box<dyn Handler<Msg = M>>>;
/// A generic row/column widget
///
/// This type is generic over both directionality and the type of child widgets.
/// Essentially, it is a [`Vec`] which also implements the [`Widget`] trait.
///
/// [`Row`] and [`Column`] are parameterisations with set directionality.
///
/// [`BoxList`] (and its derivatives [`BoxRow`], [`BoxColumn`]) parameterise
/// `W = Box<dyn Widget>`, thus supporting individually boxed child widgets.
/// This allows use of multiple types of child widget at the cost of extra
/// allocation, and requires dynamic dispatch of methods.
///
/// Configuring and resizing elements is O(n) in the number of children.
/// Drawing and event handling is O(log n) in the number of children (assuming
/// only a small number are visible at any one time).
///
/// For fixed configurations of child widgets, [`make_widget`] can be used
/// instead. [`make_widget`] has the advantage that it can support child widgets
/// of multiple types without allocation and via static dispatch, but the
/// disadvantage that drawing and event handling are O(n) in the number of
/// children.
///
/// [`make_widget`]: ../macros/index.html#the-make_widget-macro
#[derive(Clone, Default, Debug)]
pub struct List<D: Directional, W: Widget> {
core: CoreData,
widgets: Vec<W>,
data: layout::DynRowStorage,
direction: D,
}
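// Construction sketch (`Label` stands in for any concrete widget type
// implementing `Widget`; it is not defined in this module):
//
//     let row: Row<Label> = Row::new(vec![Label::new("a"), Label::new("b")]);
//     let col: Column<Label> = Column::new(vec![Label::new("x")]);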
// We implement this manually, because the derive implementation cannot handle
// vectors of child widgets.
impl<D: Directional, W: Widget> WidgetCore for List<D, W> {
#[inline]
fn core_data(&self) -> &CoreData {
&self.core
}
#[inline]
fn core_data_mut(&mut self) -> &mut CoreData {
&mut self.core
}
#[inline]
fn widget_name(&self) -> &'static str {
"List"
}
#[inline]
fn as_widget(&self) -> &dyn Widget {
self
}
#[inline]
fn as_widget_mut(&mut self) -> &mut dyn Widget {
self
}
#[inline]
fn len(&self) -> usize {
self.widgets.len()
}
#[inline]
fn get(&self, index: usize) -> Option<&dyn Widget> {
self.widgets.get(index).map(|w| w.as_widget())
}
#[inline]
fn get_mut(&mut self, index: usize) -> Option<&mut dyn Widget> {
self.widgets.get_mut(index).map(|w| w.as_widget_mut())
}
fn walk(&self, f: &mut dyn FnMut(&dyn Widget)) {
for child in &self.widgets {
child.walk(f);
}
f(self)
}
fn walk_mut(&mut self, f: &mut dyn FnMut(&mut dyn Widget)) {
for child in &mut self.widgets {
child.walk_mut(f);
}
f(self)
}
}
impl<D: Directional, W: Widget> Widget for List<D, W> {}
impl<D: Directional, W: Widget> Layout for List<D, W> {
fn size_rules(&mut self, size_handle: &mut dyn SizeHandle, axis: AxisInfo) -> SizeRules {
let mut solver = layout::RowSolver::<Vec<u32>, _>::new(
axis,
(self.direction, self.widgets.len()),
&mut self.data,
);
for (n, child) in self.widgets.iter_mut().enumerate() {
solver.for_child(&mut self.data, n, |axis| {
child.size_rules(size_handle, axis)
});
}
solver.finish(&mut self.data, iter::empty(), iter::empty())
}
fn set_rect(&mut self, size_handle: &mut dyn SizeHandle, rect: Rect, _: AlignHints) {
self.core.rect = rect;
let mut setter = layout::RowSetter::<D, Vec<u32>, _>::new(
rect,
Margins::ZERO,
(self.direction, self.widgets.len()),
&mut self.data,
);
for (n, child) in self.widgets.iter_mut().enumerate() {
let align = AlignHints::default();
child.set_rect(size_handle, setter.child_rect(n), align);
}
}
fn find_id(&self, coord: Coord) -> Option<WidgetId> {
let solver = RowPositionSolver::new(self.direction);
if let Some(child) = solver.find_child(&self.widgets, coord) {
return child.find_id(coord);
}
// We should return Some(self), but hit a borrow check error.
// This should however be unreachable anyway.
None
}
fn draw(&self, draw_handle: &mut dyn DrawHandle, mgr: &ManagerState) {
let solver = RowPositionSolver::new(self.direction);
solver.for_children(&self.widgets, draw_handle.target_rect(), |w| {
w.draw(draw_handle, mgr)
});
}
}
impl<D: Directional, W: Widget + Handler> Handler for List<D, W> {
type Msg = <W as Handler>::Msg;
fn handle(&mut self, mgr: &mut Manager, id: WidgetId, event: Event) -> Response<Self::Msg> {
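// This scan assumes ids are assigned such that every widget in a child's
// subtree has an id no greater than that child's own id, so the first
// child satisfying `id <= child.id()` contains (or is) the target.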
for child in &mut self.widgets {
if id <= child.id() {
return child.handle(mgr, id, event);
}
}
debug_assert!(id == self.id(), "Handler::handle: bad WidgetId");
Response::Unhandled(event)
}
}
impl<D: Directional + Default, W: Widget> List<D, W> {
/// Construct a new instance
///
/// This constructor is available where the direction is determined by the
/// type: for `D: Directional + Default`. In other cases, use
/// [`List::new_with_direction`].
pub fn new(widgets: Vec<W>) -> Self {
List {
core: Default::default(),
widgets,
data: Default::default(),
direction: Default::default(),
}
}
}
impl<D: Directional, W: Widget> List<D, W> {
/// Construct a new instance with explicit direction
pub fn new_with_direction(direction: D, widgets: Vec<W>) -> Self {
List {
core: Default::default(),
widgets,
data: Default::default(),
direction,
}
}
/// True if there are no child widgets
pub fn is_empty(&self) -> bool {
self.widgets.is_empty()
}
/// Returns the number of child widgets
pub fn len(&self) -> usize {
self.widgets.len()
}
/// Returns the number of elements the vector can hold without reallocating.
pub fn capacity(&self) -> usize {
self.widgets.capacity()
}
/// Reserves capacity for at least `additional` more elements to be inserted
/// into the list. See documentation of [`Vec::reserve`].
pub fn reserve(&mut self, additional: usize) {
self.widgets.reserve(additional);
}
/// Remove all child widgets
///
/// Triggers a [reconfigure action](Manager::send_action) if any widget is
/// removed.
pub fn clear(&mut self, mgr: &mut Manager) {
if !self.widgets.is_empty() {
mgr.send_action(TkAction::Reconfigure);
}
self.widgets.clear();
}
/// Append a child widget
///
/// Triggers a [reconfigure action](Manager::send_action).
pub fn push(&mut self, mgr: &mut Manager, widget: W) {
self.widgets.push(widget);
mgr.send_action(TkAction::Reconfigure);
}
/// Remove the last child widget
///
/// Returns `None` if there are no children. Otherwise, the last child is
/// removed and a [reconfigure action](Manager::send_action) is triggered
/// before the next draw operation.
pub fn pop(&mut self, mgr: &mut Manager) -> Option<W> {
if !self.widgets.is_empty() {
mgr.send_action(TkAction::Reconfigure);
}
self.widgets.pop()
}
/// Inserts a child widget at position `index`
///
/// Panics if `index > len`.
///
/// Triggers a [reconfigure action](Manager::send_action).
pub fn insert(&mut self, mgr: &mut Manager, index: usize, widget: W) {
self.widgets.insert(index, widget);
mgr.send_action(TkAction::Reconfigure);
}
/// Removes the child widget at position `index`
///
/// Panics if `index` is out of bounds.
///
/// Triggers a [reconfigure action](Manager::send_action).
pub fn remove(&mut self, mgr: &mut Manager, index: usize) -> W {
let r = self.widgets.remove(index);
mgr.send_action(TkAction::Reconfigure);
r
}
/// Replace the child at `index`
///
/// Panics if `index` is out of bounds.
///
/// Triggers a [reconfigure action](Manager::send_action).
// TODO: in theory it is possible to avoid a reconfigure where both widgets
// have no children and have compatible size. Is this a good idea and can
// we somehow test "has compatible size"?
pub fn replace(&mut self, mgr: &mut Manager, index: usize, mut widget: W) -> W {
std::mem::swap(&mut widget, &mut self.widgets[index]);
mgr.send_action(TkAction::Reconfigure);
widget
}
/// Append child widgets from an iterator
///
/// Triggers a [reconfigure action](Manager::send_action) if any widgets
/// are added.
pub fn extend<T: IntoIterator<Item = W>>(&mut self, mgr: &mut Manager, iter: T) {
let len = self.widgets.len();
self.widgets.extend(iter);
if len != self.widgets.len() {
mgr.send_action(TkAction::Reconfigure);
}
}
/// Resize, using the given closure to construct new widgets
///
/// Triggers a [reconfigure action](Manager::send_action).
pub fn resize_with<F: Fn(usize) -> W>(&mut self, mgr: &mut Manager, len: usize, f: F) {
let l0 = self.widgets.len();
if l0 == len {
return;
} else if l0 > len {
self.widgets.truncate(len);
} else {
self.widgets.reserve(len);
for i in l0..len {
self.widgets.push(f(i));
}
}
mgr.send_action(TkAction::Reconfigure);
}
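// Usage sketch (illustrative; `Label` is a stand-in widget type):
//
//     list.resize_with(mgr, n, |i| Label::new(format!("item {}", i)));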
/// Retain only widgets satisfying predicate `f`
///
/// See documentation of [`Vec::retain`].
///
/// Triggers a [reconfigure action](Manager::send_action) if any widgets
/// are removed.
pub fn retain<F: FnMut(&W) -> bool>(&mut self, mgr: &mut Manager, f: F) {
let len = self.widgets.len();
self.widgets.retain(f);
if len != self.widgets.len() {
mgr.send_action(TkAction::Reconfigure);
}
}
}