Dataset columns: file_name (string, 4 to 69 chars), prefix (string, 0 to 26.7k chars), suffix (string, 0 to 24.8k chars), middle (string, 0 to 2.12k chars), fim_type (4 classes).
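Each row below splits one source file into prefix, middle and suffix fields at a point described by fim_type. A minimal sketch of how such rows are typically consumed (an assumption about usage, not something the dataset itself states):

/// One row of the dataset (field types assumed from the column summary above).
struct FimRow {
    file_name: String,
    prefix: String,
    middle: String,
    suffix: String,
    fim_type: String,
}

impl FimRow {
    /// Concatenating the three fields reconstructs the original source file;
    /// a model under evaluation is asked to produce `middle` given `prefix`
    /// and `suffix`.
    fn reconstruct(&self) -> String {
        format!("{}{}{}", self.prefix, self.middle, self.suffix)
    }
}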
file_name: progress.rs
prefix:

use num_enum::IntoPrimitive;
use once_cell::sync::Lazy;
use std::sync::mpsc::Sender;
use std::{mem, pin::Pin, ptr};
use wchar::*;
use widestring::*;
use winapi::shared::basetsd;
use winapi::shared::minwindef as win;
use winapi::shared::windef::*;
use winapi::um::commctrl;
use winapi::um::errhandlingapi;
use winapi::um::libloaderapi;
use winapi::um::wingdi;
use winapi::um::winuser;
use wslscript_common::error::*;
use wslscript_common::font::Font;
use wslscript_common::wcstring;
use wslscript_common::win32;

pub struct ProgressWindow {
    /// Maximum value for progress.
    high_limit: usize,
    /// Sender to signal for cancellation.
    cancel_sender: Option<Sender<()>>,
    /// Window handle.
    hwnd: HWND,
    /// Default font.
    font: Font,
}

impl Default for ProgressWindow {
    fn default() -> Self {
        Self {
            high_limit: 0,
            cancel_sender: None,
            hwnd: ptr::null_mut(),
            font: Font::default(),
        }
    }
}

/// Progress window class name.
static WND_CLASS: Lazy<WideCString> = Lazy::new(|| wcstring("WSLScriptProgress"));

/// Window message for progress update.
pub const WM_PROGRESS: win::UINT = winuser::WM_USER + 1;

/// Child window identifiers.
#[derive(IntoPrimitive, PartialEq)]
#[repr(u16)]
enum Control {
    ProgressBar = 100,
    Message,
    Title,
}

/// Minimum and initial main window size as a (width, height) tuple.
const MIN_WINDOW_SIZE: (i32, i32) = (300, 150);

impl ProgressWindow {
    pub fn new(high_limit: usize, cancel_sender: Sender<()>) -> Result<Pin<Box<Self>>, Error> {
        use winuser::*;
        // register window class
        if !Self::is_window_class_registered() {
            Self::register_window_class()?;
        }
        let mut wnd = Pin::new(Box::new(Self::default()));
        wnd.high_limit = high_limit;
        wnd.cancel_sender = Some(cancel_sender);
        let instance = unsafe { libloaderapi::GetModuleHandleW(ptr::null_mut()) };
        let title = wchz!("WSL Script");
        // create window
        #[rustfmt::skip]
        let hwnd = unsafe { CreateWindowExW(
            WS_EX_TOOLWINDOW | WS_EX_TOPMOST,
            WND_CLASS.as_ptr(), title.as_ptr(),
            WS_OVERLAPPEDWINDOW & !WS_MAXIMIZEBOX | WS_VISIBLE,
            CW_USEDEFAULT, CW_USEDEFAULT, MIN_WINDOW_SIZE.0, MIN_WINDOW_SIZE.1,
            ptr::null_mut(), ptr::null_mut(), instance,
            // self as a `CREATESTRUCT`'s `lpCreateParams`
            &*wnd as *const Self as win::LPVOID) };
        if hwnd.is_null() {
            return Err(win32::last_error());
        }
        Ok(wnd)
    }

    /// Get handle to main window.
    pub fn handle(&self) -> HWND {
        self.hwnd
    }

    /// Run message loop.
    pub fn run(&self) -> Result<(), Error> {
        log::debug!("Starting message loop");
        loop {
            let mut msg: winuser::MSG = unsafe { mem::zeroed() };
            match unsafe { winuser::GetMessageW(&mut msg, ptr::null_mut(), 0, 0) } {
                1..=std::i32::MAX => unsafe {
                    winuser::TranslateMessage(&msg);
                    winuser::DispatchMessageW(&msg);
                },
                std::i32::MIN..=-1 => return Err(win32::last_error()),
                0 => {
                    log::debug!("Received WM_QUIT");
                    return Ok(());
                }
            }
        }
    }

    /// Signal that progress should be cancelled.
    pub fn cancel(&self) {
        if let Some(tx) = &self.cancel_sender {
            tx.send(()).unwrap_or_else(|_| {
                log::error!("Failed to send cancel signal");
            });
        }
    }

    /// Close main window.
    pub fn close(&self) {
        unsafe { winuser::PostMessageW(self.hwnd, winuser::WM_CLOSE, 0, 0) };
    }

    /// Create child control windows.
    fn create_window_controls(&mut self) -> Result<(), Error> {
        use winuser::*;
        let instance = unsafe { GetWindowLongPtrW(self.hwnd, GWLP_HINSTANCE) as win::HINSTANCE };
        self.font = Font::new_caption(20)?;
        // init common controls
        let icex = commctrl::INITCOMMONCONTROLSEX {
            dwSize: mem::size_of::<commctrl::INITCOMMONCONTROLSEX>() as u32,
            dwICC: commctrl::ICC_PROGRESS_CLASS,
        };
        unsafe { commctrl::InitCommonControlsEx(&icex) };
        // progress bar
        #[rustfmt::skip]
        let hwnd = unsafe { CreateWindowExW(
            0, wcstring(commctrl::PROGRESS_CLASS).as_ptr(), ptr::null_mut(),
            WS_CHILD | WS_VISIBLE | commctrl::PBS_MARQUEE,
            0, 0, 0, 0,
            self.hwnd, Control::ProgressBar as u16 as _, instance, ptr::null_mut(),
        ) };
        unsafe { SendMessageW(hwnd, commctrl::PBM_SETRANGE32, 0, self.high_limit as _) };
        unsafe { SendMessageW(hwnd, commctrl::PBM_SETMARQUEE, 1, 0) };
        // static message area
        #[rustfmt::skip]
        let hwnd = unsafe { CreateWindowExW(
            0, wchz!("STATIC").as_ptr(), ptr::null_mut(),
            SS_CENTER | WS_CHILD | WS_VISIBLE,
            0, 0, 0, 0,
            self.hwnd, Control::Message as u16 as _, instance, ptr::null_mut(),
        ) };
        Self::set_window_font(hwnd, &self.font);
        // static title
        #[rustfmt::skip]
        let hwnd = unsafe { CreateWindowExW(
            0, wchz!("STATIC").as_ptr(), ptr::null_mut(),
            SS_CENTER | WS_CHILD | WS_VISIBLE,
            0, 0, 0, 0,
            self.hwnd, Control::Title as u16 as _, instance, ptr::null_mut(),
        ) };
        Self::set_window_font(hwnd, &self.font);
        unsafe { SetWindowTextW(hwnd, wchz!("Converting paths...").as_ptr()) };
        Ok(())
    }

    /// Called when client was resized.
    fn on_resize(&self, width: i32, _height: i32) {
        self.move_control(Control::Title, 10, 10, width - 20, 20);
        self.move_control(Control::ProgressBar, 10, 40, width - 20, 30);
        self.move_control(Control::Message, 10, 80, width - 20, 20);
    }

    /// Move control relative to main window.
    fn move_control(&self, control: Control, x: i32, y: i32, width: i32, height: i32) {
        let hwnd = self.get_control_handle(control);
        unsafe { winuser::MoveWindow(hwnd, x, y, width, height, win::TRUE) };
    }

    /// Get window handle of given control.
    fn get_control_handle(&self, control: Control) -> HWND {
        unsafe { winuser::GetDlgItem(self.hwnd, control as i32) }
    }

    /// Set font to given window.
    fn set_window_font(hwnd: HWND, font: &Font) {
        unsafe { winuser::SendMessageW(hwnd, winuser::WM_SETFONT, font.handle as _, win::TRUE as _) };
    }

    /// Update controls to display given progress.
    fn update_progress(&mut self, current: usize, max: usize) {
        use commctrl::*;
        use winuser::*;
        log::debug!("Progress update: {}/{}", current, max);
        let msg = format!("{} / {}", current, max);
        unsafe {
            SetWindowTextW(
                self.get_control_handle(Control::Message),
                wcstring(msg).as_ptr(),
            )
        };
        if self.is_marquee_progress() {
            self.set_progress_to_range_mode();
        }
        let hwnd = self.get_control_handle(Control::ProgressBar);
        unsafe { SendMessageW(hwnd, PBM_SETPOS, current, 0) };
        // if done, close cancellation channel
        if current == max {
            self.cancel_sender.take();
        }
    }

    /// Check whether progress bar is in marquee mode.
    fn is_marquee_progress(&self) -> bool {
        let style = unsafe {
            winuser::GetWindowLongW(
                self.get_control_handle(Control::ProgressBar),
                winuser::GWL_STYLE,
            )
        } as u32;
        style & commctrl::PBS_MARQUEE != 0
    }

    /// Set progress bar to range mode.
    fn set_progress_to_range_mode(&self) {
        use commctrl::*;
        use winuser::*;
        let hwnd = self.get_control_handle(Control::ProgressBar);
        let mut style = unsafe { GetWindowLongW(hwnd, GWL_STYLE) } as u32;
        style &= !PBS_MARQUEE;
        style |= PBS_SMOOTH;
        unsafe { SetWindowLongW(hwnd, GWL_STYLE, style as _) };
        unsafe { SendMessageW(hwnd, PBM_SETMARQUEE, 0, 0) };
    }
}

impl ProgressWindow {
    /// Check whether window class is registered.
    pub fn

suffix:

() -> bool {
        unsafe {
            let instance = libloaderapi::GetModuleHandleW(ptr::null_mut());
            let mut wc: winuser::WNDCLASSEXW = mem::zeroed();
            winuser::GetClassInfoExW(instance, WND_CLASS.as_ptr(), &mut wc) != 0
        }
    }

    /// Register window class.
    pub fn register_window_class() -> Result<(), Error> {
        use winuser::*;
        log::debug!("Registering {} window class", WND_CLASS.to_string_lossy());
        let instance = unsafe { libloaderapi::GetModuleHandleW(ptr::null_mut()) };
        let wc = WNDCLASSEXW {
            cbSize: mem::size_of::<WNDCLASSEXW>() as u32,
            style: CS_OWNDC | CS_HREDRAW | CS_VREDRAW,
            hbrBackground: (COLOR_WINDOW + 1) as HBRUSH,
            lpfnWndProc: Some(window_proc_wrapper::<ProgressWindow>),
            hInstance: instance,
            lpszClassName: WND_CLASS.as_ptr(),
            hIcon: ptr::null_mut(),
            hCursor: unsafe { LoadCursorW(ptr::null_mut(), IDC_ARROW) },
            ..unsafe { mem::zeroed() }
        };
        if 0 == unsafe { RegisterClassExW(&wc) } {
            Err(win32::last_error())
        } else {
            Ok(())
        }
    }

    /// Unregister window class.
    pub fn unregister_window_class() {
        log::debug!("Unregistering {} window class", WND_CLASS.to_string_lossy());
        unsafe {
            let instance = libloaderapi::GetModuleHandleW(ptr::null_mut());
            winuser::UnregisterClassW(WND_CLASS.as_ptr(), instance);
        }
    }
}

trait WindowProc {
    /// Window procedure callback.
    ///
    /// If None is returned, underlying wrapper calls `DefWindowProcW`.
    fn window_proc(
        &mut self,
        hwnd: HWND,
        msg: win::UINT,
        wparam: win::WPARAM,
        lparam: win::LPARAM,
    ) -> Option<win::LRESULT>;
}

/// Window proc wrapper that manages the `&self` pointer to `ProgressWindow` object.
///
/// Must be `extern "system"` because the function is called by Windows.
extern "system" fn window_proc_wrapper<T: WindowProc>(
    hwnd: HWND,
    msg: win::UINT,
    wparam: win::WPARAM,
    lparam: win::LPARAM,
) -> win::LRESULT {
    use winuser::*;
    // get pointer to T from userdata
    let mut ptr = unsafe { GetWindowLongPtrW(hwnd, GWLP_USERDATA) } as *mut T;
    // not yet set, initialize from CREATESTRUCT
    if ptr.is_null() && msg == WM_NCCREATE {
        let cs = unsafe { &*(lparam as LPCREATESTRUCTW) };
        ptr = cs.lpCreateParams as *mut T;
        log::debug!("Initialize window pointer {:p}", ptr);
        unsafe { errhandlingapi::SetLastError(0) };
        if 0 == unsafe {
            SetWindowLongPtrW(hwnd, GWLP_USERDATA, ptr as *const _ as basetsd::LONG_PTR)
        } && unsafe { errhandlingapi::GetLastError() } != 0
        {
            return win::FALSE as win::LRESULT;
        }
    }
    // call wrapped window proc
    if !ptr.is_null() {
        let this = unsafe { &mut *(ptr as *mut T) };
        if let Some(result) = this.window_proc(hwnd, msg, wparam, lparam) {
            return result;
        }
    }
    unsafe { DefWindowProcW(hwnd, msg, wparam, lparam) }
}

impl WindowProc for ProgressWindow {
    fn window_proc(
        &mut self,
        hwnd: HWND,
        msg: win::UINT,
        wparam: win::WPARAM,
        lparam: win::LPARAM,
    ) -> Option<win::LRESULT> {
        use winuser::*;
        match msg {
            // https://docs.microsoft.com/en-us/windows/win32/winmsg/wm-nccreate
            WM_NCCREATE => {
                // store main window handle
                self.hwnd = hwnd;
                // WM_NCCREATE must be passed to DefWindowProc
                None
            }
            // https://docs.microsoft.com/en-us/windows/win32/winmsg/wm-create
            WM_CREATE => match self.create_window_controls() {
                Err(e) => {
                    log::error!("Failed to create window controls: {}", e);
                    Some(-1)
                }
                Ok(()) => Some(0),
            },
            // https://docs.microsoft.com/en-us/windows/win32/winmsg/wm-size
            WM_SIZE => {
                self.on_resize(
                    i32::from(win::LOWORD(lparam as u32)),
                    i32::from(win::HIWORD(lparam as u32)),
                );
                Some(0)
            }
            // https://docs.microsoft.com/en-us/windows/win32/winmsg/wm-getminmaxinfo
            WM_GETMINMAXINFO => {
                let mmi = unsafe { &mut *(lparam as LPMINMAXINFO) };
                mmi.ptMinTrackSize.x = MIN_WINDOW_SIZE.0;
                mmi.ptMinTrackSize.y = MIN_WINDOW_SIZE.1;
                Some(0)
            }
            // https://docs.microsoft.com/en-us/windows/win32/controls/wm-ctlcolorstatic
            WM_CTLCOLORSTATIC => {
                Some(unsafe { wingdi::GetStockObject(COLOR_WINDOW + 1) } as win::LPARAM)
            }
            // https://docs.microsoft.com/en-us/windows/win32/winmsg/wm-close
            WM_CLOSE => {
                self.cancel();
                unsafe { DestroyWindow(hwnd) };
                Some(0)
            }
            // https://docs.microsoft.com/en-us/windows/win32/winmsg/wm-destroy
            WM_DESTROY => {
                unsafe { PostQuitMessage(0) };
                Some(0)
            }
            WM_PROGRESS => {
                self.update_progress(wparam, lparam as _);
                Some(0)
            }
            _ => None,
        }
    }
}
middle: is_window_class_registered
fim_type: identifier_name
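A hypothetical driver for the API above (an assumption, not shown in this row): create the window, run a worker thread that posts WM_PROGRESS updates, and pump messages until the window closes. Smuggling the HWND to the worker as a usize is a sketch-level workaround for HWND not being Send.

use std::sync::mpsc;
use std::thread;
use winapi::um::winuser;

fn convert_with_progress(total: usize) -> Result<(), Error> {
    let (cancel_tx, cancel_rx) = mpsc::channel::<()>();
    let wnd = ProgressWindow::new(total, cancel_tx)?;
    // HWND is a raw pointer and not `Send`; pass it as an integer (sketch only).
    let hwnd = wnd.handle() as usize;
    thread::spawn(move || {
        for done in 1..=total {
            if cancel_rx.try_recv().is_ok() {
                break; // the window's WM_CLOSE handler signalled cancellation
            }
            // ... convert one path here ...
            // wparam = current item, lparam = total, per `update_progress` above
            unsafe { winuser::PostMessageW(hwnd as _, WM_PROGRESS, done, total as _) };
        }
        // done (or cancelled): ask the window to close itself
        unsafe { winuser::PostMessageW(hwnd as _, winuser::WM_CLOSE, 0, 0) };
    });
    // blocks until WM_QUIT, i.e. until the window is destroyed
    wnd.run()
}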
file_name: progress.rs
prefix / suffix: same progress.rs source as the row above, split inside `update_progress` at the `if self.is_marquee_progress()` check (the conditional's block is held out as the middle).
middle: { self.set_progress_to_range_mode(); }
fim_type: conditional_block
file_name: typescript.rs
prefix:

//! Generation of TypeScript types from Stencila Schema

use std::{
    collections::HashSet,
    fs::read_dir,
    path::{Path, PathBuf},
};

use common::{
    async_recursion::async_recursion,
    eyre::{bail, Context, Report, Result},
    futures::future::try_join_all,
    inflector::Inflector,
    itertools::Itertools,
    tokio::fs::{create_dir_all, remove_file, write},
};

use crate::schemas::{Items, Schema, Schemas, Type, Value};

/// Comment to place at the top of a file to indicate it is generated
const GENERATED_COMMENT: &str = "// Generated file; do not edit. See `../rust/schema-gen` crate.";

/// Modules that should not be generated
///
/// These modules are manually written, usually because they are
/// an alias for a native JavaScript type.
const NO_GENERATE_MODULE: &[&str] = &[
    "Array",
    "Boolean",
    "Integer",
    "Null",
    "Number",
    "Object",
    "Primitive",
    "String",
    "TextValue",
    "UnsignedInteger",
];

/// Types for which native TypeScript types are used directly
///
/// Note that this excludes `Integer`, `UnsignedInteger` and `Object`
/// which, although they are implemented as native types, have different semantics.
const NATIVE_TYPES: &[&str] = &["null", "boolean", "number", "string"];

impl Schemas {
    /// Generate a TypeScript module for each schema
    pub async fn typescript(&self) -> Result<()> {
        eprintln!("Generating TypeScript types");

        // The top level destination
        let dest = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("../../typescript/src");
        let dest = dest
            .canonicalize()
            .context(format!("can not find directory `{}`", dest.display()))?;

        // The types directory that modules get generated into
        let types = dest.join("types");
        if types.exists() {
            // Already exists, so clean up existing files, except for those that are not generated
            for file in read_dir(&types)?.flatten() {
                let path = file.path();
                if NO_GENERATE_MODULE.contains(
                    &path
                        .file_name()
                        .unwrap()
                        .to_string_lossy()
                        .strip_suffix(".ts")
                        .unwrap(),
                ) {
                    continue;
                }
                remove_file(&path).await?
            }
        } else {
            // Doesn't exist, so create it
            create_dir_all(&types).await?;
        }

        // Create a module for each schema
        let futures = self
            .schemas
            .values()
            .map(|schema| Self::typescript_module(&types, schema));
        try_join_all(futures).await?;

        // Create an index.ts which exports types from all modules (including those
        // that are not generated)
        let exports = read_dir(&types)
            .wrap_err(format!("unable to read directory `{}`", types.display()))?
            .flatten()
            .map(|entry| {
                entry
                    .path()
                    .file_name()
                    .unwrap()
                    .to_string_lossy()
                    .strip_suffix(".ts")
                    .unwrap()
                    .to_string()
            })
            .sorted()
            .map(|module| format!("export * from './types/{module}';"))
            .join("\n");
        write(
            dest.join("index.ts"),
            format!(
                r"{GENERATED_COMMENT}

{exports}
"
            ),
        )
        .await?;

        Ok(())
    }

    /// Generate a TypeScript module for a schema
    async fn typescript_module(dest: &Path, schema: &Schema) -> Result<()> {
        let Some(title) = &schema.title else {
            bail!("Schema has no title");
        };

        if NO_GENERATE_MODULE.contains(&title.as_str()) || schema.r#abstract {
            return Ok(());
        }

        if schema.any_of.is_some() {
            Self::typescript_any_of(dest, schema).await?;
        } else if schema.r#type.is_none() {
            Self::typescript_object(dest, title, schema).await?;
        }

        Ok(())
    }

    /// Generate a TypeScript type for a schema
    ///
    /// Returns the name of the type and whether:
    /// - it is an array
    /// - it is a type (rather than an enum variant)
    #[async_recursion]
    async fn typescript_type(dest: &Path, schema: &Schema) -> Result<(String, bool, bool)> {
        use Type::*;

        // If the Stencila Schema type name corresponds to a TypeScript
        // native type then return the name of the native type, otherwise
        // return the pascal cased name (e.g. `integer` -> `Integer`)
        let maybe_native_type = |type_name: &str| {
            let lower = type_name.to_lowercase();
            if NATIVE_TYPES.contains(&lower.as_str()) {
                lower
            } else {
                type_name.to_pascal_case()
            }
        };

        let result = if let Some(r#type) = &schema.r#type {
            match r#type {
                Array => {
                    let items = match &schema.items {
                        Some(Items::Ref(inner)) => maybe_native_type(&inner.r#ref),
                        Some(Items::Type(inner)) => maybe_native_type(&inner.r#type),
                        Some(Items::AnyOf(inner)) => {
                            let schema = Schema {
                                any_of: Some(inner.any_of.clone()),
                                ..Default::default()
                            };
                            Self::typescript_type(dest, &schema).await?.0
                        }
                        Some(Items::List(inner)) => {
                            let schema = Schema {
                                any_of: Some(inner.clone()),
                                ..Default::default()
                            };
                            Self::typescript_type(dest, &schema).await?.0
                        }
                        None => "Unhandled".to_string(),
                    };
                    (items, true, true)
                }
                _ => (maybe_native_type(r#type.as_ref()), false, true),
            }
        } else if let Some(r#ref) = &schema.r#ref {
            (maybe_native_type(r#ref), false, true)
        } else if schema.any_of.is_some() {
            (Self::typescript_any_of(dest, schema).await?, false, true)
        } else if let Some(title) = &schema.title {
            (title.to_string(), false, true)
        } else if let Some(r#const) = &schema.r#const {
            (Self::typescript_value(r#const), false, false)
        } else {
            ("Unhandled".to_string(), false, true)
        };

        Ok(result)
    }

    /// Generate a TypeScript `class` for an object schema with `properties`
    ///
    /// Returns the name of the generated `class`.
    async fn

suffix:

(dest: &Path, title: &String, schema: &Schema) -> Result<String> {
        let path = dest.join(format!("{}.ts", title));
        if path.exists() {
            return Ok(title.to_string());
        }

        let description = schema
            .description
            .as_ref()
            .unwrap_or(title)
            .trim_end_matches('\n')
            .replace('\n', "\n // ");

        let mut props = Vec::new();
        let mut required_props = Vec::new();
        let mut used_types = HashSet::new();
        for (name, property) in schema.properties.iter().flatten() {
            let description = property
                .description
                .as_ref()
                .unwrap_or(name)
                .trim_end_matches('\n')
                .replace('\n', "\n // ");
            let name = name.to_camel_case();

            // Early return for "type" property
            if name == "type" {
                props.push(format!(" type = \"{title}\";"));
                continue;
            }

            let mut prop = name.clone();

            // Determine the TypeScript type of the property
            let (mut prop_type, is_array, ..) = Self::typescript_type(dest, property).await?;
            used_types.insert(prop_type.clone());

            // Is the property optional?
            if !property.is_required {
                prop.push('?');
            }

            prop.push_str(": ");

            // Is the property an array?
            if is_array {
                prop_type.push_str("[]");
            };

            prop.push_str(&prop_type);

            // If the property is required, add it to the constructor args.
            if property.is_required {
                // An argument can not be named `arguments` so deal with that
                // special case here.
                required_props.push(if name == "arguments" {
                    (
                        format!("this.{name} = args;"),
                        format!("args: {prop_type}, "),
                    )
                } else {
                    (
                        format!("this.{name} = {name};"),
                        format!("{name}: {prop_type}, "),
                    )
                });
            }

            // Does the property have a default?
            if let Some(default) = property.default.as_ref() {
                let default = Self::typescript_value(default);
                prop.push_str(&format!(" = {default}"));
            };

            props.push(format!(" // {description}\n {prop};"));
        }
        let props = props.join("\n\n");

        let required_args = required_props.iter().map(|(.., arg)| arg).join("");
        let required_assignments = required_props
            .iter()
            .map(|(assignment, ..)| assignment)
            .join("\n ");

        let mut imports = used_types
            .into_iter()
            .filter(|used_type| {
                used_type != title && !NATIVE_TYPES.contains(&used_type.to_lowercase().as_str())
            })
            .sorted()
            .map(|used_type| format!("import {{ {used_type} }} from './{used_type}';"))
            .join("\n");
        if !imports.is_empty() {
            imports.push_str("\n\n");
        }

        write(
            path,
            &format!(
                r#"{GENERATED_COMMENT}

{imports}// {description}
export class {title} {{
{props}

  constructor({required_args}options?: {title}) {{
    if (options) Object.assign(this, options)
    {required_assignments}
  }}
}}
"#
            ),
        )
        .await?;

        Ok(title.to_string())
    }

    /// Generate a TypeScript discriminated union `type` for an `anyOf` root schema or property schema
    ///
    /// Returns the name of the generated enum.
    async fn typescript_any_of(dest: &Path, schema: &Schema) -> Result<String> {
        let Some(any_of) = &schema.any_of else {
            bail!("Schema has no anyOf");
        };

        let (alternatives, are_types): (Vec<_>, Vec<_>) =
            try_join_all(any_of.iter().map(|schema| async {
                let (typ, is_array, is_type) = Self::typescript_type(dest, schema).await?;
                let typ = if is_array {
                    Self::typescript_array_of(dest, &typ).await?
                } else {
                    typ
                };
                Ok::<_, Report>((typ, is_type))
            }))
            .await?
            .into_iter()
            .unzip();

        let name = schema.title.clone().unwrap_or_else(|| {
            alternatives
                .iter()
                .map(|name| name.to_pascal_case())
                .join("Or")
        });

        let path = dest.join(format!("{}.ts", name));
        if path.exists() {
            return Ok(name);
        }

        let description = if let Some(title) = &schema.title {
            schema
                .description
                .clone()
                .unwrap_or(title.clone())
                .trim_end_matches('\n')
                .replace('\n', "\n // ")
        } else {
            alternatives
                .iter()
                .map(|variant| format!("`{variant}`"))
                .join(" or ")
        };

        let alternatives = alternatives
            .into_iter()
            .zip(are_types.into_iter())
            .collect_vec();

        let mut imports = alternatives
            .iter()
            .sorted()
            .filter_map(|(name, is_type)| {
                (*is_type && !NATIVE_TYPES.contains(&name.to_lowercase().as_str()))
                    .then_some(format!("import {{ {name} }} from './{name}'",))
            })
            .join("\n");
        if !imports.is_empty() {
            imports.push_str("\n\n");
        }

        let variants = alternatives
            .into_iter()
            .map(|(variant, is_type)| {
                if is_type {
                    variant
                } else {
                    format!("'{variant}'")
                }
            })
            .join(" |\n ");

        write(
            path,
            format!(
                r#"{GENERATED_COMMENT}

{imports}// {description}
export type {name} =
 {variants};
"#
            ),
        )
        .await?;

        Ok(name)
    }

    /// Generate a TypeScript `type` for an "array of" type
    ///
    /// Returns the name of the generated type which will be the plural
    /// of the type of the array items.
    async fn typescript_array_of(dest: &Path, item_type: &str) -> Result<String> {
        let name = item_type.to_plural();

        let path = dest.join(format!("{}.ts", name));
        if path.exists() {
            return Ok(name);
        }

        write(
            path,
            format!(
                r#"{GENERATED_COMMENT}

import {{ {item_type} }} from './{item_type}';

export type {name} = {item_type}[];
"#
            ),
        )
        .await?;

        Ok(name)
    }

    /// Generate a TypeScript representation of a JSON schema value
    ///
    /// Returns a literal corresponding to the type of the value.
    fn typescript_value(value: &Value) -> String {
        match value {
            Value::Null => "null".to_string(),
            Value::Boolean(inner) => inner.to_string(),
            Value::Integer(inner) => inner.to_string(),
            Value::Number(inner) => inner.to_string(),
            Value::String(inner) => inner.to_string(),
            _ => "Unhandled value type".to_string(),
        }
    }
}
middle: typescript_object
fim_type: identifier_name
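The `maybe_native_type` closure above is the core of the name mapping; restated here as a free function for illustration (assuming `Inflector` from the `inflector` crate, which the file pulls in via `common::inflector`):

use inflector::Inflector;

const NATIVE_TYPES: &[&str] = &["null", "boolean", "number", "string"];

/// Map a Stencila Schema type name to the name used in generated TypeScript.
fn maybe_native_type(type_name: &str) -> String {
    let lower = type_name.to_lowercase();
    if NATIVE_TYPES.contains(&lower.as_str()) {
        lower // e.g. "String" -> "string"
    } else {
        type_name.to_pascal_case() // e.g. "integer" -> "Integer"
    }
}

fn main() {
    assert_eq!(maybe_native_type("boolean"), "boolean");
    // `integer` is deliberately excluded from NATIVE_TYPES, so it maps to a
    // generated module name rather than a native TypeScript type.
    assert_eq!(maybe_native_type("integer"), "Integer");
}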
file_name: typescript.rs
prefix / suffix: same typescript.rs source as the row above, with the body of `typescript_module` held out as the middle.
middle:

{
    let Some(title) = &schema.title else {
        bail!("Schema has no title");
    };

    if NO_GENERATE_MODULE.contains(&title.as_str()) || schema.r#abstract {
        return Ok(());
    }

    if schema.any_of.is_some() {
        Self::typescript_any_of(dest, schema).await?;
    } else if schema.r#type.is_none() {
        Self::typescript_object(dest, title, schema).await?;
    }

    Ok(())
}

fim_type: identifier_body
file_name: typescript.rs
prefix / suffix: same typescript.rs source as the row above, split inside the `NO_GENERATE_MODULE` list (prefix ends after `"Integer",`).
"Null", "Number",
random_line_split
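The fim_type values seen in these rows (identifier_name, conditional_block, identifier_body, random_line_split) describe how the split point was chosen. A sketch of the simplest variant, assuming splits are made at random line boundaries (the dataset's exact construction is not documented here; `rand` 0.8 API assumed):

use rand::Rng;

/// Split non-empty `source` into (prefix, middle, suffix) at random line boundaries.
fn random_line_split(source: &str) -> (String, String, String) {
    let lines: Vec<&str> = source.split_inclusive('\n').collect();
    let mut rng = rand::thread_rng();
    // Assumes at least one line; a real pipeline would guard empty inputs.
    let start = rng.gen_range(0..lines.len());
    let end = (start + rng.gen_range(1..=2)).min(lines.len());
    (
        lines[..start].concat(),    // prefix
        lines[start..end].concat(), // middle (held out)
        lines[end..].concat(),      // suffix
    )
}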
file_name: version_info.rs
prefix:

/*!
Version Information.

See [Microsoft's documentation](https://docs.microsoft.com/en-us/windows/desktop/menurc/version-information) for more information.
*/

use std::{char, cmp, fmt, mem, slice};
use std::collections::HashMap;

use crate::image::VS_FIXEDFILEINFO;
use crate::{Error, Result, _Pod as Pod};
use crate::util::{AlignTo, wstrn};

//----------------------------------------------------------------

/// Language and charset pair.
///
/// References [langID](https://docs.microsoft.com/en-us/windows/desktop/menurc/versioninfo-resource#langID) and [charsetID](https://docs.microsoft.com/en-us/windows/desktop/menurc/versioninfo-resource#charsetID).
#[derive(Copy, Clone, Debug, Pod, Eq, PartialEq)]
#[repr(C)]
pub struct Language {
    pub lang_id: u16,
    pub charset_id: u16,
}

impl Language {
    /// Parse language hex strings.
    pub fn parse(lang: &[u16]) -> std::result::Result<Language, &[u16]> {
        if lang.len() != 8 {
            return Err(lang);
        }
        fn digit(word: u16) -> u16 {
            let num = word.wrapping_sub('0' as u16);
            let upper = word.wrapping_sub('A' as u16).wrapping_add(10);
            let lower = word.wrapping_sub('a' as u16).wrapping_add(10);
            if word >= 'a' as u16 { lower }
            else if word >= 'A' as u16 { upper }
            else { num }
        }
        let mut digits = [0u16; 8];
        for i in 0..8 {
            digits[i] = digit(lang[i]);
        }
        let lang_id = (digits[0] << 12) | (digits[1] << 8) | (digits[2] << 4) | digits[3];
        let charset_id = (digits[4] << 12) | (digits[5] << 8) | (digits[6] << 4) | digits[7];
        Ok(Language { lang_id, charset_id })
    }
}

impl fmt::Display for Language {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{:04X}{:04X}", self.lang_id, self.charset_id)
    }
}

//----------------------------------------------------------------

/// Version Information.
#[derive(Copy, Clone, Debug)]
pub struct VersionInfo<'a> {
    bytes: &'a [u8],
}

impl<'a> VersionInfo<'a> {
    pub fn try_from(bytes: &'a [u8]) -> Result<VersionInfo<'a>> {
        // Alignment of 4 bytes is assumed everywhere,
        // unsafe code in this module relies on this
        if !bytes.as_ptr().aligned_to(4) {
            return Err(Error::Misaligned);
        }
        Ok(VersionInfo { bytes })
    }

    /// Gets the fixed file information if available.
    pub fn fixed(self) -> Option<&'a VS_FIXEDFILEINFO> {
        let mut fixed = None;
        self.visit(&mut fixed);
        fixed
    }

    /// Queries a string value by name.
    ///
    /// The returned string is UTF-16 encoded, convert to UTF-8 with `String::from_utf16` and friends.
    pub fn query_value<S: AsRef<str>>(self, key: &S) -> Option<&'a [u16]> {
        let mut this = QueryValue {
            key: key.as_ref(),
            value: None,
        };
        self.visit(&mut this);
        this.value
    }

    /// Iterates over all the strings.
    ///
    /// The closure's arguments are the lang, name and value for each string pair in the version information.
    pub fn for_each_string<F: FnMut(&'a [u16], &'a [u16], &'a [u16])>(self, mut f: F)

suffix:

    /// Gets the strings in a hash map.
    pub fn to_hash_map(self) -> HashMap<String, String> {
        let mut hash_map = HashMap::new();
        self.visit(&mut hash_map);
        hash_map
    }

    /// Parse the version information.
    ///
    /// Because of the super convoluted format, the visitor pattern is used.
    /// Implement the [`Visit` trait](trait.Visit.html) to get the desired information.
    ///
    /// To keep the API simple all errors are ignored, any invalid or corrupted data is skipped.
    pub fn visit(self, visit: &mut dyn Visit<'a>) {
        let words = unsafe {
            slice::from_raw_parts(self.bytes.as_ptr() as *const u16, self.bytes.len() / 2)
        };
        for version_info_r in Parser::new_bytes(words) {
            if let Ok(version_info) = version_info_r {
                const VS_FIXEDFILEINFO_SIZEOF: usize = mem::size_of::<VS_FIXEDFILEINFO>();
                let fixed = match mem::size_of_val(version_info.value) {
                    0 => None,
                    VS_FIXEDFILEINFO_SIZEOF => {
                        let value = unsafe { &*(version_info.value.as_ptr() as *const VS_FIXEDFILEINFO) };
                        Some(value)
                    },
                    _ => None, //return Err(Error::Invalid),
                };
                if !visit.version_info(version_info.key, fixed) {
                    continue;
                }
                // MS docs: This member is always equal to zero.
                for file_info_r in Parser::new_zero(version_info.children) {
                    if let Ok(file_info) = file_info_r {
                        if !visit.file_info(file_info.key) {
                            continue;
                        }
                        // MS docs: L"StringFileInfo"
                        if file_info.key == &self::strings::StringFileInfo {
                            // MS docs: This member is always equal to zero.
                            for string_table_r in Parser::new_zero(file_info.children) {
                                if let Ok(string_table) = string_table_r {
                                    if !visit.string_table(string_table.key) {
                                        continue;
                                    }
                                    for string_r in Parser::new_words(string_table.children) {
                                        if let Ok(string) = string_r {
                                            // Strip the nul terminator...
                                            let value = if string.value.last() != Some(&0) {
                                                string.value
                                            } else {
                                                &string.value[..string.value.len() - 1]
                                            };
                                            visit.string(string_table.key, string.key, value);
                                        }
                                    }
                                }
                            }
                        }
                        // MS docs: L"VarFileInfo"
                        else if file_info.key == &self::strings::VarFileInfo {
                            for var_r in Parser::new_bytes(file_info.children) {
                                if let Ok(var) = var_r {
                                    visit.var(var.key, var.value);
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}

//----------------------------------------------------------------

/// Visitor pattern to view the version information details.
#[allow(unused_variables)]
pub trait Visit<'a> {
    fn version_info(&mut self, key: &'a [u16], fixed: Option<&'a VS_FIXEDFILEINFO>) -> bool { true }
    fn file_info(&mut self, key: &'a [u16]) -> bool { true }
    fn string_table(&mut self, lang: &'a [u16]) -> bool { true }
    fn string(&mut self, lang: &'a [u16], key: &'a [u16], value: &'a [u16]) {}
    fn var(&mut self, key: &'a [u16], pairs: &'a [u16]) {}
}

impl<'a> Visit<'a> for HashMap<String, String> {
    fn string(&mut self, _lang: &'a [u16], key: &'a [u16], value: &'a [u16]) {
        self.insert(
            String::from_utf16_lossy(key),
            String::from_utf16_lossy(value),
        );
    }
}

impl<'a> Visit<'a> for Option<&'a VS_FIXEDFILEINFO> {
    fn version_info(&mut self, _key: &'a [u16], fixed: Option<&'a VS_FIXEDFILEINFO>) -> bool {
        *self = fixed;
        false
    }
}

struct ForEachString<F>(F);
impl<'a, F: FnMut(&'a [u16], &'a [u16], &'a [u16])> Visit<'a> for ForEachString<F> {
    fn string(&mut self, lang: &'a [u16], key: &'a [u16], value: &'a [u16]) {
        (self.0)(lang, key, value);
    }
}

struct QueryValue<'a, 's> {
    key: &'s str,
    value: Option<&'a [u16]>,
}
impl<'a, 's> Visit<'a> for QueryValue<'a, 's> {
    fn string(&mut self, _lang: &'a [u16], key: &'a [u16], value: &'a [u16]) {
        if Iterator::eq(self.key.chars().map(Ok), char::decode_utf16(key.iter().cloned())) {
            self.value = Some(value);
        }
    }
}

//----------------------------------------------------------------

/*
    "version_info": {
        "fixed": { .. },
        "strings": { .. },
    },
*/

#[cfg(feature = "serde")]
mod serde {
    use crate::util::serde_helper::*;
    use super::{VersionInfo};

    impl<'a> Serialize for VersionInfo<'a> {
        fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
            let mut state = serializer.serialize_struct("VersionInfo", 2)?;
            state.serialize_field("fixed", &self.fixed())?;
            state.serialize_field("strings", &self.to_hash_map())?;
            state.end()
        }
    }
}

//----------------------------------------------------------------

mod strings {
    #![allow(non_upper_case_globals)]
    // static VS_VERSION_INFO: [u16; 15] = [86u16, 83, 95, 86, 69, 82, 83, 73, 79, 78, 95, 73, 78, 70, 79];
    pub(crate) static StringFileInfo: [u16; 14] = [83u16, 116, 114, 105, 110, 103, 70, 105, 108, 101, 73, 110, 102, 111];
    pub(crate) static VarFileInfo: [u16; 11] = [86u16, 97, 114, 70, 105, 108, 101, 73, 110, 102, 111];
    // static Translation: [u16; 11] = [84u16, 114, 97, 110, 115, 108, 97, 116, 105, 111, 110];
    // static Comments: [u16; 8] = [67u16, 111, 109, 109, 101, 110, 116, 115];
    // static CompanyName: [u16; 11] = [67u16, 111, 109, 112, 97, 110, 121, 78, 97, 109, 101];
    // static FileDescription: [u16; 15] = [70u16, 105, 108, 101, 68, 101, 115, 99, 114, 105, 112, 116, 105, 111, 110];
    // static FileVersion: [u16; 11] = [70u16, 105, 108, 101, 86, 101, 114, 115, 105, 111, 110];
    // static InternalName: [u16; 12] = [73u16, 110, 116, 101, 114, 110, 97, 108, 78, 97, 109, 101];
    // static LegalCopyright: [u16; 14] = [76u16, 101, 103, 97, 108, 67, 111, 112, 121, 114, 105, 103, 104, 116];
    // static LegalTrademarks: [u16; 15] = [76u16, 101, 103, 97, 108, 84, 114, 97, 100, 101, 109, 97, 114, 107, 115];
    // static OriginalFilename: [u16; 16] = [79u16, 114, 105, 103, 105, 110, 97, 108, 70, 105, 108, 101, 110, 97, 109, 101];
    // static PrivateBuild: [u16; 12] = [80u16, 114, 105, 118, 97, 116, 101, 66, 117, 105, 108, 100];
    // static ProductName: [u16; 11] = [80u16, 114, 111, 100, 117, 99, 116, 78, 97, 109, 101];
    // static ProductVersion: [u16; 14] = [80u16, 114, 111, 100, 117, 99, 116, 86, 101, 114, 115, 105, 111, 110];
    // static SpecialBuild: [u16; 12] = [83u16, 112, 101, 99, 105, 97, 108, 66, 117, 105, 108, 100];
}

//----------------------------------------------------------------

#[cfg(test)]
pub(crate) fn test(version_info: VersionInfo<'_>) {
    let _fixed = version_info.fixed();
    let _hash_map = version_info.to_hash_map();
}

//----------------------------------------------------------------

/// Fixed file info constants.
pub mod image {
    pub const VS_FF_DEBUG: u32 = 0x01;
    pub const VS_FF_PRERELEASE: u32 = 0x02;
    pub const VS_FF_PATCHED: u32 = 0x04;
    pub const VS_FF_PRIVATEBUILD: u32 = 0x08;
    pub const VS_FF_INFOINFERRED: u32 = 0x10;
    pub const VS_FF_SPECIALBUILD: u32 = 0x20;

    pub const VOS_UNKNOWN: u32 = 0x00000000;
    pub const VOS_DOS: u32 = 0x00010000;
    pub const VOS_OS216: u32 = 0x00020000;
    pub const VOS_OS232: u32 = 0x00030000;
    pub const VOS_NT: u32 = 0x00040000;

    pub const VOS__WINDOWS16: u32 = 0x00000001;
    pub const VOS__PM16: u32 = 0x00000002;
    pub const VOS__PM32: u32 = 0x00000003;
    pub const VOS__WINDOWS32: u32 = 0x00000004;

    pub const VFT_UNKNOWN: u32 = 0x00000000;
    pub const VFT_APP: u32 = 0x00000001;
    pub const VFT_DLL: u32 = 0x00000002;
    pub const VFT_DRV: u32 = 0x00000003;
    pub const VFT_FONT: u32 = 0x00000004;
    pub const VFT_VXD: u32 = 0x00000005;
    pub const VFT_STATIC_LIB: u32 = 0x00000007;

    pub const VFT2_UNKNOWN: u32 = 0x00000000;
    pub const VFT2_DRV_PRINTER: u32 = 0x00000001;
    pub const VFT2_DRV_KEYBOARD: u32 = 0x00000002;
    pub const VFT2_DRV_LANGUAGE: u32 = 0x00000003;
    pub const VFT2_DRV_DISPLAY: u32 = 0x00000004;
    pub const VFT2_DRV_MOUSE: u32 = 0x00000005;
    pub const VFT2_DRV_NETWORK: u32 = 0x00000006;
    pub const VFT2_DRV_SYSTEM: u32 = 0x00000007;
    pub const VFT2_DRV_INSTALLABLE: u32 = 0x00000008;
    pub const VFT2_DRV_SOUND: u32 = 0x00000009;
    pub const VFT2_DRV_COMM: u32 = 0x0000000A;
    pub const VFT2_DRV_VERSIONED_PRINTER: u32 = 0x0000000C;

    pub const VFT2_FONT_RASTER: u32 = 0x00000001;
    pub const VFT2_FONT_VECTOR: u32 = 0x00000002;
    pub const VFT2_FONT_TRUETYPE: u32 = 0x00000003;
}

//----------------------------------------------------------------
// This is an absolutely god awful format...

#[derive(Copy, Clone, Debug, Eq, PartialEq)]
struct TLV<'a> {
    pub key: &'a [u16],
    pub value: &'a [u16],    // DWORD aligned
    pub children: &'a [u16], // DWORD aligned
}

#[derive(Copy, Clone, Debug, Eq, PartialEq)]
enum ValueLengthType {
    Zero,
    Bytes,
    Words,
}

#[derive(Clone)]
struct Parser<'a> {
    words: &'a [u16],
    vlt: ValueLengthType,
}

impl<'a> Iterator for Parser<'a> {
    type Item = Result<TLV<'a>>;
    fn next(&mut self) -> Option<Result<TLV<'a>>> {
        if self.words.len() == 0 {
            return None;
        }
        let result = parse_tlv(self);
        // If the parser errors, ensure the Iterator stops
        if result.is_err() {
            self.words = &self.words[self.words.len()..];
        }
        Some(result)
    }
}

impl<'a> Parser<'a> {
    pub(crate) fn new_zero(words: &'a [u16]) -> Parser<'a> {
        Parser { words, vlt: ValueLengthType::Zero }
    }
    pub(crate) fn new_bytes(words: &'a [u16]) -> Parser<'a> {
        Parser { words, vlt: ValueLengthType::Bytes }
    }
    pub(crate) fn new_words(words: &'a [u16]) -> Parser<'a> {
        Parser { words, vlt: ValueLengthType::Words }
    }
}

fn parse_tlv<'a>(state: &mut Parser<'a>) -> Result<TLV<'a>> {
    let mut words = state.words;
    // Parse the first three words from the TLV structure:
    // wLength, wValueLength and wType (plus at least zero terminator of szKey)
    if words.len() < 4 {
        return Err(Error::Invalid);
    }
    // This is tricky, the struct contains fixed and variable length parts
    // However the length field includes the size of the fixed part
    // Further complicating things, if the variable length part is absent the total length is set to zero (?!)
let length = cmp::max(4, words[0] as usize / 2).align_to(2); // Oh god why, interpret the value_length let value_length = match state.vlt { ValueLengthType::Zero if words[1] == 0 => 0, ValueLengthType::Zero => return Err(Error::Invalid), ValueLengthType::Bytes => words[1] as usize / 2, ValueLengthType::Words => words[1] as usize, }; // let wType = words[2]; // Split the input where this structure ends and the next sibling begins if length > words.len() { return Err(Error::Invalid); } state.words = &words[length..]; words = &words[..length]; // Parse the nul terminated szKey let key = wstrn(&words[3..]); if words[3..].len() == key.len() { return Err(Error::Invalid); } // Padding for the Value words = &words[key.len().align_to(2) + 4..]; // Split the remaining words between the Value and Children if value_length > words.len() { return Err(Error::Invalid); } let value = &words[..value_length]; let children = &words[value.len().align_to(2)..]; Ok(TLV { key, value, children }) } #[test] fn test_parse_tlv_oob() { let mut parser; // TLV header too short parser = Parser::new_zero(&[0, 0]); assert_eq!(parser.next(), Some(Err(Error::Invalid))); assert_eq!(parser.next(), None); // TLV length field larger than the data parser = Parser::new_zero(&[12, 0, 0, 0]); assert_eq!(parser.next(), Some(Err(Error::Invalid))); assert_eq!(parser.next(), None); // TLV key not nul terminated parser = Parser::new_zero(&[16, 0, 1, 20, 20, 20, 20, 20]); assert_eq!(parser.next(), Some(Err(Error::Invalid))); assert_eq!(parser.next(), None); // TLV value field larger than the data parser = Parser::new_zero(&[8, 10, 0, 0, 0, 0]); assert_eq!(parser.next(), Some(Err(Error::Invalid))); assert_eq!(parser.next(), None); }
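//----------------------------------------------------------------
// Illustrative sketches added alongside the existing out-of-bounds tests;
// they rely only on items defined in this module. "040904B0" is the
// conventional US English / Unicode translation pair from the VERSIONINFO
// docs linked above.
#[test]
fn test_parse_language_pair() {
    let lang: Vec<u16> = "040904B0".encode_utf16().collect();
    let language = Language::parse(&lang).unwrap();
    assert_eq!(language.lang_id, 0x0409);
    assert_eq!(language.charset_id, 0x04B0);
    // `Display` round-trips back to the hex form.
    assert_eq!(language.to_string(), "040904B0");
}
// A hand-built TLV happy path for `Parser::new_zero`: wLength = 12 bytes
// (six words) covering the three header words, the nul-terminated key L"A"
// and one word of padding; wValueLength must be zero in this mode.
#[test]
fn test_parse_tlv_minimal() {
    let words = [12u16, 0, 1, 'A' as u16, 0, 0];
    let mut parser = Parser::new_zero(&words);
    let tlv = parser.next().unwrap().unwrap();
    assert_eq!(tlv.key, &['A' as u16][..]);
    assert!(tlv.value.is_empty());
    assert!(tlv.children.is_empty());
    assert_eq!(parser.next(), None);
}
// Querying a string value typically looks like this (sketch; assumes a
// `version_info: VersionInfo` obtained from a PE resource elsewhere):
//
//     if let Some(wide) = version_info.query_value(&"ProductName") {
//         let product_name = String::from_utf16_lossy(wide);
//     }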
texture.rs
// texture.rs
// Creation and handling of images and textures.
// (c) 2019, Ryan McGowan <[email protected]>

//! Loading and management of textures.

use gfx_backend_metal as backend;
use nalgebra_glm as glm;

use gfx_hal::{
    command::{BufferImageCopy, CommandBuffer, OneShot},
    format::{Aspects, Format},
    image::{Access, Extent, Layout, Offset, SubresourceLayers, SubresourceRange, Usage},
    memory::{Barrier, Dependencies},
    pso::PipelineStage,
    Backend as GfxBackend, Device, Graphics, Limits,
};
use image::{Rgba, RgbaImage};
use nalgebra_glm::Mat3;
use serde::Deserialize;
use std::{mem, ops::Range};

use self::backend::Backend;
use crate::{
    error::Error,
    render::BufferObject,
    resource::ResourceManager,
    serial::{Filename, Index, Size},
};

/// Calculates the total memory size of all the textures given.
// TODO: I probably don't need this.
pub fn total_texture_size(
    textures: &[Index],
    resource_manager: &ResourceManager,
    limits: Limits,
) -> u64 {
    textures
        .iter()
        .map(|texture| {
            Texture::image_data_size(
                resource_manager.textures[*texture].get_data().unwrap(),
                &limits,
            )
        })
        .sum()
}

// Just to make `serde` stop crying when deserializing `Texture`s.
fn default_range() -> Range<usize> {
    0..0
}

/// Owns texture data and handles Vulkan-related constructs like
/// `Image`s and `ImageView`s.
#[derive(Debug, Deserialize)]
pub struct Texture {
    pub index: Index,
    /// The size in texels.
    pub size: Size,
    pub file: Filename,
    /// When this `Texture` is bound to buffer memory, this stores the range of bytes within
    /// the buffer that this `Texture` occupies.
    #[serde(default = "default_range", skip)]
    pub buffer_memory_range: Range<usize>,
    /// The [`ImageView`] for the pipeline to use.
    #[serde(skip)]
    pub image_view: Option<<Backend as GfxBackend>::ImageView>,
    /// The actual image data.
    #[serde(skip)]
    pub data: Option<RgbaImage>,
    /// This `Texture` as a Vulkan object.
    #[serde(skip)]
    pub image: Option<<Backend as GfxBackend>::Image>,
    /// A matrix precalculated based on the `Texture`'s size to scale
    /// all (u, v) coordinates to be in the [0.0, 1.0] range.
    #[serde(default = "glm::identity", skip)]
    pub normalization_matrix: Mat3,
    /// This `Texture`'s `DescriptorSet`.
    /// TODO: Will probably need to rework how the pipeline handles textures.
    #[serde(skip)]
    pub descriptor_set: Option<<Backend as GfxBackend>::DescriptorSet>,
}

impl Texture {
    /// Creates a new `Texture` and copies the texture data to the buffer.
    pub unsafe fn new(
        index: Index,
        device: &backend::Device,
        limits: &Limits,
        texture_data: RgbaImage,
        buffer_memory: &mut <Backend as GfxBackend>::Memory,
        buffer_memory_offset: u64,
    ) -> Result<Texture, Error> {
        // Create the Image.
        let image = device.create_image(
            gfx_hal::image::Kind::D2(texture_data.width(), texture_data.height(), 1, 1),
            1,
            Format::Rgba8Srgb,
            gfx_hal::image::Tiling::Optimal,
            Usage::TRANSFER_DST | Usage::SAMPLED,
            gfx_hal::image::ViewCapabilities::empty(),
        )?;

        // Copy texture data to the given buffer.
        let memory_requirement = device.get_image_requirements(&image);
        Texture::write_image_to_buffer(
            device,
            buffer_memory,
            buffer_memory_offset..memory_requirement.size + buffer_memory_offset,
            &texture_data,
            limits,
        )?;

        Ok(Texture {
            index,
            file: "".to_string(),
            size: Size {
                x: texture_data.width() as f32,
                y: texture_data.height() as f32,
            },
            normalization_matrix: glm::scaling2d(&glm::vec2(
                1.0 / texture_data.width() as f32,
                1.0 / texture_data.height() as f32,
            )),
            data: Some(texture_data),
            descriptor_set: None,
            image: Some(image),
            image_view: None,
            buffer_memory_range: buffer_memory_offset as usize
                ..(memory_requirement.size + buffer_memory_offset) as usize,
        })
    }

    /// Loads texture data from file and creates the `Texture`'s `Image`.
    pub fn initialize(
        &mut self,
        device: &<Backend as GfxBackend>::Device,
        color_format: Format,
    ) -> Result<(), Error> {
        let data = image::open(&self.file)?.to_rgba();
        let image = unsafe {
            device.create_image(
                gfx_hal::image::Kind::D2(data.width(), data.height(), 1, 1),
                1,
                color_format,
                gfx_hal::image::Tiling::Optimal,
                Usage::TRANSFER_DST | Usage::SAMPLED,
                gfx_hal::image::ViewCapabilities::empty(),
            )?
        };
        // Create the uv normalization matrix for this texture.
        self.normalization_matrix = glm::scaling2d(&glm::vec2(
            1.0 / data.width() as f32,
            1.0 / data.height() as f32,
        ));
        self.image = Some(image);
        self.data = Some(data);
        Ok(())
    }

    /// Copies the `Texture` data to the given buffer memory.
    pub unsafe fn buffer_data(
        &mut self,
        device: &<Backend as GfxBackend>::Device,
        buffer_memory: &<Backend as GfxBackend>::Memory,
        buffer_memory_offset: u64,
        limits: &Limits,
    ) -> Result<(), Error> {
        let memory_requirement = device.get_image_requirements(self.get_image()?);
        self.buffer_memory_range = buffer_memory_offset as usize
            ..(memory_requirement.size + buffer_memory_offset) as usize;
        Self::write_image_to_buffer(
            device,
            buffer_memory,
            self.buffer_memory_range.start as u64..self.buffer_memory_range.end as u64,
            self.get_data()?,
            limits,
        )?;
        Ok(())
    }

    /// Finds the memory size needed for the given texture.
    pub fn image_data_size(texture: &RgbaImage, limits: &Limits) -> u64 {
        let pixel_size = mem::size_of::<Rgba<u8>>() as u32;
        let row_size = pixel_size * texture.width();
        // Round each row up to the device's buffer copy pitch alignment
        // (see the note on the alignment trick below this file).
        let row_alignment_mask = limits.min_buffer_copy_pitch_alignment as u32 - 1;
        let row_pitch = (row_size + row_alignment_mask) & !row_alignment_mask;
        u64::from(row_pitch * texture.height())
    }

    /// Given the location of this Texture's image data in the image memory,
    /// binds the given memory and copies the data into the Image itself, then
    /// creates the ImageView.
    #[allow(clippy::too_many_arguments)] // CLIPPY HUSH
    pub unsafe fn copy_image_to_memory(
        &mut self,
        device: &<Backend as GfxBackend>::Device,
        image_memory: &<Backend as GfxBackend>::Memory,
        image_memory_offset: u64,
        command_pool: &mut gfx_hal::CommandPool<Backend, Graphics>,
        command_queue: &mut gfx_hal::CommandQueue<Backend, Graphics>,
        staging_buffer: &BufferObject,
        limits: &Limits,
    ) -> Result<(), Error> {
        device.bind_image_memory(
            &image_memory,
            image_memory_offset,
            &mut self.image.as_mut().unwrap(),
        )?;

        // Creating an Image is basically like drawing a regular frame except
        // the data gets rendered to memory instead of the screen, so we go
        // through the whole process of creating a command buffer, adding commands,
        // and submitting.
        let mut command_buffer = command_pool.acquire_command_buffer::<OneShot>();
        command_buffer.begin();

        // Set the Image to write mode.
        Texture::reformat_image(
            &mut command_buffer,
            (Access::empty(), Layout::Undefined),
            (Access::TRANSFER_WRITE, Layout::TransferDstOptimal),
            self.get_image()?,
            PipelineStage::TOP_OF_PIPE,
            PipelineStage::TRANSFER,
        );

        // Figure out the size of the texture data.
        let pixel_size = mem::size_of::<Rgba<u8>>() as u32;
        let row_size = pixel_size * self.size.x as u32;
        let row_alignment_mask = limits.min_buffer_copy_pitch_alignment as u32 - 1;
        let row_pitch = (row_size + row_alignment_mask) & !row_alignment_mask;

        // Copy the data.
        command_buffer.copy_buffer_to_image(
            &staging_buffer.buffer,
            self.get_image()?,
            Layout::TransferDstOptimal,
            &[BufferImageCopy {
                buffer_offset: (self.buffer_memory_range.start - staging_buffer.offset) as u64,
                buffer_width: (row_pitch / pixel_size) as u32,
                buffer_height: self.size.y as u32,
                image_layers: SubresourceLayers {
                    aspects: Aspects::COLOR,
                    level: 0,
                    layers: 0..1,
                },
                image_offset: Offset { x: 0, y: 0, z: 0 },
                image_extent: Extent {
                    width: self.size.x as u32,
                    height: self.size.y as u32,
                    depth: 1,
                },
            }],
        );

        // Set the Image to read mode.
        Texture::reformat_image(
            &mut command_buffer,
            (Access::TRANSFER_WRITE, Layout::TransferDstOptimal),
            (Access::SHADER_READ, Layout::ShaderReadOnlyOptimal),
            self.get_image()?,
            PipelineStage::TRANSFER,
            PipelineStage::FRAGMENT_SHADER,
        );

        // Synchronize and then perform the rendering.
        command_buffer.finish();
        let upload_fence = device.create_fence(false)?;
        command_queue.submit_nosemaphores(Some(&command_buffer), Some(&upload_fence));
        device.wait_for_fence(&upload_fence, core::u64::MAX)?;
        device.destroy_fence(upload_fence);
        command_pool.free(Some(command_buffer));

        // Create the ImageView.
        self.image_view = Some(device.create_image_view(
            self.get_image()?,
            gfx_hal::image::ViewKind::D2,
            // Changing this to match the renderer's surface_color_format does funky things.
            // TODO: Investigate why this happens.
            Format::Rgba8Srgb,
            gfx_hal::format::Swizzle::NO,
            SubresourceRange {
                aspects: Aspects::COLOR,
                levels: 0..1,
                layers: 0..1,
            },
        )?);
        Ok(())
    }

    // Extracted from copy_image_to_memory to clean it up a bit.
    /// Switches an Image to the given state/format, handling the synchronization
    /// involved.
    fn reformat_image(
        command_buffer: &mut CommandBuffer<Backend, Graphics>,
        source_format: (Access, Layout),
        target_format: (Access, Layout),
        resource: &<Backend as GfxBackend>::Image,
        source_pipeline_stage: PipelineStage,
        target_pipeline_stage: PipelineStage,
    ) {
        let image_barrier = Barrier::Image {
            states: source_format..target_format,
            target: resource,
            families: None,
            range: SubresourceRange {
                aspects: Aspects::COLOR,
                levels: 0..1,
                layers: 0..1,
            },
        };
        unsafe {
            command_buffer.pipeline_barrier(
                source_pipeline_stage..target_pipeline_stage,
                Dependencies::empty(),
                &[image_barrier],
            )
        };
    }

    /// Copies an `RgbaImage` containing texture data to the specified buffer.
    unsafe fn write_image_to_buffer(
        device: &backend::Device,
        buffer_memory: &<Backend as GfxBackend>::Memory,
        data_range: Range<u64>,
        image: &RgbaImage,
        limits: &Limits,
    ) -> Result<(), Error> {
        // RGBA8 is 4 bytes per pixel.
        let pixel_size = mem::size_of::<Rgba<u8>>() as u32;
        assert_eq!(pixel_size, 32 / 8);

        // Calculate the image size.
        // TODO: Not sure why I have a function to do this but then write it out twice.
        let row_size = pixel_size * image.width();
        let row_alignment_mask = limits.min_buffer_copy_pitch_alignment as u32 - 1;
        let row_pitch = (row_size + row_alignment_mask) & !row_alignment_mask;

        let mut writer = device.acquire_mapping_writer::<u8>(buffer_memory, data_range)?;
        // Write the data row by row.
        for row in 0..image.height() {
            let image_offset = (row * row_size) as usize;
            let data = &(**image)[image_offset..(row_size as usize + image_offset)];
            let completed_row_size = (row * row_pitch) as usize;
            writer[completed_row_size..(data.len() + completed_row_size)].copy_from_slice(data);
        }
        device.release_mapping_writer(writer)?;
        Ok(())
    }

    /// A method for getting the `image` field because `unwrap()` unhelpfully moves
    /// instead of borrowing.
    pub fn get_image(&self) -> Result<&<Backend as GfxBackend>::Image, Error> {
        match &self.image {
            Some(image) => Ok(image),
            None => Err(Error::None()),
        }
    }

    /// A method for getting the `image_view` field because `unwrap()` unhelpfully moves
    /// instead of borrowing.
    pub fn get_image_view(&self) -> Result<&<Backend as GfxBackend>::ImageView, Error> {
        match &self.image_view {
            Some(image_view) => Ok(image_view),
            None => Err(Error::None()),
        }
    }

    /// A method for getting the `data` field because `unwrap()` unhelpfully moves instead
    /// of borrowing.
    pub fn get_data(&self) -> Result<&RgbaImage, Error> {
        match &self.data {
            Some(data) => Ok(data),
            None => Err(Error::None()),
        }
    }

    /// Releases resources held by this object.
    pub unsafe fn destroy(self, device: &backend::Device) {
        device.destroy_image(self.image.unwrap());
        device.destroy_image_view(self.image_view.unwrap());
    }
}
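A note on the row-pitch "wizardry" the TODO comments ask about: `(row_size + mask) & !mask` is the standard trick for rounding an integer up to the next multiple of a power-of-two alignment, which is what per-row buffer-to-image copies require. A minimal standalone sketch (assuming the alignment is a power of two, which these Vulkan-style limits are in practice):

fn align_up(value: u32, alignment: u32) -> u32 {
    // A power-of-two alignment minus one is a mask of the low bits;
    // adding the mask and then clearing those bits rounds up.
    debug_assert!(alignment.is_power_of_two());
    let mask = alignment - 1;
    (value + mask) & !mask
}

fn main() {
    // A 3-texel RGBA row is 12 bytes; with a 256-byte pitch alignment,
    // consecutive rows must start 256 bytes apart.
    assert_eq!(align_up(12, 256), 256);
    assert_eq!(align_up(256, 256), 256);
    assert_eq!(align_up(257, 256), 512);
}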
objects.rs
// objects.rs

use ggez::input::keyboard;
use ggez::{graphics, Context, GameResult};
use graphics::{Mesh, MeshBuilder, DrawParam};

use crate::game;
use game::movement::Movement;
use game::Draw;

//#region Ship

/// The Ship.\
/// Width and height are sort of switched here.\
/// This is because the mesh is made to face the right but is then rotated upwards.\
/// I thought it would make more sense like this, but it kind of didn't; whatever, who cares.\
pub struct Ship {
    pub width: f32,
    pub height: f32,
    pub x: f32,
    pub y: f32,
    pub mov: Movement,
    /// Current rotation in radians
    pub rotation: f32,
    /// Force to add to the movement calculation when using the rocket
    pub movement_force: f32,
    /// Speed of rotation in radians per tick
    pub rotation_speed: f32,
    /// How many times the ship can fire a laser per second
    pub fire_rate: f32,
    /// Time the ship last fired
    pub last_fire: std::time::Instant,
    /// Whether the ship is currently using its rocket
    pub moving: bool
}

impl Ship {
    pub fn new(ctx: &Context) -> Ship {
        let (ctx_width, ctx_height) = graphics::drawable_size(ctx);
        let ship_width = 18.0;
        let ship_height = 20.0;
        Ship {
            width: ship_width,
            height: ship_height,
            x: (ctx_width - ship_width) / 2.0,
            y: (ctx_height - ship_height) / 2.0,
            rotation: (3.0 / 2.0) * std::f32::consts::PI, // Start facing up
            movement_force: 5.0,
            rotation_speed: 0.1,
            mov: Movement::new(0.3, 10.0),
            fire_rate: 5.0,
            last_fire: std::time::Instant::now(),
            moving: false
        }
    }

    /// Handle keyboard inputs and update the location of the Ship accordingly
    pub fn update_movement(&mut self, ctx: &Context) {
        /* The current implementation does not allow external forces.
        This could easily be achieved by having this call take additional params
        which set some force before the movement calculation, like gravity.
        This is (currently) not needed for this game. */
        self.mov.force_x = 0.0;
        self.mov.force_y = 0.0;
        self.moving = false;

        if keyboard::is_key_pressed(ctx, keyboard::KeyCode::A) {
            self.rotation -= self.rotation_speed;
        }
        if keyboard::is_key_pressed(ctx, keyboard::KeyCode::D) {
            self.rotation += self.rotation_speed;
        }
        if keyboard::is_key_pressed(ctx, keyboard::KeyCode::W) {
            self.mov.force_x += self.rotation.cos() * self.movement_force;
            self.mov.force_y += self.rotation.sin() * self.movement_force;
            self.moving = true;
        }

        // The Movement struct handles the physics
        self.mov.update();
        self.x += self.mov.speed_x;
        self.y += self.mov.speed_y;
    }

    /// Add a laser to the gamestate, appearing from the ship
    pub fn shoot(&self, lasers: &mut Vec<game::Laser>) {
        lasers.push(game::Laser::new(
            self.x + self.height / 2.0,
            self.y - self.width / 2.0,
            self.rotation)
        );
    }

    pub fn debug_string(&self) -> String {
        format!(
            "Force x: {}\n\
            Force y: {}\n\
            Acceleration x: {}\n\
            Acceleration y: {}\n\
            Speed x: {}\n\
            Speed y: {}\n\
            Rotation speed: {}\n",
            self.mov.force_x,
            self.mov.force_y,
            self.mov.acceleration_x,
            self.mov.acceleration_y,
            self.mov.speed_x,
            self.mov.speed_y,
            self.rotation_speed
        )
    }
}

impl game::Draw for Ship {
    fn mesh(&self, ctx: &mut Context) -> GameResult<Mesh> {
        let mut mesh = MeshBuilder::new();
        /* With these points you could make the center of the mesh be the actual
        center of the triangle. This would make writing hit detection easier, and
        would make the offset trivial. But I did not immediately implement it like
        this and I don't want to redo the rocket fire mesh right now, so I am
        leaving this comment instead:
            [-self.height/2.0, -self.width/2.0],
            [ self.height/2.0,  0.0],
            [-self.height/2.0,  self.width/2.0],
            [-self.height/2.0, -self.width/2.0]
        */
        // Could be a polygon as well
        mesh.line(
            &[
                [0.0, 0.0],
                [self.height, -self.width / 2.0],
                [0.0, -self.width],
                [0.0, 0.0]
            ],
            1.3,
            graphics::WHITE
        )?;
        // Draw fire behind the rocket
        if self.moving {
            mesh.line(
                &[
                    [-0.0,               -0.1 * self.width],
                    [-0.3 * self.height, -0.233 * self.width],
                    [-0.2 * self.height, -0.366 * self.width],
                    [-0.6 * self.height, -0.5 * self.width],
                    [-0.2 * self.height, -0.633 * self.width],
                    [-0.3 * self.height, -0.766 * self.width],
                    [-0.0 * self.height, -0.9 * self.width]
                ],
                1.3,
                graphics::WHITE
            )?;
        }
        mesh.build(ctx)
    }

    fn draw_param(&self) -> DrawParam {
        DrawParam::new()
            .dest([self.x, self.y])
            .offset([0.5 * self.height, 0.5 * -self.width])
            .rotation(self.rotation)
    }
}
//#endregion

//#region Laser
/// Laser that has been fired from the Ship
pub struct Laser {
    pub x: f32,
    pub y: f32,
    pub rotation: f32,
    speed: f32,
    width: f32
}

impl Laser {
    pub fn new(x: f32, y: f32, rotation: f32) -> Laser {
        Laser {
            x,
            y,
            rotation,
            speed: 17.0,
            width: 15.0
        }
    }

    pub fn update(&mut self) {
        self.x += self.rotation.cos() * self.speed;
        self.y += self.rotation.sin() * self.speed;
    }
}

impl Draw for Laser {
    fn mesh(&self, ctx: &mut Context) -> ggez::GameResult<graphics::Mesh> {
        MeshBuilder::new()
            .line(
                &[
                    [0.0, 0.0],
                    [15.0, 0.0]
                ],
                2.0,
                graphics::WHITE
            )?
            .build(ctx)
    }

    fn draw_param(&self) -> DrawParam {
        DrawParam::new()
            .dest([self.x, self.y])
            .offset([0.5 * self.width, 0.0])
            .rotation(self.rotation)
    }
}
//#endregion

//#region Asteroid
/// The 3 different asteroid sizes
#[derive(Copy, Clone)]
pub enum AsteroidSize {
    Big,
    Medium,
    Small
}

/// Factor to multiply the mesh with
const ASTEROID_BIG: f32 = 40.0;
/// Factor to multiply the mesh with
const ASTEROID_MEDIUM: f32 = 30.0;
/// Factor to multiply the mesh with
const ASTEROID_SMALL: f32 = 20.0;

#[derive(Clone)]
pub struct Asteroid {
    pub x: f32,
    pub y: f32,
    rotation: f32,
    rotation_speed: f32,
    speed_x: f32,
    speed_y: f32,
    size: AsteroidSize,
    mirrored: bool,
    /// Index into the ASTEROID_MESHES array
    mesh: usize
}

const ASTEROID_MAX_MOVEMENT_SPEED: f32 = 5.0;
const ASTEROID_MAX_ROTATION_SPEED: f32 = 0.1;
/// The width/height of the safezone of the ship.\
/// Asteroids do not spawn here
const SHIP_SAFEZONE: f32 = 300.0;

/// Array of different random meshes for the asteroids.\
/// The diameter before multiplication with the asteroid size should be about 2.0
/*
const ASTEROID_MESHES: [fn(f32) -> [[f32; 2]; 13]; 1] = [
    |size| [
        [0.0 * size, 0.0 * size], [1.0 * size, 0.0 * size], [2.5 * size, 1.0 * size],
        [2.5 * size, 1.3 * size], [1.5 * size, 1.7 * size], [2.4 * size, 1.9 * size],
        [1.5 * size, 2.8 * size], [0.9 * size, 2.6 * size], [0.4 * size, 2.4 * size],
        [-0.3 * size, 1.2 * size], [-0.1 * size, 0.8 * size], [0.3 * size, 1.0 * size],
        [0.0 * size, 0.0 * size]
    ]
];
*/
/// Array of different random meshes for the asteroids
const ASTEROID_MESHES: [fn(f32) -> [[f32; 2]; 13]; 1] = [
    |size| [
        [-1.0 * size, -0.8 * size], [0.0 * size, -1.0 * size], [1.0 * size, -0.3 * size],
        [1.1 * size, 0.3 * size], [0.4 * size, 0.5 * size], [1.0 * size, 0.8 * size],
        [0.5 * size, 1.3 * size], [-0.1 * size, 1.2 * size], [-0.6 * size, 1.0 * size],
        [-1.3 * size, 0.2 * size], [-1.1 * size, -0.2 * size], [-0.7 * size, 0.0 * size],
        [-1.0 * size, -0.8 * size]
    ]
];

impl Asteroid {
    pub fn new(
        ship_x: f32,
        ship_y: f32,
        size_option: Option<AsteroidSize>,
        ctx: &mut Context,
    ) -> Asteroid {
        let (mut x, mut y);
        loop {
            // Can't shadow via pattern :(
            let (x_, y_) = game::random_place(ctx);
            x = x_;
            y = y_;
            // Break out when the coords are not in the safezone around the ship.
            if (ship_x - x).abs() > SHIP_SAFEZONE / 2.0
                || (ship_y - y).abs() > SHIP_SAFEZONE / 2.0 {
                break;
            }
        }

        let size = match size_option {
            Some(size) => size,
            None => match (rand::random::<f32>() * 3.0 + 1.0).floor() as u8 {
                1 => AsteroidSize::Small,
                2 => AsteroidSize::Medium,
                3 => AsteroidSize::Big,
                _ => AsteroidSize::Small
            }
        };
        /*
        let mirrored = match rand::random::<f32>().round() as u8 {
            1 => false,
            2 => true,
            _ => true
        };
        */
        let speed_x =
            rand::random::<f32>() * ASTEROID_MAX_MOVEMENT_SPEED * 2.0 - ASTEROID_MAX_MOVEMENT_SPEED;
        let speed_y =
            rand::random::<f32>() * ASTEROID_MAX_MOVEMENT_SPEED * 2.0 - ASTEROID_MAX_MOVEMENT_SPEED;
        let rotation_speed =
            rand::random::<f32>() * ASTEROID_MAX_ROTATION_SPEED * 2.0 - ASTEROID_MAX_ROTATION_SPEED;
        //let rotation = rand::random::<f32>() * (2.0 * std::f32::consts::PI);
        let rotation = 0.0;
        let mirrored = false;
        let mesh = (rand::random::<f32>() * ASTEROID_MESHES.len() as f32).floor() as usize;

        // Asteroid go brrr
        Asteroid {
            x,
            y,
            size,
            speed_x,
            speed_y,
            rotation_speed,
            rotation,
            mirrored,
            mesh
        }
    }

    pub fn update(&mut self) {
        self.x += self.speed_x;
        self.y += self.speed_y;
        self.rotation += self.rotation_speed;
    }

    /// Returns whether the given point is within the hitbox of this asteroid
    pub fn in_hitbox(&self, (x, y): (f32, f32)) -> bool {
        let size = match &self.size {
            AsteroidSize::Big => ASTEROID_BIG,
            AsteroidSize::Medium => ASTEROID_MEDIUM,
            AsteroidSize::Small => ASTEROID_SMALL
        };
        // I am going to take 2.0 as the raw diameter of an asteroid
        let radius = 2.0 * size / 2.0;
        ((self.x - x).powf(2.0) + (self.y - y).powf(2.0)).sqrt() < radius
    }

    /// Split this asteroid into 2 of the next smaller size
    pub fn split(&self) -> Option<[Asteroid; 2]> {
        let size = match self.size {
            AsteroidSize::Big => AsteroidSize::Medium,
            AsteroidSize::Medium => AsteroidSize::Small,
            AsteroidSize::Small => return None
        };

        let speed_x = rand::random::<f32>() * ASTEROID_MAX_MOVEMENT_SPEED;
        let speed_y = rand::random::<f32>() * ASTEROID_MAX_MOVEMENT_SPEED;
        let rotation_speed = rand::random::<f32>() * ASTEROID_MAX_ROTATION_SPEED;
        let asteroid1 = Asteroid { speed_x, speed_y, rotation_speed, size, ..*self };

        let speed_x = rand::random::<f32>() * ASTEROID_MAX_MOVEMENT_SPEED;
        let speed_y = rand::random::<f32>() * ASTEROID_MAX_MOVEMENT_SPEED;
        let rotation_speed = rand::random::<f32>() * ASTEROID_MAX_ROTATION_SPEED;
        let asteroid2 = Asteroid { speed_x, speed_y, rotation_speed, size, ..*self };

        Some([asteroid1, asteroid2])
    }
}

/*
enum SplitResult {
    New([Asteroid; 2]),
    None
}
*/

impl Draw for Asteroid {
    fn mesh(&self, ctx: &mut Context) -> GameResult<Mesh> {
        let size = match &self.size {
            AsteroidSize::Big => ASTEROID_BIG,
            AsteroidSize::Medium => ASTEROID_MEDIUM,
            AsteroidSize::Small => ASTEROID_SMALL
        };
        let mut mesh = MeshBuilder::new();
        mesh.line(
            &ASTEROID_MESHES[self.mesh](size),
            1.0,
            graphics::WHITE
        )?;
        /* I am going to take 2.0 as the raw diameter of an asteroid
        let radius = 2.0 * size / 2.0;
        // DEBUG
        mesh.circle(
            graphics::DrawMode::stroke(1.0),
            [0.0, 0.0],
            radius,
            0.2,
            graphics::WHITE
        );
        */
        mesh.build(ctx)
    }

    fn draw_param(&self) -> DrawParam {
        let size = match &self.size {
            AsteroidSize::Big => ASTEROID_BIG,
            AsteroidSize::Medium => ASTEROID_MEDIUM,
            AsteroidSize::Small => ASTEROID_SMALL
        };
        let mut param = DrawParam::new()
            .dest([self.x, self.y])
            //.offset([0.5 * size, 0.5 * -size])
            .rotation(self.rotation);
        if self.mirrored {
            param = param.scale([-1.0, 1.0]);
        }
        param
    }
}
//#endregion Asteroid
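A side note on `in_hitbox` above: it is a plain point-in-circle test. If the `sqrt` ever mattered for performance, the same answer falls out of comparing squared distances, since both sides are non-negative. A small self-contained sketch (the coordinates are made-up illustration values):

/// Point-in-circle test without the square root:
/// |p - c| < r  is equivalent to  |p - c|^2 < r^2  for non-negative r.
fn in_circle(cx: f32, cy: f32, radius: f32, x: f32, y: f32) -> bool {
    let (dx, dy) = (cx - x, cy - y);
    dx * dx + dy * dy < radius * radius
}

fn main() {
    // A Small asteroid scales the unit mesh by 20.0, so with a raw
    // diameter of 2.0 its hit radius is 2.0 * 20.0 / 2.0 = 20.0.
    assert!(in_circle(100.0, 100.0, 20.0, 110.0, 110.0)); // ~14.1 units away
    assert!(!in_circle(100.0, 100.0, 20.0, 130.0, 100.0)); // 30 units away
}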
identifier_body
objects.rs
use ggez::input::keyboard; use ggez::{graphics, Context, GameResult}; use graphics::{Mesh, MeshBuilder, DrawParam}; use crate::game; use game::movement::Movement; use game::Draw; //#region Ship /// The Ship.\ /// Width and Height is sort of switched here.\ /// This is because the mesh is made to face the right but then rotated upwards.\ /// I thought it would make more sense like this but it kind of didn't but whateer who cares.\ pub struct Ship { pub width: f32, pub height: f32, pub x: f32, pub y: f32, pub mov: Movement, /// Current rotation in radials pub rotation: f32, /// Force to add to the movement calculation when using rocket pub movement_force: f32, /// Speed of rotation in radials per tick pub rotation_speed: f32, /// How many time the ship can fire a laser per second pub fire_rate: f32, /// Time the ship fired for the last time pub last_fire: std::time::Instant, /// If the ship is currently using it's rocket pub moving: bool } impl Ship { pub fn new(ctx: &Context) -> Ship { let (ctx_width, ctx_height) = graphics::drawable_size(ctx); let ship_width = 18.0; let ship_height = 20.0; Ship { width: ship_width, height: ship_height, x: (ctx_width - ship_width)/ 2.0, y: (ctx_height- ship_height) / 2.0, rotation: (3.0 / 2.0) * std::f32::consts::PI, // Start facing up movement_force: 5.0, rotation_speed: 0.1, mov: Movement::new(0.3, 10.0), fire_rate: 5.0, last_fire: std::time::Instant::now(), moving: false } } /// Handle keyboard inputs and update the location of the Ship accordingly pub fn update_movement(&mut self, ctx: &Context) { /* The current implementation does not allow external forces This could be easily achieved by having this call take additional params which set some force before movement calculation like gravity. This is (currently) not needed for this game.*/ self.mov.force_x = 0.0; self.mov.force_y = 0.0; self.moving = false; if keyboard::is_key_pressed(ctx, keyboard::KeyCode::A) { self.rotation -= self.rotation_speed; } if keyboard::is_key_pressed(ctx, keyboard::KeyCode::D) { self.rotation += self.rotation_speed; } if keyboard::is_key_pressed(ctx, keyboard::KeyCode::W) { self.mov.force_x += self.rotation.cos() * self.movement_force; self.mov.force_y += self.rotation.sin() * self.movement_force; self.moving = true; } // Movement structs handles the physics self.mov.update(); self.x += self.mov.speed_x; self.y += self.mov.speed_y; } /// Add a laser to the gamestate appearing from the ship pub fn shoot(&self, lasers: &mut Vec<game::Laser>) { lasers.push(game::Laser::new( self.x + self.height /2.0, self.y - self.width / 2.0, self.rotation) ); } pub fn debug_string(&self) -> String { format!( "Force x: {}\n\ Force y: {}\n\ Acceleration x: {}\n\ Acceleration y: {}\n\ Speed x: {}\n\ Speed y: {}\n\ Rotation speed: {}\n", self.mov.force_x, self.mov.force_y, self.mov.acceleration_x, self.mov.acceleration_y, self.mov.speed_x, self.mov.speed_y, self.rotation_speed ) } } impl game::Draw for Ship { fn mesh(&self, ctx: &mut Context) -> GameResult<Mesh> { let mut mesh = MeshBuilder::new(); /* With these points you could make the center of the mesh be the actual center of the triangle This would make writing hit detection easier, and would make the offset trivial. 
But I did not immediately implement it like this and I don't want to redo the rocket fire mesh right now, so I am leaving this comment instead [-self.height/2.0, -self.width/2.0], [ self.height/2.0, 0.0], [-self.height/2.0, self.width/2.0], [-self.height/2.0, -self.width/2.0] */ // Could be a polygon as well mesh.line( &[ [0.0, 0.0], [self.height, -self.width / 2.0], [0.0, -self.width], [0.0,0.0] ], 1.3, graphics::WHITE )?; // Draw fire behind rocket if self.moving { mesh.line( &[ [ - 0.0, - 0.1 * self.width], [ - 0.3 * self.height, - 0.233 * self.width], [ - 0.2 * self.height, - 0.366 * self.width], [ - 0.6 * self.height, - 0.5 * self.width], [ - 0.2 * self.height, - 0.633 * self.width], [ - 0.3 * self.height, - 0.766 * self.width], [ - 0.0 * self.height, - 0.9 * self.width] ], 1.3, graphics::WHITE )?; } mesh.build(ctx) } fn draw_param(&self) -> DrawParam { DrawParam::new() .dest([self.x, self.y]) .offset([0.5 * self.height, 0.5 * -self.width]) .rotation(self.rotation) } } //#endregion //#region Laser /// Laser that has been fired from Ship pub struct Laser { pub x: f32, pub y: f32, pub rotation: f32, speed: f32, width: f32 } impl Laser { pub fn new(x: f32, y: f32, rotation: f32) -> Laser { Laser { x, y, rotation, speed: 17.0, width: 15.0 } } pub fn update(&mut self) { self.x += self.rotation.cos() * self.speed; self.y += self.rotation.sin() * self.speed; } } impl Draw for Laser { fn mesh(&self, ctx: &mut Context) -> ggez::GameResult<graphics::Mesh> { MeshBuilder::new() .line( &[ [0.0,0.0], [15.0,0.0] ], 2.0, graphics::WHITE )? .build(ctx) } fn draw_param(&self) -> DrawParam { DrawParam::new() .dest([self.x, self.y]) .offset([0.5 * self.width, 0.0]) .rotation(self.rotation) } } //#endregion //#region Asteroid /// The 3 different asteroid sizes #[derive(Copy, Clone)] pub enum AsteroidSize { Big, Medium, Small } /// Factor to multiple mesh with const ASTEROID_BIG: f32 = 40.0; /// Factor to multiple mesh with const ASTEROID_MEDIUM: f32 = 30.0; /// Factor to multiple mesh with const ASTEROID_SMALL: f32 = 20.0; #[derive(Clone)] pub struct Asteroid { pub x: f32, pub y: f32, rotation: f32, rotation_speed: f32, speed_x: f32, speed_y: f32, size: AsteroidSize, mirrored: bool, /// Index for the asteroid_mashes var mesh: usize } const ASTEROID_MAX_MOVEMENT_SPEED: f32 = 5.0; const ASTEROID_MAX_ROTATION_SPEED: f32 = 0.1; /// The width/height of the safezone of the ship.\ /// Asteroids do not spawn here const SHIP_SAFEZONE: f32 = 300.0; /// Array of different random meshes for the asteroids.\ /// The diameter before mulitplication with the asteroid size should be about 2.0 /* const ASTEROID_MESHES: [fn(f32) -> [[f32;2];13];1] = [ |size| [ [0.0 *size, 0.0 *size], [1.0 *size, 0.0 *size], [2.5 *size, 1.0 *size], [2.5 *size, 1.3 *size], [1.5 *size, 1.7 *size], [2.4 *size, 1.9 *size], [1.5 *size, 2.8 *size], [0.9 *size, 2.6 *size], [0.4 *size, 2.4 *size], [-0.3*size, 1.2 *size], [-0.1*size, 0.8 *size], [0.3 *size, 1.0 *size], [0.0 *size, 0.0 *size] ] ]; */ /// Array of different random meshes for the asteroids const ASTEROID_MESHES: [fn(f32) -> [[f32;2];13];1] = [ |size| [ [-1.0 *size, -0.8 *size], [0.0 *size, -1.0 *size], [1.0 *size, -0.3 *size], [1.1 *size, 0.3 *size], [0.4 *size, 0.5 *size], [1.0 *size, 0.8 *size], [0.5 *size, 1.3 *size], [-0.1 *size, 1.2 *size], [-0.6 *size, 1.0 *size], [-1.3*size, 0.2 *size], [-1.1*size, -0.2 *size], [-0.7 *size, 0.0 *size], [-1.0 *size, -0.8 *size] ] ]; impl Asteroid { pub fn new(ship_x: f32, ship_y: f32, sizeOption: Option<AsteroidSize>,ctx: &mut Context) -> Asteroid 
{ let (mut x, mut y); loop { // Can't shadow via pattern :( let (x_, y_) = game::random_place(ctx); x = x_; y = y_; // Break out when the coords are not in a safezone if ship_x - x < SHIP_SAFEZONE / 2.0 || ship_x - x > -(SHIP_SAFEZONE / 2.0) || ship_y - y > SHIP_SAFEZONE / 2.0 || ship_y - y < -(SHIP_SAFEZONE / 2.0) { break; } } let size; if let None = sizeOption { size = match (rand::random::<f32>() * 3.0 + 1.0).floor() as u8 { 1 => AsteroidSize::Small, 2 => AsteroidSize::Medium, 3 => AsteroidSize::Big, _ => AsteroidSize::Small } } else { size = sizeOption.unwrap(); } /* let mirrored = match rand::random::<f32>().round() as u8 { 1 => false, 2 => true, _ => true }; */ let speed_x = rand::random::<f32>() * ASTEROID_MAX_MOVEMENT_SPEED * 2.0 - ASTEROID_MAX_MOVEMENT_SPEED; let speed_y = rand::random::<f32>() * ASTEROID_MAX_MOVEMENT_SPEED * 2.0 - ASTEROID_MAX_MOVEMENT_SPEED; let rotation_speed = rand::random::<f32>() * ASTEROID_MAX_ROTATION_SPEED * 2.0 - ASTEROID_MAX_ROTATION_SPEED; //let rotation = rand::random::<f32>() * (2.0 * std::f32::consts::PI); let rotation = 0.0; let mirrored = false; let mesh = (rand::random::<f32>() * ASTEROID_MESHES.len() as f32).floor() as usize; // Asteroid go brrr Asteroid { x, y, size, speed_x, speed_y, rotation_speed, rotation, mirrored, mesh } } pub fn
(&mut self) { self.x += self.speed_x; self.y += self.speed_y; self.rotation += self.rotation_speed; } /// Returns a boolean that states if someone is within the hitbox of this asteroid pub fn in_hitbox(&self, (x, y): (f32, f32)) -> bool { let size; match &self.size { AsteroidSize::Big => size = ASTEROID_BIG, AsteroidSize::Medium => size = ASTEROID_MEDIUM, AsteroidSize::Small => size = ASTEROID_SMALL } // I am going to take 2.0 as the raw diameter of an asteroid let radius = 2.0 * size / 2.0; /* println!("hitboxcalc"); println!("{}", radius); println!("{} {}", x, y); println!("{} {}", self.x, self.y); println!("{}", ((self.x - x).powf(2.0) + (self.y - y).powf(2.0)).sqrt() ); */ ((self.x - x).powf(2.0) + (self.y - y).powf(2.0)).sqrt() < radius } /// Split asteroid into 2 of smaller size pub fn split(&self) -> Option<[Asteroid;2]> { let size = match self.size { AsteroidSize::Big => AsteroidSize::Medium, AsteroidSize::Medium => AsteroidSize::Small, AsteroidSize::Small => return None }; let speed_x = rand::random::<f32>() * ASTEROID_MAX_MOVEMENT_SPEED; let speed_y = rand::random::<f32>() * ASTEROID_MAX_MOVEMENT_SPEED; let rotation_speed = rand::random::<f32>() * ASTEROID_MAX_ROTATION_SPEED; let asteroid1 = Asteroid { speed_x, speed_y, rotation_speed, size, ..*self }; let speed_x = rand::random::<f32>() * ASTEROID_MAX_MOVEMENT_SPEED; let speed_y = rand::random::<f32>() * ASTEROID_MAX_MOVEMENT_SPEED; let rotation_speed = rand::random::<f32>() * ASTEROID_MAX_ROTATION_SPEED; let asteroid2 = Asteroid { speed_x, speed_y, rotation_speed, size, ..*self }; Some([asteroid1, asteroid2]) } } /* enum SplitResult { New([Asteroid;2]), None } */ impl Draw for Asteroid { fn mesh(&self, ctx: &mut Context) -> GameResult<Mesh> { let size; match &self.size { AsteroidSize::Big => size = ASTEROID_BIG, AsteroidSize::Medium => size = ASTEROID_MEDIUM, AsteroidSize::Small => size = ASTEROID_SMALL } let mut mesh = MeshBuilder::new(); mesh.line( &ASTEROID_MESHES[self.mesh](size), 1.0, graphics::WHITE )?; // I am going to take 2.0 as the raw diameter of an asteroid /* let radius = 2.0 * size / 2.0; //DEBUG mesh.circle( graphics::DrawMode::stroke(1.0), [0.0, 0.0], radius, 0.2, graphics::WHITE ); */ mesh.build(ctx) } fn draw_param(&self) -> DrawParam { let size; match &self.size { AsteroidSize::Big => size = ASTEROID_BIG, AsteroidSize::Medium => size = ASTEROID_MEDIUM, AsteroidSize::Small => size = ASTEROID_SMALL } let mut param = DrawParam::new() .dest([self.x, self.y]) //.offset([0.5 * size, 0.5 * -size]) .rotation(self.rotation); if self.mirrored { param = param.scale([-1.0, 1.0]); } param } } //#endregion Asteroid
update
identifier_name
objects.rs
use ggez::input::keyboard; use ggez::{graphics, Context, GameResult}; use graphics::{Mesh, MeshBuilder, DrawParam}; use crate::game; use game::movement::Movement; use game::Draw; //#region Ship /// The Ship.\ /// Width and Height is sort of switched here.\ /// This is because the mesh is made to face the right but then rotated upwards.\ /// I thought it would make more sense like this but it kind of didn't but whateer who cares.\ pub struct Ship { pub width: f32, pub height: f32, pub x: f32, pub y: f32, pub mov: Movement, /// Current rotation in radials pub rotation: f32, /// Force to add to the movement calculation when using rocket pub movement_force: f32, /// Speed of rotation in radials per tick pub rotation_speed: f32, /// How many time the ship can fire a laser per second pub fire_rate: f32, /// Time the ship fired for the last time pub last_fire: std::time::Instant, /// If the ship is currently using it's rocket pub moving: bool } impl Ship { pub fn new(ctx: &Context) -> Ship { let (ctx_width, ctx_height) = graphics::drawable_size(ctx); let ship_width = 18.0; let ship_height = 20.0; Ship { width: ship_width, height: ship_height, x: (ctx_width - ship_width)/ 2.0, y: (ctx_height- ship_height) / 2.0, rotation: (3.0 / 2.0) * std::f32::consts::PI, // Start facing up movement_force: 5.0, rotation_speed: 0.1, mov: Movement::new(0.3, 10.0), fire_rate: 5.0, last_fire: std::time::Instant::now(), moving: false } } /// Handle keyboard inputs and update the location of the Ship accordingly pub fn update_movement(&mut self, ctx: &Context) { /* The current implementation does not allow external forces This could be easily achieved by having this call take additional params which set some force before movement calculation like gravity. This is (currently) not needed for this game.*/ self.mov.force_x = 0.0; self.mov.force_y = 0.0; self.moving = false; if keyboard::is_key_pressed(ctx, keyboard::KeyCode::A) { self.rotation -= self.rotation_speed; } if keyboard::is_key_pressed(ctx, keyboard::KeyCode::D) { self.rotation += self.rotation_speed; } if keyboard::is_key_pressed(ctx, keyboard::KeyCode::W) { self.mov.force_x += self.rotation.cos() * self.movement_force; self.mov.force_y += self.rotation.sin() * self.movement_force; self.moving = true; } // Movement structs handles the physics self.mov.update(); self.x += self.mov.speed_x; self.y += self.mov.speed_y; } /// Add a laser to the gamestate appearing from the ship pub fn shoot(&self, lasers: &mut Vec<game::Laser>) { lasers.push(game::Laser::new( self.x + self.height /2.0, self.y - self.width / 2.0, self.rotation) ); } pub fn debug_string(&self) -> String { format!( "Force x: {}\n\ Force y: {}\n\ Acceleration x: {}\n\ Acceleration y: {}\n\ Speed x: {}\n\ Speed y: {}\n\
self.mov.force_y, self.mov.acceleration_x, self.mov.acceleration_y, self.mov.speed_x, self.mov.speed_y, self.rotation_speed ) } } impl game::Draw for Ship { fn mesh(&self, ctx: &mut Context) -> GameResult<Mesh> { let mut mesh = MeshBuilder::new(); /* With these points you could make the center of the mesh be the actual center of the triangle This would make writing hit detection easier, and would make the offset trivial. But I did not immediately implement it like this and I don't want to redo the rocket fire mesh right now, so I am leaving this comment instead [-self.height/2.0, -self.width/2.0], [ self.height/2.0, 0.0], [-self.height/2.0, self.width/2.0], [-self.height/2.0, -self.width/2.0] */ // Could be a polygon as well mesh.line( &[ [0.0, 0.0], [self.height, -self.width / 2.0], [0.0, -self.width], [0.0,0.0] ], 1.3, graphics::WHITE )?; // Draw fire behind rocket if self.moving { mesh.line( &[ [ - 0.0, - 0.1 * self.width], [ - 0.3 * self.height, - 0.233 * self.width], [ - 0.2 * self.height, - 0.366 * self.width], [ - 0.6 * self.height, - 0.5 * self.width], [ - 0.2 * self.height, - 0.633 * self.width], [ - 0.3 * self.height, - 0.766 * self.width], [ - 0.0 * self.height, - 0.9 * self.width] ], 1.3, graphics::WHITE )?; } mesh.build(ctx) } fn draw_param(&self) -> DrawParam { DrawParam::new() .dest([self.x, self.y]) .offset([0.5 * self.height, 0.5 * -self.width]) .rotation(self.rotation) } } //#endregion //#region Laser /// Laser that has been fired from Ship pub struct Laser { pub x: f32, pub y: f32, pub rotation: f32, speed: f32, width: f32 } impl Laser { pub fn new(x: f32, y: f32, rotation: f32) -> Laser { Laser { x, y, rotation, speed: 17.0, width: 15.0 } } pub fn update(&mut self) { self.x += self.rotation.cos() * self.speed; self.y += self.rotation.sin() * self.speed; } } impl Draw for Laser { fn mesh(&self, ctx: &mut Context) -> ggez::GameResult<graphics::Mesh> { MeshBuilder::new() .line( &[ [0.0,0.0], [15.0,0.0] ], 2.0, graphics::WHITE )? 
.build(ctx) } fn draw_param(&self) -> DrawParam { DrawParam::new() .dest([self.x, self.y]) .offset([0.5 * self.width, 0.0]) .rotation(self.rotation) } } //#endregion //#region Asteroid /// The 3 different asteroid sizes #[derive(Copy, Clone)] pub enum AsteroidSize { Big, Medium, Small } /// Factor to multiple mesh with const ASTEROID_BIG: f32 = 40.0; /// Factor to multiple mesh with const ASTEROID_MEDIUM: f32 = 30.0; /// Factor to multiple mesh with const ASTEROID_SMALL: f32 = 20.0; #[derive(Clone)] pub struct Asteroid { pub x: f32, pub y: f32, rotation: f32, rotation_speed: f32, speed_x: f32, speed_y: f32, size: AsteroidSize, mirrored: bool, /// Index for the asteroid_mashes var mesh: usize } const ASTEROID_MAX_MOVEMENT_SPEED: f32 = 5.0; const ASTEROID_MAX_ROTATION_SPEED: f32 = 0.1; /// The width/height of the safezone of the ship.\ /// Asteroids do not spawn here const SHIP_SAFEZONE: f32 = 300.0; /// Array of different random meshes for the asteroids.\ /// The diameter before mulitplication with the asteroid size should be about 2.0 /* const ASTEROID_MESHES: [fn(f32) -> [[f32;2];13];1] = [ |size| [ [0.0 *size, 0.0 *size], [1.0 *size, 0.0 *size], [2.5 *size, 1.0 *size], [2.5 *size, 1.3 *size], [1.5 *size, 1.7 *size], [2.4 *size, 1.9 *size], [1.5 *size, 2.8 *size], [0.9 *size, 2.6 *size], [0.4 *size, 2.4 *size], [-0.3*size, 1.2 *size], [-0.1*size, 0.8 *size], [0.3 *size, 1.0 *size], [0.0 *size, 0.0 *size] ] ]; */ /// Array of different random meshes for the asteroids const ASTEROID_MESHES: [fn(f32) -> [[f32;2];13];1] = [ |size| [ [-1.0 *size, -0.8 *size], [0.0 *size, -1.0 *size], [1.0 *size, -0.3 *size], [1.1 *size, 0.3 *size], [0.4 *size, 0.5 *size], [1.0 *size, 0.8 *size], [0.5 *size, 1.3 *size], [-0.1 *size, 1.2 *size], [-0.6 *size, 1.0 *size], [-1.3*size, 0.2 *size], [-1.1*size, -0.2 *size], [-0.7 *size, 0.0 *size], [-1.0 *size, -0.8 *size] ] ]; impl Asteroid { pub fn new(ship_x: f32, ship_y: f32, sizeOption: Option<AsteroidSize>,ctx: &mut Context) -> Asteroid { let (mut x, mut y); loop { // Can't shadow via pattern :( let (x_, y_) = game::random_place(ctx); x = x_; y = y_; // Break out when the coords are not in a safezone if ship_x - x < SHIP_SAFEZONE / 2.0 || ship_x - x > -(SHIP_SAFEZONE / 2.0) || ship_y - y > SHIP_SAFEZONE / 2.0 || ship_y - y < -(SHIP_SAFEZONE / 2.0) { break; } } let size; if let None = sizeOption { size = match (rand::random::<f32>() * 3.0 + 1.0).floor() as u8 { 1 => AsteroidSize::Small, 2 => AsteroidSize::Medium, 3 => AsteroidSize::Big, _ => AsteroidSize::Small } } else { size = sizeOption.unwrap(); } /* let mirrored = match rand::random::<f32>().round() as u8 { 1 => false, 2 => true, _ => true }; */ let speed_x = rand::random::<f32>() * ASTEROID_MAX_MOVEMENT_SPEED * 2.0 - ASTEROID_MAX_MOVEMENT_SPEED; let speed_y = rand::random::<f32>() * ASTEROID_MAX_MOVEMENT_SPEED * 2.0 - ASTEROID_MAX_MOVEMENT_SPEED; let rotation_speed = rand::random::<f32>() * ASTEROID_MAX_ROTATION_SPEED * 2.0 - ASTEROID_MAX_ROTATION_SPEED; //let rotation = rand::random::<f32>() * (2.0 * std::f32::consts::PI); let rotation = 0.0; let mirrored = false; let mesh = (rand::random::<f32>() * ASTEROID_MESHES.len() as f32).floor() as usize; // Asteroid go brrr Asteroid { x, y, size, speed_x, speed_y, rotation_speed, rotation, mirrored, mesh } } pub fn update(&mut self) { self.x += self.speed_x; self.y += self.speed_y; self.rotation += self.rotation_speed; } /// Returns a boolean that states if someone is within the hitbox of this asteroid pub fn in_hitbox(&self, (x, y): (f32, f32)) -> bool { let 
size = match self.size { AsteroidSize::Big => ASTEROID_BIG, AsteroidSize::Medium => ASTEROID_MEDIUM, AsteroidSize::Small => ASTEROID_SMALL }; // I am going to take 2.0 as the raw diameter of an asteroid let radius = 2.0 * size / 2.0; /* println!("hitboxcalc"); println!("{}", radius); println!("{} {}", x, y); println!("{} {}", self.x, self.y); println!("{}", ((self.x - x).powf(2.0) + (self.y - y).powf(2.0)).sqrt() ); */ ((self.x - x).powf(2.0) + (self.y - y).powf(2.0)).sqrt() < radius } /// Split asteroid into 2 of smaller size pub fn split(&self) -> Option<[Asteroid;2]> { let size = match self.size { AsteroidSize::Big => AsteroidSize::Medium, AsteroidSize::Medium => AsteroidSize::Small, AsteroidSize::Small => return None }; let speed_x = rand::random::<f32>() * ASTEROID_MAX_MOVEMENT_SPEED; let speed_y = rand::random::<f32>() * ASTEROID_MAX_MOVEMENT_SPEED; let rotation_speed = rand::random::<f32>() * ASTEROID_MAX_ROTATION_SPEED; let asteroid1 = Asteroid { speed_x, speed_y, rotation_speed, size, ..*self }; let speed_x = rand::random::<f32>() * ASTEROID_MAX_MOVEMENT_SPEED; let speed_y = rand::random::<f32>() * ASTEROID_MAX_MOVEMENT_SPEED; let rotation_speed = rand::random::<f32>() * ASTEROID_MAX_ROTATION_SPEED; let asteroid2 = Asteroid { speed_x, speed_y, rotation_speed, size, ..*self }; Some([asteroid1, asteroid2]) } } /* enum SplitResult { New([Asteroid;2]), None } */ impl Draw for Asteroid { fn mesh(&self, ctx: &mut Context) -> GameResult<Mesh> { let size = match self.size { AsteroidSize::Big => ASTEROID_BIG, AsteroidSize::Medium => ASTEROID_MEDIUM, AsteroidSize::Small => ASTEROID_SMALL }; let mut mesh = MeshBuilder::new(); mesh.line( &ASTEROID_MESHES[self.mesh](size), 1.0, graphics::WHITE )?; // I am going to take 2.0 as the raw diameter of an asteroid /* let radius = 2.0 * size / 2.0; //DEBUG mesh.circle( graphics::DrawMode::stroke(1.0), [0.0, 0.0], radius, 0.2, graphics::WHITE ); */ mesh.build(ctx) } fn draw_param(&self) -> DrawParam { let size = match self.size { AsteroidSize::Big => ASTEROID_BIG, AsteroidSize::Medium => ASTEROID_MEDIUM, AsteroidSize::Small => ASTEROID_SMALL }; let mut param = DrawParam::new() .dest([self.x, self.y]) //.offset([0.5 * size, 0.5 * -size]) .rotation(self.rotation); if self.mirrored { param = param.scale([-1.0, 1.0]); } param } } //#endregion Asteroid
Rotation speed: {}\n", self.mov.force_x,
random_line_split
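The `in_hitbox` test in the row above treats an asteroid as a circle and compares the player's distance to the asteroid's center against its radius. A minimal standalone sketch of that check (names are illustrative, not part of the game code); comparing squared distances avoids the `sqrt` the original performs and is equivalent for a non-negative radius:

/// Circle membership test in the spirit of `Asteroid::in_hitbox`.
fn in_circle(center: (f32, f32), radius: f32, point: (f32, f32)) -> bool {
    let dx = center.0 - point.0;
    let dy = center.1 - point.1;
    // Equivalent to `((dx*dx + dy*dy).sqrt() < radius)` for radius >= 0.
    dx * dx + dy * dy < radius * radius
}

fn main() {
    // An AsteroidSize::Small has radius 2.0 * 20.0 / 2.0 = 20.0.
    assert!(in_circle((0.0, 0.0), 20.0, (10.0, 10.0)));
    assert!(!in_circle((0.0, 0.0), 20.0, (30.0, 0.0)));
}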
fs.rs
// Copyright 2021 The Chromium OS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use std::io; use std::os::unix::io::AsRawFd; use std::os::unix::net::UnixListener; use std::path::{Path, PathBuf}; use std::sync::Arc; use anyhow::{anyhow, bail, Context}; use argh::FromArgs; use base::{error, get_max_open_files, warn, Event, RawDescriptor, Tube, UnlinkUnixListener}; use cros_async::{EventAsync, Executor}; use data_model::{DataInit, Le32}; use fuse::Server; use futures::future::{AbortHandle, Abortable}; use hypervisor::ProtectionType; use minijail::{self, Minijail}; use once_cell::sync::OnceCell; use sync::Mutex; use vm_memory::GuestMemory; use vmm_vhost::message::{VhostUserProtocolFeatures, VhostUserVirtioFeatures}; use crate::virtio; use crate::virtio::copy_config; use crate::virtio::fs::passthrough::PassthroughFs; use crate::virtio::fs::{process_fs_queue, virtio_fs_config, FS_MAX_TAG_LEN}; use crate::virtio::vhost::user::device::handler::{ CallEvent, DeviceRequestHandler, VhostUserBackend, }; static FS_EXECUTOR: OnceCell<Executor> = OnceCell::new(); async fn handle_fs_queue( mut queue: virtio::Queue, mem: GuestMemory, call_evt: Arc<Mutex<CallEvent>>, kick_evt: EventAsync, server: Arc<fuse::Server<PassthroughFs>>, tube: Arc<Mutex<Tube>>, ) { // Slot is always going to be 0 because we do not support DAX let slot: u32 = 0; loop { if let Err(e) = kick_evt.next_val().await { error!("Failed to read kick event for fs queue: {}", e); break; } if let Err(e) = process_fs_queue(&mem, &call_evt, &mut queue, &server, &tube, slot) { error!("Process FS queue failed: {}", e); break; } } } fn default_uidmap() -> String { let euid = unsafe { libc::geteuid() }; format!("{} {} 1", euid, euid) } fn default_gidmap() -> String
fn jail_and_fork( mut keep_rds: Vec<RawDescriptor>, dir_path: PathBuf, uid_map: Option<String>, gid_map: Option<String>, ) -> anyhow::Result<i32> { // Create new minijail sandbox let mut j = Minijail::new()?; j.namespace_pids(); j.namespace_user(); j.namespace_user_disable_setgroups(); j.uidmap(&uid_map.unwrap_or_else(default_uidmap))?; j.gidmap(&gid_map.unwrap_or_else(default_gidmap))?; j.run_as_init(); j.namespace_vfs(); j.namespace_net(); j.no_new_privs(); // Only pivot_root if we are not re-using the current root directory. if dir_path != Path::new("/") { // It's safe to call `namespace_vfs` multiple times. j.namespace_vfs(); j.enter_pivot_root(&dir_path)?; } j.set_remount_mode(libc::MS_SLAVE); let limit = get_max_open_files().context("failed to get max open files")?; j.set_rlimit(libc::RLIMIT_NOFILE as i32, limit, limit)?; // Make sure there are no duplicates in keep_rds keep_rds.dedup(); // fork on the jail here let pid = unsafe { j.fork(Some(&keep_rds))? }; if pid > 0 { unsafe { libc::prctl(libc::PR_SET_PDEATHSIG, libc::SIGTERM) }; } if pid < 0 { bail!("Fork error! {}", std::io::Error::last_os_error()); } Ok(pid) } struct FsBackend { server: Arc<fuse::Server<PassthroughFs>>, tag: [u8; FS_MAX_TAG_LEN], avail_features: u64, acked_features: u64, acked_protocol_features: VhostUserProtocolFeatures, workers: [Option<AbortHandle>; Self::MAX_QUEUE_NUM], keep_rds: Vec<RawDescriptor>, } impl FsBackend { pub fn new(tag: &str) -> anyhow::Result<Self> { if tag.len() > FS_MAX_TAG_LEN { bail!( "fs tag is too long: {} (max supported: {})", tag.len(), FS_MAX_TAG_LEN ); } let mut fs_tag = [0u8; FS_MAX_TAG_LEN]; fs_tag[..tag.len()].copy_from_slice(tag.as_bytes()); let avail_features = virtio::base_features(ProtectionType::Unprotected) | VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits(); // Use default passthroughfs config let fs = PassthroughFs::new(Default::default())?; let mut keep_rds: Vec<RawDescriptor> = [0, 1, 2].to_vec(); keep_rds.append(&mut fs.keep_rds()); let server = Arc::new(Server::new(fs)); Ok(FsBackend { server, tag: fs_tag, avail_features, acked_features: 0, acked_protocol_features: VhostUserProtocolFeatures::empty(), workers: Default::default(), keep_rds, }) } } impl VhostUserBackend for FsBackend { const MAX_QUEUE_NUM: usize = 2; /* worker queue and high priority queue */ const MAX_VRING_LEN: u16 = 1024; type Doorbell = CallEvent; type Error = anyhow::Error; fn features(&self) -> u64 { self.avail_features } fn ack_features(&mut self, value: u64) -> anyhow::Result<()> { let unrequested_features = value & !self.avail_features; if unrequested_features != 0 { bail!("invalid features are given: {:#x}", unrequested_features); } self.acked_features |= value; Ok(()) } fn acked_features(&self) -> u64 { self.acked_features } fn protocol_features(&self) -> VhostUserProtocolFeatures { VhostUserProtocolFeatures::CONFIG | VhostUserProtocolFeatures::MQ } fn ack_protocol_features(&mut self, features: u64) -> anyhow::Result<()> { let features = VhostUserProtocolFeatures::from_bits(features) .ok_or_else(|| anyhow!("invalid protocol features are given: {:#x}", features))?; let supported = self.protocol_features(); self.acked_protocol_features = features & supported; Ok(()) } fn acked_protocol_features(&self) -> u64 { self.acked_protocol_features.bits() } fn read_config(&self, offset: u64, data: &mut [u8]) { let config = virtio_fs_config { tag: self.tag, num_request_queues: Le32::from(1), }; copy_config(data, 0, config.as_slice(), offset); } fn reset(&mut self) { for handle in 
self.workers.iter_mut().filter_map(Option::take) { handle.abort(); } } fn start_queue( &mut self, idx: usize, mut queue: virtio::Queue, mem: GuestMemory, call_evt: Arc<Mutex<CallEvent>>, kick_evt: Event, ) -> anyhow::Result<()> { if let Some(handle) = self.workers.get_mut(idx).and_then(Option::take) { warn!("Starting new queue handler without stopping old handler"); handle.abort(); } // Safe because the executor is initialized in main() below. let ex = FS_EXECUTOR.get().expect("Executor not initialized"); // Enable any virtqueue features that were negotiated (like VIRTIO_RING_F_EVENT_IDX). queue.ack_features(self.acked_features); let kick_evt = EventAsync::new(kick_evt.0, ex).context("failed to create EventAsync for kick_evt")?; let (handle, registration) = AbortHandle::new_pair(); let (_, fs_device_tube) = Tube::pair()?; ex.spawn_local(Abortable::new( handle_fs_queue( queue, mem, call_evt, kick_evt, self.server.clone(), Arc::new(Mutex::new(fs_device_tube)), ), registration, )) .detach(); self.workers[idx] = Some(handle); Ok(()) } fn stop_queue(&mut self, idx: usize) { if let Some(handle) = self.workers.get_mut(idx).and_then(Option::take) { handle.abort(); } } } #[derive(FromArgs)] #[argh(description = "")] struct Options { #[argh(option, description = "path to a socket", arg_name = "PATH")] socket: String, #[argh(option, description = "the virtio-fs tag", arg_name = "TAG")] tag: String, #[argh(option, description = "path to a directory to share", arg_name = "DIR")] shared_dir: PathBuf, #[argh(option, description = "uid map to use", arg_name = "UIDMAP")] uid_map: Option<String>, #[argh(option, description = "gid map to use", arg_name = "GIDMAP")] gid_map: Option<String>, } /// Starts a vhost-user fs device. /// Returns an error if the given `args` is invalid or the device fails to run. pub fn run_fs_device(program_name: &str, args: &[&str]) -> anyhow::Result<()> { let opts = match Options::from_args(&[program_name], args) { Ok(opts) => opts, Err(e) => { if e.status.is_err() { bail!(e.output); } else { println!("{}", e.output); } return Ok(()); } }; base::syslog::init().context("Failed to initialize syslog")?; let fs_device = FsBackend::new(&opts.tag)?; // Create and bind unix socket let listener = UnixListener::bind(opts.socket).map(UnlinkUnixListener)?; let mut keep_rds = fs_device.keep_rds.clone(); keep_rds.push(listener.as_raw_fd()); base::syslog::push_descriptors(&mut keep_rds); let handler = DeviceRequestHandler::new(fs_device); let pid = jail_and_fork(keep_rds, opts.shared_dir, opts.uid_map, opts.gid_map)?; // Parent, nothing to do but wait and then exit if pid != 0 { unsafe { libc::waitpid(pid, std::ptr::null_mut(), 0) }; return Ok(()); } // We need to set the no setuid fixup secure bit so that we don't drop capabilities when // changing the thread uid/gid. Without this, creating new entries can fail in some corner // cases. const SECBIT_NO_SETUID_FIXUP: i32 = 1 << 2; // TODO(crbug.com/1199487): Remove this once libc provides the wrapper for all targets. #[cfg(target_os = "linux")] { // Safe because this doesn't modify any memory and we check the return value. let mut securebits = unsafe { libc::prctl(libc::PR_GET_SECUREBITS) }; if securebits < 0 { bail!(io::Error::last_os_error()); } securebits |= SECBIT_NO_SETUID_FIXUP; // Safe because this doesn't modify any memory and we check the return value. 
let ret = unsafe { libc::prctl(libc::PR_SET_SECUREBITS, securebits) }; if ret < 0 { bail!(io::Error::last_os_error()); } } // Child, we can continue by spawning the executor and setting up the device let ex = Executor::new().context("Failed to create executor")?; let _ = FS_EXECUTOR.set(ex.clone()); if let Err(e) = ex.run_until(handler.run_with_listener(listener, &ex)) { bail!(e); } Ok(()) }
{ let egid = unsafe { libc::getegid() }; format!("{} {} 1", egid, egid) }
identifier_body
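The `default_uidmap`/`default_gidmap` bodies this row masks build single-entry ID-map strings in the kernel's `/proc/<pid>/uid_map` format: `<id inside namespace> <id outside namespace> <count>`. A hedged sketch of that format (the `id_map_entry` helper is hypothetical; the real code inlines the `format!` call, and the libc crate is assumed as in the surrounding file):

// Sketch of the one-line ID-map entry used by the uidmap/gidmap defaults.
fn id_map_entry(inside: u32, outside: u32, count: u32) -> String {
    format!("{} {} {}", inside, outside, count)
}

fn main() {
    // Map exactly one ID: the current effective UID maps to itself.
    let euid = unsafe { libc::geteuid() };
    assert_eq!(id_map_entry(euid, euid, 1), format!("{} {} 1", euid, euid));
}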
fs.rs
// Copyright 2021 The Chromium OS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use std::io; use std::os::unix::io::AsRawFd; use std::os::unix::net::UnixListener; use std::path::{Path, PathBuf}; use std::sync::Arc; use anyhow::{anyhow, bail, Context}; use argh::FromArgs; use base::{error, get_max_open_files, warn, Event, RawDescriptor, Tube, UnlinkUnixListener}; use cros_async::{EventAsync, Executor}; use data_model::{DataInit, Le32}; use fuse::Server; use futures::future::{AbortHandle, Abortable}; use hypervisor::ProtectionType; use minijail::{self, Minijail}; use once_cell::sync::OnceCell; use sync::Mutex; use vm_memory::GuestMemory; use vmm_vhost::message::{VhostUserProtocolFeatures, VhostUserVirtioFeatures}; use crate::virtio; use crate::virtio::copy_config; use crate::virtio::fs::passthrough::PassthroughFs; use crate::virtio::fs::{process_fs_queue, virtio_fs_config, FS_MAX_TAG_LEN}; use crate::virtio::vhost::user::device::handler::{ CallEvent, DeviceRequestHandler, VhostUserBackend, }; static FS_EXECUTOR: OnceCell<Executor> = OnceCell::new(); async fn handle_fs_queue( mut queue: virtio::Queue, mem: GuestMemory, call_evt: Arc<Mutex<CallEvent>>, kick_evt: EventAsync, server: Arc<fuse::Server<PassthroughFs>>, tube: Arc<Mutex<Tube>>, ) { // Slot is always going to be 0 because we do not support DAX let slot: u32 = 0; loop { if let Err(e) = kick_evt.next_val().await { error!("Failed to read kick event for fs queue: {}", e); break; } if let Err(e) = process_fs_queue(&mem, &call_evt, &mut queue, &server, &tube, slot) { error!("Process FS queue failed: {}", e); break; } } } fn default_uidmap() -> String { let euid = unsafe { libc::geteuid() }; format!("{} {} 1", euid, euid) } fn default_gidmap() -> String { let egid = unsafe { libc::getegid() }; format!("{} {} 1", egid, egid) } fn jail_and_fork( mut keep_rds: Vec<RawDescriptor>, dir_path: PathBuf, uid_map: Option<String>, gid_map: Option<String>, ) -> anyhow::Result<i32> { // Create new minijail sandbox let mut j = Minijail::new()?; j.namespace_pids(); j.namespace_user(); j.namespace_user_disable_setgroups(); j.uidmap(&uid_map.unwrap_or_else(default_uidmap))?; j.gidmap(&gid_map.unwrap_or_else(default_gidmap))?; j.run_as_init(); j.namespace_vfs(); j.namespace_net(); j.no_new_privs(); // Only pivot_root if we are not re-using the current root directory. if dir_path!= Path::new("/") { // It's safe to call `namespace_vfs` multiple times. j.namespace_vfs(); j.enter_pivot_root(&dir_path)?; } j.set_remount_mode(libc::MS_SLAVE); let limit = get_max_open_files().context("failed to get max open files")?; j.set_rlimit(libc::RLIMIT_NOFILE as i32, limit, limit)?; // Make sure there are no duplicates in keep_rds keep_rds.dedup(); // fork on the jail here let pid = unsafe { j.fork(Some(&keep_rds))? }; if pid > 0 { unsafe { libc::prctl(libc::PR_SET_PDEATHSIG, libc::SIGTERM) }; } if pid < 0 { bail!("Fork error! 
{}", std::io::Error::last_os_error()); } Ok(pid) } struct FsBackend { server: Arc<fuse::Server<PassthroughFs>>, tag: [u8; FS_MAX_TAG_LEN], avail_features: u64, acked_features: u64, acked_protocol_features: VhostUserProtocolFeatures, workers: [Option<AbortHandle>; Self::MAX_QUEUE_NUM], keep_rds: Vec<RawDescriptor>, } impl FsBackend { pub fn new(tag: &str) -> anyhow::Result<Self> { if tag.len() > FS_MAX_TAG_LEN { bail!( "fs tag is too long: {} (max supported: {})", tag.len(), FS_MAX_TAG_LEN ); } let mut fs_tag = [0u8; FS_MAX_TAG_LEN]; fs_tag[..tag.len()].copy_from_slice(tag.as_bytes()); let avail_features = virtio::base_features(ProtectionType::Unprotected) | VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits(); // Use default passthroughfs config let fs = PassthroughFs::new(Default::default())?; let mut keep_rds: Vec<RawDescriptor> = [0, 1, 2].to_vec(); keep_rds.append(&mut fs.keep_rds()); let server = Arc::new(Server::new(fs)); Ok(FsBackend { server, tag: fs_tag, avail_features, acked_features: 0, acked_protocol_features: VhostUserProtocolFeatures::empty(), workers: Default::default(), keep_rds, }) } } impl VhostUserBackend for FsBackend { const MAX_QUEUE_NUM: usize = 2; /* worker queue and high priority queue */ const MAX_VRING_LEN: u16 = 1024; type Doorbell = CallEvent; type Error = anyhow::Error; fn features(&self) -> u64 { self.avail_features } fn ack_features(&mut self, value: u64) -> anyhow::Result<()> { let unrequested_features = value & !self.avail_features; if unrequested_features != 0 { bail!("invalid features are given: {:#x}", unrequested_features); } self.acked_features |= value; Ok(()) } fn acked_features(&self) -> u64 { self.acked_features } fn protocol_features(&self) -> VhostUserProtocolFeatures { VhostUserProtocolFeatures::CONFIG | VhostUserProtocolFeatures::MQ } fn ack_protocol_features(&mut self, features: u64) -> anyhow::Result<()> { let features = VhostUserProtocolFeatures::from_bits(features) .ok_or_else(|| anyhow!("invalid protocol features are given: {:#x}", features))?; let supported = self.protocol_features(); self.acked_protocol_features = features & supported; Ok(()) } fn acked_protocol_features(&self) -> u64 { self.acked_protocol_features.bits() } fn read_config(&self, offset: u64, data: &mut [u8]) { let config = virtio_fs_config { tag: self.tag, num_request_queues: Le32::from(1), }; copy_config(data, 0, config.as_slice(), offset); } fn reset(&mut self) { for handle in self.workers.iter_mut().filter_map(Option::take) { handle.abort(); } } fn start_queue( &mut self, idx: usize, mut queue: virtio::Queue, mem: GuestMemory, call_evt: Arc<Mutex<CallEvent>>, kick_evt: Event, ) -> anyhow::Result<()> { if let Some(handle) = self.workers.get_mut(idx).and_then(Option::take) { warn!("Starting new queue handler without stopping old handler"); handle.abort(); } // Safe because the executor is initialized in main() below. let ex = FS_EXECUTOR.get().expect("Executor not initialized"); // Enable any virtqueue features that were negotiated (like VIRTIO_RING_F_EVENT_IDX). 
queue.ack_features(self.acked_features); let kick_evt = EventAsync::new(kick_evt.0, ex).context("failed to create EventAsync for kick_evt")?; let (handle, registration) = AbortHandle::new_pair(); let (_, fs_device_tube) = Tube::pair()?; ex.spawn_local(Abortable::new( handle_fs_queue( queue, mem, call_evt, kick_evt, self.server.clone(), Arc::new(Mutex::new(fs_device_tube)), ), registration, )) .detach(); self.workers[idx] = Some(handle); Ok(()) } fn stop_queue(&mut self, idx: usize) { if let Some(handle) = self.workers.get_mut(idx).and_then(Option::take) { handle.abort(); } } } #[derive(FromArgs)] #[argh(description = "")] struct
{ #[argh(option, description = "path to a socket", arg_name = "PATH")] socket: String, #[argh(option, description = "the virtio-fs tag", arg_name = "TAG")] tag: String, #[argh(option, description = "path to a directory to share", arg_name = "DIR")] shared_dir: PathBuf, #[argh(option, description = "uid map to use", arg_name = "UIDMAP")] uid_map: Option<String>, #[argh(option, description = "gid map to use", arg_name = "GIDMAP")] gid_map: Option<String>, } /// Starts a vhost-user fs device. /// Returns an error if the given `args` is invalid or the device fails to run. pub fn run_fs_device(program_name: &str, args: &[&str]) -> anyhow::Result<()> { let opts = match Options::from_args(&[program_name], args) { Ok(opts) => opts, Err(e) => { if e.status.is_err() { bail!(e.output); } else { println!("{}", e.output); } return Ok(()); } }; base::syslog::init().context("Failed to initialize syslog")?; let fs_device = FsBackend::new(&opts.tag)?; // Create and bind unix socket let listener = UnixListener::bind(opts.socket).map(UnlinkUnixListener)?; let mut keep_rds = fs_device.keep_rds.clone(); keep_rds.push(listener.as_raw_fd()); base::syslog::push_descriptors(&mut keep_rds); let handler = DeviceRequestHandler::new(fs_device); let pid = jail_and_fork(keep_rds, opts.shared_dir, opts.uid_map, opts.gid_map)?; // Parent, nothing to do but wait and then exit if pid != 0 { unsafe { libc::waitpid(pid, std::ptr::null_mut(), 0) }; return Ok(()); } // We need to set the no setuid fixup secure bit so that we don't drop capabilities when // changing the thread uid/gid. Without this, creating new entries can fail in some corner // cases. const SECBIT_NO_SETUID_FIXUP: i32 = 1 << 2; // TODO(crbug.com/1199487): Remove this once libc provides the wrapper for all targets. #[cfg(target_os = "linux")] { // Safe because this doesn't modify any memory and we check the return value. let mut securebits = unsafe { libc::prctl(libc::PR_GET_SECUREBITS) }; if securebits < 0 { bail!(io::Error::last_os_error()); } securebits |= SECBIT_NO_SETUID_FIXUP; // Safe because this doesn't modify any memory and we check the return value. let ret = unsafe { libc::prctl(libc::PR_SET_SECUREBITS, securebits) }; if ret < 0 { bail!(io::Error::last_os_error()); } } // Child, we can continue by spawning the executor and setting up the device let ex = Executor::new().context("Failed to create executor")?; let _ = FS_EXECUTOR.set(ex.clone()); if let Err(e) = ex.run_until(handler.run_with_listener(listener, &ex)) { bail!(e); } Ok(()) }
Options
identifier_name
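The `ack_features` implementation repeated in these rows enforces the vhost-user handshake rule that a frontend may only acknowledge feature bits the backend actually offered. A minimal sketch of that masking logic, detached from the trait (the function shape and error type are illustrative, not the crosvm API):

/// Reject any acknowledged bits the backend never advertised, then
/// accumulate the accepted ones, mirroring FsBackend::ack_features.
fn ack_features(avail: u64, acked: &mut u64, value: u64) -> Result<(), String> {
    let unrequested = value & !avail;
    if unrequested != 0 {
        return Err(format!("invalid features: {:#x}", unrequested));
    }
    *acked |= value;
    Ok(())
}

fn main() {
    let avail = 0b1011;
    let mut acked = 0;
    assert!(ack_features(avail, &mut acked, 0b0011).is_ok());
    assert!(ack_features(avail, &mut acked, 0b0100).is_err()); // bit not offered
    assert_eq!(acked, 0b0011);
}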
fs.rs
// Copyright 2021 The Chromium OS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use std::io; use std::os::unix::io::AsRawFd; use std::os::unix::net::UnixListener; use std::path::{Path, PathBuf}; use std::sync::Arc; use anyhow::{anyhow, bail, Context}; use argh::FromArgs; use base::{error, get_max_open_files, warn, Event, RawDescriptor, Tube, UnlinkUnixListener}; use cros_async::{EventAsync, Executor}; use data_model::{DataInit, Le32}; use fuse::Server; use futures::future::{AbortHandle, Abortable}; use hypervisor::ProtectionType; use minijail::{self, Minijail}; use once_cell::sync::OnceCell; use sync::Mutex; use vm_memory::GuestMemory; use vmm_vhost::message::{VhostUserProtocolFeatures, VhostUserVirtioFeatures}; use crate::virtio; use crate::virtio::copy_config; use crate::virtio::fs::passthrough::PassthroughFs; use crate::virtio::fs::{process_fs_queue, virtio_fs_config, FS_MAX_TAG_LEN}; use crate::virtio::vhost::user::device::handler::{ CallEvent, DeviceRequestHandler, VhostUserBackend, }; static FS_EXECUTOR: OnceCell<Executor> = OnceCell::new(); async fn handle_fs_queue( mut queue: virtio::Queue, mem: GuestMemory, call_evt: Arc<Mutex<CallEvent>>, kick_evt: EventAsync, server: Arc<fuse::Server<PassthroughFs>>, tube: Arc<Mutex<Tube>>, ) { // Slot is always going to be 0 because we do not support DAX let slot: u32 = 0; loop { if let Err(e) = kick_evt.next_val().await { error!("Failed to read kick event for fs queue: {}", e); break; } if let Err(e) = process_fs_queue(&mem, &call_evt, &mut queue, &server, &tube, slot) { error!("Process FS queue failed: {}", e); break; } } } fn default_uidmap() -> String { let euid = unsafe { libc::geteuid() }; format!("{} {} 1", euid, euid) } fn default_gidmap() -> String { let egid = unsafe { libc::getegid() }; format!("{} {} 1", egid, egid) } fn jail_and_fork( mut keep_rds: Vec<RawDescriptor>, dir_path: PathBuf, uid_map: Option<String>, gid_map: Option<String>, ) -> anyhow::Result<i32> { // Create new minijail sandbox let mut j = Minijail::new()?; j.namespace_pids(); j.namespace_user(); j.namespace_user_disable_setgroups(); j.uidmap(&uid_map.unwrap_or_else(default_uidmap))?; j.gidmap(&gid_map.unwrap_or_else(default_gidmap))?; j.run_as_init(); j.namespace_vfs(); j.namespace_net(); j.no_new_privs(); // Only pivot_root if we are not re-using the current root directory. if dir_path!= Path::new("/") { // It's safe to call `namespace_vfs` multiple times. j.namespace_vfs(); j.enter_pivot_root(&dir_path)?; } j.set_remount_mode(libc::MS_SLAVE); let limit = get_max_open_files().context("failed to get max open files")?; j.set_rlimit(libc::RLIMIT_NOFILE as i32, limit, limit)?; // Make sure there are no duplicates in keep_rds keep_rds.dedup(); // fork on the jail here let pid = unsafe { j.fork(Some(&keep_rds))? }; if pid > 0
if pid < 0 { bail!("Fork error! {}", std::io::Error::last_os_error()); } Ok(pid) } struct FsBackend { server: Arc<fuse::Server<PassthroughFs>>, tag: [u8; FS_MAX_TAG_LEN], avail_features: u64, acked_features: u64, acked_protocol_features: VhostUserProtocolFeatures, workers: [Option<AbortHandle>; Self::MAX_QUEUE_NUM], keep_rds: Vec<RawDescriptor>, } impl FsBackend { pub fn new(tag: &str) -> anyhow::Result<Self> { if tag.len() > FS_MAX_TAG_LEN { bail!( "fs tag is too long: {} (max supported: {})", tag.len(), FS_MAX_TAG_LEN ); } let mut fs_tag = [0u8; FS_MAX_TAG_LEN]; fs_tag[..tag.len()].copy_from_slice(tag.as_bytes()); let avail_features = virtio::base_features(ProtectionType::Unprotected) | VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits(); // Use default passthroughfs config let fs = PassthroughFs::new(Default::default())?; let mut keep_rds: Vec<RawDescriptor> = [0, 1, 2].to_vec(); keep_rds.append(&mut fs.keep_rds()); let server = Arc::new(Server::new(fs)); Ok(FsBackend { server, tag: fs_tag, avail_features, acked_features: 0, acked_protocol_features: VhostUserProtocolFeatures::empty(), workers: Default::default(), keep_rds, }) } } impl VhostUserBackend for FsBackend { const MAX_QUEUE_NUM: usize = 2; /* worker queue and high priority queue */ const MAX_VRING_LEN: u16 = 1024; type Doorbell = CallEvent; type Error = anyhow::Error; fn features(&self) -> u64 { self.avail_features } fn ack_features(&mut self, value: u64) -> anyhow::Result<()> { let unrequested_features = value & !self.avail_features; if unrequested_features != 0 { bail!("invalid features are given: {:#x}", unrequested_features); } self.acked_features |= value; Ok(()) } fn acked_features(&self) -> u64 { self.acked_features } fn protocol_features(&self) -> VhostUserProtocolFeatures { VhostUserProtocolFeatures::CONFIG | VhostUserProtocolFeatures::MQ } fn ack_protocol_features(&mut self, features: u64) -> anyhow::Result<()> { let features = VhostUserProtocolFeatures::from_bits(features) .ok_or_else(|| anyhow!("invalid protocol features are given: {:#x}", features))?; let supported = self.protocol_features(); self.acked_protocol_features = features & supported; Ok(()) } fn acked_protocol_features(&self) -> u64 { self.acked_protocol_features.bits() } fn read_config(&self, offset: u64, data: &mut [u8]) { let config = virtio_fs_config { tag: self.tag, num_request_queues: Le32::from(1), }; copy_config(data, 0, config.as_slice(), offset); } fn reset(&mut self) { for handle in self.workers.iter_mut().filter_map(Option::take) { handle.abort(); } } fn start_queue( &mut self, idx: usize, mut queue: virtio::Queue, mem: GuestMemory, call_evt: Arc<Mutex<CallEvent>>, kick_evt: Event, ) -> anyhow::Result<()> { if let Some(handle) = self.workers.get_mut(idx).and_then(Option::take) { warn!("Starting new queue handler without stopping old handler"); handle.abort(); } // Safe because the executor is initialized in main() below. let ex = FS_EXECUTOR.get().expect("Executor not initialized"); // Enable any virtqueue features that were negotiated (like VIRTIO_RING_F_EVENT_IDX). 
queue.ack_features(self.acked_features); let kick_evt = EventAsync::new(kick_evt.0, ex).context("failed to create EventAsync for kick_evt")?; let (handle, registration) = AbortHandle::new_pair(); let (_, fs_device_tube) = Tube::pair()?; ex.spawn_local(Abortable::new( handle_fs_queue( queue, mem, call_evt, kick_evt, self.server.clone(), Arc::new(Mutex::new(fs_device_tube)), ), registration, )) .detach(); self.workers[idx] = Some(handle); Ok(()) } fn stop_queue(&mut self, idx: usize) { if let Some(handle) = self.workers.get_mut(idx).and_then(Option::take) { handle.abort(); } } } #[derive(FromArgs)] #[argh(description = "")] struct Options { #[argh(option, description = "path to a socket", arg_name = "PATH")] socket: String, #[argh(option, description = "the virtio-fs tag", arg_name = "TAG")] tag: String, #[argh(option, description = "path to a directory to share", arg_name = "DIR")] shared_dir: PathBuf, #[argh(option, description = "uid map to use", arg_name = "UIDMAP")] uid_map: Option<String>, #[argh(option, description = "gid map to use", arg_name = "GIDMAP")] gid_map: Option<String>, } /// Starts a vhost-user fs device. /// Returns an error if the given `args` is invalid or the device fails to run. pub fn run_fs_device(program_name: &str, args: &[&str]) -> anyhow::Result<()> { let opts = match Options::from_args(&[program_name], args) { Ok(opts) => opts, Err(e) => { if e.status.is_err() { bail!(e.output); } else { println!("{}", e.output); } return Ok(()); } }; base::syslog::init().context("Failed to initialize syslog")?; let fs_device = FsBackend::new(&opts.tag)?; // Create and bind unix socket let listener = UnixListener::bind(opts.socket).map(UnlinkUnixListener)?; let mut keep_rds = fs_device.keep_rds.clone(); keep_rds.push(listener.as_raw_fd()); base::syslog::push_descriptors(&mut keep_rds); let handler = DeviceRequestHandler::new(fs_device); let pid = jail_and_fork(keep_rds, opts.shared_dir, opts.uid_map, opts.gid_map)?; // Parent, nothing to do but wait and then exit if pid != 0 { unsafe { libc::waitpid(pid, std::ptr::null_mut(), 0) }; return Ok(()); } // We need to set the no setuid fixup secure bit so that we don't drop capabilities when // changing the thread uid/gid. Without this, creating new entries can fail in some corner // cases. const SECBIT_NO_SETUID_FIXUP: i32 = 1 << 2; // TODO(crbug.com/1199487): Remove this once libc provides the wrapper for all targets. #[cfg(target_os = "linux")] { // Safe because this doesn't modify any memory and we check the return value. let mut securebits = unsafe { libc::prctl(libc::PR_GET_SECUREBITS) }; if securebits < 0 { bail!(io::Error::last_os_error()); } securebits |= SECBIT_NO_SETUID_FIXUP; // Safe because this doesn't modify any memory and we check the return value. let ret = unsafe { libc::prctl(libc::PR_SET_SECUREBITS, securebits) }; if ret < 0 { bail!(io::Error::last_os_error()); } } // Child, we can continue by spawning the executor and setting up the device let ex = Executor::new().context("Failed to create executor")?; let _ = FS_EXECUTOR.set(ex.clone()); if let Err(e) = ex.run_until(handler.run_with_listener(listener, &ex)) { bail!(e); } Ok(()) }
{ unsafe { libc::prctl(libc::PR_SET_PDEATHSIG, libc::SIGTERM) }; }
conditional_block
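The masked conditional in this row arms `PR_SET_PDEATHSIG` right after the jailed fork, so one side of the fork receives `SIGTERM` when its parent dies. As a standalone illustration of the prctl itself with a plain `libc::fork` (not the minijail flow; a common pattern is to arm it in the child, and the libc crate on Linux is assumed):

// Hedged sketch: plain fork plus parent-death signal; minimal error handling.
fn main() {
    let pid = unsafe { libc::fork() };
    match pid {
        0 => {
            // Child: ask the kernel to deliver SIGTERM if the parent exits.
            unsafe { libc::prctl(libc::PR_SET_PDEATHSIG, libc::SIGTERM) };
            // ... child work would go here ...
        }
        p if p > 0 => {
            // Parent: reap the child, as run_fs_device does after jail_and_fork.
            unsafe { libc::waitpid(p, std::ptr::null_mut(), 0) };
        }
        _ => eprintln!("fork failed: {}", std::io::Error::last_os_error()),
    }
}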
fs.rs
// Copyright 2021 The Chromium OS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use std::io; use std::os::unix::io::AsRawFd; use std::os::unix::net::UnixListener; use std::path::{Path, PathBuf}; use std::sync::Arc; use anyhow::{anyhow, bail, Context}; use argh::FromArgs; use base::{error, get_max_open_files, warn, Event, RawDescriptor, Tube, UnlinkUnixListener}; use cros_async::{EventAsync, Executor}; use data_model::{DataInit, Le32}; use fuse::Server; use futures::future::{AbortHandle, Abortable}; use hypervisor::ProtectionType; use minijail::{self, Minijail}; use once_cell::sync::OnceCell; use sync::Mutex; use vm_memory::GuestMemory; use vmm_vhost::message::{VhostUserProtocolFeatures, VhostUserVirtioFeatures}; use crate::virtio; use crate::virtio::copy_config; use crate::virtio::fs::passthrough::PassthroughFs; use crate::virtio::fs::{process_fs_queue, virtio_fs_config, FS_MAX_TAG_LEN}; use crate::virtio::vhost::user::device::handler::{ CallEvent, DeviceRequestHandler, VhostUserBackend, }; static FS_EXECUTOR: OnceCell<Executor> = OnceCell::new(); async fn handle_fs_queue( mut queue: virtio::Queue, mem: GuestMemory, call_evt: Arc<Mutex<CallEvent>>, kick_evt: EventAsync, server: Arc<fuse::Server<PassthroughFs>>, tube: Arc<Mutex<Tube>>, ) { // Slot is always going to be 0 because we do not support DAX let slot: u32 = 0; loop { if let Err(e) = kick_evt.next_val().await { error!("Failed to read kick event for fs queue: {}", e); break; } if let Err(e) = process_fs_queue(&mem, &call_evt, &mut queue, &server, &tube, slot) { error!("Process FS queue failed: {}", e); break; } } } fn default_uidmap() -> String { let euid = unsafe { libc::geteuid() }; format!("{} {} 1", euid, euid) } fn default_gidmap() -> String { let egid = unsafe { libc::getegid() }; format!("{} {} 1", egid, egid) } fn jail_and_fork( mut keep_rds: Vec<RawDescriptor>, dir_path: PathBuf, uid_map: Option<String>, gid_map: Option<String>, ) -> anyhow::Result<i32> { // Create new minijail sandbox let mut j = Minijail::new()?; j.namespace_pids(); j.namespace_user(); j.namespace_user_disable_setgroups(); j.uidmap(&uid_map.unwrap_or_else(default_uidmap))?; j.gidmap(&gid_map.unwrap_or_else(default_gidmap))?; j.run_as_init(); j.namespace_vfs(); j.namespace_net(); j.no_new_privs(); // Only pivot_root if we are not re-using the current root directory. if dir_path!= Path::new("/") { // It's safe to call `namespace_vfs` multiple times. j.namespace_vfs(); j.enter_pivot_root(&dir_path)?; } j.set_remount_mode(libc::MS_SLAVE); let limit = get_max_open_files().context("failed to get max open files")?; j.set_rlimit(libc::RLIMIT_NOFILE as i32, limit, limit)?; // Make sure there are no duplicates in keep_rds keep_rds.dedup(); // fork on the jail here let pid = unsafe { j.fork(Some(&keep_rds))? }; if pid > 0 { unsafe { libc::prctl(libc::PR_SET_PDEATHSIG, libc::SIGTERM) }; } if pid < 0 { bail!("Fork error! 
{}", std::io::Error::last_os_error()); } Ok(pid) } struct FsBackend { server: Arc<fuse::Server<PassthroughFs>>, tag: [u8; FS_MAX_TAG_LEN], avail_features: u64, acked_features: u64, acked_protocol_features: VhostUserProtocolFeatures, workers: [Option<AbortHandle>; Self::MAX_QUEUE_NUM], keep_rds: Vec<RawDescriptor>, } impl FsBackend { pub fn new(tag: &str) -> anyhow::Result<Self> { if tag.len() > FS_MAX_TAG_LEN { bail!( "fs tag is too long: {} (max supported: {})", tag.len(), FS_MAX_TAG_LEN ); } let mut fs_tag = [0u8; FS_MAX_TAG_LEN]; fs_tag[..tag.len()].copy_from_slice(tag.as_bytes()); let avail_features = virtio::base_features(ProtectionType::Unprotected) | VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits(); // Use default passthroughfs config let fs = PassthroughFs::new(Default::default())?; let mut keep_rds: Vec<RawDescriptor> = [0, 1, 2].to_vec(); keep_rds.append(&mut fs.keep_rds()); let server = Arc::new(Server::new(fs)); Ok(FsBackend { server, tag: fs_tag, avail_features, acked_features: 0, acked_protocol_features: VhostUserProtocolFeatures::empty(), workers: Default::default(), keep_rds, }) } } impl VhostUserBackend for FsBackend { const MAX_QUEUE_NUM: usize = 2; /* worker queue and high priority queue */ const MAX_VRING_LEN: u16 = 1024; type Doorbell = CallEvent; type Error = anyhow::Error; fn features(&self) -> u64 { self.avail_features } fn ack_features(&mut self, value: u64) -> anyhow::Result<()> { let unrequested_features = value & !self.avail_features; if unrequested_features != 0 { bail!("invalid features are given: {:#x}", unrequested_features); } self.acked_features |= value; Ok(()) } fn acked_features(&self) -> u64 { self.acked_features } fn protocol_features(&self) -> VhostUserProtocolFeatures { VhostUserProtocolFeatures::CONFIG | VhostUserProtocolFeatures::MQ } fn ack_protocol_features(&mut self, features: u64) -> anyhow::Result<()> { let features = VhostUserProtocolFeatures::from_bits(features) .ok_or_else(|| anyhow!("invalid protocol features are given: {:#x}", features))?; let supported = self.protocol_features(); self.acked_protocol_features = features & supported; Ok(()) } fn acked_protocol_features(&self) -> u64 { self.acked_protocol_features.bits() } fn read_config(&self, offset: u64, data: &mut [u8]) { let config = virtio_fs_config { tag: self.tag, num_request_queues: Le32::from(1), }; copy_config(data, 0, config.as_slice(), offset); } fn reset(&mut self) { for handle in self.workers.iter_mut().filter_map(Option::take) { handle.abort(); } } fn start_queue( &mut self, idx: usize, mut queue: virtio::Queue, mem: GuestMemory, call_evt: Arc<Mutex<CallEvent>>, kick_evt: Event, ) -> anyhow::Result<()> { if let Some(handle) = self.workers.get_mut(idx).and_then(Option::take) { warn!("Starting new queue handler without stopping old handler"); handle.abort(); } // Safe because the executor is initialized in main() below. let ex = FS_EXECUTOR.get().expect("Executor not initialized"); // Enable any virtqueue features that were negotiated (like VIRTIO_RING_F_EVENT_IDX). 
queue.ack_features(self.acked_features); let kick_evt = EventAsync::new(kick_evt.0, ex).context("failed to create EventAsync for kick_evt")?; let (handle, registration) = AbortHandle::new_pair(); let (_, fs_device_tube) = Tube::pair()?; ex.spawn_local(Abortable::new( handle_fs_queue( queue, mem, call_evt, kick_evt, self.server.clone(), Arc::new(Mutex::new(fs_device_tube)), ), registration, )) .detach(); self.workers[idx] = Some(handle); Ok(()) } fn stop_queue(&mut self, idx: usize) { if let Some(handle) = self.workers.get_mut(idx).and_then(Option::take) { handle.abort(); } } } #[derive(FromArgs)] #[argh(description = "")] struct Options { #[argh(option, description = "path to a socket", arg_name = "PATH")] socket: String, #[argh(option, description = "the virtio-fs tag", arg_name = "TAG")] tag: String, #[argh(option, description = "path to a directory to share", arg_name = "DIR")] shared_dir: PathBuf,
/// Starts a vhost-user fs device. /// Returns an error if the given `args` is invalid or the device fails to run. pub fn run_fs_device(program_name: &str, args: &[&str]) -> anyhow::Result<()> { let opts = match Options::from_args(&[program_name], args) { Ok(opts) => opts, Err(e) => { if e.status.is_err() { bail!(e.output); } else { println!("{}", e.output); } return Ok(()); } }; base::syslog::init().context("Failed to initialize syslog")?; let fs_device = FsBackend::new(&opts.tag)?; // Create and bind unix socket let listener = UnixListener::bind(opts.socket).map(UnlinkUnixListener)?; let mut keep_rds = fs_device.keep_rds.clone(); keep_rds.push(listener.as_raw_fd()); base::syslog::push_descriptors(&mut keep_rds); let handler = DeviceRequestHandler::new(fs_device); let pid = jail_and_fork(keep_rds, opts.shared_dir, opts.uid_map, opts.gid_map)?; // Parent, nothing to do but wait and then exit if pid != 0 { unsafe { libc::waitpid(pid, std::ptr::null_mut(), 0) }; return Ok(()); } // We need to set the no setuid fixup secure bit so that we don't drop capabilities when // changing the thread uid/gid. Without this, creating new entries can fail in some corner // cases. const SECBIT_NO_SETUID_FIXUP: i32 = 1 << 2; // TODO(crbug.com/1199487): Remove this once libc provides the wrapper for all targets. #[cfg(target_os = "linux")] { // Safe because this doesn't modify any memory and we check the return value. let mut securebits = unsafe { libc::prctl(libc::PR_GET_SECUREBITS) }; if securebits < 0 { bail!(io::Error::last_os_error()); } securebits |= SECBIT_NO_SETUID_FIXUP; // Safe because this doesn't modify any memory and we check the return value. let ret = unsafe { libc::prctl(libc::PR_SET_SECUREBITS, securebits) }; if ret < 0 { bail!(io::Error::last_os_error()); } } // Child, we can continue by spawning the executor and setting up the device let ex = Executor::new().context("Failed to create executor")?; let _ = FS_EXECUTOR.set(ex.clone()); if let Err(e) = ex.run_until(handler.run_with_listener(listener, &ex)) { bail!(e); } Ok(()) }
#[argh(option, description = "uid map to use", arg_name = "UIDMAP")] uid_map: Option<String>, #[argh(option, description = "gid map to use", arg_name = "GIDMAP")] gid_map: Option<String>, }
random_line_split
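`run_fs_device` sets `SECBIT_NO_SETUID_FIXUP` so the process keeps its capabilities across the later uid/gid switch, doing a prctl read-modify-write on the securebits word. A self-contained sketch of that sequence (assuming the libc crate on Linux; the constant value mirrors the source, which defines it locally because libc lacked a wrapper per the TODO):

const SECBIT_NO_SETUID_FIXUP: i32 = 1 << 2;

fn set_no_setuid_fixup() -> std::io::Result<()> {
    // Read the current securebits for this thread.
    let bits = unsafe { libc::prctl(libc::PR_GET_SECUREBITS) };
    if bits < 0 {
        return Err(std::io::Error::last_os_error());
    }
    // Set the flag and write the word back.
    if unsafe { libc::prctl(libc::PR_SET_SECUREBITS, bits | SECBIT_NO_SETUID_FIXUP) } < 0 {
        return Err(std::io::Error::last_os_error());
    }
    Ok(())
}

fn main() {
    if let Err(e) = set_no_setuid_fixup() {
        eprintln!("prctl failed: {}", e);
    }
}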
clob.rs
//! Contains types for working with large character objects. use std::io; use {Connection, Result, DbResult}; use types::Charset; use ffi::native::lob::{Lob, LobImpl, LobOpenMode, CharsetForm}; use ffi::types::Piece; use super::{Bytes, Chars, LobPrivate}; //------------------------------------------------------------------------------------------------- /// Pointer to a large character object (CLOB or NCLOB). #[derive(Debug, PartialEq, Eq)] pub struct Clob<'conn> { /// FFI object for type-safe interaction with the database impl_: LobImpl<'conn, Lob>, /// Kind of character object: in the database charset (CLOB) or in the national charset (NCLOB). form: CharsetForm, } impl<'conn> Clob<'conn> { /// Gets the number of characters currently stored in this object. /// /// Keep in mind that a "character" as Oracle understands it is a single UTF-16 code unit, which occupies /// 2 bytes. Thus, Unicode code points represented by [surrogate pairs][utf-16] in UTF-16 /// count as 2 characters. /// /// [utf-16]: https://ru.wikipedia.org/wiki/UTF-16#.D0.9F.D1.80.D0.B8.D0.BD.D1.86.D0.B8.D0.BF_.D0.BA.D0.BE.D0.B4.D0.B8.D1.80.D0.BE.D0.B2.D0.B0.D0.BD.D0.B8.D1.8F #[inline] pub fn len(&self) -> Result<Chars> { let len = try!(self.impl_.len()); Ok(Chars(len)) } /// Gets the maximum number of *bytes* that can be stored in this object. /// Depending on the database server configuration this value can range from /// 8 to 128 terabytes (TB). #[inline] pub fn capacity(&self) -> Result<Bytes> { let len = try!(self.impl_.capacity()); Ok(Bytes(len)) } /// For LOBs with storage parameter `BASICFILE`, the amount of a chunk's space that is used to store /// the internal LOB value. This is the amount that users should use when reading or writing the LOB /// value. If possible, users should start their writes at chunk boundaries, such as the beginning of /// a chunk, and write a chunk at a time. /// /// For LOBs with storage parameter `SECUREFILE`, chunk size is an advisory size and is provided for /// backward compatibility. /// /// When creating a table that contains an internal LOB, the user can specify the chunking factor, /// which can be a multiple of Oracle Database blocks. This corresponds to the chunk size used by /// the LOB data layer when accessing and modifying the LOB value. Part of the chunk is used to store /// system-related information, and the rest stores the LOB value. This function returns the amount /// of space used in the LOB chunk to store the LOB value. Performance is improved if the application /// issues read or write requests using a multiple of this chunk size. For writes, there is an added /// benefit because LOB chunks are versioned and, if all writes are done on a chunk basis, no extra /// versioning is done or duplicated. Users could batch up the write until they have enough for a chunk /// instead of issuing several write calls for the same chunk. #[inline] pub fn get_chunk_size(&self) -> Result<Bytes> { let size = try!(self.impl_.get_chunk_size()); Ok(Bytes(size as u64)) } /// Truncates this object to the specified length. If the new length is greater than the previous one, an /// error is returned (thus this function cannot be used to increase the size of a LOB). /// /// # Performance /// Keep in mind that for frequent writes it is preferable to write through a dedicated /// writer object, which can be obtained from this object by calling [`new_writer()`](#function.new_writer).
/// If you do it that way, updating the database's functional and domain indexes (if any) /// for this large object is deferred until the writer object is destroyed. /// If you call this function instead, those indexes are updated as soon as the call completes, which /// can significantly hurt performance. #[inline] pub fn trim(&mut self, len: Chars) -> Result<()> { self.impl_.trim(len.0).map_err(Into::into) } /// Fills the LOB with the specified number of spaces starting at the specified index. When the call /// completes, `count` holds the actual number of characters cleared. /// /// # Performance /// Keep in mind that for frequent writes it is preferable to write through a dedicated /// writer object, which can be obtained from this object by calling [`new_writer()`](#function.new_writer). /// If you do it that way, updating the database's functional and domain indexes (if any) /// for this large object is deferred until the writer object is destroyed. /// If you call this function instead, those indexes are updated as soon as the call completes, which /// can significantly hurt performance. #[inline] pub fn erase(&mut self, offset: Chars, count: &mut Chars) -> Result<()> { self.impl_.erase(offset.0, &mut count.0).map_err(Into::into) } /// Creates a reader for this character object. Each call to the reader's `read` method reads the next portion of data. /// The data is read from the CLOB in the `UTF-8` encoding. #[inline] pub fn new_reader<'lob>(&'lob mut self) -> Result<ClobReader<'lob, 'conn>> { self.new_reader_with_charset(Charset::AL32UTF8) } /// Creates a reader for this character object. Each call to the reader's `read` method reads the next portion of data. /// The data is read from the CLOB in the specified charset. /// /// Each `read` call fills the buffer with bytes in the requested charset. Because Rust's standard helpers for /// treating a byte reader as a text reader assume the bytes are UTF-8, they cannot be used with /// this reader, since the text will be extracted in the specified charset. #[inline] pub fn new_reader_with_charset<'lob>(&'lob mut self, charset: Charset) -> Result<ClobReader<'lob, 'conn>> { try!(self.impl_.open(LobOpenMode::ReadOnly)); Ok(ClobReader { lob: self, piece: Piece::First, charset: charset }) } /// Creates a writer into this character object. The advantage of using a writer instead of writing directly /// to the object is that the database's functional and domain indexes (if any) for this large /// object are updated only once the writer is destroyed, rather than on every write to the object, which /// benefits performance. /// /// Within a single transaction one CLOB may be opened only once, regardless of how many /// locators (which this class represents) exist for it. #[inline] pub fn new_writer<'lob>(&'lob mut self) -> Result<ClobWriter<'lob, 'conn>> { self.new_writer_with_charset(Charset::AL32UTF8) } /// Creates a writer into this character object that writes text data represented in the specified charset. /// /// The advantage of using a writer instead of writing directly to the object is that the functional and domain indexes /// of the database (if any) for this large object are updated only once the writer is destroyed, rather /// than on every write to the object, which benefits performance.
/// /// Within a single transaction one CLOB may be opened only once, regardless of how many /// locators (which this class represents) exist for it. #[inline] pub fn new_writer_with_charset<'lob>(&'lob mut self, charset: Charset) -> Result<ClobWriter<'lob, 'conn>> { try!(self.impl_.open(LobOpenMode::WriteOnly)); Ok(ClobWriter { lob: self, piece: Piece::First, charset: charset }) } /// Gets the database charset for this large character object. #[inline] pub fn charset(&self) -> Result<Charset> { self.impl_.charset().map_err(Into::into) } /// If the CLOB has not been fully read or written, tells the database that no further reads/writes are needed /// and closes the CLOB. fn close(&mut self, piece: Piece) -> DbResult<()> { // If the LOB was not fully read/written, cancel the pending read/write requests and recover if piece != Piece::Last { try!(self.impl_.break_()); try!(self.impl_.reset()); } self.impl_.close() } } impl<'conn> LobPrivate<'conn> for Clob<'conn> { fn new(raw: &[u8], conn: &'conn Connection) -> Result<Self> { let p = raw.as_ptr() as *const *mut Lob; let locator = unsafe { *p as *mut Lob }; let impl_ = LobImpl::from(conn, locator); let form = try!(impl_.form()); Ok(Clob { impl_: impl_, form: form }) } } impl<'conn> io::Read for Clob<'conn> { fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { self.impl_.read(Piece::One, Charset::AL32UTF8, self.form, buf).0 } } impl<'conn> io::Write for Clob<'conn> { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { self.impl_.write(Piece::One, Charset::AL32UTF8, self.form, buf).0 } fn flush(&mut self) -> io::Result<()> { Ok(()) } } //------------------------------------------------------------------------------------------------- /// Allows writing to a large character object without triggering index recalculation after every write. /// The indexes will be recalculated only after this object is destroyed. #[derive(Debug)] pub struct ClobWriter<'lob, 'conn: 'lob> { lob: &'lob mut Clob<'conn>, piece: Piece, charset: Charset, } impl<'lob, 'conn: 'lob> ClobWriter<'lob, 'conn> { /// Gets the `CLOB` being written by this writer. pub fn lob(&mut self) -> &mut Clob<'conn> { self.lob } /// Truncates this object to the specified length. If the new length is greater than the previous one, an /// error is returned (thus this function cannot be used to increase the size of a LOB). #[inline] pub fn trim(&mut self, len: Chars) -> Result<()> { self.lob.trim(len) } /// Fills the LOB with the specified number of zeros starting at the specified index. When the call /// completes, `count` holds the actual number of bytes cleared. #[inline] pub fn erase(&mut self, offset: Chars, count: &mut Chars) -> Result<()> { self.lob.erase(offset, count) } } impl<'lob, 'conn: 'lob> io::Write for ClobWriter<'lob, 'conn> { #[inline] fn write(&mut self, buf: &[u8]) -> io::Result<usize> { let (res, piece) = self.lob.impl_.write(self.piece, self.charset, self.lob.form, buf); self.piece = piece; res } #[inline] fn flush(&mut self) -> io::Result<()> { Ok(()) } } impl<'lob, 'conn: 'lob> Drop for ClobWriter<'lob, 'conn> { fn drop(&mut self) { // Cannot panic from here, because that would crash the application let _ = self.lob.close(self.piece);//.expect("Error when close CLOB writer"); } } //------------------------------------------------------------------------------------------------- /// Allows reading from a large character object in streaming mode. Each call to `read` reads the next portion of data.
#[derive(Debug)] pub struct ClobReader<'lob, 'conn: 'lob> { lob: &'lob mut Clob<'conn>, /// Descriptor of the portion of data being received from the database (whether it is the first one or not). piece: Piece, /// Charset in which the bytes received from the database should be interpreted. charset: Charset, } impl<'lob, 'conn: 'lob> ClobReader<'lob, 'conn> { /// Gets the `CLOB` being read by this reader. pub fn lob(&mut self) -> &mut Clob<'conn> { self.lob } } impl<'lob, 'conn: 'lob> io::Read for ClobReader<'lob, 'conn> { #[inline] fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { let (
m, buf); self.piece = piece; res } } impl<'lob, 'conn: 'lob> Drop for ClobReader<'lob, 'conn> { fn drop(&mut self) { // Cannot panic from here, because that would crash the application let _ = self.lob.close(self.piece);//.expect("Error when close CLOB reader"); } }
res, piece) = self.lob.impl_.read(self.piece, self.charset, self.lob.for
conditional_block
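The `get_chunk_size` documentation in this row recommends batching LOB writes into multiples of the chunk size so each chunk is versioned only once. A hedged sketch of that batching strategy over any `io::Write` sink (`ChunkedWriter` is hypothetical; a real caller would wrap a `ClobWriter` and use the value returned by `get_chunk_size`):

use std::io::{self, Write};

/// Buffers incoming writes and forwards only whole chunks downstream.
struct ChunkedWriter<W: Write> {
    inner: W,
    chunk: usize,
    buf: Vec<u8>,
}

impl<W: Write> Write for ChunkedWriter<W> {
    fn write(&mut self, data: &[u8]) -> io::Result<usize> {
        self.buf.extend_from_slice(data);
        // Forward complete chunks; keep the remainder buffered.
        while self.buf.len() >= self.chunk {
            let rest = self.buf.split_off(self.chunk);
            self.inner.write_all(&self.buf)?;
            self.buf = rest;
        }
        Ok(data.len())
    }
    fn flush(&mut self) -> io::Result<()> {
        self.inner.write_all(&self.buf)?; // final partial chunk
        self.buf.clear();
        self.inner.flush()
    }
}

fn main() -> io::Result<()> {
    let mut w = ChunkedWriter { inner: Vec::new(), chunk: 4, buf: Vec::new() };
    w.write_all(b"abcdefghij")?;
    w.flush()?;
    assert_eq!(w.inner, b"abcdefghij");
    Ok(())
}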
clob.rs
//! Contains types for working with large character objects. use std::io; use {Connection, Result, DbResult}; use types::Charset; use ffi::native::lob::{Lob, LobImpl, LobOpenMode, CharsetForm}; use ffi::types::Piece; use super::{Bytes, Chars, LobPrivate}; //------------------------------------------------------------------------------------------------- /// Pointer to a large character object (CLOB or NCLOB). #[derive(Debug, PartialEq, Eq)] pub struct Clob<'conn> { /// FFI object for type-safe interaction with the database impl_: LobImpl<'conn, Lob>, /// Kind of character object: in the database charset (CLOB) or in the national charset (NCLOB). form: CharsetForm, } impl<'conn> Clob<'conn> { /// Gets the number of characters currently stored in this object. /// /// Keep in mind that a "character" as Oracle understands it is a single UTF-16 code unit, which occupies /// 2 bytes. Thus, Unicode code points represented by [surrogate pairs][utf-16] in UTF-16 /// count as 2 characters. /// /// [utf-16]: https://ru.wikipedia.org/wiki/UTF-16#.D0.9F.D1.80.D0.B8.D0.BD.D1.86.D0.B8.D0.BF_.D0.BA.D0.BE.D0.B4.D0.B8.D1.80.D0.BE.D0.B2.D0.B0.D0.BD.D0.B8.D1.8F #[inline] pub fn len(&self) -> Result<Chars> { let len = try!(self.impl_.len()); Ok(Chars(len)) } /// Gets the maximum number of *bytes* that can be stored in this object. /// Depending on the database server configuration this value can range from /// 8 to 128 terabytes (TB). #[inline] pub fn capacity(&self) -> Result<Bytes> { let len = try!(self.impl_.capacity()); Ok(Bytes(len)) } /// For LOBs with storage parameter `BASICFILE`, the amount of a chunk's space that is used to store /// the internal LOB value. This is the amount that users should use when reading or writing the LOB /// value. If possible, users should start their writes at chunk boundaries, such as the beginning of /// a chunk, and write a chunk at a time. /// /// For LOBs with storage parameter `SECUREFILE`, chunk size is an advisory size and is provided for /// backward compatibility. /// /// When creating a table that contains an internal LOB, the user can specify the chunking factor, /// which can be a multiple of Oracle Database blocks. This corresponds to the chunk size used by /// the LOB data layer when accessing and modifying the LOB value. Part of the chunk is used to store /// system-related information, and the rest stores the LOB value. This function returns the amount /// of space used in the LOB chunk to store the LOB value. Performance is improved if the application /// issues read or write requests using a multiple of this chunk size. For writes, there is an added /// benefit because LOB chunks are versioned and, if all writes are done on a chunk basis, no extra /// versioning is done or duplicated. Users could batch up the write until they have enough for a chunk /// instead of issuing several write calls for the same chunk. #[inline] pub fn get_chunk_size(&self) -> Result<Bytes> { let size = try!(self.impl_.get_chunk_size()); Ok(Bytes(size as u64)) } /// Truncates this object to the specified length. If the new length is greater than the previous one, an /// error is returned (thus this function cannot be used to increase the size of a LOB). /// /// # Performance /// Keep in mind that for frequent writes it is preferable to write through a dedicated /// writer object, which can be obtained from this object by calling [`new_writer()`](#function.new_writer).
/// If you do it that way, updating the database's functional and domain indexes (if any) /// for this large object is deferred until the writer object is destroyed. /// If you call this function instead, those indexes are updated as soon as the call completes, which /// can significantly hurt performance. #[inline] pub fn trim(&mut self, len: Chars) -> Result<()> { self.impl_.trim(len.0).map_err(Into::into) } /// Fills the LOB with the specified number of spaces starting at the specified index. When the call /// completes, `count` holds the actual number of characters cleared. /// /// # Performance /// Keep in mind that for frequent writes it is preferable to write through a dedicated /// writer object, which can be obtained from this object by calling [`new_writer()`](#function.new_writer). /// If you do it that way, updating the database's functional and domain indexes (if any) /// for this large object is deferred until the writer object is destroyed. /// If you call this function instead, those indexes are updated as soon as the call completes, which /// can significantly hurt performance. #[inline] pub fn erase(&mut self, offset: Chars, count: &mut Chars) -> Result<()> { self.impl_.erase(offset.0, &mut count.0).map_err(Into::into) } /// Creates a reader for this character object. Each call to the reader's `read` method reads the next portion of data. /// The data is read from the CLOB in the `UTF-8` encoding. #[inline] pub fn new_reader<'lob>(&'lob mut self) -> Result<ClobReader<'lob, 'conn>> { self.new_reader_with_charset(Charset::AL32UTF8) } /// Creates a reader for this character object. Each call to the reader's `read` method reads the next portion of data. /// The data is read from the CLOB in the specified charset. /// /// Each `read` call fills the buffer with bytes in the requested charset. Because Rust's standard helpers for /// treating a byte reader as a text reader assume the bytes are UTF-8, they cannot be used with /// this reader, since the text will be extracted in the specified charset. #[inline] pub fn new_reader_with_charset<'lob>(&'lob mut self, charset: Charset) -> Result<ClobReader<'lob, 'conn>> { try!(self.impl_.open(LobOpenMode::ReadOnly)); Ok(ClobReader { lob: self, piece: Piece::First, charset: charset }) } /// Creates a writer into this character object. The advantage of using a writer instead of writing directly /// to the object is that the database's functional and domain indexes (if any) for this large /// object are updated only once the writer is destroyed, rather than on every write to the object, which /// benefits performance. /// /// Within a single transaction one CLOB may be opened only once, regardless of how many /// locators (which this class represents) exist for it. #[inline] pub fn new_writer<'lob>(&'lob mut self) -> Result<ClobWriter<'lob, 'conn>> { self.new_writer_with_charset(Charset::AL32UTF8) } /// Creates a writer into this character object that writes text data represented in the specified charset. /// /// The advantage of using a writer instead of writing directly to the object is that the functional and domain indexes /// of the database (if any) for this large object are updated only once the writer is destroyed, rather /// than on every write to the object, which benefits performance.
/// /// В пределах одной транзакции один CLOB может быть открыт только единожды, независимо от того, сколько /// локаторов (которые представляет данный класс) на него существует. #[inline] pub fn new_writer_with_charset<'lob>(&'lob mut self, charset: Charset) -> Result<ClobWriter<'lob, 'conn>> { try!(self.impl_.open(LobOpenMode::WriteOnly)); Ok(ClobWriter { lob: self, piece: Piece::First, charset: charset }) } /// Получает кодировку базы данных для данного большого символьного объекта. #[inline] pub fn charset(&self) -> Result<Charset> { self.impl_.charset().map_err(Into::into) } /// Если CLOB прочитан или записан не полностью, то сообщает базе данных, что дальнейшее чтение/запись не требуются /// и закрывает CLOB. fn close(&mut self, piece: Piece) -> DbResult<()> { // Если LOB был прочитан/записан не полностью, то отменяем запросы на чтение/запись и восстанавливаемся if piece!= Piece::Last { try!(self.impl_.break_()); try!(self.impl_.reset()); } self.impl_.close() } } impl<'conn> LobPrivate<'conn> for Clob<'conn> { fn new(raw: &[u8], conn: &'conn Connection) -> Result<Self> { let p = raw.as_ptr() as *const *mut Lob; let locator = unsafe { *p as *mut Lob }; let impl_ = LobImpl::from(conn, locator); let form = try!(impl_.form()); Ok(Clob { impl_: impl_, form: form }) } } impl<'conn> io::Read for Clob<'conn> { fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { self.impl_.read(Piece::One, Charset::AL32UTF8, self.form, buf).0 } } impl<'conn> io::Write for Clob<'conn> { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { self.impl_.write(Piece::One, Charset::AL32UTF8, self.form, buf).0 } fn flush(&mut self) -> io::Result<()> { Ok(()) } } //------------------------------------------------------------------------------------------------- /// Позволяет писать в большой символьный объект, не вызывая пересчета индексов после каждой записи. /// Индексы будут пересчитаны только после уничтожения данного объекта. #[derive(Debug)] pub struct ClobWriter<'lob, 'conn: 'lob> { lob: &'lob mut Clob<'conn>, piece: Piece, charset: Charset, } impl<'lob, 'conn: 'lob> ClobWriter<'lob, 'conn> { /// Получает `CLOB`, записываемый данным писателем. pub fn lob(&mut self) -> &mut Clob<'conn> { self.lob } /// Укорачивает данный объект до указанной длины. В случае, если новая длина больше предыдущей, будет /// возвращена ошибка (таким образом, данную функцию нельзя использовать для увеличения размера LOB). #[inline] pub fn trim(&mut self, len: Chars) -> Result<()> { self.lob.trim(len) } /// Заполняет LOB, начиная с указанного индекса, указанным количеством нулей. После завершения /// работы в `count` будет записано реальное количество очищенных байт. #[inline] pub fn erase(&mut self, offset: Chars, count: &mut Chars) -> Result<()> { self.lob.erase(offset, count) } } impl<'lob, 'conn: 'lob> io::Write for ClobWriter<'lob, 'conn> { #[inline] fn write(&mut self, buf: &[u8]) -> io::Result<usize> { let (res, piece) = self.lob.impl_.write(self.piece, self.charset, self.lob.form, buf); self.piece = piece; res } #[inline] fn flush(&mut self) -> io::Result<()> { Ok(()) } } impl<'lob, 'conn: 'lob> Drop for ClobWriter<'lob, 'conn> { fn drop(&mut self) { // Невозможно делать панику отсюда, т.к. приложение из-за этого крашится let _ = self.lob.close(self.piece);//.expect("Error when close CLOB writer"); } } //------------------------------------------------------------------------------------------------- /// Позволяет читать из большой бинарного объекта в потоковом режиме. Каждый вызов `read` читает очередную порцию данных. 
#[derive(Debug)] pub struct ClobReader<'lob, 'conn: 'lob> { lob: &'lob mut Clob<'conn>, /// Описательная часть порции данных, получаемых из базы данных (первая или нет). piece: Piece, /// Кодировка, в которой следует интерпретировать получаемые из базы данных байты. charset: Charset, } impl<'lob, 'conn: 'lob> ClobReader<'lob, 'conn> { /// Получает `CLOB`, читаемый данным
iece; res } } impl<'lob, 'conn: 'lob> Drop for ClobReader<'lob, 'conn> { fn drop(&mut self) { // Невозможно делать панику отсюда, т.к. приложение из-за этого крашится let _ = self.lob.close(self.piece);//.expect("Error when close CLOB reader"); } }
читателем. pub fn lob(&mut self) -> &mut Clob<'conn> { self.lob } } impl<'lob, 'conn: 'lob> io::Read for ClobReader<'lob, 'conn> { #[inline] fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { let (res, piece) = self.lob.impl_.read(self.piece, self.charset, self.lob.form, buf); self.piece = p
identifier_body
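Aside: the chunk-size documentation in the row above recommends issuing writes in multiples of the chunk size, and the writer docs say index maintenance is deferred until the writer is dropped. A minimal sketch of that pattern against the `Clob` API shown in the row — the method names come from the row itself; the free function and its error handling are illustrative, not part of the crate:

```rust
use std::io::Write;

// Sketch only: stream `data` into a CLOB in chunk-sized pieces so each
// server write lands on a chunk boundary (assumes the chunk size is non-zero).
fn write_chunked(clob: &mut Clob<'_>, data: &[u8]) -> std::io::Result<()> {
    // `Bytes` is the newtype from this module; chunk-multiple writes avoid
    // redundant chunk versioning on the server side.
    let chunk = clob.get_chunk_size().expect("chunk size").0 as usize;
    // The writer defers functional/domain index maintenance until it is dropped.
    let mut w = clob.new_writer().expect("open CLOB for writing");
    for part in data.chunks(chunk) {
        w.write_all(part)?; // one round-trip per chunk-sized piece
    }
    Ok(()) // indexes are updated once, when `w` goes out of scope
}
```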
clob.rs
//! Содержит типы для работы с большими символьными объектами. use std::io; use {Connection, Result, DbResult}; use types::Charset; use ffi::native::lob::{Lob, LobImpl, LobOpenMode, CharsetForm}; use ffi::types::Piece; use super::{Bytes, Chars, LobPrivate}; //------------------------------------------------------------------------------------------------- /// Указатель на большой символьный объект (CLOB или NCLOB). #[derive(Debug, PartialEq, Eq)] pub struct Clob<'conn> { /// FFI объект для типобезопасного взаимодействия с базой impl_: LobImpl<'conn, Lob>, /// Вид символьного объекта: в кодировке базы данных (CLOB) или в национальной кодировке (NCLOB). form: CharsetForm, } impl<'conn> Clob<'conn> { /// Получает количество символов, содержащихся в данном объекте в данный момент. /// /// Следует учитывать, что "символ" в понимании Oracle -- это один юнит кодировки UTF-16, занимающий /// 2 байта. Таким образом, кодовые точки Юникода, представленные [суррогатными парами][utf-16] в UTF-16, /// считаются, как 2 символа. /// /// [utf-16]: https://ru.wikipedia.org/wiki/UTF-16#.D0.9F.D1.80.D0.B8.D0.BD.D1.86.D0.B8.D0.BF_.D0.BA.D0.BE.D0.B4.D0.B8.D1.80.D0.BE.D0.B2.D0.B0.D0.BD.D0.B8.D1.8F #[inline] pub fn len(&self) -> Result<Chars> { let len = try!(self.impl_.len()); Ok(Chars(len)) } /// Получает максимальное количество *байт*, которое может быть сохранено в данном объекте. /// В зависимости от настроек сервера базы данных данное значение может варьироваться от /// 8 до 128 терабайт (TB). #[inline] pub fn capacity(&self) -> Result<Bytes> { let len = try!(self.impl_.capacity()); Ok(Bytes(len)) } /// For LOBs with storage parameter `BASICFILE`, the amount of a chunk's space that is used to store /// the internal LOB value. This is the amount that users should use when reading or writing the LOB /// value. If possible, users should start their writes at chunk boundaries, such as the beginning of /// a chunk, and write a chunk at a time. /// /// For LOBs with storage parameter `SECUREFILE`, chunk size is an advisory size and is provided for /// backward compatibility. /// /// When creating a table that contains an internal LOB, the user can specify the chunking factor, /// which can be a multiple of Oracle Database blocks. This corresponds to the chunk size used by /// the LOB data layer when accessing and modifying the LOB value. Part of the chunk is used to store /// system-related information, and the rest stores the LOB value. This function returns the amount /// of space used in the LOB chunk to store the LOB value. Performance is improved if the application /// issues read or write requests using a multiple of this chunk size. For writes, there is an added /// benefit because LOB chunks are versioned and, if all writes are done on a chunk basis, no extra /// versioning is done or duplicated. Users could batch up the write until they have enough for a chunk /// instead of issuing several write calls for the same chunk. #[inline] pub fn get_chunk_size(&self) -> Result<Bytes> { let size = try!(self.impl_.get_chunk_size()); Ok(Bytes(size as u64)) } /// Укорачивает данный объект до указанной длины. В случае, если новая длина больше предыдущей, будет /// возвращена ошибка (таким образом, данную функцию нельзя использовать для увеличения размера LOB). /// /// # Производительность /// Необходимо учитывать, что в случае частой записи предпочтительней делать ее через специальный /// объект-писатель, который можно получить из данного объекта вызовом функции [`new_writer()`](#function.new_writer). 
/// Если поступить таким образом, то обновление функциональных и доменных индексов базы данных (если они /// есть) для данного большого объекта будет отложено до тех пор, пока объект-писатель не будет уничтожен. /// При вызове же данной функции обновление данных индексов произойдет сразу же по окончании вызова, что /// может сильно снизить производительность. #[inline] pub fn trim(&mut self, len: Chars) -> Result<()> { self.impl_.trim(len.0).map_err(Into::into) } /// Заполняет LOB, начиная с указанного индекса, указанным количеством пробелов. После завершения /// работы в `count` будет записано реальное количество очищенных символов. /// /// # Производительность /// Необходимо учитывать, что в случае частой записи предпочтительней делать ее через специальный /// объект-писатель, который можно получить из данного объекта вызовом функции [`new_writer()`](#function.new_writer). /// Если поступить таким образом, то обновление функциональных и доменных индексов базы данных (если они /// есть) для данного большого объекта будет отложено до тех пор, пока объект-писатель не будет уничтожен. /// При вызове же данной функции обновление данных индексов произойдет сразу же по окончании вызова, что /// может сильно снизить производительность. #[inline] pub fn erase(&mut self, offset: Chars, count: &mut Chars) -> Result<()> { self.impl_.erase(offset.0, &mut count.0).map_err(Into::into) } /// Создает читателя данного символьного объекта. Каждый вызов метода `read` читателя читает очередную порцию данных.
} /// Создает читателя данного символьного объекта. Каждый вызов метода `read` читателя читает очередную порцию данных. /// Данные читаются из CLOB-а в указанной кодировке. /// /// Каждый вызов `read` будет заполнять массив байтами в запрошенной кодировке. Так как стандартные методы Rust для /// работы с читателем байт как с читателем текста предполагают, что текст представлен в UTF-8, то их нельзя использовать для /// данного читателя, т.к. текст будет извлекаться в указанной кодировке. #[inline] pub fn new_reader_with_charset<'lob>(&'lob mut self, charset: Charset) -> Result<ClobReader<'lob, 'conn>> { try!(self.impl_.open(LobOpenMode::ReadOnly)); Ok(ClobReader { lob: self, piece: Piece::First, charset: charset }) } /// Создает писателя в данный символьный объект. Преимущество использования писателя вместо прямой записи /// в объект в том, что функциональные и доменные индексы базы данных (если они есть) для данного большого /// объекта будут обновлены только после уничтожения писателя, а не при каждой записи в объект, что в /// лучшую сторону сказывается на производительности. /// /// В пределах одной транзакции один CLOB может быть открыт только единожды, независимо от того, сколько /// локаторов (которые представляет данный класс) на него существует. #[inline] pub fn new_writer<'lob>(&'lob mut self) -> Result<ClobWriter<'lob, 'conn>> { self.new_writer_with_charset(Charset::AL32UTF8) } /// Создает писателя в данный символьный объект, записывающий текстовые данные, представленные в указанной кодировке. /// /// Преимущество использования писателя вместо прямой записи в объект в том, что функциональные и доменные индексы /// базы данных (если они есть) для данного большого объекта будут обновлены только после уничтожения писателя, а не /// при каждой записи в объект, что в лучшую сторону сказывается на производительности. /// /// В пределах одной транзакции один CLOB может быть открыт только единожды, независимо от того, сколько /// локаторов (которые представляет данный класс) на него существует. #[inline] pub fn new_writer_with_charset<'lob>(&'lob mut self, charset: Charset) -> Result<ClobWriter<'lob, 'conn>> { try!(self.impl_.open(LobOpenMode::WriteOnly)); Ok(ClobWriter { lob: self, piece: Piece::First, charset: charset }) } /// Получает кодировку базы данных для данного большого символьного объекта. #[inline] pub fn charset(&self) -> Result<Charset> { self.impl_.charset().map_err(Into::into) } /// Если CLOB прочитан или записан не полностью, то сообщает базе данных, что дальнейшее чтение/запись не требуются /// и закрывает CLOB.
fn close(&mut self, piece: Piece) -> DbResult<()> { // Если LOB был прочитан/записан не полностью, то отменяем запросы на чтение/запись и восстанавливаемся if piece!= Piece::Last { try!(self.impl_.break_()); try!(self.impl_.reset()); } self.impl_.close() } } impl<'conn> LobPrivate<'conn> for Clob<'conn> { fn new(raw: &[u8], conn: &'conn Connection) -> Result<Self> { let p = raw.as_ptr() as *const *mut Lob; let locator = unsafe { *p as *mut Lob }; let impl_ = LobImpl::from(conn, locator); let form = try!(impl_.form()); Ok(Clob { impl_: impl_, form: form }) } } impl<'conn> io::Read for Clob<'conn> { fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { self.impl_.read(Piece::One, Charset::AL32UTF8, self.form, buf).0 } } impl<'conn> io::Write for Clob<'conn> { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { self.impl_.write(Piece::One, Charset::AL32UTF8, self.form, buf).0 } fn flush(&mut self) -> io::Result<()> { Ok(()) } } //------------------------------------------------------------------------------------------------- /// Позволяет писать в большой символьный объект, не вызывая пересчета индексов после каждой записи. /// Индексы будут пересчитаны только после уничтожения данного объекта. #[derive(Debug)] pub struct ClobWriter<'lob, 'conn: 'lob> { lob: &'lob mut Clob<'conn>, piece: Piece, charset: Charset, } impl<'lob, 'conn: 'lob> ClobWriter<'lob, 'conn> { /// Получает `CLOB`, записываемый данным писателем. pub fn lob(&mut self) -> &mut Clob<'conn> { self.lob } /// Укорачивает данный объект до указанной длины. В случае, если новая длина больше предыдущей, будет /// возвращена ошибка (таким образом, данную функцию нельзя использовать для увеличения размера LOB). #[inline] pub fn trim(&mut self, len: Chars) -> Result<()> { self.lob.trim(len) } /// Заполняет LOB, начиная с указанного индекса, указанным количеством нулей. После завершения /// работы в `count` будет записано реальное количество очищенных байт. #[inline] pub fn erase(&mut self, offset: Chars, count: &mut Chars) -> Result<()> { self.lob.erase(offset, count) } } impl<'lob, 'conn: 'lob> io::Write for ClobWriter<'lob, 'conn> { #[inline] fn write(&mut self, buf: &[u8]) -> io::Result<usize> { let (res, piece) = self.lob.impl_.write(self.piece, self.charset, self.lob.form, buf); self.piece = piece; res } #[inline] fn flush(&mut self) -> io::Result<()> { Ok(()) } } impl<'lob, 'conn: 'lob> Drop for ClobWriter<'lob, 'conn> { fn drop(&mut self) { // Невозможно делать панику отсюда, т.к. приложение из-за этого крашится let _ = self.lob.close(self.piece);//.expect("Error when close CLOB writer"); } } //------------------------------------------------------------------------------------------------- /// Позволяет читать из большой бинарного объекта в потоковом режиме. Каждый вызов `read` читает очередную порцию данных. #[derive(Debug)] pub struct ClobReader<'lob, 'conn: 'lob> { lob: &'lob mut Clob<'conn>, /// Описательная часть порции данных, получаемых из базы данных (первая или нет). piece: Piece, /// Кодировка, в которой следует интерпретировать получаемые из базы данных байты. charset: Charset, } impl<'lob, 'conn: 'lob> ClobReader<'lob, 'conn> { /// Получает `CLOB`, читаемый данным читателем. 
pub fn lob(&mut self) -> &mut Clob<'conn> { self.lob } } impl<'lob, 'conn: 'lob> io::Read for ClobReader<'lob, 'conn> { #[inline] fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { let (res, piece) = self.lob.impl_.read(self.piece, self.charset, self.lob.form, buf); self.piece = piece; res } } impl<'lob, 'conn: 'lob> Drop for ClobReader<'lob, 'conn> { fn drop(&mut self) { // Невозможно делать панику отсюда, т.к. приложение из-за этого крашится let _ = self.lob.close(self.piece);//.expect("Error when close CLOB reader"); } }
/// Данные читаются из CLOB-а в кодировке `UTF-8`. #[inline] pub fn new_reader<'lob>(&'lob mut self) -> Result<ClobReader<'lob, 'conn>> { self.new_reader_with_charset(Charset::AL32UTF8)
random_line_split
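Aside: the reader documented in the row above yields raw bytes in whatever charset was requested, which is why the docs warn against treating it as text in the general case. A hedged usage sketch, assuming the default `new_reader()` (AL32UTF8), where interpreting the byte stream as UTF-8 is valid; the function and its error handling are illustrative:

```rust
use std::io::Read;

// Sketch only: drain a CLOB into a `String` through the streaming reader.
fn read_all(clob: &mut Clob<'_>) -> std::io::Result<String> {
    let mut r = clob.new_reader().expect("open CLOB for reading");
    let mut text = String::new();
    r.read_to_string(&mut text)?; // each `read` call pulls the next piece
    Ok(text) // dropping `r` closes the LOB, cancelling any unfinished read
}
```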
clob.rs
//! Содержит типы для работы с большими символьными объектами. use std::io; use {Connection, Result, DbResult}; use types::Charset; use ffi::native::lob::{Lob, LobImpl, LobOpenMode, CharsetForm}; use ffi::types::Piece; use super::{Bytes, Chars, LobPrivate}; //------------------------------------------------------------------------------------------------- /// Указатель на большой символьный объект (CLOB или NCLOB). #[derive(Debug, PartialEq, Eq)] pub struct Clob<'conn> { /// FFI объект для типобезопасного взаимодействия с базой impl_: LobImpl<'conn, Lob>, /// Вид символьного объекта: в кодировке базы данных (CLOB) или в национальной кодировке (NCLOB). form: CharsetForm, } impl<'conn> Clob<'conn> { /// Получает количество символов, содержащихся в данном объекте в данный момент. /// /// Следует учитывать, что "символ" в понимании Oracle -- это один юнит кодировки UTF-16, занимающий /// 2 байта. Таким образом, кодовые точки Юникода, представленные [суррогатными парами][utf-16] в UTF-16, /// считаются, как 2 символа. /// /// [utf-16]: https://ru.wikipedia.org/wiki/UTF-16#.D0.9F.D1.80.D0.B8.D0.BD.D1.86.D0.B8.D0.BF_.D0.BA.D0.BE.D0.B4.D0.B8.D1.80.D0.BE.D0.B2.D0.B0.D0.BD.D0.B8.D1.8F #[inline] pub fn len(&self) -> Result<Chars> { let len = try!(self.impl_.len()); Ok(Chars(len)) } /// Получает максимальное количество *байт*, которое может быть сохранено в данном объекте. /// В зависимости от настроек сервера базы данных данное значение может варьироваться от /// 8 до 128 терабайт (TB). #[inline] pub fn capacity(&self) -> Result<Bytes> { let len = try!(self.impl_.capacity()); Ok(Bytes(len)) } /// For LOBs with storage parameter `BASICFILE`, the amount of a chunk's space that is used to store /// the internal LOB value. This is the amount that users should use when reading or writing the LOB /// value. If possible, users should start their writes at chunk boundaries, such as the beginning of /// a chunk, and write a chunk at a time. /// /// For LOBs with storage parameter `SECUREFILE`, chunk size is an advisory size and is provided for /// backward compatibility. /// /// When creating a table that contains an internal LOB, the user can specify the chunking factor, /// which can be a multiple of Oracle Database blocks. This corresponds to the chunk size used by /// the LOB data layer when accessing and modifying the LOB value. Part of the chunk is used to store /// system-related information, and the rest stores the LOB value. This function returns the amount /// of space used in the LOB chunk to store the LOB value. Performance is improved if the application /// issues read or write requests using a multiple of this chunk size. For writes, there is an added /// benefit because LOB chunks are versioned and, if all writes are done on a chunk basis, no extra /// versioning is done or duplicated. Users could batch up the write until they have enough for a chunk /// instead of issuing several write calls for the same chunk. #[inline] pub fn get_chunk_size(&self) -> Result<Bytes> { let size = try!(self.impl_.get_chunk_size()); Ok(Bytes(size as u64)) } /// Укорачивает данный объект до указанной длины. В случае, если новая длина больше предыдущей, будет /// возвращена ошибка (таким образом, данную функцию нельзя использовать для увеличения размера LOB). /// /// # Производительность /// Необходимо учитывать, что в случае частой записи предпочтительней делать ее через специальный /// объект-писатель, который можно получить из данного объекта вызовом функции [`new_writer()`](#function.new_writer). 
/// Если поступить таким образом, то обновление функциональных и доменных индексов базы данных (если они /// есть) для данного большого объекта будет отложено до тех пор, пока объект-писатель не будет уничтожен. /// При вызове же данной функции обновление данных индексов произойдет сразу же по окончании вызова, что /// может сильно снизить производительность. #[inline] pub fn trim(&mut self, len: Chars) -> Result<()> { self.impl_.trim(len.0).map_err(Into::into) } /// Заполняет LOB, начиная с указанного индекса, указанным количеством пробелов. После завершения /// работы в `count` будет записано реальное количество очищенных символов. /// /// # Производительность /// Необходимо учитывать, что в случае частой записи предпочтительней делать ее через специальный /// объект-писатель, который можно получить из данного объекта вызовом функции [`new_writer()`](#function.new_writer). /// Если поступить таким образом, то обновление функциональных и доменных индексов базы данных (если они /// есть) для данного большого объекта будет отложено до тех пор, пока объект-писатель не будет уничтожен. /// При вызове же данной функции обновление данных индексов произойдет сразу же по окончании вызова, что /// может сильно снизить производительность. #[inline] pub fn erase(&mut self, offset: Chars, count: &mut Chars) -> Result<()> { self.impl_.erase(offset.0, &mut count.0).map_err(Into::into) } /// Создает читателя данного символьного объекта. Каждый вызов метода `read` читателя читает очередную порцию данных. /// Данные читаются из CLOB-а в кодировке `UTF-8`. #[inline] pub fn new_reader<'lob>(&'lob mut self) -> Result<ClobReader<'lob, 'conn>> { self.new_reader_with_charset(Charset::AL32UTF8) } /// Создает читателя данного символьного объекта. Каждый вызов метода `read` читателя читает очередную порцию данных. /// Данные читаются из CLOB-а в указанной кодировке. /// /// Каждый вызов `read` будет заполнять массив байтами в запрошенной кодировке. Так как стандартные методы Rust для /// работы с читателем байт как с читателем текста предполагают, что текст представлен в UTF-8, то их нельзя использовать для /// данного читателя, т.к. текст будет извлекаться в указанной кодировке. #[inline] pub fn new_reader_with_charset<'lob>(&'lob mut self, charset: Charset) -> Result<ClobReader<'lob, 'conn>> { try!(self.impl_.open(LobOpenMode::ReadOnly)); Ok(ClobReader { lob: self, piece: Piece::First, charset: charset }) } /// Создает писателя в данный символьный объект. Преимущество использования писателя вместо прямой записи /// в объект в том, что функциональные и доменные индексы базы данных (если они есть) для данного большого /// объекта будут обновлены только после уничтожения писателя, а не при каждой записи в объект, что в /// лучшую сторону сказывается на производительности. /// /// В пределах одной транзакции один CLOB может быть открыт только единожды, независимо от того, сколько /// локаторов (которые представляет данный класс) на него существует. #[inline] pub fn new_writer<'lob>(&'lob mut self) -> Result<ClobWriter<'lob, 'conn>> { self.new_writer_with_charset(Charset::AL32UTF8) } /// Создает писателя в данный символьный объект, записывающий текстовые данные, представленные в указанной кодировке. /// /// Преимущество использования писателя вместо прямой записи в объект в том, что функциональные и доменные индексы /// базы данных (если они есть) для данного большого объекта будут обновлены только после уничтожения писателя, а не /// при каждой записи в объект, что в лучшую сторону сказывается на производительности.
/// /// В пределах одной транзакции один CLOB может быть открыт только единожды, независимо от того, сколько /// локаторов (которые представляет данный класс) на него существует. #[inline] pub fn new_writer_with_charset<'lob>(&'lob mut self, charset: Charset) -> Result<ClobWriter<'lob, 'conn>> { try!(self.impl_.open(LobOpenMode::WriteOnly)); Ok(ClobWriter { lob: self, piece: Piece::First, charset: charset }) } /// Получает кодировку базы данных для данного большого символьного объекта. #[inline] pub fn charset(&self) -> Result<Charset> { self.impl_.charset().map_err(Into::into) } /// Если CLOB прочитан или записан не полностью, то сообщает базе данных, что дальнейшее чтение/запись не требуются /// и закрывает CLOB. fn close(&mut self, piece: Piece) -> DbResult<()> { // Если LOB был прочитан/записан не полностью, то отменяем запросы на чтение/запись и восстанавливаемся if piece!= Piece::Last { try!(self.impl_.break_()); try!(self.impl_.reset()); } self.impl_.close() } } impl<'conn> LobPrivate<'conn> for Clob<'conn> { fn new(raw: &[u8], conn: &'conn Connection) -> Result<Self> { let p = raw.as_ptr() as *const *mut Lob; let locator = unsafe { *p as *mut Lob }; let impl_ = LobImpl::from(conn, locator); let form = try!(impl_.form()); Ok(Clob { impl_: impl_, form: form }) } } impl<'conn> io::Read for Clob<'conn> { fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { self.impl_.read(Piece::One, Charset::AL32UTF8, self.form, buf).0 } } impl<'conn> io::Write for Clob<'conn> { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { self.impl_.write(Piece::One, Charset::AL32UTF8, self.form, buf).0 } fn flush(&mut self) -> io::Result<()> { Ok(()) } } //------------------------------------------------------------------------------------------------- /// Позволяет писать в большой символьный объект, не вызывая пересчета индексов после каждой записи. /// Индексы будут пересчитаны только после уничтожения данного объекта. #[derive(Debug)] pub struct ClobWriter<'lob, 'conn: 'lob> { lob: &'lob mut Clob<'conn>, piece: Piece, charset: Charset, } impl<'lob, 'conn: 'lob> ClobWriter<'lob, 'conn> { /// Получает `CLOB`, записываемый данным писателем. pub fn lob(&mut self) -> &mut Clob<'conn> { self.lob } /// Укорачивает данный объект до указанной длины. В случае, если новая длина больше предыдущей, будет /// возвращена ошибка (таким образом, данную функцию нельзя использовать для увеличения размера LOB). #[inline] pub fn trim(&mut self, len: Chars) -> Result<()> { self.lob.trim(len) } /// Заполняет LOB, начиная с указанного индекса, указанным количеством нулей. После завершения /// работы в `count` будет записано реальное количество очищенных байт. #[inline] pub fn erase(&mut self, offset: Chars, count: &mut Chars) -> Result<()> { self.lob.erase(offset, count) } } impl<'lob, 'conn: 'lob> io::Write for ClobWriter<'lob, 'conn> { #[inline] fn write(&mut self, buf: &[u8]) -> io::Result<usize> { let (res, piece) = self.lob.impl_.write(self.piece, self.charset, self.lob.form, buf); self.piece = piece; res } #[inline] fn flush(&mut self) -> io::Result<()> { Ok(()) } } impl<'lob, 'conn: 'lob> Drop for ClobWriter<'lob, 'conn> { fn drop(&mut self) { // Невозможно делать панику отсюда, т.к. приложение из-за этого крашится let _ = self.lob.close(self.piece);//.expect("Error when close CLOB writer"); } } //------------------------------------------------------------------------------------------------- /// Позволяет читать из большой бинарного объекта в потоковом режиме. Каждый вызов `read` читает очередную порцию данных. 
#[derive(Debug)] pub struct ClobReader<'lob, 'conn: 'lob> { lob: &'lob mut Clob<'conn>, /// Описательная часть порции данных, получаемых из базы данных (первая или нет). piece: Piece, /// Кодировка, в которой следует интерпретировать получаемые из базы данных байты. charset: Charset, } impl<'lob, 'conn: 'lob> ClobReader<'lob, 'conn> { /// Получает `CLOB`, читаемый данным читателем. pub fn lob(&mut self) -> &mut Clob<'conn> { self.lob } } impl<'lob, 'conn: 'lob> io::Read for ClobReader<'lob, 'conn> { #[inline] fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { let (res, piece) = self.lob.impl_.read(self.piece, self.charset, self.lob.form, buf); self.piece = piece; res } } impl<'lob, 'conn: 'lob> Drop for ClobReader<'lob, 'conn> { fn drop(&mut self) { // Невозможно делать панику отсюда, т.к. приложение из-за этого крашится let _ = self.lob.close(self.piece);//.expect("Error when close CLOB reader"); } }
identifier_name
lib.rs
//! See the `Bitmap` type. /// A dense bitmap, intended to store small bitslices (<= width of usize). pub struct Bitmap { entries: usize, width: usize, // Avoid a vector here because we have our own bounds checking, and // don't want to duplicate the length, or panic. data: *mut u8, } #[inline(always)] fn get_n_bits_at(byte: u8, n: u8, start: u8) -> u8 { (byte >> (8-n-start) as usize) & (0xFF >> (8-n) as usize) } impl Drop for Bitmap { fn
(&mut self) { let p = self.data; if p!= 0 as *mut _ { self.data = 0 as *mut _; let _ = unsafe { Vec::from_raw_parts(p as *mut u8, 0, self.byte_len()) }; } } } impl Bitmap { /// Create a new bitmap, returning None if the data can't be allocated or /// if the width of each slice can't fit in a usize. entries * width must /// not overflow usize. pub fn new(entries: usize, width: usize) -> Option<Bitmap> { if width > (std::mem::size_of::<usize>() * 8) || width == 0 { None } else { entries.checked_mul(width) .and_then(|bits| bits.checked_add(8 - (bits % 8))) .and_then(|rbits| rbits.checked_div(8)) .and_then(|needed| { let ptr = { let mut alloc = Vec::<u8>::with_capacity(needed); let ptr = alloc.as_mut_ptr(); std::mem::forget(alloc); ptr }; unsafe { std::ptr::write_bytes(ptr, 0, needed); } Some(Bitmap { entries: entries, width: width, data: ptr as *mut u8 }) }) } } /// Create a new Bitmap from raw parts. Will return None if the given /// entry and width would overflow the number of bits or bytes needed to /// store the Bitmap. pub unsafe fn from_raw_parts(entries: usize, width: usize, ptr: *mut u8) -> Option<Bitmap> { if width > (std::mem::size_of::<usize>() * 8) || width == 0 { None } else { entries.checked_mul(width) .and_then(|bits| bits.checked_add(8 - (bits % 8))) .and_then(|rbits| rbits.checked_div(8)) .and_then(|_| { Some(Bitmap { entries: entries, width: width, data: ptr }) }) } } /// Get the `i`th bitslice, returning None on out-of-bounds pub fn get(&self, i: usize) -> Option<usize> { if i >= self.entries { None } else { let mut bit_offset = i * self.width; let mut in_byte_offset = bit_offset % 8; let mut byte_offset = (bit_offset - in_byte_offset) / 8; let mut bits_left = self.width; let mut value: usize = 0; while bits_left > 0 { // how many bits can we need to set in this byte? let can_get = std::cmp::min(8 - in_byte_offset, bits_left); // alright, pull them out. let byte = unsafe { *self.data.offset(byte_offset as isize) }; let got = get_n_bits_at(byte, can_get as u8, in_byte_offset as u8) as usize; // make room for the bits we just read value <<= can_get; value |= got; // update all the state bit_offset += can_get; in_byte_offset = bit_offset % 8; byte_offset = (bit_offset - in_byte_offset) / 8; bits_left -= can_get; } Some(value) } } /// Set the `i`th bitslice to `value`, returning false on out-of-bounds or if `value` contains /// bits outside of the least significant `self.width` bits. 
pub fn set(&mut self, i: usize, mut value: usize) -> bool { let usize = std::mem::size_of::<usize>() * 8; if i >= self.entries || value &!(usize::max_value() >> (std::cmp::min(usize-1, usize - self.width)))!= 0 { false } else { // shift over into the high bits value <<= std::cmp::min(usize - 1, usize - self.width); let mut bit_offset = i * self.width; let mut in_byte_offset = bit_offset % 8; let mut byte_offset = (bit_offset - in_byte_offset) / 8; let mut bits_left = self.width; while bits_left > 0 { let can_set = std::cmp::min(8 - in_byte_offset, bits_left); // pull out the highest can_set bits from value let mut to_set: usize = value >> (usize - can_set); // move them into where they will live to_set <<= 8 - can_set - in_byte_offset; let addr = unsafe { self.data.offset(byte_offset as isize) }; let mut byte = unsafe { *addr }; debug_assert!(to_set <= 255); // clear the bits we'll be setting byte &=!(0xFF >> (7 - in_byte_offset) << (8usize.saturating_sub(in_byte_offset).saturating_sub(self.width))); byte |= to_set as u8; unsafe { *addr = byte }; // update all the state value <<= can_set; bit_offset += can_set; in_byte_offset = bit_offset % 8; byte_offset = (bit_offset - in_byte_offset) / 8; bits_left -= can_set; } true } } /// Length in number of bitslices cointained. pub fn len(&self) -> usize { self.entries } /// Size of the internal buffer, in bytes. pub fn byte_len(&self) -> usize { // can't overflow, since creation asserts that it doesn't. let w = self.entries * self.width; let r = w % 8; (w + r) / 8 } pub fn iter(&self) -> Slices { Slices { idx: 0, bm: self } } /// Get the raw pointer to this Bitmap's data. pub unsafe fn get_ptr(&self) -> *mut u8 { self.data } /// Set the raw pointer to this Bitmap's data, returning the old one. It needs to be free'd /// with `Vec`'s destructor if the Bitmap was not made with `from_raw_parts`. In general this /// operation should really be avoided. The destructor will call `Vec`s destructor on the /// internal pointer. pub unsafe fn set_ptr(&mut self, ptr: *mut u8) -> *mut u8 { let p = self.data; self.data = ptr; p } } /// Iterator over the bitslices in the bitmap pub struct Slices<'a> { idx: usize, bm: &'a Bitmap } impl<'a> Iterator for Slices<'a> { type Item = usize; /// *NOTE*: This iterator is not "well-behaved", in that if you keep calling /// `next` after it returns None, eventually it will overflow and start /// yielding elements again. Use the `fuse` method to make this /// "well-behaved". 
fn next(&mut self) -> Option<usize> { let rv = self.bm.get(self.idx); self.idx += 1; rv } fn size_hint(&self) -> (usize, Option<usize>) { (self.bm.len(), Some(self.bm.len())) } } impl<'a> std::iter::IntoIterator for &'a Bitmap { type Item = usize; type IntoIter = Slices<'a>; fn into_iter(self) -> Slices<'a> { self.iter() } } #[cfg(test)] mod test { extern crate quickcheck; use self::quickcheck::quickcheck; use super::{get_n_bits_at, Bitmap}; use std; #[test] fn empty() { let bm = Bitmap::new(10, 10).unwrap(); for i in 0..10 { assert_eq!(bm.get(i), Some(0)); } assert_eq!(bm.get(11), None); } #[test] fn get() { let mut data: [u8; 4] = [0b000_001_01, 0b0_011_100_1, 0b01_110_111, 0]; let bm = Bitmap { entries: 8, width: 3, data: &mut data as *mut [u8; 4] as *mut u8 }; for i in 0..8 { assert_eq!(bm.get(i), Some(i)); } assert_eq!(bm.get(8), None); assert_eq!(bm.get(9), None); // we don't use real data here, so don't bother freeing it let mut bm = bm; unsafe { bm.set_ptr(std::ptr::null_mut()); } } #[test] fn set() { let mut bm = Bitmap::new(10, 3).unwrap(); for i in 0..8 { assert!(bm.set(i, i)); assert_eq!(bm.get(i), Some(i)); } assert_eq!(bm.get(8), Some(0)); assert_eq!(bm.get(9), Some(0)); assert_eq!(bm.get(10), None); } #[test] fn get_n_bits() { macro_rules! t { ( $( $e:expr, $n:expr, $s:expr, $g:expr; )* ) => ( { $( assert_eq!(get_n_bits_at($e, $n, $s), $g); )* } ) } t! { 0b00111001, 1, 0, 0b0; 0b00111001, 8, 0, 0b00111001; 0b11010101, 2, 0, 0b11; 0b11010101, 2, 1, 0b10; 0b11010101, 2, 2, 0b01; 0b11010101, 2, 3, 0b10; 0b11010101, 2, 4, 0b01; 0b11010101, 3, 0, 0b110; 0b11010101, 3, 1, 0b101; 0b11010101, 3, 2, 0b010; } } #[test] fn iter() { let mut bm = Bitmap::new(10, 3).unwrap(); bm.set(2, 0b101); bm.set(7, 0b110); let bs: Vec<usize> = bm.iter().collect(); assert_eq!(bs, [0, 0, 0b101, 0, 0, 0, 0, 0b110, 0, 0]); } fn set_then_clear_prop(entries: usize, width: usize) -> bool { if width >= std::mem::size_of::<usize>() * 8 || width == 0 { return true } let mut bm = Bitmap::new(entries, width).unwrap(); let all_set = (1 << width) - 1; for i in 0..entries { assert!(bm.set(i, all_set)); } for val in &bm { println!("should be {}, is {}", all_set, val); if val!= all_set { return false; } } for i in 0..entries { assert!(bm.set(i, 0)); } for val in &bm { println!("should be {}, is {}", 0, val); if val!= 0 { return false; } } true } #[test] fn set_then_clear_is_identity() { quickcheck(set_then_clear_prop as fn(usize, usize) -> bool); } }
drop
identifier_name
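Aside: `get` and `set` in the `Bitmap` file above both begin by mapping a slice index to a byte offset plus a bit offset within that byte. An illustrative standalone helper (not in the source) that isolates just that arithmetic:

```rust
// For slice `i` of `width` bits, the first bit lives at absolute bit
// `i * width`, i.e. byte `bit / 8`, bit `bit % 8` within that byte.
fn locate(i: usize, width: usize) -> (usize, usize) {
    let bit_offset = i * width;
    let in_byte_offset = bit_offset % 8;
    let byte_offset = bit_offset / 8; // same as (bit_offset - in_byte_offset) / 8
    (byte_offset, in_byte_offset)
}

fn main() {
    // slice 2 of a 3-bit-wide bitmap starts at bit 6: byte 0, bit 6
    assert_eq!(locate(2, 3), (0, 6));
    // slice 3 starts at bit 9: byte 1, bit 1 (it straddles a byte boundary)
    assert_eq!(locate(3, 3), (1, 1));
}
```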
lib.rs
//! See the `Bitmap` type. /// A dense bitmap, intended to store small bitslices (<= width of usize). pub struct Bitmap { entries: usize, width: usize, // Avoid a vector here because we have our own bounds checking, and // don't want to duplicate the length, or panic. data: *mut u8, } #[inline(always)] fn get_n_bits_at(byte: u8, n: u8, start: u8) -> u8 { (byte >> (8-n-start) as usize) & (0xFF >> (8-n) as usize) } impl Drop for Bitmap { fn drop(&mut self) { let p = self.data; if p!= 0 as *mut _ { self.data = 0 as *mut _; let _ = unsafe { Vec::from_raw_parts(p as *mut u8, 0, self.byte_len()) }; } } } impl Bitmap { /// Create a new bitmap, returning None if the data can't be allocated or /// if the width of each slice can't fit in a usize. entries * width must /// not overflow usize. pub fn new(entries: usize, width: usize) -> Option<Bitmap> { if width > (std::mem::size_of::<usize>() * 8) || width == 0 { None } else { entries.checked_mul(width) .and_then(|bits| bits.checked_add(8 - (bits % 8))) .and_then(|rbits| rbits.checked_div(8)) .and_then(|needed| { let ptr = { let mut alloc = Vec::<u8>::with_capacity(needed); let ptr = alloc.as_mut_ptr(); std::mem::forget(alloc); ptr }; unsafe { std::ptr::write_bytes(ptr, 0, needed); } Some(Bitmap { entries: entries, width: width, data: ptr as *mut u8 }) }) } } /// Create a new Bitmap from raw parts. Will return None if the given /// entry and width would overflow the number of bits or bytes needed to /// store the Bitmap. pub unsafe fn from_raw_parts(entries: usize, width: usize, ptr: *mut u8) -> Option<Bitmap> { if width > (std::mem::size_of::<usize>() * 8) || width == 0 { None } else { entries.checked_mul(width) .and_then(|bits| bits.checked_add(8 - (bits % 8))) .and_then(|rbits| rbits.checked_div(8)) .and_then(|_| { Some(Bitmap { entries: entries, width: width, data: ptr }) }) } } /// Get the `i`th bitslice, returning None on out-of-bounds pub fn get(&self, i: usize) -> Option<usize> { if i >= self.entries
else { let mut bit_offset = i * self.width; let mut in_byte_offset = bit_offset % 8; let mut byte_offset = (bit_offset - in_byte_offset) / 8; let mut bits_left = self.width; let mut value: usize = 0; while bits_left > 0 { // how many bits can we need to set in this byte? let can_get = std::cmp::min(8 - in_byte_offset, bits_left); // alright, pull them out. let byte = unsafe { *self.data.offset(byte_offset as isize) }; let got = get_n_bits_at(byte, can_get as u8, in_byte_offset as u8) as usize; // make room for the bits we just read value <<= can_get; value |= got; // update all the state bit_offset += can_get; in_byte_offset = bit_offset % 8; byte_offset = (bit_offset - in_byte_offset) / 8; bits_left -= can_get; } Some(value) } } /// Set the `i`th bitslice to `value`, returning false on out-of-bounds or if `value` contains /// bits outside of the least significant `self.width` bits. pub fn set(&mut self, i: usize, mut value: usize) -> bool { let usize = std::mem::size_of::<usize>() * 8; if i >= self.entries || value &!(usize::max_value() >> (std::cmp::min(usize-1, usize - self.width)))!= 0 { false } else { // shift over into the high bits value <<= std::cmp::min(usize - 1, usize - self.width); let mut bit_offset = i * self.width; let mut in_byte_offset = bit_offset % 8; let mut byte_offset = (bit_offset - in_byte_offset) / 8; let mut bits_left = self.width; while bits_left > 0 { let can_set = std::cmp::min(8 - in_byte_offset, bits_left); // pull out the highest can_set bits from value let mut to_set: usize = value >> (usize - can_set); // move them into where they will live to_set <<= 8 - can_set - in_byte_offset; let addr = unsafe { self.data.offset(byte_offset as isize) }; let mut byte = unsafe { *addr }; debug_assert!(to_set <= 255); // clear the bits we'll be setting byte &=!(0xFF >> (7 - in_byte_offset) << (8usize.saturating_sub(in_byte_offset).saturating_sub(self.width))); byte |= to_set as u8; unsafe { *addr = byte }; // update all the state value <<= can_set; bit_offset += can_set; in_byte_offset = bit_offset % 8; byte_offset = (bit_offset - in_byte_offset) / 8; bits_left -= can_set; } true } } /// Length in number of bitslices cointained. pub fn len(&self) -> usize { self.entries } /// Size of the internal buffer, in bytes. pub fn byte_len(&self) -> usize { // can't overflow, since creation asserts that it doesn't. let w = self.entries * self.width; let r = w % 8; (w + r) / 8 } pub fn iter(&self) -> Slices { Slices { idx: 0, bm: self } } /// Get the raw pointer to this Bitmap's data. pub unsafe fn get_ptr(&self) -> *mut u8 { self.data } /// Set the raw pointer to this Bitmap's data, returning the old one. It needs to be free'd /// with `Vec`'s destructor if the Bitmap was not made with `from_raw_parts`. In general this /// operation should really be avoided. The destructor will call `Vec`s destructor on the /// internal pointer. pub unsafe fn set_ptr(&mut self, ptr: *mut u8) -> *mut u8 { let p = self.data; self.data = ptr; p } } /// Iterator over the bitslices in the bitmap pub struct Slices<'a> { idx: usize, bm: &'a Bitmap } impl<'a> Iterator for Slices<'a> { type Item = usize; /// *NOTE*: This iterator is not "well-behaved", in that if you keep calling /// `next` after it returns None, eventually it will overflow and start /// yielding elements again. Use the `fuse` method to make this /// "well-behaved". 
fn next(&mut self) -> Option<usize> { let rv = self.bm.get(self.idx); self.idx += 1; rv } fn size_hint(&self) -> (usize, Option<usize>) { (self.bm.len(), Some(self.bm.len())) } } impl<'a> std::iter::IntoIterator for &'a Bitmap { type Item = usize; type IntoIter = Slices<'a>; fn into_iter(self) -> Slices<'a> { self.iter() } } #[cfg(test)] mod test { extern crate quickcheck; use self::quickcheck::quickcheck; use super::{get_n_bits_at, Bitmap}; use std; #[test] fn empty() { let bm = Bitmap::new(10, 10).unwrap(); for i in 0..10 { assert_eq!(bm.get(i), Some(0)); } assert_eq!(bm.get(11), None); } #[test] fn get() { let mut data: [u8; 4] = [0b000_001_01, 0b0_011_100_1, 0b01_110_111, 0]; let bm = Bitmap { entries: 8, width: 3, data: &mut data as *mut [u8; 4] as *mut u8 }; for i in 0..8 { assert_eq!(bm.get(i), Some(i)); } assert_eq!(bm.get(8), None); assert_eq!(bm.get(9), None); // we don't use real data here, so don't bother freeing it let mut bm = bm; unsafe { bm.set_ptr(std::ptr::null_mut()); } } #[test] fn set() { let mut bm = Bitmap::new(10, 3).unwrap(); for i in 0..8 { assert!(bm.set(i, i)); assert_eq!(bm.get(i), Some(i)); } assert_eq!(bm.get(8), Some(0)); assert_eq!(bm.get(9), Some(0)); assert_eq!(bm.get(10), None); } #[test] fn get_n_bits() { macro_rules! t { ( $( $e:expr, $n:expr, $s:expr, $g:expr; )* ) => ( { $( assert_eq!(get_n_bits_at($e, $n, $s), $g); )* } ) } t! { 0b00111001, 1, 0, 0b0; 0b00111001, 8, 0, 0b00111001; 0b11010101, 2, 0, 0b11; 0b11010101, 2, 1, 0b10; 0b11010101, 2, 2, 0b01; 0b11010101, 2, 3, 0b10; 0b11010101, 2, 4, 0b01; 0b11010101, 3, 0, 0b110; 0b11010101, 3, 1, 0b101; 0b11010101, 3, 2, 0b010; } } #[test] fn iter() { let mut bm = Bitmap::new(10, 3).unwrap(); bm.set(2, 0b101); bm.set(7, 0b110); let bs: Vec<usize> = bm.iter().collect(); assert_eq!(bs, [0, 0, 0b101, 0, 0, 0, 0, 0b110, 0, 0]); } fn set_then_clear_prop(entries: usize, width: usize) -> bool { if width >= std::mem::size_of::<usize>() * 8 || width == 0 { return true } let mut bm = Bitmap::new(entries, width).unwrap(); let all_set = (1 << width) - 1; for i in 0..entries { assert!(bm.set(i, all_set)); } for val in &bm { println!("should be {}, is {}", all_set, val); if val!= all_set { return false; } } for i in 0..entries { assert!(bm.set(i, 0)); } for val in &bm { println!("should be {}, is {}", 0, val); if val!= 0 { return false; } } true } #[test] fn set_then_clear_is_identity() { quickcheck(set_then_clear_prop as fn(usize, usize) -> bool); } }
{ None }
conditional_block
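Aside: a worked example of the `get_n_bits_at` helper used by `get` above. It extracts `n` bits starting `start` bits from the most significant end of a byte; the body is copied from the row, and the asserts mirror entries of its test table:

```rust
// Shift the wanted bits down to the low end, then mask off everything above them.
fn get_n_bits_at(byte: u8, n: u8, start: u8) -> u8 {
    (byte >> (8 - n - start) as usize) & (0xFF >> (8 - n) as usize)
}

fn main() {
    // MSB-first: the top 2 bits of 0b11010101 are 0b11
    assert_eq!(get_n_bits_at(0b1101_0101, 2, 0), 0b11);
    // 3 bits starting at bit 1 are 0b101
    assert_eq!(get_n_bits_at(0b1101_0101, 3, 1), 0b101);
}
```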
lib.rs
//! See the `Bitmap` type. /// A dense bitmap, intended to store small bitslices (<= width of usize). pub struct Bitmap { entries: usize, width: usize, // Avoid a vector here because we have our own bounds checking, and // don't want to duplicate the length, or panic. data: *mut u8, } #[inline(always)] fn get_n_bits_at(byte: u8, n: u8, start: u8) -> u8 { (byte >> (8-n-start) as usize) & (0xFF >> (8-n) as usize) } impl Drop for Bitmap { fn drop(&mut self) { let p = self.data; if p!= 0 as *mut _ { self.data = 0 as *mut _; let _ = unsafe { Vec::from_raw_parts(p as *mut u8, 0, self.byte_len()) }; } } } impl Bitmap { /// Create a new bitmap, returning None if the data can't be allocated or /// if the width of each slice can't fit in a usize. entries * width must /// not overflow usize. pub fn new(entries: usize, width: usize) -> Option<Bitmap> { if width > (std::mem::size_of::<usize>() * 8) || width == 0 { None } else { entries.checked_mul(width) .and_then(|bits| bits.checked_add(8 - (bits % 8))) .and_then(|rbits| rbits.checked_div(8)) .and_then(|needed| { let ptr = { let mut alloc = Vec::<u8>::with_capacity(needed); let ptr = alloc.as_mut_ptr(); std::mem::forget(alloc); ptr }; unsafe { std::ptr::write_bytes(ptr, 0, needed); } Some(Bitmap { entries: entries, width: width, data: ptr as *mut u8 }) }) } } /// Create a new Bitmap from raw parts. Will return None if the given /// entry and width would overflow the number of bits or bytes needed to /// store the Bitmap. pub unsafe fn from_raw_parts(entries: usize, width: usize, ptr: *mut u8) -> Option<Bitmap> { if width > (std::mem::size_of::<usize>() * 8) || width == 0 { None } else { entries.checked_mul(width) .and_then(|bits| bits.checked_add(8 - (bits % 8))) .and_then(|rbits| rbits.checked_div(8)) .and_then(|_| { Some(Bitmap { entries: entries, width: width, data: ptr }) }) } } /// Get the `i`th bitslice, returning None on out-of-bounds pub fn get(&self, i: usize) -> Option<usize> { if i >= self.entries { None } else { let mut bit_offset = i * self.width; let mut in_byte_offset = bit_offset % 8; let mut byte_offset = (bit_offset - in_byte_offset) / 8; let mut bits_left = self.width; let mut value: usize = 0; while bits_left > 0 { // how many bits can we need to set in this byte? let can_get = std::cmp::min(8 - in_byte_offset, bits_left); // alright, pull them out. let byte = unsafe { *self.data.offset(byte_offset as isize) }; let got = get_n_bits_at(byte, can_get as u8, in_byte_offset as u8) as usize; // make room for the bits we just read value <<= can_get; value |= got; // update all the state bit_offset += can_get; in_byte_offset = bit_offset % 8; byte_offset = (bit_offset - in_byte_offset) / 8; bits_left -= can_get; } Some(value) } } /// Set the `i`th bitslice to `value`, returning false on out-of-bounds or if `value` contains /// bits outside of the least significant `self.width` bits. pub fn set(&mut self, i: usize, mut value: usize) -> bool { let usize = std::mem::size_of::<usize>() * 8; if i >= self.entries || value &!(usize::max_value() >> (std::cmp::min(usize-1, usize - self.width)))!= 0 { false } else { // shift over into the high bits value <<= std::cmp::min(usize - 1, usize - self.width); let mut bit_offset = i * self.width; let mut in_byte_offset = bit_offset % 8; let mut byte_offset = (bit_offset - in_byte_offset) / 8;
while bits_left > 0 { let can_set = std::cmp::min(8 - in_byte_offset, bits_left); // pull out the highest can_set bits from value let mut to_set: usize = value >> (usize - can_set); // move them into where they will live to_set <<= 8 - can_set - in_byte_offset; let addr = unsafe { self.data.offset(byte_offset as isize) }; let mut byte = unsafe { *addr }; debug_assert!(to_set <= 255); // clear the bits we'll be setting byte &=!(0xFF >> (7 - in_byte_offset) << (8usize.saturating_sub(in_byte_offset).saturating_sub(self.width))); byte |= to_set as u8; unsafe { *addr = byte }; // update all the state value <<= can_set; bit_offset += can_set; in_byte_offset = bit_offset % 8; byte_offset = (bit_offset - in_byte_offset) / 8; bits_left -= can_set; } true } } /// Length in number of bitslices cointained. pub fn len(&self) -> usize { self.entries } /// Size of the internal buffer, in bytes. pub fn byte_len(&self) -> usize { // can't overflow, since creation asserts that it doesn't. let w = self.entries * self.width; let r = w % 8; (w + r) / 8 } pub fn iter(&self) -> Slices { Slices { idx: 0, bm: self } } /// Get the raw pointer to this Bitmap's data. pub unsafe fn get_ptr(&self) -> *mut u8 { self.data } /// Set the raw pointer to this Bitmap's data, returning the old one. It needs to be free'd /// with `Vec`'s destructor if the Bitmap was not made with `from_raw_parts`. In general this /// operation should really be avoided. The destructor will call `Vec`s destructor on the /// internal pointer. pub unsafe fn set_ptr(&mut self, ptr: *mut u8) -> *mut u8 { let p = self.data; self.data = ptr; p } } /// Iterator over the bitslices in the bitmap pub struct Slices<'a> { idx: usize, bm: &'a Bitmap } impl<'a> Iterator for Slices<'a> { type Item = usize; /// *NOTE*: This iterator is not "well-behaved", in that if you keep calling /// `next` after it returns None, eventually it will overflow and start /// yielding elements again. Use the `fuse` method to make this /// "well-behaved". fn next(&mut self) -> Option<usize> { let rv = self.bm.get(self.idx); self.idx += 1; rv } fn size_hint(&self) -> (usize, Option<usize>) { (self.bm.len(), Some(self.bm.len())) } } impl<'a> std::iter::IntoIterator for &'a Bitmap { type Item = usize; type IntoIter = Slices<'a>; fn into_iter(self) -> Slices<'a> { self.iter() } } #[cfg(test)] mod test { extern crate quickcheck; use self::quickcheck::quickcheck; use super::{get_n_bits_at, Bitmap}; use std; #[test] fn empty() { let bm = Bitmap::new(10, 10).unwrap(); for i in 0..10 { assert_eq!(bm.get(i), Some(0)); } assert_eq!(bm.get(11), None); } #[test] fn get() { let mut data: [u8; 4] = [0b000_001_01, 0b0_011_100_1, 0b01_110_111, 0]; let bm = Bitmap { entries: 8, width: 3, data: &mut data as *mut [u8; 4] as *mut u8 }; for i in 0..8 { assert_eq!(bm.get(i), Some(i)); } assert_eq!(bm.get(8), None); assert_eq!(bm.get(9), None); // we don't use real data here, so don't bother freeing it let mut bm = bm; unsafe { bm.set_ptr(std::ptr::null_mut()); } } #[test] fn set() { let mut bm = Bitmap::new(10, 3).unwrap(); for i in 0..8 { assert!(bm.set(i, i)); assert_eq!(bm.get(i), Some(i)); } assert_eq!(bm.get(8), Some(0)); assert_eq!(bm.get(9), Some(0)); assert_eq!(bm.get(10), None); } #[test] fn get_n_bits() { macro_rules! t { ( $( $e:expr, $n:expr, $s:expr, $g:expr; )* ) => ( { $( assert_eq!(get_n_bits_at($e, $n, $s), $g); )* } ) } t! 
{ 0b00111001, 1, 0, 0b0; 0b00111001, 8, 0, 0b00111001; 0b11010101, 2, 0, 0b11; 0b11010101, 2, 1, 0b10; 0b11010101, 2, 2, 0b01; 0b11010101, 2, 3, 0b10; 0b11010101, 2, 4, 0b01; 0b11010101, 3, 0, 0b110; 0b11010101, 3, 1, 0b101; 0b11010101, 3, 2, 0b010; } } #[test] fn iter() { let mut bm = Bitmap::new(10, 3).unwrap(); bm.set(2, 0b101); bm.set(7, 0b110); let bs: Vec<usize> = bm.iter().collect(); assert_eq!(bs, [0, 0, 0b101, 0, 0, 0, 0, 0b110, 0, 0]); } fn set_then_clear_prop(entries: usize, width: usize) -> bool { if width >= std::mem::size_of::<usize>() * 8 || width == 0 { return true } let mut bm = Bitmap::new(entries, width).unwrap(); let all_set = (1 << width) - 1; for i in 0..entries { assert!(bm.set(i, all_set)); } for val in &bm { println!("should be {}, is {}", all_set, val); if val!= all_set { return false; } } for i in 0..entries { assert!(bm.set(i, 0)); } for val in &bm { println!("should be {}, is {}", 0, val); if val!= 0 { return false; } } true } #[test] fn set_then_clear_is_identity() { quickcheck(set_then_clear_prop as fn(usize, usize) -> bool); } }
let mut bits_left = self.width;
random_line_split
lib.rs
//! See the `Bitmap` type. /// A dense bitmap, intended to store small bitslices (<= width of usize). pub struct Bitmap { entries: usize, width: usize, // Avoid a vector here because we have our own bounds checking, and // don't want to duplicate the length, or panic. data: *mut u8, } #[inline(always)] fn get_n_bits_at(byte: u8, n: u8, start: u8) -> u8 { (byte >> (8-n-start) as usize) & (0xFF >> (8-n) as usize) } impl Drop for Bitmap { fn drop(&mut self) { let p = self.data; if p!= 0 as *mut _ { self.data = 0 as *mut _; let _ = unsafe { Vec::from_raw_parts(p as *mut u8, 0, self.byte_len()) }; } } } impl Bitmap { /// Create a new bitmap, returning None if the data can't be allocated or /// if the width of each slice can't fit in a usize. entries * width must /// not overflow usize. pub fn new(entries: usize, width: usize) -> Option<Bitmap> { if width > (std::mem::size_of::<usize>() * 8) || width == 0 { None } else { entries.checked_mul(width) .and_then(|bits| bits.checked_add(8 - (bits % 8))) .and_then(|rbits| rbits.checked_div(8)) .and_then(|needed| { let ptr = { let mut alloc = Vec::<u8>::with_capacity(needed); let ptr = alloc.as_mut_ptr(); std::mem::forget(alloc); ptr }; unsafe { std::ptr::write_bytes(ptr, 0, needed); } Some(Bitmap { entries: entries, width: width, data: ptr as *mut u8 }) }) } } /// Create a new Bitmap from raw parts. Will return None if the given /// entry and width would overflow the number of bits or bytes needed to /// store the Bitmap. pub unsafe fn from_raw_parts(entries: usize, width: usize, ptr: *mut u8) -> Option<Bitmap> { if width > (std::mem::size_of::<usize>() * 8) || width == 0 { None } else { entries.checked_mul(width) .and_then(|bits| bits.checked_add(8 - (bits % 8))) .and_then(|rbits| rbits.checked_div(8)) .and_then(|_| { Some(Bitmap { entries: entries, width: width, data: ptr }) }) } } /// Get the `i`th bitslice, returning None on out-of-bounds pub fn get(&self, i: usize) -> Option<usize> { if i >= self.entries { None } else { let mut bit_offset = i * self.width; let mut in_byte_offset = bit_offset % 8; let mut byte_offset = (bit_offset - in_byte_offset) / 8; let mut bits_left = self.width; let mut value: usize = 0; while bits_left > 0 { // how many bits can we need to set in this byte? let can_get = std::cmp::min(8 - in_byte_offset, bits_left); // alright, pull them out. let byte = unsafe { *self.data.offset(byte_offset as isize) }; let got = get_n_bits_at(byte, can_get as u8, in_byte_offset as u8) as usize; // make room for the bits we just read value <<= can_get; value |= got; // update all the state bit_offset += can_get; in_byte_offset = bit_offset % 8; byte_offset = (bit_offset - in_byte_offset) / 8; bits_left -= can_get; } Some(value) } } /// Set the `i`th bitslice to `value`, returning false on out-of-bounds or if `value` contains /// bits outside of the least significant `self.width` bits. 
pub fn set(&mut self, i: usize, mut value: usize) -> bool { let usize = std::mem::size_of::<usize>() * 8; if i >= self.entries || value &!(usize::max_value() >> (std::cmp::min(usize-1, usize - self.width)))!= 0 { false } else { // shift over into the high bits value <<= std::cmp::min(usize - 1, usize - self.width); let mut bit_offset = i * self.width; let mut in_byte_offset = bit_offset % 8; let mut byte_offset = (bit_offset - in_byte_offset) / 8; let mut bits_left = self.width; while bits_left > 0 { let can_set = std::cmp::min(8 - in_byte_offset, bits_left); // pull out the highest can_set bits from value let mut to_set: usize = value >> (usize - can_set); // move them into where they will live to_set <<= 8 - can_set - in_byte_offset; let addr = unsafe { self.data.offset(byte_offset as isize) }; let mut byte = unsafe { *addr }; debug_assert!(to_set <= 255); // clear the bits we'll be setting byte &=!(0xFF >> (7 - in_byte_offset) << (8usize.saturating_sub(in_byte_offset).saturating_sub(self.width))); byte |= to_set as u8; unsafe { *addr = byte }; // update all the state value <<= can_set; bit_offset += can_set; in_byte_offset = bit_offset % 8; byte_offset = (bit_offset - in_byte_offset) / 8; bits_left -= can_set; } true } } /// Length in number of bitslices cointained. pub fn len(&self) -> usize { self.entries } /// Size of the internal buffer, in bytes. pub fn byte_len(&self) -> usize
pub fn iter(&self) -> Slices { Slices { idx: 0, bm: self } } /// Get the raw pointer to this Bitmap's data. pub unsafe fn get_ptr(&self) -> *mut u8 { self.data } /// Set the raw pointer to this Bitmap's data, returning the old one. It needs to be free'd /// with `Vec`'s destructor if the Bitmap was not made with `from_raw_parts`. In general this /// operation should really be avoided. The destructor will call `Vec`s destructor on the /// internal pointer. pub unsafe fn set_ptr(&mut self, ptr: *mut u8) -> *mut u8 { let p = self.data; self.data = ptr; p } } /// Iterator over the bitslices in the bitmap pub struct Slices<'a> { idx: usize, bm: &'a Bitmap } impl<'a> Iterator for Slices<'a> { type Item = usize; /// *NOTE*: This iterator is not "well-behaved", in that if you keep calling /// `next` after it returns None, eventually it will overflow and start /// yielding elements again. Use the `fuse` method to make this /// "well-behaved". fn next(&mut self) -> Option<usize> { let rv = self.bm.get(self.idx); self.idx += 1; rv } fn size_hint(&self) -> (usize, Option<usize>) { (self.bm.len(), Some(self.bm.len())) } } impl<'a> std::iter::IntoIterator for &'a Bitmap { type Item = usize; type IntoIter = Slices<'a>; fn into_iter(self) -> Slices<'a> { self.iter() } } #[cfg(test)] mod test { extern crate quickcheck; use self::quickcheck::quickcheck; use super::{get_n_bits_at, Bitmap}; use std; #[test] fn empty() { let bm = Bitmap::new(10, 10).unwrap(); for i in 0..10 { assert_eq!(bm.get(i), Some(0)); } assert_eq!(bm.get(11), None); } #[test] fn get() { let mut data: [u8; 4] = [0b000_001_01, 0b0_011_100_1, 0b01_110_111, 0]; let bm = Bitmap { entries: 8, width: 3, data: &mut data as *mut [u8; 4] as *mut u8 }; for i in 0..8 { assert_eq!(bm.get(i), Some(i)); } assert_eq!(bm.get(8), None); assert_eq!(bm.get(9), None); // we don't use real data here, so don't bother freeing it let mut bm = bm; unsafe { bm.set_ptr(std::ptr::null_mut()); } } #[test] fn set() { let mut bm = Bitmap::new(10, 3).unwrap(); for i in 0..8 { assert!(bm.set(i, i)); assert_eq!(bm.get(i), Some(i)); } assert_eq!(bm.get(8), Some(0)); assert_eq!(bm.get(9), Some(0)); assert_eq!(bm.get(10), None); } #[test] fn get_n_bits() { macro_rules! t { ( $( $e:expr, $n:expr, $s:expr, $g:expr; )* ) => ( { $( assert_eq!(get_n_bits_at($e, $n, $s), $g); )* } ) } t! { 0b00111001, 1, 0, 0b0; 0b00111001, 8, 0, 0b00111001; 0b11010101, 2, 0, 0b11; 0b11010101, 2, 1, 0b10; 0b11010101, 2, 2, 0b01; 0b11010101, 2, 3, 0b10; 0b11010101, 2, 4, 0b01; 0b11010101, 3, 0, 0b110; 0b11010101, 3, 1, 0b101; 0b11010101, 3, 2, 0b010; } } #[test] fn iter() { let mut bm = Bitmap::new(10, 3).unwrap(); bm.set(2, 0b101); bm.set(7, 0b110); let bs: Vec<usize> = bm.iter().collect(); assert_eq!(bs, [0, 0, 0b101, 0, 0, 0, 0, 0b110, 0, 0]); } fn set_then_clear_prop(entries: usize, width: usize) -> bool { if width >= std::mem::size_of::<usize>() * 8 || width == 0 { return true } let mut bm = Bitmap::new(entries, width).unwrap(); let all_set = (1 << width) - 1; for i in 0..entries { assert!(bm.set(i, all_set)); } for val in &bm { println!("should be {}, is {}", all_set, val); if val!= all_set { return false; } } for i in 0..entries { assert!(bm.set(i, 0)); } for val in &bm { println!("should be {}, is {}", 0, val); if val!= 0 { return false; } } true } #[test] fn set_then_clear_is_identity() { quickcheck(set_then_clear_prop as fn(usize, usize) -> bool); } }
{
    // can't overflow, since creation asserts that it doesn't.
    let w = self.entries * self.width;
    // round up to the next whole byte
    (w + 7) / 8
}
identifier_body
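The `set` function in this row packs `width`-bit slices across byte boundaries, always working from the most-significant end of each byte. As a standalone illustration, here is a re-derivation of the `get_n_bits_at` helper that the test module above imports, checked against its test vectors; the exact signature is an assumption of this sketch:

```rust
// Re-derivation of the bit extraction used by the bitmap above (assumed
// semantics: take `n` bits of `byte`, starting `start` bits from the
// most-significant end).
fn get_n_bits_at(byte: u8, n: u8, start: u8) -> u8 {
    (byte >> (8 - n - start)) & (0xFFu8 >> (8 - n))
}

fn main() {
    // 0b110_101_11: two full 3-bit slices (0b110, 0b101) packed high-to-low.
    let byte = 0b1101_0111u8;
    assert_eq!(get_n_bits_at(byte, 3, 0), 0b110);
    assert_eq!(get_n_bits_at(byte, 3, 3), 0b101);
    // Matches a vector from the `get_n_bits` test above: 0b11010101, 3, 1 -> 0b101.
    assert_eq!(get_n_bits_at(0b11010101, 3, 1), 0b101);
}
```

The same mask-and-shift shape appears inside `set`, just run once per byte that a slice straddles.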
prec_climber.rs
// pest. The Elegant Parser // Copyright (c) 2018 Dragoș Tiselice // // Licensed under the Apache License, Version 2.0 // <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT // license <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. All files in the project carrying such notice may not be copied, // modified, or distributed except according to those terms. //! Constructs useful in infix operator parsing with the precedence climbing method. use alloc::borrow::Cow; use alloc::boxed::Box; use alloc::vec::Vec; use core::iter::Peekable; use core::ops::BitOr; use crate::iterators::Pair; use crate::RuleType; /// Macro for more convenient const fn definition of `prec_climber::PrecClimber`. /// /// # Examples /// /// ``` /// # use pest::prec_climber::{Assoc, PrecClimber}; /// # use pest::prec_climber; /// # #[allow(non_camel_case_types)] /// # #[allow(dead_code)] /// # #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] /// # enum Rule { /// # plus, /// # minus, /// # times, /// # divide, /// # power /// # } /// static CLIMBER: PrecClimber<Rule> = prec_climber![ /// L plus | minus, /// L times | divide, /// R power, /// ]; /// ``` #[cfg(feature = "const_prec_climber")] #[macro_export] macro_rules! prec_climber { ( $( $assoc:ident $rule:ident $( | $rules:ident )* ),+ $(,)? ) => {{ prec_climber!( @precedences { 1u32 } $( [ $rule $( $rules )* ] )* ); $crate::prec_climber::PrecClimber::new_const( prec_climber!( @array $( $assoc $rule $(, $assoc $rules )* ),* ) ) }}; ( @assoc L ) => { $crate::prec_climber::Assoc::Left }; ( @assoc R ) => { $crate::prec_climber::Assoc::Right }; ( @array $( $assoc:ident $rule:ident ),* ) => { &[ $( ( Rule::$rule, $rule, prec_climber!( @assoc $assoc ), ) ),* ] }; ( @precedences { $precedence:expr } ) => {}; ( @precedences { $precedence:expr } [ $( $rule:ident )* ] $( [ $( $rules:ident )* ] )* ) => { $( #[allow(non_upper_case_globals)] const $rule: u32 = $precedence; )* prec_climber!( @precedences { 1u32 + $precedence } $( [ $( $rules )* ] )* ); }; } /// Associativity of an [`Operator`]. /// /// [`Operator`]: struct.Operator.html #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub enum Assoc { /// Left `Operator` associativity Left, /// Right `Operator` associativity Right, } /// Infix operator used in [`PrecClimber`]. /// /// [`PrecClimber`]: struct.PrecClimber.html #[derive(Debug)] pub struct Operator<R: RuleType> { rule: R, assoc: Assoc, next: Option<Box<Operator<R>>>, } impl<R: RuleType> Operator<R> { /// Creates a new `Operator` from a `Rule` and `Assoc`.
///
/// # Examples
///
/// ```
/// # use pest::prec_climber::{Assoc, Operator};
/// # #[allow(non_camel_case_types)]
/// # #[allow(dead_code)]
/// # #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
/// # enum Rule {
/// #     plus,
/// #     minus
/// # }
/// Operator::new(Rule::plus, Assoc::Left) | Operator::new(Rule::minus, Assoc::Right);
/// ```
pub fn new(rule: R, assoc: Assoc) -> Operator<R> {
    Operator {
        rule,
        assoc,
        next: None,
    }
}
}

impl<R: RuleType> BitOr for Operator<R> {
    type Output = Self;

    fn bitor(mut self, rhs: Self) -> Self {
        fn assign_next<R: RuleType>(op: &mut Operator<R>, next: Operator<R>) {
            if let Some(ref mut child) = op.next {
                assign_next(child, next);
            } else {
                op.next = Some(Box::new(next));
            }
        }

        assign_next(&mut self, rhs);
        self
    }
}

/// List of operators and precedences, which can perform [precedence climbing][1] on infix
/// expressions contained in a [`Pairs`]. The token pairs contained in the `Pairs` should start
/// with a *primary* pair and then alternate between an *operator* and a *primary*.
///
/// [1]: https://en.wikipedia.org/wiki/Operator-precedence_parser#Precedence_climbing_method
/// [`Pairs`]: ../iterators/struct.Pairs.html
#[derive(Debug)]
pub struct PrecClimber<R: Clone + 'static> {
    ops: Cow<'static, [(R, u32, Assoc)]>,
}

#[cfg(feature = "const_prec_climber")]
impl<R: Clone + 'static> PrecClimber<R> {
    /// Creates a new `PrecClimber` directly from a static slice of
    /// `(rule: Rule, precedence: u32, associativity: Assoc)` tuples.
    ///
    /// Precedence starts from `1`. Entries don't have to be ordered in any way, but it's easier to read when
    /// sorted.
    ///
    /// # Examples
    ///
    /// ```
    /// # use pest::prec_climber::{Assoc, PrecClimber};
    /// # #[allow(non_camel_case_types)]
    /// # #[allow(dead_code)]
    /// # #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
    /// # enum Rule {
    /// #     plus,
    /// #     minus,
    /// #     times,
    /// #     divide,
    /// #     power
    /// # }
    /// static CLIMBER: PrecClimber<Rule> = PrecClimber::new_const(&[
    ///     (Rule::plus, 1, Assoc::Left), (Rule::minus, 1, Assoc::Left),
    ///     (Rule::times, 2, Assoc::Left), (Rule::divide, 2, Assoc::Left),
    ///     (Rule::power, 3, Assoc::Right)
    /// ]);
    /// ```
    pub const fn new_const(ops: &'static [(R, u32, Assoc)]) -> PrecClimber<R> {
        PrecClimber {
            ops: Cow::Borrowed(ops),
        }
    }
}

impl<R: RuleType> PrecClimber<R> {
    // find matching operator by `rule`
    fn get(&self, rule: &R) -> Option<(u32, Assoc)> {
        self.ops
            .iter()
            .find(|(r, _, _)| r == rule)
            .map(|(_, precedence, assoc)| (*precedence, *assoc))
    }

    /// Creates a new `PrecClimber` from the `Operator`s contained in `ops`. Every entry in the
    /// `Vec` has precedence *index + 1*. In order to have operators with same precedence, they need
    /// to be chained with `|` between them.
    ///
    /// # Examples
    ///
    /// ```
    /// # use pest::prec_climber::{Assoc, Operator, PrecClimber};
    /// # #[allow(non_camel_case_types)]
    /// # #[allow(dead_code)]
    /// # #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
    /// # enum Rule {
    /// #     plus,
    /// #     minus,
    /// #     times,
    /// #     divide,
    /// #     power
    /// # }
    /// PrecClimber::new(vec![
    ///     Operator::new(Rule::plus, Assoc::Left) | Operator::new(Rule::minus, Assoc::Left),
    ///     Operator::new(Rule::times, Assoc::Left) | Operator::new(Rule::divide, Assoc::Left),
    ///     Operator::new(Rule::power, Assoc::Right)
    /// ]);
    /// ```
    pub fn new(ops: Vec<Operator<R>>) -> PrecClimber<R> {
        let ops = ops
            .into_iter()
            .zip(1..)
.fold(Vec::new(), |mut vec, (op, prec)| { let mut next = Some(op); while let Some(op) = next.take() { let Operator { rule, assoc, next: op_next, } = op; vec.push((rule, prec, assoc)); next = op_next.map(|op| *op); } vec }); PrecClimber { ops: Cow::Owned(ops), } } /// Performs the precedence climbing algorithm on the `pairs` in a similar manner to map-reduce. /// *Primary* pairs are mapped with `primary` and then reduced to one single result with /// `infix`. /// /// # Panics /// /// Panics will occur when `pairs` is empty or when the alternating *primary*, *operator*, /// *primary* order is not respected. /// /// # Examples /// /// ```ignore /// let primary = |pair| { /// consume(pair, climber) /// }; /// let infix = |lhs: i32, op: Pair<Rule>, rhs: i32| { /// match op.rule() { /// Rule::plus => lhs + rhs, /// Rule::minus => lhs - rhs, /// Rule::times => lhs * rhs, /// Rule::divide => lhs / rhs, /// Rule::power => lhs.pow(rhs as u32), /// _ => unreachable!() /// } /// }; /// /// let result = climber.climb(pairs, primary, infix); /// ``` pub fn climb<'i, P, F, G, T>(&self, mut pairs: P, mut primary: F, mut infix: G) -> T where P: Iterator<Item = Pair<'i, R>>, F: FnMut(Pair<'i, R>) -> T, G: FnMut(T, Pair<'i, R>, T) -> T, { let lhs = primary( pairs .next() .expect("precedence climbing requires a non-empty Pairs"), ); self.climb_rec(lhs, 0, &mut pairs.peekable(), &mut primary, &mut infix) } fn climb_rec<'i, P, F, G, T>( &self, mut lhs: T, min_prec: u32, pairs: &mut Peekable<P>, primary: &mut F, infix: &mut G, ) -> T where P: Iterator<Item = Pair<'i, R>>, F: FnMut(Pair<'i, R>) -> T, G: FnMut(T, Pair<'i, R>, T) -> T, { while pairs.peek().is_some() { let rule = pairs.peek().unwrap().as_rule(); if let Some((prec, _)) = self.get(&rule) { if prec >= min_prec { let op = pairs.next().unwrap(); let mut rhs = primary(pairs.next().expect( "infix operator must be followed by \ a primary expression", )); while pairs.peek().is_some() { let rule = pairs.peek().unwrap().as_rule(); if let Some((new_prec, assoc)) = self.get(&rule) { if new_prec > prec || assoc == Assoc::Right && new_prec == prec { rhs = self.climb_rec(rhs, new_prec, pairs, primary, infix); } else { break; } } else { break; } } lhs = infix(lhs, op, rhs); } else { break; } } else { break; } } lhs } }
random_line_split
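This row's file implements classic precedence climbing over pest token pairs. The same loop can be shown self-contained over a plain token vector; `Tok`, `Op`, `info`, and `apply` below are invented stand-ins for this sketch and are not pest APIs:

```rust
use std::iter::Peekable;

// Invented stand-ins for pest's Rule/Pair machinery, illustration only.
#[derive(Clone, Copy)]
enum Op {
    Plus,
    Times,
    Pow,
}

#[derive(Clone, Copy)]
enum Tok {
    Num(i64),
    Op(Op),
}

// (precedence, is_right_associative), mirroring a PrecClimber table.
fn info(op: Op) -> (u32, bool) {
    match op {
        Op::Plus => (1, false),
        Op::Times => (2, false),
        Op::Pow => (3, true),
    }
}

fn apply(lhs: i64, op: Op, rhs: i64) -> i64 {
    match op {
        Op::Plus => lhs + rhs,
        Op::Times => lhs * rhs,
        Op::Pow => lhs.pow(rhs as u32),
    }
}

// Same control flow as `climb_rec` above, minus the pest types.
fn climb_rec<I: Iterator<Item = Tok>>(mut lhs: i64, min_prec: u32, toks: &mut Peekable<I>) -> i64 {
    while let Some(&Tok::Op(op)) = toks.peek() {
        let (prec, _) = info(op);
        if prec < min_prec {
            break;
        }
        toks.next();
        let mut rhs = match toks.next() {
            Some(Tok::Num(n)) => n,
            _ => panic!("infix operator must be followed by a primary expression"),
        };
        while let Some(&Tok::Op(next_op)) = toks.peek() {
            let (new_prec, right_assoc) = info(next_op);
            if new_prec > prec || (right_assoc && new_prec == prec) {
                rhs = climb_rec(rhs, new_prec, toks);
            } else {
                break;
            }
        }
        lhs = apply(lhs, op, rhs);
    }
    lhs
}

fn main() {
    // 2 + 3 * 4 ^ 2 == 2 + (3 * (4 ^ 2)) == 50
    let toks = vec![
        Tok::Num(2),
        Tok::Op(Op::Plus),
        Tok::Num(3),
        Tok::Op(Op::Times),
        Tok::Num(4),
        Tok::Op(Op::Pow),
        Tok::Num(2),
    ];
    let mut it = toks.into_iter().peekable();
    let first = match it.next() {
        Some(Tok::Num(n)) => n,
        _ => unreachable!(),
    };
    assert_eq!(climb_rec(first, 0, &mut it), 50);
}
```

With precedences plus = 1, times = 2, power = 3 (right-associative), `2 + 3 * 4 ^ 2` reduces to 50, which is what `climb` should produce for the equivalent `Pairs` stream.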
prec_climber.rs
// pest. The Elegant Parser // Copyright (c) 2018 Dragoș Tiselice // // Licensed under the Apache License, Version 2.0 // <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT // license <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. All files in the project carrying such notice may not be copied, // modified, or distributed except according to those terms. //! Constructs useful in infix operator parsing with the precedence climbing method. use alloc::borrow::Cow; use alloc::boxed::Box; use alloc::vec::Vec; use core::iter::Peekable; use core::ops::BitOr; use crate::iterators::Pair; use crate::RuleType; /// Macro for more convenient const fn definition of `prec_climber::PrecClimber`. /// /// # Examples /// /// ``` /// # use pest::prec_climber::{Assoc, PrecClimber}; /// # use pest::prec_climber; /// # #[allow(non_camel_case_types)] /// # #[allow(dead_code)] /// # #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] /// # enum Rule { /// # plus, /// # minus, /// # times, /// # divide, /// # power /// # } /// static CLIMBER: PrecClimber<Rule> = prec_climber![ /// L plus | minus, /// L times | divide, /// R power, /// ]; /// ``` #[cfg(feature = "const_prec_climber")] #[macro_export] macro_rules! prec_climber { ( $( $assoc:ident $rule:ident $( | $rules:ident )* ),+ $(,)? ) => {{ prec_climber!( @precedences { 1u32 } $( [ $rule $( $rules )* ] )* ); $crate::prec_climber::PrecClimber::new_const( prec_climber!( @array $( $assoc $rule $(, $assoc $rules )* ),* ) ) }}; ( @assoc L ) => { $crate::prec_climber::Assoc::Left }; ( @assoc R ) => { $crate::prec_climber::Assoc::Right }; ( @array $( $assoc:ident $rule:ident ),* ) => { &[ $( ( Rule::$rule, $rule, prec_climber!( @assoc $assoc ), ) ),* ] }; ( @precedences { $precedence:expr } ) => {}; ( @precedences { $precedence:expr } [ $( $rule:ident )* ] $( [ $( $rules:ident )* ] )* ) => { $( #[allow(non_upper_case_globals)] const $rule: u32 = $precedence; )* prec_climber!( @precedences { 1u32 + $precedence } $( [ $( $rules )* ] )* ); }; } /// Associativity of an [`Operator`]. /// /// [`Operator`]: struct.Operator.html #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub enum Assoc { /// Left `Operator` associativity Left, /// Right `Operator` associativity Right, } /// Infix operator used in [`PrecClimber`]. /// /// [`PrecClimber`]: struct.PrecClimber.html #[derive(Debug)] pub struct Operator<R: RuleType> { rule: R, assoc: Assoc, next: Option<Box<Operator<R>>>, } impl<R: RuleType> Operator<R> { /// Creates a new `Operator` from a `Rule` and `Assoc`. /// /// # Examples /// /// ``` /// # use pest::prec_climber::{Assoc, Operator}; /// # #[allow(non_camel_case_types)] /// # #[allow(dead_code)] /// # #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] /// # enum Rule { /// # plus, /// # minus /// # } /// Operator::new(Rule::plus, Assoc::Left) | Operator::new(Rule::minus, Assoc::Right); /// ``` pub fn new(rule: R, assoc: Assoc) -> Operator<R> { Operator { rule, assoc, next: None, } } } impl<R: RuleType> BitOr for Operator<R> { type Output = Self; fn bitor(mut self, rhs: Self) -> Self {
}

/// List of operators and precedences, which can perform [precedence climbing][1] on infix
/// expressions contained in a [`Pairs`]. The token pairs contained in the `Pairs` should start
/// with a *primary* pair and then alternate between an *operator* and a *primary*.
///
/// [1]: https://en.wikipedia.org/wiki/Operator-precedence_parser#Precedence_climbing_method
/// [`Pairs`]: ../iterators/struct.Pairs.html
#[derive(Debug)]
pub struct PrecClimber<R: Clone + 'static> {
    ops: Cow<'static, [(R, u32, Assoc)]>,
}

#[cfg(feature = "const_prec_climber")]
impl<R: Clone + 'static> PrecClimber<R> {
    /// Creates a new `PrecClimber` directly from a static slice of
    /// `(rule: Rule, precedence: u32, associativity: Assoc)` tuples.
    ///
    /// Precedence starts from `1`. Entries don't have to be ordered in any way, but it's easier to read when
    /// sorted.
    ///
    /// # Examples
    ///
    /// ```
    /// # use pest::prec_climber::{Assoc, PrecClimber};
    /// # #[allow(non_camel_case_types)]
    /// # #[allow(dead_code)]
    /// # #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
    /// # enum Rule {
    /// #     plus,
    /// #     minus,
    /// #     times,
    /// #     divide,
    /// #     power
    /// # }
    /// static CLIMBER: PrecClimber<Rule> = PrecClimber::new_const(&[
    ///     (Rule::plus, 1, Assoc::Left), (Rule::minus, 1, Assoc::Left),
    ///     (Rule::times, 2, Assoc::Left), (Rule::divide, 2, Assoc::Left),
    ///     (Rule::power, 3, Assoc::Right)
    /// ]);
    /// ```
    pub const fn new_const(ops: &'static [(R, u32, Assoc)]) -> PrecClimber<R> {
        PrecClimber {
            ops: Cow::Borrowed(ops),
        }
    }
}

impl<R: RuleType> PrecClimber<R> {
    // find matching operator by `rule`
    fn get(&self, rule: &R) -> Option<(u32, Assoc)> {
        self.ops
            .iter()
            .find(|(r, _, _)| r == rule)
            .map(|(_, precedence, assoc)| (*precedence, *assoc))
    }

    /// Creates a new `PrecClimber` from the `Operator`s contained in `ops`. Every entry in the
    /// `Vec` has precedence *index + 1*. In order to have operators with same precedence, they need
    /// to be chained with `|` between them.
    ///
    /// # Examples
    ///
    /// ```
    /// # use pest::prec_climber::{Assoc, Operator, PrecClimber};
    /// # #[allow(non_camel_case_types)]
    /// # #[allow(dead_code)]
    /// # #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
    /// # enum Rule {
    /// #     plus,
    /// #     minus,
    /// #     times,
    /// #     divide,
    /// #     power
    /// # }
    /// PrecClimber::new(vec![
    ///     Operator::new(Rule::plus, Assoc::Left) | Operator::new(Rule::minus, Assoc::Left),
    ///     Operator::new(Rule::times, Assoc::Left) | Operator::new(Rule::divide, Assoc::Left),
    ///     Operator::new(Rule::power, Assoc::Right)
    /// ]);
    /// ```
    pub fn new(ops: Vec<Operator<R>>) -> PrecClimber<R> {
        let ops = ops
            .into_iter()
            .zip(1..)
            .fold(Vec::new(), |mut vec, (op, prec)| {
                let mut next = Some(op);
                while let Some(op) = next.take() {
                    let Operator {
                        rule,
                        assoc,
                        next: op_next,
                    } = op;
                    vec.push((rule, prec, assoc));
                    next = op_next.map(|op| *op);
                }
                vec
            });

        PrecClimber {
            ops: Cow::Owned(ops),
        }
    }

    /// Performs the precedence climbing algorithm on the `pairs` in a similar manner to map-reduce.
    /// *Primary* pairs are mapped with `primary` and then reduced to one single result with
    /// `infix`.
    ///
    /// # Panics
    ///
    /// Panics will occur when `pairs` is empty or when the alternating *primary*, *operator*,
    /// *primary* order is not respected.
/// /// # Examples /// /// ```ignore /// let primary = |pair| { /// consume(pair, climber) /// }; /// let infix = |lhs: i32, op: Pair<Rule>, rhs: i32| { /// match op.rule() { /// Rule::plus => lhs + rhs, /// Rule::minus => lhs - rhs, /// Rule::times => lhs * rhs, /// Rule::divide => lhs / rhs, /// Rule::power => lhs.pow(rhs as u32), /// _ => unreachable!() /// } /// }; /// /// let result = climber.climb(pairs, primary, infix); /// ``` pub fn climb<'i, P, F, G, T>(&self, mut pairs: P, mut primary: F, mut infix: G) -> T where P: Iterator<Item = Pair<'i, R>>, F: FnMut(Pair<'i, R>) -> T, G: FnMut(T, Pair<'i, R>, T) -> T, { let lhs = primary( pairs .next() .expect("precedence climbing requires a non-empty Pairs"), ); self.climb_rec(lhs, 0, &mut pairs.peekable(), &mut primary, &mut infix) } fn climb_rec<'i, P, F, G, T>( &self, mut lhs: T, min_prec: u32, pairs: &mut Peekable<P>, primary: &mut F, infix: &mut G, ) -> T where P: Iterator<Item = Pair<'i, R>>, F: FnMut(Pair<'i, R>) -> T, G: FnMut(T, Pair<'i, R>, T) -> T, { while pairs.peek().is_some() { let rule = pairs.peek().unwrap().as_rule(); if let Some((prec, _)) = self.get(&rule) { if prec >= min_prec { let op = pairs.next().unwrap(); let mut rhs = primary(pairs.next().expect( "infix operator must be followed by \ a primary expression", )); while pairs.peek().is_some() { let rule = pairs.peek().unwrap().as_rule(); if let Some((new_prec, assoc)) = self.get(&rule) { if new_prec > prec || assoc == Assoc::Right && new_prec == prec { rhs = self.climb_rec(rhs, new_prec, pairs, primary, infix); } else { break; } } else { break; } } lhs = infix(lhs, op, rhs); } else { break; } } else { break; } } lhs } }
fn assign_next<R: RuleType>(op: &mut Operator<R>, next: Operator<R>) { if let Some(ref mut child) = op.next { assign_next(child, next); } else { op.next = Some(Box::new(next)); } } assign_next(&mut self, rhs); self }
identifier_body
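The `identifier_body` target in this row is `assign_next`: the `|` operator left-folds `a | b | c` into a singly linked list of `Operator`s by recursing to the tail before attaching. A bare sketch of that append, with illustrative names:

```rust
// A bare model of `assign_next`: walk to the tail, then attach.
struct Node {
    val: u32,
    next: Option<Box<Node>>,
}

fn append(head: &mut Node, tail: Node) {
    match head.next {
        Some(ref mut child) => append(child, tail),
        None => head.next = Some(Box::new(tail)),
    }
}

fn main() {
    let mut ops = Node { val: 1, next: None };
    append(&mut ops, Node { val: 2, next: None });
    append(&mut ops, Node { val: 3, next: None });
    // 1 -> 2 -> 3: the second append landed at the tail, not the head.
    assert_eq!(ops.next.unwrap().next.unwrap().val, 3);
}
```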
prec_climber.rs
// pest. The Elegant Parser // Copyright (c) 2018 Dragoș Tiselice // // Licensed under the Apache License, Version 2.0 // <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT // license <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. All files in the project carrying such notice may not be copied, // modified, or distributed except according to those terms. //! Constructs useful in infix operator parsing with the precedence climbing method. use alloc::borrow::Cow; use alloc::boxed::Box; use alloc::vec::Vec; use core::iter::Peekable; use core::ops::BitOr; use crate::iterators::Pair; use crate::RuleType; /// Macro for more convenient const fn definition of `prec_climber::PrecClimber`. /// /// # Examples /// /// ``` /// # use pest::prec_climber::{Assoc, PrecClimber}; /// # use pest::prec_climber; /// # #[allow(non_camel_case_types)] /// # #[allow(dead_code)] /// # #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] /// # enum Rule { /// # plus, /// # minus, /// # times, /// # divide, /// # power /// # } /// static CLIMBER: PrecClimber<Rule> = prec_climber![ /// L plus | minus, /// L times | divide, /// R power, /// ]; /// ``` #[cfg(feature = "const_prec_climber")] #[macro_export] macro_rules! prec_climber { ( $( $assoc:ident $rule:ident $( | $rules:ident )* ),+ $(,)? ) => {{ prec_climber!( @precedences { 1u32 } $( [ $rule $( $rules )* ] )* ); $crate::prec_climber::PrecClimber::new_const( prec_climber!( @array $( $assoc $rule $(, $assoc $rules )* ),* ) ) }}; ( @assoc L ) => { $crate::prec_climber::Assoc::Left }; ( @assoc R ) => { $crate::prec_climber::Assoc::Right }; ( @array $( $assoc:ident $rule:ident ),* ) => { &[ $( ( Rule::$rule, $rule, prec_climber!( @assoc $assoc ), ) ),* ] }; ( @precedences { $precedence:expr } ) => {}; ( @precedences { $precedence:expr } [ $( $rule:ident )* ] $( [ $( $rules:ident )* ] )* ) => { $( #[allow(non_upper_case_globals)] const $rule: u32 = $precedence; )* prec_climber!( @precedences { 1u32 + $precedence } $( [ $( $rules )* ] )* ); }; } /// Associativity of an [`Operator`]. /// /// [`Operator`]: struct.Operator.html #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub enum Assoc { /// Left `Operator` associativity Left, /// Right `Operator` associativity Right, } /// Infix operator used in [`PrecClimber`]. /// /// [`PrecClimber`]: struct.PrecClimber.html #[derive(Debug)] pub struct Operator<R: RuleType> { rule: R, assoc: Assoc, next: Option<Box<Operator<R>>>, } impl<R: RuleType> Operator<R> { /// Creates a new `Operator` from a `Rule` and `Assoc`. /// /// # Examples /// /// ``` /// # use pest::prec_climber::{Assoc, Operator}; /// # #[allow(non_camel_case_types)] /// # #[allow(dead_code)] /// # #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] /// # enum Rule { /// # plus, /// # minus /// # } /// Operator::new(Rule::plus, Assoc::Left) | Operator::new(Rule::minus, Assoc::Right); /// ``` pub fn new(rule: R, assoc: Assoc) -> Operator<R> { Operator { rule, assoc, next: None, } } } impl<R: RuleType> BitOr for Operator<R> { type Output = Self; fn bitor(mut self, rhs: Self) -> Self { fn assign_next<R: RuleType>(op: &mut Operator<R>, next: Operator<R>) { if let Some(ref mut child) = op.next { assign_next(child, next); } else { op.next = Some(Box::new(next)); } } assign_next(&mut self, rhs); self } } /// List of operators and precedences, which can perform [precedence climbing][1] on infix /// expressions contained in a [`Pairs`]. 
The token pairs contained in the `Pairs` should start
/// with a *primary* pair and then alternate between an *operator* and a *primary*.
///
/// [1]: https://en.wikipedia.org/wiki/Operator-precedence_parser#Precedence_climbing_method
/// [`Pairs`]: ../iterators/struct.Pairs.html
#[derive(Debug)]
pub struct PrecClimber<R: Clone + 'static> {
    ops: Cow<'static, [(R, u32, Assoc)]>,
}

#[cfg(feature = "const_prec_climber")]
impl<R: Clone + 'static> PrecClimber<R> {
    /// Creates a new `PrecClimber` directly from a static slice of
    /// `(rule: Rule, precedence: u32, associativity: Assoc)` tuples.
    ///
    /// Precedence starts from `1`. Entries don't have to be ordered in any way, but it's easier to read when
    /// sorted.
    ///
    /// # Examples
    ///
    /// ```
    /// # use pest::prec_climber::{Assoc, PrecClimber};
    /// # #[allow(non_camel_case_types)]
    /// # #[allow(dead_code)]
    /// # #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
    /// # enum Rule {
    /// #     plus,
    /// #     minus,
    /// #     times,
    /// #     divide,
    /// #     power
    /// # }
    /// static CLIMBER: PrecClimber<Rule> = PrecClimber::new_const(&[
    ///     (Rule::plus, 1, Assoc::Left), (Rule::minus, 1, Assoc::Left),
    ///     (Rule::times, 2, Assoc::Left), (Rule::divide, 2, Assoc::Left),
    ///     (Rule::power, 3, Assoc::Right)
    /// ]);
    /// ```
    pub const fn new_const(ops: &'static [(R, u32, Assoc)]) -> PrecClimber<R> {
        PrecClimber {
            ops: Cow::Borrowed(ops),
        }
    }
}

impl<R: RuleType> PrecClimber<R> {
    // find matching operator by `rule`
    fn get(&self, rule: &R) -> Option<(u32, Assoc)> {
        self.ops
            .iter()
            .find(|(r, _, _)| r == rule)
            .map(|(_, precedence, assoc)| (*precedence, *assoc))
    }

    /// Creates a new `PrecClimber` from the `Operator`s contained in `ops`. Every entry in the
    /// `Vec` has precedence *index + 1*. In order to have operators with same precedence, they need
    /// to be chained with `|` between them.
    ///
    /// # Examples
    ///
    /// ```
    /// # use pest::prec_climber::{Assoc, Operator, PrecClimber};
    /// # #[allow(non_camel_case_types)]
    /// # #[allow(dead_code)]
    /// # #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
    /// # enum Rule {
    /// #     plus,
    /// #     minus,
    /// #     times,
    /// #     divide,
    /// #     power
    /// # }
    /// PrecClimber::new(vec![
    ///     Operator::new(Rule::plus, Assoc::Left) | Operator::new(Rule::minus, Assoc::Left),
    ///     Operator::new(Rule::times, Assoc::Left) | Operator::new(Rule::divide, Assoc::Left),
    ///     Operator::new(Rule::power, Assoc::Right)
    /// ]);
    /// ```
    pub fn new(ops: Vec<Operator<R>>) -> PrecClimber<R> {
        let ops = ops
            .into_iter()
            .zip(1..)
            .fold(Vec::new(), |mut vec, (op, prec)| {
                let mut next = Some(op);
                while let Some(op) = next.take() {
                    let Operator {
                        rule,
                        assoc,
                        next: op_next,
                    } = op;
                    vec.push((rule, prec, assoc));
                    next = op_next.map(|op| *op);
                }
                vec
            });

        PrecClimber {
            ops: Cow::Owned(ops),
        }
    }

    /// Performs the precedence climbing algorithm on the `pairs` in a similar manner to map-reduce.
    /// *Primary* pairs are mapped with `primary` and then reduced to one single result with
    /// `infix`.
    ///
    /// # Panics
    ///
    /// Panics will occur when `pairs` is empty or when the alternating *primary*, *operator*,
    /// *primary* order is not respected.
/// /// # Examples /// /// ```ignore /// let primary = |pair| { /// consume(pair, climber) /// }; /// let infix = |lhs: i32, op: Pair<Rule>, rhs: i32| { /// match op.rule() { /// Rule::plus => lhs + rhs, /// Rule::minus => lhs - rhs, /// Rule::times => lhs * rhs, /// Rule::divide => lhs / rhs, /// Rule::power => lhs.pow(rhs as u32), /// _ => unreachable!() /// } /// }; /// /// let result = climber.climb(pairs, primary, infix); /// ``` pub fn c
'i, P, F, G, T>(&self, mut pairs: P, mut primary: F, mut infix: G) -> T where P: Iterator<Item = Pair<'i, R>>, F: FnMut(Pair<'i, R>) -> T, G: FnMut(T, Pair<'i, R>, T) -> T, { let lhs = primary( pairs .next() .expect("precedence climbing requires a non-empty Pairs"), ); self.climb_rec(lhs, 0, &mut pairs.peekable(), &mut primary, &mut infix) } fn climb_rec<'i, P, F, G, T>( &self, mut lhs: T, min_prec: u32, pairs: &mut Peekable<P>, primary: &mut F, infix: &mut G, ) -> T where P: Iterator<Item = Pair<'i, R>>, F: FnMut(Pair<'i, R>) -> T, G: FnMut(T, Pair<'i, R>, T) -> T, { while pairs.peek().is_some() { let rule = pairs.peek().unwrap().as_rule(); if let Some((prec, _)) = self.get(&rule) { if prec >= min_prec { let op = pairs.next().unwrap(); let mut rhs = primary(pairs.next().expect( "infix operator must be followed by \ a primary expression", )); while pairs.peek().is_some() { let rule = pairs.peek().unwrap().as_rule(); if let Some((new_prec, assoc)) = self.get(&rule) { if new_prec > prec || assoc == Assoc::Right && new_prec == prec { rhs = self.climb_rec(rhs, new_prec, pairs, primary, infix); } else { break; } } else { break; } } lhs = infix(lhs, op, rhs); } else { break; } } else { break; } } lhs } }
limb<
identifier_name
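`new_const` can initialize a `static` because building `Cow::Borrowed` over a `'static` slice is a `const`-compatible operation; nothing allocates unless a `Cow::Owned` is ever produced. A minimal sketch of the same pattern, with an invented `Table` type standing in for `PrecClimber`:

```rust
use std::borrow::Cow;

// Illustrative stand-in for the Cow-backed table inside PrecClimber.
struct Table {
    entries: Cow<'static, [(u32, &'static str)]>,
}

impl Table {
    const fn new_const(entries: &'static [(u32, &'static str)]) -> Table {
        Table {
            entries: Cow::Borrowed(entries),
        }
    }
}

// Usable in a `static` because the constructor is `const fn` and only
// wraps a borrowed slice.
static TABLE: Table = Table::new_const(&[(1, "plus"), (2, "times")]);

fn main() {
    assert_eq!(TABLE.entries.len(), 2);
}
```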
prec_climber.rs
// pest. The Elegant Parser // Copyright (c) 2018 Dragoș Tiselice // // Licensed under the Apache License, Version 2.0 // <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT // license <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. All files in the project carrying such notice may not be copied, // modified, or distributed except according to those terms. //! Constructs useful in infix operator parsing with the precedence climbing method. use alloc::borrow::Cow; use alloc::boxed::Box; use alloc::vec::Vec; use core::iter::Peekable; use core::ops::BitOr; use crate::iterators::Pair; use crate::RuleType; /// Macro for more convenient const fn definition of `prec_climber::PrecClimber`. /// /// # Examples /// /// ``` /// # use pest::prec_climber::{Assoc, PrecClimber}; /// # use pest::prec_climber; /// # #[allow(non_camel_case_types)] /// # #[allow(dead_code)] /// # #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] /// # enum Rule { /// # plus, /// # minus, /// # times, /// # divide, /// # power /// # } /// static CLIMBER: PrecClimber<Rule> = prec_climber![ /// L plus | minus, /// L times | divide, /// R power, /// ]; /// ``` #[cfg(feature = "const_prec_climber")] #[macro_export] macro_rules! prec_climber { ( $( $assoc:ident $rule:ident $( | $rules:ident )* ),+ $(,)? ) => {{ prec_climber!( @precedences { 1u32 } $( [ $rule $( $rules )* ] )* ); $crate::prec_climber::PrecClimber::new_const( prec_climber!( @array $( $assoc $rule $(, $assoc $rules )* ),* ) ) }}; ( @assoc L ) => { $crate::prec_climber::Assoc::Left }; ( @assoc R ) => { $crate::prec_climber::Assoc::Right }; ( @array $( $assoc:ident $rule:ident ),* ) => { &[ $( ( Rule::$rule, $rule, prec_climber!( @assoc $assoc ), ) ),* ] }; ( @precedences { $precedence:expr } ) => {}; ( @precedences { $precedence:expr } [ $( $rule:ident )* ] $( [ $( $rules:ident )* ] )* ) => { $( #[allow(non_upper_case_globals)] const $rule: u32 = $precedence; )* prec_climber!( @precedences { 1u32 + $precedence } $( [ $( $rules )* ] )* ); }; } /// Associativity of an [`Operator`]. /// /// [`Operator`]: struct.Operator.html #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub enum Assoc { /// Left `Operator` associativity Left, /// Right `Operator` associativity Right, } /// Infix operator used in [`PrecClimber`]. /// /// [`PrecClimber`]: struct.PrecClimber.html #[derive(Debug)] pub struct Operator<R: RuleType> { rule: R, assoc: Assoc, next: Option<Box<Operator<R>>>, } impl<R: RuleType> Operator<R> { /// Creates a new `Operator` from a `Rule` and `Assoc`. /// /// # Examples /// /// ``` /// # use pest::prec_climber::{Assoc, Operator}; /// # #[allow(non_camel_case_types)] /// # #[allow(dead_code)] /// # #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] /// # enum Rule { /// # plus, /// # minus /// # } /// Operator::new(Rule::plus, Assoc::Left) | Operator::new(Rule::minus, Assoc::Right); /// ``` pub fn new(rule: R, assoc: Assoc) -> Operator<R> { Operator { rule, assoc, next: None, } } } impl<R: RuleType> BitOr for Operator<R> { type Output = Self; fn bitor(mut self, rhs: Self) -> Self { fn assign_next<R: RuleType>(op: &mut Operator<R>, next: Operator<R>) { if let Some(ref mut child) = op.next { assign_next(child, next); } else { op.next = Some(Box::new(next)); } } assign_next(&mut self, rhs); self } } /// List of operators and precedences, which can perform [precedence climbing][1] on infix /// expressions contained in a [`Pairs`]. 
The token pairs contained in the `Pairs` should start
/// with a *primary* pair and then alternate between an *operator* and a *primary*.
///
/// [1]: https://en.wikipedia.org/wiki/Operator-precedence_parser#Precedence_climbing_method
/// [`Pairs`]: ../iterators/struct.Pairs.html
#[derive(Debug)]
pub struct PrecClimber<R: Clone + 'static> {
    ops: Cow<'static, [(R, u32, Assoc)]>,
}

#[cfg(feature = "const_prec_climber")]
impl<R: Clone + 'static> PrecClimber<R> {
    /// Creates a new `PrecClimber` directly from a static slice of
    /// `(rule: Rule, precedence: u32, associativity: Assoc)` tuples.
    ///
    /// Precedence starts from `1`. Entries don't have to be ordered in any way, but it's easier to read when
    /// sorted.
    ///
    /// # Examples
    ///
    /// ```
    /// # use pest::prec_climber::{Assoc, PrecClimber};
    /// # #[allow(non_camel_case_types)]
    /// # #[allow(dead_code)]
    /// # #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
    /// # enum Rule {
    /// #     plus,
    /// #     minus,
    /// #     times,
    /// #     divide,
    /// #     power
    /// # }
    /// static CLIMBER: PrecClimber<Rule> = PrecClimber::new_const(&[
    ///     (Rule::plus, 1, Assoc::Left), (Rule::minus, 1, Assoc::Left),
    ///     (Rule::times, 2, Assoc::Left), (Rule::divide, 2, Assoc::Left),
    ///     (Rule::power, 3, Assoc::Right)
    /// ]);
    /// ```
    pub const fn new_const(ops: &'static [(R, u32, Assoc)]) -> PrecClimber<R> {
        PrecClimber {
            ops: Cow::Borrowed(ops),
        }
    }
}

impl<R: RuleType> PrecClimber<R> {
    // find matching operator by `rule`
    fn get(&self, rule: &R) -> Option<(u32, Assoc)> {
        self.ops
            .iter()
            .find(|(r, _, _)| r == rule)
            .map(|(_, precedence, assoc)| (*precedence, *assoc))
    }

    /// Creates a new `PrecClimber` from the `Operator`s contained in `ops`. Every entry in the
    /// `Vec` has precedence *index + 1*. In order to have operators with same precedence, they need
    /// to be chained with `|` between them.
    ///
    /// # Examples
    ///
    /// ```
    /// # use pest::prec_climber::{Assoc, Operator, PrecClimber};
    /// # #[allow(non_camel_case_types)]
    /// # #[allow(dead_code)]
    /// # #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
    /// # enum Rule {
    /// #     plus,
    /// #     minus,
    /// #     times,
    /// #     divide,
    /// #     power
    /// # }
    /// PrecClimber::new(vec![
    ///     Operator::new(Rule::plus, Assoc::Left) | Operator::new(Rule::minus, Assoc::Left),
    ///     Operator::new(Rule::times, Assoc::Left) | Operator::new(Rule::divide, Assoc::Left),
    ///     Operator::new(Rule::power, Assoc::Right)
    /// ]);
    /// ```
    pub fn new(ops: Vec<Operator<R>>) -> PrecClimber<R> {
        let ops = ops
            .into_iter()
            .zip(1..)
            .fold(Vec::new(), |mut vec, (op, prec)| {
                let mut next = Some(op);
                while let Some(op) = next.take() {
                    let Operator {
                        rule,
                        assoc,
                        next: op_next,
                    } = op;
                    vec.push((rule, prec, assoc));
                    next = op_next.map(|op| *op);
                }
                vec
            });

        PrecClimber {
            ops: Cow::Owned(ops),
        }
    }

    /// Performs the precedence climbing algorithm on the `pairs` in a similar manner to map-reduce.
    /// *Primary* pairs are mapped with `primary` and then reduced to one single result with
    /// `infix`.
    ///
    /// # Panics
    ///
    /// Panics will occur when `pairs` is empty or when the alternating *primary*, *operator*,
    /// *primary* order is not respected.
/// /// # Examples /// /// ```ignore /// let primary = |pair| { /// consume(pair, climber) /// }; /// let infix = |lhs: i32, op: Pair<Rule>, rhs: i32| { /// match op.rule() { /// Rule::plus => lhs + rhs, /// Rule::minus => lhs - rhs, /// Rule::times => lhs * rhs, /// Rule::divide => lhs / rhs, /// Rule::power => lhs.pow(rhs as u32), /// _ => unreachable!() /// } /// }; /// /// let result = climber.climb(pairs, primary, infix); /// ``` pub fn climb<'i, P, F, G, T>(&self, mut pairs: P, mut primary: F, mut infix: G) -> T where P: Iterator<Item = Pair<'i, R>>, F: FnMut(Pair<'i, R>) -> T, G: FnMut(T, Pair<'i, R>, T) -> T, { let lhs = primary( pairs .next() .expect("precedence climbing requires a non-empty Pairs"), ); self.climb_rec(lhs, 0, &mut pairs.peekable(), &mut primary, &mut infix) } fn climb_rec<'i, P, F, G, T>( &self, mut lhs: T, min_prec: u32, pairs: &mut Peekable<P>, primary: &mut F, infix: &mut G, ) -> T where P: Iterator<Item = Pair<'i, R>>, F: FnMut(Pair<'i, R>) -> T, G: FnMut(T, Pair<'i, R>, T) -> T, { while pairs.peek().is_some() { let rule = pairs.peek().unwrap().as_rule(); if let Some((prec, _)) = self.get(&rule) { if prec >= min_prec { let op = pairs.next().unwrap(); let mut rhs = primary(pairs.next().expect( "infix operator must be followed by \ a primary expression", )); while pairs.peek().is_some() { let rule = pairs.peek().unwrap().as_rule(); if let Some((new_prec, assoc)) = self.get(&rule) { if new_prec > prec || assoc == Assoc::Right && new_prec == prec { rhs = self.climb_rec(rhs, new_prec, pairs, primary, infix); } else { break; } } else { break; } } lhs = infix(lhs, op, rhs); } else {
} else { break; } } lhs } }
break; }
conditional_block
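The `conditional_block` target here is the `break` taken when the peeked operator binds no tighter than the current one. The surrounding guard depends on `&&` binding tighter than `||`; this small check spells out the grouping with illustrative inputs:

```rust
#[derive(PartialEq)]
enum Assoc {
    Left,
    Right,
}

// Same guard as in `climb_rec`, with the implicit grouping made explicit.
fn should_recurse(new_prec: u32, prec: u32, assoc: Assoc) -> bool {
    new_prec > prec || (assoc == Assoc::Right && new_prec == prec)
}

fn main() {
    // right-associative at equal precedence recurses (e.g. 2 ^ 3 ^ 2)...
    assert!(should_recurse(3, 3, Assoc::Right));
    // ...left-associative at equal precedence does not (e.g. 8 - 3 - 2),
    assert!(!should_recurse(2, 2, Assoc::Left));
    // and anything strictly tighter always recurses.
    assert!(should_recurse(3, 1, Assoc::Left));
}
```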
subscriber.rs
use super::error::{ErrorKind, Result, ResultExt}; use super::header::{decode, encode, match_field}; use super::{Message, Topic}; use crate::rosmsg::RosMsg; use crate::util::lossy_channel::{lossy_channel, LossyReceiver, LossySender}; use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; use crossbeam::channel::{bounded, select, Receiver, Sender, TrySendError}; use log::error; use std::collections::{BTreeMap, BTreeSet, HashMap}; use std::net::{SocketAddr, TcpStream, ToSocketAddrs}; use std::sync::Arc; use std::thread; enum DataStreamConnectionChange { Connect( usize, LossySender<MessageInfo>, Sender<HashMap<String, String>>, ), Disconnect(usize), } pub struct SubscriberRosConnection { next_data_stream_id: usize, data_stream_tx: Sender<DataStreamConnectionChange>, publishers_stream: Sender<SocketAddr>, topic: Topic, pub connected_ids: BTreeSet<usize>, pub connected_publishers: BTreeSet<String>, } impl SubscriberRosConnection { pub fn new( caller_id: &str, topic: &str, msg_definition: String, msg_type: String, md5sum: String, ) -> SubscriberRosConnection { let subscriber_connection_queue_size = 8; let (data_stream_tx, data_stream_rx) = bounded(subscriber_connection_queue_size); let publisher_connection_queue_size = 8; let (pub_tx, pub_rx) = bounded(publisher_connection_queue_size); let caller_id = String::from(caller_id); let topic_name = String::from(topic); thread::spawn({ let msg_type = msg_type.clone(); let md5sum = md5sum.clone(); move || { join_connections( data_stream_rx, pub_rx, &caller_id, &topic_name, &msg_definition, &md5sum, &msg_type, ) } }); let topic = Topic { name: String::from(topic), msg_type, md5sum, }; SubscriberRosConnection { next_data_stream_id: 1, data_stream_tx, publishers_stream: pub_tx, topic, connected_ids: BTreeSet::new(), connected_publishers: BTreeSet::new(), } } // TODO: allow synchronous handling for subscribers // This creates a new thread to call on_message. Next API change should // allow subscribing with either callback or inline handler of the queue. // The queue is lossy, so it wouldn't be blocking. 
pub fn add_subscriber<T, F, G>(
    &mut self,
    queue_size: usize,
    on_message: F,
    on_connect: G,
) -> usize
where
    T: Message,
    F: Fn(T, &str) + Send + 'static,
    G: Fn(HashMap<String, String>) + Send + 'static,
{
    let data_stream_id = self.next_data_stream_id;
    self.connected_ids.insert(data_stream_id);
    self.next_data_stream_id += 1;
    let (data_tx, data_rx) = lossy_channel(queue_size);
    let (connection_tx, connection_rx) = bounded(8);
    if self
        .data_stream_tx
        .send(DataStreamConnectionChange::Connect(
            data_stream_id,
            data_tx,
            connection_tx,
        ))
        .is_err()
    {
        // TODO: we might want to panic here
        error!("Subscriber failed to connect to data stream");
    }
    thread::spawn(move || {
        handle_data::<T, F, G>(data_rx, connection_rx, on_message, on_connect)
    });
    data_stream_id
}

pub fn remove_subscriber(&mut self, id: usize) {
    self.connected_ids.remove(&id);
    if self
        .data_stream_tx
        .send(DataStreamConnectionChange::Disconnect(id))
        .is_err()
    {
        // TODO: we might want to panic here
        error!("Subscriber failed to disconnect from data stream");
    }
}

pub fn has_subscribers(&self) -> bool {
    !self.connected_ids.is_empty()
}

#[inline]
pub fn publisher_count(&self) -> usize {
    self.connected_publishers.len()
}

#[inline]
pub fn publisher_uris(&self) -> Vec<String> {
    self.connected_publishers.iter().cloned().collect()
}

#[allow(clippy::useless_conversion)]
pub fn connect_to<U: ToSocketAddrs>(
    &mut self,
    publisher: &str,
    addresses: U,
) -> std::io::Result<()> {
    for address in addresses.to_socket_addrs()? {
        // This should never fail, so it's safe to unwrap
        // Failure could only be caused by the join_connections
        // thread not running, which only happens after
        // Subscriber has been deconstructed
        self.publishers_stream
            .send(address)
            .expect("Connected thread died");
    }
    self.connected_publishers.insert(publisher.to_owned());
    Ok(())
}

pub fn is_connected_to(&self, publisher: &str) -> bool {
    self.connected_publishers.contains(publisher)
}

pub fn limit_publishers_to(&mut self, publishers: &BTreeSet<String>) {
    let difference: Vec<String> = self
        .connected_publishers
        .difference(publishers)
        .cloned()
        .collect();
    for item in difference {
        self.connected_publishers.remove(&item);
    }
}

pub fn get_topic(&self) -> &Topic {
    &self.topic
}
}

fn handle_data<T, F, G>(
    data: LossyReceiver<MessageInfo>,
    connections: Receiver<HashMap<String, String>>,
    on_message: F,
    on_connect: G,
) where
    T: Message,
    F: Fn(T, &str),
    G: Fn(HashMap<String, String>) + Send + 'static,
{
    loop {
        select! {
            recv(data.kill_rx.kill_rx) -> _ => break,
            recv(data.data_rx) -> msg => match msg {
                Err(_) => break,
                Ok(buffer) => match RosMsg::decode_slice(&buffer.data) {
                    Ok(value) => on_message(value, &buffer.caller_id),
                    Err(err) => error!("Failed to decode message: {}", err),
                },
            },
            recv(connections) -> msg => match msg {
                Err(_) => break,
                Ok(conn) => on_connect(conn),
            },
        }
    }
}

fn join_connections(
    subscribers: Receiver<DataStreamConnectionChange>,
    publishers: Receiver<SocketAddr>,
    caller_id: &str,
    topic: &str,
    msg_definition: &str,
    md5sum: &str,
    msg_type: &str,
) {
    type Sub = (LossySender<MessageInfo>, Sender<HashMap<String, String>>);
    let mut subs: BTreeMap<usize, Sub> = BTreeMap::new();
    let mut existing_headers: Vec<HashMap<String, String>> = Vec::new();
    let (data_tx, data_rx): (Sender<MessageInfo>, Receiver<MessageInfo>) = bounded(8);
    // Ends when subscriber or publisher sender is destroyed, which happens at Subscriber destruction
    loop {
        select!
{
            recv(data_rx) -> msg => {
                match msg {
                    Err(_) => break,
                    Ok(v) => for sub in subs.values() {
                        if sub.0.try_send(v.clone()).is_err() {
                            error!("Failed to send data to subscriber");
                        }
                    }
                }
            }
            recv(subscribers) -> msg => {
                match msg {
                    Err(_) => break,
                    Ok(DataStreamConnectionChange::Connect(id, data, conn)) => {
                        for header in &existing_headers {
                            if conn.send(header.clone()).is_err() {
                                error!("Failed to send connection info for subscriber");
                            };
                        }
                        subs.insert(id, (data, conn));
                    }
                    Ok(DataStreamConnectionChange::Disconnect(id)) => {
                        if let Some((mut data, _)) = subs.remove(&id) {
                            if data.close().is_err() {
                                error!("Subscriber data stream to topic has already been killed");
                            }
                        }
                    }
                }
            }
            recv(publishers) -> msg => {
                match msg {
                    Err(_) => break,
                    Ok(publisher) => {
                        let result = join_connection(
                            &data_tx,
                            &publisher,
                            caller_id,
                            topic,
                            msg_definition,
                            md5sum,
                            msg_type,
                        )
                        .chain_err(|| ErrorKind::TopicConnectionFail(topic.into()));
                        match result {
                            Ok(headers) => {
                                for sub in subs.values() {
                                    if sub.1.send(headers.clone()).is_err() {
                                        error!("Failed to send connection info for subscriber");
                                    }
                                }
                                existing_headers.push(headers);
                            }
                            Err(err) => {
                                let info = err
                                    .iter()
                                    .map(|v| format!("{}", v))
                                    .collect::<Vec<_>>()
                                    .join("\nCaused by:");
                                error!("{}", info);
                            }
                        }
                    }
                }
            }
        }
    }
}

fn join_connection(
    data_stream: &Sender<MessageInfo>,
    publisher: &SocketAddr,
    caller_id: &str,
    topic: &str,
    msg_definition: &str,
    md5sum: &str,
    msg_type: &str,
) -> Result<HashMap<String, String>> {
    let mut stream = TcpStream::connect(publisher)?;
    let headers = exchange_headers::<_>(
        &mut stream,
        caller_id,
        topic,
        msg_definition,
        md5sum,
        msg_type,
    )?;
    let pub_caller_id = headers.get("callerid").cloned();
    let target = data_stream.clone();
    thread::spawn(move || {
        let pub_caller_id = Arc::new(pub_caller_id.unwrap_or_default());
        while let Ok(buffer) = package_to_vector(&mut stream) {
            if let Err(TrySendError::Disconnected(_)) =
                target.try_send(MessageInfo::new(Arc::clone(&pub_caller_id), buffer))
            {
                // Data receiver has been destroyed after
                // Subscriber destructor's kill signal
                break;
            }
        }
    });
    Ok(headers)
}

fn write_request<U: std::io::Write>(
    mut stream: &mut U,
    caller_id: &str,
    topic: &str,
    msg_definition: &str,
    md5sum: &str,
    msg_type: &str,
) -> Result<()> {
    let mut fields = HashMap::<String, String>::new();
    fields.insert(String::from("message_definition"), msg_definition.into());
    fields.insert(String::from("callerid"), caller_id.into());
    fields.insert(String::from("topic"), topic.into());
    fields.insert(String::from("md5sum"), md5sum.into());
    fields.insert(String::from("type"), msg_type.into());
    encode(&mut stream, &fields)?;
    Ok(())
}

fn read_response<U: std::io::Read>(
    mut stream: &mut U,
    md5sum: &str,
    msg_type: &str,
) -> Result<HashMap<String, String>> {
    let fields = decode(&mut stream)?;
    if md5sum != "*" {
        match_field(&fields, "md5sum", md5sum)?;
    }
    if msg_type != "*" {
        match_field(&fields, "type", msg_type)?;
    }
    Ok(fields)
}

fn exchange_headers<U>(
    stream: &mut U,
    caller_id: &str,
    topic: &str,
    msg_definition: &str,
    md5sum: &str,
    msg_type: &str,
) -> Result<HashMap<String, String>>
where
    U: std::io::Write + std::io::Read,
{
    write_request::<U>(stream, caller_id, topic, msg_definition, md5sum, msg_type)?;
    read_response::<U>(stream, md5sum, msg_type)
}

#[inline]
fn package_to_vector<R: std::io::Read>(stream: &mut R) -> std::io::Result<Vec<u8>> {
    let length = stream.read_u32::<LittleEndian>()?;
    let u32_size = std::mem::size_of::<u32>();
    let num_bytes = length as usize + u32_size;

    // Allocate memory of the proper size for the incoming message.
    // We do not initialize the memory to zero here (as would be safe)
    // because it is expensive and ultimately unnecessary. We know the
    // length of the message and if the length is incorrect, the
    // stream reading functions will bail with an Error rather than
    // leaving memory uninitialized.
    let mut out = Vec::<u8>::with_capacity(num_bytes);
    let out_ptr = out.as_mut_ptr();

    // Read length from stream.
    std::io::Cursor::new(unsafe { std::slice::from_raw_parts_mut(out_ptr as *mut u8, u32_size) })
        .write_u32::<LittleEndian>(length)?;

    // Read data from stream.
    let read_buf = unsafe { std::slice::from_raw_parts_mut(out_ptr as *mut u8, num_bytes) };
    stream.read_exact(&mut read_buf[u32_size..])?;

    // Don't drop the original Vec which has size==0 and instead use
    // its memory to initialize a new Vec with size == capacity == num_bytes.
    std::mem::forget(out);

    // Return the new, now full and "safely" initialized Vec.
    Ok(unsafe { Vec::from_raw_parts(out_ptr, num_bytes, num_bytes) })
}

#[derive(Clone)]
struct MessageInfo {
    caller_id: Arc<String>,
    data: Vec<u8>,
}

impl MessageInfo {
    fn new(caller_id: Arc<String>, data: Vec<u8>) -> Self {
        Self { caller_id, data }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    static FAILED_TO_READ_WRITE_VECTOR: &str = "Failed to read or write from vector";

    #[test]
    fn package_to_vector_creates_right_buffer_from_reader() {
        let input = [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7];
        let data =
            package_to_vector(&mut std::io::Cursor::new(input)).expect(FAILED_TO_READ_WRITE_VECTOR);
        assert_eq!(data, [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7]);
    }

    #[test]
    fn package_to_vector_respects_provided_length() {
        let input = [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
        let data =
            package_to_vector(&mut std::io::Cursor::new(input)).expect(FAILED_TO_READ_WRITE_VECTOR);
        assert_eq!(data, [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7]);
    }

    #[test]
    fn package_to_vector_fails_if_stream_is_shorter_than_annotated() {
        let input = [7, 0, 0, 0, 1, 2, 3, 4, 5];
        package_to_vector(&mut std::io::Cursor::new(input)).unwrap_err();
    }

    #[test]
    fn package_to_vector_fails_leaves_cursor_at_end_of_reading()
}
{ let input = [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 4, 0, 0, 0, 11, 12, 13, 14]; let mut cursor = std::io::Cursor::new(input); let data = package_to_vector(&mut cursor).expect(FAILED_TO_READ_WRITE_VECTOR); assert_eq!(data, [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7]); let data = package_to_vector(&mut cursor).expect(FAILED_TO_READ_WRITE_VECTOR); assert_eq!(data, [4, 0, 0, 0, 11, 12, 13, 14]); }
identifier_body
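`package_to_vector` deliberately reads into uninitialized `Vec` capacity to skip a zero-fill. A safe sketch of the same length-prefixed framing, zero-initializing instead (`package_to_vector_safe` is an invented name; the `byteorder` calls are the same ones this file already imports):

```rust
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};

// Safe variant: pay for the zero-fill, drop all raw-pointer handling.
fn package_to_vector_safe<R: std::io::Read>(stream: &mut R) -> std::io::Result<Vec<u8>> {
    let length = stream.read_u32::<LittleEndian>()?;
    let u32_size = std::mem::size_of::<u32>();
    let mut out = vec![0u8; length as usize + u32_size];
    // Write the length prefix back into the front of the buffer...
    (&mut out[..u32_size]).write_u32::<LittleEndian>(length)?;
    // ...then fill the rest with the payload.
    stream.read_exact(&mut out[u32_size..])?;
    Ok(out)
}

fn main() -> std::io::Result<()> {
    let input = [7u8, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7];
    let data = package_to_vector_safe(&mut std::io::Cursor::new(input))?;
    assert_eq!(data, [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7]);
    Ok(())
}
```

The tests in this row should pass unchanged against this variant; the unsafe original only buys back the cost of the `vec![0u8; n]` fill.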
subscriber.rs
use super::error::{ErrorKind, Result, ResultExt}; use super::header::{decode, encode, match_field}; use super::{Message, Topic}; use crate::rosmsg::RosMsg; use crate::util::lossy_channel::{lossy_channel, LossyReceiver, LossySender}; use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; use crossbeam::channel::{bounded, select, Receiver, Sender, TrySendError}; use log::error; use std::collections::{BTreeMap, BTreeSet, HashMap}; use std::net::{SocketAddr, TcpStream, ToSocketAddrs}; use std::sync::Arc; use std::thread; enum DataStreamConnectionChange { Connect( usize, LossySender<MessageInfo>, Sender<HashMap<String, String>>, ), Disconnect(usize), } pub struct SubscriberRosConnection { next_data_stream_id: usize, data_stream_tx: Sender<DataStreamConnectionChange>, publishers_stream: Sender<SocketAddr>, topic: Topic, pub connected_ids: BTreeSet<usize>, pub connected_publishers: BTreeSet<String>, } impl SubscriberRosConnection { pub fn new( caller_id: &str, topic: &str, msg_definition: String, msg_type: String, md5sum: String, ) -> SubscriberRosConnection { let subscriber_connection_queue_size = 8; let (data_stream_tx, data_stream_rx) = bounded(subscriber_connection_queue_size); let publisher_connection_queue_size = 8; let (pub_tx, pub_rx) = bounded(publisher_connection_queue_size); let caller_id = String::from(caller_id); let topic_name = String::from(topic); thread::spawn({ let msg_type = msg_type.clone(); let md5sum = md5sum.clone(); move || { join_connections( data_stream_rx, pub_rx, &caller_id, &topic_name, &msg_definition, &md5sum, &msg_type, ) } }); let topic = Topic { name: String::from(topic), msg_type, md5sum, }; SubscriberRosConnection { next_data_stream_id: 1, data_stream_tx, publishers_stream: pub_tx, topic, connected_ids: BTreeSet::new(), connected_publishers: BTreeSet::new(), } } // TODO: allow synchronous handling for subscribers // This creates a new thread to call on_message. Next API change should // allow subscribing with either callback or inline handler of the queue. // The queue is lossy, so it wouldn't be blocking. 
pub fn add_subscriber<T, F, G>(
    &mut self,
    queue_size: usize,
    on_message: F,
    on_connect: G,
) -> usize
where
    T: Message,
    F: Fn(T, &str) + Send + 'static,
    G: Fn(HashMap<String, String>) + Send + 'static,
{
    let data_stream_id = self.next_data_stream_id;
    self.connected_ids.insert(data_stream_id);
    self.next_data_stream_id += 1;
    let (data_tx, data_rx) = lossy_channel(queue_size);
    let (connection_tx, connection_rx) = bounded(8);
    if self
        .data_stream_tx
        .send(DataStreamConnectionChange::Connect(
            data_stream_id,
            data_tx,
            connection_tx,
        ))
        .is_err()
    {
        // TODO: we might want to panic here
        error!("Subscriber failed to connect to data stream");
    }
    thread::spawn(move || {
        handle_data::<T, F, G>(data_rx, connection_rx, on_message, on_connect)
    });
    data_stream_id
}

pub fn remove_subscriber(&mut self, id: usize) {
    self.connected_ids.remove(&id);
    if self
        .data_stream_tx
        .send(DataStreamConnectionChange::Disconnect(id))
        .is_err()
    {
        // TODO: we might want to panic here
        error!("Subscriber failed to disconnect from data stream");
    }
}

pub fn has_subscribers(&self) -> bool {
    !self.connected_ids.is_empty()
}

#[inline]
pub fn publisher_count(&self) -> usize {
    self.connected_publishers.len()
}

#[inline]
pub fn publisher_uris(&self) -> Vec<String> {
    self.connected_publishers.iter().cloned().collect()
}

#[allow(clippy::useless_conversion)]
pub fn connect_to<U: ToSocketAddrs>(
    &mut self,
    publisher: &str,
    addresses: U,
) -> std::io::Result<()> {
    for address in addresses.to_socket_addrs()? {
        // This should never fail, so it's safe to unwrap
        // Failure could only be caused by the join_connections
        // thread not running, which only happens after
        // Subscriber has been deconstructed
        self.publishers_stream
            .send(address)
            .expect("Connected thread died");
    }
    self.connected_publishers.insert(publisher.to_owned());
    Ok(())
}

pub fn is_connected_to(&self, publisher: &str) -> bool {
    self.connected_publishers.contains(publisher)
}

pub fn limit_publishers_to(&mut self, publishers: &BTreeSet<String>) {
    let difference: Vec<String> = self
        .connected_publishers
        .difference(publishers)
        .cloned()
        .collect();
    for item in difference {
        self.connected_publishers.remove(&item);
    }
}

pub fn get_topic(&self) -> &Topic {
    &self.topic
}
}

fn handle_data<T, F, G>(
    data: LossyReceiver<MessageInfo>,
    connections: Receiver<HashMap<String, String>>,
    on_message: F,
    on_connect: G,
) where
    T: Message,
    F: Fn(T, &str),
    G: Fn(HashMap<String, String>) + Send + 'static,
{
    loop {
        select! {
            recv(data.kill_rx.kill_rx) -> _ => break,
            recv(data.data_rx) -> msg => match msg {
                Err(_) => break,
                Ok(buffer) => match RosMsg::decode_slice(&buffer.data) {
                    Ok(value) => on_message(value, &buffer.caller_id),
                    Err(err) => error!("Failed to decode message: {}", err),
                },
            },
            recv(connections) -> msg => match msg {
                Err(_) => break,
                Ok(conn) => on_connect(conn),
            },
        }
    }
}

fn join_connections(
    subscribers: Receiver<DataStreamConnectionChange>,
    publishers: Receiver<SocketAddr>,
    caller_id: &str,
    topic: &str,
    msg_definition: &str,
    md5sum: &str,
    msg_type: &str,
) {
    type Sub = (LossySender<MessageInfo>, Sender<HashMap<String, String>>);
    let mut subs: BTreeMap<usize, Sub> = BTreeMap::new();
    let mut existing_headers: Vec<HashMap<String, String>> = Vec::new();
    let (data_tx, data_rx): (Sender<MessageInfo>, Receiver<MessageInfo>) = bounded(8);
    // Ends when subscriber or publisher sender is destroyed, which happens at Subscriber destruction
    loop {
        select!
{
            recv(data_rx) -> msg => {
                match msg {
                    Err(_) => break,
                    Ok(v) => for sub in subs.values() {
                        if sub.0.try_send(v.clone()).is_err() {
                            error!("Failed to send data to subscriber");
                        }
                    }
                }
            }
            recv(subscribers) -> msg => {
                match msg {
                    Err(_) => break,
                    Ok(DataStreamConnectionChange::Connect(id, data, conn)) => {
                        for header in &existing_headers {
                            if conn.send(header.clone()).is_err() {
                                error!("Failed to send connection info for subscriber");
                            };
                        }
                        subs.insert(id, (data, conn));
                    }
                    Ok(DataStreamConnectionChange::Disconnect(id)) => {
                        if let Some((mut data, _)) = subs.remove(&id) {
                            if data.close().is_err() {
                                error!("Subscriber data stream to topic has already been killed");
                            }
                        }
                    }
                }
            }
            recv(publishers) -> msg => {
                match msg {
                    Err(_) => break,
                    Ok(publisher) => {
                        let result = join_connection(
                            &data_tx,
                            &publisher,
                            caller_id,
                            topic,
                            msg_definition,
                            md5sum,
                            msg_type,
                        )
                        .chain_err(|| ErrorKind::TopicConnectionFail(topic.into()));
                        match result {
                            Ok(headers) => {
                                for sub in subs.values() {
                                    if sub.1.send(headers.clone()).is_err() {
                                        error!("Failed to send connection info for subscriber");
                                    }
                                }
                                existing_headers.push(headers);
                            }
                            Err(err) => {
                                let info = err
                                    .iter()
                                    .map(|v| format!("{}", v))
                                    .collect::<Vec<_>>()
                                    .join("\nCaused by:");
                                error!("{}", info);
                            }
                        }
                    }
                }
            }
        }
    }
}

fn join_connection(
    data_stream: &Sender<MessageInfo>,
    publisher: &SocketAddr,
    caller_id: &str,
    topic: &str,
    msg_definition: &str,
    md5sum: &str,
    msg_type: &str,
) -> Result<HashMap<String, String>> {
    let mut stream = TcpStream::connect(publisher)?;
    let headers = exchange_headers::<_>(
        &mut stream,
        caller_id,
        topic,
        msg_definition,
        md5sum,
        msg_type,
    )?;
    let pub_caller_id = headers.get("callerid").cloned();
    let target = data_stream.clone();
    thread::spawn(move || {
        let pub_caller_id = Arc::new(pub_caller_id.unwrap_or_default());
        while let Ok(buffer) = package_to_vector(&mut stream) {
            if let Err(TrySendError::Disconnected(_)) =
                target.try_send(MessageInfo::new(Arc::clone(&pub_caller_id), buffer))
            {
                // Data receiver has been destroyed after
                // Subscriber destructor's kill signal
                break;
            }
        }
    });
    Ok(headers)
}

fn write_request<U: std::io::Write>(
    mut stream: &mut U,
    caller_id: &str,
    topic: &str,
    msg_definition: &str,
    md5sum: &str,
    msg_type: &str,
) -> Result<()> {
    let mut fields = HashMap::<String, String>::new();
    fields.insert(String::from("message_definition"), msg_definition.into());
    fields.insert(String::from("callerid"), caller_id.into());
    fields.insert(String::from("topic"), topic.into());
    fields.insert(String::from("md5sum"), md5sum.into());
    fields.insert(String::from("type"), msg_type.into());
    encode(&mut stream, &fields)?;
    Ok(())
}

fn read_response<U: std::io::Read>(
    mut stream: &mut U,
    md5sum: &str,
    msg_type: &str,
) -> Result<HashMap<String, String>> {
    let fields = decode(&mut stream)?;
    if md5sum != "*" {
        match_field(&fields, "md5sum", md5sum)?;
    }
    if msg_type != "*" {
        match_field(&fields, "type", msg_type)?;
    }
    Ok(fields)
}

fn exchange_headers<U>(
    stream: &mut U,
    caller_id: &str,
    topic: &str,
    msg_definition: &str,
    md5sum: &str,
    msg_type: &str,
) -> Result<HashMap<String, String>>
where
    U: std::io::Write + std::io::Read,
{
    write_request::<U>(stream, caller_id, topic, msg_definition, md5sum, msg_type)?;
    read_response::<U>(stream, md5sum, msg_type)
}

#[inline]
fn package_to_vector<R: std::io::Read>(stream: &mut R) -> std::io::Result<Vec<u8>> {
    let length = stream.read_u32::<LittleEndian>()?;
    let u32_size = std::mem::size_of::<u32>();
    let num_bytes = length as usize + u32_size;

    // Allocate memory of the proper size for the incoming message.
    // We do not initialize the memory to zero here (as would be safe)
    // because it is expensive and ultimately unnecessary. We know the
    // length of the message and if the length is incorrect, the
    // stream reading functions will bail with an Error rather than
    // leaving memory uninitialized.
    let mut out = Vec::<u8>::with_capacity(num_bytes);
    let out_ptr = out.as_mut_ptr();

    // Read length from stream.
    std::io::Cursor::new(unsafe { std::slice::from_raw_parts_mut(out_ptr as *mut u8, u32_size) })
        .write_u32::<LittleEndian>(length)?;

    // Read data from stream.
    let read_buf = unsafe { std::slice::from_raw_parts_mut(out_ptr as *mut u8, num_bytes) };
    stream.read_exact(&mut read_buf[u32_size..])?;

    // Don't drop the original Vec which has size==0 and instead use
    // its memory to initialize a new Vec with size == capacity == num_bytes.
    std::mem::forget(out);

    // Return the new, now full and "safely" initialized Vec.
    Ok(unsafe { Vec::from_raw_parts(out_ptr, num_bytes, num_bytes) })
}

#[derive(Clone)]
struct MessageInfo {
    caller_id: Arc<String>,
    data: Vec<u8>,
}

impl MessageInfo {
    fn new(caller_id: Arc<String>, data: Vec<u8>) -> Self {
        Self { caller_id, data }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    static FAILED_TO_READ_WRITE_VECTOR: &str = "Failed to read or write from vector";

    #[test]
    fn package_to_vector_creates_right_buffer_from_reader() {
        let input = [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7];
        let data =
            package_to_vector(&mut std::io::Cursor::new(input)).expect(FAILED_TO_READ_WRITE_VECTOR);
        assert_eq!(data, [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7]);
    }

    #[test]
    fn package_to_vector_respects_provided_length() {
        let input = [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
        let data =
            package_to_vector(&mut std::io::Cursor::new(input)).expect(FAILED_TO_READ_WRITE_VECTOR);
        assert_eq!(data, [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7]);
    }

    #[test]
    fn package_to_vector_fails_if_stream_is_shorter_than_annotated() {
        let input = [7, 0, 0, 0, 1, 2, 3, 4, 5];
        package_to_vector(&mut std::io::Cursor::new(input)).unwrap_err();
    }

    #[test]
    fn package_to_vector_fails_leaves_cursor_at_end_of_reading() {
        let input = [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 4, 0, 0, 0, 11, 12, 13, 14];
        let mut cursor = std::io::Cursor::new(input);
assert_eq!(data, [4, 0, 0, 0, 11, 12, 13, 14]); } }
let data = package_to_vector(&mut cursor).expect(FAILED_TO_READ_WRITE_VECTOR); assert_eq!(data, [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7]); let data = package_to_vector(&mut cursor).expect(FAILED_TO_READ_WRITE_VECTOR);
random_line_split
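`join_connections` is a `crossbeam::channel::select!` hub: one bounded data channel fanned out to every registered subscriber sender, ending when the senders are dropped. Reduced to its core (payload type and names are illustrative, and the subscriber-registration and publisher arms are omitted):

```rust
use crossbeam::channel::{bounded, Receiver, Sender};

// Broadcast each incoming value to every subscriber sender, like the
// recv(data_rx) arm in `join_connections`.
fn fan_out(data_rx: Receiver<u32>, subs: Vec<Sender<u32>>) {
    // Ends when every data sender is dropped, as in the real loop.
    for v in data_rx.iter() {
        for sub in &subs {
            if sub.try_send(v).is_err() {
                eprintln!("Failed to send data to subscriber");
            }
        }
    }
}

fn main() {
    let (data_tx, data_rx) = bounded(8);
    let (sub_tx, sub_rx) = bounded(8);
    let worker = std::thread::spawn(move || fan_out(data_rx, vec![sub_tx]));
    data_tx.send(42u32).unwrap();
    drop(data_tx); // closing the last sender stops the fan-out loop
    assert_eq!(sub_rx.recv().unwrap(), 42);
    worker.join().unwrap();
}
```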
subscriber.rs
use super::error::{ErrorKind, Result, ResultExt}; use super::header::{decode, encode, match_field}; use super::{Message, Topic}; use crate::rosmsg::RosMsg; use crate::util::lossy_channel::{lossy_channel, LossyReceiver, LossySender}; use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; use crossbeam::channel::{bounded, select, Receiver, Sender, TrySendError}; use log::error; use std::collections::{BTreeMap, BTreeSet, HashMap}; use std::net::{SocketAddr, TcpStream, ToSocketAddrs}; use std::sync::Arc; use std::thread; enum DataStreamConnectionChange { Connect( usize, LossySender<MessageInfo>, Sender<HashMap<String, String>>, ), Disconnect(usize), } pub struct SubscriberRosConnection { next_data_stream_id: usize, data_stream_tx: Sender<DataStreamConnectionChange>, publishers_stream: Sender<SocketAddr>, topic: Topic, pub connected_ids: BTreeSet<usize>, pub connected_publishers: BTreeSet<String>, } impl SubscriberRosConnection { pub fn new( caller_id: &str, topic: &str, msg_definition: String, msg_type: String, md5sum: String, ) -> SubscriberRosConnection { let subscriber_connection_queue_size = 8; let (data_stream_tx, data_stream_rx) = bounded(subscriber_connection_queue_size); let publisher_connection_queue_size = 8; let (pub_tx, pub_rx) = bounded(publisher_connection_queue_size); let caller_id = String::from(caller_id); let topic_name = String::from(topic); thread::spawn({ let msg_type = msg_type.clone(); let md5sum = md5sum.clone(); move || { join_connections( data_stream_rx, pub_rx, &caller_id, &topic_name, &msg_definition, &md5sum, &msg_type, ) } }); let topic = Topic { name: String::from(topic), msg_type, md5sum, }; SubscriberRosConnection { next_data_stream_id: 1, data_stream_tx, publishers_stream: pub_tx, topic, connected_ids: BTreeSet::new(), connected_publishers: BTreeSet::new(), } } // TODO: allow synchronous handling for subscribers // This creates a new thread to call on_message. Next API change should // allow subscribing with either callback or inline handler of the queue. // The queue is lossy, so it wouldn't be blocking. pub fn add_subscriber<T, F, G>( &mut self, queue_size: usize, on_message: F, on_connect: G, ) -> usize where T: Message, F: Fn(T, &str) + Send +'static, G: Fn(HashMap<String, String>) + Send +'static, { let data_stream_id = self.next_data_stream_id; self.connected_ids.insert(data_stream_id); self.next_data_stream_id += 1; let (data_tx, data_rx) = lossy_channel(queue_size); let (connection_tx, connection_rx) = bounded(8); if self .data_stream_tx .send(DataStreamConnectionChange::Connect( data_stream_id, data_tx, connection_tx, )) .is_err() { // TODO: we might want to panic here error!("Subscriber failed to connect to data stream"); } thread::spawn(move || { handle_data::<T, F, G>(data_rx, connection_rx, on_message, on_connect) }); data_stream_id } pub fn remove_subscriber(&mut self, id: usize) { self.connected_ids.remove(&id); if self .data_stream_tx .send(DataStreamConnectionChange::Disconnect(id)) .is_err() { // TODO: we might want to panic here error!("Subscriber failed to disconnect from data stream"); } } pub fn has_subscribers(&self) -> bool { !self.connected_ids.is_empty() } #[inline] pub fn publisher_count(&self) -> usize { self.connected_publishers.len() } #[inline] pub fn
(&self) -> Vec<String> { self.connected_publishers.iter().cloned().collect() } #[allow(clippy::useless_conversion)] pub fn connect_to<U: ToSocketAddrs>( &mut self, publisher: &str, addresses: U, ) -> std::io::Result<()> { for address in addresses.to_socket_addrs()? { // This should never fail, so it's safe to unwrap // Failure could only be caused by the join_connections // thread not running, which only happens after // Subscriber has been deconstructed self.publishers_stream .send(address) .expect("Connected thread died"); } self.connected_publishers.insert(publisher.to_owned()); Ok(()) } pub fn is_connected_to(&self, publisher: &str) -> bool { self.connected_publishers.contains(publisher) } pub fn limit_publishers_to(&mut self, publishers: &BTreeSet<String>) { let difference: Vec<String> = self .connected_publishers .difference(publishers) .cloned() .collect(); for item in difference { self.connected_publishers.remove(&item); } } pub fn get_topic(&self) -> &Topic { &self.topic } } fn handle_data<T, F, G>( data: LossyReceiver<MessageInfo>, connections: Receiver<HashMap<String, String>>, on_message: F, on_connect: G, ) where T: Message, F: Fn(T, &str), G: Fn(HashMap<String, String>) + Send +'static, { loop { select! { recv(data.kill_rx.kill_rx) -> _ => break, recv(data.data_rx) -> msg => match msg { Err(_) => break, Ok(buffer) => match RosMsg::decode_slice(&buffer.data) { Ok(value) => on_message(value, &buffer.caller_id), Err(err) => error!("Failed to decode message: {}", err), }, }, recv(connections) -> msg => match msg { Err(_) => break, Ok(conn) => on_connect(conn), }, } } } fn join_connections( subscribers: Receiver<DataStreamConnectionChange>, publishers: Receiver<SocketAddr>, caller_id: &str, topic: &str, msg_definition: &str, md5sum: &str, msg_type: &str, ) { type Sub = (LossySender<MessageInfo>, Sender<HashMap<String, String>>); let mut subs: BTreeMap<usize, Sub> = BTreeMap::new(); let mut existing_headers: Vec<HashMap<String, String>> = Vec::new(); let (data_tx, data_rx): (Sender<MessageInfo>, Receiver<MessageInfo>) = bounded(8); // Ends when subscriber or publisher sender is destroyed, which happens at Subscriber destruction loop { select! 
{ recv(data_rx) -> msg => { match msg { Err(_) => break, Ok(v) => for sub in subs.values() { if sub.0.try_send(v.clone()).is_err() { error!("Failed to send data to subscriber"); } } } } recv(subscribers) -> msg => { match msg { Err(_) => break, Ok(DataStreamConnectionChange::Connect(id, data, conn)) => { for header in &existing_headers { if conn.send(header.clone()).is_err() { error!("Failed to send connection info for subscriber"); }; } subs.insert(id, (data, conn)); } Ok(DataStreamConnectionChange::Disconnect(id)) => { if let Some((mut data, _)) = subs.remove(&id) { if data.close().is_err() { error!("Subscriber data stream to topic has already been killed"); } } } } } recv(publishers) -> msg => { match msg { Err(_) => break, Ok(publisher) => { let result = join_connection( &data_tx, &publisher, caller_id, topic, msg_definition, md5sum, msg_type, ) .chain_err(|| ErrorKind::TopicConnectionFail(topic.into())); match result { Ok(headers) => { for sub in subs.values() { if sub.1.send(headers.clone()).is_err() { error!("Failed to send connection info for subscriber"); } } existing_headers.push(headers); } Err(err) => { let info = err .iter() .map(|v| format!("{}", v)) .collect::<Vec<_>>() .join("\nCaused by:"); error!("{}", info); } } } } } } } } fn join_connection( data_stream: &Sender<MessageInfo>, publisher: &SocketAddr, caller_id: &str, topic: &str, msg_definition: &str, md5sum: &str, msg_type: &str, ) -> Result<HashMap<String, String>> { let mut stream = TcpStream::connect(publisher)?; let headers = exchange_headers::<_>( &mut stream, caller_id, topic, msg_definition, md5sum, msg_type, )?; let pub_caller_id = headers.get("callerid").cloned(); let target = data_stream.clone(); thread::spawn(move || { let pub_caller_id = Arc::new(pub_caller_id.unwrap_or_default()); while let Ok(buffer) = package_to_vector(&mut stream) { if let Err(TrySendError::Disconnected(_)) = target.try_send(MessageInfo::new(Arc::clone(&pub_caller_id), buffer)) { // Data receiver has been destroyed after // Subscriber destructor's kill signal break; } } }); Ok(headers) } fn write_request<U: std::io::Write>( mut stream: &mut U, caller_id: &str, topic: &str, msg_definition: &str, md5sum: &str, msg_type: &str, ) -> Result<()> { let mut fields = HashMap::<String, String>::new(); fields.insert(String::from("message_definition"), msg_definition.into()); fields.insert(String::from("callerid"), caller_id.into()); fields.insert(String::from("topic"), topic.into()); fields.insert(String::from("md5sum"), md5sum.into()); fields.insert(String::from("type"), msg_type.into()); encode(&mut stream, &fields)?; Ok(()) } fn read_response<U: std::io::Read>( mut stream: &mut U, md5sum: &str, msg_type: &str, ) -> Result<HashMap<String, String>> { let fields = decode(&mut stream)?; if md5sum!= "*" { match_field(&fields, "md5sum", md5sum)?; } if msg_type!= "*" { match_field(&fields, "type", msg_type)?; } Ok(fields) } fn exchange_headers<U>( stream: &mut U, caller_id: &str, topic: &str, msg_definition: &str, md5sum: &str, msg_type: &str, ) -> Result<HashMap<String, String>> where U: std::io::Write + std::io::Read, { write_request::<U>(stream, caller_id, topic, msg_definition, md5sum, msg_type)?; read_response::<U>(stream, md5sum, msg_type) } #[inline] fn package_to_vector<R: std::io::Read>(stream: &mut R) -> std::io::Result<Vec<u8>> { let length = stream.read_u32::<LittleEndian>()?; let u32_size = std::mem::size_of::<u32>(); let num_bytes = length as usize + u32_size; // Allocate memory of the proper size for the incoming message. 
We // do not initialize the memory to zero here (as would be safe) // because it is expensive and ultimately unnecessary. We know the // length of the message and if the length is incorrect, the // stream reading functions will bail with an Error rather than // leaving memory uninitialized. let mut out = Vec::<u8>::with_capacity(num_bytes); let out_ptr = out.as_mut_ptr(); // Read length from stream. std::io::Cursor::new(unsafe { std::slice::from_raw_parts_mut(out_ptr as *mut u8, u32_size) }) .write_u32::<LittleEndian>(length)?; // Read data from stream. let read_buf = unsafe { std::slice::from_raw_parts_mut(out_ptr as *mut u8, num_bytes) }; stream.read_exact(&mut read_buf[u32_size..])?; // Don't drop the original Vec which has size==0 and instead use // its memory to initialize a new Vec with size == capacity == num_bytes. std::mem::forget(out); // Return the new, now full and "safely" initialized. Ok(unsafe { Vec::from_raw_parts(out_ptr, num_bytes, num_bytes) }) } #[derive(Clone)] struct MessageInfo { caller_id: Arc<String>, data: Vec<u8>, } impl MessageInfo { fn new(caller_id: Arc<String>, data: Vec<u8>) -> Self { Self { caller_id, data } } } #[cfg(test)] mod tests { use super::*; static FAILED_TO_READ_WRITE_VECTOR: &str = "Failed to read or write from vector"; #[test] fn package_to_vector_creates_right_buffer_from_reader() { let input = [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7]; let data = package_to_vector(&mut std::io::Cursor::new(input)).expect(FAILED_TO_READ_WRITE_VECTOR); assert_eq!(data, [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7]); } #[test] fn package_to_vector_respects_provided_length() { let input = [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]; let data = package_to_vector(&mut std::io::Cursor::new(input)).expect(FAILED_TO_READ_WRITE_VECTOR); assert_eq!(data, [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7]); } #[test] fn package_to_vector_fails_if_stream_is_shorter_than_annotated() { let input = [7, 0, 0, 0, 1, 2, 3, 4, 5]; package_to_vector(&mut std::io::Cursor::new(input)).unwrap_err(); } #[test] fn package_to_vector_fails_leaves_cursor_at_end_of_reading() { let input = [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 4, 0, 0, 0, 11, 12, 13, 14]; let mut cursor = std::io::Cursor::new(input); let data = package_to_vector(&mut cursor).expect(FAILED_TO_READ_WRITE_VECTOR); assert_eq!(data, [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7]); let data = package_to_vector(&mut cursor).expect(FAILED_TO_READ_WRITE_VECTOR); assert_eq!(data, [4, 0, 0, 0, 11, 12, 13, 14]); } }
publisher_uris
identifier_name
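For orientation, a hedged sketch of how the bookkeeping methods above fit together; the publisher URI and socket address are invented for illustration, and the function is assumed to live in the same module as `SubscriberRosConnection`.

// Hypothetical call site; URI and address are illustrative only.
fn track_publisher(conn: &mut SubscriberRosConnection) -> std::io::Result<()> {
    // connect_to() records the publisher URI and queues its socket address
    // for the join_connections thread to dial.
    conn.connect_to("http://robot:45100/", ("127.0.0.1", 45100))?;
    assert!(conn.is_connected_to("http://robot:45100/"));
    assert_eq!(conn.publisher_count(), 1);
    assert_eq!(conn.publisher_uris(), vec!["http://robot:45100/".to_string()]);
    Ok(())
}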
subscriber.rs
use super::error::{ErrorKind, Result, ResultExt}; use super::header::{decode, encode, match_field}; use super::{Message, Topic}; use crate::rosmsg::RosMsg; use crate::util::lossy_channel::{lossy_channel, LossyReceiver, LossySender}; use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; use crossbeam::channel::{bounded, select, Receiver, Sender, TrySendError}; use log::error; use std::collections::{BTreeMap, BTreeSet, HashMap}; use std::net::{SocketAddr, TcpStream, ToSocketAddrs}; use std::sync::Arc; use std::thread; enum DataStreamConnectionChange { Connect( usize, LossySender<MessageInfo>, Sender<HashMap<String, String>>, ), Disconnect(usize), } pub struct SubscriberRosConnection { next_data_stream_id: usize, data_stream_tx: Sender<DataStreamConnectionChange>, publishers_stream: Sender<SocketAddr>, topic: Topic, pub connected_ids: BTreeSet<usize>, pub connected_publishers: BTreeSet<String>, } impl SubscriberRosConnection { pub fn new( caller_id: &str, topic: &str, msg_definition: String, msg_type: String, md5sum: String, ) -> SubscriberRosConnection { let subscriber_connection_queue_size = 8; let (data_stream_tx, data_stream_rx) = bounded(subscriber_connection_queue_size); let publisher_connection_queue_size = 8; let (pub_tx, pub_rx) = bounded(publisher_connection_queue_size); let caller_id = String::from(caller_id); let topic_name = String::from(topic); thread::spawn({ let msg_type = msg_type.clone(); let md5sum = md5sum.clone(); move || { join_connections( data_stream_rx, pub_rx, &caller_id, &topic_name, &msg_definition, &md5sum, &msg_type, ) } }); let topic = Topic { name: String::from(topic), msg_type, md5sum, }; SubscriberRosConnection { next_data_stream_id: 1, data_stream_tx, publishers_stream: pub_tx, topic, connected_ids: BTreeSet::new(), connected_publishers: BTreeSet::new(), } } // TODO: allow synchronous handling for subscribers // This creates a new thread to call on_message. Next API change should // allow subscribing with either callback or inline handler of the queue. // The queue is lossy, so it wouldn't be blocking. 
pub fn add_subscriber<T, F, G>( &mut self, queue_size: usize, on_message: F, on_connect: G, ) -> usize where T: Message, F: Fn(T, &str) + Send +'static, G: Fn(HashMap<String, String>) + Send +'static, { let data_stream_id = self.next_data_stream_id; self.connected_ids.insert(data_stream_id); self.next_data_stream_id += 1; let (data_tx, data_rx) = lossy_channel(queue_size); let (connection_tx, connection_rx) = bounded(8); if self .data_stream_tx .send(DataStreamConnectionChange::Connect( data_stream_id, data_tx, connection_tx, )) .is_err() { // TODO: we might want to panic here error!("Subscriber failed to connect to data stream"); } thread::spawn(move || { handle_data::<T, F, G>(data_rx, connection_rx, on_message, on_connect) }); data_stream_id } pub fn remove_subscriber(&mut self, id: usize) { self.connected_ids.remove(&id); if self .data_stream_tx .send(DataStreamConnectionChange::Disconnect(id)) .is_err() { // TODO: we might want to panic here error!("Subscriber failed to disconnect from data stream"); } } pub fn has_subscribers(&self) -> bool { !self.connected_ids.is_empty() } #[inline] pub fn publisher_count(&self) -> usize { self.connected_publishers.len() } #[inline] pub fn publisher_uris(&self) -> Vec<String> { self.connected_publishers.iter().cloned().collect() } #[allow(clippy::useless_conversion)] pub fn connect_to<U: ToSocketAddrs>( &mut self, publisher: &str, addresses: U, ) -> std::io::Result<()> { for address in addresses.to_socket_addrs()? { // This should never fail, so it's safe to unwrap // Failure could only be caused by the join_connections // thread not running, which only happens after // Subscriber has been deconstructed self.publishers_stream .send(address) .expect("Connected thread died"); } self.connected_publishers.insert(publisher.to_owned()); Ok(()) } pub fn is_connected_to(&self, publisher: &str) -> bool { self.connected_publishers.contains(publisher) } pub fn limit_publishers_to(&mut self, publishers: &BTreeSet<String>) { let difference: Vec<String> = self .connected_publishers .difference(publishers) .cloned() .collect(); for item in difference { self.connected_publishers.remove(&item); } } pub fn get_topic(&self) -> &Topic { &self.topic } } fn handle_data<T, F, G>( data: LossyReceiver<MessageInfo>, connections: Receiver<HashMap<String, String>>, on_message: F, on_connect: G, ) where T: Message, F: Fn(T, &str), G: Fn(HashMap<String, String>) + Send +'static, { loop { select! { recv(data.kill_rx.kill_rx) -> _ => break, recv(data.data_rx) -> msg => match msg { Err(_) => break, Ok(buffer) => match RosMsg::decode_slice(&buffer.data) { Ok(value) => on_message(value, &buffer.caller_id), Err(err) => error!("Failed to decode message: {}", err), }, }, recv(connections) -> msg => match msg { Err(_) => break, Ok(conn) => on_connect(conn), }, } } } fn join_connections( subscribers: Receiver<DataStreamConnectionChange>, publishers: Receiver<SocketAddr>, caller_id: &str, topic: &str, msg_definition: &str, md5sum: &str, msg_type: &str, ) { type Sub = (LossySender<MessageInfo>, Sender<HashMap<String, String>>); let mut subs: BTreeMap<usize, Sub> = BTreeMap::new(); let mut existing_headers: Vec<HashMap<String, String>> = Vec::new(); let (data_tx, data_rx): (Sender<MessageInfo>, Receiver<MessageInfo>) = bounded(8); // Ends when subscriber or publisher sender is destroyed, which happens at Subscriber destruction loop { select! 
{ recv(data_rx) -> msg => { match msg { Err(_) => break, Ok(v) => for sub in subs.values() { if sub.0.try_send(v.clone()).is_err() { error!("Failed to send data to subscriber"); } } } } recv(subscribers) -> msg => { match msg { Err(_) => break, Ok(DataStreamConnectionChange::Connect(id, data, conn)) => { for header in &existing_headers { if conn.send(header.clone()).is_err() { error!("Failed to send connection info for subscriber"); }; } subs.insert(id, (data, conn)); } Ok(DataStreamConnectionChange::Disconnect(id)) => { if let Some((mut data, _)) = subs.remove(&id) { if data.close().is_err() { error!("Subscriber data stream to topic has already been killed"); } } } } } recv(publishers) -> msg => { match msg { Err(_) => break, Ok(publisher) => { let result = join_connection( &data_tx, &publisher, caller_id, topic, msg_definition, md5sum, msg_type, ) .chain_err(|| ErrorKind::TopicConnectionFail(topic.into())); match result { Ok(headers) => { for sub in subs.values() { if sub.1.send(headers.clone()).is_err() { error!("Failed to send connection info for subscriber"); } } existing_headers.push(headers); } Err(err) => { let info = err .iter() .map(|v| format!("{}", v)) .collect::<Vec<_>>() .join("\nCaused by:"); error!("{}", info); } } } } } } } } fn join_connection( data_stream: &Sender<MessageInfo>, publisher: &SocketAddr, caller_id: &str, topic: &str, msg_definition: &str, md5sum: &str, msg_type: &str, ) -> Result<HashMap<String, String>> { let mut stream = TcpStream::connect(publisher)?; let headers = exchange_headers::<_>( &mut stream, caller_id, topic, msg_definition, md5sum, msg_type, )?; let pub_caller_id = headers.get("callerid").cloned(); let target = data_stream.clone(); thread::spawn(move || { let pub_caller_id = Arc::new(pub_caller_id.unwrap_or_default()); while let Ok(buffer) = package_to_vector(&mut stream) { if let Err(TrySendError::Disconnected(_)) = target.try_send(MessageInfo::new(Arc::clone(&pub_caller_id), buffer))
} }); Ok(headers) } fn write_request<U: std::io::Write>( mut stream: &mut U, caller_id: &str, topic: &str, msg_definition: &str, md5sum: &str, msg_type: &str, ) -> Result<()> { let mut fields = HashMap::<String, String>::new(); fields.insert(String::from("message_definition"), msg_definition.into()); fields.insert(String::from("callerid"), caller_id.into()); fields.insert(String::from("topic"), topic.into()); fields.insert(String::from("md5sum"), md5sum.into()); fields.insert(String::from("type"), msg_type.into()); encode(&mut stream, &fields)?; Ok(()) } fn read_response<U: std::io::Read>( mut stream: &mut U, md5sum: &str, msg_type: &str, ) -> Result<HashMap<String, String>> { let fields = decode(&mut stream)?; if md5sum!= "*" { match_field(&fields, "md5sum", md5sum)?; } if msg_type!= "*" { match_field(&fields, "type", msg_type)?; } Ok(fields) } fn exchange_headers<U>( stream: &mut U, caller_id: &str, topic: &str, msg_definition: &str, md5sum: &str, msg_type: &str, ) -> Result<HashMap<String, String>> where U: std::io::Write + std::io::Read, { write_request::<U>(stream, caller_id, topic, msg_definition, md5sum, msg_type)?; read_response::<U>(stream, md5sum, msg_type) } #[inline] fn package_to_vector<R: std::io::Read>(stream: &mut R) -> std::io::Result<Vec<u8>> { let length = stream.read_u32::<LittleEndian>()?; let u32_size = std::mem::size_of::<u32>(); let num_bytes = length as usize + u32_size; // Allocate memory of the proper size for the incoming message. We // do not initialize the memory to zero here (as would be safe) // because it is expensive and ultimately unnecessary. We know the // length of the message and if the length is incorrect, the // stream reading functions will bail with an Error rather than // leaving memory uninitialized. let mut out = Vec::<u8>::with_capacity(num_bytes); let out_ptr = out.as_mut_ptr(); // Read length from stream. std::io::Cursor::new(unsafe { std::slice::from_raw_parts_mut(out_ptr as *mut u8, u32_size) }) .write_u32::<LittleEndian>(length)?; // Read data from stream. let read_buf = unsafe { std::slice::from_raw_parts_mut(out_ptr as *mut u8, num_bytes) }; stream.read_exact(&mut read_buf[u32_size..])?; // Don't drop the original Vec which has size==0 and instead use // its memory to initialize a new Vec with size == capacity == num_bytes. std::mem::forget(out); // Return the new, now full and "safely" initialized. 
Ok(unsafe { Vec::from_raw_parts(out_ptr, num_bytes, num_bytes) }) } #[derive(Clone)] struct MessageInfo { caller_id: Arc<String>, data: Vec<u8>, } impl MessageInfo { fn new(caller_id: Arc<String>, data: Vec<u8>) -> Self { Self { caller_id, data } } } #[cfg(test)] mod tests { use super::*; static FAILED_TO_READ_WRITE_VECTOR: &str = "Failed to read or write from vector"; #[test] fn package_to_vector_creates_right_buffer_from_reader() { let input = [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7]; let data = package_to_vector(&mut std::io::Cursor::new(input)).expect(FAILED_TO_READ_WRITE_VECTOR); assert_eq!(data, [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7]); } #[test] fn package_to_vector_respects_provided_length() { let input = [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]; let data = package_to_vector(&mut std::io::Cursor::new(input)).expect(FAILED_TO_READ_WRITE_VECTOR); assert_eq!(data, [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7]); } #[test] fn package_to_vector_fails_if_stream_is_shorter_than_annotated() { let input = [7, 0, 0, 0, 1, 2, 3, 4, 5]; package_to_vector(&mut std::io::Cursor::new(input)).unwrap_err(); } #[test] fn package_to_vector_fails_leaves_cursor_at_end_of_reading() { let input = [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 4, 0, 0, 0, 11, 12, 13, 14]; let mut cursor = std::io::Cursor::new(input); let data = package_to_vector(&mut cursor).expect(FAILED_TO_READ_WRITE_VECTOR); assert_eq!(data, [7, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7]); let data = package_to_vector(&mut cursor).expect(FAILED_TO_READ_WRITE_VECTOR); assert_eq!(data, [4, 0, 0, 0, 11, 12, 13, 14]); } }
{ // Data receiver has been destroyed after // Subscriber destructor's kill signal break; }
conditional_block
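One detail worth pulling out of the handshake code above: `read_response` treats "*" as a wildcard that skips validation of `md5sum` and `type`. A self-contained sketch of that rule, written against a plain `HashMap` because the real `match_field` lives in the `header` module that is not shown here.

use std::collections::HashMap;

// Sketch of read_response's validation rule: "*" skips the comparison.
fn check_fields(
    fields: &HashMap<String, String>,
    md5sum: &str,
    msg_type: &str,
) -> Result<(), String> {
    for (key, expected) in [("md5sum", md5sum), ("type", msg_type)] {
        if expected == "*" {
            continue; // wildcard: accept whatever the publisher sent
        }
        match fields.get(key) {
            Some(actual) if actual.as_str() == expected => {}
            other => return Err(format!("header field {:?} mismatch: {:?}", key, other)),
        }
    }
    Ok(())
}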
lib.rs
//! The `io_uring` library for Rust. //! //! The crate only provides a summary of the parameters. //! For more detailed documentation, see manpage. #![cfg_attr(sgx, no_std)] #[cfg(sgx)] extern crate sgx_types; #[cfg(sgx)] #[macro_use] extern crate sgx_tstd as std; #[cfg(sgx)] extern crate sgx_trts; #[cfg(sgx)] use std::prelude::v1::*; #[cfg(sgx)] pub use sgx_trts::libc; #[macro_use] mod util; pub mod cqueue; pub mod opcode; mod register; pub mod squeue; mod submit; mod sys; #[cfg(any(feature = "concurrent", sgx))] pub mod concurrent; use std::convert::TryInto; use std::mem::ManuallyDrop; use std::os::unix::io::{AsRawFd, RawFd}; use std::{cmp, io, mem}; pub use cqueue::CompletionQueue; pub use register::Probe; pub use squeue::SubmissionQueue; pub use submit::Submitter; use util::{Fd, Mmap}; /// IoUring instance pub struct IoUring { fd: Fd, params: Parameters, memory: ManuallyDrop<MemoryMap>, sq: SubmissionQueue, cq: CompletionQueue, } #[allow(dead_code)] struct MemoryMap { sq_mmap: Mmap, sqe_mmap: Mmap, cq_mmap: Option<Mmap>, } /// IoUring build params #[derive(Clone, Default)] pub struct Builder { dontfork: bool, params: sys::io_uring_params, } #[derive(Clone)] pub struct Parameters(sys::io_uring_params); unsafe impl Send for IoUring {} unsafe impl Sync for IoUring {} impl IoUring { /// Create a IoUring instance /// /// The `entries` sets the size of queue, /// and it value should be the power of two. #[inline] pub fn new(entries: u32) -> io::Result<IoUring> { IoUring::with_params(entries, Default::default()) } fn with_params(entries: u32, mut p: sys::io_uring_params) -> io::Result<IoUring> { // NOTE: The `SubmissionQueue` and `CompletionQueue` are references, // and their lifetime can never exceed `MemoryMap`. // // The memory mapped regions of `MemoryMap` never move, // so `SubmissionQueue` and `CompletionQueue` are `Unpin`. // // I really hope that Rust can safely use self-reference types. #[inline] unsafe fn setup_queue( fd: &Fd, p: &sys::io_uring_params, ) -> io::Result<(MemoryMap, SubmissionQueue, CompletionQueue)> { let sq_len = p.sq_off.array as usize + p.sq_entries as usize * mem::size_of::<u32>(); let cq_len = p.cq_off.cqes as usize + p.cq_entries as usize * mem::size_of::<sys::io_uring_cqe>(); let sqe_len = p.sq_entries as usize * mem::size_of::<sys::io_uring_sqe>(); let sqe_mmap = Mmap::new(fd, sys::IORING_OFF_SQES as _, sqe_len)?; if p.features & sys::IORING_FEAT_SINGLE_MMAP!= 0 { let scq_mmap = Mmap::new(fd, sys::IORING_OFF_SQ_RING as _, cmp::max(sq_len, cq_len))?; let sq = SubmissionQueue::new(&scq_mmap, &sqe_mmap, p); let cq = CompletionQueue::new(&scq_mmap, p); let mm = MemoryMap { sq_mmap: scq_mmap, cq_mmap: None, sqe_mmap, }; Ok((mm, sq, cq)) } else { let sq_mmap = Mmap::new(fd, sys::IORING_OFF_SQ_RING as _, sq_len)?; let cq_mmap = Mmap::new(fd, sys::IORING_OFF_CQ_RING as _, cq_len)?; let sq = SubmissionQueue::new(&sq_mmap, &sqe_mmap, p); let cq = CompletionQueue::new(&cq_mmap, p); let mm = MemoryMap { cq_mmap: Some(cq_mmap), sq_mmap, sqe_mmap, }; Ok((mm, sq, cq)) } } let fd: Fd = unsafe { sys::io_uring_setup(entries, &mut p) .try_into() .map_err(|_| io::Error::last_os_error())? }; let (mm, sq, cq) = unsafe { setup_queue(&fd, &p)? }; Ok(IoUring { fd, sq, cq, params: Parameters(p), memory: ManuallyDrop::new(mm), }) }
} #[inline] pub fn params(&self) -> &Parameters { &self.params } pub fn start_enter_syscall_thread(&self) { sys::start_enter_syscall_thread(self.fd.as_raw_fd()); } /// Initiate and/or complete asynchronous I/O /// /// # Safety /// /// This provides a raw interface so developer must ensure that parameters are correct. #[inline] pub unsafe fn enter( &self, to_submit: u32, min_complete: u32, flag: u32, sig: Option<&libc::sigset_t>, ) -> io::Result<usize> { self.submitter().enter(to_submit, min_complete, flag, sig) } /// Initiate asynchronous I/O. #[inline] pub fn submit(&self) -> io::Result<usize> { self.submitter().submit() } /// Initiate and/or complete asynchronous I/O #[inline] pub fn submit_and_wait(&self, want: usize) -> io::Result<usize> { self.submitter().submit_and_wait(want) } /// Get submitter and submission queue and completion queue pub fn split(&mut self) -> (Submitter<'_>, &mut SubmissionQueue, &mut CompletionQueue) { let submit = Submitter::new(&self.fd, self.params.0.flags, &self.sq); (submit, &mut self.sq, &mut self.cq) } /// Get submission queue pub fn submission(&mut self) -> &mut SubmissionQueue { &mut self.sq } /// Get completion queue pub fn completion(&mut self) -> &mut CompletionQueue { &mut self.cq } /// Make a concurrent IoUring. #[cfg(any(feature = "concurrent", sgx))] pub fn concurrent(self) -> concurrent::IoUring { concurrent::IoUring::new(self) } } impl Drop for IoUring { fn drop(&mut self) { unsafe { ManuallyDrop::drop(&mut self.memory); } } } impl Builder { pub fn dontfork(&mut self) -> &mut Self { self.dontfork = true; self } /// Perform busy-waiting for an I/O completion, /// as opposed to getting notifications via an asynchronous IRQ (Interrupt Request). pub fn setup_iopoll(&mut self) -> &mut Self { self.params.flags |= sys::IORING_SETUP_IOPOLL; self } /// When this flag is specified, a kernel thread is created to perform submission queue polling. /// An io_uring instance configured in this way enables an application to issue I/O /// without ever context switching into the kernel. pub fn setup_sqpoll(&mut self, idle: impl Into<Option<u32>>) -> &mut Self { self.params.flags |= sys::IORING_SETUP_SQPOLL; self.params.sq_thread_idle = idle.into().unwrap_or(0); self } /// If this flag is specified, /// then the poll thread will be bound to the cpu set in the value. /// This flag is only meaningful when [Builder::setup_sqpoll] is enabled. pub fn setup_sqpoll_cpu(&mut self, n: u32) -> &mut Self { self.params.flags |= sys::IORING_SETUP_SQ_AFF; self.params.sq_thread_cpu = n; self } /// Create the completion queue with struct `io_uring_params.cq_entries` entries. /// The value must be greater than entries, and may be rounded up to the next power-of-two. pub fn setup_cqsize(&mut self, n: u32) -> &mut Self { self.params.flags |= sys::IORING_SETUP_CQSIZE; self.params.cq_entries = n; self } pub fn setup_clamp(&mut self) -> &mut Self { self.params.flags |= sys::IORING_SETUP_CLAMP; self } pub fn setup_attach_wq(&mut self, fd: RawFd) -> &mut Self { self.params.flags |= sys::IORING_SETUP_ATTACH_WQ; self.params.wq_fd = fd as _; self } #[cfg(feature = "unstable")] pub fn setup_r_disabled(&mut self) -> &mut Self { self.params.flags |= sys::IORING_SETUP_R_DISABLED; self } /// Build a [IoUring]. 
#[inline] pub fn build(&self, entries: u32) -> io::Result<IoUring> { let ring = IoUring::with_params(entries, self.params)?; if self.dontfork { ring.memory.sq_mmap.dontfork()?; ring.memory.sqe_mmap.dontfork()?; if let Some(cq_mmap) = ring.memory.cq_mmap.as_ref() { cq_mmap.dontfork()?; } } Ok(ring) } } impl Parameters { pub fn is_setup_sqpoll(&self) -> bool { self.0.flags & sys::IORING_SETUP_SQPOLL!= 0 } pub fn is_setup_iopoll(&self) -> bool { self.0.flags & sys::IORING_SETUP_IOPOLL!= 0 } /// If this flag is set, the two SQ and CQ rings can be mapped with a single `mmap(2)` call. /// The SQEs must still be allocated separately. /// This brings the necessary `mmap(2)` calls down from three to two. pub fn is_feature_single_mmap(&self) -> bool { self.0.features & sys::IORING_FEAT_SINGLE_MMAP!= 0 } /// If this flag is set, io_uring supports never dropping completion events. If a completion /// event occurs and the CQ ring is full, the kernel stores the event internally until such a /// time that the CQ ring has room for more entries. pub fn is_feature_nodrop(&self) -> bool { self.0.features & sys::IORING_FEAT_NODROP!= 0 } /// If this flag is set, applications can be certain that any data for async offload has been consumed /// when the kernel has consumed the SQE pub fn is_feature_submit_stable(&self) -> bool { self.0.features & sys::IORING_FEAT_SUBMIT_STABLE!= 0 } /// If this flag is set, applications can specify offset == -1 with /// `IORING_OP_{READV,WRITEV}`, `IORING_OP_{READ,WRITE}_FIXED`, and `IORING_OP_{READ,WRITE}` /// to mean current file position, which behaves like `preadv2(2)` and `pwritev2(2)` with offset == -1. /// It’ll use (and update) the current file position. /// /// This obviously comes with the caveat that if the application has multiple reads or writes in flight, /// then the end result will not be as expected. /// This is similar to threads sharing a file descriptor and doing IO using the current file position. pub fn is_feature_rw_cur_pos(&self) -> bool { self.0.features & sys::IORING_FEAT_RW_CUR_POS!= 0 } /// If this flag is set, then io_uring guarantees that both sync and async execution of /// a request assumes the credentials of the task that called `io_uring_enter(2)` to queue the requests. /// If this flag isn’t set, then requests are issued with the credentials of the task that originally registered the io_uring. /// If only one task is using a ring, then this flag doesn’t matter as the credentials will always be the same. /// Note that this is the default behavior, /// tasks can still register different personalities through /// `io_uring_register(2)` with `IORING_REGISTER_PERSONALITY` and specify the personality to use in the sqe. pub fn is_feature_cur_personality(&self) -> bool { self.0.features & sys::IORING_FEAT_CUR_PERSONALITY!= 0 } #[cfg(feature = "unstable")] pub fn is_feature_fast_poll(&self) -> bool { self.0.features & sys::IORING_FEAT_FAST_POLL!= 0 } #[cfg(feature = "unstable")] pub fn is_feature_poll_32bits(&self) -> bool { self.0.features & sys::IORING_FEAT_POLL_32BITS!= 0 } pub fn sq_entries(&self) -> u32 { self.0.sq_entries } pub fn cq_entries(&self) -> u32 { self.0.cq_entries } } impl AsRawFd for IoUring { fn as_raw_fd(&self) -> RawFd { self.fd.as_raw_fd() } }
#[inline] pub fn submitter(&self) -> Submitter<'_> { Submitter::new(&self.fd, self.params.0.flags, &self.sq)
random_line_split
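Since this row covers the public surface of `IoUring`, a minimal usage sketch may help. It assumes the crate is imported as `io_uring` and a Linux 5.1+ kernel, and it uses only constructors and accessors shown above.

use io_uring::IoUring;

fn main() -> std::io::Result<()> {
    // `entries` should be a power of two, per the docs on `new` above.
    let ring = IoUring::new(8)?;
    // Inspect the kernel-negotiated parameters.
    println!("sq entries: {}", ring.params().sq_entries());
    println!("cq entries: {}", ring.params().cq_entries());
    println!("single mmap: {}", ring.params().is_feature_single_mmap());
    // Nothing has been pushed to the submission queue, so this submits zero SQEs.
    let submitted = ring.submit()?;
    println!("submitted {} entries", submitted);
    Ok(())
}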
lib.rs
//! The `io_uring` library for Rust. //! //! The crate only provides a summary of the parameters. //! For more detailed documentation, see manpage. #![cfg_attr(sgx, no_std)] #[cfg(sgx)] extern crate sgx_types; #[cfg(sgx)] #[macro_use] extern crate sgx_tstd as std; #[cfg(sgx)] extern crate sgx_trts; #[cfg(sgx)] use std::prelude::v1::*; #[cfg(sgx)] pub use sgx_trts::libc; #[macro_use] mod util; pub mod cqueue; pub mod opcode; mod register; pub mod squeue; mod submit; mod sys; #[cfg(any(feature = "concurrent", sgx))] pub mod concurrent; use std::convert::TryInto; use std::mem::ManuallyDrop; use std::os::unix::io::{AsRawFd, RawFd}; use std::{cmp, io, mem}; pub use cqueue::CompletionQueue; pub use register::Probe; pub use squeue::SubmissionQueue; pub use submit::Submitter; use util::{Fd, Mmap}; /// IoUring instance pub struct IoUring { fd: Fd, params: Parameters, memory: ManuallyDrop<MemoryMap>, sq: SubmissionQueue, cq: CompletionQueue, } #[allow(dead_code)] struct MemoryMap { sq_mmap: Mmap, sqe_mmap: Mmap, cq_mmap: Option<Mmap>, } /// IoUring build params #[derive(Clone, Default)] pub struct Builder { dontfork: bool, params: sys::io_uring_params, } #[derive(Clone)] pub struct Parameters(sys::io_uring_params); unsafe impl Send for IoUring {} unsafe impl Sync for IoUring {} impl IoUring { /// Create a IoUring instance /// /// The `entries` sets the size of queue, /// and it value should be the power of two. #[inline] pub fn new(entries: u32) -> io::Result<IoUring> { IoUring::with_params(entries, Default::default()) } fn with_params(entries: u32, mut p: sys::io_uring_params) -> io::Result<IoUring> { // NOTE: The `SubmissionQueue` and `CompletionQueue` are references, // and their lifetime can never exceed `MemoryMap`. // // The memory mapped regions of `MemoryMap` never move, // so `SubmissionQueue` and `CompletionQueue` are `Unpin`. // // I really hope that Rust can safely use self-reference types. #[inline] unsafe fn setup_queue( fd: &Fd, p: &sys::io_uring_params, ) -> io::Result<(MemoryMap, SubmissionQueue, CompletionQueue)> { let sq_len = p.sq_off.array as usize + p.sq_entries as usize * mem::size_of::<u32>(); let cq_len = p.cq_off.cqes as usize + p.cq_entries as usize * mem::size_of::<sys::io_uring_cqe>(); let sqe_len = p.sq_entries as usize * mem::size_of::<sys::io_uring_sqe>(); let sqe_mmap = Mmap::new(fd, sys::IORING_OFF_SQES as _, sqe_len)?; if p.features & sys::IORING_FEAT_SINGLE_MMAP!= 0 { let scq_mmap = Mmap::new(fd, sys::IORING_OFF_SQ_RING as _, cmp::max(sq_len, cq_len))?; let sq = SubmissionQueue::new(&scq_mmap, &sqe_mmap, p); let cq = CompletionQueue::new(&scq_mmap, p); let mm = MemoryMap { sq_mmap: scq_mmap, cq_mmap: None, sqe_mmap, }; Ok((mm, sq, cq)) } else { let sq_mmap = Mmap::new(fd, sys::IORING_OFF_SQ_RING as _, sq_len)?; let cq_mmap = Mmap::new(fd, sys::IORING_OFF_CQ_RING as _, cq_len)?; let sq = SubmissionQueue::new(&sq_mmap, &sqe_mmap, p); let cq = CompletionQueue::new(&cq_mmap, p); let mm = MemoryMap { cq_mmap: Some(cq_mmap), sq_mmap, sqe_mmap, }; Ok((mm, sq, cq)) } } let fd: Fd = unsafe { sys::io_uring_setup(entries, &mut p) .try_into() .map_err(|_| io::Error::last_os_error())? }; let (mm, sq, cq) = unsafe { setup_queue(&fd, &p)? 
}; Ok(IoUring { fd, sq, cq, params: Parameters(p), memory: ManuallyDrop::new(mm), }) } #[inline] pub fn submitter(&self) -> Submitter<'_> { Submitter::new(&self.fd, self.params.0.flags, &self.sq) } #[inline] pub fn params(&self) -> &Parameters { &self.params } pub fn start_enter_syscall_thread(&self) { sys::start_enter_syscall_thread(self.fd.as_raw_fd()); } /// Initiate and/or complete asynchronous I/O /// /// # Safety /// /// This provides a raw interface so developer must ensure that parameters are correct. #[inline] pub unsafe fn enter( &self, to_submit: u32, min_complete: u32, flag: u32, sig: Option<&libc::sigset_t>, ) -> io::Result<usize> { self.submitter().enter(to_submit, min_complete, flag, sig) } /// Initiate asynchronous I/O. #[inline] pub fn submit(&self) -> io::Result<usize> { self.submitter().submit() } /// Initiate and/or complete asynchronous I/O #[inline] pub fn submit_and_wait(&self, want: usize) -> io::Result<usize> { self.submitter().submit_and_wait(want) } /// Get submitter and submission queue and completion queue pub fn split(&mut self) -> (Submitter<'_>, &mut SubmissionQueue, &mut CompletionQueue) { let submit = Submitter::new(&self.fd, self.params.0.flags, &self.sq); (submit, &mut self.sq, &mut self.cq) } /// Get submission queue pub fn submission(&mut self) -> &mut SubmissionQueue { &mut self.sq } /// Get completion queue pub fn completion(&mut self) -> &mut CompletionQueue { &mut self.cq } /// Make a concurrent IoUring. #[cfg(any(feature = "concurrent", sgx))] pub fn concurrent(self) -> concurrent::IoUring { concurrent::IoUring::new(self) } } impl Drop for IoUring { fn drop(&mut self) { unsafe { ManuallyDrop::drop(&mut self.memory); } } } impl Builder { pub fn dontfork(&mut self) -> &mut Self { self.dontfork = true; self } /// Perform busy-waiting for an I/O completion, /// as opposed to getting notifications via an asynchronous IRQ (Interrupt Request). pub fn setup_iopoll(&mut self) -> &mut Self { self.params.flags |= sys::IORING_SETUP_IOPOLL; self } /// When this flag is specified, a kernel thread is created to perform submission queue polling. /// An io_uring instance configured in this way enables an application to issue I/O /// without ever context switching into the kernel. pub fn setup_sqpoll(&mut self, idle: impl Into<Option<u32>>) -> &mut Self { self.params.flags |= sys::IORING_SETUP_SQPOLL; self.params.sq_thread_idle = idle.into().unwrap_or(0); self } /// If this flag is specified, /// then the poll thread will be bound to the cpu set in the value. /// This flag is only meaningful when [Builder::setup_sqpoll] is enabled. pub fn setup_sqpoll_cpu(&mut self, n: u32) -> &mut Self { self.params.flags |= sys::IORING_SETUP_SQ_AFF; self.params.sq_thread_cpu = n; self } /// Create the completion queue with struct `io_uring_params.cq_entries` entries. /// The value must be greater than entries, and may be rounded up to the next power-of-two. pub fn setup_cqsize(&mut self, n: u32) -> &mut Self { self.params.flags |= sys::IORING_SETUP_CQSIZE; self.params.cq_entries = n; self } pub fn setup_clamp(&mut self) -> &mut Self { self.params.flags |= sys::IORING_SETUP_CLAMP; self } pub fn setup_attach_wq(&mut self, fd: RawFd) -> &mut Self { self.params.flags |= sys::IORING_SETUP_ATTACH_WQ; self.params.wq_fd = fd as _; self } #[cfg(feature = "unstable")] pub fn setup_r_disabled(&mut self) -> &mut Self { self.params.flags |= sys::IORING_SETUP_R_DISABLED; self } /// Build a [IoUring]. 
#[inline] pub fn build(&self, entries: u32) -> io::Result<IoUring> { let ring = IoUring::with_params(entries, self.params)?; if self.dontfork { ring.memory.sq_mmap.dontfork()?; ring.memory.sqe_mmap.dontfork()?; if let Some(cq_mmap) = ring.memory.cq_mmap.as_ref() { cq_mmap.dontfork()?; } } Ok(ring) } } impl Parameters { pub fn is_setup_sqpoll(&self) -> bool { self.0.flags & sys::IORING_SETUP_SQPOLL!= 0 } pub fn is_setup_iopoll(&self) -> bool { self.0.flags & sys::IORING_SETUP_IOPOLL!= 0 } /// If this flag is set, the two SQ and CQ rings can be mapped with a single `mmap(2)` call. /// The SQEs must still be allocated separately. /// This brings the necessary `mmap(2)` calls down from three to two. pub fn is_feature_single_mmap(&self) -> bool { self.0.features & sys::IORING_FEAT_SINGLE_MMAP!= 0 } /// If this flag is set, io_uring supports never dropping completion events. If a completion /// event occurs and the CQ ring is full, the kernel stores the event internally until such a /// time that the CQ ring has room for more entries. pub fn is_feature_nodrop(&self) -> bool { self.0.features & sys::IORING_FEAT_NODROP!= 0 } /// If this flag is set, applications can be certain that any data for async offload has been consumed /// when the kernel has consumed the SQE pub fn is_feature_submit_stable(&self) -> bool { self.0.features & sys::IORING_FEAT_SUBMIT_STABLE!= 0 } /// If this flag is set, applications can specify offset == -1 with /// `IORING_OP_{READV,WRITEV}`, `IORING_OP_{READ,WRITE}_FIXED`, and `IORING_OP_{READ,WRITE}` /// to mean current file position, which behaves like `preadv2(2)` and `pwritev2(2)` with offset == -1. /// It’ll use (and update) the current file position. /// /// This obviously comes with the caveat that if the application has multiple reads or writes in flight, /// then the end result will not be as expected. /// This is similar to threads sharing a file descriptor and doing IO using the current file position. pub fn is_feature_rw_cur_pos(&self) -> bool { self.0.features & sys::IORING_FEAT_RW_CUR_POS!= 0 } /// If this flag is set, then io_uring guarantees that both sync and async execution of /// a request assumes the credentials of the task that called `io_uring_enter(2)` to queue the requests. /// If this flag isn’t set, then requests are issued with the credentials of the task that originally registered the io_uring. /// If only one task is using a ring, then this flag doesn’t matter as the credentials will always be the same. /// Note that this is the default behavior, /// tasks can still register different personalities through /// `io_uring_register(2)` with `IORING_REGISTER_PERSONALITY` and specify the personality to use in the sqe. pub fn is_feature_cur_personality(&self) -> bool { self.0.features & sys::IORING_FEAT_CUR_PERSONALITY!= 0 } #[cfg(feature = "unstable")] pub fn is_feature_fast_poll(&self) -> bool {
#[cfg(feature = "unstable")] pub fn is_feature_poll_32bits(&self) -> bool { self.0.features & sys::IORING_FEAT_POLL_32BITS!= 0 } pub fn sq_entries(&self) -> u32 { self.0.sq_entries } pub fn cq_entries(&self) -> u32 { self.0.cq_entries } } impl AsRawFd for IoUring { fn as_raw_fd(&self) -> RawFd { self.fd.as_raw_fd() } }
self.0.features & sys::IORING_FEAT_FAST_POLL != 0 }
identifier_body
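Each `Builder::setup_*` method above ORs one `IORING_SETUP_*` flag into the params struct before `io_uring_setup(2)` runs, so configuration is just flag accumulation. A hedged sketch of chaining them: `Builder::default()` relies on the derived `Default` visible above (upstream exposes a dedicated builder constructor, but only the derive is shown here), and the sizes are arbitrary.

use io_uring::Builder;

fn main() -> std::io::Result<()> {
    let ring = Builder::default()
        .dontfork()       // have build() call dontfork() on the ring's mappings
        .setup_cqsize(64) // CQ ring bigger than the SQ ring; must exceed `entries`
        .setup_clamp()    // let the kernel clamp out-of-range sizes
        .build(32)?;      // 32 submission entries
    println!("cq entries: {}", ring.params().cq_entries());
    Ok(())
}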
lib.rs
//! The `io_uring` library for Rust. //! //! The crate only provides a summary of the parameters. //! For more detailed documentation, see manpage. #![cfg_attr(sgx, no_std)] #[cfg(sgx)] extern crate sgx_types; #[cfg(sgx)] #[macro_use] extern crate sgx_tstd as std; #[cfg(sgx)] extern crate sgx_trts; #[cfg(sgx)] use std::prelude::v1::*; #[cfg(sgx)] pub use sgx_trts::libc; #[macro_use] mod util; pub mod cqueue; pub mod opcode; mod register; pub mod squeue; mod submit; mod sys; #[cfg(any(feature = "concurrent", sgx))] pub mod concurrent; use std::convert::TryInto; use std::mem::ManuallyDrop; use std::os::unix::io::{AsRawFd, RawFd}; use std::{cmp, io, mem}; pub use cqueue::CompletionQueue; pub use register::Probe; pub use squeue::SubmissionQueue; pub use submit::Submitter; use util::{Fd, Mmap}; /// IoUring instance pub struct IoUring { fd: Fd, params: Parameters, memory: ManuallyDrop<MemoryMap>, sq: SubmissionQueue, cq: CompletionQueue, } #[allow(dead_code)] struct MemoryMap { sq_mmap: Mmap, sqe_mmap: Mmap, cq_mmap: Option<Mmap>, } /// IoUring build params #[derive(Clone, Default)] pub struct Builder { dontfork: bool, params: sys::io_uring_params, } #[derive(Clone)] pub struct Parameters(sys::io_uring_params); unsafe impl Send for IoUring {} unsafe impl Sync for IoUring {} impl IoUring { /// Create a IoUring instance /// /// The `entries` sets the size of queue, /// and it value should be the power of two. #[inline] pub fn new(entries: u32) -> io::Result<IoUring> { IoUring::with_params(entries, Default::default()) } fn with_params(entries: u32, mut p: sys::io_uring_params) -> io::Result<IoUring> { // NOTE: The `SubmissionQueue` and `CompletionQueue` are references, // and their lifetime can never exceed `MemoryMap`. // // The memory mapped regions of `MemoryMap` never move, // so `SubmissionQueue` and `CompletionQueue` are `Unpin`. // // I really hope that Rust can safely use self-reference types. #[inline] unsafe fn setup_queue( fd: &Fd, p: &sys::io_uring_params, ) -> io::Result<(MemoryMap, SubmissionQueue, CompletionQueue)> { let sq_len = p.sq_off.array as usize + p.sq_entries as usize * mem::size_of::<u32>(); let cq_len = p.cq_off.cqes as usize + p.cq_entries as usize * mem::size_of::<sys::io_uring_cqe>(); let sqe_len = p.sq_entries as usize * mem::size_of::<sys::io_uring_sqe>(); let sqe_mmap = Mmap::new(fd, sys::IORING_OFF_SQES as _, sqe_len)?; if p.features & sys::IORING_FEAT_SINGLE_MMAP!= 0 { let scq_mmap = Mmap::new(fd, sys::IORING_OFF_SQ_RING as _, cmp::max(sq_len, cq_len))?; let sq = SubmissionQueue::new(&scq_mmap, &sqe_mmap, p); let cq = CompletionQueue::new(&scq_mmap, p); let mm = MemoryMap { sq_mmap: scq_mmap, cq_mmap: None, sqe_mmap, }; Ok((mm, sq, cq)) } else { let sq_mmap = Mmap::new(fd, sys::IORING_OFF_SQ_RING as _, sq_len)?; let cq_mmap = Mmap::new(fd, sys::IORING_OFF_CQ_RING as _, cq_len)?; let sq = SubmissionQueue::new(&sq_mmap, &sqe_mmap, p); let cq = CompletionQueue::new(&cq_mmap, p); let mm = MemoryMap { cq_mmap: Some(cq_mmap), sq_mmap, sqe_mmap, }; Ok((mm, sq, cq)) } } let fd: Fd = unsafe { sys::io_uring_setup(entries, &mut p) .try_into() .map_err(|_| io::Error::last_os_error())? }; let (mm, sq, cq) = unsafe { setup_queue(&fd, &p)? 
}; Ok(IoUring { fd, sq, cq, params: Parameters(p), memory: ManuallyDrop::new(mm), }) } #[inline] pub fn submitter(&self) -> Submitter<'_> { Submitter::new(&self.fd, self.params.0.flags, &self.sq) } #[inline] pub fn params(&self) -> &Parameters { &self.params } pub fn start_enter_syscall_thread(&self) { sys::start_enter_syscall_thread(self.fd.as_raw_fd()); } /// Initiate and/or complete asynchronous I/O /// /// # Safety /// /// This provides a raw interface so developer must ensure that parameters are correct. #[inline] pub unsafe fn enter( &self, to_submit: u32, min_complete: u32, flag: u32, sig: Option<&libc::sigset_t>, ) -> io::Result<usize> { self.submitter().enter(to_submit, min_complete, flag, sig) } /// Initiate asynchronous I/O. #[inline] pub fn submit(&self) -> io::Result<usize> { self.submitter().submit() } /// Initiate and/or complete asynchronous I/O #[inline] pub fn submit_and_wait(&self, want: usize) -> io::Result<usize> { self.submitter().submit_and_wait(want) } /// Get submitter and submission queue and completion queue pub fn split(&mut self) -> (Submitter<'_>, &mut SubmissionQueue, &mut CompletionQueue) { let submit = Submitter::new(&self.fd, self.params.0.flags, &self.sq); (submit, &mut self.sq, &mut self.cq) } /// Get submission queue pub fn submission(&mut self) -> &mut SubmissionQueue { &mut self.sq } /// Get completion queue pub fn completion(&mut self) -> &mut CompletionQueue { &mut self.cq } /// Make a concurrent IoUring. #[cfg(any(feature = "concurrent", sgx))] pub fn concurrent(self) -> concurrent::IoUring { concurrent::IoUring::new(self) } } impl Drop for IoUring { fn drop(&mut self) { unsafe { ManuallyDrop::drop(&mut self.memory); } } } impl Builder { pub fn dontfork(&mut self) -> &mut Self { self.dontfork = true; self } /// Perform busy-waiting for an I/O completion, /// as opposed to getting notifications via an asynchronous IRQ (Interrupt Request). pub fn setup_iopoll(&mut self) -> &mut Self { self.params.flags |= sys::IORING_SETUP_IOPOLL; self } /// When this flag is specified, a kernel thread is created to perform submission queue polling. /// An io_uring instance configured in this way enables an application to issue I/O /// without ever context switching into the kernel. pub fn setup_sqpoll(&mut self, idle: impl Into<Option<u32>>) -> &mut Self { self.params.flags |= sys::IORING_SETUP_SQPOLL; self.params.sq_thread_idle = idle.into().unwrap_or(0); self } /// If this flag is specified, /// then the poll thread will be bound to the cpu set in the value. /// This flag is only meaningful when [Builder::setup_sqpoll] is enabled. pub fn setup_sqpoll_cpu(&mut self, n: u32) -> &mut Self { self.params.flags |= sys::IORING_SETUP_SQ_AFF; self.params.sq_thread_cpu = n; self } /// Create the completion queue with struct `io_uring_params.cq_entries` entries. /// The value must be greater than entries, and may be rounded up to the next power-of-two. pub fn setup_cqsize(&mut self, n: u32) -> &mut Self { self.params.flags |= sys::IORING_SETUP_CQSIZE; self.params.cq_entries = n; self } pub fn setup_clamp(&mut self) -> &mut Self { self.params.flags |= sys::IORING_SETUP_CLAMP; self } pub fn setup_attach_wq(&mut self, fd: RawFd) -> &mut Self { self.params.flags |= sys::IORING_SETUP_ATTACH_WQ; self.params.wq_fd = fd as _; self } #[cfg(feature = "unstable")] pub fn setup_r_disabled(&mut self) -> &mut Self { self.params.flags |= sys::IORING_SETUP_R_DISABLED; self } /// Build a [IoUring]. 
#[inline] pub fn build(&self, entries: u32) -> io::Result<IoUring> { let ring = IoUring::with_params(entries, self.params)?; if self.dontfork { ring.memory.sq_mmap.dontfork()?; ring.memory.sqe_mmap.dontfork()?; if let Some(cq_mmap) = ring.memory.cq_mmap.as_ref() { cq_mmap.dontfork()?; } } Ok(ring) } } impl Parameters { pub fn is_setup_sqpoll(&self) -> bool { self.0.flags & sys::IORING_SETUP_SQPOLL!= 0 } pub fn is_setup_iopoll(&self) -> bool { self.0.flags & sys::IORING_SETUP_IOPOLL!= 0 } /// If this flag is set, the two SQ and CQ rings can be mapped with a single `mmap(2)` call. /// The SQEs must still be allocated separately. /// This brings the necessary `mmap(2)` calls down from three to two. pub fn
(&self) -> bool { self.0.features & sys::IORING_FEAT_SINGLE_MMAP!= 0 } /// If this flag is set, io_uring supports never dropping completion events. If a completion /// event occurs and the CQ ring is full, the kernel stores the event internally until such a /// time that the CQ ring has room for more entries. pub fn is_feature_nodrop(&self) -> bool { self.0.features & sys::IORING_FEAT_NODROP!= 0 } /// If this flag is set, applications can be certain that any data for async offload has been consumed /// when the kernel has consumed the SQE pub fn is_feature_submit_stable(&self) -> bool { self.0.features & sys::IORING_FEAT_SUBMIT_STABLE!= 0 } /// If this flag is set, applications can specify offset == -1 with /// `IORING_OP_{READV,WRITEV}`, `IORING_OP_{READ,WRITE}_FIXED`, and `IORING_OP_{READ,WRITE}` /// to mean current file position, which behaves like `preadv2(2)` and `pwritev2(2)` with offset == -1. /// It’ll use (and update) the current file position. /// /// This obviously comes with the caveat that if the application has multiple reads or writes in flight, /// then the end result will not be as expected. /// This is similar to threads sharing a file descriptor and doing IO using the current file position. pub fn is_feature_rw_cur_pos(&self) -> bool { self.0.features & sys::IORING_FEAT_RW_CUR_POS!= 0 } /// If this flag is set, then io_uring guarantees that both sync and async execution of /// a request assumes the credentials of the task that called `io_uring_enter(2)` to queue the requests. /// If this flag isn’t set, then requests are issued with the credentials of the task that originally registered the io_uring. /// If only one task is using a ring, then this flag doesn’t matter as the credentials will always be the same. /// Note that this is the default behavior, /// tasks can still register different personalities through /// `io_uring_register(2)` with `IORING_REGISTER_PERSONALITY` and specify the personality to use in the sqe. pub fn is_feature_cur_personality(&self) -> bool { self.0.features & sys::IORING_FEAT_CUR_PERSONALITY!= 0 } #[cfg(feature = "unstable")] pub fn is_feature_fast_poll(&self) -> bool { self.0.features & sys::IORING_FEAT_FAST_POLL!= 0 } #[cfg(feature = "unstable")] pub fn is_feature_poll_32bits(&self) -> bool { self.0.features & sys::IORING_FEAT_POLL_32BITS!= 0 } pub fn sq_entries(&self) -> u32 { self.0.sq_entries } pub fn cq_entries(&self) -> u32 { self.0.cq_entries } } impl AsRawFd for IoUring { fn as_raw_fd(&self) -> RawFd { self.fd.as_raw_fd() } }
is_feature_single_mmap
identifier_name
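The mapping-size arithmetic inside `setup_queue` is easy to miss in the flattened source, so here is the same computation restated as a standalone sketch; the parameter names are ours, and the offsets arrive as plain numbers rather than the kernel-filled `io_uring_params`.

// Standalone restatement of setup_queue's mapping sizes.
fn ring_mapping_sizes(
    sq_array_off: usize,
    sq_entries: usize,
    cq_cqes_off: usize,
    cq_entries: usize,
    cqe_size: usize,
    sqe_size: usize,
    single_mmap: bool,
) -> (Vec<usize>, usize) {
    let sq_len = sq_array_off + sq_entries * std::mem::size_of::<u32>();
    let cq_len = cq_cqes_off + cq_entries * cqe_size;
    // The SQE array always gets its own mapping, regardless of the feature flag.
    let sqe_len = sq_entries * sqe_size;
    let ring_maps = if single_mmap {
        // IORING_FEAT_SINGLE_MMAP: one region large enough for both rings.
        vec![sq_len.max(cq_len)]
    } else {
        vec![sq_len, cq_len]
    };
    (ring_maps, sqe_len)
}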
lib.rs
//! The `io_uring` library for Rust. //! //! The crate only provides a summary of the parameters. //! For more detailed documentation, see manpage. #![cfg_attr(sgx, no_std)] #[cfg(sgx)] extern crate sgx_types; #[cfg(sgx)] #[macro_use] extern crate sgx_tstd as std; #[cfg(sgx)] extern crate sgx_trts; #[cfg(sgx)] use std::prelude::v1::*; #[cfg(sgx)] pub use sgx_trts::libc; #[macro_use] mod util; pub mod cqueue; pub mod opcode; mod register; pub mod squeue; mod submit; mod sys; #[cfg(any(feature = "concurrent", sgx))] pub mod concurrent; use std::convert::TryInto; use std::mem::ManuallyDrop; use std::os::unix::io::{AsRawFd, RawFd}; use std::{cmp, io, mem}; pub use cqueue::CompletionQueue; pub use register::Probe; pub use squeue::SubmissionQueue; pub use submit::Submitter; use util::{Fd, Mmap}; /// IoUring instance pub struct IoUring { fd: Fd, params: Parameters, memory: ManuallyDrop<MemoryMap>, sq: SubmissionQueue, cq: CompletionQueue, } #[allow(dead_code)] struct MemoryMap { sq_mmap: Mmap, sqe_mmap: Mmap, cq_mmap: Option<Mmap>, } /// IoUring build params #[derive(Clone, Default)] pub struct Builder { dontfork: bool, params: sys::io_uring_params, } #[derive(Clone)] pub struct Parameters(sys::io_uring_params); unsafe impl Send for IoUring {} unsafe impl Sync for IoUring {} impl IoUring { /// Create a IoUring instance /// /// The `entries` sets the size of queue, /// and it value should be the power of two. #[inline] pub fn new(entries: u32) -> io::Result<IoUring> { IoUring::with_params(entries, Default::default()) } fn with_params(entries: u32, mut p: sys::io_uring_params) -> io::Result<IoUring> { // NOTE: The `SubmissionQueue` and `CompletionQueue` are references, // and their lifetime can never exceed `MemoryMap`. // // The memory mapped regions of `MemoryMap` never move, // so `SubmissionQueue` and `CompletionQueue` are `Unpin`. // // I really hope that Rust can safely use self-reference types. #[inline] unsafe fn setup_queue( fd: &Fd, p: &sys::io_uring_params, ) -> io::Result<(MemoryMap, SubmissionQueue, CompletionQueue)> { let sq_len = p.sq_off.array as usize + p.sq_entries as usize * mem::size_of::<u32>(); let cq_len = p.cq_off.cqes as usize + p.cq_entries as usize * mem::size_of::<sys::io_uring_cqe>(); let sqe_len = p.sq_entries as usize * mem::size_of::<sys::io_uring_sqe>(); let sqe_mmap = Mmap::new(fd, sys::IORING_OFF_SQES as _, sqe_len)?; if p.features & sys::IORING_FEAT_SINGLE_MMAP!= 0
else { let sq_mmap = Mmap::new(fd, sys::IORING_OFF_SQ_RING as _, sq_len)?; let cq_mmap = Mmap::new(fd, sys::IORING_OFF_CQ_RING as _, cq_len)?; let sq = SubmissionQueue::new(&sq_mmap, &sqe_mmap, p); let cq = CompletionQueue::new(&cq_mmap, p); let mm = MemoryMap { cq_mmap: Some(cq_mmap), sq_mmap, sqe_mmap, }; Ok((mm, sq, cq)) } } let fd: Fd = unsafe { sys::io_uring_setup(entries, &mut p) .try_into() .map_err(|_| io::Error::last_os_error())? }; let (mm, sq, cq) = unsafe { setup_queue(&fd, &p)? }; Ok(IoUring { fd, sq, cq, params: Parameters(p), memory: ManuallyDrop::new(mm), }) } #[inline] pub fn submitter(&self) -> Submitter<'_> { Submitter::new(&self.fd, self.params.0.flags, &self.sq) } #[inline] pub fn params(&self) -> &Parameters { &self.params } pub fn start_enter_syscall_thread(&self) { sys::start_enter_syscall_thread(self.fd.as_raw_fd()); } /// Initiate and/or complete asynchronous I/O /// /// # Safety /// /// This provides a raw interface so developer must ensure that parameters are correct. #[inline] pub unsafe fn enter( &self, to_submit: u32, min_complete: u32, flag: u32, sig: Option<&libc::sigset_t>, ) -> io::Result<usize> { self.submitter().enter(to_submit, min_complete, flag, sig) } /// Initiate asynchronous I/O. #[inline] pub fn submit(&self) -> io::Result<usize> { self.submitter().submit() } /// Initiate and/or complete asynchronous I/O #[inline] pub fn submit_and_wait(&self, want: usize) -> io::Result<usize> { self.submitter().submit_and_wait(want) } /// Get submitter and submission queue and completion queue pub fn split(&mut self) -> (Submitter<'_>, &mut SubmissionQueue, &mut CompletionQueue) { let submit = Submitter::new(&self.fd, self.params.0.flags, &self.sq); (submit, &mut self.sq, &mut self.cq) } /// Get submission queue pub fn submission(&mut self) -> &mut SubmissionQueue { &mut self.sq } /// Get completion queue pub fn completion(&mut self) -> &mut CompletionQueue { &mut self.cq } /// Make a concurrent IoUring. #[cfg(any(feature = "concurrent", sgx))] pub fn concurrent(self) -> concurrent::IoUring { concurrent::IoUring::new(self) } } impl Drop for IoUring { fn drop(&mut self) { unsafe { ManuallyDrop::drop(&mut self.memory); } } } impl Builder { pub fn dontfork(&mut self) -> &mut Self { self.dontfork = true; self } /// Perform busy-waiting for an I/O completion, /// as opposed to getting notifications via an asynchronous IRQ (Interrupt Request). pub fn setup_iopoll(&mut self) -> &mut Self { self.params.flags |= sys::IORING_SETUP_IOPOLL; self } /// When this flag is specified, a kernel thread is created to perform submission queue polling. /// An io_uring instance configured in this way enables an application to issue I/O /// without ever context switching into the kernel. pub fn setup_sqpoll(&mut self, idle: impl Into<Option<u32>>) -> &mut Self { self.params.flags |= sys::IORING_SETUP_SQPOLL; self.params.sq_thread_idle = idle.into().unwrap_or(0); self } /// If this flag is specified, /// then the poll thread will be bound to the cpu set in the value. /// This flag is only meaningful when [Builder::setup_sqpoll] is enabled. pub fn setup_sqpoll_cpu(&mut self, n: u32) -> &mut Self { self.params.flags |= sys::IORING_SETUP_SQ_AFF; self.params.sq_thread_cpu = n; self } /// Create the completion queue with struct `io_uring_params.cq_entries` entries. /// The value must be greater than entries, and may be rounded up to the next power-of-two. 
pub fn setup_cqsize(&mut self, n: u32) -> &mut Self { self.params.flags |= sys::IORING_SETUP_CQSIZE; self.params.cq_entries = n; self } pub fn setup_clamp(&mut self) -> &mut Self { self.params.flags |= sys::IORING_SETUP_CLAMP; self } pub fn setup_attach_wq(&mut self, fd: RawFd) -> &mut Self { self.params.flags |= sys::IORING_SETUP_ATTACH_WQ; self.params.wq_fd = fd as _; self } #[cfg(feature = "unstable")] pub fn setup_r_disabled(&mut self) -> &mut Self { self.params.flags |= sys::IORING_SETUP_R_DISABLED; self } /// Build a [IoUring]. #[inline] pub fn build(&self, entries: u32) -> io::Result<IoUring> { let ring = IoUring::with_params(entries, self.params)?; if self.dontfork { ring.memory.sq_mmap.dontfork()?; ring.memory.sqe_mmap.dontfork()?; if let Some(cq_mmap) = ring.memory.cq_mmap.as_ref() { cq_mmap.dontfork()?; } } Ok(ring) } } impl Parameters { pub fn is_setup_sqpoll(&self) -> bool { self.0.flags & sys::IORING_SETUP_SQPOLL!= 0 } pub fn is_setup_iopoll(&self) -> bool { self.0.flags & sys::IORING_SETUP_IOPOLL!= 0 } /// If this flag is set, the two SQ and CQ rings can be mapped with a single `mmap(2)` call. /// The SQEs must still be allocated separately. /// This brings the necessary `mmap(2)` calls down from three to two. pub fn is_feature_single_mmap(&self) -> bool { self.0.features & sys::IORING_FEAT_SINGLE_MMAP!= 0 } /// If this flag is set, io_uring supports never dropping completion events. If a completion /// event occurs and the CQ ring is full, the kernel stores the event internally until such a /// time that the CQ ring has room for more entries. pub fn is_feature_nodrop(&self) -> bool { self.0.features & sys::IORING_FEAT_NODROP!= 0 } /// If this flag is set, applications can be certain that any data for async offload has been consumed /// when the kernel has consumed the SQE pub fn is_feature_submit_stable(&self) -> bool { self.0.features & sys::IORING_FEAT_SUBMIT_STABLE!= 0 } /// If this flag is set, applications can specify offset == -1 with /// `IORING_OP_{READV,WRITEV}`, `IORING_OP_{READ,WRITE}_FIXED`, and `IORING_OP_{READ,WRITE}` /// to mean current file position, which behaves like `preadv2(2)` and `pwritev2(2)` with offset == -1. /// It’ll use (and update) the current file position. /// /// This obviously comes with the caveat that if the application has multiple reads or writes in flight, /// then the end result will not be as expected. /// This is similar to threads sharing a file descriptor and doing IO using the current file position. pub fn is_feature_rw_cur_pos(&self) -> bool { self.0.features & sys::IORING_FEAT_RW_CUR_POS!= 0 } /// If this flag is set, then io_uring guarantees that both sync and async execution of /// a request assumes the credentials of the task that called `io_uring_enter(2)` to queue the requests. /// If this flag isn’t set, then requests are issued with the credentials of the task that originally registered the io_uring. /// If only one task is using a ring, then this flag doesn’t matter as the credentials will always be the same. /// Note that this is the default behavior, /// tasks can still register different personalities through /// `io_uring_register(2)` with `IORING_REGISTER_PERSONALITY` and specify the personality to use in the sqe. 
pub fn is_feature_cur_personality(&self) -> bool { self.0.features & sys::IORING_FEAT_CUR_PERSONALITY != 0 } #[cfg(feature = "unstable")] pub fn is_feature_fast_poll(&self) -> bool { self.0.features & sys::IORING_FEAT_FAST_POLL != 0 } #[cfg(feature = "unstable")] pub fn is_feature_poll_32bits(&self) -> bool { self.0.features & sys::IORING_FEAT_POLL_32BITS != 0 } pub fn sq_entries(&self) -> u32 { self.0.sq_entries } pub fn cq_entries(&self) -> u32 { self.0.cq_entries } } impl AsRawFd for IoUring { fn as_raw_fd(&self) -> RawFd { self.fd.as_raw_fd() } }
{ let scq_mmap = Mmap::new(fd, sys::IORING_OFF_SQ_RING as _, cmp::max(sq_len, cq_len))?; let sq = SubmissionQueue::new(&scq_mmap, &sqe_mmap, p); let cq = CompletionQueue::new(&scq_mmap, p); let mm = MemoryMap { sq_mmap: scq_mmap, cq_mmap: None, sqe_mmap, }; Ok((mm, sq, cq)) }
conditional_block
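A short usage sketch of the builder and parameter APIs above. This is a hedged illustration: obtaining the `Builder` via `IoUring::builder()` is an assumption, since the `Builder` constructor is not part of this excerpt; every other call appears in the code above.

use std::io;

fn build_ring() -> io::Result<io_uring::IoUring> {
    // Assumption: the crate exposes `IoUring::builder()`; that constructor
    // is outside this excerpt.
    let ring = io_uring::IoUring::builder()
        .setup_sqpoll(1000) // kernel-side SQ polling thread, 1000 ms idle
        .setup_sqpoll_cpu(0) // pin the poll thread to CPU 0 (needs SQPOLL)
        .build(256)?; // 256 submission queue entries

    // `Parameters` reports kernel feature bits, e.g. whether completion
    // events can ever be dropped when the CQ ring overflows.
    if ring.params().is_feature_nodrop() {
        // the kernel buffers overflowing completion events internally
    }
    Ok(ring)
}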
error.rs
/* * Copyright (C) 2020 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ use crate::binder::AsNative; use crate::sys; use std::error; use std::ffi::CStr; use std::fmt::{Debug, Display, Formatter, Result as FmtResult}; use std::result; pub use sys::binder_status_t as status_t; /// Low-level status codes from Android `libutils`. // All error codes are negative integer values. Derived from the anonymous enum // in utils/Errors.h pub use sys::android_c_interface_StatusCode as StatusCode; /// A specialized [`Result`](result::Result) for binder operations. pub type Result<T> = result::Result<T, StatusCode>; /// Convert a low-level status code into an empty result. /// /// An OK status is converted into an `Ok` result, any other status is converted /// into an `Err` result holding the status code. pub fn status_result(status: status_t) -> Result<()> { match parse_status_code(status) { StatusCode::OK => Ok(()), e => Err(e), } } fn parse_status_code(code: i32) -> StatusCode { match code { e if e == StatusCode::OK as i32 => StatusCode::OK, e if e == StatusCode::NO_MEMORY as i32 => StatusCode::NO_MEMORY, e if e == StatusCode::INVALID_OPERATION as i32 => StatusCode::INVALID_OPERATION, e if e == StatusCode::BAD_VALUE as i32 => StatusCode::BAD_VALUE, e if e == StatusCode::BAD_TYPE as i32 => StatusCode::BAD_TYPE, e if e == StatusCode::NAME_NOT_FOUND as i32 => StatusCode::NAME_NOT_FOUND, e if e == StatusCode::PERMISSION_DENIED as i32 => StatusCode::PERMISSION_DENIED, e if e == StatusCode::NO_INIT as i32 => StatusCode::NO_INIT, e if e == StatusCode::ALREADY_EXISTS as i32 => StatusCode::ALREADY_EXISTS, e if e == StatusCode::DEAD_OBJECT as i32 => StatusCode::DEAD_OBJECT, e if e == StatusCode::FAILED_TRANSACTION as i32 => StatusCode::FAILED_TRANSACTION, e if e == StatusCode::BAD_INDEX as i32 => StatusCode::BAD_INDEX, e if e == StatusCode::NOT_ENOUGH_DATA as i32 => StatusCode::NOT_ENOUGH_DATA, e if e == StatusCode::WOULD_BLOCK as i32 => StatusCode::WOULD_BLOCK, e if e == StatusCode::TIMED_OUT as i32 => StatusCode::TIMED_OUT, e if e == StatusCode::UNKNOWN_TRANSACTION as i32 => StatusCode::UNKNOWN_TRANSACTION, e if e == StatusCode::FDS_NOT_ALLOWED as i32 => StatusCode::FDS_NOT_ALLOWED, e if e == StatusCode::UNEXPECTED_NULL as i32 => StatusCode::UNEXPECTED_NULL, _ => StatusCode::UNKNOWN_ERROR, } } pub use sys::android_c_interface_ExceptionCode as ExceptionCode; fn parse_exception_code(code: i32) -> ExceptionCode { match code { e if e == ExceptionCode::NONE as i32 => ExceptionCode::NONE, e if e == ExceptionCode::SECURITY as i32 => ExceptionCode::SECURITY, e if e == ExceptionCode::BAD_PARCELABLE as i32 => ExceptionCode::BAD_PARCELABLE, e if e == ExceptionCode::ILLEGAL_ARGUMENT as i32 => ExceptionCode::ILLEGAL_ARGUMENT, e if e == ExceptionCode::NULL_POINTER as i32 => ExceptionCode::NULL_POINTER, e if e == ExceptionCode::ILLEGAL_STATE as i32 => ExceptionCode::ILLEGAL_STATE, e if e == ExceptionCode::NETWORK_MAIN_THREAD as i32 => ExceptionCode::NETWORK_MAIN_THREAD, e if e == 
ExceptionCode::UNSUPPORTED_OPERATION as i32 => { ExceptionCode::UNSUPPORTED_OPERATION } e if e == ExceptionCode::SERVICE_SPECIFIC as i32 => ExceptionCode::SERVICE_SPECIFIC, _ => ExceptionCode::TRANSACTION_FAILED, } } // Safety: `Status` always contains an owning pointer to a valid `AStatus`. The // lifetime of the contained pointer is the same as the `Status` object. /// High-level binder status object that encapsulates a standard way to keep /// track of and chain binder errors along with service specific errors. /// /// Used in AIDL transactions to represent failed transactions. pub struct Status(*mut sys::AStatus); // Safety: The `AStatus` that the `Status` points to must have an entirely thread-safe API for the // duration of the `Status` object's lifetime. We ensure this by not allowing mutation of a `Status` // in Rust, and the NDK API says we're the owner of our `AStatus` objects so outside code should not // be mutating them underneath us. unsafe impl Sync for Status {} // Safety: `Status` always contains an owning pointer to a valid `AStatus`, and an // `AStatus` is not tied to the thread that created it, so the owned pointer can be // safely sent to another thread. unsafe impl Send for Status {} impl Status { /// Create a status object representing a successful transaction. pub fn ok() -> Self { let ptr = unsafe { // Safety: `AStatus_newOk` always returns a new, heap allocated // pointer to an `AStatus` object, so we know this pointer will be // valid. // // Rust takes ownership of the returned pointer. sys::AStatus_newOk() }; Self(ptr) } /// Create a status object from a service specific error pub fn new_service_specific_error(err: i32, message: Option<&CStr>) -> Status { let ptr = if let Some(message) = message { unsafe { // Safety: Any i32 is a valid service specific error for the // error code parameter. We construct a valid, null-terminated // `CString` from the message, which must be a valid C-style // string to pass as the message. This function always returns a // new, heap allocated pointer to an `AStatus` object, so we // know the returned pointer will be valid. // // Rust takes ownership of the returned pointer. sys::AStatus_fromServiceSpecificErrorWithMessage(err, message.as_ptr()) } } else { unsafe { // Safety: Any i32 is a valid service specific error for the // error code parameter. This function always returns a new, // heap allocated pointer to an `AStatus` object, so we know the // returned pointer will be valid. // // Rust takes ownership of the returned pointer. sys::AStatus_fromServiceSpecificError(err) } }; Self(ptr) } /// Create a status object from an exception code pub fn new_exception(exception: ExceptionCode, message: Option<&CStr>) -> Status { if let Some(message) = message { let ptr = unsafe { sys::AStatus_fromExceptionCodeWithMessage(exception as i32, message.as_ptr()) }; Self(ptr) } else { exception.into() } } /// Create a status object from a raw `AStatus` pointer. /// /// # Safety /// /// This constructor is safe iff `ptr` is a valid pointer to an `AStatus`. pub(crate) unsafe fn from_ptr(ptr: *mut sys::AStatus) -> Self { Self(ptr) } /// Returns `true` if this status represents a successful transaction. pub fn is_ok(&self) -> bool { unsafe { // Safety: `Status` always contains a valid `AStatus` pointer, so we // are always passing a valid pointer to `AStatus_isOk` here. sys::AStatus_isOk(self.as_native()) } } /// Returns a description of the status.
pub fn get_description(&self) -> String { let description_ptr = unsafe { // Safety: `Status` always contains a valid `AStatus` pointer, so we // are always passing a valid pointer to `AStatus_getDescription` // here. // // `AStatus_getDescription` always returns a valid pointer to a null // terminated C string. Rust is responsible for freeing this pointer // via `AStatus_deleteDescription`. sys::AStatus_getDescription(self.as_native()) }; let description = unsafe { // Safety: `AStatus_getDescription` always returns a valid C string, // which can be safely converted to a `CStr`. CStr::from_ptr(description_ptr) }; let description = description.to_string_lossy().to_string(); unsafe { // Safety: `description_ptr` was returned from // `AStatus_getDescription` above, and must be freed via // `AStatus_deleteDescription`. We must not access the pointer after // this call, so we copy it into an owned string above and return // that string. sys::AStatus_deleteDescription(description_ptr); } description } /// Returns the exception code of the status. pub fn exception_code(&self) -> ExceptionCode { let code = unsafe { // Safety: `Status` always contains a valid `AStatus` pointer, so we // are always passing a valid pointer to `AStatus_getExceptionCode` // here. sys::AStatus_getExceptionCode(self.as_native()) }; parse_exception_code(code) } /// Return a status code representing a transaction failure, or /// `StatusCode::OK` if there was no transaction failure. /// /// If this method returns `OK`, the status may still represent a different /// exception or a service specific error. To find out if this transaction /// as a whole is okay, use [`is_ok`](Self::is_ok) instead. pub fn transaction_error(&self) -> StatusCode { let code = unsafe { // Safety: `Status` always contains a valid `AStatus` pointer, so we // are always passing a valid pointer to `AStatus_getStatus` here. sys::AStatus_getStatus(self.as_native()) }; parse_status_code(code) } /// Return a service specific error if this status represents one. /// /// This function will only ever return a non-zero result if /// [`exception_code`](Self::exception_code) returns /// `ExceptionCode::SERVICE_SPECIFIC`. If this function returns 0, the /// status object may still represent a different exception or status. To /// find out if this transaction as a whole is okay, use /// [`is_ok`](Self::is_ok) instead. pub fn service_specific_error(&self) -> i32 { unsafe { // Safety: `Status` always contains a valid `AStatus` pointer, so we // are always passing a valid pointer to // `AStatus_getServiceSpecificError` here. sys::AStatus_getServiceSpecificError(self.as_native()) } } /// Calls `op` if the status was ok, otherwise returns an `Err` value of /// `self`. 
pub fn and_then<T, F>(self, op: F) -> result::Result<T, Status> where F: FnOnce() -> result::Result<T, Status>, { <result::Result<(), Status>>::from(self)?; op() } } impl error::Error for Status {} impl Display for Status { fn fmt(&self, f: &mut Formatter) -> FmtResult { f.write_str(&self.get_description()) } } impl Debug for Status { fn fmt(&self, f: &mut Formatter) -> FmtResult { f.write_str(&self.get_description()) } } impl PartialEq for Status { fn eq(&self, other: &Status) -> bool { let self_code = self.exception_code(); let other_code = other.exception_code(); match (self_code, other_code) { (ExceptionCode::NONE, ExceptionCode::NONE) => true, (ExceptionCode::TRANSACTION_FAILED, ExceptionCode::TRANSACTION_FAILED) => { self.transaction_error() == other.transaction_error() && self.get_description() == other.get_description() } (ExceptionCode::SERVICE_SPECIFIC, ExceptionCode::SERVICE_SPECIFIC) => { self.service_specific_error() == other.service_specific_error() && self.get_description() == other.get_description() } (e1, e2) => e1 == e2 && self.get_description() == other.get_description(), } } } impl Eq for Status {} impl From<StatusCode> for Status { fn from(status: StatusCode) -> Status { (status as status_t).into() } } impl From<status_t> for Status { fn from(status: status_t) -> Status { let ptr = unsafe { // Safety: `AStatus_fromStatus` expects any `status_t` integer, so // this is a safe FFI call. Unknown values will be coerced into // UNKNOWN_ERROR. sys::AStatus_fromStatus(status) }; Self(ptr) } } impl From<ExceptionCode> for Status { fn from(code: ExceptionCode) -> Status { let ptr = unsafe { // Safety: `AStatus_fromExceptionCode` expects any // `binder_exception_t` (i32) integer, so this is a safe FFI call. // Unknown values will be coerced into EX_TRANSACTION_FAILED. sys::AStatus_fromExceptionCode(code as i32) }; Self(ptr) } } // TODO: impl Try for Status when try_trait is stabilized // https://github.com/rust-lang/rust/issues/42327 impl From<Status> for result::Result<(), Status> { fn from(status: Status) -> result::Result<(), Status> { if status.is_ok() { Ok(()) } else { Err(status) } } } impl From<Status> for status_t { fn from(status: Status) -> status_t { status.transaction_error() as status_t } } impl Drop for Status { fn drop(&mut self) { unsafe { // Safety: `Status` manages the lifetime of its inner `AStatus` // pointee, so we need to delete it here. We know that the pointer // will be valid here since `Status` always contains a valid pointer // while it is alive. sys::AStatus_delete(self.0); } } } /// # Safety /// /// `Status` always contains a valid pointer to an `AStatus` object, so we can /// trivially convert it to a correctly-typed raw pointer. /// /// Care must be taken that the returned pointer is only dereferenced while the /// `Status` object is still alive. unsafe impl AsNative<sys::AStatus> for Status { fn as_native(&self) -> *const sys::AStatus
fn as_native_mut(&mut self) -> *mut sys::AStatus { self.0 } }
{ self.0 }
identifier_body
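A minimal consumption sketch for the `Status` API defined in this file, using only items shown above plus `std::ffi::CStr`:

use std::ffi::CStr;

fn demo_status() {
    // A successful status converts cleanly into a Result.
    let ok = Status::ok();
    assert!(ok.is_ok());
    assert!(<std::result::Result<(), Status>>::from(ok).is_ok());

    // A service specific error carries an i32 code and an optional message.
    let msg = CStr::from_bytes_with_nul(b"quota exceeded\0").unwrap();
    let err = Status::new_service_specific_error(42, Some(msg));
    assert_eq!(err.service_specific_error(), 42);
    match err.exception_code() {
        ExceptionCode::SERVICE_SPECIFIC => {} // as documented above
        _ => unreachable!("service specific errors report SERVICE_SPECIFIC"),
    }
}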
error.rs
/* * Copyright (C) 2020 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ use crate::binder::AsNative; use crate::sys; use std::error; use std::ffi::CStr; use std::fmt::{Debug, Display, Formatter, Result as FmtResult}; use std::result; pub use sys::binder_status_t as status_t; /// Low-level status codes from Android `libutils`. // All error codes are negative integer values. Derived from the anonymous enum // in utils/Errors.h pub use sys::android_c_interface_StatusCode as StatusCode; /// A specialized [`Result`](result::Result) for binder operations. pub type Result<T> = result::Result<T, StatusCode>; /// Convert a low-level status code into an empty result. /// /// An OK status is converted into an `Ok` result, any other status is converted /// into an `Err` result holding the status code. pub fn status_result(status: status_t) -> Result<()> { match parse_status_code(status) { StatusCode::OK => Ok(()), e => Err(e), } } fn parse_status_code(code: i32) -> StatusCode { match code { e if e == StatusCode::OK as i32 => StatusCode::OK, e if e == StatusCode::NO_MEMORY as i32 => StatusCode::NO_MEMORY, e if e == StatusCode::INVALID_OPERATION as i32 => StatusCode::INVALID_OPERATION, e if e == StatusCode::BAD_VALUE as i32 => StatusCode::BAD_VALUE, e if e == StatusCode::BAD_TYPE as i32 => StatusCode::BAD_TYPE, e if e == StatusCode::NAME_NOT_FOUND as i32 => StatusCode::NAME_NOT_FOUND, e if e == StatusCode::PERMISSION_DENIED as i32 => StatusCode::PERMISSION_DENIED, e if e == StatusCode::NO_INIT as i32 => StatusCode::NO_INIT, e if e == StatusCode::ALREADY_EXISTS as i32 => StatusCode::ALREADY_EXISTS, e if e == StatusCode::DEAD_OBJECT as i32 => StatusCode::DEAD_OBJECT, e if e == StatusCode::FAILED_TRANSACTION as i32 => StatusCode::FAILED_TRANSACTION, e if e == StatusCode::BAD_INDEX as i32 => StatusCode::BAD_INDEX, e if e == StatusCode::NOT_ENOUGH_DATA as i32 => StatusCode::NOT_ENOUGH_DATA, e if e == StatusCode::WOULD_BLOCK as i32 => StatusCode::WOULD_BLOCK, e if e == StatusCode::TIMED_OUT as i32 => StatusCode::TIMED_OUT, e if e == StatusCode::UNKNOWN_TRANSACTION as i32 => StatusCode::UNKNOWN_TRANSACTION, e if e == StatusCode::FDS_NOT_ALLOWED as i32 => StatusCode::FDS_NOT_ALLOWED, e if e == StatusCode::UNEXPECTED_NULL as i32 => StatusCode::UNEXPECTED_NULL, _ => StatusCode::UNKNOWN_ERROR, } } pub use sys::android_c_interface_ExceptionCode as ExceptionCode; fn parse_exception_code(code: i32) -> ExceptionCode { match code { e if e == ExceptionCode::NONE as i32 => ExceptionCode::NONE, e if e == ExceptionCode::SECURITY as i32 => ExceptionCode::SECURITY, e if e == ExceptionCode::BAD_PARCELABLE as i32 => ExceptionCode::BAD_PARCELABLE, e if e == ExceptionCode::ILLEGAL_ARGUMENT as i32 => ExceptionCode::ILLEGAL_ARGUMENT, e if e == ExceptionCode::NULL_POINTER as i32 => ExceptionCode::NULL_POINTER, e if e == ExceptionCode::ILLEGAL_STATE as i32 => ExceptionCode::ILLEGAL_STATE, e if e == ExceptionCode::NETWORK_MAIN_THREAD as i32 => ExceptionCode::NETWORK_MAIN_THREAD, e if e == 
ExceptionCode::UNSUPPORTED_OPERATION as i32 => { ExceptionCode::UNSUPPORTED_OPERATION } e if e == ExceptionCode::SERVICE_SPECIFIC as i32 => ExceptionCode::SERVICE_SPECIFIC, _ => ExceptionCode::TRANSACTION_FAILED, } } // Safety: `Status` always contains an owning pointer to a valid `AStatus`. The // lifetime of the contained pointer is the same as the `Status` object. /// High-level binder status object that encapsulates a standard way to keep /// track of and chain binder errors along with service specific errors. /// /// Used in AIDL transactions to represent failed transactions. pub struct Status(*mut sys::AStatus); // Safety: The `AStatus` that the `Status` points to must have an entirely thread-safe API for the // duration of the `Status` object's lifetime. We ensure this by not allowing mutation of a `Status` // in Rust, and the NDK API says we're the owner of our `AStatus` objects so outside code should not // be mutating them underneath us. unsafe impl Sync for Status {} // Safety: `Status` always contains an owning pointer to a valid `AStatus`, and an // `AStatus` is not tied to the thread that created it, so the owned pointer can be // safely sent to another thread. unsafe impl Send for Status {} impl Status { /// Create a status object representing a successful transaction. pub fn ok() -> Self { let ptr = unsafe { // Safety: `AStatus_newOk` always returns a new, heap allocated // pointer to an `AStatus` object, so we know this pointer will be // valid. // // Rust takes ownership of the returned pointer. sys::AStatus_newOk() }; Self(ptr) } /// Create a status object from a service specific error pub fn new_service_specific_error(err: i32, message: Option<&CStr>) -> Status { let ptr = if let Some(message) = message { unsafe { // Safety: Any i32 is a valid service specific error for the // error code parameter. We construct a valid, null-terminated // `CString` from the message, which must be a valid C-style // string to pass as the message. This function always returns a // new, heap allocated pointer to an `AStatus` object, so we // know the returned pointer will be valid. // // Rust takes ownership of the returned pointer. sys::AStatus_fromServiceSpecificErrorWithMessage(err, message.as_ptr()) } } else { unsafe { // Safety: Any i32 is a valid service specific error for the // error code parameter. This function always returns a new, // heap allocated pointer to an `AStatus` object, so we know the // returned pointer will be valid. // // Rust takes ownership of the returned pointer. sys::AStatus_fromServiceSpecificError(err) } }; Self(ptr) } /// Create a status object from an exception code pub fn new_exception(exception: ExceptionCode, message: Option<&CStr>) -> Status { if let Some(message) = message { let ptr = unsafe { sys::AStatus_fromExceptionCodeWithMessage(exception as i32, message.as_ptr()) }; Self(ptr) } else { exception.into() } } /// Create a status object from a raw `AStatus` pointer. /// /// # Safety /// /// This constructor is safe iff `ptr` is a valid pointer to an `AStatus`. pub(crate) unsafe fn from_ptr(ptr: *mut sys::AStatus) -> Self { Self(ptr) } /// Returns `true` if this status represents a successful transaction. pub fn is_ok(&self) -> bool { unsafe { // Safety: `Status` always contains a valid `AStatus` pointer, so we // are always passing a valid pointer to `AStatus_isOk` here. sys::AStatus_isOk(self.as_native()) } } /// Returns a description of the status.
pub fn get_description(&self) -> String { let description_ptr = unsafe { // Safety: `Status` always contains a valid `AStatus` pointer, so we // are always passing a valid pointer to `AStatus_getDescription` // here. // // `AStatus_getDescription` always returns a valid pointer to a null // terminated C string. Rust is responsible for freeing this pointer // via `AStatus_deleteDescription`. sys::AStatus_getDescription(self.as_native()) }; let description = unsafe { // Safety: `AStatus_getDescription` always returns a valid C string, // which can be safely converted to a `CStr`. CStr::from_ptr(description_ptr) }; let description = description.to_string_lossy().to_string(); unsafe { // Safety: `description_ptr` was returned from // `AStatus_getDescription` above, and must be freed via // `AStatus_deleteDescription`. We must not access the pointer after // this call, so we copy it into an owned string above and return // that string. sys::AStatus_deleteDescription(description_ptr); } description } /// Returns the exception code of the status. pub fn exception_code(&self) -> ExceptionCode { let code = unsafe { // Safety: `Status` always contains a valid `AStatus` pointer, so we // are always passing a valid pointer to `AStatus_getExceptionCode` // here. sys::AStatus_getExceptionCode(self.as_native()) }; parse_exception_code(code) } /// Return a status code representing a transaction failure, or /// `StatusCode::OK` if there was no transaction failure. /// /// If this method returns `OK`, the status may still represent a different /// exception or a service specific error. To find out if this transaction /// as a whole is okay, use [`is_ok`](Self::is_ok) instead. pub fn transaction_error(&self) -> StatusCode { let code = unsafe { // Safety: `Status` always contains a valid `AStatus` pointer, so we // are always passing a valid pointer to `AStatus_getStatus` here. sys::AStatus_getStatus(self.as_native()) }; parse_status_code(code) } /// Return a service specific error if this status represents one. /// /// This function will only ever return a non-zero result if /// [`exception_code`](Self::exception_code) returns /// `ExceptionCode::SERVICE_SPECIFIC`. If this function returns 0, the /// status object may still represent a different exception or status. To /// find out if this transaction as a whole is okay, use /// [`is_ok`](Self::is_ok) instead. pub fn service_specific_error(&self) -> i32 { unsafe { // Safety: `Status` always contains a valid `AStatus` pointer, so we // are always passing a valid pointer to // `AStatus_getServiceSpecificError` here. sys::AStatus_getServiceSpecificError(self.as_native()) } } /// Calls `op` if the status was ok, otherwise returns an `Err` value of /// `self`. pub fn and_then<T, F>(self, op: F) -> result::Result<T, Status> where F: FnOnce() -> result::Result<T, Status>, { <result::Result<(), Status>>::from(self)?; op() } } impl error::Error for Status {} impl Display for Status { fn
(&self, f: &mut Formatter) -> FmtResult { f.write_str(&self.get_description()) } } impl Debug for Status { fn fmt(&self, f: &mut Formatter) -> FmtResult { f.write_str(&self.get_description()) } } impl PartialEq for Status { fn eq(&self, other: &Status) -> bool { let self_code = self.exception_code(); let other_code = other.exception_code(); match (self_code, other_code) { (ExceptionCode::NONE, ExceptionCode::NONE) => true, (ExceptionCode::TRANSACTION_FAILED, ExceptionCode::TRANSACTION_FAILED) => { self.transaction_error() == other.transaction_error() && self.get_description() == other.get_description() } (ExceptionCode::SERVICE_SPECIFIC, ExceptionCode::SERVICE_SPECIFIC) => { self.service_specific_error() == other.service_specific_error() && self.get_description() == other.get_description() } (e1, e2) => e1 == e2 && self.get_description() == other.get_description(), } } } impl Eq for Status {} impl From<StatusCode> for Status { fn from(status: StatusCode) -> Status { (status as status_t).into() } } impl From<status_t> for Status { fn from(status: status_t) -> Status { let ptr = unsafe { // Safety: `AStatus_fromStatus` expects any `status_t` integer, so // this is a safe FFI call. Unknown values will be coerced into // UNKNOWN_ERROR. sys::AStatus_fromStatus(status) }; Self(ptr) } } impl From<ExceptionCode> for Status { fn from(code: ExceptionCode) -> Status { let ptr = unsafe { // Safety: `AStatus_fromExceptionCode` expects any // `binder_exception_t` (i32) integer, so this is a safe FFI call. // Unknown values will be coerced into EX_TRANSACTION_FAILED. sys::AStatus_fromExceptionCode(code as i32) }; Self(ptr) } } // TODO: impl Try for Status when try_trait is stabilized // https://github.com/rust-lang/rust/issues/42327 impl From<Status> for result::Result<(), Status> { fn from(status: Status) -> result::Result<(), Status> { if status.is_ok() { Ok(()) } else { Err(status) } } } impl From<Status> for status_t { fn from(status: Status) -> status_t { status.transaction_error() as status_t } } impl Drop for Status { fn drop(&mut self) { unsafe { // Safety: `Status` manages the lifetime of its inner `AStatus` // pointee, so we need to delete it here. We know that the pointer // will be valid here since `Status` always contains a valid pointer // while it is alive. sys::AStatus_delete(self.0); } } } /// # Safety /// /// `Status` always contains a valid pointer to an `AStatus` object, so we can /// trivially convert it to a correctly-typed raw pointer. /// /// Care must be taken that the returned pointer is only dereferenced while the /// `Status` object is still alive. unsafe impl AsNative<sys::AStatus> for Status { fn as_native(&self) -> *const sys::AStatus { self.0 } fn as_native_mut(&mut self) -> *mut sys::AStatus { self.0 } }
fmt
identifier_name
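The `e if e == Variant as i32` guard arms in `parse_status_code` and `parse_exception_code` are the standard workaround for Rust having no checked integer-to-enum cast. A self-contained illustration of the same pattern (the `Level` enum here is invented for the example):

#[derive(Debug, Clone, Copy)]
enum Level {
    Ok = 0,
    Warn = 1,
    Error = 2,
}

fn parse_level(code: i32) -> Option<Level> {
    // Compare the raw integer against each variant cast to i32. The FFI
    // code above falls through to UNKNOWN_ERROR / TRANSACTION_FAILED
    // instead of None so that unknown codes still map to an enum value.
    match code {
        c if c == Level::Ok as i32 => Some(Level::Ok),
        c if c == Level::Warn as i32 => Some(Level::Warn),
        c if c == Level::Error as i32 => Some(Level::Error),
        _ => None,
    }
}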
error.rs
/* * Copyright (C) 2020 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ use crate::binder::AsNative; use crate::sys; use std::error; use std::ffi::CStr; use std::fmt::{Debug, Display, Formatter, Result as FmtResult}; use std::result; pub use sys::binder_status_t as status_t; /// Low-level status codes from Android `libutils`. // All error codes are negative integer values. Derived from the anonymous enum // in utils/Errors.h pub use sys::android_c_interface_StatusCode as StatusCode; /// A specialized [`Result`](result::Result) for binder operations. pub type Result<T> = result::Result<T, StatusCode>; /// Convert a low-level status code into an empty result. /// /// An OK status is converted into an `Ok` result, any other status is converted /// into an `Err` result holding the status code. pub fn status_result(status: status_t) -> Result<()> { match parse_status_code(status) { StatusCode::OK => Ok(()), e => Err(e), } } fn parse_status_code(code: i32) -> StatusCode { match code { e if e == StatusCode::OK as i32 => StatusCode::OK, e if e == StatusCode::NO_MEMORY as i32 => StatusCode::NO_MEMORY, e if e == StatusCode::INVALID_OPERATION as i32 => StatusCode::INVALID_OPERATION, e if e == StatusCode::BAD_VALUE as i32 => StatusCode::BAD_VALUE, e if e == StatusCode::BAD_TYPE as i32 => StatusCode::BAD_TYPE, e if e == StatusCode::NAME_NOT_FOUND as i32 => StatusCode::NAME_NOT_FOUND, e if e == StatusCode::PERMISSION_DENIED as i32 => StatusCode::PERMISSION_DENIED, e if e == StatusCode::NO_INIT as i32 => StatusCode::NO_INIT, e if e == StatusCode::ALREADY_EXISTS as i32 => StatusCode::ALREADY_EXISTS, e if e == StatusCode::DEAD_OBJECT as i32 => StatusCode::DEAD_OBJECT, e if e == StatusCode::FAILED_TRANSACTION as i32 => StatusCode::FAILED_TRANSACTION, e if e == StatusCode::BAD_INDEX as i32 => StatusCode::BAD_INDEX, e if e == StatusCode::NOT_ENOUGH_DATA as i32 => StatusCode::NOT_ENOUGH_DATA, e if e == StatusCode::WOULD_BLOCK as i32 => StatusCode::WOULD_BLOCK, e if e == StatusCode::TIMED_OUT as i32 => StatusCode::TIMED_OUT, e if e == StatusCode::UNKNOWN_TRANSACTION as i32 => StatusCode::UNKNOWN_TRANSACTION, e if e == StatusCode::FDS_NOT_ALLOWED as i32 => StatusCode::FDS_NOT_ALLOWED, e if e == StatusCode::UNEXPECTED_NULL as i32 => StatusCode::UNEXPECTED_NULL, _ => StatusCode::UNKNOWN_ERROR, } } pub use sys::android_c_interface_ExceptionCode as ExceptionCode; fn parse_exception_code(code: i32) -> ExceptionCode { match code { e if e == ExceptionCode::NONE as i32 => ExceptionCode::NONE, e if e == ExceptionCode::SECURITY as i32 => ExceptionCode::SECURITY, e if e == ExceptionCode::BAD_PARCELABLE as i32 => ExceptionCode::BAD_PARCELABLE, e if e == ExceptionCode::ILLEGAL_ARGUMENT as i32 => ExceptionCode::ILLEGAL_ARGUMENT, e if e == ExceptionCode::NULL_POINTER as i32 => ExceptionCode::NULL_POINTER, e if e == ExceptionCode::ILLEGAL_STATE as i32 => ExceptionCode::ILLEGAL_STATE, e if e == ExceptionCode::NETWORK_MAIN_THREAD as i32 => ExceptionCode::NETWORK_MAIN_THREAD, e if e == 
ExceptionCode::UNSUPPORTED_OPERATION as i32 => { ExceptionCode::UNSUPPORTED_OPERATION } e if e == ExceptionCode::SERVICE_SPECIFIC as i32 => ExceptionCode::SERVICE_SPECIFIC, _ => ExceptionCode::TRANSACTION_FAILED, } } // Safety: `Status` always contains an owning pointer to a valid `AStatus`. The // lifetime of the contained pointer is the same as the `Status` object. /// High-level binder status object that encapsulates a standard way to keep /// track of and chain binder errors along with service specific errors. /// /// Used in AIDL transactions to represent failed transactions. pub struct Status(*mut sys::AStatus); // Safety: The `AStatus` that the `Status` points to must have an entirely thread-safe API for the // duration of the `Status` object's lifetime. We ensure this by not allowing mutation of a `Status` // in Rust, and the NDK API says we're the owner of our `AStatus` objects so outside code should not // be mutating them underneath us. unsafe impl Sync for Status {} // Safety: `Status` always contains an owning pointer to a valid `AStatus`, and an // `AStatus` is not tied to the thread that created it, so the owned pointer can be // safely sent to another thread. unsafe impl Send for Status {} impl Status { /// Create a status object representing a successful transaction. pub fn ok() -> Self { let ptr = unsafe { // Safety: `AStatus_newOk` always returns a new, heap allocated // pointer to an `AStatus` object, so we know this pointer will be // valid. // // Rust takes ownership of the returned pointer. sys::AStatus_newOk() }; Self(ptr) } /// Create a status object from a service specific error pub fn new_service_specific_error(err: i32, message: Option<&CStr>) -> Status { let ptr = if let Some(message) = message { unsafe { // Safety: Any i32 is a valid service specific error for the // error code parameter. We construct a valid, null-terminated // `CString` from the message, which must be a valid C-style // string to pass as the message. This function always returns a // new, heap allocated pointer to an `AStatus` object, so we // know the returned pointer will be valid. // // Rust takes ownership of the returned pointer. sys::AStatus_fromServiceSpecificErrorWithMessage(err, message.as_ptr()) } } else { unsafe { // Safety: Any i32 is a valid service specific error for the // error code parameter. This function always returns a new, // heap allocated pointer to an `AStatus` object, so we know the // returned pointer will be valid. // // Rust takes ownership of the returned pointer. sys::AStatus_fromServiceSpecificError(err) } }; Self(ptr) } /// Create a status object from an exception code pub fn new_exception(exception: ExceptionCode, message: Option<&CStr>) -> Status { if let Some(message) = message { let ptr = unsafe { sys::AStatus_fromExceptionCodeWithMessage(exception as i32, message.as_ptr()) }; Self(ptr) } else { exception.into() } } /// Create a status object from a raw `AStatus` pointer. /// /// # Safety /// /// This constructor is safe iff `ptr` is a valid pointer to an `AStatus`. pub(crate) unsafe fn from_ptr(ptr: *mut sys::AStatus) -> Self { Self(ptr) } /// Returns `true` if this status represents a successful transaction. pub fn is_ok(&self) -> bool { unsafe { // Safety: `Status` always contains a valid `AStatus` pointer, so we // are always passing a valid pointer to `AStatus_isOk` here. sys::AStatus_isOk(self.as_native()) } } /// Returns a description of the status.
pub fn get_description(&self) -> String { let description_ptr = unsafe { // Safety: `Status` always contains a valid `AStatus` pointer, so we // are always passing a valid pointer to `AStatus_getDescription` // here. // // `AStatus_getDescription` always returns a valid pointer to a null // terminated C string. Rust is responsible for freeing this pointer // via `AStatus_deleteDescription`. sys::AStatus_getDescription(self.as_native()) }; let description = unsafe { // Safety: `AStatus_getDescription` always returns a valid C string, // which can be safely converted to a `CStr`. CStr::from_ptr(description_ptr) }; let description = description.to_string_lossy().to_string(); unsafe { // Safety: `description_ptr` was returned from // `AStatus_getDescription` above, and must be freed via // `AStatus_deleteDescription`. We must not access the pointer after // this call, so we copy it into an owned string above and return // that string. sys::AStatus_deleteDescription(description_ptr); } description } /// Returns the exception code of the status. pub fn exception_code(&self) -> ExceptionCode { let code = unsafe { // Safety: `Status` always contains a valid `AStatus` pointer, so we // are always passing a valid pointer to `AStatus_getExceptionCode` // here. sys::AStatus_getExceptionCode(self.as_native()) }; parse_exception_code(code) } /// Return a status code representing a transaction failure, or /// `StatusCode::OK` if there was no transaction failure. /// /// If this method returns `OK`, the status may still represent a different /// exception or a service specific error. To find out if this transaction /// as a whole is okay, use [`is_ok`](Self::is_ok) instead. pub fn transaction_error(&self) -> StatusCode { let code = unsafe { // Safety: `Status` always contains a valid `AStatus` pointer, so we // are always passing a valid pointer to `AStatus_getStatus` here. sys::AStatus_getStatus(self.as_native()) }; parse_status_code(code) } /// Return a service specific error if this status represents one. /// /// This function will only ever return a non-zero result if /// [`exception_code`](Self::exception_code) returns /// `ExceptionCode::SERVICE_SPECIFIC`. If this function returns 0, the /// status object may still represent a different exception or status. To /// find out if this transaction as a whole is okay, use /// [`is_ok`](Self::is_ok) instead. pub fn service_specific_error(&self) -> i32 { unsafe { // Safety: `Status` always contains a valid `AStatus` pointer, so we // are always passing a valid pointer to // `AStatus_getServiceSpecificError` here. sys::AStatus_getServiceSpecificError(self.as_native()) } } /// Calls `op` if the status was ok, otherwise returns an `Err` value of /// `self`. pub fn and_then<T, F>(self, op: F) -> result::Result<T, Status> where F: FnOnce() -> result::Result<T, Status>, { <result::Result<(), Status>>::from(self)?; op() } } impl error::Error for Status {}
} } impl Debug for Status { fn fmt(&self, f: &mut Formatter) -> FmtResult { f.write_str(&self.get_description()) } } impl PartialEq for Status { fn eq(&self, other: &Status) -> bool { let self_code = self.exception_code(); let other_code = other.exception_code(); match (self_code, other_code) { (ExceptionCode::NONE, ExceptionCode::NONE) => true, (ExceptionCode::TRANSACTION_FAILED, ExceptionCode::TRANSACTION_FAILED) => { self.transaction_error() == other.transaction_error() && self.get_description() == other.get_description() } (ExceptionCode::SERVICE_SPECIFIC, ExceptionCode::SERVICE_SPECIFIC) => { self.service_specific_error() == other.service_specific_error() && self.get_description() == other.get_description() } (e1, e2) => e1 == e2 && self.get_description() == other.get_description(), } } } impl Eq for Status {} impl From<StatusCode> for Status { fn from(status: StatusCode) -> Status { (status as status_t).into() } } impl From<status_t> for Status { fn from(status: status_t) -> Status { let ptr = unsafe { // Safety: `AStatus_fromStatus` expects any `status_t` integer, so // this is a safe FFI call. Unknown values will be coerced into // UNKNOWN_ERROR. sys::AStatus_fromStatus(status) }; Self(ptr) } } impl From<ExceptionCode> for Status { fn from(code: ExceptionCode) -> Status { let ptr = unsafe { // Safety: `AStatus_fromExceptionCode` expects any // `binder_exception_t` (i32) integer, so this is a safe FFI call. // Unknown values will be coerced into EX_TRANSACTION_FAILED. sys::AStatus_fromExceptionCode(code as i32) }; Self(ptr) } } // TODO: impl Try for Status when try_trait is stabilized // https://github.com/rust-lang/rust/issues/42327 impl From<Status> for result::Result<(), Status> { fn from(status: Status) -> result::Result<(), Status> { if status.is_ok() { Ok(()) } else { Err(status) } } } impl From<Status> for status_t { fn from(status: Status) -> status_t { status.transaction_error() as status_t } } impl Drop for Status { fn drop(&mut self) { unsafe { // Safety: `Status` manages the lifetime of its inner `AStatus` // pointee, so we need to delete it here. We know that the pointer // will be valid here since `Status` always contains a valid pointer // while it is alive. sys::AStatus_delete(self.0); } } } /// # Safety /// /// `Status` always contains a valid pointer to an `AStatus` object, so we can /// trivially convert it to a correctly-typed raw pointer. /// /// Care must be taken that the returned pointer is only dereferenced while the /// `Status` object is still alive. unsafe impl AsNative<sys::AStatus> for Status { fn as_native(&self) -> *const sys::AStatus { self.0 } fn as_native_mut(&mut self) -> *mut sys::AStatus { self.0 } }
impl Display for Status { fn fmt(&self, f: &mut Formatter) -> FmtResult { f.write_str(&self.get_description())
random_line_split
setup.rs
// Copyright 2020 Zachary Stewart // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Implements the setup phase of the board. use std::collections::{hash_map::Entry, HashMap}; use crate::{ board::{AddShipError, Board, CannotPlaceReason, Dimensions, Grid, PlaceError}, ships::{ProjectIter, ShapeProjection, ShipId, ShipShape}, }; /// Reference to a particular ship's placement info as well as the grid, providing access /// to the methods necessary to check its placement status. pub struct ShipEntry<'a, I, D: Dimensions, S> { /// ID of this ship. id: I, /// Grid that the ship may occupy. grid: &'a Grid<I, D>, /// Placement info for the ship. ship: &'a ShipPlacementInfo<S, D::Coordinate>, } impl<'a, I: ShipId, D: Dimensions, S: ShipShape<D>> ShipEntry<'a, I, D, S> { /// If the ship is placed, get the placement. Otherwise return `None`. // Has to be specialized for mut and non-mut because mut variants can't return a // projection that lives as long as 'a, since that would potentially alias the &mut // ref. With a const ref, we can give back a ref that lives as long as self rather // than just as long as this method call. pub fn placement(&self) -> Option<&'a ShapeProjection<D::Coordinate>> { self.ship.placement.as_ref() } } /// Reference to a particular ship's placement info as well as the grid, providing access /// to the methods necessary to check its placement status and place or unplace it. pub struct ShipEntryMut<'a, I, D: Dimensions, S> { /// ID of this ship id: I, /// Grid that ships are being placed into. grid: &'a mut Grid<I, D>, /// Back ref to the ship. ship: &'a mut ShipPlacementInfo<S, D::Coordinate>, } /// Implementation of the shared parts of ShipEntry. macro_rules! ship_entry_shared { ($t:ident) => { impl<'a, I: ShipId, D: Dimensions, S: ShipShape<D>> $t<'a, I, D, S> { /// Get the ID of this ship. pub fn id(&self) -> &I { &self.id } /// Returns true if this ship has been placed. pub fn placed(&self) -> bool { self.ship.placement.is_some() } /// Get an iterator over possible projections of the shape for this ship that /// start from the given [`Coordinate`]. If there are no possible placements /// from the given coordinate, including if the coordinate is out of bounds, /// the resulting iterator will be empty. pub fn get_placements( &self, coord: D::Coordinate, ) -> ProjectIter<D, S::ProjectIterState> { self.ship.shape.project(coord, &self.grid.dim) } /// Check if the specified placement is valid for this ship.
pub fn check_placement( &self, placement: &ShapeProjection<D::Coordinate>, ) -> Result<(), CannotPlaceReason> { if self.placed() { Err(CannotPlaceReason::AlreadyPlaced) } else if !self .ship .shape .is_valid_placement(placement, &self.grid.dim) { Err(CannotPlaceReason::InvalidProjection) } else { for coord in placement.iter() { match self.grid.get(coord) { None => return Err(CannotPlaceReason::InvalidProjection), Some(cell) if cell.ship.is_some() => { return Err(CannotPlaceReason::AlreadyOccupied) } _ => {} } } Ok(()) } } } }; } ship_entry_shared!(ShipEntry); ship_entry_shared!(ShipEntryMut); impl<'a, I: ShipId, D: Dimensions, S: ShipShape<D>> ShipEntryMut<'a, I, D, S> { /// If the ship is placed, get the placement. Otherwise return `None`. // Has to be specialized for mut and non-mut because mut variants can't return a // projection that lives as long as 'a, since that would potentially alias the &mut // ref. pub fn placement(&self) -> Option<&ShapeProjection<D::Coordinate>> { self.ship.placement.as_ref() } /// Attempts to place the ship onto the given coordinates. If the ship is already /// placed, returns `Err` with the attempted placement and reason placement failed, /// otherwise returns `Ok(())` pub fn place( &mut self, placement: ShapeProjection<D::Coordinate>, ) -> Result<(), PlaceError<ShapeProjection<D::Coordinate>>> { if self.placed() { Err(PlaceError::new(CannotPlaceReason::AlreadyPlaced, placement)) } else if !self .ship .shape .is_valid_placement(&placement, &self.grid.dim) { Err(PlaceError::new( CannotPlaceReason::InvalidProjection, placement, )) } else { for coord in placement.iter() { match self.grid.get(coord) { None => { // ShipShape should ensure that all coordinates are valid, but don't // trust it. return Err(PlaceError::new( CannotPlaceReason::InvalidProjection, placement, )); } Some(cell) if cell.ship.is_some() => { return Err(PlaceError::new( CannotPlaceReason::AlreadyOccupied, placement, )); } _ => {} } } // Already ensured that every position is valid and not occupied. for coord in placement.iter() { self.grid[coord].ship = Some(self.id.to_owned()); } self.ship.placement = Some(placement); Ok(()) } } /// Attempt to clear the placement of the ship. Returns the previous placement of the /// ship if any. Returns `None` if the ship has not been placed. pub fn unplace(&mut self) -> Option<ShapeProjection<D::Coordinate>> { self.ship.placement.take().map(|placement| { for coord in placement.iter() { // We should only allow placement on valid cells, so unwrap is fine. self.grid[coord].ship = None; } placement }) } } /// Contains a ship's shape and current placement status in the grid. struct ShipPlacementInfo<S, C> { /// Shape being placed. shape: S, /// Placement of this ship, if it has been placed. placement: Option<ShapeProjection<C>>, } /// Setup phase for a [`Board`]. Allows placing ships and does not allow shooting. pub struct BoardSetup<I: ShipId, D: Dimensions, S: ShipShape<D>> { /// Grid for placement of ships. grid: Grid<I, D>, /// Mapping of added ShipIds to corresponding placement info. ships: HashMap<I, ShipPlacementInfo<S, D::Coordinate>>, } impl<I: ShipId, D: Dimensions, S: ShipShape<D>> BoardSetup<I, D, S> { /// Begin game setup by constructing a new board with the given [`Dimensions`]. pub fn new(dim: D) -> Self { Self { grid: Grid::new(dim), ships: HashMap::new(), } } /// Get the [`Dimensions`] of this [`Board`]. pub fn dimensions(&self) -> &D { &self.grid.dim } /// Tries to start the game.
If all ships are placed, returns a [`Board`] with the /// current placements. If no ships have been added or any ship has not been placed, /// returns self. pub fn start(self) -> Result<Board<I, D>, Self> { if !self.ready() { Err(self) } else { Ok(Board { grid: self.grid, ships: self .ships .into_iter() .map(|(id, info)| match info.placement { Some(placement) => (id, placement), None => unreachable!(), }) .collect(), }) } } /// Checks if this board is ready to start. Returns `true` if at least one ship has /// been added and all ships are placed. pub fn ready(&self) -> bool { !self.ships.is_empty() && self.ships.values().all(|ship| ship.placement.is_some()) } /// Get an iterator over the ships configured on this board. pub fn iter_ships(&self) -> impl Iterator<Item = ShipEntry<I, D, S>> { let grid = &self.grid; self.ships.iter().map(move |(id, ship)| ShipEntry { id: id.clone(), grid, ship, }) } /// Attempts to add a ship with the given ID. If the given ShipID is already used, /// returns the shape passed to this function. Otherwise adds the shape and returns /// the ShipEntryMut for it to allow placement. pub fn add_ship( &mut self, id: I, shape: S, ) -> Result<ShipEntryMut<I, D, S>, AddShipError<I, S>> { match self.ships.entry(id.clone()) { Entry::Occupied(_) => Err(AddShipError::new(id, shape)), Entry::Vacant(entry) => { let ship = entry.insert(ShipPlacementInfo { shape, placement: None, }); Ok(ShipEntryMut { id, grid: &mut self.grid, ship, }) } } } /// Get the [`ShipEntry`] for the ship with the specified ID if such a ship exists. pub fn
(&self, id: I) -> Option<ShipEntry<I, D, S>> { let grid = &self.grid; self.ships .get(&id) .map(move |ship| ShipEntry { id, grid, ship }) } /// Get the [`ShipEntryMut`] for the ship with the specified ID if such a ship exists. pub fn get_ship_mut(&mut self, id: I) -> Option<ShipEntryMut<I, D, S>> { let grid = &mut self.grid; self.ships .get_mut(&id) .map(move |ship| ShipEntryMut { id, grid, ship }) } /// Get the ID of the ship placed at the specified coordinate if any. Returns None if /// the coordinate is out of bounds or no ship was placed on the specified point. pub fn get_coord(&self, coord: &D::Coordinate) -> Option<&I> { self.grid.get(coord).and_then(|cell| cell.ship.as_ref()) } }
get_ship
identifier_name
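A sketch of the placement flow that `BoardSetup` is built for. The concrete `Dim`, `ShipRef`, `Line`, and `origin()` names below are hypothetical stand-ins (this excerpt only defines the generic `Dimensions`, `ShipId`, and `ShipShape` bounds they would satisfy); the `BoardSetup` and `ShipEntryMut` calls are the ones defined above:

fn place_one_ship(dim: Dim) {
    // Assumptions: `Dim: Dimensions`, `ShipRef: ShipId`,
    // `Line: ShipShape<Dim>`, and `origin() -> Dim::Coordinate`.
    let mut setup = BoardSetup::new(dim);

    let mut ship = setup
        .add_ship(ShipRef(1), Line::new(3))
        .expect("id 1 has not been added yet");

    // `get_placements` yields every legal projection starting at the given
    // coordinate; it is empty if nothing fits there.
    let placement = ship
        .get_placements(origin())
        .next()
        .expect("some projection fits at the origin");
    ship.place(placement).expect("the chosen cells are free");

    // Once every added ship is placed, the setup can become a `Board`.
    assert!(setup.ready());
    let _board = setup.start().ok().expect("all ships placed");
}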
setup.rs
// Copyright 2020 Zachary Stewart // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Implements the setup phase of the board. use std::collections::{hash_map::Entry, HashMap}; use crate::{ board::{AddShipError, Board, CannotPlaceReason, Dimensions, Grid, PlaceError}, ships::{ProjectIter, ShapeProjection, ShipId, ShipShape}, }; /// Reference to a particular ship's placement info as well as the grid, providing access /// to the methods necessary to check its placement status. pub struct ShipEntry<'a, I, D: Dimensions, S> { /// ID of this ship. id: I, /// Grid that the ship may occupy. grid: &'a Grid<I, D>, /// Placement info for the ship. ship: &'a ShipPlacementInfo<S, D::Coordinate>, } impl<'a, I: ShipId, D: Dimensions, S: ShipShape<D>> ShipEntry<'a, I, D, S> { /// If the ship is placed, get the placement. Otherwise return `None`. // Has to be specialized for mut and non-mut because mut variants can't return a // projection that lives as long as 'a, since that would potentially alias the &mut // ref. With a const ref, we can give back a ref that lives as long as self rather // than just as long as this method call. pub fn placement(&self) -> Option<&'a ShapeProjection<D::Coordinate>> { self.ship.placement.as_ref() } } /// Reference to a particular ship's placement info as well as the grid, providing access /// to the methods necessary to check its placement status and place or unplace it. pub struct ShipEntryMut<'a, I, D: Dimensions, S> { /// ID of this ship id: I, /// Grid that ships are being placed into. grid: &'a mut Grid<I, D>, /// Back ref to the ship. ship: &'a mut ShipPlacementInfo<S, D::Coordinate>, } /// Implementation of the shared parts of ShipEntry. macro_rules! ship_entry_shared { ($t:ident) => { impl<'a, I: ShipId, D: Dimensions, S: ShipShape<D>> $t<'a, I, D, S> { /// Get the ID of this ship. pub fn id(&self) -> &I { &self.id } /// Returns true if this ship has been placed. pub fn placed(&self) -> bool { self.ship.placement.is_some() } /// Get an iterator over possible projections of the shape for this ship that /// start from the given [`Coordinate`]. If there are no possible placements /// from the given coordinate, including if the coordinate is out of bounds, /// the resulting iterator will be empty. pub fn get_placements( &self, coord: D::Coordinate, ) -> ProjectIter<D, S::ProjectIterState> { self.ship.shape.project(coord, &self.grid.dim) } /// Check if the specified placement is valid for this ship.
pub fn check_placement( &self, placement: &ShapeProjection<D::Coordinate>, ) -> Result<(), CannotPlaceReason> { if self.placed() { Err(CannotPlaceReason::AlreadyPlaced) } else if !self .ship .shape .is_valid_placement(placement, &self.grid.dim) { Err(CannotPlaceReason::InvalidProjection) } else { for coord in placement.iter() { match self.grid.get(coord) { None => return Err(CannotPlaceReason::InvalidProjection), Some(cell) if cell.ship.is_some() => { return Err(CannotPlaceReason::AlreadyOccupied) } _ => {} } } Ok(()) } } } }; } ship_entry_shared!(ShipEntry); ship_entry_shared!(ShipEntryMut); impl<'a, I: ShipId, D: Dimensions, S: ShipShape<D>> ShipEntryMut<'a, I, D, S> { /// If the ship is placed, get the placement. Otherwise return `None`. // Has to be specialized for mut and non-mut because mut variants can't return a // projection that lives as long as 'a, since that would potentially alias the &mut // ref. pub fn placement(&self) -> Option<&ShapeProjection<D::Coordinate>> { self.ship.placement.as_ref() } /// Attempts to place the ship onto the given coordinates. If the ship is already /// placed, returns `Err` with the attempted placement and reason placement failed, /// otherwise returns `Ok(())` pub fn place( &mut self, placement: ShapeProjection<D::Coordinate>, ) -> Result<(), PlaceError<ShapeProjection<D::Coordinate>>> { if self.placed() { Err(PlaceError::new(CannotPlaceReason::AlreadyPlaced, placement)) } else if !self .ship .shape .is_valid_placement(&placement, &self.grid.dim) { Err(PlaceError::new( CannotPlaceReason::InvalidProjection, placement, )) } else { for coord in placement.iter() { match self.grid.get(coord) { None => { // ShipShape should ensure that all coordinates are valid, but don't // trust it. return Err(PlaceError::new( CannotPlaceReason::InvalidProjection, placement,
placement, )); } _ => {} } } // Already ensured that every position is valid and not occupied. for coord in placement.iter() { self.grid[coord].ship = Some(self.id.to_owned()); } self.ship.placement = Some(placement); Ok(()) } } /// Attempt to clear the placement of the ship. Returns the previous placement of the /// ship if any. Returns `None` if the ship has not been placed. pub fn unplace(&mut self) -> Option<ShapeProjection<D::Coordinate>> { self.ship.placement.take().map(|placement| { for coord in placement.iter() { // We should only allow placement on valid cells, so unwrap is fine. self.grid[coord].ship = None; } placement }) } } /// Contains a ship's shape and current placement status in the grid. struct ShipPlacementInfo<S, C> { /// Shape being placed. shape: S, /// Placement of this ship, if it has been placed. placement: Option<ShapeProjection<C>>, } /// Setup phase for a [`Board`]. Allows placing ships and does not allow shooting. pub struct BoardSetup<I: ShipId, D: Dimensions, S: ShipShape<D>> { /// Grid for placement of ships. grid: Grid<I, D>, /// Mapping of added ShipIds to corresponding placement info. ships: HashMap<I, ShipPlacementInfo<S, D::Coordinate>>, } impl<I: ShipId, D: Dimensions, S: ShipShape<D>> BoardSetup<I, D, S> { /// Begin game setup by constructing a new board with the given [`Dimensions`]. pub fn new(dim: D) -> Self { Self { grid: Grid::new(dim), ships: HashMap::new(), } } /// Get the [`Dimensions`] of this [`Board`]. pub fn dimensions(&self) -> &D { &self.grid.dim } /// Tries to start the game. If all ships are placed, returns a [`Board`] with the /// current placements. If no ships have been added or any ship has not been placed, /// returns self. pub fn start(self) -> Result<Board<I, D>, Self> { if !self.ready() { Err(self) } else { Ok(Board { grid: self.grid, ships: self .ships .into_iter() .map(|(id, info)| match info.placement { Some(placement) => (id, placement), None => unreachable!(), }) .collect(), }) } } /// Checks if this board is ready to start. Returns `true` if at least one ship has /// been added and all ships are placed. pub fn ready(&self) -> bool { !self.ships.is_empty() && self.ships.values().all(|ship| ship.placement.is_some()) } /// Get an iterator over the ships configured on this board. pub fn iter_ships(&self) -> impl Iterator<Item = ShipEntry<I, D, S>> { let grid = &self.grid; self.ships.iter().map(move |(id, ship)| ShipEntry { id: id.clone(), grid, ship, }) } /// Attempts to add a ship with the given ID. If the given ShipID is already used, /// returns the shape passed to this function. Otherwise adds the shape and returns /// the ShipEntryMut for it to allow placement. pub fn add_ship( &mut self, id: I, shape: S, ) -> Result<ShipEntryMut<I, D, S>, AddShipError<I, S>> { match self.ships.entry(id.clone()) { Entry::Occupied(_) => Err(AddShipError::new(id, shape)), Entry::Vacant(entry) => { let ship = entry.insert(ShipPlacementInfo { shape, placement: None, }); Ok(ShipEntryMut { id, grid: &mut self.grid, ship, }) } } } /// Get the [`ShipEntry`] for the ship with the specified ID if such a ship exists. pub fn get_ship(&self, id: I) -> Option<ShipEntry<I, D, S>> { let grid = &self.grid; self.ships .get(&id) .map(move |ship| ShipEntry { id, grid, ship }) } /// Get the [`ShipEntryMut`] for the ship with the specified ID if such a ship exists.
pub fn get_ship_mut(&mut self, id: I) -> Option<ShipEntryMut<I, D, S>> { let grid = &mut self.grid; self.ships .get_mut(&id) .map(move |ship| ShipEntryMut { id, grid, ship }) } /// Get the ID of the ship placed at the specified coordinate if any. Returns None if /// the coordinate is out of bounds or no ship was placed on the specified point. pub fn get_coord(&self, coord: &D::Coordinate) -> Option<&I> { self.grid.get(coord).and_then(|cell| cell.ship.as_ref()) } }
)); } Some(cell) if cell.ship.is_some() => { return Err(PlaceError::new( CannotPlaceReason::AlreadyOccupied,
random_line_split
buffered.rs
the same file or network socket. It does not /// help when reading very large amounts at once, or reading just one or a few /// times. It also provides no advantage when reading from a source that is /// already in memory, like a `Vec<u8>`. /// /// When the `BufReader<R>` is dropped, the contents of its buffer will be /// discarded. Creating multiple instances of a `BufReader<R>` on the same /// stream can cause data loss. Reading from the underlying reader after /// unwrapping the `BufReader<R>` with `BufReader::into_inner` can also cause /// data loss. /// /// [`Read`]:../../std/io/trait.Read.html /// [`TcpStream::read`]:../../std/net/struct.TcpStream.html#method.read /// [`TcpStream`]:../../std/net/struct.TcpStream.html /// pub struct BufReader<R> { inner: R, buf: Box<[u8]>, pos: usize, cap: usize, } impl<R: Read> BufReader<R> { /// Creates a new `BufReader<R>` with a default buffer capacity. The default is currently 8 KB, /// but may change in the future. /// pub fn new(inner: R) -> BufReader<R> { BufReader::with_capacity(DEFAULT_BUF_SIZE, inner) } /// Creates a new `BufReader<R>` with the specified buffer capacity. /// pub fn with_capacity(capacity: usize, inner: R) -> BufReader<R> { unsafe { let mut buffer = Vec::with_capacity(capacity); buffer.set_len(capacity); inner.initializer().initialize(&mut buffer); BufReader { inner, buf: buffer.into_boxed_slice(), pos: 0, cap: 0 } } } } impl<R> BufReader<R> { /// Gets a reference to the underlying reader. /// /// It is inadvisable to directly read from the underlying reader. /// pub fn get_ref(&self) -> &R { &self.inner } /// Gets a mutable reference to the underlying reader. /// /// It is inadvisable to directly read from the underlying reader. /// pub fn get_mut(&mut self) -> &mut R { &mut self.inner } /// Returns a reference to the internally buffered data. /// /// Unlike `fill_buf`, this will not attempt to fill the buffer if it is empty. /// pub fn buffer(&self) -> &[u8] { &self.buf[self.pos..self.cap] } /// Returns the number of bytes the internal buffer can hold at once. /// pub fn capacity(&self) -> usize { self.buf.len() } /// Unwraps this `BufReader<R>`, returning the underlying reader. /// /// Note that any leftover data in the internal buffer is lost. Therefore, /// a following read from the underlying reader may lead to data loss. /// pub fn into_inner(self) -> R { self.inner } /// Invalidates all data in the internal buffer. #[inline] fn discard_buffer(&mut self) { self.pos = 0; self.cap = 0; } } impl<R: Seek> BufReader<R> { /// Seeks relative to the current position. If the new position lies within the buffer, /// the buffer will not be flushed, allowing for more efficient seeks. /// This method does not return the location of the underlying reader, so the caller /// must track this information themselves if it is required. pub fn seek_relative(&mut self, offset: i64) -> io::Result<()> { let pos = self.pos as u64; if offset < 0 { if let Some(new_pos) = pos.checked_sub((-offset) as u64) { self.pos = new_pos as usize; return Ok(()); } } else { if let Some(new_pos) = pos.checked_add(offset as u64) { if new_pos <= self.cap as u64 { self.pos = new_pos as usize; return Ok(()); } } } self.seek(SeekFrom::Current(offset)).map(drop) } } impl<R: Read> Read for BufReader<R> { fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { // If we don't have any buffered data and we're doing a massive read // (larger than our internal buffer), bypass our internal buffer // entirely. 
if self.pos == self.cap && buf.len() >= self.buf.len() { self.discard_buffer(); return self.inner.read(buf); } let nread = { let mut rem = self.fill_buf()?; rem.read(buf)? }; self.consume(nread); Ok(nread) } fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> { let total_len = bufs.iter().map(|b| b.len()).sum::<usize>(); if self.pos == self.cap && total_len >= self.buf.len() { self.discard_buffer(); return self.inner.read_vectored(bufs); } let nread = { let mut rem = self.fill_buf()?; rem.read_vectored(bufs)? }; self.consume(nread); Ok(nread) } // we can't skip unconditionally because of the large buffer case in read. unsafe fn initializer(&self) -> Initializer { self.inner.initializer() } } impl<R: Read> BufRead for BufReader<R> { fn fill_buf(&mut self) -> io::Result<&[u8]> { // If we've reached the end of our internal buffer then we need to fetch // some more data from the underlying reader. // Branch using `>=` instead of the more correct `==` // to tell the compiler that the pos..cap slice is always valid. if self.pos >= self.cap { debug_assert!(self.pos == self.cap); self.cap = self.inner.read(&mut self.buf)?; self.pos = 0; } Ok(&self.buf[self.pos..self.cap]) } fn consume(&mut self, amt: usize) { self.pos = cmp::min(self.pos + amt, self.cap); } } impl<R> fmt::Debug for BufReader<R> where R: fmt::Debug, { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt.debug_struct("BufReader") .field("reader", &self.inner) .field("buffer", &format_args!("{}/{}", self.cap - self.pos, self.buf.len())) .finish() } } impl<R: Seek> Seek for BufReader<R> { /// Seek to an offset, in bytes, in the underlying reader. /// /// The position used for seeking with `SeekFrom::Current(_)` is the /// position the underlying reader would be at if the `BufReader<R>` had no /// internal buffer. /// /// Seeking always discards the internal buffer, even if the seek position /// would otherwise fall within it. This guarantees that calling /// `.into_inner()` immediately after a seek yields the underlying reader /// at the same position. /// /// To seek without discarding the internal buffer, use [`BufReader::seek_relative`]. /// /// See [`std::io::Seek`] for more details. /// /// Note: In the edge case where you're seeking with `SeekFrom::Current(n)` /// where `n` minus the internal buffer length overflows an `i64`, two /// seeks will be performed instead of one. If the second seek returns /// `Err`, the underlying reader will be left at the same position it would /// have if you called `seek` with `SeekFrom::Current(0)`. /// /// [`BufReader::seek_relative`]: struct.BufReader.html#method.seek_relative /// [`std::io::Seek`]: trait.Seek.html fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> { let result: u64; if let SeekFrom::Current(n) = pos { let remainder = (self.cap - self.pos) as i64; // it should be safe to assume that remainder fits within an i64 as the alternative // means we managed to allocate 8 exbibytes and that's absurd. // But it's not out of the realm of possibility for some weird underlying reader to // support seeking by i64::min_value() so we need to handle underflow when subtracting // remainder. if let Some(offset) = n.checked_sub(remainder) { result = self.inner.seek(SeekFrom::Current(offset))?; } else { // seek backwards by our remainder, and then by the offset self.inner.seek(SeekFrom::Current(-remainder))?; self.discard_buffer(); result = self.inner.seek(SeekFrom::Current(n))?; } } else { // Seeking with Start/End doesn't care about our buffer length. 
result = self.inner.seek(pos)?; } self.discard_buffer(); Ok(result) } } /// Wraps a writer and buffers its output. /// /// It can be excessively inefficient to work directly with something that /// implements [`Write`]. For example, every call to /// [`write`][`TcpStream::write`] on [`TcpStream`] results in a system call. A /// `BufWriter<W>` keeps an in-memory buffer of data and writes it to an underlying /// writer in large, infrequent batches. /// /// `BufWriter<W>` can improve the speed of programs that make *small* and /// *repeated* write calls to the same file or network socket. It does not /// help when writing very large amounts at once, or writing just one or a few /// times. It also provides no advantage when writing to a destination that is /// in memory, like a `Vec<u8>`. /// /// It is critical to call [`flush`] before `BufWriter<W>` is dropped. Though /// dropping will attempt to flush the the contents of the buffer, any errors /// that happen in the process of dropping will be ignored. Calling [`flush`] /// ensures that the buffer is empty and thus dropping will not even attempt /// file operations. /// /// By wrapping the stream with a `BufWriter<W>`, these ten writes are all grouped /// together by the buffer and will all be written out in one system call when /// the `stream` is flushed. /// /// [`Write`]:../../std/io/trait.Write.html /// [`TcpStream::write`]:../../std/net/struct.TcpStream.html#method.write /// [`TcpStream`]:../../std/net/struct.TcpStream.html /// [`flush`]: #method.flush pub struct BufWriter<W: Write> { inner: Option<W>, buf: Vec<u8>, // #30888: If the inner writer panics in a call to write, we don't want to // write the buffered data a second time in BufWriter's destructor. This // flag tells the Drop impl if it should skip the flush. panicked: bool, } /// An error returned by `into_inner` which combines an error that /// happened while writing out the buffer, and the buffered writer object /// which may be used to recover from the condition. /// #[derive(Debug)] pub struct IntoInnerError<W>(W, Error); impl<W: Write> BufWriter<W> { /// Creates a new `BufWriter<W>` with a default buffer capacity. The default is currently 8 KB, /// but may change in the future. /// pub fn new(inner: W) -> BufWriter<W> { BufWriter::with_capacity(DEFAULT_BUF_SIZE, inner) } /// Creates a new `BufWriter<W>` with the specified buffer capacity. /// pub fn with_capacity(capacity: usize, inner: W) -> BufWriter<W> { BufWriter { inner: Some(inner), buf: Vec::with_capacity(capacity), panicked: false } } fn flush_buf(&mut self) -> io::Result<()> { let mut written = 0; let len = self.buf.len(); let mut ret = Ok(()); while written < len { self.panicked = true; let r = self.inner.as_mut().unwrap().write(&self.buf[written..]); self.panicked = false; match r { Ok(0) => { ret = Err(Error::new(ErrorKind::WriteZero, "failed to write the buffered data")); break; } Ok(n) => written += n, Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {} Err(e) => { ret = Err(e); break; } } } if written > 0 { self.buf.drain(..written); } ret } /// Gets a reference to the underlying writer. /// pub fn get_ref(&self) -> &W { self.inner.as_ref().unwrap() } /// Gets a mutable reference to the underlying writer. /// /// It is inadvisable to directly write to the underlying writer. /// pub fn get_mut(&mut self) -> &mut W { self.inner.as_mut().unwrap() } /// Returns a reference to the internally buffered data. 
/// pub fn buffer(&self) -> &[u8] { &self.buf } /// Returns the number of bytes the internal buffer can hold without flushing. /// pub fn capacity(&self) -> usize { self.buf.capacity() } /// Unwraps this `BufWriter<W>`, returning the underlying writer. /// /// The buffer is written out before returning the writer. /// /// # Errors /// /// An `Err` will be returned if an error occurs while flushing the buffer. /// pub fn into_inner(mut self) -> Result<W, IntoInnerError<BufWriter<W>>> { match self.flush_buf() { Err(e) => Err(IntoInnerError(self, e)), Ok(()) => Ok(self.inner.take().unwrap()), } } } impl<W: Write> Write for BufWriter<W> { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { if self.buf.len() + buf.len() > self.buf.capacity() { self.flush_buf()?; } if buf.len() >= self.buf.capacity() { self.panicked = true; let r = self.get_mut().write(buf); self.panicked = false; r } else { self.buf.write(buf) } } fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> { let total_len = bufs.iter().map(|b| b.len()).sum::<usize>(); if self.buf.len() + total_len > self.buf.capacity() { self.flush_buf()?; } if total_len >= self.buf.capacity() { self.panicked = true; let r = self.get_mut().write_vectored(bufs); self.panicked = false; r } else { self.buf.write_vectored(bufs) } } fn flush(&mut self) -> io::Result<()> { self.flush_buf().and_then(|()| self.get_mut().flush()) } } impl<W: Write> fmt::Debug for BufWriter<W> where W: fmt::Debug, { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt.debug_struct("BufWriter") .field("writer", &self.inner.as_ref().unwrap()) .field("buffer", &format_args!("{}/{}", self.buf.len(), self.buf.capacity())) .finish() } } impl<W: Write + Seek> Seek for BufWriter<W> { /// Seek to the offset, in bytes, in the underlying writer. /// /// Seeking always writes out the internal buffer before seeking. fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> { self.flush_buf().and_then(|_| self.get_mut().seek(pos)) } } impl<W: Write> Drop for BufWriter<W> { fn drop(&mut self) { if self.inner.is_some() &&!self.panicked { // dtors should not panic, so we ignore a failed flush let _r = self.flush_buf(); } } } impl<W> IntoInnerError<W> { /// Returns the error which caused the call to `into_inner()` to fail. /// /// This error was returned when attempting to write the internal buffer. /// pub fn error(&self) -> &Error { &self.1 } /// Returns the buffered writer instance which generated the error. /// /// The returned object can be used for error recovery, such as /// re-inspecting the buffer. /// pub fn into_inner(self) -> W { self.0 } } impl<W> From<IntoInnerError<W>> for Error { fn from(iie: IntoInnerError<W>) -> Error { iie.1 } } impl<W: Send + fmt::Debug> error::Error for IntoInnerError<W> { fn description(&self) -> &str { error::Error::description(self.error()) } } impl<W> fmt::Display for IntoInnerError<W> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.error().fmt(f) } } /// Wraps a writer and buffers output to it, flushing whenever a newline /// (`0x0a`, `'\n'`) is detected. /// /// The [`BufWriter`][bufwriter] struct wraps a writer and buffers its output. /// But it only does this batched write when it goes out of scope, or when the /// internal buffer is full. Sometimes, you'd prefer to write each line as it's /// completed, rather than the entire buffer at once. Enter `LineWriter`. It /// does exactly that. 
/// /// Like [`BufWriter`][bufwriter], a `LineWriter`’s buffer will also be flushed when the /// `LineWriter` goes out of scope or when its internal buffer is full. /// /// [bufwriter]: struct.BufWriter.html /// /// If there's still a partial line in the buffer when the `LineWriter` is /// dropped, it will flush those contents. /// pub struct LineWriter<W: Write> { inner: BufWriter<W>, need_flush: bool, } impl<W: Write> LineWriter<W> { /// Creates a new `LineWriter`. /// pub fn new(inner: W) -> LineWriter<W> { // Lines typically aren't that long, don't use a giant buffer LineWriter::with_capacity(1024, inner) } /// Creates a new `LineWriter` with a specified capacity for the internal /// buffer. /// pub fn with_capacity(capacity: usize, inner: W) -> LineWriter<W> { LineWriter { inner: BufWriter::with_capacity(capacity, inner), need_flush: false } } /// Gets a reference to the underlying writer. /// pub fn ge
self) -> &W { self.inner.get_ref() } /// Gets a mutable reference to the underlying writer. /// /// Caution must be taken when calling methods on the mutable reference /// returned as extra writes could corrupt the output stream. /// pub fn get_mut(&mut self) -> &mut W { self.inner.get_mut() } /// Unwraps this `LineWriter`, returning the underlying writer. /// /// The internal buffer is written out before returning the writer. /// /// # Errors /// /// An `Err` will be returned if an error occurs while flushing the buffer. /// pub fn into_inner(self) -> Result<W, IntoInnerError<LineWriter<W>>> { self.inner.into_inner().map_err(|IntoInnerError(buf, e)| { IntoInnerError(LineWriter { inner: buf, need_flush: false }, e) }) } } impl<W: Write> Write for LineWriter<W> { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { if self.need_flush { self.flush()?; } // Find the last newline character in the buffer provided. If found then // we're going to write all the data up to that point and then flush, // otherwise we just write the whole block to the underlying writer. let i = match memchr::memrchr(b'\n', buf) { Some(i) => i, None => return self.inner.write(buf), }; // Ok, we're going to write a partial amount of the data given first // followed by flushing the newline. After we've successfully written // some data then we *must* report that we wrote that data, so future // errors are ignored. We set our internal `need_flush` flag, though, in // case flushing fails and we need to try it first next time. let n = self.inner.write(&buf[..=i])?; self.need_flush = true; if self.flush().is_err() || n!= i + 1 { return Ok(n); } // At this point we successfully wrote `i + 1` bytes and flushed it out, // meaning that the entire line is now flushed out on the screen. While // we can attempt to finish writing the rest of the data provided. // Remember though that we ignore errors here as we've successfully // written data, so we need to report that. match self.inner.write(&buf[i + 1..]) { Ok(i) => Ok(n + i), Err(_) => Ok(n), } } // Vectored writes are very similar to the writes above, but adjusted for // the list of buffers that we have to write. fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> { if self.need_flush { self.flush()?; } // Find the last newline, and failing that write the whole buffer let last_newline = bufs .iter() .enumerate() .rev() .filter_map(|(i, buf)| { let pos = memchr::memrchr(b'\n', buf)?; Some((i, pos)) }) .next(); let (i, j) = match last_newline { Some(pair) => pair, None => return self.inner.write_vectored(bufs), }; let (prefix, suffix) = bufs.split_at(i); let (buf, suffix) = suffix.split_at(1);
t_ref(&
identifier_name
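This row masks an identifier (the model must complete `ge` into `get_ref`), and its context is the `BufReader` API documented above. As a companion, here is a minimal usage sketch of that API over an in-memory `Cursor`; as the doc comments themselves note, buffering an in-memory source gives no real benefit, and it is used here only to keep the sketch self-contained.

use std::io::{BufRead, BufReader, Cursor, Read};

fn main() -> std::io::Result<()> {
    let data = Cursor::new(b"hello\nworld\n".to_vec());
    let mut reader = BufReader::with_capacity(8, data);

    // Nothing has been read yet, so the internal buffer is empty.
    assert!(reader.buffer().is_empty());
    assert_eq!(reader.capacity(), 8);

    // fill_buf pulls up to `capacity` bytes from the underlying reader
    // without consuming them; consume() then advances the position.
    let available = reader.fill_buf()?.len();
    assert_eq!(available, 8);
    reader.consume(available);

    // With the buffer drained, a read at least as large as the buffer
    // bypasses it entirely, per the fast path in `read` above.
    let mut rest = [0u8; 16];
    let n = reader.read(&mut rest)?;
    assert_eq!(&rest[..n], b"rld\n");

    // into_inner discards any still-buffered data and returns the reader.
    let _inner = reader.into_inner();
    Ok(())
}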
buffered.rs
the same file or network socket. It does not /// help when reading very large amounts at once, or reading just one or a few /// times. It also provides no advantage when reading from a source that is /// already in memory, like a `Vec<u8>`. /// /// When the `BufReader<R>` is dropped, the contents of its buffer will be /// discarded. Creating multiple instances of a `BufReader<R>` on the same /// stream can cause data loss. Reading from the underlying reader after /// unwrapping the `BufReader<R>` with `BufReader::into_inner` can also cause /// data loss. /// /// [`Read`]:../../std/io/trait.Read.html /// [`TcpStream::read`]:../../std/net/struct.TcpStream.html#method.read /// [`TcpStream`]:../../std/net/struct.TcpStream.html /// pub struct BufReader<R> { inner: R, buf: Box<[u8]>, pos: usize, cap: usize, } impl<R: Read> BufReader<R> { /// Creates a new `BufReader<R>` with a default buffer capacity. The default is currently 8 KB, /// but may change in the future. /// pub fn new(inner: R) -> BufReader<R> { BufReader::with_capacity(DEFAULT_BUF_SIZE, inner) } /// Creates a new `BufReader<R>` with the specified buffer capacity. /// pub fn with_capacity(capacity: usize, inner: R) -> BufReader<R> { unsafe { let mut buffer = Vec::with_capacity(capacity); buffer.set_len(capacity); inner.initializer().initialize(&mut buffer); BufReader { inner, buf: buffer.into_boxed_slice(), pos: 0, cap: 0 } } } } impl<R> BufReader<R> { /// Gets a reference to the underlying reader. /// /// It is inadvisable to directly read from the underlying reader. /// pub fn get_ref(&self) -> &R { &self.inner } /// Gets a mutable reference to the underlying reader. /// /// It is inadvisable to directly read from the underlying reader. /// pub fn get_mut(&mut self) -> &mut R { &mut self.inner } /// Returns a reference to the internally buffered data. /// /// Unlike `fill_buf`, this will not attempt to fill the buffer if it is empty. /// pub fn buffer(&self) -> &[u8] { &self.buf[self.pos..self.cap] } /// Returns the number of bytes the internal buffer can hold at once. /// pub fn capacity(&self) -> usize { self.buf.len() } /// Unwraps this `BufReader<R>`, returning the underlying reader. /// /// Note that any leftover data in the internal buffer is lost. Therefore, /// a following read from the underlying reader may lead to data loss. /// pub fn into_inner(self) -> R { self.inner } /// Invalidates all data in the internal buffer. #[inline] fn discard_buffer(&mut self) { self.pos = 0; self.cap = 0; } } impl<R: Seek> BufReader<R> { /// Seeks relative to the current position. If the new position lies within the buffer, /// the buffer will not be flushed, allowing for more efficient seeks. /// This method does not return the location of the underlying reader, so the caller /// must track this information themselves if it is required. pub fn seek_relative(&mut self, offset: i64) -> io::Result<()> { let pos = self.pos as u64; if offset < 0 { if let Some(new_pos) = pos.checked_sub((-offset) as u64) { self.pos = new_pos as usize; return Ok(()); } } else { if let Some(new_pos) = pos.checked_add(offset as u64) { if new_pos <= self.cap as u64 { self.pos = new_pos as usize; return Ok(()); } } } self.seek(SeekFrom::Current(offset)).map(drop) } } impl<R: Read> Read for BufReader<R> { fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { // If we don't have any buffered data and we're doing a massive read // (larger than our internal buffer), bypass our internal buffer // entirely. 
if self.pos == self.cap && buf.len() >= self.buf.len() { self.discard_buffer(); return self.inner.read(buf); } let nread = { let mut rem = self.fill_buf()?; rem.read(buf)? }; self.consume(nread); Ok(nread) } fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> { let total_len = bufs.iter().map(|b| b.len()).sum::<usize>(); if self.pos == self.cap && total_len >= self.buf.len() { self.discard_buffer(); return self.inner.read_vectored(bufs); } let nread = { let mut rem = self.fill_buf()?; rem.read_vectored(bufs)? }; self.consume(nread); Ok(nread) } // we can't skip unconditionally because of the large buffer case in read. unsafe fn initializer(&self) -> Initializer { self.inner.initializer() } } impl<R: Read> BufRead for BufReader<R> { fn fill_buf(&mut self) -> io::Result<&[u8]> { // If we've reached the end of our internal buffer then we need to fetch // some more data from the underlying reader. // Branch using `>=` instead of the more correct `==` // to tell the compiler that the pos..cap slice is always valid. if self.pos >= self.cap { debug_assert!(self.pos == self.cap); self.cap = self.inner.read(&mut self.buf)?; self.pos = 0; } Ok(&self.buf[self.pos..self.cap]) } fn consume(&mut self, amt: usize) { self.pos = cmp::min(self.pos + amt, self.cap); } } impl<R> fmt::Debug for BufReader<R> where R: fmt::Debug, { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt.debug_struct("BufReader") .field("reader", &self.inner) .field("buffer", &format_args!("{}/{}", self.cap - self.pos, self.buf.len())) .finish() } } impl<R: Seek> Seek for BufReader<R> { /// Seek to an offset, in bytes, in the underlying reader. /// /// The position used for seeking with `SeekFrom::Current(_)` is the /// position the underlying reader would be at if the `BufReader<R>` had no /// internal buffer. /// /// Seeking always discards the internal buffer, even if the seek position /// would otherwise fall within it. This guarantees that calling /// `.into_inner()` immediately after a seek yields the underlying reader /// at the same position. /// /// To seek without discarding the internal buffer, use [`BufReader::seek_relative`]. /// /// See [`std::io::Seek`] for more details. /// /// Note: In the edge case where you're seeking with `SeekFrom::Current(n)` /// where `n` minus the internal buffer length overflows an `i64`, two /// seeks will be performed instead of one. If the second seek returns /// `Err`, the underlying reader will be left at the same position it would /// have if you called `seek` with `SeekFrom::Current(0)`. /// /// [`BufReader::seek_relative`]: struct.BufReader.html#method.seek_relative /// [`std::io::Seek`]: trait.Seek.html fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> { let result: u64; if let SeekFrom::Current(n) = pos { let remainder = (self.cap - self.pos) as i64; // it should be safe to assume that remainder fits within an i64 as the alternative // means we managed to allocate 8 exbibytes and that's absurd. // But it's not out of the realm of possibility for some weird underlying reader to // support seeking by i64::min_value() so we need to handle underflow when subtracting // remainder. if let Some(offset) = n.checked_sub(remainder) { result = self.inner.seek(SeekFrom::Current(offset))?; } else { // seek backwards by our remainder, and then by the offset self.inner.seek(SeekFrom::Current(-remainder))?; self.discard_buffer(); result = self.inner.seek(SeekFrom::Current(n))?; } } else { // Seeking with Start/End doesn't care about our buffer length. 
result = self.inner.seek(pos)?; } self.discard_buffer(); Ok(result) } } /// Wraps a writer and buffers its output. /// /// It can be excessively inefficient to work directly with something that /// implements [`Write`]. For example, every call to /// [`write`][`TcpStream::write`] on [`TcpStream`] results in a system call. A /// `BufWriter<W>` keeps an in-memory buffer of data and writes it to an underlying /// writer in large, infrequent batches. /// /// `BufWriter<W>` can improve the speed of programs that make *small* and /// *repeated* write calls to the same file or network socket. It does not /// help when writing very large amounts at once, or writing just one or a few /// times. It also provides no advantage when writing to a destination that is /// in memory, like a `Vec<u8>`. /// /// It is critical to call [`flush`] before `BufWriter<W>` is dropped. Though /// dropping will attempt to flush the the contents of the buffer, any errors /// that happen in the process of dropping will be ignored. Calling [`flush`] /// ensures that the buffer is empty and thus dropping will not even attempt /// file operations. /// /// By wrapping the stream with a `BufWriter<W>`, these ten writes are all grouped /// together by the buffer and will all be written out in one system call when /// the `stream` is flushed. /// /// [`Write`]:../../std/io/trait.Write.html /// [`TcpStream::write`]:../../std/net/struct.TcpStream.html#method.write /// [`TcpStream`]:../../std/net/struct.TcpStream.html /// [`flush`]: #method.flush pub struct BufWriter<W: Write> { inner: Option<W>, buf: Vec<u8>, // #30888: If the inner writer panics in a call to write, we don't want to // write the buffered data a second time in BufWriter's destructor. This // flag tells the Drop impl if it should skip the flush. panicked: bool, } /// An error returned by `into_inner` which combines an error that /// happened while writing out the buffer, and the buffered writer object /// which may be used to recover from the condition. /// #[derive(Debug)] pub struct IntoInnerError<W>(W, Error); impl<W: Write> BufWriter<W> { /// Creates a new `BufWriter<W>` with a default buffer capacity. The default is currently 8 KB, /// but may change in the future. /// pub fn new(inner: W) -> BufWriter<W> { BufWriter::with_capacity(DEFAULT_BUF_SIZE, inner) } /// Creates a new `BufWriter<W>` with the specified buffer capacity. /// pub fn with_capacity(capacity: usize, inner: W) -> BufWriter<W> { BufWriter { inner: Some(inner), buf: Vec::with_capacity(capacity), panicked: false } } fn flush_buf(&mut self) -> io::Result<()> { let mut written = 0; let len = self.buf.len(); let mut ret = Ok(()); while written < len { self.panicked = true; let r = self.inner.as_mut().unwrap().write(&self.buf[written..]); self.panicked = false; match r { Ok(0) => { ret = Err(Error::new(ErrorKind::WriteZero, "failed to write the buffered data")); break; } Ok(n) => written += n, Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {} Err(e) => { ret = Err(e); break; } } } if written > 0 { self.buf.drain(..written); } ret } /// Gets a reference to the underlying writer. /// pub fn get_ref(&self) -> &W { self.inner.as_ref().unwrap() } /// Gets a mutable reference to the underlying writer. /// /// It is inadvisable to directly write to the underlying writer. /// pub fn get_mut(&mut self) -> &mut W { self.inner.as_mut().unwrap() } /// Returns a reference to the internally buffered data. 
/// pub fn buffer(&self) -> &[u8] { &self.buf } /// Returns the number of bytes the internal buffer can hold without flushing. /// pub fn capacity(&self) -> usize { self.buf.capacity() } /// Unwraps this `BufWriter<W>`, returning the underlying writer. /// /// The buffer is written out before returning the writer. /// /// # Errors /// /// An `Err` will be returned if an error occurs while flushing the buffer. /// pub fn into_inner(mut self) -> Result<W, IntoInnerError<BufWriter<W>>> { match self.flush_buf() { Err(e) => Err(IntoInnerError(self, e)), Ok(()) => Ok(self.inner.take().unwrap()), } } } impl<W: Write> Write for BufWriter<W> { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { if self.buf.len() + buf.len() > self.buf.capacity() { self.flush_buf()?; } if buf.len() >= self.buf.capacity() { self.panicked = true; let r = self.get_mut().write(buf); self.panicked = false; r } else { self.buf.write(buf) } } fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> { let total_len = bufs.iter().map(|b| b.len()).sum::<usize>(); if self.buf.len() + total_len > self.buf.capacity()
if total_len >= self.buf.capacity() { self.panicked = true; let r = self.get_mut().write_vectored(bufs); self.panicked = false; r } else { self.buf.write_vectored(bufs) } } fn flush(&mut self) -> io::Result<()> { self.flush_buf().and_then(|()| self.get_mut().flush()) } } impl<W: Write> fmt::Debug for BufWriter<W> where W: fmt::Debug, { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt.debug_struct("BufWriter") .field("writer", &self.inner.as_ref().unwrap()) .field("buffer", &format_args!("{}/{}", self.buf.len(), self.buf.capacity())) .finish() } } impl<W: Write + Seek> Seek for BufWriter<W> { /// Seek to the offset, in bytes, in the underlying writer. /// /// Seeking always writes out the internal buffer before seeking. fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> { self.flush_buf().and_then(|_| self.get_mut().seek(pos)) } } impl<W: Write> Drop for BufWriter<W> { fn drop(&mut self) { if self.inner.is_some() &&!self.panicked { // dtors should not panic, so we ignore a failed flush let _r = self.flush_buf(); } } } impl<W> IntoInnerError<W> { /// Returns the error which caused the call to `into_inner()` to fail. /// /// This error was returned when attempting to write the internal buffer. /// pub fn error(&self) -> &Error { &self.1 } /// Returns the buffered writer instance which generated the error. /// /// The returned object can be used for error recovery, such as /// re-inspecting the buffer. /// pub fn into_inner(self) -> W { self.0 } } impl<W> From<IntoInnerError<W>> for Error { fn from(iie: IntoInnerError<W>) -> Error { iie.1 } } impl<W: Send + fmt::Debug> error::Error for IntoInnerError<W> { fn description(&self) -> &str { error::Error::description(self.error()) } } impl<W> fmt::Display for IntoInnerError<W> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.error().fmt(f) } } /// Wraps a writer and buffers output to it, flushing whenever a newline /// (`0x0a`, `'\n'`) is detected. /// /// The [`BufWriter`][bufwriter] struct wraps a writer and buffers its output. /// But it only does this batched write when it goes out of scope, or when the /// internal buffer is full. Sometimes, you'd prefer to write each line as it's /// completed, rather than the entire buffer at once. Enter `LineWriter`. It /// does exactly that. /// /// Like [`BufWriter`][bufwriter], a `LineWriter`’s buffer will also be flushed when the /// `LineWriter` goes out of scope or when its internal buffer is full. /// /// [bufwriter]: struct.BufWriter.html /// /// If there's still a partial line in the buffer when the `LineWriter` is /// dropped, it will flush those contents. /// pub struct LineWriter<W: Write> { inner: BufWriter<W>, need_flush: bool, } impl<W: Write> LineWriter<W> { /// Creates a new `LineWriter`. /// pub fn new(inner: W) -> LineWriter<W> { // Lines typically aren't that long, don't use a giant buffer LineWriter::with_capacity(1024, inner) } /// Creates a new `LineWriter` with a specified capacity for the internal /// buffer. /// pub fn with_capacity(capacity: usize, inner: W) -> LineWriter<W> { LineWriter { inner: BufWriter::with_capacity(capacity, inner), need_flush: false } } /// Gets a reference to the underlying writer. /// pub fn get_ref(&self) -> &W { self.inner.get_ref() } /// Gets a mutable reference to the underlying writer. /// /// Caution must be taken when calling methods on the mutable reference /// returned as extra writes could corrupt the output stream. 
/// pub fn get_mut(&mut self) -> &mut W { self.inner.get_mut() } /// Unwraps this `LineWriter`, returning the underlying writer. /// /// The internal buffer is written out before returning the writer. /// /// # Errors /// /// An `Err` will be returned if an error occurs while flushing the buffer. /// pub fn into_inner(self) -> Result<W, IntoInnerError<LineWriter<W>>> { self.inner.into_inner().map_err(|IntoInnerError(buf, e)| { IntoInnerError(LineWriter { inner: buf, need_flush: false }, e) }) } } impl<W: Write> Write for LineWriter<W> { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { if self.need_flush { self.flush()?; } // Find the last newline character in the buffer provided. If found then // we're going to write all the data up to that point and then flush, // otherwise we just write the whole block to the underlying writer. let i = match memchr::memrchr(b'\n', buf) { Some(i) => i, None => return self.inner.write(buf), }; // Ok, we're going to write a partial amount of the data given first // followed by flushing the newline. After we've successfully written // some data then we *must* report that we wrote that data, so future // errors are ignored. We set our internal `need_flush` flag, though, in // case flushing fails and we need to try it first next time. let n = self.inner.write(&buf[..=i])?; self.need_flush = true; if self.flush().is_err() || n!= i + 1 { return Ok(n); } // At this point we successfully wrote `i + 1` bytes and flushed it out, // meaning that the entire line is now flushed out on the screen. While // we can attempt to finish writing the rest of the data provided. // Remember though that we ignore errors here as we've successfully // written data, so we need to report that. match self.inner.write(&buf[i + 1..]) { Ok(i) => Ok(n + i), Err(_) => Ok(n), } } // Vectored writes are very similar to the writes above, but adjusted for // the list of buffers that we have to write. fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> { if self.need_flush { self.flush()?; } // Find the last newline, and failing that write the whole buffer let last_newline = bufs .iter() .enumerate() .rev() .filter_map(|(i, buf)| { let pos = memchr::memrchr(b'\n', buf)?; Some((i, pos)) }) .next(); let (i, j) = match last_newline { Some(pair) => pair, None => return self.inner.write_vectored(bufs), }; let (prefix, suffix) = bufs.split_at(i); let (buf, suffix) = suffix.split_at(1);
{ self.flush_buf()?; }
conditional_block
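The masked span in this row is the `{ self.flush_buf()?; }` branch inside `write_vectored`. To make that branch concrete, here is a minimal sketch of the flush-then-bypass behaviour of `BufWriter::write` shown above, with a `Vec<u8>` standing in as the sink; an in-memory writer is exactly the case where buffering buys nothing, but it keeps the sketch self-contained.

use std::io::{BufWriter, Write};

fn main() -> std::io::Result<()> {
    let sink: Vec<u8> = Vec::new();
    let mut writer = BufWriter::with_capacity(8, sink);

    // Small writes accumulate in the internal buffer.
    writer.write_all(b"abc")?;
    assert_eq!(writer.buffer(), b"abc");

    // A write that would overflow the buffer first triggers flush_buf();
    // a payload at least as large as the capacity then goes straight to
    // the underlying writer, mirroring the branch structure above.
    writer.write_all(b"0123456789")?;
    assert!(writer.buffer().is_empty());

    // into_inner() flushes whatever remains and hands back the sink.
    let sink = writer.into_inner().expect("flush failed");
    assert_eq!(&sink[..], b"abc0123456789");
    Ok(())
}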
buffered.rs
to the same file or network socket. It does not /// help when reading very large amounts at once, or reading just one or a few /// times. It also provides no advantage when reading from a source that is /// already in memory, like a `Vec<u8>`. /// /// When the `BufReader<R>` is dropped, the contents of its buffer will be /// discarded. Creating multiple instances of a `BufReader<R>` on the same /// stream can cause data loss. Reading from the underlying reader after /// unwrapping the `BufReader<R>` with `BufReader::into_inner` can also cause /// data loss. /// /// [`Read`]:../../std/io/trait.Read.html /// [`TcpStream::read`]:../../std/net/struct.TcpStream.html#method.read /// [`TcpStream`]:../../std/net/struct.TcpStream.html /// pub struct BufReader<R> { inner: R, buf: Box<[u8]>, pos: usize, cap: usize, } impl<R: Read> BufReader<R> { /// Creates a new `BufReader<R>` with a default buffer capacity. The default is currently 8 KB, /// but may change in the future. /// pub fn new(inner: R) -> BufReader<R> { BufReader::with_capacity(DEFAULT_BUF_SIZE, inner) } /// Creates a new `BufReader<R>` with the specified buffer capacity. /// pub fn with_capacity(capacity: usize, inner: R) -> BufReader<R> { unsafe { let mut buffer = Vec::with_capacity(capacity); buffer.set_len(capacity); inner.initializer().initialize(&mut buffer); BufReader { inner, buf: buffer.into_boxed_slice(), pos: 0, cap: 0 } } } } impl<R> BufReader<R> { /// Gets a reference to the underlying reader. /// /// It is inadvisable to directly read from the underlying reader. /// pub fn get_ref(&self) -> &R { &self.inner } /// Gets a mutable reference to the underlying reader. /// /// It is inadvisable to directly read from the underlying reader. /// pub fn get_mut(&mut self) -> &mut R { &mut self.inner } /// Returns a reference to the internally buffered data. /// /// Unlike `fill_buf`, this will not attempt to fill the buffer if it is empty. /// pub fn buffer(&self) -> &[u8] { &self.buf[self.pos..self.cap] } /// Returns the number of bytes the internal buffer can hold at once. /// pub fn capacity(&self) -> usize { self.buf.len() } /// Unwraps this `BufReader<R>`, returning the underlying reader. /// /// Note that any leftover data in the internal buffer is lost. Therefore, /// a following read from the underlying reader may lead to data loss. /// pub fn into_inner(self) -> R { self.inner } /// Invalidates all data in the internal buffer. #[inline] fn discard_buffer(&mut self) { self.pos = 0; self.cap = 0; } } impl<R: Seek> BufReader<R> { /// Seeks relative to the current position. If the new position lies within the buffer, /// the buffer will not be flushed, allowing for more efficient seeks. /// This method does not return the location of the underlying reader, so the caller /// must track this information themselves if it is required. pub fn seek_relative(&mut self, offset: i64) -> io::Result<()> { let pos = self.pos as u64; if offset < 0 { if let Some(new_pos) = pos.checked_sub((-offset) as u64) { self.pos = new_pos as usize; return Ok(()); } } else { if let Some(new_pos) = pos.checked_add(offset as u64) { if new_pos <= self.cap as u64 { self.pos = new_pos as usize; return Ok(()); } } } self.seek(SeekFrom::Current(offset)).map(drop) } } impl<R: Read> Read for BufReader<R> { fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { // If we don't have any buffered data and we're doing a massive read // (larger than our internal buffer), bypass our internal buffer // entirely. 
if self.pos == self.cap && buf.len() >= self.buf.len() { self.discard_buffer(); return self.inner.read(buf); } let nread = { let mut rem = self.fill_buf()?; rem.read(buf)? }; self.consume(nread); Ok(nread) } fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> { let total_len = bufs.iter().map(|b| b.len()).sum::<usize>(); if self.pos == self.cap && total_len >= self.buf.len() { self.discard_buffer(); return self.inner.read_vectored(bufs); } let nread = { let mut rem = self.fill_buf()?; rem.read_vectored(bufs)? }; self.consume(nread); Ok(nread) } // we can't skip unconditionally because of the large buffer case in read. unsafe fn initializer(&self) -> Initializer { self.inner.initializer() } } impl<R: Read> BufRead for BufReader<R> { fn fill_buf(&mut self) -> io::Result<&[u8]> { // If we've reached the end of our internal buffer then we need to fetch // some more data from the underlying reader. // Branch using `>=` instead of the more correct `==` // to tell the compiler that the pos..cap slice is always valid. if self.pos >= self.cap { debug_assert!(self.pos == self.cap); self.cap = self.inner.read(&mut self.buf)?; self.pos = 0; } Ok(&self.buf[self.pos..self.cap]) } fn consume(&mut self, amt: usize) { self.pos = cmp::min(self.pos + amt, self.cap); } } impl<R> fmt::Debug for BufReader<R> where R: fmt::Debug, { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt.debug_struct("BufReader") .field("reader", &self.inner) .field("buffer", &format_args!("{}/{}", self.cap - self.pos, self.buf.len())) .finish() } } impl<R: Seek> Seek for BufReader<R> { /// Seek to an offset, in bytes, in the underlying reader. /// /// The position used for seeking with `SeekFrom::Current(_)` is the /// position the underlying reader would be at if the `BufReader<R>` had no /// internal buffer. /// /// Seeking always discards the internal buffer, even if the seek position /// would otherwise fall within it. This guarantees that calling /// `.into_inner()` immediately after a seek yields the underlying reader /// at the same position. /// /// To seek without discarding the internal buffer, use [`BufReader::seek_relative`]. /// /// See [`std::io::Seek`] for more details. /// /// Note: In the edge case where you're seeking with `SeekFrom::Current(n)` /// where `n` minus the internal buffer length overflows an `i64`, two /// seeks will be performed instead of one. If the second seek returns /// `Err`, the underlying reader will be left at the same position it would /// have if you called `seek` with `SeekFrom::Current(0)`. /// /// [`BufReader::seek_relative`]: struct.BufReader.html#method.seek_relative /// [`std::io::Seek`]: trait.Seek.html fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> { let result: u64; if let SeekFrom::Current(n) = pos { let remainder = (self.cap - self.pos) as i64; // it should be safe to assume that remainder fits within an i64 as the alternative // means we managed to allocate 8 exbibytes and that's absurd. // But it's not out of the realm of possibility for some weird underlying reader to // support seeking by i64::min_value() so we need to handle underflow when subtracting // remainder. if let Some(offset) = n.checked_sub(remainder) { result = self.inner.seek(SeekFrom::Current(offset))?; } else { // seek backwards by our remainder, and then by the offset self.inner.seek(SeekFrom::Current(-remainder))?; self.discard_buffer(); result = self.inner.seek(SeekFrom::Current(n))?; } } else { // Seeking with Start/End doesn't care about our buffer length. 
result = self.inner.seek(pos)?; } self.discard_buffer(); Ok(result) } } /// Wraps a writer and buffers its output. /// /// It can be excessively inefficient to work directly with something that /// implements [`Write`]. For example, every call to /// [`write`][`TcpStream::write`] on [`TcpStream`] results in a system call. A /// `BufWriter<W>` keeps an in-memory buffer of data and writes it to an underlying /// writer in large, infrequent batches. /// /// `BufWriter<W>` can improve the speed of programs that make *small* and /// *repeated* write calls to the same file or network socket. It does not /// help when writing very large amounts at once, or writing just one or a few /// times. It also provides no advantage when writing to a destination that is /// in memory, like a `Vec<u8>`. /// /// It is critical to call [`flush`] before `BufWriter<W>` is dropped. Though /// dropping will attempt to flush the the contents of the buffer, any errors /// that happen in the process of dropping will be ignored. Calling [`flush`] /// ensures that the buffer is empty and thus dropping will not even attempt /// file operations. /// /// By wrapping the stream with a `BufWriter<W>`, these ten writes are all grouped /// together by the buffer and will all be written out in one system call when /// the `stream` is flushed. /// /// [`Write`]:../../std/io/trait.Write.html /// [`TcpStream::write`]:../../std/net/struct.TcpStream.html#method.write /// [`TcpStream`]:../../std/net/struct.TcpStream.html /// [`flush`]: #method.flush pub struct BufWriter<W: Write> { inner: Option<W>, buf: Vec<u8>, // #30888: If the inner writer panics in a call to write, we don't want to // write the buffered data a second time in BufWriter's destructor. This // flag tells the Drop impl if it should skip the flush. panicked: bool, } /// An error returned by `into_inner` which combines an error that /// happened while writing out the buffer, and the buffered writer object /// which may be used to recover from the condition. /// #[derive(Debug)] pub struct IntoInnerError<W>(W, Error); impl<W: Write> BufWriter<W> { /// Creates a new `BufWriter<W>` with a default buffer capacity. The default is currently 8 KB, /// but may change in the future. /// pub fn new(inner: W) -> BufWriter<W> { BufWriter::with_capacity(DEFAULT_BUF_SIZE, inner) } /// Creates a new `BufWriter<W>` with the specified buffer capacity. /// pub fn with_capacity(capacity: usize, inner: W) -> BufWriter<W> { BufWriter { inner: Some(inner), buf: Vec::with_capacity(capacity), panicked: false } } fn flush_buf(&mut self) -> io::Result<()> { let mut written = 0; let len = self.buf.len(); let mut ret = Ok(()); while written < len { self.panicked = true; let r = self.inner.as_mut().unwrap().write(&self.buf[written..]); self.panicked = false; match r { Ok(0) => { ret = Err(Error::new(ErrorKind::WriteZero, "failed to write the buffered data")); break; } Ok(n) => written += n, Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {} Err(e) => { ret = Err(e); break; } } } if written > 0 { self.buf.drain(..written); } ret } /// Gets a reference to the underlying writer. /// pub fn get_ref(&self) -> &W { self.inner.as_ref().unwrap() } /// Gets a mutable reference to the underlying writer. /// /// It is inadvisable to directly write to the underlying writer. /// pub fn get_mut(&mut self) -> &mut W { self.inner.as_mut().unwrap() } /// Returns a reference to the internally buffered data. 
/// pub fn buffer(&self) -> &[u8] { &self.buf } /// Returns the number of bytes the internal buffer can hold without flushing. /// pub fn capacity(&self) -> usize { self.buf.capacity() } /// Unwraps this `BufWriter<W>`, returning the underlying writer. /// /// The buffer is written out before returning the writer. /// /// # Errors /// /// An `Err` will be returned if an error occurs while flushing the buffer. /// pub fn into_inner(mut self) -> Result<W, IntoInnerError<BufWriter<W>>> { match self.flush_buf() { Err(e) => Err(IntoInnerError(self, e)), Ok(()) => Ok(self.inner.take().unwrap()), } } } impl<W: Write> Write for BufWriter<W> { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { if self.buf.len() + buf.len() > self.buf.capacity() { self.flush_buf()?; } if buf.len() >= self.buf.capacity() { self.panicked = true; let r = self.get_mut().write(buf); self.panicked = false; r } else { self.buf.write(buf) } } fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> { let total_len = bufs.iter().map(|b| b.len()).sum::<usize>(); if self.buf.len() + total_len > self.buf.capacity() { self.flush_buf()?; } if total_len >= self.buf.capacity() { self.panicked = true; let r = self.get_mut().write_vectored(bufs); self.panicked = false; r } else { self.buf.write_vectored(bufs) } } fn flush(&mut self) -> io::Result<()> { self.flush_buf().and_then(|()| self.get_mut().flush()) } } impl<W: Write> fmt::Debug for BufWriter<W> where W: fmt::Debug, { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt.debug_struct("BufWriter") .field("writer", &self.inner.as_ref().unwrap()) .field("buffer", &format_args!("{}/{}", self.buf.len(), self.buf.capacity())) .finish() } } impl<W: Write + Seek> Seek for BufWriter<W> { /// Seek to the offset, in bytes, in the underlying writer. /// /// Seeking always writes out the internal buffer before seeking. fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> { self.flush_buf().and_then(|_| self.get_mut().seek(pos)) } } impl<W: Write> Drop for BufWriter<W> { fn drop(&mut self) { if self.inner.is_some() &&!self.panicked { // dtors should not panic, so we ignore a failed flush let _r = self.flush_buf(); } } } impl<W> IntoInnerError<W> { /// Returns the error which caused the call to `into_inner()` to fail. /// /// This error was returned when attempting to write the internal buffer. /// pub fn error(&self) -> &Error { &self.1 } /// Returns the buffered writer instance which generated the error. /// /// The returned object can be used for error recovery, such as /// re-inspecting the buffer. /// pub fn into_inner(self) -> W { self.0 } } impl<W> From<IntoInnerError<W>> for Error { fn from(iie: IntoInnerError<W>) -> Error { iie.1 } } impl<W: Send + fmt::Debug> error::Error for IntoInnerError<W> { fn description(&self) -> &str { error::Error::description(self.error()) } } impl<W> fmt::Display for IntoInnerError<W> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.error().fmt(f) } } /// Wraps a writer and buffers output to it, flushing whenever a newline /// (`0x0a`, `'\n'`) is detected. /// /// The [`BufWriter`][bufwriter] struct wraps a writer and buffers its output. /// But it only does this batched write when it goes out of scope, or when the /// internal buffer is full. Sometimes, you'd prefer to write each line as it's /// completed, rather than the entire buffer at once. Enter `LineWriter`. It /// does exactly that. 
/// /// Like [`BufWriter`][bufwriter], a `LineWriter`’s buffer will also be flushed when the /// `LineWriter` goes out of scope or when its internal buffer is full. /// /// [bufwriter]: struct.BufWriter.html /// /// If there's still a partial line in the buffer when the `LineWriter` is /// dropped, it will flush those contents. /// pub struct LineWriter<W: Write> { inner: BufWriter<W>, need_flush: bool, } impl<W: Write> LineWriter<W> { /// Creates a new `LineWriter`. /// pub fn new(inner: W) -> LineWriter<W> { // Lines typically aren't that long, don't use a giant buffer LineWriter::with_capacity(1024, inner) } /// Creates a new `LineWriter` with a specified capacity for the internal /// buffer. /// pub fn with_capacity(capacity: usize, inner: W) -> LineWriter<W> { LineWriter { inner: BufWriter::with_capacity(capacity, inner), need_flush: false } } /// Gets a reference to the underlying writer. /// pub fn get_ref(&self) -> &W { self.inner.get_ref() } /// Gets a mutable reference to the underlying writer. /// /// Caution must be taken when calling methods on the mutable reference /// returned as extra writes could corrupt the output stream. /// pub fn get_mut(&mut self) -> &mut W { self.inner.get_mut() } /// Unwraps this `LineWriter`, returning the underlying writer. /// /// The internal buffer is written out before returning the writer. /// /// # Errors /// /// An `Err` will be returned if an error occurs while flushing the buffer. /// pub fn into_inner(self) -> Result<W, IntoInnerError<LineWriter<W>>> {
impl<W: Write> Write for LineWriter<W> { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { if self.need_flush { self.flush()?; } // Find the last newline character in the buffer provided. If found then // we're going to write all the data up to that point and then flush, // otherwise we just write the whole block to the underlying writer. let i = match memchr::memrchr(b'\n', buf) { Some(i) => i, None => return self.inner.write(buf), }; // Ok, we're going to write a partial amount of the data given first // followed by flushing the newline. After we've successfully written // some data then we *must* report that we wrote that data, so future // errors are ignored. We set our internal `need_flush` flag, though, in // case flushing fails and we need to try it first next time. let n = self.inner.write(&buf[..=i])?; self.need_flush = true; if self.flush().is_err() || n!= i + 1 { return Ok(n); } // At this point we successfully wrote `i + 1` bytes and flushed it out, // meaning that the entire line is now flushed out on the screen. While // we can attempt to finish writing the rest of the data provided. // Remember though that we ignore errors here as we've successfully // written data, so we need to report that. match self.inner.write(&buf[i + 1..]) { Ok(i) => Ok(n + i), Err(_) => Ok(n), } } // Vectored writes are very similar to the writes above, but adjusted for // the list of buffers that we have to write. fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> { if self.need_flush { self.flush()?; } // Find the last newline, and failing that write the whole buffer let last_newline = bufs .iter() .enumerate() .rev() .filter_map(|(i, buf)| { let pos = memchr::memrchr(b'\n', buf)?; Some((i, pos)) }) .next(); let (i, j) = match last_newline { Some(pair) => pair, None => return self.inner.write_vectored(bufs), }; let (prefix, suffix) = bufs.split_at(i); let (buf, suffix) = suffix.split_at(1);
self.inner.into_inner().map_err(|IntoInnerError(buf, e)| { IntoInnerError(LineWriter { inner: buf, need_flush: false }, e) }) } }
identifier_body
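Here the masked span is the body of `LineWriter::into_inner`. To round out the row, a minimal sketch of the newline-driven flushing that the `LineWriter::write` implementation above performs, again over a `Vec<u8>` sink for self-containment:

use std::io::{LineWriter, Write};

fn main() -> std::io::Result<()> {
    let sink: Vec<u8> = Vec::new();
    let mut writer = LineWriter::new(sink);

    // Everything up to and including the last '\n' is written through
    // immediately; the trailing partial line stays buffered.
    writer.write_all(b"first line\npartial")?;
    assert_eq!(writer.get_ref().as_slice(), b"first line\n");

    // The partial tail reaches the sink on an explicit flush (dropping
    // the LineWriter would also attempt this, ignoring any errors).
    writer.flush()?;
    assert_eq!(writer.get_ref().as_slice(), b"first line\npartial");

    // into_inner() writes out the internal buffer and returns the writer.
    let sink = writer.into_inner().expect("flush failed");
    assert_eq!(&sink[..], b"first line\npartial");
    Ok(())
}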
buffered.rs
calls to the same file or network socket. It does not /// help when reading very large amounts at once, or reading just one or a few /// times. It also provides no advantage when reading from a source that is /// already in memory, like a `Vec<u8>`. /// /// When the `BufReader<R>` is dropped, the contents of its buffer will be /// discarded. Creating multiple instances of a `BufReader<R>` on the same /// stream can cause data loss. Reading from the underlying reader after /// unwrapping the `BufReader<R>` with `BufReader::into_inner` can also cause /// data loss. /// /// [`Read`]:../../std/io/trait.Read.html /// [`TcpStream::read`]:../../std/net/struct.TcpStream.html#method.read /// [`TcpStream`]:../../std/net/struct.TcpStream.html /// pub struct BufReader<R> { inner: R, buf: Box<[u8]>, pos: usize, cap: usize, } impl<R: Read> BufReader<R> { /// Creates a new `BufReader<R>` with a default buffer capacity. The default is currently 8 KB, /// but may change in the future. /// pub fn new(inner: R) -> BufReader<R> { BufReader::with_capacity(DEFAULT_BUF_SIZE, inner) } /// Creates a new `BufReader<R>` with the specified buffer capacity. /// pub fn with_capacity(capacity: usize, inner: R) -> BufReader<R> { unsafe { let mut buffer = Vec::with_capacity(capacity); buffer.set_len(capacity); inner.initializer().initialize(&mut buffer); BufReader { inner, buf: buffer.into_boxed_slice(), pos: 0, cap: 0 } } } } impl<R> BufReader<R> { /// Gets a reference to the underlying reader. /// /// It is inadvisable to directly read from the underlying reader. /// pub fn get_ref(&self) -> &R { &self.inner } /// Gets a mutable reference to the underlying reader. /// /// It is inadvisable to directly read from the underlying reader. /// pub fn get_mut(&mut self) -> &mut R { &mut self.inner } /// Returns a reference to the internally buffered data. /// /// Unlike `fill_buf`, this will not attempt to fill the buffer if it is empty. /// pub fn buffer(&self) -> &[u8] { &self.buf[self.pos..self.cap] } /// Returns the number of bytes the internal buffer can hold at once. /// pub fn capacity(&self) -> usize { self.buf.len() } /// Unwraps this `BufReader<R>`, returning the underlying reader. /// /// Note that any leftover data in the internal buffer is lost. Therefore, /// a following read from the underlying reader may lead to data loss. /// pub fn into_inner(self) -> R { self.inner } /// Invalidates all data in the internal buffer. #[inline] fn discard_buffer(&mut self) { self.pos = 0; self.cap = 0; } } impl<R: Seek> BufReader<R> { /// Seeks relative to the current position. If the new position lies within the buffer, /// the buffer will not be flushed, allowing for more efficient seeks. /// This method does not return the location of the underlying reader, so the caller /// must track this information themselves if it is required. pub fn seek_relative(&mut self, offset: i64) -> io::Result<()> { let pos = self.pos as u64; if offset < 0 { if let Some(new_pos) = pos.checked_sub((-offset) as u64) { self.pos = new_pos as usize; return Ok(()); } } else { if let Some(new_pos) = pos.checked_add(offset as u64) { if new_pos <= self.cap as u64 { self.pos = new_pos as usize; return Ok(()); } } } self.seek(SeekFrom::Current(offset)).map(drop) } } impl<R: Read> Read for BufReader<R> { fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { // If we don't have any buffered data and we're doing a massive read // (larger than our internal buffer), bypass our internal buffer // entirely. 
if self.pos == self.cap && buf.len() >= self.buf.len() { self.discard_buffer(); return self.inner.read(buf); } let nread = { let mut rem = self.fill_buf()?; rem.read(buf)? }; self.consume(nread); Ok(nread) } fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> { let total_len = bufs.iter().map(|b| b.len()).sum::<usize>(); if self.pos == self.cap && total_len >= self.buf.len() { self.discard_buffer(); return self.inner.read_vectored(bufs); } let nread = { let mut rem = self.fill_buf()?; rem.read_vectored(bufs)? }; self.consume(nread); Ok(nread) } // we can't skip unconditionally because of the large buffer case in read. unsafe fn initializer(&self) -> Initializer { self.inner.initializer() } } impl<R: Read> BufRead for BufReader<R> { fn fill_buf(&mut self) -> io::Result<&[u8]> { // If we've reached the end of our internal buffer then we need to fetch // some more data from the underlying reader. // Branch using `>=` instead of the more correct `==` // to tell the compiler that the pos..cap slice is always valid. if self.pos >= self.cap { debug_assert!(self.pos == self.cap); self.cap = self.inner.read(&mut self.buf)?; self.pos = 0; } Ok(&self.buf[self.pos..self.cap]) } fn consume(&mut self, amt: usize) { self.pos = cmp::min(self.pos + amt, self.cap); } } impl<R> fmt::Debug for BufReader<R> where R: fmt::Debug, { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt.debug_struct("BufReader") .field("reader", &self.inner) .field("buffer", &format_args!("{}/{}", self.cap - self.pos, self.buf.len())) .finish() } } impl<R: Seek> Seek for BufReader<R> { /// Seek to an offset, in bytes, in the underlying reader. /// /// The position used for seeking with `SeekFrom::Current(_)` is the /// position the underlying reader would be at if the `BufReader<R>` had no /// internal buffer. /// /// Seeking always discards the internal buffer, even if the seek position /// would otherwise fall within it. This guarantees that calling /// `.into_inner()` immediately after a seek yields the underlying reader /// at the same position. /// /// To seek without discarding the internal buffer, use [`BufReader::seek_relative`]. /// /// See [`std::io::Seek`] for more details. /// /// Note: In the edge case where you're seeking with `SeekFrom::Current(n)` /// where `n` minus the internal buffer length overflows an `i64`, two /// seeks will be performed instead of one. If the second seek returns /// `Err`, the underlying reader will be left at the same position it would /// have if you called `seek` with `SeekFrom::Current(0)`. /// /// [`BufReader::seek_relative`]: struct.BufReader.html#method.seek_relative /// [`std::io::Seek`]: trait.Seek.html fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> { let result: u64; if let SeekFrom::Current(n) = pos { let remainder = (self.cap - self.pos) as i64; // it should be safe to assume that remainder fits within an i64 as the alternative // means we managed to allocate 8 exbibytes and that's absurd. // But it's not out of the realm of possibility for some weird underlying reader to // support seeking by i64::min_value() so we need to handle underflow when subtracting // remainder. if let Some(offset) = n.checked_sub(remainder) { result = self.inner.seek(SeekFrom::Current(offset))?; } else { // seek backwards by our remainder, and then by the offset self.inner.seek(SeekFrom::Current(-remainder))?; self.discard_buffer(); result = self.inner.seek(SeekFrom::Current(n))?; } } else { // Seeking with Start/End doesn't care about our buffer length. 
result = self.inner.seek(pos)?; } self.discard_buffer(); Ok(result) } } /// Wraps a writer and buffers its output. /// /// It can be excessively inefficient to work directly with something that /// implements [`Write`]. For example, every call to /// [`write`][`TcpStream::write`] on [`TcpStream`] results in a system call. A /// `BufWriter<W>` keeps an in-memory buffer of data and writes it to an underlying /// writer in large, infrequent batches. /// /// `BufWriter<W>` can improve the speed of programs that make *small* and /// *repeated* write calls to the same file or network socket. It does not /// help when writing very large amounts at once, or writing just one or a few /// times. It also provides no advantage when writing to a destination that is /// in memory, like a `Vec<u8>`. /// /// It is critical to call [`flush`] before `BufWriter<W>` is dropped. Though /// dropping will attempt to flush the the contents of the buffer, any errors /// that happen in the process of dropping will be ignored. Calling [`flush`] /// ensures that the buffer is empty and thus dropping will not even attempt /// file operations. /// /// By wrapping the stream with a `BufWriter<W>`, these ten writes are all grouped /// together by the buffer and will all be written out in one system call when /// the `stream` is flushed. /// /// [`Write`]:../../std/io/trait.Write.html /// [`TcpStream::write`]:../../std/net/struct.TcpStream.html#method.write /// [`TcpStream`]:../../std/net/struct.TcpStream.html /// [`flush`]: #method.flush pub struct BufWriter<W: Write> { inner: Option<W>, buf: Vec<u8>, // #30888: If the inner writer panics in a call to write, we don't want to // write the buffered data a second time in BufWriter's destructor. This // flag tells the Drop impl if it should skip the flush. panicked: bool, } /// An error returned by `into_inner` which combines an error that /// happened while writing out the buffer, and the buffered writer object /// which may be used to recover from the condition. /// #[derive(Debug)] pub struct IntoInnerError<W>(W, Error); impl<W: Write> BufWriter<W> { /// Creates a new `BufWriter<W>` with a default buffer capacity. The default is currently 8 KB, /// but may change in the future. /// pub fn new(inner: W) -> BufWriter<W> { BufWriter::with_capacity(DEFAULT_BUF_SIZE, inner) } /// Creates a new `BufWriter<W>` with the specified buffer capacity. /// pub fn with_capacity(capacity: usize, inner: W) -> BufWriter<W> { BufWriter { inner: Some(inner), buf: Vec::with_capacity(capacity), panicked: false } } fn flush_buf(&mut self) -> io::Result<()> { let mut written = 0; let len = self.buf.len(); let mut ret = Ok(()); while written < len { self.panicked = true; let r = self.inner.as_mut().unwrap().write(&self.buf[written..]); self.panicked = false; match r { Ok(0) => { ret = Err(Error::new(ErrorKind::WriteZero, "failed to write the buffered data")); break; } Ok(n) => written += n, Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {} Err(e) => { ret = Err(e); break; } } } if written > 0 { self.buf.drain(..written); } ret } /// Gets a reference to the underlying writer. /// pub fn get_ref(&self) -> &W { self.inner.as_ref().unwrap() } /// Gets a mutable reference to the underlying writer. /// /// It is inadvisable to directly write to the underlying writer. /// pub fn get_mut(&mut self) -> &mut W { self.inner.as_mut().unwrap() } /// Returns a reference to the internally buffered data. 
/// pub fn buffer(&self) -> &[u8] { &self.buf } /// Returns the number of bytes the internal buffer can hold without flushing. /// pub fn capacity(&self) -> usize { self.buf.capacity() } /// Unwraps this `BufWriter<W>`, returning the underlying writer. /// /// The buffer is written out before returning the writer. /// /// # Errors /// /// An `Err` will be returned if an error occurs while flushing the buffer. /// pub fn into_inner(mut self) -> Result<W, IntoInnerError<BufWriter<W>>> { match self.flush_buf() { Err(e) => Err(IntoInnerError(self, e)), Ok(()) => Ok(self.inner.take().unwrap()), } } } impl<W: Write> Write for BufWriter<W> { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { if self.buf.len() + buf.len() > self.buf.capacity() { self.flush_buf()?; } if buf.len() >= self.buf.capacity() { self.panicked = true; let r = self.get_mut().write(buf); self.panicked = false; r } else { self.buf.write(buf) } } fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> { let total_len = bufs.iter().map(|b| b.len()).sum::<usize>(); if self.buf.len() + total_len > self.buf.capacity() { self.flush_buf()?; } if total_len >= self.buf.capacity() { self.panicked = true; let r = self.get_mut().write_vectored(bufs); self.panicked = false; r } else { self.buf.write_vectored(bufs) } } fn flush(&mut self) -> io::Result<()> { self.flush_buf().and_then(|()| self.get_mut().flush()) } } impl<W: Write> fmt::Debug for BufWriter<W> where W: fmt::Debug, { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt.debug_struct("BufWriter") .field("writer", &self.inner.as_ref().unwrap()) .field("buffer", &format_args!("{}/{}", self.buf.len(), self.buf.capacity())) .finish() } } impl<W: Write + Seek> Seek for BufWriter<W> { /// Seek to the offset, in bytes, in the underlying writer. /// /// Seeking always writes out the internal buffer before seeking. fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> { self.flush_buf().and_then(|_| self.get_mut().seek(pos)) } } impl<W: Write> Drop for BufWriter<W> { fn drop(&mut self) { if self.inner.is_some() &&!self.panicked { // dtors should not panic, so we ignore a failed flush let _r = self.flush_buf(); } } } impl<W> IntoInnerError<W> { /// Returns the error which caused the call to `into_inner()` to fail. /// /// This error was returned when attempting to write the internal buffer. /// pub fn error(&self) -> &Error { &self.1 } /// Returns the buffered writer instance which generated the error. /// /// The returned object can be used for error recovery, such as /// re-inspecting the buffer. /// pub fn into_inner(self) -> W { self.0 } } impl<W> From<IntoInnerError<W>> for Error { fn from(iie: IntoInnerError<W>) -> Error { iie.1 } } impl<W: Send + fmt::Debug> error::Error for IntoInnerError<W> { fn description(&self) -> &str { error::Error::description(self.error()) } } impl<W> fmt::Display for IntoInnerError<W> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.error().fmt(f) } } /// Wraps a writer and buffers output to it, flushing whenever a newline /// (`0x0a`, `'\n'`) is detected. /// /// The [`BufWriter`][bufwriter] struct wraps a writer and buffers its output. /// But it only does this batched write when it goes out of scope, or when the /// internal buffer is full. Sometimes, you'd prefer to write each line as it's /// completed, rather than the entire buffer at once. Enter `LineWriter`. It /// does exactly that. 
/// /// Like [`BufWriter`][bufwriter], a `LineWriter`’s buffer will also be flushed when the /// `LineWriter` goes out of scope or when its internal buffer is full. /// /// [bufwriter]: struct.BufWriter.html /// /// If there's still a partial line in the buffer when the `LineWriter` is /// dropped, it will flush those contents. ///
} impl<W: Write> LineWriter<W> { /// Creates a new `LineWriter`. /// pub fn new(inner: W) -> LineWriter<W> { // Lines typically aren't that long, don't use a giant buffer LineWriter::with_capacity(1024, inner) } /// Creates a new `LineWriter` with a specified capacity for the internal /// buffer. /// pub fn with_capacity(capacity: usize, inner: W) -> LineWriter<W> { LineWriter { inner: BufWriter::with_capacity(capacity, inner), need_flush: false } } /// Gets a reference to the underlying writer. /// pub fn get_ref(&self) -> &W { self.inner.get_ref() } /// Gets a mutable reference to the underlying writer. /// /// Caution must be taken when calling methods on the mutable reference /// returned as extra writes could corrupt the output stream. /// pub fn get_mut(&mut self) -> &mut W { self.inner.get_mut() } /// Unwraps this `LineWriter`, returning the underlying writer. /// /// The internal buffer is written out before returning the writer. /// /// # Errors /// /// An `Err` will be returned if an error occurs while flushing the buffer. /// pub fn into_inner(self) -> Result<W, IntoInnerError<LineWriter<W>>> { self.inner.into_inner().map_err(|IntoInnerError(buf, e)| { IntoInnerError(LineWriter { inner: buf, need_flush: false }, e) }) } } impl<W: Write> Write for LineWriter<W> { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { if self.need_flush { self.flush()?; } // Find the last newline character in the buffer provided. If found then // we're going to write all the data up to that point and then flush, // otherwise we just write the whole block to the underlying writer. let i = match memchr::memrchr(b'\n', buf) { Some(i) => i, None => return self.inner.write(buf), }; // Ok, we're going to write a partial amount of the data given first, // followed by flushing the newline. Once we've successfully written // some data we *must* report that we wrote that data, so future // errors are ignored. We set our internal `need_flush` flag, though, in // case flushing fails and we need to try it first next time. let n = self.inner.write(&buf[..=i])?; self.need_flush = true; if self.flush().is_err() || n != i + 1 { return Ok(n); } // At this point we successfully wrote `i + 1` bytes and flushed them, // meaning that the entire line is now flushed to the output. We can now // attempt to finish writing the rest of the data provided. Remember // though that we ignore errors here as we've already successfully // written data, so we need to report that success. match self.inner.write(&buf[i + 1..]) { Ok(i) => Ok(n + i), Err(_) => Ok(n), } } // Vectored writes are very similar to the writes above, but adjusted for // the list of buffers that we have to write. fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> { if self.need_flush { self.flush()?; } // Find the last newline, and failing that write the whole buffer let last_newline = bufs .iter() .enumerate() .rev() .filter_map(|(i, buf)| { let pos = memchr::memrchr(b'\n', buf)?; Some((i, pos)) }) .next(); let (i, j) = match last_newline { Some(pair) => pair, None => return self.inner.write_vectored(bufs), }; let (prefix, suffix) = bufs.split_at(i); let (buf, suffix) = suffix.split_at(1);
pub struct LineWriter<W: Write> { inner: BufWriter<W>, need_flush: bool,
random_line_split
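// A minimal usage sketch for the buffered writers documented in the row
// above, written against the std library API only; the function name below
// is illustrative and not part of the original file.
use std::io::{BufWriter, LineWriter, Write};

fn buffered_writer_demo() -> std::io::Result<()> {
    // BufWriter: small writes accumulate in an in-memory buffer until `flush`
    // (or until the buffer fills), so the inner writer sees few, large writes.
    let mut w = BufWriter::new(Vec::new());
    w.write_all(b"hello ")?;
    w.write_all(b"world")?;
    w.flush()?; // errors during an implicit flush-on-drop would be ignored
    let bytes = w.into_inner().expect("flush already succeeded");
    assert_eq!(&bytes[..], &b"hello world"[..]);

    // LineWriter: flushes whenever a newline is written, so each completed
    // line reaches the inner writer promptly while partial lines stay buffered.
    let mut lw = LineWriter::new(Vec::new());
    lw.write_all(b"line 1\npartial")?; // "line 1\n" is flushed immediately
    lw.flush()?; // push the remaining "partial" bytes out as well
    Ok(())
}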
skim.rs
//! The fuzzy matching algorithm used by skim //! It focuses more on path matching //! //! # Example: //! ```edition2018 //! use fuzzy_matcher::skim::{fuzzy_match, fuzzy_indices}; //! //! assert_eq!(None, fuzzy_match("abc", "abx")); //! assert!(fuzzy_match("axbycz", "abc").is_some()); //! assert!(fuzzy_match("axbycz", "xyz").is_some()); //! //! let (score, indices) = fuzzy_indices("axbycz", "abc").unwrap(); //! assert_eq!(indices, [0, 2, 4]); //! //! ``` //! //! It is modeled after <https://github.com/felipesere/icepick.git> use std::cmp::max; use crate::util::*; const BONUS_MATCHED: i64 = 4; const BONUS_CASE_MATCH: i64 = 4; const BONUS_UPPER_MATCH: i64 = 6; const BONUS_ADJACENCY: i64 = 10; const BONUS_SEPARATOR: i64 = 8; const BONUS_CAMEL: i64 = 8; const PENALTY_CASE_UNMATCHED: i64 = -1; const PENALTY_LEADING: i64 = -6; // penalty applied for every letter before the first match const PENALTY_MAX_LEADING: i64 = -18; // cap on the total penalty for leading letters const PENALTY_UNMATCHED: i64 = -2; pub fn fuzzy_match(choice: &str, pattern: &str) -> Option<i64>
pub fn fuzzy_indices(choice: &str, pattern: &str) -> Option<(i64, Vec<usize>)> { if pattern.is_empty() { return Some((0, Vec::new())); } let mut picked = vec![]; let scores = build_graph(choice, pattern)?; let last_row = &scores[scores.len() - 1]; let (mut next_col, &MatchingStatus { final_score, .. }) = last_row .iter() .enumerate() .max_by_key(|&(_, x)| x.final_score) .expect("fuzzy_indices failed to iterate over last_row"); let mut pat_idx = scores.len() as i64 - 1; while pat_idx >= 0 { let status = scores[pat_idx as usize][next_col]; next_col = status.back_ref; picked.push(status.idx); pat_idx -= 1; } picked.reverse(); Some((final_score, picked)) } #[derive(Clone, Copy, Debug)] struct MatchingStatus { pub idx: usize, pub score: i64, pub final_score: i64, pub adj_num: usize, pub back_ref: usize, } impl Default for MatchingStatus { fn default() -> Self { MatchingStatus { idx: 0, score: 0, final_score: 0, adj_num: 1, back_ref: 0, } } } fn build_graph(choice: &str, pattern: &str) -> Option<Vec<Vec<MatchingStatus>>> { let mut scores = vec![]; let mut match_start_idx = 0; // ensures that later pattern chars can only match after earlier ones let mut pat_prev_ch = '\0'; // initialize the match positions and inline scores for (pat_idx, pat_ch) in pattern.chars().enumerate() { let mut vec = vec![]; let mut choice_prev_ch = '\0'; for (idx, ch) in choice.chars().enumerate() { if ch.to_ascii_lowercase() == pat_ch.to_ascii_lowercase() && idx >= match_start_idx { let score = fuzzy_score(ch, idx, choice_prev_ch, pat_ch, pat_idx, pat_prev_ch); vec.push(MatchingStatus { idx, score, final_score: score, adj_num: 1, back_ref: 0, }); } choice_prev_ch = ch; } if vec.is_empty() { // not matched return None; } match_start_idx = vec[0].idx + 1; scores.push(vec); pat_prev_ch = pat_ch; } // calculate max scores considering adjacent characters for pat_idx in 1..scores.len() { let (first_half, last_half) = scores.split_at_mut(pat_idx); let prev_row = &first_half[first_half.len() - 1]; let cur_row = &mut last_half[0]; for idx in 0..cur_row.len() { let next = cur_row[idx]; let prev = if idx > 0 { cur_row[idx - 1] } else { MatchingStatus::default() }; let mut score_before_idx = prev.final_score - prev.score + next.score; score_before_idx += PENALTY_UNMATCHED * ((next.idx - prev.idx) as i64); score_before_idx -= if prev.adj_num == 0 { BONUS_ADJACENCY } else { 0 }; let (back_ref, score, adj_num) = prev_row .iter() .enumerate() .take_while(|&(_, &MatchingStatus { idx, .. })| idx < next.idx) .skip_while(|&(_, &MatchingStatus { idx, ..
})| idx < prev.idx) .map(|(back_ref, cur)| { let adj_num = next.idx - cur.idx - 1; let mut final_score = cur.final_score + next.score; final_score += if adj_num == 0 { BONUS_ADJACENCY } else { PENALTY_UNMATCHED * adj_num as i64 }; (back_ref, final_score, adj_num) }) .max_by_key(|&(_, x, _)| x) .unwrap_or((prev.back_ref, score_before_idx, prev.adj_num)); cur_row[idx] = if idx > 0 && score < score_before_idx { MatchingStatus { final_score: score_before_idx, back_ref: prev.back_ref, adj_num, ..next } } else { MatchingStatus { final_score: score, back_ref, adj_num, ..next } }; } } Some(scores) } // determine how much score the character at the current index should get fn fuzzy_score( choice_ch: char, choice_idx: usize, choice_prev_ch: char, pat_ch: char, pat_idx: usize, _pat_prev_ch: char, ) -> i64 { let mut score = BONUS_MATCHED; let choice_prev_ch_type = char_type_of(choice_prev_ch); let choice_role = char_role(choice_prev_ch, choice_ch); if pat_ch == choice_ch { if pat_ch.is_uppercase() { score += BONUS_UPPER_MATCH; } else { score += BONUS_CASE_MATCH; } } else { score += PENALTY_CASE_UNMATCHED; } // apply bonus for camelCases if choice_role == CharRole::Head { score += BONUS_CAMEL; } // apply bonus for matches after a separator if choice_prev_ch_type == CharType::Separ { score += BONUS_SEPARATOR; } if pat_idx == 0 { score += max((choice_idx as i64) * PENALTY_LEADING, PENALTY_MAX_LEADING); } score } #[cfg(test)] mod tests { use super::*; fn wrap_matches(line: &str, indices: &[usize]) -> String { let mut ret = String::new(); let mut peekable = indices.iter().peekable(); for (idx, ch) in line.chars().enumerate() { let next_id = **peekable.peek().unwrap_or(&&line.len()); if next_id == idx { ret.push_str(format!("[{}]", ch).as_str()); peekable.next(); } else { ret.push(ch); } } ret } fn filter_and_sort(pattern: &str, lines: &[&'static str]) -> Vec<&'static str> { let mut lines_with_score: Vec<(i64, &'static str)> = lines .iter() .map(|&s| (fuzzy_match(s, pattern).unwrap_or(-(1 << 62)), s)) .collect(); lines_with_score.sort_by_key(|(score, _)| -score); lines_with_score .into_iter() .map(|(_, string)| string) .collect() } fn wrap_fuzzy_match(line: &str, pattern: &str) -> Option<String> { let (_score, indices) = fuzzy_indices(line, pattern)?; Some(wrap_matches(line, &indices)) } fn assert_order(pattern: &str, choices: &[&'static str]) { let result = filter_and_sort(pattern, choices); if result != choices { // debug print println!("pattern: {}", pattern); for &choice in choices.iter() { if let Some((score, indices)) = fuzzy_indices(choice, pattern) { println!("{}: {:?}", score, wrap_matches(choice, &indices)); } else { println!("NO MATCH for {}", choice); } } } assert_eq!(result, choices); } #[test] fn test_match_or_not() { assert_eq!(Some(0), fuzzy_match("", "")); assert_eq!(Some(0), fuzzy_match("abcdefaghi", "")); assert_eq!(None, fuzzy_match("", "a")); assert_eq!(None, fuzzy_match("abcdefaghi", "中")); assert_eq!(None, fuzzy_match("abc", "abx")); assert!(fuzzy_match("axbycz", "abc").is_some()); assert!(fuzzy_match("axbycz", "xyz").is_some()); assert_eq!("[a]x[b]y[c]z", &wrap_fuzzy_match("axbycz", "abc").unwrap()); assert_eq!("a[x]b[y]c[z]", &wrap_fuzzy_match("axbycz", "xyz").unwrap()); assert_eq!( "[H]ello, [世]界", &wrap_fuzzy_match("Hello, 世界", "H世").unwrap() ); } #[test] fn test_match_quality() { // case // assert_order("monad", &["monad", "Monad", "mONAD"]); // initials assert_order("ab", &["ab", "aoo_boo", "acb"]); assert_order("CC", &["CamelCase", "camelCase", "camelcase"]); assert_order("cC", &["camelCase",
"CamelCase", "camelcase"]); assert_order( "cc", &[ "camel case", "camelCase", "camelcase", "CamelCase", "camel ace", ], ); assert_order( "Da.Te", &["Data.Text", "Data.Text.Lazy", "Data.Aeson.Encoding.text"], ); // prefix assert_order("is", &["isIEEE", "inSuf"]); // shorter assert_order("ma", &["map", "many", "maximum"]); assert_order("print", &["printf", "sprintf"]); // score(PRINT) = kMinScore assert_order("ast", &["ast", "AST", "INT_FAST16_MAX"]); // score(PRINT) > kMinScore assert_order("Int", &["int", "INT", "PRINT"]); } }
{ if pattern.is_empty() { return Some(0); } let scores = build_graph(choice, pattern)?; let last_row = &scores[scores.len() - 1]; let (_, &MatchingStatus { final_score, .. }) = last_row .iter() .enumerate() .max_by_key(|&(_, x)| x.final_score) .expect("fuzzy_match failed to iterate over last_row"); Some(final_score) }
identifier_body
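// A short sketch exercising the public API shown in the row above, assuming
// the module is reachable as `fuzzy_matcher::skim` (as its doc comment
// suggests); the demo function name is illustrative.
use fuzzy_matcher::skim::{fuzzy_match, fuzzy_indices};

fn fuzzy_api_demo() {
    // An empty pattern matches anything with score 0.
    assert_eq!(Some(0), fuzzy_match("anything", ""));
    // Pattern characters must occur in order (compared case-insensitively).
    assert_eq!(None, fuzzy_match("abc", "abx"));
    assert!(fuzzy_match("axbycz", "abc").is_some());
    // `fuzzy_indices` additionally reports which choice positions matched.
    let (score, indices) = fuzzy_indices("axbycz", "abc").unwrap();
    assert_eq!(indices, [0, 2, 4]);
    // Higher scores mean better matches; the exact value depends on the
    // bonus/penalty constants defined in the module.
    let _ = score;
}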
skim.rs
///! The fuzzy matching algorithm used by skim ///! It focus more on path matching /// ///! # Example: ///! ```edition2018 ///! use fuzzy_matcher::skim::{fuzzy_match, fuzzy_indices}; ///! ///! assert_eq!(None, fuzzy_match("abc", "abx")); ///! assert!(fuzzy_match("axbycz", "abc").is_some()); ///! assert!(fuzzy_match("axbycz", "xyz").is_some()); ///! ///! let (score, indices) = fuzzy_indices("axbycz", "abc").unwrap(); ///! assert_eq!(indices, [0, 2, 4]); ///! ///! ``` ///! ///! It is modeled after <https://github.com/felipesere/icepick.git> use std::cmp::max; use crate::util::*; const BONUS_MATCHED: i64 = 4; const BONUS_CASE_MATCH: i64 = 4; const BONUS_UPPER_MATCH: i64 = 6; const BONUS_ADJACENCY: i64 = 10; const BONUS_SEPARATOR: i64 = 8; const BONUS_CAMEL: i64 = 8; const PENALTY_CASE_UNMATCHED: i64 = -1; const PENALTY_LEADING: i64 = -6; // penalty applied for every letter before the first match const PENALTY_MAX_LEADING: i64 = -18; // maxing penalty for leading letters const PENALTY_UNMATCHED: i64 = -2; pub fn fuzzy_match(choice: &str, pattern: &str) -> Option<i64> { if pattern.is_empty() { return Some(0); } let scores = build_graph(choice, pattern)?; let last_row = &scores[scores.len() - 1]; let (_, &MatchingStatus { final_score,.. }) = last_row .iter() .enumerate() .max_by_key(|&(_, x)| x.final_score) .expect("fuzzy_indices failed to iterate over last_row"); Some(final_score) } pub fn fuzzy_indices(choice: &str, pattern: &str) -> Option<(i64, Vec<usize>)> { if pattern.is_empty() { return Some((0, Vec::new())); } let mut picked = vec![]; let scores = build_graph(choice, pattern)?; let last_row = &scores[scores.len() - 1]; let (mut next_col, &MatchingStatus { final_score,.. }) = last_row .iter() .enumerate() .max_by_key(|&(_, x)| x.final_score) .expect("fuzzy_indices failed to iterate over last_row"); let mut pat_idx = scores.len() as i64 - 1; while pat_idx >= 0 { let status = scores[pat_idx as usize][next_col]; next_col = status.back_ref; picked.push(status.idx); pat_idx -= 1; } picked.reverse(); Some((final_score, picked)) } #[derive(Clone, Copy, Debug)] struct MatchingStatus { pub idx: usize, pub score: i64, pub final_score: i64, pub adj_num: usize, pub back_ref: usize, } impl Default for MatchingStatus { fn default() -> Self { MatchingStatus { idx: 0, score: 0, final_score: 0, adj_num: 1, back_ref: 0, } } } fn build_graph(choice: &str, pattern: &str) -> Option<Vec<Vec<MatchingStatus>>> { let mut scores = vec![]; let mut match_start_idx = 0; // to ensure that the pushed char are able to match the pattern let mut pat_prev_ch = '\0'; // initialize the match positions and inline scores for (pat_idx, pat_ch) in pattern.chars().enumerate() { let mut vec = vec![]; let mut choice_prev_ch = '\0'; for (idx, ch) in choice.chars().enumerate() { if ch.to_ascii_lowercase() == pat_ch.to_ascii_lowercase() && idx >= match_start_idx { let score = fuzzy_score(ch, idx, choice_prev_ch, pat_ch, pat_idx, pat_prev_ch); vec.push(MatchingStatus { idx, score, final_score: score, adj_num: 1, back_ref: 0, }); } choice_prev_ch = ch; } if vec.is_empty() { // not matched return None; } match_start_idx = vec[0].idx + 1; scores.push(vec); pat_prev_ch = pat_ch; } // calculate max scores considering adjacent characters for pat_idx in 1..scores.len() { let (first_half, last_half) = scores.split_at_mut(pat_idx); let prev_row = &first_half[first_half.len() - 1]; let cur_row = &mut last_half[0]; for idx in 0..cur_row.len() { let next = cur_row[idx]; let prev = if idx > 0 { cur_row[idx - 1] } else { MatchingStatus::default() }; 
let mut score_before_idx = prev.final_score - prev.score + next.score; score_before_idx += PENALTY_UNMATCHED * ((next.idx - prev.idx) as i64); score_before_idx -= if prev.adj_num == 0 { BONUS_ADJACENCY } else { 0 }; let (back_ref, score, adj_num) = prev_row .iter() .enumerate() .take_while(|&(_, &MatchingStatus { idx,.. })| idx < next.idx) .skip_while(|&(_, &MatchingStatus { idx,.. })| idx < prev.idx) .map(|(back_ref, cur)| { let adj_num = next.idx - cur.idx - 1; let mut final_score = cur.final_score + next.score; final_score += if adj_num == 0 { BONUS_ADJACENCY } else { PENALTY_UNMATCHED * adj_num as i64 }; (back_ref, final_score, adj_num) }) .max_by_key(|&(_, x, _)| x) .unwrap_or((prev.back_ref, score_before_idx, prev.adj_num)); cur_row[idx] = if idx > 0 && score < score_before_idx { MatchingStatus { final_score: score_before_idx, back_ref: prev.back_ref, adj_num, ..next } } else { MatchingStatus { final_score: score, back_ref, adj_num, ..next } }; } } Some(scores) } // judge how many scores the current index should get fn fuzzy_score( choice_ch: char, choice_idx: usize, choice_prev_ch: char, pat_ch: char, pat_idx: usize, _pat_prev_ch: char, ) -> i64 { let mut score = BONUS_MATCHED; let choice_prev_ch_type = char_type_of(choice_prev_ch); let choice_role = char_role(choice_prev_ch, choice_ch); if pat_ch == choice_ch { if pat_ch.is_uppercase() { score += BONUS_UPPER_MATCH; } else { score += BONUS_CASE_MATCH; } } else { score += PENALTY_CASE_UNMATCHED; } // apply bonus for camelCases if choice_role == CharRole::Head { score += BONUS_CAMEL; } // apply bonus for matches after a separator if choice_prev_ch_type == CharType::Separ { score += BONUS_SEPARATOR; } if pat_idx == 0 { score += max((choice_idx as i64) * PENALTY_LEADING, PENALTY_MAX_LEADING); } score } #[cfg(test)] mod tests { use super::*; fn wrap_matches(line: &str, indices: &[usize]) -> String { let mut ret = String::new(); let mut peekable = indices.iter().peekable(); for (idx, ch) in line.chars().enumerate() { let next_id = **peekable.peek().unwrap_or(&&line.len()); if next_id == idx { ret.push_str(format!("[{}]", ch).as_str()); peekable.next(); } else { ret.push(ch); } } ret } fn filter_and_sort(pattern: &str, lines: &[&'static str]) -> Vec<&'static str> { let mut lines_with_score: Vec<(i64, &'static str)> = lines .into_iter() .map(|&s| (fuzzy_match(s, pattern).unwrap_or(-(1 << 62)), s)) .collect(); lines_with_score.sort_by_key(|(score, _)| -score); lines_with_score .into_iter() .map(|(_, string)| string) .collect() } fn wrap_fuzzy_match(line: &str, pattern: &str) -> Option<String> { let (_score, indices) = fuzzy_indices(line, pattern)?; Some(wrap_matches(line, &indices)) } fn assert_order(pattern: &str, choices: &[&'static str]) { let result = filter_and_sort(pattern, choices); if result!= choices { // debug print println!("pattern: {}", pattern); for &choice in choices.iter() { if let Some((score, indices)) = fuzzy_indices(choice, pattern) { println!("{}: {:?}", score, wrap_matches(choice, &indices)); } else { println!("NO MATCH for {}", choice); } } } assert_eq!(result, choices); } #[test] fn test_match_or_not() { assert_eq!(Some(0), fuzzy_match("", "")); assert_eq!(Some(0), fuzzy_match("abcdefaghi", "")); assert_eq!(None, fuzzy_match("", "a")); assert_eq!(None, fuzzy_match("abcdefaghi", "中")); assert_eq!(None, fuzzy_match("abc", "abx")); assert!(fuzzy_match("axbycz", "abc").is_some()); assert!(fuzzy_match("axbycz", "xyz").is_some()); assert_eq!("[a]x[b]y[c]z", &wrap_fuzzy_match("axbycz", "abc").unwrap()); 
assert_eq!("a[x]b[y]c[z]", &wrap_fuzzy_match("axbycz", "xyz").unwrap()); assert_eq!( "[H]ello, [世]界", &wrap_fuzzy_match("Hello, 世界", "H世").unwrap() ); } #[test] fn test_match_quality() { // case // assert_order("monad", &["monad", "Monad", "mONAD"]); // initials assert_order("ab", &["ab", "aoo_boo", "acb"]); assert_order("CC", &["CamelCase", "camelCase", "camelcase"]); assert_order("cC", &["camelCase", "CamelCase", "camelcase"]); assert_order( "cc",
&[ "camel case", "camelCase", "camelcase", "CamelCase", "camel ace", ], ); assert_order( "Da.Te", &["Data.Text", "Data.Text.Lazy", "Data.Aeson.Encoding.text"], ); // prefix assert_order("is", &["isIEEE", "inSuf"]); // shorter assert_order("ma", &["map", "many", "maximum"]); assert_order("print", &["printf", "sprintf"]); // score(PRINT) = kMinScore assert_order("ast", &["ast", "AST", "INT_FAST16_MAX"]); // score(PRINT) > kMinScore assert_order("Int", &["int", "INT", "PRINT"]); } }
random_line_split
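// A standalone sketch (hypothetical helper, not part of the original file) of
// how the leading-letter penalty in `fuzzy_score` saturates: every character
// before the first match costs PENALTY_LEADING, clamped at PENALTY_MAX_LEADING.
fn leading_penalty(choice_idx: usize) -> i64 {
    const PENALTY_LEADING: i64 = -6;
    const PENALTY_MAX_LEADING: i64 = -18;
    std::cmp::max((choice_idx as i64) * PENALTY_LEADING, PENALTY_MAX_LEADING)
}

fn leading_penalty_demo() {
    assert_eq!(leading_penalty(0), 0);   // match at the very start: no penalty
    assert_eq!(leading_penalty(2), -12); // two leading letters
    assert_eq!(leading_penalty(3), -18); // the clamp is reached
    assert_eq!(leading_penalty(9), -18); // further offsets cost no more
}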
skim.rs
///! The fuzzy matching algorithm used by skim ///! It focus more on path matching /// ///! # Example: ///! ```edition2018 ///! use fuzzy_matcher::skim::{fuzzy_match, fuzzy_indices}; ///! ///! assert_eq!(None, fuzzy_match("abc", "abx")); ///! assert!(fuzzy_match("axbycz", "abc").is_some()); ///! assert!(fuzzy_match("axbycz", "xyz").is_some()); ///! ///! let (score, indices) = fuzzy_indices("axbycz", "abc").unwrap(); ///! assert_eq!(indices, [0, 2, 4]); ///! ///! ``` ///! ///! It is modeled after <https://github.com/felipesere/icepick.git> use std::cmp::max; use crate::util::*; const BONUS_MATCHED: i64 = 4; const BONUS_CASE_MATCH: i64 = 4; const BONUS_UPPER_MATCH: i64 = 6; const BONUS_ADJACENCY: i64 = 10; const BONUS_SEPARATOR: i64 = 8; const BONUS_CAMEL: i64 = 8; const PENALTY_CASE_UNMATCHED: i64 = -1; const PENALTY_LEADING: i64 = -6; // penalty applied for every letter before the first match const PENALTY_MAX_LEADING: i64 = -18; // maxing penalty for leading letters const PENALTY_UNMATCHED: i64 = -2; pub fn fuzzy_match(choice: &str, pattern: &str) -> Option<i64> { if pattern.is_empty() { return Some(0); } let scores = build_graph(choice, pattern)?; let last_row = &scores[scores.len() - 1]; let (_, &MatchingStatus { final_score,.. }) = last_row .iter() .enumerate() .max_by_key(|&(_, x)| x.final_score) .expect("fuzzy_indices failed to iterate over last_row"); Some(final_score) } pub fn fuzzy_indices(choice: &str, pattern: &str) -> Option<(i64, Vec<usize>)> { if pattern.is_empty() { return Some((0, Vec::new())); } let mut picked = vec![]; let scores = build_graph(choice, pattern)?; let last_row = &scores[scores.len() - 1]; let (mut next_col, &MatchingStatus { final_score,.. }) = last_row .iter() .enumerate() .max_by_key(|&(_, x)| x.final_score) .expect("fuzzy_indices failed to iterate over last_row"); let mut pat_idx = scores.len() as i64 - 1; while pat_idx >= 0 { let status = scores[pat_idx as usize][next_col]; next_col = status.back_ref; picked.push(status.idx); pat_idx -= 1; } picked.reverse(); Some((final_score, picked)) } #[derive(Clone, Copy, Debug)] struct MatchingStatus { pub idx: usize, pub score: i64, pub final_score: i64, pub adj_num: usize, pub back_ref: usize, } impl Default for MatchingStatus { fn default() -> Self { MatchingStatus { idx: 0, score: 0, final_score: 0, adj_num: 1, back_ref: 0, } } } fn build_graph(choice: &str, pattern: &str) -> Option<Vec<Vec<MatchingStatus>>> { let mut scores = vec![]; let mut match_start_idx = 0; // to ensure that the pushed char are able to match the pattern let mut pat_prev_ch = '\0'; // initialize the match positions and inline scores for (pat_idx, pat_ch) in pattern.chars().enumerate() { let mut vec = vec![]; let mut choice_prev_ch = '\0'; for (idx, ch) in choice.chars().enumerate() { if ch.to_ascii_lowercase() == pat_ch.to_ascii_lowercase() && idx >= match_start_idx { let score = fuzzy_score(ch, idx, choice_prev_ch, pat_ch, pat_idx, pat_prev_ch); vec.push(MatchingStatus { idx, score, final_score: score, adj_num: 1, back_ref: 0, }); } choice_prev_ch = ch; } if vec.is_empty() { // not matched return None; } match_start_idx = vec[0].idx + 1; scores.push(vec); pat_prev_ch = pat_ch; } // calculate max scores considering adjacent characters for pat_idx in 1..scores.len() { let (first_half, last_half) = scores.split_at_mut(pat_idx); let prev_row = &first_half[first_half.len() - 1]; let cur_row = &mut last_half[0]; for idx in 0..cur_row.len() { let next = cur_row[idx]; let prev = if idx > 0 { cur_row[idx - 1] } else { MatchingStatus::default() }; 
let mut score_before_idx = prev.final_score - prev.score + next.score; score_before_idx += PENALTY_UNMATCHED * ((next.idx - prev.idx) as i64); score_before_idx -= if prev.adj_num == 0 { BONUS_ADJACENCY } else { 0 }; let (back_ref, score, adj_num) = prev_row .iter() .enumerate() .take_while(|&(_, &MatchingStatus { idx,.. })| idx < next.idx) .skip_while(|&(_, &MatchingStatus { idx,.. })| idx < prev.idx) .map(|(back_ref, cur)| { let adj_num = next.idx - cur.idx - 1; let mut final_score = cur.final_score + next.score; final_score += if adj_num == 0 { BONUS_ADJACENCY } else { PENALTY_UNMATCHED * adj_num as i64 }; (back_ref, final_score, adj_num) }) .max_by_key(|&(_, x, _)| x) .unwrap_or((prev.back_ref, score_before_idx, prev.adj_num)); cur_row[idx] = if idx > 0 && score < score_before_idx { MatchingStatus { final_score: score_before_idx, back_ref: prev.back_ref, adj_num, ..next } } else { MatchingStatus { final_score: score, back_ref, adj_num, ..next } }; } } Some(scores) } // judge how many scores the current index should get fn fuzzy_score( choice_ch: char, choice_idx: usize, choice_prev_ch: char, pat_ch: char, pat_idx: usize, _pat_prev_ch: char, ) -> i64 { let mut score = BONUS_MATCHED; let choice_prev_ch_type = char_type_of(choice_prev_ch); let choice_role = char_role(choice_prev_ch, choice_ch); if pat_ch == choice_ch
else { score += PENALTY_CASE_UNMATCHED; } // apply bonus for camelCases if choice_role == CharRole::Head { score += BONUS_CAMEL; } // apply bonus for matches after a separator if choice_prev_ch_type == CharType::Separ { score += BONUS_SEPARATOR; } if pat_idx == 0 { score += max((choice_idx as i64) * PENALTY_LEADING, PENALTY_MAX_LEADING); } score } #[cfg(test)] mod tests { use super::*; fn wrap_matches(line: &str, indices: &[usize]) -> String { let mut ret = String::new(); let mut peekable = indices.iter().peekable(); for (idx, ch) in line.chars().enumerate() { let next_id = **peekable.peek().unwrap_or(&&line.len()); if next_id == idx { ret.push_str(format!("[{}]", ch).as_str()); peekable.next(); } else { ret.push(ch); } } ret } fn filter_and_sort(pattern: &str, lines: &[&'static str]) -> Vec<&'static str> { let mut lines_with_score: Vec<(i64, &'static str)> = lines .into_iter() .map(|&s| (fuzzy_match(s, pattern).unwrap_or(-(1 << 62)), s)) .collect(); lines_with_score.sort_by_key(|(score, _)| -score); lines_with_score .into_iter() .map(|(_, string)| string) .collect() } fn wrap_fuzzy_match(line: &str, pattern: &str) -> Option<String> { let (_score, indices) = fuzzy_indices(line, pattern)?; Some(wrap_matches(line, &indices)) } fn assert_order(pattern: &str, choices: &[&'static str]) { let result = filter_and_sort(pattern, choices); if result!= choices { // debug print println!("pattern: {}", pattern); for &choice in choices.iter() { if let Some((score, indices)) = fuzzy_indices(choice, pattern) { println!("{}: {:?}", score, wrap_matches(choice, &indices)); } else { println!("NO MATCH for {}", choice); } } } assert_eq!(result, choices); } #[test] fn test_match_or_not() { assert_eq!(Some(0), fuzzy_match("", "")); assert_eq!(Some(0), fuzzy_match("abcdefaghi", "")); assert_eq!(None, fuzzy_match("", "a")); assert_eq!(None, fuzzy_match("abcdefaghi", "中")); assert_eq!(None, fuzzy_match("abc", "abx")); assert!(fuzzy_match("axbycz", "abc").is_some()); assert!(fuzzy_match("axbycz", "xyz").is_some()); assert_eq!("[a]x[b]y[c]z", &wrap_fuzzy_match("axbycz", "abc").unwrap()); assert_eq!("a[x]b[y]c[z]", &wrap_fuzzy_match("axbycz", "xyz").unwrap()); assert_eq!( "[H]ello, [世]界", &wrap_fuzzy_match("Hello, 世界", "H世").unwrap() ); } #[test] fn test_match_quality() { // case // assert_order("monad", &["monad", "Monad", "mONAD"]); // initials assert_order("ab", &["ab", "aoo_boo", "acb"]); assert_order("CC", &["CamelCase", "camelCase", "camelcase"]); assert_order("cC", &["camelCase", "CamelCase", "camelcase"]); assert_order( "cc", &[ "camel case", "camelCase", "camelcase", "CamelCase", "camel ace", ], ); assert_order( "Da.Te", &["Data.Text", "Data.Text.Lazy", "Data.Aeson.Encoding.text"], ); // prefix assert_order("is", &["isIEEE", "inSuf"]); // shorter assert_order("ma", &["map", "many", "maximum"]); assert_order("print", &["printf", "sprintf"]); // score(PRINT) = kMinScore assert_order("ast", &["ast", "AST", "INT_FAST16_MAX"]); // score(PRINT) > kMinScore assert_order("Int", &["int", "INT", "PRINT"]); } }
{ if pat_ch.is_uppercase() { score += BONUS_UPPER_MATCH; } else { score += BONUS_CASE_MATCH; } }
conditional_block
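// A standalone sketch (hypothetical names) of the case-handling conditional
// highlighted in the row above: an exact uppercase match earns
// BONUS_UPPER_MATCH, any other exact match earns BONUS_CASE_MATCH, and a
// match that only succeeds case-insensitively pays PENALTY_CASE_UNMATCHED.
fn case_adjustment(choice_ch: char, pat_ch: char) -> i64 {
    const BONUS_CASE_MATCH: i64 = 4;
    const BONUS_UPPER_MATCH: i64 = 6;
    const PENALTY_CASE_UNMATCHED: i64 = -1;
    if pat_ch == choice_ch {
        if pat_ch.is_uppercase() { BONUS_UPPER_MATCH } else { BONUS_CASE_MATCH }
    } else {
        PENALTY_CASE_UNMATCHED
    }
}

fn case_adjustment_demo() {
    assert_eq!(case_adjustment('C', 'C'), 6);  // exact uppercase match
    assert_eq!(case_adjustment('c', 'c'), 4);  // exact lowercase match
    assert_eq!(case_adjustment('C', 'c'), -1); // case-insensitive match only
}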
skim.rs
///! The fuzzy matching algorithm used by skim ///! It focus more on path matching /// ///! # Example: ///! ```edition2018 ///! use fuzzy_matcher::skim::{fuzzy_match, fuzzy_indices}; ///! ///! assert_eq!(None, fuzzy_match("abc", "abx")); ///! assert!(fuzzy_match("axbycz", "abc").is_some()); ///! assert!(fuzzy_match("axbycz", "xyz").is_some()); ///! ///! let (score, indices) = fuzzy_indices("axbycz", "abc").unwrap(); ///! assert_eq!(indices, [0, 2, 4]); ///! ///! ``` ///! ///! It is modeled after <https://github.com/felipesere/icepick.git> use std::cmp::max; use crate::util::*; const BONUS_MATCHED: i64 = 4; const BONUS_CASE_MATCH: i64 = 4; const BONUS_UPPER_MATCH: i64 = 6; const BONUS_ADJACENCY: i64 = 10; const BONUS_SEPARATOR: i64 = 8; const BONUS_CAMEL: i64 = 8; const PENALTY_CASE_UNMATCHED: i64 = -1; const PENALTY_LEADING: i64 = -6; // penalty applied for every letter before the first match const PENALTY_MAX_LEADING: i64 = -18; // maxing penalty for leading letters const PENALTY_UNMATCHED: i64 = -2; pub fn fuzzy_match(choice: &str, pattern: &str) -> Option<i64> { if pattern.is_empty() { return Some(0); } let scores = build_graph(choice, pattern)?; let last_row = &scores[scores.len() - 1]; let (_, &MatchingStatus { final_score,.. }) = last_row .iter() .enumerate() .max_by_key(|&(_, x)| x.final_score) .expect("fuzzy_indices failed to iterate over last_row"); Some(final_score) } pub fn fuzzy_indices(choice: &str, pattern: &str) -> Option<(i64, Vec<usize>)> { if pattern.is_empty() { return Some((0, Vec::new())); } let mut picked = vec![]; let scores = build_graph(choice, pattern)?; let last_row = &scores[scores.len() - 1]; let (mut next_col, &MatchingStatus { final_score,.. }) = last_row .iter() .enumerate() .max_by_key(|&(_, x)| x.final_score) .expect("fuzzy_indices failed to iterate over last_row"); let mut pat_idx = scores.len() as i64 - 1; while pat_idx >= 0 { let status = scores[pat_idx as usize][next_col]; next_col = status.back_ref; picked.push(status.idx); pat_idx -= 1; } picked.reverse(); Some((final_score, picked)) } #[derive(Clone, Copy, Debug)] struct MatchingStatus { pub idx: usize, pub score: i64, pub final_score: i64, pub adj_num: usize, pub back_ref: usize, } impl Default for MatchingStatus { fn default() -> Self { MatchingStatus { idx: 0, score: 0, final_score: 0, adj_num: 1, back_ref: 0, } } } fn build_graph(choice: &str, pattern: &str) -> Option<Vec<Vec<MatchingStatus>>> { let mut scores = vec![]; let mut match_start_idx = 0; // to ensure that the pushed char are able to match the pattern let mut pat_prev_ch = '\0'; // initialize the match positions and inline scores for (pat_idx, pat_ch) in pattern.chars().enumerate() { let mut vec = vec![]; let mut choice_prev_ch = '\0'; for (idx, ch) in choice.chars().enumerate() { if ch.to_ascii_lowercase() == pat_ch.to_ascii_lowercase() && idx >= match_start_idx { let score = fuzzy_score(ch, idx, choice_prev_ch, pat_ch, pat_idx, pat_prev_ch); vec.push(MatchingStatus { idx, score, final_score: score, adj_num: 1, back_ref: 0, }); } choice_prev_ch = ch; } if vec.is_empty() { // not matched return None; } match_start_idx = vec[0].idx + 1; scores.push(vec); pat_prev_ch = pat_ch; } // calculate max scores considering adjacent characters for pat_idx in 1..scores.len() { let (first_half, last_half) = scores.split_at_mut(pat_idx); let prev_row = &first_half[first_half.len() - 1]; let cur_row = &mut last_half[0]; for idx in 0..cur_row.len() { let next = cur_row[idx]; let prev = if idx > 0 { cur_row[idx - 1] } else { MatchingStatus::default() }; 
let mut score_before_idx = prev.final_score - prev.score + next.score; score_before_idx += PENALTY_UNMATCHED * ((next.idx - prev.idx) as i64); score_before_idx -= if prev.adj_num == 0 { BONUS_ADJACENCY } else { 0 }; let (back_ref, score, adj_num) = prev_row .iter() .enumerate() .take_while(|&(_, &MatchingStatus { idx,.. })| idx < next.idx) .skip_while(|&(_, &MatchingStatus { idx,.. })| idx < prev.idx) .map(|(back_ref, cur)| { let adj_num = next.idx - cur.idx - 1; let mut final_score = cur.final_score + next.score; final_score += if adj_num == 0 { BONUS_ADJACENCY } else { PENALTY_UNMATCHED * adj_num as i64 }; (back_ref, final_score, adj_num) }) .max_by_key(|&(_, x, _)| x) .unwrap_or((prev.back_ref, score_before_idx, prev.adj_num)); cur_row[idx] = if idx > 0 && score < score_before_idx { MatchingStatus { final_score: score_before_idx, back_ref: prev.back_ref, adj_num, ..next } } else { MatchingStatus { final_score: score, back_ref, adj_num, ..next } }; } } Some(scores) } // judge how many scores the current index should get fn fuzzy_score( choice_ch: char, choice_idx: usize, choice_prev_ch: char, pat_ch: char, pat_idx: usize, _pat_prev_ch: char, ) -> i64 { let mut score = BONUS_MATCHED; let choice_prev_ch_type = char_type_of(choice_prev_ch); let choice_role = char_role(choice_prev_ch, choice_ch); if pat_ch == choice_ch { if pat_ch.is_uppercase() { score += BONUS_UPPER_MATCH; } else { score += BONUS_CASE_MATCH; } } else { score += PENALTY_CASE_UNMATCHED; } // apply bonus for camelCases if choice_role == CharRole::Head { score += BONUS_CAMEL; } // apply bonus for matches after a separator if choice_prev_ch_type == CharType::Separ { score += BONUS_SEPARATOR; } if pat_idx == 0 { score += max((choice_idx as i64) * PENALTY_LEADING, PENALTY_MAX_LEADING); } score } #[cfg(test)] mod tests { use super::*; fn wrap_matches(line: &str, indices: &[usize]) -> String { let mut ret = String::new(); let mut peekable = indices.iter().peekable(); for (idx, ch) in line.chars().enumerate() { let next_id = **peekable.peek().unwrap_or(&&line.len()); if next_id == idx { ret.push_str(format!("[{}]", ch).as_str()); peekable.next(); } else { ret.push(ch); } } ret } fn filter_and_sort(pattern: &str, lines: &[&'static str]) -> Vec<&'static str> { let mut lines_with_score: Vec<(i64, &'static str)> = lines .into_iter() .map(|&s| (fuzzy_match(s, pattern).unwrap_or(-(1 << 62)), s)) .collect(); lines_with_score.sort_by_key(|(score, _)| -score); lines_with_score .into_iter() .map(|(_, string)| string) .collect() } fn
(line: &str, pattern: &str) -> Option<String> { let (_score, indices) = fuzzy_indices(line, pattern)?; Some(wrap_matches(line, &indices)) } fn assert_order(pattern: &str, choices: &[&'static str]) { let result = filter_and_sort(pattern, choices); if result!= choices { // debug print println!("pattern: {}", pattern); for &choice in choices.iter() { if let Some((score, indices)) = fuzzy_indices(choice, pattern) { println!("{}: {:?}", score, wrap_matches(choice, &indices)); } else { println!("NO MATCH for {}", choice); } } } assert_eq!(result, choices); } #[test] fn test_match_or_not() { assert_eq!(Some(0), fuzzy_match("", "")); assert_eq!(Some(0), fuzzy_match("abcdefaghi", "")); assert_eq!(None, fuzzy_match("", "a")); assert_eq!(None, fuzzy_match("abcdefaghi", "中")); assert_eq!(None, fuzzy_match("abc", "abx")); assert!(fuzzy_match("axbycz", "abc").is_some()); assert!(fuzzy_match("axbycz", "xyz").is_some()); assert_eq!("[a]x[b]y[c]z", &wrap_fuzzy_match("axbycz", "abc").unwrap()); assert_eq!("a[x]b[y]c[z]", &wrap_fuzzy_match("axbycz", "xyz").unwrap()); assert_eq!( "[H]ello, [世]界", &wrap_fuzzy_match("Hello, 世界", "H世").unwrap() ); } #[test] fn test_match_quality() { // case // assert_order("monad", &["monad", "Monad", "mONAD"]); // initials assert_order("ab", &["ab", "aoo_boo", "acb"]); assert_order("CC", &["CamelCase", "camelCase", "camelcase"]); assert_order("cC", &["camelCase", "CamelCase", "camelcase"]); assert_order( "cc", &[ "camel case", "camelCase", "camelcase", "CamelCase", "camel ace", ], ); assert_order( "Da.Te", &["Data.Text", "Data.Text.Lazy", "Data.Aeson.Encoding.text"], ); // prefix assert_order("is", &["isIEEE", "inSuf"]); // shorter assert_order("ma", &["map", "many", "maximum"]); assert_order("print", &["printf", "sprintf"]); // score(PRINT) = kMinScore assert_order("ast", &["ast", "AST", "INT_FAST16_MAX"]); // score(PRINT) > kMinScore assert_order("Int", &["int", "INT", "PRINT"]); } }
wrap_fuzzy_match
identifier_name
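// A standalone sketch of the bracket-highlighting idea behind the
// `wrap_matches` test helper in the row above: wrap every matched char
// position in `[...]`. The function name is illustrative; indices are char
// positions, matching how `fuzzy_indices` reports them.
fn highlight(line: &str, indices: &[usize]) -> String {
    let mut out = String::new();
    let mut next = indices.iter().peekable();
    for (idx, ch) in line.chars().enumerate() {
        if next.peek().map_or(false, |&&i| i == idx) {
            out.push('[');
            out.push(ch);
            out.push(']');
            next.next(); // consume this match position
        } else {
            out.push(ch);
        }
    }
    out
}

fn highlight_demo() {
    // Matching "abc" against "axbycz" marks char positions 0, 2 and 4.
    assert_eq!(highlight("axbycz", &[0, 2, 4]), "[a]x[b]y[c]z");
}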
dataflows.rs
sink, description of sink) pub sink_exports: BTreeMap<GlobalId, ComputeSinkDesc<S, T>>, /// An optional frontier to which inputs should be advanced. /// /// If this is set, it should override the default setting determined by /// the upper bound of `since` frontiers contributing to the dataflow. /// It is an error for this to be set to a frontier not beyond that default. pub as_of: Option<Antichain<T>>, /// Frontier beyond which the dataflow should not execute. /// Specifically, updates at times greater or equal to this frontier are suppressed. /// This is often set to `as_of + 1` to enable "batch" computations. pub until: Antichain<T>, /// Human readable name pub debug_name: String, } impl<T> DataflowDescription<Plan<T>, (), mz_repr::Timestamp> { /// Tests if the dataflow refers to a single timestamp, namely /// that `as_of` has a single coordinate and that the `until` /// value corresponds to the `as_of` value plus one. pub fn is_single_time(&self) -> bool { // TODO: this would be much easier to check if `until` was a strict lower bound, // and we would be testing that `until == as_of`. let Some(as_of) = self.as_of.as_ref() else { return false; }; !as_of.is_empty() && as_of .as_option() .and_then(|as_of| as_of.checked_add(1)) .as_ref() == self.until.as_option() } } impl<T> DataflowDescription<OptimizedMirRelationExpr, (), T> { /// Creates a new dataflow description with a human-readable name. pub fn new(name: String) -> Self { Self { source_imports: Default::default(), index_imports: Default::default(), objects_to_build: Vec::new(), index_exports: Default::default(), sink_exports: Default::default(), as_of: Default::default(), until: Antichain::new(), debug_name: name, } } /// Imports a previously exported index. /// /// This method makes available an index previously exported as `id`, identified /// to the query by `description` (which names the view the index arranges, and /// the keys by which it is arranged). pub fn import_index( &mut self, id: GlobalId, desc: IndexDesc, typ: RelationType, monotonic: bool, ) { self.index_imports.insert( id, IndexImport { desc, typ, monotonic, usage_types: None, }, ); } /// Imports a source and makes it available as `id`. pub fn import_source(&mut self, id: GlobalId, typ: RelationType, monotonic: bool) { // Import the source with no linear operators applied to it. // They may be populated by whole-dataflow optimization. self.source_imports.insert( id, ( SourceInstanceDesc { storage_metadata: (), arguments: SourceInstanceArguments { operators: None }, typ, }, monotonic, ), ); } /// Binds to `id` the relation expression `plan`. pub fn insert_plan(&mut self, id: GlobalId, plan: OptimizedMirRelationExpr) { self.objects_to_build.push(BuildDesc { id, plan }); } /// Exports as `id` an index described by `description`. /// /// Future uses of `import_index` in other dataflow descriptions may use `id`, /// as long as this dataflow has not been terminated in the meantime. pub fn export_index(&mut self, id: GlobalId, description: IndexDesc, on_type: RelationType) { // We first create a "view" named `id` that ensures that the // data are correctly arranged and available for export. self.insert_plan( id, OptimizedMirRelationExpr::declare_optimized(MirRelationExpr::ArrangeBy { input: Box::new(MirRelationExpr::global_get( description.on_id, on_type.clone(), )), keys: vec![description.key.clone()], }), ); self.index_exports.insert(id, (description, on_type)); } /// Exports as `id` a sink described by `description`. 
pub fn export_sink(&mut self, id: GlobalId, description: ComputeSinkDesc<(), T>) { self.sink_exports.insert(id, description); } /// Returns true iff `id` is already imported. pub fn is_imported(&self, id: &GlobalId) -> bool { self.objects_to_build.iter().any(|bd| &bd.id == id) || self.source_imports.keys().any(|i| i == id) } /// Assigns the `as_of` frontier to the supplied argument. /// /// This method allows the dataflow to indicate a frontier up through /// which all times should be advanced. This can be done for at least /// two reasons: 1. correctness and 2. performance. /// /// Correctness may require an `as_of` to ensure that historical detail /// is consolidated at representative times that do not present specific /// detail that is not specifically correct. For example, updates may be /// compacted to times that are no longer the source times, but instead /// some byproduct of when compaction was executed; we should not present /// those specific times as meaningfully different from other equivalent /// times. /// /// Performance may benefit from an aggressive `as_of` as it reduces the /// number of distinct moments at which collections vary. Differential /// dataflow will refresh its outputs at each time its inputs change and /// to moderate that we can minimize the volume of distinct input times /// as much as possible. /// /// Generally, one should consider setting `as_of` at least to the `since` /// frontiers of contributing data sources and as aggressively as the /// computation permits. pub fn set_as_of(&mut self, as_of: Antichain<T>) { self.as_of = Some(as_of); } /// The number of columns associated with an identifier in the dataflow. pub fn arity_of(&self, id: &GlobalId) -> usize { for (source_id, (source, _monotonic)) in self.source_imports.iter() { if source_id == id { return source.typ.arity(); } } for IndexImport { desc, typ,.. } in self.index_imports.values() { if &desc.on_id == id { return typ.arity(); } } for desc in self.objects_to_build.iter() { if &desc.id == id { return desc.plan.arity(); } } panic!("GlobalId {} not found in DataflowDesc", id); } /// Calls r and s on any sub-members of those types in self. Halts at the first error return. pub fn visit_children<R, S, E>(&mut self, r: R, s: S) -> Result<(), E> where R: Fn(&mut OptimizedMirRelationExpr) -> Result<(), E>, S: Fn(&mut MirScalarExpr) -> Result<(), E>, { for BuildDesc { plan,.. } in &mut self.objects_to_build { r(plan)?; } for (source_instance_desc, _) in self.source_imports.values_mut() { let Some(mfp) = source_instance_desc.arguments.operators.as_mut() else { continue; }; for expr in mfp.expressions.iter_mut() { s(expr)?; } for (_, expr) in mfp.predicates.iter_mut() { s(expr)?; } } Ok(()) } } impl<P, S, T> DataflowDescription<P, S, T> where P: CollectionPlan, { /// Identifiers of exported objects (indexes and sinks). pub fn
(&self) -> impl Iterator<Item = GlobalId> + '_ { self.index_exports .keys() .chain(self.sink_exports.keys()) .cloned() } /// Identifiers of exported subscribe sinks. pub fn subscribe_ids(&self) -> impl Iterator<Item = GlobalId> + '_ { self.sink_exports .iter() .filter_map(|(id, desc)| match desc.connection { ComputeSinkConnection::Subscribe(_) => Some(*id), _ => None, }) } /// Returns the description of the object to build with the specified /// identifier. /// /// # Panics /// /// Panics if `id` is not present in `objects_to_build` exactly once. pub fn build_desc(&self, id: GlobalId) -> &BuildDesc<P> { let mut builds = self.objects_to_build.iter().filter(|build| build.id == id); let build = builds .next() .unwrap_or_else(|| panic!("object to build id {id} unexpectedly missing")); assert!(builds.next().is_none()); build } /// Computes the set of identifiers upon which the specified collection /// identifier depends. /// /// `collection_id` must specify a valid object in `objects_to_build`. /// /// This method includes identifiers for e.g. intermediate views, and should be filtered /// if one only wants sources and indexes. /// /// This method is safe for mutually recursive view definitions. pub fn depends_on(&self, collection_id: GlobalId) -> BTreeSet<GlobalId> { let mut out = BTreeSet::new(); self.depends_on_into(collection_id, &mut out); out } /// Like `depends_on`, but appends to an existing `BTreeSet`. pub fn depends_on_into(&self, collection_id: GlobalId, out: &mut BTreeSet<GlobalId>) { out.insert(collection_id); if self.source_imports.contains_key(&collection_id) { // The collection is provided by an imported source. Report the // dependency on the source. out.insert(collection_id); return; } // NOTE(benesch): we're not smart enough here to know *which* index // for the collection will be used, if one exists, so we have to report // the dependency on all of them. let mut found_index = false; for (index_id, IndexImport { desc,.. }) in &self.index_imports { if desc.on_id == collection_id { // The collection is provided by an imported index. Report the // dependency on the index. out.insert(*index_id); found_index = true; } } if found_index { return; } // The collection is not provided by a source or imported index. // It must be a collection whose plan we have handy. Recurse. let build = self.build_desc(collection_id); for id in build.plan.depends_on() { if!out.contains(&id) { self.depends_on_into(id, out) } } } /// Computes the set of imports upon which the specified collection depends. /// /// This method behaves like `depends_on` but filters out internal dependencies that are not /// included in the dataflow imports. pub fn depends_on_imports(&self, collection_id: GlobalId) -> BTreeSet<GlobalId> { let is_import = |id: &GlobalId| { self.source_imports.contains_key(id) || self.index_imports.contains_key(id) }; let deps = self.depends_on(collection_id); deps.into_iter().filter(is_import).collect() } } impl<P: PartialEq, S: PartialEq, T: timely::PartialOrder> DataflowDescription<P, S, T> { /// Determine if a dataflow description is compatible with this dataflow description. /// /// Compatible dataflows have equal exports, imports, and objects to build. The `as_of` of /// the receiver has to be less equal the `other` `as_of`. /// // TODO: The semantics of this function are only useful for command reconciliation at the moment. 
pub fn compatible_with(&self, other: &Self) -> bool { let equality = self.index_exports == other.index_exports && self.sink_exports == other.sink_exports && self.objects_to_build == other.objects_to_build && self.index_imports == other.index_imports && self.source_imports == other.source_imports; let partial = if let (Some(as_of), Some(other_as_of)) = (&self.as_of, &other.as_of) { timely::PartialOrder::less_equal(as_of, other_as_of) } else { false }; equality && partial } } impl RustType<ProtoDataflowDescription> for DataflowDescription<crate::plan::Plan, CollectionMetadata> { fn into_proto(&self) -> ProtoDataflowDescription { ProtoDataflowDescription { source_imports: self.source_imports.into_proto(), index_imports: self.index_imports.into_proto(), objects_to_build: self.objects_to_build.into_proto(), index_exports: self.index_exports.into_proto(), sink_exports: self.sink_exports.into_proto(), as_of: self.as_of.into_proto(), until: Some(self.until.into_proto()), debug_name: self.debug_name.clone(), } } fn from_proto(proto: ProtoDataflowDescription) -> Result<Self, TryFromProtoError> { Ok(DataflowDescription { source_imports: proto.source_imports.into_rust()?, index_imports: proto.index_imports.into_rust()?, objects_to_build: proto.objects_to_build.into_rust()?, index_exports: proto.index_exports.into_rust()?, sink_exports: proto.sink_exports.into_rust()?, as_of: proto.as_of.map(|x| x.into_rust()).transpose()?, until: proto .until .map(|x| x.into_rust()) .transpose()? .unwrap_or_else(Antichain::new), debug_name: proto.debug_name, }) } } impl ProtoMapEntry<GlobalId, (SourceInstanceDesc<CollectionMetadata>, bool)> for ProtoSourceImport { fn from_rust<'a>( entry: ( &'a GlobalId, &'a (SourceInstanceDesc<CollectionMetadata>, bool), ), ) -> Self { ProtoSourceImport { id: Some(entry.0.into_proto()), source_instance_desc: Some(entry.1.0.into_proto()), monotonic: entry.1.1.into_proto(), } } fn into_rust( self, ) -> Result<(GlobalId, (SourceInstanceDesc<CollectionMetadata>, bool)), TryFromProtoError> { Ok(( self.id.into_rust_if_some("ProtoSourceImport::id")?, ( self.source_instance_desc .into_rust_if_some("ProtoSourceImport::source_instance_desc")?, self.monotonic.into_rust()?, ), )) } } impl ProtoMapEntry<GlobalId, IndexImport> for ProtoIndexImport { fn from_rust<'a>( ( id, IndexImport { desc, typ, monotonic, usage_types, }, ): (&'a GlobalId, &'a IndexImport), ) -> Self { ProtoIndexImport { id: Some(id.into_proto()), index_desc: Some(desc.into_proto()), typ: Some(typ.into_proto()), monotonic: monotonic.into_proto(), usage_types: usage_types.as_ref().unwrap_or(&Vec::new()).into_proto(), has_usage_types: usage_types.is_some(), } } fn into_rust(self) -> Result<(GlobalId, IndexImport), TryFromProtoError> { Ok(( self.id.into_rust_if_some("ProtoIndex::id")?, IndexImport { desc: self .index_desc .into_rust_if_some("ProtoIndexImport::index_desc")?, typ: self.typ.into_rust_if_some("ProtoIndexImport::typ")?, monotonic: self.monotonic.into_rust()?, usage_types: if!self.has_usage_types.into_rust()? { None } else { Some(self.usage_types.into_rust()?) 
}, }, )) } } impl ProtoMapEntry<GlobalId, (IndexDesc, RelationType)> for ProtoIndexExport { fn from_rust<'a>( (id, (index_desc, typ)): (&'a GlobalId, &'a (IndexDesc, RelationType)), ) -> Self { ProtoIndexExport { id: Some(id.into_proto()), index_desc: Some(index_desc.into_proto()), typ: Some(typ.into_proto()), } } fn into_rust(self) -> Result<(GlobalId, (IndexDesc, RelationType)), TryFromProtoError> { Ok(( self.id.into_rust_if_some("ProtoIndexExport::id")?, ( self.index_desc .into_rust_if_some("ProtoIndexExport::index_desc")?, self.typ.into_rust_if_some("ProtoIndexExport::typ")?, ), )) } } impl ProtoMapEntry<GlobalId, ComputeSinkDesc<CollectionMetadata>> for ProtoSinkExport { fn from_rust<'a>( (id, sink_desc): (&'a GlobalId, &'a ComputeSinkDesc<CollectionMetadata>), ) -> Self { ProtoSinkExport { id: Some(id.into_proto()), sink_desc: Some(sink_desc.into_proto()), } } fn into_rust( self, ) -> Result<(GlobalId, ComputeSinkDesc<CollectionMetadata>), TryFromProtoError> { Ok(( self.id.into_rust_if_some("ProtoSinkExport::id")?, self.sink_desc .into_rust_if_some("ProtoSinkExport::sink_desc")?, )) } } impl Arbitrary for DataflowDescription<Plan, CollectionMetadata, mz_repr::Timestamp> { type Strategy = BoxedStrategy<Self>; type Parameters = (); fn arbitrary_with(_: Self::Parameters) -> Self::Strategy { any_dataflow_description().boxed() } } proptest::prop_compose! { fn any_dataflow_description()( source_imports in proptest::collection::vec(any_source_import(), 1..3), index_imports in proptest::collection::vec(any_dataflow_index_import(), 1..3), objects_to_build in proptest::collection::vec(any::<BuildDesc<Plan>>(), 1..3), index_exports in proptest::collection::vec(any_dataflow_index_export(), 1..3), sink_descs in proptest::collection::vec( any::<(GlobalId, ComputeSinkDesc<CollectionMetadata, mz_repr::Timestamp>)>(), 1..3, ), as_of_some in any::<bool>(), as_of in proptest::collection::vec(any::<mz_repr::Timestamp>(), 1..5), debug_name in ".*", ) -> DataflowDescription<Plan, CollectionMetadata, mz_repr::Timestamp> { DataflowDescription { source_imports: BTreeMap::from_iter(source_imports.into_iter()), index_imports: BTreeMap::from_iter(index_imports.into_iter()), objects_to_build, index_exports: BTreeMap::from_iter(index_exports.into_iter()), sink_exports: BTreeMap::from_iter( sink_descs.into_iter(), ), as_of: if as_of_some { Some(Antichain::from(as_of))
export_ids
identifier_name
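// A simplified standalone model (hypothetical, not using the real `Antichain`
// type) of the `is_single_time` check in the row above: a dataflow covers
// exactly one timestamp when its `until` frontier equals `as_of + 1`.
fn is_single_time_model(as_of: Option<u64>, until: Option<u64>) -> bool {
    // `None` models an unset or empty frontier.
    match (as_of, until) {
        (Some(t), Some(u)) => t.checked_add(1) == Some(u),
        _ => false,
    }
}

fn single_time_demo() {
    assert!(is_single_time_model(Some(5), Some(6)));  // until == as_of + 1
    assert!(!is_single_time_model(Some(5), Some(7))); // a genuine time range
    assert!(!is_single_time_model(None, Some(6)));    // as_of not set
}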
dataflows.rs
sink, description of sink) pub sink_exports: BTreeMap<GlobalId, ComputeSinkDesc<S, T>>, /// An optional frontier to which inputs should be advanced. /// /// If this is set, it should override the default setting determined by /// the upper bound of `since` frontiers contributing to the dataflow. /// It is an error for this to be set to a frontier not beyond that default. pub as_of: Option<Antichain<T>>, /// Frontier beyond which the dataflow should not execute. /// Specifically, updates at times greater or equal to this frontier are suppressed. /// This is often set to `as_of + 1` to enable "batch" computations. pub until: Antichain<T>, /// Human readable name pub debug_name: String, } impl<T> DataflowDescription<Plan<T>, (), mz_repr::Timestamp> { /// Tests if the dataflow refers to a single timestamp, namely /// that `as_of` has a single coordinate and that the `until` /// value corresponds to the `as_of` value plus one. pub fn is_single_time(&self) -> bool { // TODO: this would be much easier to check if `until` was a strict lower bound, // and we would be testing that `until == as_of`. let Some(as_of) = self.as_of.as_ref() else { return false; }; !as_of.is_empty() && as_of .as_option() .and_then(|as_of| as_of.checked_add(1)) .as_ref() == self.until.as_option() } } impl<T> DataflowDescription<OptimizedMirRelationExpr, (), T> { /// Creates a new dataflow description with a human-readable name. pub fn new(name: String) -> Self { Self { source_imports: Default::default(), index_imports: Default::default(), objects_to_build: Vec::new(), index_exports: Default::default(), sink_exports: Default::default(), as_of: Default::default(), until: Antichain::new(), debug_name: name, } } /// Imports a previously exported index. /// /// This method makes available an index previously exported as `id`, identified /// to the query by `description` (which names the view the index arranges, and /// the keys by which it is arranged). pub fn import_index( &mut self, id: GlobalId, desc: IndexDesc, typ: RelationType, monotonic: bool, ) { self.index_imports.insert( id, IndexImport { desc, typ, monotonic, usage_types: None, }, ); } /// Imports a source and makes it available as `id`. pub fn import_source(&mut self, id: GlobalId, typ: RelationType, monotonic: bool) { // Import the source with no linear operators applied to it. // They may be populated by whole-dataflow optimization. self.source_imports.insert( id, ( SourceInstanceDesc { storage_metadata: (), arguments: SourceInstanceArguments { operators: None }, typ, }, monotonic, ), ); } /// Binds to `id` the relation expression `plan`. pub fn insert_plan(&mut self, id: GlobalId, plan: OptimizedMirRelationExpr) { self.objects_to_build.push(BuildDesc { id, plan }); } /// Exports as `id` an index described by `description`. /// /// Future uses of `import_index` in other dataflow descriptions may use `id`, /// as long as this dataflow has not been terminated in the meantime. pub fn export_index(&mut self, id: GlobalId, description: IndexDesc, on_type: RelationType) { // We first create a "view" named `id` that ensures that the // data are correctly arranged and available for export. self.insert_plan( id, OptimizedMirRelationExpr::declare_optimized(MirRelationExpr::ArrangeBy { input: Box::new(MirRelationExpr::global_get( description.on_id, on_type.clone(), )), keys: vec![description.key.clone()], }), ); self.index_exports.insert(id, (description, on_type)); } /// Exports as `id` a sink described by `description`. 
pub fn export_sink(&mut self, id: GlobalId, description: ComputeSinkDesc<(), T>) { self.sink_exports.insert(id, description); } /// Returns true iff `id` is already imported. pub fn is_imported(&self, id: &GlobalId) -> bool { self.objects_to_build.iter().any(|bd| &bd.id == id) || self.source_imports.keys().any(|i| i == id) } /// Assigns the `as_of` frontier to the supplied argument. /// /// This method allows the dataflow to indicate a frontier up through /// which all times should be advanced. This can be done for at least /// two reasons: 1. correctness and 2. performance. /// /// Correctness may require an `as_of` to ensure that historical detail /// is consolidated at representative times that do not present specific /// detail that is not specifically correct. For example, updates may be /// compacted to times that are no longer the source times, but instead /// some byproduct of when compaction was executed; we should not present /// those specific times as meaningfully different from other equivalent /// times. /// /// Performance may benefit from an aggressive `as_of` as it reduces the /// number of distinct moments at which collections vary. Differential /// dataflow will refresh its outputs at each time its inputs change and /// to moderate that we can minimize the volume of distinct input times /// as much as possible. /// /// Generally, one should consider setting `as_of` at least to the `since` /// frontiers of contributing data sources and as aggressively as the /// computation permits. pub fn set_as_of(&mut self, as_of: Antichain<T>) { self.as_of = Some(as_of); } /// The number of columns associated with an identifier in the dataflow. pub fn arity_of(&self, id: &GlobalId) -> usize
/// Calls r and s on any sub-members of those types in self. Halts at the first error return. pub fn visit_children<R, S, E>(&mut self, r: R, s: S) -> Result<(), E> where R: Fn(&mut OptimizedMirRelationExpr) -> Result<(), E>, S: Fn(&mut MirScalarExpr) -> Result<(), E>, { for BuildDesc { plan,.. } in &mut self.objects_to_build { r(plan)?; } for (source_instance_desc, _) in self.source_imports.values_mut() { let Some(mfp) = source_instance_desc.arguments.operators.as_mut() else { continue; }; for expr in mfp.expressions.iter_mut() { s(expr)?; } for (_, expr) in mfp.predicates.iter_mut() { s(expr)?; } } Ok(()) } } impl<P, S, T> DataflowDescription<P, S, T> where P: CollectionPlan, { /// Identifiers of exported objects (indexes and sinks). pub fn export_ids(&self) -> impl Iterator<Item = GlobalId> + '_ { self.index_exports .keys() .chain(self.sink_exports.keys()) .cloned() } /// Identifiers of exported subscribe sinks. pub fn subscribe_ids(&self) -> impl Iterator<Item = GlobalId> + '_ { self.sink_exports .iter() .filter_map(|(id, desc)| match desc.connection { ComputeSinkConnection::Subscribe(_) => Some(*id), _ => None, }) } /// Returns the description of the object to build with the specified /// identifier. /// /// # Panics /// /// Panics if `id` is not present in `objects_to_build` exactly once. pub fn build_desc(&self, id: GlobalId) -> &BuildDesc<P> { let mut builds = self.objects_to_build.iter().filter(|build| build.id == id); let build = builds .next() .unwrap_or_else(|| panic!("object to build id {id} unexpectedly missing")); assert!(builds.next().is_none()); build } /// Computes the set of identifiers upon which the specified collection /// identifier depends. /// /// `collection_id` must specify a valid object in `objects_to_build`. /// /// This method includes identifiers for e.g. intermediate views, and should be filtered /// if one only wants sources and indexes. /// /// This method is safe for mutually recursive view definitions. pub fn depends_on(&self, collection_id: GlobalId) -> BTreeSet<GlobalId> { let mut out = BTreeSet::new(); self.depends_on_into(collection_id, &mut out); out } /// Like `depends_on`, but appends to an existing `BTreeSet`. pub fn depends_on_into(&self, collection_id: GlobalId, out: &mut BTreeSet<GlobalId>) { out.insert(collection_id); if self.source_imports.contains_key(&collection_id) { // The collection is provided by an imported source. Report the // dependency on the source. out.insert(collection_id); return; } // NOTE(benesch): we're not smart enough here to know *which* index // for the collection will be used, if one exists, so we have to report // the dependency on all of them. let mut found_index = false; for (index_id, IndexImport { desc,.. }) in &self.index_imports { if desc.on_id == collection_id { // The collection is provided by an imported index. Report the // dependency on the index. out.insert(*index_id); found_index = true; } } if found_index { return; } // The collection is not provided by a source or imported index. // It must be a collection whose plan we have handy. Recurse. let build = self.build_desc(collection_id); for id in build.plan.depends_on() { if!out.contains(&id) { self.depends_on_into(id, out) } } } /// Computes the set of imports upon which the specified collection depends. /// /// This method behaves like `depends_on` but filters out internal dependencies that are not /// included in the dataflow imports. 
pub fn depends_on_imports(&self, collection_id: GlobalId) -> BTreeSet<GlobalId> { let is_import = |id: &GlobalId| { self.source_imports.contains_key(id) || self.index_imports.contains_key(id) }; let deps = self.depends_on(collection_id); deps.into_iter().filter(is_import).collect() } } impl<P: PartialEq, S: PartialEq, T: timely::PartialOrder> DataflowDescription<P, S, T> { /// Determine if a dataflow description is compatible with this dataflow description. /// /// Compatible dataflows have equal exports, imports, and objects to build. The `as_of` of /// the receiver has to be less equal the `other` `as_of`. /// // TODO: The semantics of this function are only useful for command reconciliation at the moment. pub fn compatible_with(&self, other: &Self) -> bool { let equality = self.index_exports == other.index_exports && self.sink_exports == other.sink_exports && self.objects_to_build == other.objects_to_build && self.index_imports == other.index_imports && self.source_imports == other.source_imports; let partial = if let (Some(as_of), Some(other_as_of)) = (&self.as_of, &other.as_of) { timely::PartialOrder::less_equal(as_of, other_as_of) } else { false }; equality && partial } } impl RustType<ProtoDataflowDescription> for DataflowDescription<crate::plan::Plan, CollectionMetadata> { fn into_proto(&self) -> ProtoDataflowDescription { ProtoDataflowDescription { source_imports: self.source_imports.into_proto(), index_imports: self.index_imports.into_proto(), objects_to_build: self.objects_to_build.into_proto(), index_exports: self.index_exports.into_proto(), sink_exports: self.sink_exports.into_proto(), as_of: self.as_of.into_proto(), until: Some(self.until.into_proto()), debug_name: self.debug_name.clone(), } } fn from_proto(proto: ProtoDataflowDescription) -> Result<Self, TryFromProtoError> { Ok(DataflowDescription { source_imports: proto.source_imports.into_rust()?, index_imports: proto.index_imports.into_rust()?, objects_to_build: proto.objects_to_build.into_rust()?, index_exports: proto.index_exports.into_rust()?, sink_exports: proto.sink_exports.into_rust()?, as_of: proto.as_of.map(|x| x.into_rust()).transpose()?, until: proto .until .map(|x| x.into_rust()) .transpose()? 
.unwrap_or_else(Antichain::new), debug_name: proto.debug_name, }) } } impl ProtoMapEntry<GlobalId, (SourceInstanceDesc<CollectionMetadata>, bool)> for ProtoSourceImport { fn from_rust<'a>( entry: ( &'a GlobalId, &'a (SourceInstanceDesc<CollectionMetadata>, bool), ), ) -> Self { ProtoSourceImport { id: Some(entry.0.into_proto()), source_instance_desc: Some(entry.1.0.into_proto()), monotonic: entry.1.1.into_proto(), } } fn into_rust( self, ) -> Result<(GlobalId, (SourceInstanceDesc<CollectionMetadata>, bool)), TryFromProtoError> { Ok(( self.id.into_rust_if_some("ProtoSourceImport::id")?, ( self.source_instance_desc .into_rust_if_some("ProtoSourceImport::source_instance_desc")?, self.monotonic.into_rust()?, ), )) } } impl ProtoMapEntry<GlobalId, IndexImport> for ProtoIndexImport { fn from_rust<'a>( ( id, IndexImport { desc, typ, monotonic, usage_types, }, ): (&'a GlobalId, &'a IndexImport), ) -> Self { ProtoIndexImport { id: Some(id.into_proto()), index_desc: Some(desc.into_proto()), typ: Some(typ.into_proto()), monotonic: monotonic.into_proto(), usage_types: usage_types.as_ref().unwrap_or(&Vec::new()).into_proto(), has_usage_types: usage_types.is_some(), } } fn into_rust(self) -> Result<(GlobalId, IndexImport), TryFromProtoError> { Ok(( self.id.into_rust_if_some("ProtoIndex::id")?, IndexImport { desc: self .index_desc .into_rust_if_some("ProtoIndexImport::index_desc")?, typ: self.typ.into_rust_if_some("ProtoIndexImport::typ")?, monotonic: self.monotonic.into_rust()?, usage_types: if!self.has_usage_types.into_rust()? { None } else { Some(self.usage_types.into_rust()?) }, }, )) } } impl ProtoMapEntry<GlobalId, (IndexDesc, RelationType)> for ProtoIndexExport { fn from_rust<'a>( (id, (index_desc, typ)): (&'a GlobalId, &'a (IndexDesc, RelationType)), ) -> Self { ProtoIndexExport { id: Some(id.into_proto()), index_desc: Some(index_desc.into_proto()), typ: Some(typ.into_proto()), } } fn into_rust(self) -> Result<(GlobalId, (IndexDesc, RelationType)), TryFromProtoError> { Ok(( self.id.into_rust_if_some("ProtoIndexExport::id")?, ( self.index_desc .into_rust_if_some("ProtoIndexExport::index_desc")?, self.typ.into_rust_if_some("ProtoIndexExport::typ")?, ), )) } } impl ProtoMapEntry<GlobalId, ComputeSinkDesc<CollectionMetadata>> for ProtoSinkExport { fn from_rust<'a>( (id, sink_desc): (&'a GlobalId, &'a ComputeSinkDesc<CollectionMetadata>), ) -> Self { ProtoSinkExport { id: Some(id.into_proto()), sink_desc: Some(sink_desc.into_proto()), } } fn into_rust( self, ) -> Result<(GlobalId, ComputeSinkDesc<CollectionMetadata>), TryFromProtoError> { Ok(( self.id.into_rust_if_some("ProtoSinkExport::id")?, self.sink_desc .into_rust_if_some("ProtoSinkExport::sink_desc")?, )) } } impl Arbitrary for DataflowDescription<Plan, CollectionMetadata, mz_repr::Timestamp> { type Strategy = BoxedStrategy<Self>; type Parameters = (); fn arbitrary_with(_: Self::Parameters) -> Self::Strategy { any_dataflow_description().boxed() } } proptest::prop_compose! 
{ fn any_dataflow_description()( source_imports in proptest::collection::vec(any_source_import(), 1..3), index_imports in proptest::collection::vec(any_dataflow_index_import(), 1..3), objects_to_build in proptest::collection::vec(any::<BuildDesc<Plan>>(), 1..3), index_exports in proptest::collection::vec(any_dataflow_index_export(), 1..3), sink_descs in proptest::collection::vec( any::<(GlobalId, ComputeSinkDesc<CollectionMetadata, mz_repr::Timestamp>)>(), 1..3, ), as_of_some in any::<bool>(), as_of in proptest::collection::vec(any::<mz_repr::Timestamp>(), 1..5), debug_name in ".*", ) -> DataflowDescription<Plan, CollectionMetadata, mz_repr::Timestamp> { DataflowDescription { source_imports: BTreeMap::from_iter(source_imports.into_iter()), index_imports: BTreeMap::from_iter(index_imports.into_iter()), objects_to_build, index_exports: BTreeMap::from_iter(index_exports.into_iter()), sink_exports: BTreeMap::from_iter( sink_descs.into_iter(), ), as_of: if as_of_some { Some(Antichain::from(as_of))
{
    for (source_id, (source, _monotonic)) in self.source_imports.iter() {
        if source_id == id {
            return source.typ.arity();
        }
    }
    for IndexImport { desc, typ, .. } in self.index_imports.values() {
        if &desc.on_id == id {
            return typ.arity();
        }
    }
    for desc in self.objects_to_build.iter() {
        if &desc.id == id {
            return desc.plan.arity();
        }
    }
    panic!("GlobalId {} not found in DataflowDesc", id);
}
identifier_body
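The `identifier_body` middle recovered above is the body of `arity_of`, which resolves an id against three collections in a fixed order: source imports, then index imports (matched by the `on_id` of the collection each index arranges), then objects to build. A standalone sketch of that lookup order, with hypothetical simplified types standing in for the real `DataflowDescription` fields:

use std::collections::BTreeMap;

struct Typ {
    arity: usize,
}

fn arity_of(
    id: u64,
    source_imports: &BTreeMap<u64, Typ>,
    index_imports: &BTreeMap<u64, (u64, Typ)>, // index id -> (on_id, typ)
    objects_to_build: &[(u64, Typ)],
) -> usize {
    if let Some(source) = source_imports.get(&id) {
        return source.arity;
    }
    // Index imports are keyed by index id but queried by the id of the
    // collection they arrange, so this must be a scan rather than a map lookup.
    for (on_id, typ) in index_imports.values() {
        if *on_id == id {
            return typ.arity;
        }
    }
    for (build_id, typ) in objects_to_build {
        if *build_id == id {
            return typ.arity;
        }
    }
    panic!("id {} not found", id);
}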
dataflows.rs
sink, description of sink) pub sink_exports: BTreeMap<GlobalId, ComputeSinkDesc<S, T>>, /// An optional frontier to which inputs should be advanced. /// /// If this is set, it should override the default setting determined by /// the upper bound of `since` frontiers contributing to the dataflow. /// It is an error for this to be set to a frontier not beyond that default. pub as_of: Option<Antichain<T>>, /// Frontier beyond which the dataflow should not execute. /// Specifically, updates at times greater or equal to this frontier are suppressed. /// This is often set to `as_of + 1` to enable "batch" computations. pub until: Antichain<T>, /// Human readable name pub debug_name: String, } impl<T> DataflowDescription<Plan<T>, (), mz_repr::Timestamp> { /// Tests if the dataflow refers to a single timestamp, namely /// that `as_of` has a single coordinate and that the `until` /// value corresponds to the `as_of` value plus one. pub fn is_single_time(&self) -> bool { // TODO: this would be much easier to check if `until` was a strict lower bound, // and we would be testing that `until == as_of`. let Some(as_of) = self.as_of.as_ref() else { return false; }; !as_of.is_empty() && as_of .as_option() .and_then(|as_of| as_of.checked_add(1)) .as_ref() == self.until.as_option() } } impl<T> DataflowDescription<OptimizedMirRelationExpr, (), T> { /// Creates a new dataflow description with a human-readable name. pub fn new(name: String) -> Self { Self { source_imports: Default::default(), index_imports: Default::default(), objects_to_build: Vec::new(), index_exports: Default::default(), sink_exports: Default::default(), as_of: Default::default(), until: Antichain::new(), debug_name: name, } } /// Imports a previously exported index. /// /// This method makes available an index previously exported as `id`, identified /// to the query by `description` (which names the view the index arranges, and /// the keys by which it is arranged). pub fn import_index( &mut self, id: GlobalId, desc: IndexDesc, typ: RelationType, monotonic: bool, ) { self.index_imports.insert( id, IndexImport { desc, typ, monotonic, usage_types: None, }, ); } /// Imports a source and makes it available as `id`. pub fn import_source(&mut self, id: GlobalId, typ: RelationType, monotonic: bool) { // Import the source with no linear operators applied to it. // They may be populated by whole-dataflow optimization. self.source_imports.insert( id, ( SourceInstanceDesc { storage_metadata: (), arguments: SourceInstanceArguments { operators: None }, typ, }, monotonic, ), ); } /// Binds to `id` the relation expression `plan`. pub fn insert_plan(&mut self, id: GlobalId, plan: OptimizedMirRelationExpr) { self.objects_to_build.push(BuildDesc { id, plan }); } /// Exports as `id` an index described by `description`. /// /// Future uses of `import_index` in other dataflow descriptions may use `id`, /// as long as this dataflow has not been terminated in the meantime. pub fn export_index(&mut self, id: GlobalId, description: IndexDesc, on_type: RelationType) { // We first create a "view" named `id` that ensures that the // data are correctly arranged and available for export. self.insert_plan( id, OptimizedMirRelationExpr::declare_optimized(MirRelationExpr::ArrangeBy { input: Box::new(MirRelationExpr::global_get( description.on_id, on_type.clone(), )), keys: vec![description.key.clone()], }), ); self.index_exports.insert(id, (description, on_type)); } /// Exports as `id` a sink described by `description`. 
pub fn export_sink(&mut self, id: GlobalId, description: ComputeSinkDesc<(), T>) {
    self.sink_exports.insert(id, description);
}
/// Returns true iff `id` is already imported. pub fn is_imported(&self, id: &GlobalId) -> bool { self.objects_to_build.iter().any(|bd| &bd.id == id) || self.source_imports.keys().any(|i| i == id) } /// Assigns the `as_of` frontier to the supplied argument. /// /// This method allows the dataflow to indicate a frontier up through /// which all times should be advanced. This can be done for at least /// two reasons: 1. correctness and 2. performance. /// /// Correctness may require an `as_of` to ensure that historical detail /// is consolidated at representative times that do not present specific /// detail that is not specifically correct. For example, updates may be /// compacted to times that are no longer the source times, but instead /// some byproduct of when compaction was executed; we should not present /// those specific times as meaningfully different from other equivalent /// times. /// /// Performance may benefit from an aggressive `as_of` as it reduces the /// number of distinct moments at which collections vary. Differential /// dataflow will refresh its outputs at each time its inputs change and /// to moderate that we can minimize the volume of distinct input times /// as much as possible. /// /// Generally, one should consider setting `as_of` at least to the `since` /// frontiers of contributing data sources and as aggressively as the /// computation permits. pub fn set_as_of(&mut self, as_of: Antichain<T>) { self.as_of = Some(as_of); } /// The number of columns associated with an identifier in the dataflow. pub fn arity_of(&self, id: &GlobalId) -> usize { for (source_id, (source, _monotonic)) in self.source_imports.iter() { if source_id == id { return source.typ.arity(); } } for IndexImport { desc, typ,.. } in self.index_imports.values() { if &desc.on_id == id { return typ.arity(); } } for desc in self.objects_to_build.iter() { if &desc.id == id { return desc.plan.arity(); } } panic!("GlobalId {} not found in DataflowDesc", id); } /// Calls r and s on any sub-members of those types in self. Halts at the first error return. pub fn visit_children<R, S, E>(&mut self, r: R, s: S) -> Result<(), E> where R: Fn(&mut OptimizedMirRelationExpr) -> Result<(), E>, S: Fn(&mut MirScalarExpr) -> Result<(), E>, { for BuildDesc { plan,.. } in &mut self.objects_to_build { r(plan)?; } for (source_instance_desc, _) in self.source_imports.values_mut() { let Some(mfp) = source_instance_desc.arguments.operators.as_mut() else { continue; }; for expr in mfp.expressions.iter_mut() { s(expr)?; } for (_, expr) in mfp.predicates.iter_mut() { s(expr)?; } } Ok(()) } } impl<P, S, T> DataflowDescription<P, S, T> where P: CollectionPlan, { /// Identifiers of exported objects (indexes and sinks). pub fn export_ids(&self) -> impl Iterator<Item = GlobalId> + '_ { self.index_exports .keys() .chain(self.sink_exports.keys()) .cloned() } /// Identifiers of exported subscribe sinks. pub fn subscribe_ids(&self) -> impl Iterator<Item = GlobalId> + '_ { self.sink_exports .iter() .filter_map(|(id, desc)| match desc.connection { ComputeSinkConnection::Subscribe(_) => Some(*id), _ => None, }) } /// Returns the description of the object to build with the specified /// identifier. /// /// # Panics /// /// Panics if `id` is not present in `objects_to_build` exactly once. 
pub fn build_desc(&self, id: GlobalId) -> &BuildDesc<P> { let mut builds = self.objects_to_build.iter().filter(|build| build.id == id); let build = builds .next() .unwrap_or_else(|| panic!("object to build id {id} unexpectedly missing")); assert!(builds.next().is_none()); build } /// Computes the set of identifiers upon which the specified collection /// identifier depends. /// /// `collection_id` must specify a valid object in `objects_to_build`. /// /// This method includes identifiers for e.g. intermediate views, and should be filtered /// if one only wants sources and indexes. /// /// This method is safe for mutually recursive view definitions. pub fn depends_on(&self, collection_id: GlobalId) -> BTreeSet<GlobalId> { let mut out = BTreeSet::new(); self.depends_on_into(collection_id, &mut out); out } /// Like `depends_on`, but appends to an existing `BTreeSet`. pub fn depends_on_into(&self, collection_id: GlobalId, out: &mut BTreeSet<GlobalId>) { out.insert(collection_id); if self.source_imports.contains_key(&collection_id) { // The collection is provided by an imported source. Report the // dependency on the source. out.insert(collection_id); return; } // NOTE(benesch): we're not smart enough here to know *which* index // for the collection will be used, if one exists, so we have to report // the dependency on all of them. let mut found_index = false; for (index_id, IndexImport { desc,.. }) in &self.index_imports { if desc.on_id == collection_id { // The collection is provided by an imported index. Report the // dependency on the index. out.insert(*index_id); found_index = true; } } if found_index { return; } // The collection is not provided by a source or imported index. // It must be a collection whose plan we have handy. Recurse. let build = self.build_desc(collection_id); for id in build.plan.depends_on() { if!out.contains(&id) { self.depends_on_into(id, out) } } } /// Computes the set of imports upon which the specified collection depends. /// /// This method behaves like `depends_on` but filters out internal dependencies that are not /// included in the dataflow imports. pub fn depends_on_imports(&self, collection_id: GlobalId) -> BTreeSet<GlobalId> { let is_import = |id: &GlobalId| { self.source_imports.contains_key(id) || self.index_imports.contains_key(id) }; let deps = self.depends_on(collection_id); deps.into_iter().filter(is_import).collect() } } impl<P: PartialEq, S: PartialEq, T: timely::PartialOrder> DataflowDescription<P, S, T> { /// Determine if a dataflow description is compatible with this dataflow description. /// /// Compatible dataflows have equal exports, imports, and objects to build. The `as_of` of /// the receiver has to be less equal the `other` `as_of`. /// // TODO: The semantics of this function are only useful for command reconciliation at the moment. 
pub fn compatible_with(&self, other: &Self) -> bool { let equality = self.index_exports == other.index_exports && self.sink_exports == other.sink_exports && self.objects_to_build == other.objects_to_build && self.index_imports == other.index_imports && self.source_imports == other.source_imports; let partial = if let (Some(as_of), Some(other_as_of)) = (&self.as_of, &other.as_of) { timely::PartialOrder::less_equal(as_of, other_as_of) } else { false }; equality && partial } } impl RustType<ProtoDataflowDescription> for DataflowDescription<crate::plan::Plan, CollectionMetadata> { fn into_proto(&self) -> ProtoDataflowDescription { ProtoDataflowDescription { source_imports: self.source_imports.into_proto(), index_imports: self.index_imports.into_proto(), objects_to_build: self.objects_to_build.into_proto(), index_exports: self.index_exports.into_proto(), sink_exports: self.sink_exports.into_proto(), as_of: self.as_of.into_proto(), until: Some(self.until.into_proto()), debug_name: self.debug_name.clone(), } } fn from_proto(proto: ProtoDataflowDescription) -> Result<Self, TryFromProtoError> { Ok(DataflowDescription { source_imports: proto.source_imports.into_rust()?, index_imports: proto.index_imports.into_rust()?, objects_to_build: proto.objects_to_build.into_rust()?, index_exports: proto.index_exports.into_rust()?, sink_exports: proto.sink_exports.into_rust()?, as_of: proto.as_of.map(|x| x.into_rust()).transpose()?, until: proto .until .map(|x| x.into_rust()) .transpose()? .unwrap_or_else(Antichain::new), debug_name: proto.debug_name, }) } } impl ProtoMapEntry<GlobalId, (SourceInstanceDesc<CollectionMetadata>, bool)> for ProtoSourceImport { fn from_rust<'a>( entry: ( &'a GlobalId, &'a (SourceInstanceDesc<CollectionMetadata>, bool), ), ) -> Self { ProtoSourceImport { id: Some(entry.0.into_proto()), source_instance_desc: Some(entry.1.0.into_proto()), monotonic: entry.1.1.into_proto(), } } fn into_rust( self, ) -> Result<(GlobalId, (SourceInstanceDesc<CollectionMetadata>, bool)), TryFromProtoError> { Ok(( self.id.into_rust_if_some("ProtoSourceImport::id")?, ( self.source_instance_desc .into_rust_if_some("ProtoSourceImport::source_instance_desc")?, self.monotonic.into_rust()?, ), )) } } impl ProtoMapEntry<GlobalId, IndexImport> for ProtoIndexImport { fn from_rust<'a>( ( id, IndexImport { desc, typ, monotonic, usage_types, }, ): (&'a GlobalId, &'a IndexImport), ) -> Self { ProtoIndexImport { id: Some(id.into_proto()), index_desc: Some(desc.into_proto()), typ: Some(typ.into_proto()), monotonic: monotonic.into_proto(), usage_types: usage_types.as_ref().unwrap_or(&Vec::new()).into_proto(), has_usage_types: usage_types.is_some(), } } fn into_rust(self) -> Result<(GlobalId, IndexImport), TryFromProtoError> { Ok(( self.id.into_rust_if_some("ProtoIndex::id")?, IndexImport { desc: self .index_desc .into_rust_if_some("ProtoIndexImport::index_desc")?, typ: self.typ.into_rust_if_some("ProtoIndexImport::typ")?, monotonic: self.monotonic.into_rust()?, usage_types: if!self.has_usage_types.into_rust()? { None } else { Some(self.usage_types.into_rust()?) 
}, }, )) } } impl ProtoMapEntry<GlobalId, (IndexDesc, RelationType)> for ProtoIndexExport { fn from_rust<'a>( (id, (index_desc, typ)): (&'a GlobalId, &'a (IndexDesc, RelationType)), ) -> Self { ProtoIndexExport { id: Some(id.into_proto()), index_desc: Some(index_desc.into_proto()), typ: Some(typ.into_proto()), } } fn into_rust(self) -> Result<(GlobalId, (IndexDesc, RelationType)), TryFromProtoError> { Ok(( self.id.into_rust_if_some("ProtoIndexExport::id")?, ( self.index_desc .into_rust_if_some("ProtoIndexExport::index_desc")?, self.typ.into_rust_if_some("ProtoIndexExport::typ")?, ), )) } } impl ProtoMapEntry<GlobalId, ComputeSinkDesc<CollectionMetadata>> for ProtoSinkExport { fn from_rust<'a>( (id, sink_desc): (&'a GlobalId, &'a ComputeSinkDesc<CollectionMetadata>), ) -> Self { ProtoSinkExport { id: Some(id.into_proto()), sink_desc: Some(sink_desc.into_proto()), } } fn into_rust( self, ) -> Result<(GlobalId, ComputeSinkDesc<CollectionMetadata>), TryFromProtoError> { Ok(( self.id.into_rust_if_some("ProtoSinkExport::id")?, self.sink_desc .into_rust_if_some("ProtoSinkExport::sink_desc")?, )) } } impl Arbitrary for DataflowDescription<Plan, CollectionMetadata, mz_repr::Timestamp> { type Strategy = BoxedStrategy<Self>; type Parameters = (); fn arbitrary_with(_: Self::Parameters) -> Self::Strategy { any_dataflow_description().boxed() } } proptest::prop_compose! { fn any_dataflow_description()( source_imports in proptest::collection::vec(any_source_import(), 1..3), index_imports in proptest::collection::vec(any_dataflow_index_import(), 1..3), objects_to_build in proptest::collection::vec(any::<BuildDesc<Plan>>(), 1..3), index_exports in proptest::collection::vec(any_dataflow_index_export(), 1..3), sink_descs in proptest::collection::vec( any::<(GlobalId, ComputeSinkDesc<CollectionMetadata, mz_repr::Timestamp>)>(), 1..3, ), as_of_some in any::<bool>(), as_of in proptest::collection::vec(any::<mz_repr::Timestamp>(), 1..5), debug_name in ".*", ) -> DataflowDescription<Plan, CollectionMetadata, mz_repr::Timestamp> { DataflowDescription { source_imports: BTreeMap::from_iter(source_imports.into_iter()), index_imports: BTreeMap::from_iter(index_imports.into_iter()), objects_to_build, index_exports: BTreeMap::from_iter(index_exports.into_iter()), sink_exports: BTreeMap::from_iter( sink_descs.into_iter(), ), as_of: if as_of_some { Some(Antichain::from(as_of))
random_line_split
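The suffix of this record ends inside the `Arbitrary` implementation and the `any_dataflow_description()` strategy. Strategies like this typically feed protobuf round-trip property tests. A hedged sketch of such a test, assuming the `RustType` conversion pair defined above is in scope and that `DataflowDescription` carries the usual `PartialEq` and `Debug` derives; the test name is made up:

use proptest::prelude::*;

proptest! {
    #[test]
    fn dataflow_description_protobuf_roundtrip(expect in any_dataflow_description()) {
        let proto = expect.into_proto();
        let actual = DataflowDescription::from_proto(proto)
            .expect("a freshly encoded proto should decode");
        prop_assert_eq!(actual, expect);
    }
}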
rpc.rs
time::timeout; use crate::comm::{ConnectionRegistration, RegisterWorker}; use crate::hwstats::WorkerHwStateMessage; use crate::internal::common::resources::map::ResourceMap; use crate::internal::common::resources::{Allocation, AllocationValue}; use crate::internal::common::WrappedRcRefCell; use crate::internal::messages::worker::{ FromWorkerMessage, StealResponseMsg, TaskResourceAllocation, TaskResourceAllocationValue, ToWorkerMessage, WorkerOverview, WorkerRegistrationResponse, WorkerStopReason, }; use crate::internal::server::rpc::ConnectionDescriptor; use crate::internal::transfer::auth::{ do_authentication, forward_queue_to_sealed_sink, open_message, seal_message, serialize, }; use crate::internal::transfer::transport::make_protocol_builder; use crate::internal::worker::comm::WorkerComm; use crate::internal::worker::configuration::{ sync_worker_configuration, OverviewConfiguration, ServerLostPolicy, WorkerConfiguration, }; use crate::internal::worker::hwmonitor::HwSampler; use crate::internal::worker::reactor::run_task; use crate::internal::worker::state::{WorkerState, WorkerStateRef}; use crate::internal::worker::task::Task; use crate::launcher::TaskLauncher; use crate::WorkerId; use futures::future::Either; use tokio::sync::Notify; async fn start_listener() -> crate::Result<(TcpListener, u16)> { let address = SocketAddr::new(Ipv4Addr::UNSPECIFIED.into(), 0); let listener = TcpListener::bind(address).await?; let port = { let socketaddr = listener.local_addr()?; socketaddr.port() }; log::info!("Listening on port {}", port); Ok((listener, port)) } async fn connect_to_server(addresses: &[SocketAddr]) -> crate::Result<(TcpStream, SocketAddr)> { log::info!( "Connecting to server (candidate addresses = {:?})", addresses ); let max_attempts = 20; for _ in 0..max_attempts { match TcpStream::connect(addresses).await { Ok(stream) => { let address = stream.peer_addr()?; log::debug!("Connected to server at {address:?}"); return Ok((stream, address)); } Err(e) => { log::error!("Could not connect to server, error: {}", e); sleep(Duration::from_secs(2)).await; } } } Result::Err(crate::Error::GenericError( "Server could not be connected".into(), )) } pub async fn connect_to_server_and_authenticate( server_addresses: &[SocketAddr], secret_key: &Option<Arc<SecretKey>>, ) -> crate::Result<ConnectionDescriptor> { let (stream, address) = connect_to_server(server_addresses).await?; let (mut writer, mut reader) = make_protocol_builder().new_framed(stream).split(); let (sealer, opener) = do_authentication( 0, "worker".to_string(), "server".to_string(), secret_key.clone(), &mut writer, &mut reader, ) .await?; Ok(ConnectionDescriptor { address, receiver: reader, sender: writer, sealer, opener, }) } // Maximum time to wait for running tasks to be shutdown when worker ends. const MAX_WAIT_FOR_RUNNING_TASKS_SHUTDOWN: Duration = Duration::from_secs(5); /// Connects to the server and starts a message receiving loop. /// The worker will attempt to clean up after itself once it's stopped or once stop_flag is notified. pub async fn run_worker( scheduler_addresses: &[SocketAddr], mut configuration: WorkerConfiguration, secret_key: Option<Arc<SecretKey>>, launcher_setup: Box<dyn TaskLauncher>, stop_flag: Arc<Notify>, ) -> crate::Result<( (WorkerId, WorkerConfiguration), impl Future<Output = crate::Result<()>>, )> { let (_listener, port) = start_listener().await?; configuration.listen_address = format!("{}:{}", configuration.hostname, port); let ConnectionDescriptor { mut sender, mut receiver, mut opener, mut sealer, .. 
} = connect_to_server_and_authenticate(scheduler_addresses, &secret_key).await?; { let message = ConnectionRegistration::Worker(RegisterWorker { configuration: configuration.clone(), }); let data = serialize(&message)?.into(); sender.send(seal_message(&mut sealer, data)).await?; } let (queue_sender, queue_receiver) = tokio::sync::mpsc::unbounded_channel::<Bytes>(); let heartbeat_interval = configuration.heartbeat_interval; let overview_configuration = configuration.overview_configuration.clone(); let time_limit = configuration.time_limit; let (worker_id, state, start_task_notify) = { match timeout(Duration::from_secs(15), receiver.next()).await { Ok(Some(data)) => { let WorkerRegistrationResponse { worker_id, other_workers, resource_names, server_idle_timeout, server_uid, } = open_message(&mut opener, &data?)?; sync_worker_configuration(&mut configuration, server_idle_timeout); let start_task_notify = Rc::new(Notify::new()); let comm = WorkerComm::new(queue_sender, start_task_notify.clone()); let state_ref = WorkerStateRef::new( comm, worker_id, configuration.clone(), secret_key, ResourceMap::from_vec(resource_names), launcher_setup, server_uid, ); { let mut state = state_ref.get_mut(); for worker_info in other_workers { state.new_worker(worker_info); } } (worker_id, state_ref, start_task_notify) } Ok(None) => panic!("Connection closed without receiving registration response"), Err(_) => panic!("Did not receive worker registration response"), } }; let heartbeat_fut = heartbeat_process(heartbeat_interval, state.clone()); let idle_timeout_fut = match configuration.idle_timeout { Some(timeout) => Either::Left(idle_timeout_process(timeout, state.clone())), None => Either::Right(futures::future::pending()), }; let overview_fut = match overview_configuration { None => Either::Left(futures::future::pending()), Some(configuration) => Either::Right(send_overview_loop(state.clone(), configuration)), }; let time_limit_fut = match time_limit { None => Either::Left(futures::future::pending::<()>()), Some(d) => Either::Right(tokio::time::sleep(d)), }; let future = async move { let try_start_tasks = task_starter_process(state.clone(), start_task_notify); let send_loop = forward_queue_to_sealed_sink(queue_receiver, sender, sealer); tokio::pin! { let send_loop = send_loop; let try_start_tasks = try_start_tasks; } let result: crate::Result<Option<FromWorkerMessage>> = tokio::select! { r = worker_message_loop(state.clone(), receiver, opener) => { log::debug!("Server read connection has disconnected"); r.map(|_| None) } r = &mut send_loop => { log::debug!("Server write connection has disconnected"); r.map_err(|e| e.into()).map(|_| None) }, _ = time_limit_fut => { log::info!("Time limit reached"); Ok(Some(FromWorkerMessage::Stop(WorkerStopReason::TimeLimitReached))) } _ = idle_timeout_fut => { log::info!("Idle timeout reached"); Ok(Some(FromWorkerMessage::Stop(WorkerStopReason::IdleTimeout))) } _ = stop_flag.notified() => { log::info!("Worker received an external stop notification"); Ok(Some(FromWorkerMessage::Stop(WorkerStopReason::Interrupted))) } _ = &mut try_start_tasks => { unreachable!() } _ = heartbeat_fut => { unreachable!() } _ = overview_fut => { unreachable!() } }; // Handle sending stop info to the server and finishing running tasks gracefully. 
let result = match result { Ok(Some(msg)) => { // Worker wants to end gracefully, send message to the server { state.get_mut().comm().send_message_to_server(msg); state.get_mut().comm().drop_sender(); } send_loop.await?; Ok(()) } Ok(None) => { // Graceful shutdown from server Ok(()) } Err(e) => { // Server has disconnected tokio::select! { _ = &mut try_start_tasks => { unreachable!() } r = finish_tasks_on_server_lost(state.clone()) => r } Err(e) } }; // At this point, there can still be some tasks that are running. // We cancel them here to make sure that we do not leak their spawned processes, if possible. // The futures of the tasks are scheduled onto the current tokio Runtime using spawn_local, // therefore we do not need to await any specific future to drive them forward. // try_start_tasks is not being polled, therefore no new tasks should be started. cancel_running_tasks_on_worker_end(state).await; result }; // Provide a local task set for spawning futures let future = async move { let set = tokio::task::LocalSet::new(); set.run_until(future).await }; Ok(((worker_id, configuration), future)) } async fn finish_tasks_on_server_lost(state: WorkerStateRef) { let on_server_lost = state.get().configuration.on_server_lost.clone(); match on_server_lost { ServerLostPolicy::Stop => {} ServerLostPolicy::FinishRunning => { let notify = { let mut state = state.get_mut(); state.drop_non_running_tasks(); if!state.is_empty() { let notify = Rc::new(Notify::new()); state.comm().set_idle_worker_notify(notify.clone()); Some(notify) } else { None } }; if let Some(notify) = notify { log::info!("Waiting for finishing running tasks"); notify.notified().await; log::info!("All running tasks were finished"); } else { log::info!("No running tasks remain") } } } } async fn cancel_running_tasks_on_worker_end(state: WorkerStateRef) { let notify = { let mut state = state.get_mut(); state.drop_non_running_tasks(); for task in state.running_tasks.clone() { state.cancel_task(task); } if state.running_tasks.is_empty() { return; } let notify = Rc::new(Notify::new()); state.comm().set_idle_worker_notify(notify.clone()); notify }; log::info!("Waiting for stopping running tasks"); match tokio::time::timeout(MAX_WAIT_FOR_RUNNING_TASKS_SHUTDOWN, notify.notified()).await { Ok(_) => { log::info!("All running tasks were stopped"); } Err(_) => { log::info!("Timed out while waiting for running tasks to stop"); } } } /// Tries to start tasks after a new task appears or some task finishes. async fn task_starter_process(state_ref: WrappedRcRefCell<WorkerState>, notify: Rc<Notify>) { loop { notify.notified().await; let mut state = state_ref.get_mut(); state.start_task_scheduled = false; let remaining_time = if let Some(limit) = state.configuration.time_limit { let life_time = std::time::Instant::now() - state.start_time; if life_time >= limit { log::debug!("Trying to start a task after time limit"); break; } Some(limit - life_time) } else { None }; loop { let (task_map, ready_task_queue) = state.borrow_tasks_and_queue(); let allocations = ready_task_queue.try_start_tasks(task_map, remaining_time); if allocations.is_empty() { break; } for (task_id, allocation, resource_index) in allocations { run_task(&mut state, &state_ref, task_id, allocation, resource_index); } } } } /// Repeatedly sends a heartbeat message to the server. 
async fn heartbeat_process(heartbeat_interval: Duration, state_ref: WrappedRcRefCell<WorkerState>) { let mut interval = tokio::time::interval(heartbeat_interval); loop { interval.tick().await; state_ref .get_mut() .comm() .send_message_to_server(FromWorkerMessage::Heartbeat); log::debug!("Heartbeat sent"); } } /// Runs until an idle timeout happens. /// Idle timeout occurs when the worker doesn't have anything to do for the specified duration. async fn idle_timeout_process(idle_timeout: Duration, state_ref: WrappedRcRefCell<WorkerState>)
pub(crate) fn process_worker_message(state: &mut WorkerState, message: ToWorkerMessage) -> bool { match message { ToWorkerMessage::ComputeTask(msg) => { log::debug!("Task assigned: {}", msg.id); let task = Task::new(msg); state.add_task(task); } ToWorkerMessage::StealTasks(msg) => { log::debug!("Steal {} attempts", msg.ids.len()); let responses: Vec<_> = msg .ids .iter() .map(|task_id| { let response = state.steal_task(*task_id); log::debug!("Steal attempt: {}, response {:?}", task_id, response); (*task_id, response) }) .collect(); let message = FromWorkerMessage::StealResponse(StealResponseMsg { responses }); state.comm().send_message_to_server(message); } ToWorkerMessage::CancelTasks(msg) => { for task_id in msg.ids { state.cancel_task(task_id); } } ToWorkerMessage::NewWorker(msg) => { state.new_worker(msg); } ToWorkerMessage::LostWorker(worker_id) => { state.remove_worker(worker_id); } ToWorkerMessage::SetReservation(on_off) => { state.reservation = on_off; if!on_off { state.reset_idle_timer(); } } ToWorkerMessage::Stop => { log::info!("Received stop command"); return true; } } false } /// Runs until there are messages coming from the server. async fn worker_message_loop( state_ref: WorkerStateRef, mut stream: impl Stream<Item = Result<BytesMut, std::io::Error>> + Unpin, mut opener: Option<StreamOpener>, ) -> crate::Result<()> { while let Some(data) = stream.next().await { let data = data?; let message: ToWorkerMessage = open_message(&mut opener, &data)?; let mut state = state_ref.get_mut(); if process_worker_message(&mut state, message) { return Ok(()); } } log::debug!("Connection to server is closed"); Err("Server connection closed".into()) } async fn send_overview_loop( state_ref: WorkerStateRef, configuration: OverviewConfiguration, ) -> crate::Result<()> { let (tx, mut rx) = tokio::sync::mpsc::channel(1); let OverviewConfiguration { send_interval, gpu_families, } = configuration; // Fetching the HW state performs blocking I/O, therefore we should do it in a separate thread. // tokio::task::spawn_blocking is not used because it would need mutable access to a sampler, // which shouldn't be created again and again. std::thread::spawn(move || -> crate::Result<()> { let mut sampler = HwSampler::init(gpu_families)?; loop { std::thread::sleep(send_interval); let hw_state = sampler.fetch_hw_state()?; if let Err(error) = tx.blocking_send(hw_state) { log::error!("Cannot send HW state to overview loop: {error:?}"); break; } } Ok(()) }); let mut poll_interval = tokio::time::interval(send_interval); loop { poll_interval.tick().await; if let Some(hw_state) = rx.recv().await { let mut worker_state = state_ref.get_mut(); let message = FromWorkerMessage::Overview(WorkerOverview { id: worker_state.worker_id, running_tasks: worker_state .running_tasks .iter() .map(|&task_id| { let task = worker_state.get_task(task_id); let allocation: &Allocation = task.resource_allocation().unwrap(); ( task_id, resource_allocation_to_msg(allocation, worker_state.get_resource_map()), ) // TODO: Modify this when more cpus are allowed }) .collect(), hw_state: Some(WorkerHwStateMessage { state: hw_state }), }); worker_state.comm().send_message_to_server(message); } } } fn resource_allocation_to_msg( allocation: &Allocation, resource_map: &ResourceMap, ) -> TaskResourceAllocation { TaskResourceAllocation { resources: allocation .resources .iter() .map( |alloc| crate::internal::messages::worker::ResourceAllocation { resource: resource_map .get_name(alloc.resource) .unwrap_or("unknown")
{
    let mut interval = tokio::time::interval(Duration::from_secs(1));
    loop {
        interval.tick().await;
        let state = state_ref.get();
        if !state.has_tasks() && !state.reservation {
            let elapsed = state.last_task_finish_time.elapsed();
            if elapsed > idle_timeout {
                break;
            }
        }
    }
}
identifier_body
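The `identifier_body` middle above is the polling loop of `idle_timeout_process`: tick once per second and break once the worker has had no tasks, and no reservation, for longer than `idle_timeout`. A self-contained sketch of the same watchdog pattern; `IdleState` is a made-up stand-in for the worker state used in this file:

use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant};

struct IdleState {
    has_tasks: bool,
    reservation: bool,
    last_task_finish_time: Instant,
}

async fn idle_watchdog(idle_timeout: Duration, state: Arc<Mutex<IdleState>>) {
    let mut interval = tokio::time::interval(Duration::from_secs(1));
    loop {
        interval.tick().await;
        let state = state.lock().unwrap();
        // A reservation keeps the worker alive even with an empty queue.
        if !state.has_tasks
            && !state.reservation
            && state.last_task_finish_time.elapsed() > idle_timeout
        {
            break; // the caller interprets this return as "idle timeout reached"
        }
    }
}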
rpc.rs
::time::timeout; use crate::comm::{ConnectionRegistration, RegisterWorker}; use crate::hwstats::WorkerHwStateMessage;
use crate::internal::common::WrappedRcRefCell; use crate::internal::messages::worker::{ FromWorkerMessage, StealResponseMsg, TaskResourceAllocation, TaskResourceAllocationValue, ToWorkerMessage, WorkerOverview, WorkerRegistrationResponse, WorkerStopReason, }; use crate::internal::server::rpc::ConnectionDescriptor; use crate::internal::transfer::auth::{ do_authentication, forward_queue_to_sealed_sink, open_message, seal_message, serialize, }; use crate::internal::transfer::transport::make_protocol_builder; use crate::internal::worker::comm::WorkerComm; use crate::internal::worker::configuration::{ sync_worker_configuration, OverviewConfiguration, ServerLostPolicy, WorkerConfiguration, }; use crate::internal::worker::hwmonitor::HwSampler; use crate::internal::worker::reactor::run_task; use crate::internal::worker::state::{WorkerState, WorkerStateRef}; use crate::internal::worker::task::Task; use crate::launcher::TaskLauncher; use crate::WorkerId; use futures::future::Either; use tokio::sync::Notify; async fn start_listener() -> crate::Result<(TcpListener, u16)> { let address = SocketAddr::new(Ipv4Addr::UNSPECIFIED.into(), 0); let listener = TcpListener::bind(address).await?; let port = { let socketaddr = listener.local_addr()?; socketaddr.port() }; log::info!("Listening on port {}", port); Ok((listener, port)) } async fn connect_to_server(addresses: &[SocketAddr]) -> crate::Result<(TcpStream, SocketAddr)> { log::info!( "Connecting to server (candidate addresses = {:?})", addresses ); let max_attempts = 20; for _ in 0..max_attempts { match TcpStream::connect(addresses).await { Ok(stream) => { let address = stream.peer_addr()?; log::debug!("Connected to server at {address:?}"); return Ok((stream, address)); } Err(e) => { log::error!("Could not connect to server, error: {}", e); sleep(Duration::from_secs(2)).await; } } } Result::Err(crate::Error::GenericError( "Server could not be connected".into(), )) } pub async fn connect_to_server_and_authenticate( server_addresses: &[SocketAddr], secret_key: &Option<Arc<SecretKey>>, ) -> crate::Result<ConnectionDescriptor> { let (stream, address) = connect_to_server(server_addresses).await?; let (mut writer, mut reader) = make_protocol_builder().new_framed(stream).split(); let (sealer, opener) = do_authentication( 0, "worker".to_string(), "server".to_string(), secret_key.clone(), &mut writer, &mut reader, ) .await?; Ok(ConnectionDescriptor { address, receiver: reader, sender: writer, sealer, opener, }) } // Maximum time to wait for running tasks to be shutdown when worker ends. const MAX_WAIT_FOR_RUNNING_TASKS_SHUTDOWN: Duration = Duration::from_secs(5); /// Connects to the server and starts a message receiving loop. /// The worker will attempt to clean up after itself once it's stopped or once stop_flag is notified. pub async fn run_worker( scheduler_addresses: &[SocketAddr], mut configuration: WorkerConfiguration, secret_key: Option<Arc<SecretKey>>, launcher_setup: Box<dyn TaskLauncher>, stop_flag: Arc<Notify>, ) -> crate::Result<( (WorkerId, WorkerConfiguration), impl Future<Output = crate::Result<()>>, )> { let (_listener, port) = start_listener().await?; configuration.listen_address = format!("{}:{}", configuration.hostname, port); let ConnectionDescriptor { mut sender, mut receiver, mut opener, mut sealer, .. 
} = connect_to_server_and_authenticate(scheduler_addresses, &secret_key).await?; { let message = ConnectionRegistration::Worker(RegisterWorker { configuration: configuration.clone(), }); let data = serialize(&message)?.into(); sender.send(seal_message(&mut sealer, data)).await?; } let (queue_sender, queue_receiver) = tokio::sync::mpsc::unbounded_channel::<Bytes>(); let heartbeat_interval = configuration.heartbeat_interval; let overview_configuration = configuration.overview_configuration.clone(); let time_limit = configuration.time_limit; let (worker_id, state, start_task_notify) = { match timeout(Duration::from_secs(15), receiver.next()).await { Ok(Some(data)) => { let WorkerRegistrationResponse { worker_id, other_workers, resource_names, server_idle_timeout, server_uid, } = open_message(&mut opener, &data?)?; sync_worker_configuration(&mut configuration, server_idle_timeout); let start_task_notify = Rc::new(Notify::new()); let comm = WorkerComm::new(queue_sender, start_task_notify.clone()); let state_ref = WorkerStateRef::new( comm, worker_id, configuration.clone(), secret_key, ResourceMap::from_vec(resource_names), launcher_setup, server_uid, ); { let mut state = state_ref.get_mut(); for worker_info in other_workers { state.new_worker(worker_info); } } (worker_id, state_ref, start_task_notify) } Ok(None) => panic!("Connection closed without receiving registration response"), Err(_) => panic!("Did not receive worker registration response"), } }; let heartbeat_fut = heartbeat_process(heartbeat_interval, state.clone()); let idle_timeout_fut = match configuration.idle_timeout { Some(timeout) => Either::Left(idle_timeout_process(timeout, state.clone())), None => Either::Right(futures::future::pending()), }; let overview_fut = match overview_configuration { None => Either::Left(futures::future::pending()), Some(configuration) => Either::Right(send_overview_loop(state.clone(), configuration)), }; let time_limit_fut = match time_limit { None => Either::Left(futures::future::pending::<()>()), Some(d) => Either::Right(tokio::time::sleep(d)), }; let future = async move { let try_start_tasks = task_starter_process(state.clone(), start_task_notify); let send_loop = forward_queue_to_sealed_sink(queue_receiver, sender, sealer); tokio::pin! { let send_loop = send_loop; let try_start_tasks = try_start_tasks; } let result: crate::Result<Option<FromWorkerMessage>> = tokio::select! { r = worker_message_loop(state.clone(), receiver, opener) => { log::debug!("Server read connection has disconnected"); r.map(|_| None) } r = &mut send_loop => { log::debug!("Server write connection has disconnected"); r.map_err(|e| e.into()).map(|_| None) }, _ = time_limit_fut => { log::info!("Time limit reached"); Ok(Some(FromWorkerMessage::Stop(WorkerStopReason::TimeLimitReached))) } _ = idle_timeout_fut => { log::info!("Idle timeout reached"); Ok(Some(FromWorkerMessage::Stop(WorkerStopReason::IdleTimeout))) } _ = stop_flag.notified() => { log::info!("Worker received an external stop notification"); Ok(Some(FromWorkerMessage::Stop(WorkerStopReason::Interrupted))) } _ = &mut try_start_tasks => { unreachable!() } _ = heartbeat_fut => { unreachable!() } _ = overview_fut => { unreachable!() } }; // Handle sending stop info to the server and finishing running tasks gracefully. 
let result = match result { Ok(Some(msg)) => { // Worker wants to end gracefully, send message to the server { state.get_mut().comm().send_message_to_server(msg); state.get_mut().comm().drop_sender(); } send_loop.await?; Ok(()) } Ok(None) => { // Graceful shutdown from server Ok(()) } Err(e) => { // Server has disconnected tokio::select! { _ = &mut try_start_tasks => { unreachable!() } r = finish_tasks_on_server_lost(state.clone()) => r } Err(e) } }; // At this point, there can still be some tasks that are running. // We cancel them here to make sure that we do not leak their spawned processes, if possible. // The futures of the tasks are scheduled onto the current tokio Runtime using spawn_local, // therefore we do not need to await any specific future to drive them forward. // try_start_tasks is not being polled, therefore no new tasks should be started. cancel_running_tasks_on_worker_end(state).await; result }; // Provide a local task set for spawning futures let future = async move { let set = tokio::task::LocalSet::new(); set.run_until(future).await }; Ok(((worker_id, configuration), future)) } async fn finish_tasks_on_server_lost(state: WorkerStateRef) { let on_server_lost = state.get().configuration.on_server_lost.clone(); match on_server_lost { ServerLostPolicy::Stop => {} ServerLostPolicy::FinishRunning => { let notify = { let mut state = state.get_mut(); state.drop_non_running_tasks(); if!state.is_empty() { let notify = Rc::new(Notify::new()); state.comm().set_idle_worker_notify(notify.clone()); Some(notify) } else { None } }; if let Some(notify) = notify { log::info!("Waiting for finishing running tasks"); notify.notified().await; log::info!("All running tasks were finished"); } else { log::info!("No running tasks remain") } } } } async fn cancel_running_tasks_on_worker_end(state: WorkerStateRef) { let notify = { let mut state = state.get_mut(); state.drop_non_running_tasks(); for task in state.running_tasks.clone() { state.cancel_task(task); } if state.running_tasks.is_empty() { return; } let notify = Rc::new(Notify::new()); state.comm().set_idle_worker_notify(notify.clone()); notify }; log::info!("Waiting for stopping running tasks"); match tokio::time::timeout(MAX_WAIT_FOR_RUNNING_TASKS_SHUTDOWN, notify.notified()).await { Ok(_) => { log::info!("All running tasks were stopped"); } Err(_) => { log::info!("Timed out while waiting for running tasks to stop"); } } } /// Tries to start tasks after a new task appears or some task finishes. async fn task_starter_process(state_ref: WrappedRcRefCell<WorkerState>, notify: Rc<Notify>) { loop { notify.notified().await; let mut state = state_ref.get_mut(); state.start_task_scheduled = false; let remaining_time = if let Some(limit) = state.configuration.time_limit { let life_time = std::time::Instant::now() - state.start_time; if life_time >= limit { log::debug!("Trying to start a task after time limit"); break; } Some(limit - life_time) } else { None }; loop { let (task_map, ready_task_queue) = state.borrow_tasks_and_queue(); let allocations = ready_task_queue.try_start_tasks(task_map, remaining_time); if allocations.is_empty() { break; } for (task_id, allocation, resource_index) in allocations { run_task(&mut state, &state_ref, task_id, allocation, resource_index); } } } } /// Repeatedly sends a heartbeat message to the server. 
async fn heartbeat_process(heartbeat_interval: Duration, state_ref: WrappedRcRefCell<WorkerState>) { let mut interval = tokio::time::interval(heartbeat_interval); loop { interval.tick().await; state_ref .get_mut() .comm() .send_message_to_server(FromWorkerMessage::Heartbeat); log::debug!("Heartbeat sent"); } } /// Runs until an idle timeout happens. /// Idle timeout occurs when the worker doesn't have anything to do for the specified duration. async fn idle_timeout_process(idle_timeout: Duration, state_ref: WrappedRcRefCell<WorkerState>) { let mut interval = tokio::time::interval(Duration::from_secs(1)); loop { interval.tick().await; let state = state_ref.get(); if!state.has_tasks() &&!state.reservation { let elapsed = state.last_task_finish_time.elapsed(); if elapsed > idle_timeout { break; } } } } pub(crate) fn process_worker_message(state: &mut WorkerState, message: ToWorkerMessage) -> bool { match message { ToWorkerMessage::ComputeTask(msg) => { log::debug!("Task assigned: {}", msg.id); let task = Task::new(msg); state.add_task(task); } ToWorkerMessage::StealTasks(msg) => { log::debug!("Steal {} attempts", msg.ids.len()); let responses: Vec<_> = msg .ids .iter() .map(|task_id| { let response = state.steal_task(*task_id); log::debug!("Steal attempt: {}, response {:?}", task_id, response); (*task_id, response) }) .collect(); let message = FromWorkerMessage::StealResponse(StealResponseMsg { responses }); state.comm().send_message_to_server(message); } ToWorkerMessage::CancelTasks(msg) => { for task_id in msg.ids { state.cancel_task(task_id); } } ToWorkerMessage::NewWorker(msg) => { state.new_worker(msg); } ToWorkerMessage::LostWorker(worker_id) => { state.remove_worker(worker_id); } ToWorkerMessage::SetReservation(on_off) => { state.reservation = on_off; if!on_off { state.reset_idle_timer(); } } ToWorkerMessage::Stop => { log::info!("Received stop command"); return true; } } false } /// Runs until there are messages coming from the server. async fn worker_message_loop( state_ref: WorkerStateRef, mut stream: impl Stream<Item = Result<BytesMut, std::io::Error>> + Unpin, mut opener: Option<StreamOpener>, ) -> crate::Result<()> { while let Some(data) = stream.next().await { let data = data?; let message: ToWorkerMessage = open_message(&mut opener, &data)?; let mut state = state_ref.get_mut(); if process_worker_message(&mut state, message) { return Ok(()); } } log::debug!("Connection to server is closed"); Err("Server connection closed".into()) } async fn send_overview_loop( state_ref: WorkerStateRef, configuration: OverviewConfiguration, ) -> crate::Result<()> { let (tx, mut rx) = tokio::sync::mpsc::channel(1); let OverviewConfiguration { send_interval, gpu_families, } = configuration; // Fetching the HW state performs blocking I/O, therefore we should do it in a separate thread. // tokio::task::spawn_blocking is not used because it would need mutable access to a sampler, // which shouldn't be created again and again. 
std::thread::spawn(move || -> crate::Result<()> { let mut sampler = HwSampler::init(gpu_families)?; loop { std::thread::sleep(send_interval); let hw_state = sampler.fetch_hw_state()?; if let Err(error) = tx.blocking_send(hw_state) { log::error!("Cannot send HW state to overview loop: {error:?}"); break; } } Ok(()) }); let mut poll_interval = tokio::time::interval(send_interval); loop { poll_interval.tick().await; if let Some(hw_state) = rx.recv().await { let mut worker_state = state_ref.get_mut(); let message = FromWorkerMessage::Overview(WorkerOverview { id: worker_state.worker_id, running_tasks: worker_state .running_tasks .iter() .map(|&task_id| { let task = worker_state.get_task(task_id); let allocation: &Allocation = task.resource_allocation().unwrap(); ( task_id, resource_allocation_to_msg(allocation, worker_state.get_resource_map()), ) // TODO: Modify this when more cpus are allowed }) .collect(), hw_state: Some(WorkerHwStateMessage { state: hw_state }), }); worker_state.comm().send_message_to_server(message); } } } fn resource_allocation_to_msg( allocation: &Allocation, resource_map: &ResourceMap, ) -> TaskResourceAllocation { TaskResourceAllocation { resources: allocation .resources .iter() .map( |alloc| crate::internal::messages::worker::ResourceAllocation { resource: resource_map .get_name(alloc.resource) .unwrap_or("unknown")
use crate::internal::common::resources::map::ResourceMap;
use crate::internal::common::resources::{Allocation, AllocationValue};
random_line_split
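The rpc.rs code in this record includes `connect_to_server`, which retries `TcpStream::connect` up to 20 times with a flat two-second delay before giving up. The same retry loop in isolation, as a sketch; the error handling is simplified to `io::Error` instead of the crate's own error type:

use std::net::SocketAddr;
use std::time::Duration;
use tokio::net::TcpStream;

async fn connect_with_retry(addresses: &[SocketAddr]) -> std::io::Result<TcpStream> {
    const MAX_ATTEMPTS: u32 = 20;
    const RETRY_DELAY: Duration = Duration::from_secs(2);
    let mut last_err = None;
    for _ in 0..MAX_ATTEMPTS {
        match TcpStream::connect(addresses).await {
            Ok(stream) => return Ok(stream),
            Err(e) => {
                // Flat backoff, mirroring the loop above; an exponential delay
                // would be a natural variation here.
                last_err = Some(e);
                tokio::time::sleep(RETRY_DELAY).await;
            }
        }
    }
    Err(last_err.expect("at least one connection attempt was made"))
}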
rpc.rs
time::timeout; use crate::comm::{ConnectionRegistration, RegisterWorker}; use crate::hwstats::WorkerHwStateMessage; use crate::internal::common::resources::map::ResourceMap; use crate::internal::common::resources::{Allocation, AllocationValue}; use crate::internal::common::WrappedRcRefCell; use crate::internal::messages::worker::{ FromWorkerMessage, StealResponseMsg, TaskResourceAllocation, TaskResourceAllocationValue, ToWorkerMessage, WorkerOverview, WorkerRegistrationResponse, WorkerStopReason, }; use crate::internal::server::rpc::ConnectionDescriptor; use crate::internal::transfer::auth::{ do_authentication, forward_queue_to_sealed_sink, open_message, seal_message, serialize, }; use crate::internal::transfer::transport::make_protocol_builder; use crate::internal::worker::comm::WorkerComm; use crate::internal::worker::configuration::{ sync_worker_configuration, OverviewConfiguration, ServerLostPolicy, WorkerConfiguration, }; use crate::internal::worker::hwmonitor::HwSampler; use crate::internal::worker::reactor::run_task; use crate::internal::worker::state::{WorkerState, WorkerStateRef}; use crate::internal::worker::task::Task; use crate::launcher::TaskLauncher; use crate::WorkerId; use futures::future::Either; use tokio::sync::Notify; async fn start_listener() -> crate::Result<(TcpListener, u16)> { let address = SocketAddr::new(Ipv4Addr::UNSPECIFIED.into(), 0); let listener = TcpListener::bind(address).await?; let port = { let socketaddr = listener.local_addr()?; socketaddr.port() }; log::info!("Listening on port {}", port); Ok((listener, port)) } async fn connect_to_server(addresses: &[SocketAddr]) -> crate::Result<(TcpStream, SocketAddr)> { log::info!( "Connecting to server (candidate addresses = {:?})", addresses ); let max_attempts = 20; for _ in 0..max_attempts { match TcpStream::connect(addresses).await { Ok(stream) => { let address = stream.peer_addr()?; log::debug!("Connected to server at {address:?}"); return Ok((stream, address)); } Err(e) => { log::error!("Could not connect to server, error: {}", e); sleep(Duration::from_secs(2)).await; } } } Result::Err(crate::Error::GenericError( "Server could not be connected".into(), )) } pub async fn connect_to_server_and_authenticate( server_addresses: &[SocketAddr], secret_key: &Option<Arc<SecretKey>>, ) -> crate::Result<ConnectionDescriptor> { let (stream, address) = connect_to_server(server_addresses).await?; let (mut writer, mut reader) = make_protocol_builder().new_framed(stream).split(); let (sealer, opener) = do_authentication( 0, "worker".to_string(), "server".to_string(), secret_key.clone(), &mut writer, &mut reader, ) .await?; Ok(ConnectionDescriptor { address, receiver: reader, sender: writer, sealer, opener, }) } // Maximum time to wait for running tasks to be shutdown when worker ends. const MAX_WAIT_FOR_RUNNING_TASKS_SHUTDOWN: Duration = Duration::from_secs(5); /// Connects to the server and starts a message receiving loop. /// The worker will attempt to clean up after itself once it's stopped or once stop_flag is notified. pub async fn run_worker( scheduler_addresses: &[SocketAddr], mut configuration: WorkerConfiguration, secret_key: Option<Arc<SecretKey>>, launcher_setup: Box<dyn TaskLauncher>, stop_flag: Arc<Notify>, ) -> crate::Result<( (WorkerId, WorkerConfiguration), impl Future<Output = crate::Result<()>>, )> { let (_listener, port) = start_listener().await?; configuration.listen_address = format!("{}:{}", configuration.hostname, port); let ConnectionDescriptor { mut sender, mut receiver, mut opener, mut sealer, .. 
} = connect_to_server_and_authenticate(scheduler_addresses, &secret_key).await?; { let message = ConnectionRegistration::Worker(RegisterWorker { configuration: configuration.clone(), }); let data = serialize(&message)?.into(); sender.send(seal_message(&mut sealer, data)).await?; } let (queue_sender, queue_receiver) = tokio::sync::mpsc::unbounded_channel::<Bytes>(); let heartbeat_interval = configuration.heartbeat_interval; let overview_configuration = configuration.overview_configuration.clone(); let time_limit = configuration.time_limit; let (worker_id, state, start_task_notify) = { match timeout(Duration::from_secs(15), receiver.next()).await { Ok(Some(data)) => { let WorkerRegistrationResponse { worker_id, other_workers, resource_names, server_idle_timeout, server_uid, } = open_message(&mut opener, &data?)?; sync_worker_configuration(&mut configuration, server_idle_timeout); let start_task_notify = Rc::new(Notify::new()); let comm = WorkerComm::new(queue_sender, start_task_notify.clone()); let state_ref = WorkerStateRef::new( comm, worker_id, configuration.clone(), secret_key, ResourceMap::from_vec(resource_names), launcher_setup, server_uid, ); { let mut state = state_ref.get_mut(); for worker_info in other_workers { state.new_worker(worker_info); } } (worker_id, state_ref, start_task_notify) } Ok(None) => panic!("Connection closed without receiving registration response"), Err(_) => panic!("Did not receive worker registration response"), } }; let heartbeat_fut = heartbeat_process(heartbeat_interval, state.clone()); let idle_timeout_fut = match configuration.idle_timeout { Some(timeout) => Either::Left(idle_timeout_process(timeout, state.clone())), None => Either::Right(futures::future::pending()), }; let overview_fut = match overview_configuration { None => Either::Left(futures::future::pending()), Some(configuration) => Either::Right(send_overview_loop(state.clone(), configuration)), }; let time_limit_fut = match time_limit { None => Either::Left(futures::future::pending::<()>()), Some(d) => Either::Right(tokio::time::sleep(d)), }; let future = async move { let try_start_tasks = task_starter_process(state.clone(), start_task_notify); let send_loop = forward_queue_to_sealed_sink(queue_receiver, sender, sealer); tokio::pin! { let send_loop = send_loop; let try_start_tasks = try_start_tasks; } let result: crate::Result<Option<FromWorkerMessage>> = tokio::select! { r = worker_message_loop(state.clone(), receiver, opener) => { log::debug!("Server read connection has disconnected"); r.map(|_| None) } r = &mut send_loop => { log::debug!("Server write connection has disconnected"); r.map_err(|e| e.into()).map(|_| None) }, _ = time_limit_fut => { log::info!("Time limit reached"); Ok(Some(FromWorkerMessage::Stop(WorkerStopReason::TimeLimitReached))) } _ = idle_timeout_fut => { log::info!("Idle timeout reached"); Ok(Some(FromWorkerMessage::Stop(WorkerStopReason::IdleTimeout))) } _ = stop_flag.notified() => { log::info!("Worker received an external stop notification"); Ok(Some(FromWorkerMessage::Stop(WorkerStopReason::Interrupted))) } _ = &mut try_start_tasks => { unreachable!() } _ = heartbeat_fut => { unreachable!() } _ = overview_fut => { unreachable!() } }; // Handle sending stop info to the server and finishing running tasks gracefully. 
let result = match result { Ok(Some(msg)) => { // Worker wants to end gracefully, send message to the server { state.get_mut().comm().send_message_to_server(msg); state.get_mut().comm().drop_sender(); } send_loop.await?; Ok(()) } Ok(None) => { // Graceful shutdown from server Ok(()) } Err(e) => { // Server has disconnected tokio::select! { _ = &mut try_start_tasks => { unreachable!() } r = finish_tasks_on_server_lost(state.clone()) => r } Err(e) } }; // At this point, there can still be some tasks that are running. // We cancel them here to make sure that we do not leak their spawned processes, if possible. // The futures of the tasks are scheduled onto the current tokio Runtime using spawn_local, // therefore we do not need to await any specific future to drive them forward. // try_start_tasks is not being polled, therefore no new tasks should be started. cancel_running_tasks_on_worker_end(state).await; result }; // Provide a local task set for spawning futures let future = async move { let set = tokio::task::LocalSet::new(); set.run_until(future).await }; Ok(((worker_id, configuration), future)) } async fn finish_tasks_on_server_lost(state: WorkerStateRef) { let on_server_lost = state.get().configuration.on_server_lost.clone(); match on_server_lost { ServerLostPolicy::Stop => {} ServerLostPolicy::FinishRunning => { let notify = { let mut state = state.get_mut(); state.drop_non_running_tasks(); if !state.is_empty() { let notify = Rc::new(Notify::new()); state.comm().set_idle_worker_notify(notify.clone()); Some(notify) } else { None } }; if let Some(notify) = notify { log::info!("Waiting for finishing running tasks"); notify.notified().await; log::info!("All running tasks were finished"); } else { log::info!("No running tasks remain") } } } } async fn cancel_running_tasks_on_worker_end(state: WorkerStateRef) { let notify = { let mut state = state.get_mut(); state.drop_non_running_tasks(); for task in state.running_tasks.clone() { state.cancel_task(task); } if state.running_tasks.is_empty() { return; } let notify = Rc::new(Notify::new()); state.comm().set_idle_worker_notify(notify.clone()); notify }; log::info!("Waiting for stopping running tasks"); match tokio::time::timeout(MAX_WAIT_FOR_RUNNING_TASKS_SHUTDOWN, notify.notified()).await { Ok(_) => { log::info!("All running tasks were stopped"); } Err(_) => { log::info!("Timed out while waiting for running tasks to stop"); } } } /// Tries to start tasks after a new task appears or some task finishes. async fn task_starter_process(state_ref: WrappedRcRefCell<WorkerState>, notify: Rc<Notify>) { loop { notify.notified().await; let mut state = state_ref.get_mut(); state.start_task_scheduled = false; let remaining_time = if let Some(limit) = state.configuration.time_limit { let life_time = std::time::Instant::now() - state.start_time; if life_time >= limit { log::debug!("Trying to start a task after time limit"); break; } Some(limit - life_time) } else { None }; loop { let (task_map, ready_task_queue) = state.borrow_tasks_and_queue(); let allocations = ready_task_queue.try_start_tasks(task_map, remaining_time); if allocations.is_empty() { break; } for (task_id, allocation, resource_index) in allocations { run_task(&mut state, &state_ref, task_id, allocation, resource_index); } } } } /// Repeatedly sends a heartbeat message to the server. 
async fn heartbeat_process(heartbeat_interval: Duration, state_ref: WrappedRcRefCell<WorkerState>) { let mut interval = tokio::time::interval(heartbeat_interval); loop { interval.tick().await; state_ref .get_mut() .comm() .send_message_to_server(FromWorkerMessage::Heartbeat); log::debug!("Heartbeat sent"); } } /// Runs until an idle timeout happens. /// Idle timeout occurs when the worker doesn't have anything to do for the specified duration. async fn idle_timeout_process(idle_timeout: Duration, state_ref: WrappedRcRefCell<WorkerState>) { let mut interval = tokio::time::interval(Duration::from_secs(1)); loop { interval.tick().await; let state = state_ref.get(); if !state.has_tasks() && !state.reservation { let elapsed = state.last_task_finish_time.elapsed(); if elapsed > idle_timeout { break; } } } } pub(crate) fn process_worker_message(state: &mut WorkerState, message: ToWorkerMessage) -> bool { match message { ToWorkerMessage::ComputeTask(msg) => { log::debug!("Task assigned: {}", msg.id); let task = Task::new(msg); state.add_task(task); } ToWorkerMessage::StealTasks(msg) => { log::debug!("Steal {} attempts", msg.ids.len()); let responses: Vec<_> = msg .ids .iter() .map(|task_id| { let response = state.steal_task(*task_id); log::debug!("Steal attempt: {}, response {:?}", task_id, response); (*task_id, response) }) .collect(); let message = FromWorkerMessage::StealResponse(StealResponseMsg { responses }); state.comm().send_message_to_server(message); } ToWorkerMessage::CancelTasks(msg) => { for task_id in msg.ids { state.cancel_task(task_id); } } ToWorkerMessage::NewWorker(msg) => { state.new_worker(msg); } ToWorkerMessage::LostWorker(worker_id) => { state.remove_worker(worker_id); } ToWorkerMessage::SetReservation(on_off) => { state.reservation = on_off; if !on_off
} ToWorkerMessage::Stop => { log::info!("Received stop command"); return true; } } false } /// Runs until there are messages coming from the server. async fn worker_message_loop( state_ref: WorkerStateRef, mut stream: impl Stream<Item = Result<BytesMut, std::io::Error>> + Unpin, mut opener: Option<StreamOpener>, ) -> crate::Result<()> { while let Some(data) = stream.next().await { let data = data?; let message: ToWorkerMessage = open_message(&mut opener, &data)?; let mut state = state_ref.get_mut(); if process_worker_message(&mut state, message) { return Ok(()); } } log::debug!("Connection to server is closed"); Err("Server connection closed".into()) } async fn send_overview_loop( state_ref: WorkerStateRef, configuration: OverviewConfiguration, ) -> crate::Result<()> { let (tx, mut rx) = tokio::sync::mpsc::channel(1); let OverviewConfiguration { send_interval, gpu_families, } = configuration; // Fetching the HW state performs blocking I/O, therefore we should do it in a separate thread. // tokio::task::spawn_blocking is not used because it would need mutable access to a sampler, // which shouldn't be created again and again. std::thread::spawn(move || -> crate::Result<()> { let mut sampler = HwSampler::init(gpu_families)?; loop { std::thread::sleep(send_interval); let hw_state = sampler.fetch_hw_state()?; if let Err(error) = tx.blocking_send(hw_state) { log::error!("Cannot send HW state to overview loop: {error:?}"); break; } } Ok(()) }); let mut poll_interval = tokio::time::interval(send_interval); loop { poll_interval.tick().await; if let Some(hw_state) = rx.recv().await { let mut worker_state = state_ref.get_mut(); let message = FromWorkerMessage::Overview(WorkerOverview { id: worker_state.worker_id, running_tasks: worker_state .running_tasks .iter() .map(|&task_id| { let task = worker_state.get_task(task_id); let allocation: &Allocation = task.resource_allocation().unwrap(); ( task_id, resource_allocation_to_msg(allocation, worker_state.get_resource_map()), ) // TODO: Modify this when more cpus are allowed }) .collect(), hw_state: Some(WorkerHwStateMessage { state: hw_state }), }); worker_state.comm().send_message_to_server(message); } } } fn resource_allocation_to_msg( allocation: &Allocation, resource_map: &ResourceMap, ) -> TaskResourceAllocation { TaskResourceAllocation { resources: allocation .resources .iter() .map( |alloc| crate::internal::messages::worker::ResourceAllocation { resource: resource_map .get_name(alloc.resource) .unwrap_or("unknown")
{ state.reset_idle_timer(); }
conditional_block
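The conditional completed in this sample gates an idle-timer reset on the reservation flag: lifting a reservation must restart the inactivity clock, or the worker could time out the instant the reservation ends. A minimal std-only sketch of that pattern, assuming an illustrative `Worker` type that is not the crate's API:

use std::time::{Duration, Instant};

struct Worker {
    reservation: bool,
    last_activity: Instant,
}

impl Worker {
    fn set_reservation(&mut self, on_off: bool) {
        self.reservation = on_off;
        if !on_off {
            // Restart the inactivity clock so lifting the reservation
            // does not trigger an immediate idle timeout.
            self.last_activity = Instant::now();
        }
    }

    fn is_idle_for(&self, timeout: Duration) -> bool {
        // A reserved worker never counts as idle.
        !self.reservation && self.last_activity.elapsed() > timeout
    }
}

fn main() {
    let mut w = Worker { reservation: true, last_activity: Instant::now() };
    w.set_reservation(false);
    assert!(!w.is_idle_for(Duration::from_secs(1)));
}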
rpc.rs
time::timeout; use crate::comm::{ConnectionRegistration, RegisterWorker}; use crate::hwstats::WorkerHwStateMessage; use crate::internal::common::resources::map::ResourceMap; use crate::internal::common::resources::{Allocation, AllocationValue}; use crate::internal::common::WrappedRcRefCell; use crate::internal::messages::worker::{ FromWorkerMessage, StealResponseMsg, TaskResourceAllocation, TaskResourceAllocationValue, ToWorkerMessage, WorkerOverview, WorkerRegistrationResponse, WorkerStopReason, }; use crate::internal::server::rpc::ConnectionDescriptor; use crate::internal::transfer::auth::{ do_authentication, forward_queue_to_sealed_sink, open_message, seal_message, serialize, }; use crate::internal::transfer::transport::make_protocol_builder; use crate::internal::worker::comm::WorkerComm; use crate::internal::worker::configuration::{ sync_worker_configuration, OverviewConfiguration, ServerLostPolicy, WorkerConfiguration, }; use crate::internal::worker::hwmonitor::HwSampler; use crate::internal::worker::reactor::run_task; use crate::internal::worker::state::{WorkerState, WorkerStateRef}; use crate::internal::worker::task::Task; use crate::launcher::TaskLauncher; use crate::WorkerId; use futures::future::Either; use tokio::sync::Notify; async fn start_listener() -> crate::Result<(TcpListener, u16)> { let address = SocketAddr::new(Ipv4Addr::UNSPECIFIED.into(), 0); let listener = TcpListener::bind(address).await?; let port = { let socketaddr = listener.local_addr()?; socketaddr.port() }; log::info!("Listening on port {}", port); Ok((listener, port)) } async fn connect_to_server(addresses: &[SocketAddr]) -> crate::Result<(TcpStream, SocketAddr)> { log::info!( "Connecting to server (candidate addresses = {:?})", addresses ); let max_attempts = 20; for _ in 0..max_attempts { match TcpStream::connect(addresses).await { Ok(stream) => { let address = stream.peer_addr()?; log::debug!("Connected to server at {address:?}"); return Ok((stream, address)); } Err(e) => { log::error!("Could not connect to server, error: {}", e); sleep(Duration::from_secs(2)).await; } } } Result::Err(crate::Error::GenericError( "Server could not be connected".into(), )) } pub async fn connect_to_server_and_authenticate( server_addresses: &[SocketAddr], secret_key: &Option<Arc<SecretKey>>, ) -> crate::Result<ConnectionDescriptor> { let (stream, address) = connect_to_server(server_addresses).await?; let (mut writer, mut reader) = make_protocol_builder().new_framed(stream).split(); let (sealer, opener) = do_authentication( 0, "worker".to_string(), "server".to_string(), secret_key.clone(), &mut writer, &mut reader, ) .await?; Ok(ConnectionDescriptor { address, receiver: reader, sender: writer, sealer, opener, }) } // Maximum time to wait for running tasks to be shutdown when worker ends. const MAX_WAIT_FOR_RUNNING_TASKS_SHUTDOWN: Duration = Duration::from_secs(5); /// Connects to the server and starts a message receiving loop. /// The worker will attempt to clean up after itself once it's stopped or once stop_flag is notified. pub async fn run_worker( scheduler_addresses: &[SocketAddr], mut configuration: WorkerConfiguration, secret_key: Option<Arc<SecretKey>>, launcher_setup: Box<dyn TaskLauncher>, stop_flag: Arc<Notify>, ) -> crate::Result<( (WorkerId, WorkerConfiguration), impl Future<Output = crate::Result<()>>, )> { let (_listener, port) = start_listener().await?; configuration.listen_address = format!("{}:{}", configuration.hostname, port); let ConnectionDescriptor { mut sender, mut receiver, mut opener, mut sealer, .. 
} = connect_to_server_and_authenticate(scheduler_addresses, &secret_key).await?; { let message = ConnectionRegistration::Worker(RegisterWorker { configuration: configuration.clone(), }); let data = serialize(&message)?.into(); sender.send(seal_message(&mut sealer, data)).await?; } let (queue_sender, queue_receiver) = tokio::sync::mpsc::unbounded_channel::<Bytes>(); let heartbeat_interval = configuration.heartbeat_interval; let overview_configuration = configuration.overview_configuration.clone(); let time_limit = configuration.time_limit; let (worker_id, state, start_task_notify) = { match timeout(Duration::from_secs(15), receiver.next()).await { Ok(Some(data)) => { let WorkerRegistrationResponse { worker_id, other_workers, resource_names, server_idle_timeout, server_uid, } = open_message(&mut opener, &data?)?; sync_worker_configuration(&mut configuration, server_idle_timeout); let start_task_notify = Rc::new(Notify::new()); let comm = WorkerComm::new(queue_sender, start_task_notify.clone()); let state_ref = WorkerStateRef::new( comm, worker_id, configuration.clone(), secret_key, ResourceMap::from_vec(resource_names), launcher_setup, server_uid, ); { let mut state = state_ref.get_mut(); for worker_info in other_workers { state.new_worker(worker_info); } } (worker_id, state_ref, start_task_notify) } Ok(None) => panic!("Connection closed without receiving registration response"), Err(_) => panic!("Did not receive worker registration response"), } }; let heartbeat_fut = heartbeat_process(heartbeat_interval, state.clone()); let idle_timeout_fut = match configuration.idle_timeout { Some(timeout) => Either::Left(idle_timeout_process(timeout, state.clone())), None => Either::Right(futures::future::pending()), }; let overview_fut = match overview_configuration { None => Either::Left(futures::future::pending()), Some(configuration) => Either::Right(send_overview_loop(state.clone(), configuration)), }; let time_limit_fut = match time_limit { None => Either::Left(futures::future::pending::<()>()), Some(d) => Either::Right(tokio::time::sleep(d)), }; let future = async move { let try_start_tasks = task_starter_process(state.clone(), start_task_notify); let send_loop = forward_queue_to_sealed_sink(queue_receiver, sender, sealer); tokio::pin! { let send_loop = send_loop; let try_start_tasks = try_start_tasks; } let result: crate::Result<Option<FromWorkerMessage>> = tokio::select! { r = worker_message_loop(state.clone(), receiver, opener) => { log::debug!("Server read connection has disconnected"); r.map(|_| None) } r = &mut send_loop => { log::debug!("Server write connection has disconnected"); r.map_err(|e| e.into()).map(|_| None) }, _ = time_limit_fut => { log::info!("Time limit reached"); Ok(Some(FromWorkerMessage::Stop(WorkerStopReason::TimeLimitReached))) } _ = idle_timeout_fut => { log::info!("Idle timeout reached"); Ok(Some(FromWorkerMessage::Stop(WorkerStopReason::IdleTimeout))) } _ = stop_flag.notified() => { log::info!("Worker received an external stop notification"); Ok(Some(FromWorkerMessage::Stop(WorkerStopReason::Interrupted))) } _ = &mut try_start_tasks => { unreachable!() } _ = heartbeat_fut => { unreachable!() } _ = overview_fut => { unreachable!() } }; // Handle sending stop info to the server and finishing running tasks gracefully. 
let result = match result { Ok(Some(msg)) => { // Worker wants to end gracefully, send message to the server { state.get_mut().comm().send_message_to_server(msg); state.get_mut().comm().drop_sender(); } send_loop.await?; Ok(()) } Ok(None) => { // Graceful shutdown from server Ok(()) } Err(e) => { // Server has disconnected tokio::select! { _ = &mut try_start_tasks => { unreachable!() } r = finish_tasks_on_server_lost(state.clone()) => r } Err(e) } }; // At this point, there can still be some tasks that are running. // We cancel them here to make sure that we do not leak their spawned processes, if possible. // The futures of the tasks are scheduled onto the current tokio Runtime using spawn_local, // therefore we do not need to await any specific future to drive them forward. // try_start_tasks is not being polled, therefore no new tasks should be started. cancel_running_tasks_on_worker_end(state).await; result }; // Provide a local task set for spawning futures let future = async move { let set = tokio::task::LocalSet::new(); set.run_until(future).await }; Ok(((worker_id, configuration), future)) } async fn finish_tasks_on_server_lost(state: WorkerStateRef) { let on_server_lost = state.get().configuration.on_server_lost.clone(); match on_server_lost { ServerLostPolicy::Stop => {} ServerLostPolicy::FinishRunning => { let notify = { let mut state = state.get_mut(); state.drop_non_running_tasks(); if !state.is_empty() { let notify = Rc::new(Notify::new()); state.comm().set_idle_worker_notify(notify.clone()); Some(notify) } else { None } }; if let Some(notify) = notify { log::info!("Waiting for finishing running tasks"); notify.notified().await; log::info!("All running tasks were finished"); } else { log::info!("No running tasks remain") } } } } async fn cancel_running_tasks_on_worker_end(state: WorkerStateRef) { let notify = { let mut state = state.get_mut(); state.drop_non_running_tasks(); for task in state.running_tasks.clone() { state.cancel_task(task); } if state.running_tasks.is_empty() { return; } let notify = Rc::new(Notify::new()); state.comm().set_idle_worker_notify(notify.clone()); notify }; log::info!("Waiting for stopping running tasks"); match tokio::time::timeout(MAX_WAIT_FOR_RUNNING_TASKS_SHUTDOWN, notify.notified()).await { Ok(_) => { log::info!("All running tasks were stopped"); } Err(_) => { log::info!("Timed out while waiting for running tasks to stop"); } } } /// Tries to start tasks after a new task appears or some task finishes. async fn task_starter_process(state_ref: WrappedRcRefCell<WorkerState>, notify: Rc<Notify>) { loop { notify.notified().await; let mut state = state_ref.get_mut(); state.start_task_scheduled = false; let remaining_time = if let Some(limit) = state.configuration.time_limit { let life_time = std::time::Instant::now() - state.start_time; if life_time >= limit { log::debug!("Trying to start a task after time limit"); break; } Some(limit - life_time) } else { None }; loop { let (task_map, ready_task_queue) = state.borrow_tasks_and_queue(); let allocations = ready_task_queue.try_start_tasks(task_map, remaining_time); if allocations.is_empty() { break; } for (task_id, allocation, resource_index) in allocations { run_task(&mut state, &state_ref, task_id, allocation, resource_index); } } } } /// Repeatedly sends a heartbeat message to the server. 
async fn heartbeat_process(heartbeat_interval: Duration, state_ref: WrappedRcRefCell<WorkerState>) { let mut interval = tokio::time::interval(heartbeat_interval); loop { interval.tick().await; state_ref .get_mut() .comm() .send_message_to_server(FromWorkerMessage::Heartbeat); log::debug!("Heartbeat sent"); } } /// Runs until an idle timeout happens. /// Idle timeout occurs when the worker doesn't have anything to do for the specified duration. async fn
(idle_timeout: Duration, state_ref: WrappedRcRefCell<WorkerState>) { let mut interval = tokio::time::interval(Duration::from_secs(1)); loop { interval.tick().await; let state = state_ref.get(); if !state.has_tasks() && !state.reservation { let elapsed = state.last_task_finish_time.elapsed(); if elapsed > idle_timeout { break; } } } } pub(crate) fn process_worker_message(state: &mut WorkerState, message: ToWorkerMessage) -> bool { match message { ToWorkerMessage::ComputeTask(msg) => { log::debug!("Task assigned: {}", msg.id); let task = Task::new(msg); state.add_task(task); } ToWorkerMessage::StealTasks(msg) => { log::debug!("Steal {} attempts", msg.ids.len()); let responses: Vec<_> = msg .ids .iter() .map(|task_id| { let response = state.steal_task(*task_id); log::debug!("Steal attempt: {}, response {:?}", task_id, response); (*task_id, response) }) .collect(); let message = FromWorkerMessage::StealResponse(StealResponseMsg { responses }); state.comm().send_message_to_server(message); } ToWorkerMessage::CancelTasks(msg) => { for task_id in msg.ids { state.cancel_task(task_id); } } ToWorkerMessage::NewWorker(msg) => { state.new_worker(msg); } ToWorkerMessage::LostWorker(worker_id) => { state.remove_worker(worker_id); } ToWorkerMessage::SetReservation(on_off) => { state.reservation = on_off; if !on_off { state.reset_idle_timer(); } } ToWorkerMessage::Stop => { log::info!("Received stop command"); return true; } } false } /// Runs until there are messages coming from the server. async fn worker_message_loop( state_ref: WorkerStateRef, mut stream: impl Stream<Item = Result<BytesMut, std::io::Error>> + Unpin, mut opener: Option<StreamOpener>, ) -> crate::Result<()> { while let Some(data) = stream.next().await { let data = data?; let message: ToWorkerMessage = open_message(&mut opener, &data)?; let mut state = state_ref.get_mut(); if process_worker_message(&mut state, message) { return Ok(()); } } log::debug!("Connection to server is closed"); Err("Server connection closed".into()) } async fn send_overview_loop( state_ref: WorkerStateRef, configuration: OverviewConfiguration, ) -> crate::Result<()> { let (tx, mut rx) = tokio::sync::mpsc::channel(1); let OverviewConfiguration { send_interval, gpu_families, } = configuration; // Fetching the HW state performs blocking I/O, therefore we should do it in a separate thread. // tokio::task::spawn_blocking is not used because it would need mutable access to a sampler, // which shouldn't be created again and again. 
std::thread::spawn(move || -> crate::Result<()> { let mut sampler = HwSampler::init(gpu_families)?; loop { std::thread::sleep(send_interval); let hw_state = sampler.fetch_hw_state()?; if let Err(error) = tx.blocking_send(hw_state) { log::error!("Cannot send HW state to overview loop: {error:?}"); break; } } Ok(()) }); let mut poll_interval = tokio::time::interval(send_interval); loop { poll_interval.tick().await; if let Some(hw_state) = rx.recv().await { let mut worker_state = state_ref.get_mut(); let message = FromWorkerMessage::Overview(WorkerOverview { id: worker_state.worker_id, running_tasks: worker_state .running_tasks .iter() .map(|&task_id| { let task = worker_state.get_task(task_id); let allocation: &Allocation = task.resource_allocation().unwrap(); ( task_id, resource_allocation_to_msg(allocation, worker_state.get_resource_map()), ) // TODO: Modify this when more cpus are allowed }) .collect(), hw_state: Some(WorkerHwStateMessage { state: hw_state }), }); worker_state.comm().send_message_to_server(message); } } } fn resource_allocation_to_msg( allocation: &Allocation, resource_map: &ResourceMap, ) -> TaskResourceAllocation { TaskResourceAllocation { resources: allocation .resources .iter() .map( |alloc| crate::internal::messages::worker::ResourceAllocation { resource: resource_map .get_name(alloc.resource) .unwrap_or("unknown")
idle_timeout_process
identifier_name
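A recurring pattern in this file is bridging blocking work into the async runtime: the overview loop pins a blocking hardware sampler to a dedicated OS thread and hands samples over a bounded channel, with blocking_send providing backpressure on the synchronous side. A minimal sketch of the same handoff, assuming tokio with the `macros`, `rt`, and `sync` features; `sample()` is a stand-in for the real sampler, not the crate's API:

use std::time::Duration;

// Stand-in for blocking hardware I/O such as reading sensors.
fn sample() -> u64 { 42 }

#[tokio::main(flavor = "current_thread")]
async fn main() {
    // Capacity 1: the sampler blocks until the async side has consumed.
    let (tx, mut rx) = tokio::sync::mpsc::channel::<u64>(1);
    // The sampler owns its OS thread because it blocks; blocking_send
    // is the synchronous entry point into the async channel.
    std::thread::spawn(move || loop {
        std::thread::sleep(Duration::from_millis(100));
        if tx.blocking_send(sample()).is_err() {
            break; // receiver dropped, stop sampling
        }
    });
    // The async side never blocks the runtime while waiting.
    if let Some(v) = rx.recv().await {
        println!("sample = {v}");
    }
}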
srt.rs
Salt | /// | ... | /// +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+ /// | Wrap | /// | ... | /// +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+ /// ``` /// #[derive(Clone, Eq, PartialEq)] pub struct KeyingMaterialMessage { pub pt: PacketType, // TODO: i think this is always KeyingMaterial.... pub key_flags: KeyFlags, pub keki: u32, pub cipher: CipherType, pub auth: Auth, pub salt: Vec<u8>, pub wrapped_keys: Vec<u8>, } impl From<GroupType> for u8 { fn from(from: GroupType) -> u8 { match from { GroupType::Undefined => 0, GroupType::Broadcast => 1, GroupType::MainBackup => 2, GroupType::Balancing => 3, GroupType::Multicast => 4, GroupType::Unrecognized(u) => u, } } } impl From<u8> for GroupType { fn from(from: u8) -> GroupType { match from { 0 => GroupType::Undefined, 1 => GroupType::Broadcast, 2 => GroupType::MainBackup, 3 => GroupType::Balancing, 4 => GroupType::Multicast, u => GroupType::Unrecognized(u), } } } impl fmt::Debug for KeyingMaterialMessage { fn
(&self, f: &mut Formatter<'_>) -> fmt::Result { f.debug_struct("KeyingMaterialMessage") .field("pt", &self.pt) .field("key_flags", &self.key_flags) .field("keki", &self.keki) .field("cipher", &self.cipher) .field("auth", &self.auth) .finish() } } #[derive(Debug, Copy, Clone, Eq, PartialEq)] pub enum Auth { None = 0, } impl TryFrom<u8> for Auth { type Error = PacketParseError; fn try_from(value: u8) -> Result<Self, Self::Error> { match value { 0 => Ok(Auth::None), e => Err(PacketParseError::BadAuth(e)), } } } #[derive(Debug, Copy, Clone, Eq, PartialEq)] pub enum StreamEncapsulation { Udp = 1, Srt = 2, } impl TryFrom<u8> for StreamEncapsulation { type Error = PacketParseError; fn try_from(value: u8) -> Result<Self, Self::Error> { Ok(match value { 1 => StreamEncapsulation::Udp, 2 => StreamEncapsulation::Srt, e => return Err(PacketParseError::BadStreamEncapsulation(e)), }) } } #[derive(Debug, Copy, Clone, Eq, PartialEq)] // see htcryp_msg.h:43... // 7: Reserved to discriminate MPEG-TS packet (0x47=sync byte). pub enum PacketType { MediaStream = 1, // Media Stream Message (MSmsg) KeyingMaterial = 2, // Keying Material Message (KMmsg) } bitflags! { #[derive(Clone, Copy, Eq, PartialEq, Debug)] pub struct KeyFlags : u8 { const EVEN = 0b01; const ODD = 0b10; } } impl TryFrom<u8> for PacketType { type Error = PacketParseError; fn try_from(value: u8) -> Result<Self, Self::Error> { match value { 1 => Ok(PacketType::MediaStream), 2 => Ok(PacketType::KeyingMaterial), err => Err(PacketParseError::BadKeyPacketType(err)), } } } /// from https://github.com/Haivision/srt/blob/2ef4ef003c2006df1458de6d47fbe3d2338edf69/haicrypt/hcrypt_msg.h#L121-L124 #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub enum CipherType { None = 0, Ecb = 1, Ctr = 2, Cbc = 3, } /// The SRT handshake object #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub struct SrtHandshake { /// The SRT version /// Serialized just as the u32 that SrtVersion serialized to pub version: SrtVersion, /// SRT connection init flags pub flags: SrtShakeFlags, /// The peer's TSBPD latency (latency to send at) /// This is serialized as the upper 16 bits of the third 32-bit word /// source: https://github.com/Haivision/srt/blob/4f7f2beb2e1e306111b9b11402049a90cb6d3787/srtcore/core.cpp#L1341-L1353 pub send_latency: Duration, /// The TSBPD latency (latency to recv at) /// This is serialized as the lower 16 bits of the third 32-bit word /// see csrtcc.cpp:132 in the reference implementation pub recv_latency: Duration, } bitflags! { #[derive(Copy, Clone, Debug, Eq, PartialEq)] pub struct SrtShakeFlags: u32 { /// Timestamp-based Packet delivery real-time data sender const TSBPDSND = 0x1; /// Timestamp-based Packet delivery real-time data receiver const TSBPDRCV = 0x2; /// HaiCrypt AES-128/192/256-CTR /// also represents if it supports the encryption flags in the data packet const HAICRYPT = 0x4; /// Drop real-time data packets too late to be processed in time const TLPKTDROP = 0x8; /// Periodic NAK report const NAKREPORT = 0x10; /// One bit in payload packet msgno is "retransmitted" flag const REXMITFLG = 0x20; /// This entity supports stream ID packets const STREAM = 0x40; /// Again not sure... 
TODO: const PACKET_FILTER = 0x80; // currently implemented flags const SUPPORTED = Self::TSBPDSND.bits() | Self::TSBPDRCV.bits() | Self::HAICRYPT.bits() | Self::REXMITFLG.bits(); } } fn le_bytes_to_string(le_bytes: &mut impl Buf) -> Result<String, PacketParseError> { if le_bytes.remaining() % 4 != 0 { return Err(PacketParseError::NotEnoughData); } let mut str_bytes = Vec::with_capacity(le_bytes.remaining()); while le_bytes.remaining() > 4 { str_bytes.extend(le_bytes.get_u32_le().to_be_bytes()); } // make sure to skip padding bytes if any for the last word match le_bytes.get_u32_le().to_be_bytes() { [a, 0, 0, 0] => str_bytes.push(a), [a, b, 0, 0] => str_bytes.extend([a, b]), [a, b, c, 0] => str_bytes.extend([a, b, c]), [a, b, c, d] => str_bytes.extend([a, b, c, d]), } String::from_utf8(str_bytes).map_err(|e| PacketParseError::StreamTypeNotUtf8(e.utf8_error())) } fn string_to_le_bytes(str: &str, into: &mut impl BufMut) { let mut chunks = str.as_bytes().chunks_exact(4); while let Some(&[a, b, c, d]) = chunks.next() { into.put(&[d, c, b, a][..]); } // add padding bytes for the final word if needed match *chunks.remainder() { [a, b, c] => into.put(&[0, c, b, a][..]), [a, b] => into.put(&[0, 0, b, a][..]), [a] => into.put(&[0, 0, 0, a][..]), [] => {} // exact multiple of 4 _ => unreachable!(), } } impl Display for FilterSpec { fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> { for (i, (k, v)) in self.0.iter().enumerate() { write!(f, "{k}:{v}")?; if i != self.0.len() - 1 { write!(f, ",")?; } } Ok(()) } } impl SrtControlPacket { pub fn parse<T: Buf>( packet_type: u16, buf: &mut T, ) -> Result<SrtControlPacket, PacketParseError> { use self::SrtControlPacket::*; match packet_type { 0 => Ok(Reject), 1 => Ok(HandshakeRequest(SrtHandshake::parse(buf)?)), 2 => Ok(HandshakeResponse(SrtHandshake::parse(buf)?)), 3 => Ok(KeyRefreshRequest(KeyingMaterialMessage::parse(buf)?)), 4 => Ok(KeyRefreshResponse(KeyingMaterialMessage::parse(buf)?)), 5 => { // the stream id string is stored as 32-bit little endian words // https://tools.ietf.org/html/draft-sharabayko-mops-srt-01#section-3.2.1.3 le_bytes_to_string(buf).map(StreamId) } 6 => le_bytes_to_string(buf).map(Congestion), // Filter 7 => { let filter_str = le_bytes_to_string(buf)?; Ok(Filter(FilterSpec( filter_str .split(',') .map(|kv| { let mut colon_split_iter = kv.split(':'); let k = colon_split_iter .next() .ok_or_else(|| PacketParseError::BadFilter(filter_str.clone()))?; let v = colon_split_iter .next() .ok_or_else(|| PacketParseError::BadFilter(filter_str.clone()))?; // only one colon if colon_split_iter.next().is_some() { return Err(PacketParseError::BadFilter(filter_str.clone())); } Ok((k.to_string(), v.to_string())) }) .collect::<Result<_, _>>()?, ))) } 8 => { let ty = buf.get_u8().into(); let flags = GroupFlags::from_bits_truncate(buf.get_u8()); let weight = buf.get_u16_le(); Ok(Group { ty, flags, weight }) } _ => Err(PacketParseError::UnsupportedSrtExtensionType(packet_type)), } } /// Get the value to fill the reserved area with pub fn type_id(&self) -> u16 { use self::SrtControlPacket::*; match self { Reject => 0, HandshakeRequest(_) => 1, HandshakeResponse(_) => 2, KeyRefreshRequest(_) => 3, KeyRefreshResponse(_) => 4, StreamId(_) => 5, Congestion(_) => 6, Filter(_) => 7, Group {.. 
} => 8, } } pub fn serialize<T: BufMut>(&self, into: &mut T) { use self::SrtControlPacket::*; match self { HandshakeRequest(s) | HandshakeResponse(s) => { s.serialize(into); } KeyRefreshRequest(k) | KeyRefreshResponse(k) => { k.serialize(into); } Filter(filter) => { string_to_le_bytes(&format!("{filter}"), into); } Group { ty, flags, weight } => { into.put_u8((*ty).into()); into.put_u8(flags.bits()); into.put_u16_le(*weight); } Reject => {} StreamId(str) | Congestion(str) => { // the stream id string and congestion string is stored as 32-bit little endian words // https://tools.ietf.org/html/draft-sharabayko-mops-srt-01#section-3.2.1.3 string_to_le_bytes(str, into); } } } // size in 32-bit words pub fn size_words(&self) -> u16 { use self::SrtControlPacket::*; match self { // 3 32-bit words, version, flags, latency HandshakeRequest(_) | HandshakeResponse(_) => 3, // 4 32-bit words + salt + key + wrap [2] KeyRefreshRequest(ref k) | KeyRefreshResponse(ref k) => { 4 + k.salt.len() as u16 / 4 + k.wrapped_keys.len() as u16 / 4 } Congestion(str) | StreamId(str) => ((str.len() + 3) / 4) as u16, // round up to nearest multiple of 4 // 1 32-bit word packed with type, flags, and weight Group {.. } => 1, Filter(filter) => ((format!("{filter}").len() + 3) / 4) as u16, // TODO: not optimal performance, but probably okay _ => unimplemented!("{:?}", self), } } } impl SrtHandshake { pub fn parse<T: Buf>(buf: &mut T) -> Result<SrtHandshake, PacketParseError> { if buf.remaining() < 12 { return Err(PacketParseError::NotEnoughData); } let version = SrtVersion::parse(buf.get_u32()); let shake_flags = buf.get_u32(); let flags = match SrtShakeFlags::from_bits(shake_flags) { Some(i) => i, None => { warn!("Unrecognized SRT flags: 0b{:b}", shake_flags); SrtShakeFlags::from_bits_truncate(shake_flags) } }; let peer_latency = buf.get_u16(); let latency = buf.get_u16(); Ok(SrtHandshake { version, flags, send_latency: Duration::from_millis(u64::from(peer_latency)), recv_latency: Duration::from_millis(u64::from(latency)), }) } pub fn serialize<T: BufMut>(&self, into: &mut T) { into.put_u32(self.version.to_u32()); into.put_u32(self.flags.bits()); // upper 16 bits are peer latency into.put_u16(self.send_latency.as_millis() as u16); // TODO: handle overflow // lower 16 is latency into.put_u16(self.recv_latency.as_millis() as u16); // TODO: handle overflow } } impl KeyingMaterialMessage { // from hcrypt_msg.h:39 // also const traits aren't a thing yet, so u16::from can't be used const SIGN: u16 = ((b'H' - b'@') as u16) << 10 | ((b'A' - b'@') as u16) << 5 | (b'I' - b'@') as u16; pub fn parse(buf: &mut impl Buf) -> Result<KeyingMaterialMessage, PacketParseError> { // first 32-bit word: // // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 // +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+ // |0|Vers | PT | Sign | resv |KF | // make sure there is enough data left in the buffer to at least get to the key flags and length, which tells us how long the packet will be // that's 4x32bit words if buf.remaining() < 4 * 4 { return Err(PacketParseError::NotEnoughData); } let vers_pt = buf.get_u8(); // make sure the first bit is zero if (vers_pt & 0b1000_0000) != 0 { return Err(PacketParseError::BadSrtExtensionMessage); } // upper 4 bits are version let version = vers_pt >> 4; if version != 1 { return Err(PacketParseError::BadSrtExtensionMessage); } // lower 4 bits are pt let pt = PacketType::try_from(vers_pt & 0b0000_1111)?; // next 16 bits are the sign let sign = buf.get_u16(); if sign != Self::SIGN { return 
Err(PacketParseError::BadKeySign(sign)); } // next 6 bits are reserved, then two bits of KF let key_flags = KeyFlags::from_bits_truncate(buf.get_u8() & 0b0000_0011); // second 32-bit word: keki let keki = buf.get_u32(); // third 32-bit word: // // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 // +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+ // | Cipher | Auth | SE | Resv1 | let cipher = CipherType::try_from(buf.get_u8())?; let auth = Auth::try_from(buf.get_u8())?; let se = StreamEncapsulation::try_from(buf.get_u8())?; if se != StreamEncapsulation::Srt { return Err(PacketParseError::StreamEncapsulationNotSrt); } let _resv1 = buf.get_u8(); // fourth 32-bit word: // // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 // +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+ // | Resv2 | Slen/4 | Klen/4 | let _resv2 = buf.get_u16(); let salt_len = usize::from(buf.get_u8()) * 4; let key_len = usize::from(buf.get_u8()) * 4; // acceptable key lengths are 16, 24, and 32 match key_len { // OK 16 | 24 | 32 => {} // not e => return Err(PacketParseError::BadCryptoLength(e as u32)), } // get the size of the packet to make sure that there is enough space // salt + keys (there's a 1 for each in key flags, it's already been anded with 0b11 so max is 2), wrap data is 8 long if buf.remaining() < salt_len + key_len * (key_flags.bits().count_ones() as usize) + 8 { return Err(PacketParseError::NotEnoughData); } // the reference implementation converts the whole thing to network order (big endian) (in 32-bit words) // so we need to make sure to do the same. Source: // https://github.com/Haivision/srt/blob/2ef4ef003c2006df1458de6d47fbe3d2338edf69/srtcore/crypto.cpp#L115 // after this, is the salt let mut salt = vec![]; for _ in 0..salt_len / 4 { salt.extend_from_slice(&buf.get_u32().to_be_bytes()[..]); } // then key[s] let mut wrapped_keys = vec![]; for _ in 0..(key_len * key_flags.bits().count_ones() as usize + 8) / 4 { wrapped_keys.extend_from_slice(&buf.get_u32().to_be_bytes()[..]); } Ok(KeyingMaterialMessage { pt,
fmt
identifier_name
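The StreamId, Congestion, and Filter extensions in this file serialize strings as 32-bit little-endian words, zero-padding the final word, which is why each word's bytes appear reversed on the wire. A std-only round-trip sketch of that layout; the helper names are illustrative, not the crate's:

fn encode_le_words(s: &str) -> Vec<u8> {
    let mut out = Vec::new();
    for chunk in s.as_bytes().chunks(4) {
        let mut word = [0u8; 4];
        word[..chunk.len()].copy_from_slice(chunk); // zero-pad the last word
        // Interpreting the chunk big-endian and emitting little-endian
        // reverses the bytes, matching string_to_le_bytes above.
        out.extend_from_slice(&u32::from_be_bytes(word).to_le_bytes());
    }
    out
}

fn decode_le_words(buf: &[u8]) -> Option<String> {
    if buf.len() % 4 != 0 {
        return None; // payload must be whole 32-bit words
    }
    let mut bytes = Vec::with_capacity(buf.len());
    for word in buf.chunks_exact(4) {
        let word: [u8; 4] = word.try_into().ok()?;
        bytes.extend_from_slice(&u32::from_le_bytes(word).to_be_bytes());
    }
    while bytes.last() == Some(&0) {
        bytes.pop(); // strip the zero padding of the final word
    }
    String::from_utf8(bytes).ok()
}

fn main() {
    let encoded = encode_le_words("#!::u=admin");
    assert_eq!(encoded.len() % 4, 0);
    assert_eq!(decode_le_words(&encoded).as_deref(), Some("#!::u=admin"));
}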
srt.rs
8_error())) } fn string_to_le_bytes(str: &str, into: &mut impl BufMut) { let mut chunks = str.as_bytes().chunks_exact(4); while let Some(&[a, b, c, d]) = chunks.next() { into.put(&[d, c, b, a][..]); } // add padding bytes for the final word if needed match *chunks.remainder() { [a, b, c] => into.put(&[0, c, b, a][..]), [a, b] => into.put(&[0, 0, b, a][..]), [a] => into.put(&[0, 0, 0, a][..]), [] => {} // exact multiple of 4 _ => unreachable!(), } } impl Display for FilterSpec { fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> { for (i, (k, v)) in self.0.iter().enumerate() { write!(f, "{k}:{v}")?; if i != self.0.len() - 1 { write!(f, ",")?; } } Ok(()) } } impl SrtControlPacket { pub fn parse<T: Buf>( packet_type: u16, buf: &mut T, ) -> Result<SrtControlPacket, PacketParseError> { use self::SrtControlPacket::*; match packet_type { 0 => Ok(Reject), 1 => Ok(HandshakeRequest(SrtHandshake::parse(buf)?)), 2 => Ok(HandshakeResponse(SrtHandshake::parse(buf)?)), 3 => Ok(KeyRefreshRequest(KeyingMaterialMessage::parse(buf)?)), 4 => Ok(KeyRefreshResponse(KeyingMaterialMessage::parse(buf)?)), 5 => { // the stream id string is stored as 32-bit little endian words // https://tools.ietf.org/html/draft-sharabayko-mops-srt-01#section-3.2.1.3 le_bytes_to_string(buf).map(StreamId) } 6 => le_bytes_to_string(buf).map(Congestion), // Filter 7 => { let filter_str = le_bytes_to_string(buf)?; Ok(Filter(FilterSpec( filter_str .split(',') .map(|kv| { let mut colon_split_iter = kv.split(':'); let k = colon_split_iter .next() .ok_or_else(|| PacketParseError::BadFilter(filter_str.clone()))?; let v = colon_split_iter .next() .ok_or_else(|| PacketParseError::BadFilter(filter_str.clone()))?; // only one colon if colon_split_iter.next().is_some() { return Err(PacketParseError::BadFilter(filter_str.clone())); } Ok((k.to_string(), v.to_string())) }) .collect::<Result<_, _>>()?, ))) } 8 => { let ty = buf.get_u8().into(); let flags = GroupFlags::from_bits_truncate(buf.get_u8()); let weight = buf.get_u16_le(); Ok(Group { ty, flags, weight }) } _ => Err(PacketParseError::UnsupportedSrtExtensionType(packet_type)), } } /// Get the value to fill the reserved area with pub fn type_id(&self) -> u16 { use self::SrtControlPacket::*; match self { Reject => 0, HandshakeRequest(_) => 1, HandshakeResponse(_) => 2, KeyRefreshRequest(_) => 3, KeyRefreshResponse(_) => 4, StreamId(_) => 5, Congestion(_) => 6, Filter(_) => 7, Group {.. 
} => 8, } } pub fn serialize<T: BufMut>(&self, into: &mut T) { use self::SrtControlPacket::*; match self { HandshakeRequest(s) | HandshakeResponse(s) => { s.serialize(into); } KeyRefreshRequest(k) | KeyRefreshResponse(k) => { k.serialize(into); } Filter(filter) => { string_to_le_bytes(&format!("{filter}"), into); } Group { ty, flags, weight } => { into.put_u8((*ty).into()); into.put_u8(flags.bits()); into.put_u16_le(*weight); } Reject => {} StreamId(str) | Congestion(str) => { // the stream id string and congestion string is stored as 32-bit little endian words // https://tools.ietf.org/html/draft-sharabayko-mops-srt-01#section-3.2.1.3 string_to_le_bytes(str, into); } } } // size in 32-bit words pub fn size_words(&self) -> u16 { use self::SrtControlPacket::*; match self { // 3 32-bit words, version, flags, latency HandshakeRequest(_) | HandshakeResponse(_) => 3, // 4 32-bit words + salt + key + wrap [2] KeyRefreshRequest(ref k) | KeyRefreshResponse(ref k) => { 4 + k.salt.len() as u16 / 4 + k.wrapped_keys.len() as u16 / 4 } Congestion(str) | StreamId(str) => ((str.len() + 3) / 4) as u16, // round up to nearest multiple of 4 // 1 32-bit word packed with type, flags, and weight Group {.. } => 1, Filter(filter) => ((format!("{filter}").len() + 3) / 4) as u16, // TODO: not optimal performance, but probably okay _ => unimplemented!("{:?}", self), } } } impl SrtHandshake { pub fn parse<T: Buf>(buf: &mut T) -> Result<SrtHandshake, PacketParseError> { if buf.remaining() < 12 { return Err(PacketParseError::NotEnoughData); } let version = SrtVersion::parse(buf.get_u32()); let shake_flags = buf.get_u32(); let flags = match SrtShakeFlags::from_bits(shake_flags) { Some(i) => i, None => { warn!("Unrecognized SRT flags: 0b{:b}", shake_flags); SrtShakeFlags::from_bits_truncate(shake_flags) } }; let peer_latency = buf.get_u16(); let latency = buf.get_u16(); Ok(SrtHandshake { version, flags, send_latency: Duration::from_millis(u64::from(peer_latency)), recv_latency: Duration::from_millis(u64::from(latency)), }) } pub fn serialize<T: BufMut>(&self, into: &mut T) { into.put_u32(self.version.to_u32()); into.put_u32(self.flags.bits()); // upper 16 bits are peer latency into.put_u16(self.send_latency.as_millis() as u16); // TODO: handle overflow // lower 16 is latency into.put_u16(self.recv_latency.as_millis() as u16); // TODO: handle overflow } } impl KeyingMaterialMessage { // from hcrypt_msg.h:39 // also const traits aren't a thing yet, so u16::from can't be used const SIGN: u16 = ((b'H' - b'@') as u16) << 10 | ((b'A' - b'@') as u16) << 5 | (b'I' - b'@') as u16; pub fn parse(buf: &mut impl Buf) -> Result<KeyingMaterialMessage, PacketParseError> { // first 32-bit word: // // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 // +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+ // |0|Vers | PT | Sign | resv |KF | // make sure there is enough data left in the buffer to at least get to the key flags and length, which tells us how long the packet will be // that's 4x32bit words if buf.remaining() < 4 * 4 { return Err(PacketParseError::NotEnoughData); } let vers_pt = buf.get_u8(); // make sure the first bit is zero if (vers_pt & 0b1000_0000) != 0 { return Err(PacketParseError::BadSrtExtensionMessage); } // upper 4 bits are version let version = vers_pt >> 4; if version != 1 { return Err(PacketParseError::BadSrtExtensionMessage); } // lower 4 bits are pt let pt = PacketType::try_from(vers_pt & 0b0000_1111)?; // next 16 bits are the sign let sign = buf.get_u16(); if sign != Self::SIGN { return 
Err(PacketParseError::BadKeySign(sign)); } // next 6 bits are reserved, then two bits of KF let key_flags = KeyFlags::from_bits_truncate(buf.get_u8() & 0b0000_0011); // second 32-bit word: keki let keki = buf.get_u32(); // third 32-bit word: // // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 // +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+ // | Cipher | Auth | SE | Resv1 | let cipher = CipherType::try_from(buf.get_u8())?; let auth = Auth::try_from(buf.get_u8())?; let se = StreamEncapsulation::try_from(buf.get_u8())?; if se != StreamEncapsulation::Srt { return Err(PacketParseError::StreamEncapsulationNotSrt); } let _resv1 = buf.get_u8(); // fourth 32-bit word: // // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 // +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+ // | Resv2 | Slen/4 | Klen/4 | let _resv2 = buf.get_u16(); let salt_len = usize::from(buf.get_u8()) * 4; let key_len = usize::from(buf.get_u8()) * 4; // acceptable key lengths are 16, 24, and 32 match key_len { // OK 16 | 24 | 32 => {} // not e => return Err(PacketParseError::BadCryptoLength(e as u32)), } // get the size of the packet to make sure that there is enough space // salt + keys (there's a 1 for each in key flags, it's already been anded with 0b11 so max is 2), wrap data is 8 long if buf.remaining() < salt_len + key_len * (key_flags.bits().count_ones() as usize) + 8 { return Err(PacketParseError::NotEnoughData); } // the reference implementation converts the whole thing to network order (big endian) (in 32-bit words) // so we need to make sure to do the same. Source: // https://github.com/Haivision/srt/blob/2ef4ef003c2006df1458de6d47fbe3d2338edf69/srtcore/crypto.cpp#L115 // after this, is the salt let mut salt = vec![]; for _ in 0..salt_len / 4 { salt.extend_from_slice(&buf.get_u32().to_be_bytes()[..]); } // then key[s] let mut wrapped_keys = vec![]; for _ in 0..(key_len * key_flags.bits().count_ones() as usize + 8) / 4 { wrapped_keys.extend_from_slice(&buf.get_u32().to_be_bytes()[..]); } Ok(KeyingMaterialMessage { pt, key_flags, keki, cipher, auth, salt, wrapped_keys, }) } fn serialize<T: BufMut>(&self, into: &mut T) { // first 32-bit word: // // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 // +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+ // |0|Vers | PT | Sign | resv |KF | // version is 1 into.put_u8(1 << 4 | self.pt as u8); into.put_u16(Self::SIGN); // rightmost bit of KF is even, other is odd into.put_u8(self.key_flags.bits()); // second 32-bit word: keki into.put_u32(self.keki); // third 32-bit word: // // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 // +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+ // | Cipher | Auth | SE | Resv1 | into.put_u8(self.cipher as u8); into.put_u8(self.auth as u8); into.put_u8(StreamEncapsulation::Srt as u8); into.put_u8(0); // resv1 // fourth 32-bit word: // // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 // +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+ // | Resv2 | Slen/4 | Klen/4 | into.put_u16(0); // resv2 into.put_u8((self.salt.len() / 4) as u8); // this unwrap is okay because we already panic above if both are None let key_len = (self.wrapped_keys.len() - 8) / self.key_flags.bits().count_ones() as usize; into.put_u8((key_len / 4) as u8); // put the salt then key[s] into.put(&self.salt[..]); // the reference implementation converts the whole thing to network order (big endian) (in 32-bit words) // so 
we need to make sure to do the same. Source: // https://github.com/Haivision/srt/blob/2ef4ef003c2006df1458de6d47fbe3d2338edf69/srtcore/crypto.cpp#L115 for num in self.wrapped_keys[..].chunks(4) { into.put_u32(u32::from_be_bytes([num[0], num[1], num[2], num[3]])); } } } impl fmt::Debug for SrtControlPacket { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { SrtControlPacket::Reject => write!(f, "reject"), SrtControlPacket::HandshakeRequest(req) => write!(f, "hsreq={req:?}"), SrtControlPacket::HandshakeResponse(resp) => write!(f, "hsresp={resp:?}"), SrtControlPacket::KeyRefreshRequest(req) => write!(f, "kmreq={req:?}"), SrtControlPacket::KeyRefreshResponse(resp) => write!(f, "kmresp={resp:?}"), SrtControlPacket::StreamId(sid) => write!(f, "streamid={sid}"), SrtControlPacket::Congestion(ctype) => write!(f, "congestion={ctype}"), SrtControlPacket::Filter(filter) => write!(f, "filter={filter:?}"), SrtControlPacket::Group { ty, flags, weight } => { write!(f, "group=({ty:?}, {flags:?}, {weight:?})") } } } } impl TryFrom<u8> for CipherType { type Error = PacketParseError; fn try_from(from: u8) -> Result<CipherType, PacketParseError> { match from { 0 => Ok(CipherType::None), 1 => Ok(CipherType::Ecb), 2 => Ok(CipherType::Ctr), 3 => Ok(CipherType::Cbc), e => Err(PacketParseError::BadCipherKind(e)), } } } #[cfg(test)] mod tests { use super::{KeyingMaterialMessage, SrtControlPacket, SrtHandshake, SrtShakeFlags}; use crate::{options::*, packet::*}; use std::{io::Cursor, time::Duration}; #[test] fn deser_ser_shake() { let handshake = Packet::Control(ControlPacket { timestamp: TimeStamp::from_micros(123_141), dest_sockid: SocketId(123), control_type: ControlTypes::Srt(SrtControlPacket::HandshakeRequest(SrtHandshake { version: SrtVersion::CURRENT, flags: SrtShakeFlags::empty(), send_latency: Duration::from_millis(4000), recv_latency: Duration::from_millis(3000), })), }); let mut buf = Vec::new(); handshake.serialize(&mut buf); let deserialized = Packet::parse(&mut Cursor::new(buf), false).unwrap(); assert_eq!(handshake, deserialized); } #[test] fn ser_deser_sid() { let sid = Packet::Control(ControlPacket { timestamp: TimeStamp::from_micros(123), dest_sockid: SocketId(1234), control_type: ControlTypes::Srt(SrtControlPacket::StreamId("Hellohelloheloo".into())), }); let mut buf = Vec::new(); sid.serialize(&mut buf); let deser = Packet::parse(&mut Cursor::new(buf), false).unwrap(); assert_eq!(sid, deser); } #[test] fn srt_key_message_debug()
{ let salt = b"\x00\x00\x00\x00\x00\x00\x00\x00\x85\x2c\x3c\xcd\x02\x65\x1a\x22"; let wrapped = b"U\x06\xe9\xfd\xdfd\xf1'nr\xf4\xe9f\x81#(\xb7\xb5D\x19{\x9b\xcdx"; let km = KeyingMaterialMessage { pt: PacketType::KeyingMaterial, key_flags: KeyFlags::EVEN, keki: 0, cipher: CipherType::Ctr, auth: Auth::None, salt: salt[..].into(), wrapped_keys: wrapped[..].into(), }; assert_eq!(format!("{km:?}"), "KeyingMaterialMessage { pt: KeyingMaterial, key_flags: KeyFlags(EVEN), keki: 0, cipher: Ctr, auth: None }") }
identifier_body
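The keying-material header exercised by this test packs a 4-bit version and a 4-bit packet type into its first byte, followed by a 16-bit sign built from the letters H, A, I as 5-bit codes. A small std-only sketch of that packing and the matching parse checks; the constant names are illustrative, not the crate's:

const VERSION: u8 = 1;
const PT_KEYING_MATERIAL: u8 = 2;
// 'H', 'A', 'I' as 5-bit codes (letter minus '@'), packed into 16 bits.
const SIGN: u16 =
    ((b'H' - b'@') as u16) << 10 | ((b'A' - b'@') as u16) << 5 | (b'I' - b'@') as u16;

fn pack_first_byte() -> u8 {
    // The MSB stays zero; the version takes the upper nibble, PT the lower.
    VERSION << 4 | PT_KEYING_MATERIAL
}

fn parse_first_byte(b: u8) -> Option<(u8, u8)> {
    if b & 0b1000_0000 != 0 {
        return None; // first bit must be zero
    }
    Some((b >> 4, b & 0b0000_1111))
}

fn main() {
    assert_eq!(SIGN, 0x2029); // the "HAI" sign
    let (vers, pt) = parse_first_byte(pack_first_byte()).unwrap();
    assert_eq!((vers, pt), (VERSION, PT_KEYING_MATERIAL));
}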
srt.rs
Salt | /// | ... | /// +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+ /// | Wrap | /// | ... | /// +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+ /// ``` /// #[derive(Clone, Eq, PartialEq)] pub struct KeyingMaterialMessage { pub pt: PacketType, // TODO: i think this is always KeyingMaterial.... pub key_flags: KeyFlags, pub keki: u32, pub cipher: CipherType, pub auth: Auth, pub salt: Vec<u8>, pub wrapped_keys: Vec<u8>, } impl From<GroupType> for u8 { fn from(from: GroupType) -> u8 { match from { GroupType::Undefined => 0, GroupType::Broadcast => 1, GroupType::MainBackup => 2, GroupType::Balancing => 3, GroupType::Multicast => 4, GroupType::Unrecognized(u) => u, } } } impl From<u8> for GroupType { fn from(from: u8) -> GroupType { match from { 0 => GroupType::Undefined, 1 => GroupType::Broadcast, 2 => GroupType::MainBackup, 3 => GroupType::Balancing, 4 => GroupType::Multicast, u => GroupType::Unrecognized(u), } } } impl fmt::Debug for KeyingMaterialMessage { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { f.debug_struct("KeyingMaterialMessage") .field("pt", &self.pt) .field("key_flags", &self.key_flags) .field("keki", &self.keki) .field("cipher", &self.cipher) .field("auth", &self.auth) .finish() } } #[derive(Debug, Copy, Clone, Eq, PartialEq)] pub enum Auth { None = 0, } impl TryFrom<u8> for Auth { type Error = PacketParseError; fn try_from(value: u8) -> Result<Self, Self::Error> { match value { 0 => Ok(Auth::None), e => Err(PacketParseError::BadAuth(e)), } } } #[derive(Debug, Copy, Clone, Eq, PartialEq)] pub enum StreamEncapsulation { Udp = 1, Srt = 2, } impl TryFrom<u8> for StreamEncapsulation { type Error = PacketParseError; fn try_from(value: u8) -> Result<Self, Self::Error> { Ok(match value { 1 => StreamEncapsulation::Udp, 2 => StreamEncapsulation::Srt, e => return Err(PacketParseError::BadStreamEncapsulation(e)), }) } } #[derive(Debug, Copy, Clone, Eq, PartialEq)] // see htcryp_msg.h:43... // 7: Reserved to discriminate MPEG-TS packet (0x47=sync byte). pub enum PacketType { MediaStream = 1, // Media Stream Message (MSmsg) KeyingMaterial = 2, // Keying Material Message (KMmsg) } bitflags! { #[derive(Clone, Copy, Eq, PartialEq, Debug)] pub struct KeyFlags : u8 { const EVEN = 0b01; const ODD = 0b10; } } impl TryFrom<u8> for PacketType { type Error = PacketParseError; fn try_from(value: u8) -> Result<Self, Self::Error> { match value { 1 => Ok(PacketType::MediaStream), 2 => Ok(PacketType::KeyingMaterial), err => Err(PacketParseError::BadKeyPacketType(err)), }
/// from https://github.com/Haivision/srt/blob/2ef4ef003c2006df1458de6d47fbe3d2338edf69/haicrypt/hcrypt_msg.h#L121-L124 #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub enum CipherType { None = 0, Ecb = 1, Ctr = 2, Cbc = 3, } /// The SRT handshake object #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub struct SrtHandshake { /// The SRT version /// Serialized just as the u32 that SrtVersion serialized to pub version: SrtVersion, /// SRT connection init flags pub flags: SrtShakeFlags, /// The peer's TSBPD latency (latency to send at) /// This is serialized as the upper 16 bits of the third 32-bit word /// source: https://github.com/Haivision/srt/blob/4f7f2beb2e1e306111b9b11402049a90cb6d3787/srtcore/core.cpp#L1341-L1353 pub send_latency: Duration, /// The TSBPD latency (latency to recv at) /// This is serialized as the lower 16 bits of the third 32-bit word /// see csrtcc.cpp:132 in the reference implementation pub recv_latency: Duration, } bitflags! { #[derive(Copy, Clone, Debug, Eq, PartialEq)] pub struct SrtShakeFlags: u32 { /// Timestamp-based Packet delivery real-time data sender const TSBPDSND = 0x1; /// Timestamp-based Packet delivery real-time data receiver const TSBPDRCV = 0x2; /// HaiCrypt AES-128/192/256-CTR /// also represents if it supports the encryption flags in the data packet const HAICRYPT = 0x4; /// Drop real-time data packets too late to be processed in time const TLPKTDROP = 0x8; /// Periodic NAK report const NAKREPORT = 0x10; /// One bit in payload packet msgno is "retransmitted" flag const REXMITFLG = 0x20; /// This entity supports stream ID packets const STREAM = 0x40; /// Again not sure... TODO: const PACKET_FILTER = 0x80; // currently implemented flags const SUPPORTED = Self::TSBPDSND.bits() | Self::TSBPDRCV.bits() | Self::HAICRYPT.bits() | Self::REXMITFLG.bits(); } } fn le_bytes_to_string(le_bytes: &mut impl Buf) -> Result<String, PacketParseError> { if le_bytes.remaining() % 4 != 0 { return Err(PacketParseError::NotEnoughData); } let mut str_bytes = Vec::with_capacity(le_bytes.remaining()); while le_bytes.remaining() > 4 { str_bytes.extend(le_bytes.get_u32_le().to_be_bytes()); } // make sure to skip padding bytes if any for the last word match le_bytes.get_u32_le().to_be_bytes() { [a, 0, 0, 0] => str_bytes.push(a), [a, b, 0, 0] => str_bytes.extend([a, b]), [a, b, c, 0] => str_bytes.extend([a, b, c]), [a, b, c, d] => str_bytes.extend([a, b, c, d]), } String::from_utf8(str_bytes).map_err(|e| PacketParseError::StreamTypeNotUtf8(e.utf8_error())) } fn string_to_le_bytes(str: &str, into: &mut impl BufMut) { let mut chunks = str.as_bytes().chunks_exact(4); while let Some(&[a, b, c, d]) = chunks.next() { into.put(&[d, c, b, a][..]); } // add padding bytes for the final word if needed match *chunks.remainder() { [a, b, c] => into.put(&[0, c, b, a][..]), [a, b] => into.put(&[0, 0, b, a][..]), [a] => into.put(&[0, 0, 0, a][..]), [] => {} // exact multiple of 4 _ => unreachable!(), } } impl Display for FilterSpec { fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> { for (i, (k, v)) in self.0.iter().enumerate() { write!(f, "{k}:{v}")?; if i != self.0.len() - 1 { write!(f, ",")?; } } Ok(()) } } impl SrtControlPacket { pub fn parse<T: Buf>( packet_type: u16, buf: &mut T, ) -> Result<SrtControlPacket, PacketParseError> { use self::SrtControlPacket::*; match packet_type { 0 => Ok(Reject), 1 => Ok(HandshakeRequest(SrtHandshake::parse(buf)?)), 2 => Ok(HandshakeResponse(SrtHandshake::parse(buf)?)), 3 => Ok(KeyRefreshRequest(KeyingMaterialMessage::parse(buf)?)), 4 => 
Ok(KeyRefreshResponse(KeyingMaterialMessage::parse(buf)?)), 5 => { // the stream id string is stored as 32-bit little endian words // https://tools.ietf.org/html/draft-sharabayko-mops-srt-01#section-3.2.1.3 le_bytes_to_string(buf).map(StreamId) } 6 => le_bytes_to_string(buf).map(Congestion), // Filter 7 => { let filter_str = le_bytes_to_string(buf)?; Ok(Filter(FilterSpec( filter_str .split(',') .map(|kv| { let mut colon_split_iter = kv.split(':'); let k = colon_split_iter .next() .ok_or_else(|| PacketParseError::BadFilter(filter_str.clone()))?; let v = colon_split_iter .next() .ok_or_else(|| PacketParseError::BadFilter(filter_str.clone()))?; // only one colon allowed per key-value pair if colon_split_iter.next().is_some() { return Err(PacketParseError::BadFilter(filter_str.clone())); } Ok((k.to_string(), v.to_string())) }) .collect::<Result<_, _>>()?, ))) } 8 => { let ty = buf.get_u8().into(); let flags = GroupFlags::from_bits_truncate(buf.get_u8()); let weight = buf.get_u16_le(); Ok(Group { ty, flags, weight }) } _ => Err(PacketParseError::UnsupportedSrtExtensionType(packet_type)), } } /// Get the value to fill the reserved area with pub fn type_id(&self) -> u16 { use self::SrtControlPacket::*; match self { Reject => 0, HandshakeRequest(_) => 1, HandshakeResponse(_) => 2, KeyRefreshRequest(_) => 3, KeyRefreshResponse(_) => 4, StreamId(_) => 5, Congestion(_) => 6, Filter(_) => 7, Group { .. } => 8, } } pub fn serialize<T: BufMut>(&self, into: &mut T) { use self::SrtControlPacket::*; match self { HandshakeRequest(s) | HandshakeResponse(s) => { s.serialize(into); } KeyRefreshRequest(k) | KeyRefreshResponse(k) => { k.serialize(into); } Filter(filter) => { string_to_le_bytes(&format!("{filter}"), into); } Group { ty, flags, weight } => { into.put_u8((*ty).into()); into.put_u8(flags.bits()); into.put_u16_le(*weight); } Reject => {} StreamId(str) | Congestion(str) => { // the stream id and congestion strings are stored as 32-bit little endian words // https://tools.ietf.org/html/draft-sharabayko-mops-srt-01#section-3.2.1.3 string_to_le_bytes(str, into); } } } // size in 32-bit words pub fn size_words(&self) -> u16 { use self::SrtControlPacket::*; match self { // 3 32-bit words: version, flags, latency HandshakeRequest(_) | HandshakeResponse(_) => 3, // 4 32-bit words + salt + key + wrap [2] KeyRefreshRequest(ref k) | KeyRefreshResponse(ref k) => { 4 + k.salt.len() as u16 / 4 + k.wrapped_keys.len() as u16 / 4 } Congestion(str) | StreamId(str) => ((str.len() + 3) / 4) as u16, // round up to nearest multiple of 4 // 1 32-bit word packed with type, flags, and weight Group { ..
} => 1, Filter(filter) => ((format!("{filter}").len() + 3) / 4) as u16, // TODO: not optimal performance, but probably okay _ => unimplemented!("{:?}", self), } } } impl SrtHandshake { pub fn parse<T: Buf>(buf: &mut T) -> Result<SrtHandshake, PacketParseError> { if buf.remaining() < 12 { return Err(PacketParseError::NotEnoughData); } let version = SrtVersion::parse(buf.get_u32()); let shake_flags = buf.get_u32(); let flags = match SrtShakeFlags::from_bits(shake_flags) { Some(i) => i, None => { warn!("Unrecognized SRT flags: 0b{:b}", shake_flags); SrtShakeFlags::from_bits_truncate(shake_flags) } }; let peer_latency = buf.get_u16(); let latency = buf.get_u16(); Ok(SrtHandshake { version, flags, send_latency: Duration::from_millis(u64::from(peer_latency)), recv_latency: Duration::from_millis(u64::from(latency)), }) } pub fn serialize<T: BufMut>(&self, into: &mut T) { into.put_u32(self.version.to_u32()); into.put_u32(self.flags.bits()); // upper 16 bits are peer latency into.put_u16(self.send_latency.as_millis() as u16); // TODO: handle overflow // lower 16 is latency into.put_u16(self.recv_latency.as_millis() as u16); // TODO: handle overflow } } impl KeyingMaterialMessage { // from hcrypt_msg.h:39 // also const traits aren't a thing yet, so u16::from can't be used const SIGN: u16 = ((b'H' - b'@') as u16) << 10 | ((b'A' - b'@') as u16) << 5 | (b'I' - b'@') as u16; pub fn parse(buf: &mut impl Buf) -> Result<KeyingMaterialMessage, PacketParseError> { // first 32-bit word: // // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 // +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+ // |0|Vers | PT | Sign | resv |KF | // make sure there is enough data left in the buffer to at least get to the key flags and length, which tells us how long the packet will be // that's 4 x 32-bit words if buf.remaining() < 4 * 4 { return Err(PacketParseError::NotEnoughData); } let vers_pt = buf.get_u8(); // make sure the first bit is zero if (vers_pt & 0b1000_0000) != 0 { return Err(PacketParseError::BadSrtExtensionMessage); } // upper 4 bits are version let version = vers_pt >> 4; if version != 1 { return Err(PacketParseError::BadSrtExtensionMessage); } // lower 4 bits are pt let pt = PacketType::try_from(vers_pt & 0b0000_1111)?; // next 16 bits are sign let sign = buf.get_u16(); if sign != Self::SIGN { return Err(PacketParseError::BadKeySign(sign)); } // next 6 bits are reserved, then two bits of KF let key_flags = KeyFlags::from_bits_truncate(buf.get_u8() & 0b0000_0011); // second 32-bit word: keki let keki = buf.get_u32(); // third 32-bit word: // // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 // +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+ // | Cipher | Auth | SE | Resv1 | let cipher = CipherType::try_from(buf.get_u8())?; let auth = Auth::try_from(buf.get_u8())?; let se = StreamEncapsulation::try_from(buf.get_u8())?; if se != StreamEncapsulation::Srt { return Err(PacketParseError::StreamEncapsulationNotSrt); } let _resv1 = buf.get_u8(); // fourth 32-bit word: // // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 // +-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-|-+-+-+-+-+-+-+-+ // | Resv2 | Slen/4 | Klen/4 | let _resv2 = buf.get_u16(); let salt_len = usize::from(buf.get_u8()) * 4; let key_len = usize::from(buf.get_u8()) * 4; // acceptable key lengths are 16, 24, and 32 match key_len { // OK 16 | 24 | 32 => {} // not acceptable e => return Err(PacketParseError::BadCryptoLength(e as u32)), } // get the size of the packet to make sure that there is
enough space // salt + keys (there's a 1 for each in key flags, it's already been ANDed with 0b11 so max is 2), wrap data is 8 long if buf.remaining() < salt_len + key_len * (key_flags.bits().count_ones() as usize) + 8 { return Err(PacketParseError::NotEnoughData); } // the reference implementation converts the whole thing to network order (big endian) (in 32-bit words) // so we need to make sure to do the same. Source: // https://github.com/Haivision/srt/blob/2ef4ef003c2006df1458de6d47fbe3d2338edf69/srtcore/crypto.cpp#L115 // after this is the salt let mut salt = vec![]; for _ in 0..salt_len / 4 { salt.extend_from_slice(&buf.get_u32().to_be_bytes()[..]); } // then key[s] let mut wrapped_keys = vec![]; for _ in 0..(key_len * key_flags.bits().count_ones() as usize + 8) / 4 { wrapped_keys.extend_from_slice(&buf.get_u32().to_be_bytes()[..]); } Ok(KeyingMaterialMessage { pt,
} }
random_line_split
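The `string_to_le_bytes`/`le_bytes_to_string` pair above packs extension strings into 32-bit little-endian words, zero-padding the final word. Here is a minimal standalone sketch of just the encoding layout; the `encode_le_words` name is illustrative and not part of the crate:

```rust
// Sketch of the 32-bit LE word layout used for StreamId/Congestion above;
// assumes the same zero-padding rule as string_to_le_bytes.
fn encode_le_words(s: &str) -> Vec<u8> {
    let mut out = Vec::new();
    for chunk in s.as_bytes().chunks(4) {
        let mut word = [0u8; 4];
        word[..chunk.len()].copy_from_slice(chunk); // zero-pad the final word
        word.reverse(); // each word is stored little-endian
        out.extend_from_slice(&word);
    }
    out
}

fn main() {
    // "abcd" becomes [d, c, b, a]; a trailing odd byte gets a padded word.
    assert_eq!(encode_le_words("abcd"), vec![b'd', b'c', b'b', b'a']);
    assert_eq!(
        encode_le_words("abcde"),
        vec![b'd', b'c', b'b', b'a', 0, 0, 0, b'e']
    );
}
```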
mod.rs
//! # Day 19: Go With The Flow //! //! With the Elves well on their way constructing the North Pole base, you turn //! your attention back to understanding the inner workings of programming the //! device. //! //! You can't help but notice that the device's opcodes don't contain any flow //! control like jump instructions. The device's manual goes on to explain: //! //! "In programs where flow control is required, the instruction pointer can be //! bound to a register so that it can be manipulated directly. This way, //! setr/seti can function as absolute jumps, addr/addi can function as relative //! jumps, and other opcodes can cause truly fascinating effects." //! //! This mechanism is achieved through a declaration like #ip 1, which would //! modify register 1 so that accesses to it let the program indirectly access //! the instruction pointer itself. To compensate for this kind of binding, //! there are now six registers (numbered 0 through 5); the five not bound to //! the instruction pointer behave as normal. Otherwise, the same rules apply as //! the last time you worked with this device. //! //! When the instruction pointer is bound to a register, its value is written to //! that register just before each instruction is executed, and the value of //! that register is written back to the instruction pointer immediately after //! each instruction finishes execution. Afterward, move to the next instruction //! by adding one to the instruction pointer, even if the value in the //! instruction pointer was just updated by an instruction. (Because of this, //! instructions must effectively set the instruction pointer to the instruction //! before the one they want executed next.) //! //! The instruction pointer is 0 during the first instruction, 1 during the //! second, and so on. If the instruction pointer ever causes the device to //! attempt to load an instruction outside the instructions defined in the //! program, the program instead immediately halts. The instruction pointer //! starts at 0. //! //! It turns out that this new information is already proving useful: the CPU in //! the device is not very powerful, and a background process is occupying most //! of its time. You dump the background process' declarations and instructions //! to a file (your puzzle input), making sure to use the names of the opcodes //! rather than the numbers. //! //! For example, suppose you have the following program: //! //! ```text //! #ip 0 //! seti 5 0 1 //! seti 6 0 2 //! addi 0 1 0 //! addr 1 2 3 //! setr 1 0 0 //! seti 8 0 4 //! seti 9 0 5 //! ``` //! //! When executed, the following instructions are executed. Each line contains //! the value of the instruction pointer at the time the instruction started, //! the values of the six registers before executing the instructions (in square //! brackets), the instruction itself, and the values of the six registers after //! executing the instruction (also in square brackets). //! //! ```text //! ip=0 [0, 0, 0, 0, 0, 0] seti 5 0 1 [0, 5, 0, 0, 0, 0] //! ip=1 [1, 5, 0, 0, 0, 0] seti 6 0 2 [1, 5, 6, 0, 0, 0] //! ip=2 [2, 5, 6, 0, 0, 0] addi 0 1 0 [3, 5, 6, 0, 0, 0] //! ip=4 [4, 5, 6, 0, 0, 0] setr 1 0 0 [5, 5, 6, 0, 0, 0] //! ip=6 [6, 5, 6, 0, 0, 0] seti 9 0 5 [6, 5, 6, 0, 0, 9] //! ``` //! //! In detail, when running this program, the following events occur: //! //! * The first line (#ip 0) indicates that the instruction pointer should be //! bound to register 0 in this program. This is not an instruction, and so //! 
the value of the instruction pointer does not change during the processing //! of this line. //! * The instruction pointer contains 0, and so the first instruction is //! executed (seti 5 0 1). It updates register 0 to the current instruction //! pointer value (0), sets register 1 to 5, sets the instruction pointer to //! the value of register 0 (which has no effect, as the instruction did not //! modify register 0), and then adds one to the instruction pointer. //! * The instruction pointer contains 1, and so the second instruction, seti 6 //! 0 2, is executed. This is very similar to the instruction before it: 6 is //! stored in register 2, and the instruction pointer is left with the value //! 2. //! * The instruction pointer is 2, which points at the instruction addi 0 1 0. //! This is like a relative jump: the value of the instruction pointer, 2, is //! loaded into register 0. Then, addi finds the result of adding the value in //! register 0 and the value 1, storing the result, 3, back in register 0. //! Register 0 is then copied back to the instruction pointer, which will //! cause it to end up 1 larger than it would have otherwise and skip the next //! instruction (addr 1 2 3) entirely. Finally, 1 is added to the instruction //! pointer. //! * The instruction pointer is 4, so the instruction setr 1 0 0 is run. This //! is like an absolute jump: it copies the value contained in register 1, 5, //! into register 0, which causes it to end up in the instruction pointer. The //! instruction pointer is then incremented, leaving it at 6. //! * The instruction pointer is 6, so the instruction seti 9 0 5 stores 9 into //! register 5. The instruction pointer is incremented, causing it to point //! outside the program, and so the program ends. //! //! What value is left in register 0 when the background process halts? //! //! ## Part 2 //! //! A new background process immediately spins up in its place. It appears //! identical, but on closer inspection, you notice that this time, register 0 //! started with the value 1. //! //! What value is left in register 0 when this new background process halts? //! //! 
[Advent of Code 2018 - Day 19](https://adventofcode.com/2018/day/19) use std::{ fmt::{self, Display}, iter::FromIterator, ops::{Index, IndexMut}, }; use crate::day16::{Data, Mnemonic}; use self::Mnemonic::*; #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub struct Register([Data; 6]); impl Display for Register { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!( f, "[{}, {}, {}, {}, {}, {}]", self.0[0], self.0[1], self.0[2], self.0[3], self.0[4], self.0[5] ) } } impl Default for Register { fn default() -> Self { Register([0; 6]) } } impl From<[Data; 6]> for Register { fn from(value: [Data; 6]) -> Self { Register(value) } } impl Index<Data> for Register { type Output = Data; fn index(&self, index: Data) -> &Self::Output { &self.0[index as usize] } } impl IndexMut<Data> for Register { fn index_mut(&mut self, index: Data) -> &mut <Self as Index<Data>>::Output { &mut self.0[index as usize] } } #[derive(Debug, Clone, Copy, PartialEq)] pub struct Instruction { pub opcode: Mnemonic, pub a: Data, pub b: Data, pub c: Data, } impl Display for Instruction { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{} {} {} {}", self.opcode, self.a, self.b, self.c) } } impl From<(Mnemonic, Data, Data, Data)> for Instruction { fn from((opcode, a, b, c): (Mnemonic, Data, Data, Data)) -> Self { Self { opcode, a, b, c } } } impl Instruction { pub fn new(opcode: Mnemonic, a: Data, b: Data, c: Data) -> Self { Self { opcode, a, b, c } } } pub type Addr = Data; #[derive(Debug, Clone, PartialEq)] pub struct Interpreter { ip_reg: Addr, ip: Addr, } impl Interpreter { pub fn new(ip_reg: Addr) -> Self { Self { ip_reg, ip: 0 } } #[inline] pub fn execute(&mut self, instruction: Instruction, register: &mut Register) { register[self.ip_reg] = self.ip; execute_mnemonic(instruction, register); self.ip = register[self.ip_reg] + 1; } pub fn run(&mut self, program: &[Instruction], register: &mut Register) -> Result<(), String> { while let Some(&instruction) = program.get(self.ip as usize) { if self.ip == 3 { self.ip = optimized(register); continue; } let _c_ip = self.ip; self.execute(instruction, register); //_trace(_c_ip, instruction, register); } Ok(()) } } fn execute_mnemonic(Instruction { opcode, a, b, c }: Instruction, reg: &mut Register) { match opcode { AddR => reg[c] = reg[a] + reg[b], AddI => reg[c] = reg[a] + b, MulR => reg[c] = reg[a] * reg[b], MulI => reg[c] = reg[a] * b, BanR => reg[c] = reg[a] & reg[b], BanI => reg[c] = reg[a] & b, BorR => reg[c] = reg[a] | reg[b], BorI => reg[c] = reg[a] | b, SetR => reg[c] = reg[a], SetI => reg[c] = a, GtIR => reg[c] = if a > reg[b] { 1 } else { 0 }, GtRI => reg[c] = if reg[a] > b { 1 } else { 0 }, GtRR => reg[c] = if reg[a] > reg[b] { 1 } else { 0 }, EqIR => reg[c] = if a == reg[b] { 1 } else { 0 }, EqRI => reg[c] = if reg[a] == b { 1 } else { 0 }, EqRR => reg[c] = if reg[a] == reg[b] { 1 } else { 0 }, } } #[inline] fn _trace(ip: Addr, Instruction { opcode, a, b, c }: Instruction, reg: &Register) { match opcode { AddR => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg), AddI => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg), MulR => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg), MulI => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg), BanR => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg), BanI => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg), BorR => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg), BorI => println!("{:02}: {} {} {} {} : {} 
", ip, opcode, a, b, c, reg), SetR => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg), SetI => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg), GtIR => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg), GtRI => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg), GtRR => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg), EqIR => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg), EqRI => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg), EqRR => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg), } } #[derive(Debug, Clone, PartialEq)] pub struct Program { ip_reg: Addr, instructions: Vec<Instruction>, } impl Display for Program { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { writeln!(f, "#ip {}", self.ip_reg)?; for instruction in &self.instructions { writeln!(f, "{}", instruction)?; } Ok(()) } } impl Program { pub fn new(ip_reg: Addr, instructions: impl IntoIterator<Item = Instruction>) -> Self { Self { ip_reg, instructions: Vec::from_iter(instructions.into_iter()), } } pub fn ip_reg(&self) -> Addr { self.ip_reg } pub fn instructions(&self) -> &[Instruction] { &self.instructions } } #[aoc_generator(day19)] pub fn parse(input: &str) -> Result<Program, String> { let mut ip_reg = 6; let mut instructions = Vec::with_capacity(16); for line in input.lines() { if line.starts_with("#ip") { ip_reg = line[4..] .trim() .parse::<Addr>() .map_err(|e| e.to_string())?; } else { let opc = line[0..4].parse()?; let mut oprs = line[5..] .split(' ') .take(3) .map(|s| s.trim().parse::<Data>().map_err(|e| e.to_string())); let opr1 = oprs.next().unwrap()?; let opr2 = oprs.next().unwrap()?; let opr3 = oprs.next().unwrap()?; instructions.push(Instruction::new(opc, opr1, opr2, opr3)); } } Ok(Program::new(ip_reg, instructions)) } #[aoc(day19, part1)] pub fn run_background_process(program: &Program) -> Data { let mut interpreter = Interpreter::new(program.ip_reg); let mut register = Register::default(); interpreter .run(program.instructions(), &mut register) .unwrap(); register[0] } #[aoc(day19, part2)] pub fn run_background_process_2(program: &Program) -> Data { let mut interpreter = Interpreter::new(program.ip_reg); let mut register = Register::default(); register[0] = 1; interpreter .run(program.instructions(), &mut register) .unwrap(); register[0] } /// Repeated loop: /// /// ```text /// 'L1: R1 = R5 * R2 /// if R4 == R1 then /// R1 = 1 /// R0 = R5 + R0 /// else /// R1 = 0 /// end if /// R3 = R1 + R3 /// R2 = R2 + 1 /// if R2 > R4 then /// R1 = 1 /// R3 = R3 + 1 // goto 'L2 /// else /// R1 = 0 /// R3 = R3 + R1 /// R3 = 2 // goto 'L1 /// end if /// 'L2: /// ``` fn
(reg: &mut Register) -> Addr { if reg[4] % reg[5] == 0 { reg[0] = reg[5] + reg[0]; } reg[2] = reg[4]; reg[1] = 0; 12 } #[cfg(test)] mod tests;
optimized
identifier_name
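The long doc comment above spells out the ip-bound register rule that `Interpreter::execute` implements: copy the ip into the bound register, run the instruction, read the register back, then add one. A self-contained sketch of a single step, with the `addi 0 1 0` instruction from the worked example hard-coded for illustration:

```rust
// One execute step under the ip-bound register semantics (assumed
// simplification of Interpreter::execute; names are illustrative).
fn step(ip_reg: usize, ip: &mut usize, regs: &mut [usize; 6]) {
    regs[ip_reg] = *ip;     // write ip into the bound register
    regs[0] += 1;           // execute `addi 0 1 0` with ip bound to reg 0
    *ip = regs[ip_reg] + 1; // read the register back and advance by one
}

fn main() {
    let mut ip = 2;
    let mut regs = [0usize; 6];
    step(0, &mut ip, &mut regs);
    // ip was 2, addi 0 1 0 stores 3 in reg 0, so the next ip is 4 --
    // the relative jump that skips `addr 1 2 3` in the doc example.
    assert_eq!(ip, 4);
}
```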
mod.rs
//! # Day 19: Go With The Flow //! //! With the Elves well on their way constructing the North Pole base, you turn //! your attention back to understanding the inner workings of programming the //! device. //! //! You can't help but notice that the device's opcodes don't contain any flow //! control like jump instructions. The device's manual goes on to explain: //! //! "In programs where flow control is required, the instruction pointer can be //! bound to a register so that it can be manipulated directly. This way, //! setr/seti can function as absolute jumps, addr/addi can function as relative //! jumps, and other opcodes can cause truly fascinating effects." //! //! This mechanism is achieved through a declaration like #ip 1, which would //! modify register 1 so that accesses to it let the program indirectly access //! the instruction pointer itself. To compensate for this kind of binding, //! there are now six registers (numbered 0 through 5); the five not bound to //! the instruction pointer behave as normal. Otherwise, the same rules apply as //! the last time you worked with this device. //! //! When the instruction pointer is bound to a register, its value is written to //! that register just before each instruction is executed, and the value of //! that register is written back to the instruction pointer immediately after //! each instruction finishes execution. Afterward, move to the next instruction //! by adding one to the instruction pointer, even if the value in the //! instruction pointer was just updated by an instruction. (Because of this, //! instructions must effectively set the instruction pointer to the instruction //! before the one they want executed next.) //! //! The instruction pointer is 0 during the first instruction, 1 during the //! second, and so on. If the instruction pointer ever causes the device to //! attempt to load an instruction outside the instructions defined in the //! program, the program instead immediately halts. The instruction pointer //! starts at 0. //! //! It turns out that this new information is already proving useful: the CPU in //! the device is not very powerful, and a background process is occupying most //! of its time. You dump the background process' declarations and instructions //! to a file (your puzzle input), making sure to use the names of the opcodes //! rather than the numbers. //! //! For example, suppose you have the following program: //! //! ```text //! #ip 0 //! seti 5 0 1 //! seti 6 0 2 //! addi 0 1 0 //! addr 1 2 3 //! setr 1 0 0 //! seti 8 0 4 //! seti 9 0 5 //! ``` //! //! When executed, the following instructions are executed. Each line contains //! the value of the instruction pointer at the time the instruction started, //! the values of the six registers before executing the instructions (in square //! brackets), the instruction itself, and the values of the six registers after //! executing the instruction (also in square brackets). //! //! ```text //! ip=0 [0, 0, 0, 0, 0, 0] seti 5 0 1 [0, 5, 0, 0, 0, 0] //! ip=1 [1, 5, 0, 0, 0, 0] seti 6 0 2 [1, 5, 6, 0, 0, 0] //! ip=2 [2, 5, 6, 0, 0, 0] addi 0 1 0 [3, 5, 6, 0, 0, 0] //! ip=4 [4, 5, 6, 0, 0, 0] setr 1 0 0 [5, 5, 6, 0, 0, 0] //! ip=6 [6, 5, 6, 0, 0, 0] seti 9 0 5 [6, 5, 6, 0, 0, 9] //! ``` //!
//! bound to register 0 in this program. This is not an instruction, and so //! the value of the instruction pointer does not change during the processing //! of this line. //! * The instruction pointer contains 0, and so the first instruction is //! executed (seti 5 0 1). It updates register 0 to the current instruction //! pointer value (0), sets register 1 to 5, sets the instruction pointer to //! the value of register 0 (which has no effect, as the instruction did not //! modify register 0), and then adds one to the instruction pointer. //! * The instruction pointer contains 1, and so the second instruction, seti 6 //! 0 2, is executed. This is very similar to the instruction before it: 6 is //! stored in register 2, and the instruction pointer is left with the value //! 2. //! * The instruction pointer is 2, which points at the instruction addi 0 1 0. //! This is like a relative jump: the value of the instruction pointer, 2, is //! loaded into register 0. Then, addi finds the result of adding the value in //! register 0 and the value 1, storing the result, 3, back in register 0. //! Register 0 is then copied back to the instruction pointer, which will //! cause it to end up 1 larger than it would have otherwise and skip the next //! instruction (addr 1 2 3) entirely. Finally, 1 is added to the instruction //! pointer. //! * The instruction pointer is 4, so the instruction setr 1 0 0 is run. This //! is like an absolute jump: it copies the value contained in register 1, 5, //! into register 0, which causes it to end up in the instruction pointer. The //! instruction pointer is then incremented, leaving it at 6. //! * The instruction pointer is 6, so the instruction seti 9 0 5 stores 9 into //! register 5. The instruction pointer is incremented, causing it to point //! outside the program, and so the program ends. //! //! What value is left in register 0 when the background process halts? //! //! ## Part 2 //! //! A new background process immediately spins up in its place. It appears //! identical, but on closer inspection, you notice that this time, register 0 //! started with the value 1. //! //! What value is left in register 0 when this new background process halts? //! //! 
[Advent of Code 2018 - Day 19](https://adventofcode.com/2018/day/19) use std::{ fmt::{self, Display}, iter::FromIterator, ops::{Index, IndexMut}, }; use crate::day16::{Data, Mnemonic}; use self::Mnemonic::*; #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub struct Register([Data; 6]); impl Display for Register { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!( f, "[{}, {}, {}, {}, {}, {}]", self.0[0], self.0[1], self.0[2], self.0[3], self.0[4], self.0[5] ) } } impl Default for Register { fn default() -> Self { Register([0; 6]) } } impl From<[Data; 6]> for Register { fn from(value: [Data; 6]) -> Self { Register(value) } } impl Index<Data> for Register { type Output = Data; fn index(&self, index: Data) -> &Self::Output { &self.0[index as usize] } } impl IndexMut<Data> for Register { fn index_mut(&mut self, index: Data) -> &mut <Self as Index<Data>>::Output { &mut self.0[index as usize] } } #[derive(Debug, Clone, Copy, PartialEq)] pub struct Instruction { pub opcode: Mnemonic, pub a: Data, pub b: Data, pub c: Data, } impl Display for Instruction { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{} {} {} {}", self.opcode, self.a, self.b, self.c) } } impl From<(Mnemonic, Data, Data, Data)> for Instruction { fn from((opcode, a, b, c): (Mnemonic, Data, Data, Data)) -> Self { Self { opcode, a, b, c } } } impl Instruction { pub fn new(opcode: Mnemonic, a: Data, b: Data, c: Data) -> Self { Self { opcode, a, b, c } } } pub type Addr = Data; #[derive(Debug, Clone, PartialEq)] pub struct Interpreter { ip_reg: Addr, ip: Addr, } impl Interpreter { pub fn new(ip_reg: Addr) -> Self { Self { ip_reg, ip: 0 } } #[inline] pub fn execute(&mut self, instruction: Instruction, register: &mut Register) { register[self.ip_reg] = self.ip; execute_mnemonic(instruction, register); self.ip = register[self.ip_reg] + 1; } pub fn run(&mut self, program: &[Instruction], register: &mut Register) -> Result<(), String> { while let Some(&instruction) = program.get(self.ip as usize) { if self.ip == 3 { self.ip = optimized(register); continue; } let _c_ip = self.ip; self.execute(instruction, register); //_trace(_c_ip, instruction, register); } Ok(()) } } fn execute_mnemonic(Instruction { opcode, a, b, c }: Instruction, reg: &mut Register) { match opcode { AddR => reg[c] = reg[a] + reg[b], AddI => reg[c] = reg[a] + b, MulR => reg[c] = reg[a] * reg[b], MulI => reg[c] = reg[a] * b, BanR => reg[c] = reg[a] & reg[b], BanI => reg[c] = reg[a] & b, BorR => reg[c] = reg[a] | reg[b], BorI => reg[c] = reg[a] | b, SetR => reg[c] = reg[a], SetI => reg[c] = a, GtIR => reg[c] = if a > reg[b] { 1 } else { 0 }, GtRI => reg[c] = if reg[a] > b { 1 } else { 0 }, GtRR => reg[c] = if reg[a] > reg[b] { 1 } else { 0 }, EqIR => reg[c] = if a == reg[b] { 1 } else { 0 }, EqRI => reg[c] = if reg[a] == b { 1 } else { 0 }, EqRR => reg[c] = if reg[a] == reg[b] { 1 } else { 0 }, } } #[inline] fn _trace(ip: Addr, Instruction { opcode, a, b, c }: Instruction, reg: &Register) { match opcode { AddR => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg), AddI => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg), MulR => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg), MulI => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg), BanR => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg), BanI => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg), BorR => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg), BorI => println!("{:02}: {} {} {} {} : {} 
", ip, opcode, a, b, c, reg), SetR => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg), SetI => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg), GtIR => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg), GtRI => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg), GtRR => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg), EqIR => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg), EqRI => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg), EqRR => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg), } } #[derive(Debug, Clone, PartialEq)] pub struct Program { ip_reg: Addr, instructions: Vec<Instruction>, } impl Display for Program { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { writeln!(f, "#ip {}", self.ip_reg)?; for instruction in &self.instructions { writeln!(f, "{}", instruction)?; } Ok(()) } } impl Program { pub fn new(ip_reg: Addr, instructions: impl IntoIterator<Item = Instruction>) -> Self { Self { ip_reg, instructions: Vec::from_iter(instructions.into_iter()), } } pub fn ip_reg(&self) -> Addr { self.ip_reg } pub fn instructions(&self) -> &[Instruction] { &self.instructions } } #[aoc_generator(day19)] pub fn parse(input: &str) -> Result<Program, String> { let mut ip_reg = 6; let mut instructions = Vec::with_capacity(16); for line in input.lines() { if line.starts_with("#ip") { ip_reg = line[4..] .trim() .parse::<Addr>() .map_err(|e| e.to_string())?; } else { let opc = line[0..4].parse()?; let mut oprs = line[5..] .split(' ') .take(3) .map(|s| s.trim().parse::<Data>().map_err(|e| e.to_string())); let opr1 = oprs.next().unwrap()?; let opr2 = oprs.next().unwrap()?; let opr3 = oprs.next().unwrap()?; instructions.push(Instruction::new(opc, opr1, opr2, opr3)); } } Ok(Program::new(ip_reg, instructions)) } #[aoc(day19, part1)] pub fn run_background_process(program: &Program) -> Data { let mut interpreter = Interpreter::new(program.ip_reg); let mut register = Register::default(); interpreter .run(program.instructions(), &mut register) .unwrap(); register[0] } #[aoc(day19, part2)] pub fn run_background_process_2(program: &Program) -> Data { let mut interpreter = Interpreter::new(program.ip_reg); let mut register = Register::default(); register[0] = 1; interpreter .run(program.instructions(), &mut register) .unwrap(); register[0] } /// Repeated loop: /// /// ```text /// 'L1: R1 = R5 * R2 /// if R4 == R1 then /// R1 = 1 /// R0 = R5 + R0 /// else /// R1 = 0 /// end if /// R3 = R1 + R3 /// R2 = R2 + 1 /// if R2 > R4 then /// R1 = 1 /// R3 = R3 + 1 // goto 'L2 /// else /// R1 = 0 /// R3 = R3 + R1 /// R3 = 2 // goto 'L1 /// end if /// 'L2: /// ``` fn optimized(reg: &mut Register) -> Addr { if reg[4] % reg[5] == 0 { reg[0] = reg[5] + reg[0]; } reg[2] = reg[4]; reg[1] = 0; 12 } #[cfg(test)] mod tests;
//! In detail, when running this program, the following events occur: //! //! * The first line (#ip 0) indicates that the instruction pointer should be
random_line_split
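Reading the decompiled loop in the doc comment of `optimized`, each inner pass adds R5 to R0 whenever R5 divides R4 evenly, so combined with the outer loop (not shown) the background process appears to compute the sum of divisors of R4. That equivalence is an inference from the listing, not something the source states; a direct sketch of it:

```rust
// The whole program, with the hot loop replaced by `optimized`, should be
// equivalent to a sum-of-divisors computation (assumed reading of the code).
fn sum_of_divisors(n: u64) -> u64 {
    (1..=n).filter(|d| n % d == 0).sum()
}

fn main() {
    // 1 + 2 + 3 + 4 + 6 + 12
    assert_eq!(sum_of_divisors(12), 28);
}
```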
mod.rs
//! # Day 19: Go With The Flow //! //! With the Elves well on their way constructing the North Pole base, you turn //! your attention back to understanding the inner workings of programming the //! device. //! //! You can't help but notice that the device's opcodes don't contain any flow //! control like jump instructions. The device's manual goes on to explain: //! //! "In programs where flow control is required, the instruction pointer can be //! bound to a register so that it can be manipulated directly. This way, //! setr/seti can function as absolute jumps, addr/addi can function as relative //! jumps, and other opcodes can cause truly fascinating effects." //! //! This mechanism is achieved through a declaration like #ip 1, which would //! modify register 1 so that accesses to it let the program indirectly access //! the instruction pointer itself. To compensate for this kind of binding, //! there are now six registers (numbered 0 through 5); the five not bound to //! the instruction pointer behave as normal. Otherwise, the same rules apply as //! the last time you worked with this device. //! //! When the instruction pointer is bound to a register, its value is written to //! that register just before each instruction is executed, and the value of //! that register is written back to the instruction pointer immediately after //! each instruction finishes execution. Afterward, move to the next instruction //! by adding one to the instruction pointer, even if the value in the //! instruction pointer was just updated by an instruction. (Because of this, //! instructions must effectively set the instruction pointer to the instruction //! before the one they want executed next.) //! //! The instruction pointer is 0 during the first instruction, 1 during the //! second, and so on. If the instruction pointer ever causes the device to //! attempt to load an instruction outside the instructions defined in the //! program, the program instead immediately halts. The instruction pointer //! starts at 0. //! //! It turns out that this new information is already proving useful: the CPU in //! the device is not very powerful, and a background process is occupying most //! of its time. You dump the background process' declarations and instructions //! to a file (your puzzle input), making sure to use the names of the opcodes //! rather than the numbers. //! //! For example, suppose you have the following program: //! //! ```text //! #ip 0 //! seti 5 0 1 //! seti 6 0 2 //! addi 0 1 0 //! addr 1 2 3 //! setr 1 0 0 //! seti 8 0 4 //! seti 9 0 5 //! ``` //! //! When executed, the following instructions are executed. Each line contains //! the value of the instruction pointer at the time the instruction started, //! the values of the six registers before executing the instructions (in square //! brackets), the instruction itself, and the values of the six registers after //! executing the instruction (also in square brackets). //! //! ```text //! ip=0 [0, 0, 0, 0, 0, 0] seti 5 0 1 [0, 5, 0, 0, 0, 0] //! ip=1 [1, 5, 0, 0, 0, 0] seti 6 0 2 [1, 5, 6, 0, 0, 0] //! ip=2 [2, 5, 6, 0, 0, 0] addi 0 1 0 [3, 5, 6, 0, 0, 0] //! ip=4 [4, 5, 6, 0, 0, 0] setr 1 0 0 [5, 5, 6, 0, 0, 0] //! ip=6 [6, 5, 6, 0, 0, 0] seti 9 0 5 [6, 5, 6, 0, 0, 9] //! ``` //! //! In detail, when running this program, the following events occur: //! //! * The first line (#ip 0) indicates that the instruction pointer should be //! bound to register 0 in this program. This is not an instruction, and so //! 
the value of the instruction pointer does not change during the processing //! of this line. //! * The instruction pointer contains 0, and so the first instruction is //! executed (seti 5 0 1). It updates register 0 to the current instruction //! pointer value (0), sets register 1 to 5, sets the instruction pointer to //! the value of register 0 (which has no effect, as the instruction did not //! modify register 0), and then adds one to the instruction pointer. //! * The instruction pointer contains 1, and so the second instruction, seti 6 //! 0 2, is executed. This is very similar to the instruction before it: 6 is //! stored in register 2, and the instruction pointer is left with the value //! 2. //! * The instruction pointer is 2, which points at the instruction addi 0 1 0. //! This is like a relative jump: the value of the instruction pointer, 2, is //! loaded into register 0. Then, addi finds the result of adding the value in //! register 0 and the value 1, storing the result, 3, back in register 0. //! Register 0 is then copied back to the instruction pointer, which will //! cause it to end up 1 larger than it would have otherwise and skip the next //! instruction (addr 1 2 3) entirely. Finally, 1 is added to the instruction //! pointer. //! * The instruction pointer is 4, so the instruction setr 1 0 0 is run. This //! is like an absolute jump: it copies the value contained in register 1, 5, //! into register 0, which causes it to end up in the instruction pointer. The //! instruction pointer is then incremented, leaving it at 6. //! * The instruction pointer is 6, so the instruction seti 9 0 5 stores 9 into //! register 5. The instruction pointer is incremented, causing it to point //! outside the program, and so the program ends. //! //! What value is left in register 0 when the background process halts? //! //! ## Part 2 //! //! A new background process immediately spins up in its place. It appears //! identical, but on closer inspection, you notice that this time, register 0 //! started with the value 1. //! //! What value is left in register 0 when this new background process halts? //! //! 
[Advent of Code 2018 - Day 19](https://adventofcode.com/2018/day/19) use std::{ fmt::{self, Display}, iter::FromIterator, ops::{Index, IndexMut}, }; use crate::day16::{Data, Mnemonic}; use self::Mnemonic::*; #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub struct Register([Data; 6]); impl Display for Register { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!( f, "[{}, {}, {}, {}, {}, {}]", self.0[0], self.0[1], self.0[2], self.0[3], self.0[4], self.0[5] ) } } impl Default for Register { fn default() -> Self { Register([0; 6]) } } impl From<[Data; 6]> for Register { fn from(value: [Data; 6]) -> Self { Register(value) } } impl Index<Data> for Register { type Output = Data; fn index(&self, index: Data) -> &Self::Output { &self.0[index as usize] } } impl IndexMut<Data> for Register { fn index_mut(&mut self, index: Data) -> &mut <Self as Index<Data>>::Output { &mut self.0[index as usize] } } #[derive(Debug, Clone, Copy, PartialEq)] pub struct Instruction { pub opcode: Mnemonic, pub a: Data, pub b: Data, pub c: Data, } impl Display for Instruction { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{} {} {} {}", self.opcode, self.a, self.b, self.c) } } impl From<(Mnemonic, Data, Data, Data)> for Instruction { fn from((opcode, a, b, c): (Mnemonic, Data, Data, Data)) -> Self { Self { opcode, a, b, c } } } impl Instruction { pub fn new(opcode: Mnemonic, a: Data, b: Data, c: Data) -> Self { Self { opcode, a, b, c } } } pub type Addr = Data; #[derive(Debug, Clone, PartialEq)] pub struct Interpreter { ip_reg: Addr, ip: Addr, } impl Interpreter { pub fn new(ip_reg: Addr) -> Self { Self { ip_reg, ip: 0 } } #[inline] pub fn execute(&mut self, instruction: Instruction, register: &mut Register) { register[self.ip_reg] = self.ip; execute_mnemonic(instruction, register); self.ip = register[self.ip_reg] + 1; } pub fn run(&mut self, program: &[Instruction], register: &mut Register) -> Result<(), String> { while let Some(&instruction) = program.get(self.ip as usize) { if self.ip == 3 { self.ip = optimized(register); continue; } let _c_ip = self.ip; self.execute(instruction, register); //_trace(_c_ip, instruction, register); } Ok(()) } } fn execute_mnemonic(Instruction { opcode, a, b, c }: Instruction, reg: &mut Register) { match opcode { AddR => reg[c] = reg[a] + reg[b], AddI => reg[c] = reg[a] + b, MulR => reg[c] = reg[a] * reg[b], MulI => reg[c] = reg[a] * b, BanR => reg[c] = reg[a] & reg[b], BanI => reg[c] = reg[a] & b, BorR => reg[c] = reg[a] | reg[b], BorI => reg[c] = reg[a] | b, SetR => reg[c] = reg[a], SetI => reg[c] = a, GtIR => reg[c] = if a > reg[b] { 1 } else { 0 }, GtRI => reg[c] = if reg[a] > b { 1 } else { 0 }, GtRR => reg[c] = if reg[a] > reg[b] { 1 } else { 0 }, EqIR => reg[c] = if a == reg[b] { 1 } else { 0 }, EqRI => reg[c] = if reg[a] == b { 1 } else { 0 }, EqRR => reg[c] = if reg[a] == reg[b] { 1 } else
, } } #[inline] fn _trace(ip: Addr, Instruction { opcode, a, b, c }: Instruction, reg: &Register) { match opcode { AddR => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg), AddI => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg), MulR => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg), MulI => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg), BanR => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg), BanI => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg), BorR => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg), BorI => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg), SetR => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg), SetI => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg), GtIR => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg), GtRI => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg), GtRR => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg), EqIR => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg), EqRI => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg), EqRR => println!("{:02}: {} {} {} {} : {} ", ip, opcode, a, b, c, reg), } } #[derive(Debug, Clone, PartialEq)] pub struct Program { ip_reg: Addr, instructions: Vec<Instruction>, } impl Display for Program { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { writeln!(f, "#ip {}", self.ip_reg)?; for instruction in &self.instructions { writeln!(f, "{}", instruction)?; } Ok(()) } } impl Program { pub fn new(ip_reg: Addr, instructions: impl IntoIterator<Item = Instruction>) -> Self { Self { ip_reg, instructions: Vec::from_iter(instructions.into_iter()), } } pub fn ip_reg(&self) -> Addr { self.ip_reg } pub fn instructions(&self) -> &[Instruction] { &self.instructions } } #[aoc_generator(day19)] pub fn parse(input: &str) -> Result<Program, String> { let mut ip_reg = 6; let mut instructions = Vec::with_capacity(16); for line in input.lines() { if line.starts_with("#ip") { ip_reg = line[4..] .trim() .parse::<Addr>() .map_err(|e| e.to_string())?; } else { let opc = line[0..4].parse()?; let mut oprs = line[5..] .split(' ') .take(3) .map(|s| s.trim().parse::<Data>().map_err(|e| e.to_string())); let opr1 = oprs.next().unwrap()?; let opr2 = oprs.next().unwrap()?; let opr3 = oprs.next().unwrap()?; instructions.push(Instruction::new(opc, opr1, opr2, opr3)); } } Ok(Program::new(ip_reg, instructions)) } #[aoc(day19, part1)] pub fn run_background_process(program: &Program) -> Data { let mut interpreter = Interpreter::new(program.ip_reg); let mut register = Register::default(); interpreter .run(program.instructions(), &mut register) .unwrap(); register[0] } #[aoc(day19, part2)] pub fn run_background_process_2(program: &Program) -> Data { let mut interpreter = Interpreter::new(program.ip_reg); let mut register = Register::default(); register[0] = 1; interpreter .run(program.instructions(), &mut register) .unwrap(); register[0] } /// Repeated loop: /// /// ```text /// 'L1: R1 = R5 * R2 /// if R4 == R1 then /// R1 = 1 /// R0 = R5 + R0 /// else /// R1 = 0 /// end if /// R3 = R1 + R3 /// R2 = R2 + 1 /// if R2 > R4 then /// R1 = 1 /// R3 = R3 + 1 // goto 'L2 /// else /// R1 = 0 /// R3 = R3 + R1 /// R3 = 2 // goto 'L1 /// end if /// 'L2: /// ``` fn optimized(reg: &mut Register) -> Addr { if reg[4] % reg[5] == 0 { reg[0] = reg[5] + reg[0]; } reg[2] = reg[4]; reg[1] = 0; 12 } #[cfg(test)] mod tests;
{ 0 }
conditional_block
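The `parse` generator above slices fixed columns out of each instruction line (`line[0..4]` for the mnemonic, `line[5..]` for the operands). A looser whitespace-splitting variant is shown below purely as a hypothetical simplification, not as the crate's actual parser:

```rust
// Simplified "opcode a b c" line parser; error handling reduced to Option.
fn parse_line(line: &str) -> Option<(&str, i64, i64, i64)> {
    let mut it = line.split_whitespace();
    let opc = it.next()?;
    let a = it.next()?.parse().ok()?;
    let b = it.next()?.parse().ok()?;
    let c = it.next()?.parse().ok()?;
    Some((opc, a, b, c))
}

fn main() {
    assert_eq!(parse_line("seti 5 0 1"), Some(("seti", 5, 0, 1)));
    assert_eq!(parse_line("seti 5 0"), None); // missing operand
}
```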
ym.rs
use core::time::Duration; use core::num::NonZeroU32; use core::fmt; use core::ops::Range; use chrono::NaiveDateTime; pub mod flags; pub mod effects; mod parse; mod player; use flags::*; use effects::*; pub const MAX_DD_SAMPLES: usize = 32; pub const MFP_TIMER_FREQUENCY: u32 = 2_457_600; const DEFAULT_CHIPSET_FREQUENCY: u32 = 2_000_000; const DEFAULT_FRAME_FREQUENCY: u16 = 50; #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum YmVersion { Ym2, Ym3, Ym4, Ym5, Ym6, } impl YmVersion { /// The YM version identifier tag as a string (4 ascii characters). pub fn tag(self) -> &'static str { match self { YmVersion::Ym2 => "YM2!", YmVersion::Ym3 => "YM3!", YmVersion::Ym4 => "YM4!", YmVersion::Ym5 => "YM5!", YmVersion::Ym6 => "YM6!", } } } impl fmt::Display for YmVersion { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.tag().fmt(f) } } /// The **YM** music file. /// /// The YM-file consists of [YmFrame]s that represent the state of the AY/YM chipset registers and /// contain additional information about special effects. /// /// Depending on the [YmSong::version], special effects are encoded differently. #[derive(Debug, Clone)] pub struct YmSong { /// YM-file version. pub version: YmVersion, /// The last modification timestamp of the YM-file from the LHA envelope. pub created: Option<NaiveDateTime>, /// The song attributes. pub song_attrs: SongAttributes, /// The song title or a file name. pub title: String, /// The song author. pub author: String, /// The comment. pub comments: String, /// The number of cycles per second of the AY/YM chipset clock. pub chipset_frequency: u32, /// The number of frames played each second. pub frame_frequency: u16, /// The loop frame index. pub loop_frame: u32, /// The AY/YM state frames. pub frames: Box<[YmFrame]>, /// `DIGI-DRUM` samples. pub dd_samples: Box<[u8]>, /// `DIGI-DRUM` sample end indexes in [YmSong::dd_samples]. pub dd_samples_ends: [usize;MAX_DD_SAMPLES], cursor: usize, voice_effects: [(SidVoice, SinusSid, DigiDrum); 3], buzzer: SyncBuzzer, } /// This type represents the state of the AY/YM chipset registers and contains additional information /// about special effects. /// /// ```text /// X - AY/YM register data. /// S - Controls special effects. /// P - Frequency pre-divisor. /// F - Frequency divisor. /// - - Unused. /// ---------------------------------------------------------- /// b7 b6 b5 b4 b3 b2 b1 b0 Register description /// 0: X X X X X X X X Fine period voice A /// 1: S S S S X X X X Coarse period voice A /// 2: X X X X X X X X Fine period voice B /// 3: S S S S X X X X Coarse period voice B /// 4: X X X X X X X X Fine period voice C /// 5: - - - - X X X X Coarse period voice C /// 6: P P P X X X X X Noise period /// 7: X X X X X X X X Mixer control /// 8: P P P X X X X X Volume voice A /// 9: - - - X X X X X Volume voice B /// 10: - - - X X X X X Volume voice C /// 11: X X X X X X X X Envelope fine period /// 12: X X X X X X X X Envelope coarse period /// 13: x x x x X X X X Envelope shape /// ---------------------------------------------------------- /// virtual registers to store extra data for special effects: /// ---------------------------------------------------------- /// 14: F F F F F F F F Frequency divisor for S in 1 /// 15: F F F F F F F F Frequency divisor for S in 3 /// ``` /// /// The AY/YM `Envelope shape` register is modified only if the value of frame /// register 13 is not equal to `0xff`. /// /// # Special effects /// /// The frequency of a special effect is encoded as `(2457600 / P) / F`.
/// /// The divisor `F` is an unsigned 8-bit integer. /// /// The pre-divisor `P` is encoded as: /// /// |PPP| pre-divisor value| /// |-----------------------| /// |000| Timer off | /// |001| 4 | /// |010| 10 | /// |011| 16 | /// |100| 50 | /// |101| 64 | /// |110| 100 | /// |111| 200 | /// /// * The pre-divisor `P` in register 6 matches the effect controlled by register 1. /// * The divisor `F` in register 14 matches the effect controlled by register 1. /// * The pre-divisor `P` in register 8 matches the effect controlled by register 3. /// * The divisor `F` in register 15 matches the effect controlled by register 3. /// /// If an effect is active, the additional data resides in `X` bits in the `Volume` register of /// the relevant voice: /// /// * For the [`SID voice`][SidVoice] and [`Sinus SID`][SinusSid] effects the 4 lowest `X` bits /// determine the effect's volume. /// * For the [`Sync Buzzer`][SyncBuzzer] the 4 lowest `X` bits determine the effect's `Envelope shape`. /// * For the [`DIGI-DRUM`][DigiDrum] effect the 5 `X` bits determine the played sample number. /// * The `DIGI-DRUM` sample plays until its end or until it is overridden by another effect. /// * All other effects are active only for the duration of a single frame. /// * When the `DIGI-DRUM` is active the volume register from the frame for the relevant voice is being /// ignored and the relevant voice mixer tone and noise bits are forced to be set. /// /// The control bits of special effects are interpreted differently depending on the YM-file version. /// /// ## YM6! /// /// The `S` bits in registers 1 and 3 control any two of the selectable effects: /// ```text /// b7 b6 b5 b4 /// - - 0 0 effect disabled /// - - 0 1 effect active on voice A /// - - 1 0 effect active on voice B /// - - 1 1 effect active on voice C /// 0 0 - - select SID voice effect /// 0 1 - - select DIGI-DRUM effect /// 1 0 - - select Sinus SID effect /// 1 1 - - select Sync Buzzer effect /// ``` /// /// ## YM4!/YM5! /// /// The `S` bits in register 1 control the `SID voice` effect. /// The `S` bits in register 3 control the `DIGI-DRUM` effect. /// ```text /// b7 b6 b5 b4 /// - - 0 0 effect disabled /// - - 0 1 effect active on voice A /// - - 1 0 effect active on voice B /// - - 1 1 effect active on voice C /// - 0 - - SID voice timer continues, ignored for DIGI-DRUM /// - 1 - - SID voice timer restarts, ignored for DIGI-DRUM ///``` /// /// ## YM3! /// /// There are no special effects in this version. /// /// ## YM2! /// /// Only the `DIGI-DRUM` effect is recognized in this format. It is being played on voice C, and /// uses one of the 40 predefined samples. /// /// * The effect starts when the highest bit (7) of the `Volume voice C` register (10) is 1. /// * The sample number is taken from the lowest 7 bits of the `Volume voice C` register (10). /// * The effect frequency is calculated by `(2457600 / 4) / X`, where `X` is the unsigned 8-bit /// value stored in the register 12 of the frame. /// * The AY/YM chipset registers 11, 12 and 13 are only written if the value of /// frame register 13 is not equal to `0xFF`. /// * The register 12 of the AY/YM chipset is always being set to `0` in this format. /// * The register 13 of the AY/YM chipset is always being set to `0x10` in this format. #[derive(Default, Debug, Clone, Copy)] pub struct YmFrame { /// Frame data. pub data: [u8;16] } impl YmSong { /// Creates a new instance of `YmSong` from the given `frames` and other metadata.
pub fn new( version: YmVersion, frames: Box<[YmFrame]>, loop_frame: u32, title: String, created: Option<NaiveDateTime> ) -> YmSong { YmSong { version, created, song_attrs: SongAttributes::default(), title, author: String::new(), comments: String::new(), chipset_frequency: DEFAULT_CHIPSET_FREQUENCY, frame_frequency: DEFAULT_FRAME_FREQUENCY, loop_frame, frames, dd_samples: Box::new([]), dd_samples_ends: [0usize;MAX_DD_SAMPLES], cursor: 0, voice_effects: Default::default(), buzzer: Default::default() } } /// Returns `YmSong` with the `author` and `comments` set from the given arguments. pub fn with_meta(mut self, author: String, comments: String) -> YmSong { self.author = author; self.comments = comments; self } /// Returns `YmSong` with the `song_attrs`, `dd_samples` and `dd_samples_ends` set from the given arguments. pub fn with_samples( mut self, song_attrs: SongAttributes, dd_samples: Box<[u8]>, dd_samples_ends: [usize;MAX_DD_SAMPLES] ) -> YmSong { self.song_attrs = song_attrs; self.dd_samples = dd_samples; self.dd_samples_ends = dd_samples_ends; self } /// Returns `YmSong` with the `chipset_frequency` and `frame_frequency` set from the given arguments. pub fn with_frequency(mut self, chipset_frequency: u32, frame_frequency: u16) -> YmSong
/// Returns the song duration. pub fn song_duration(&self) -> Duration { let seconds = self.frames.len() as f64 / self.frame_frequency as f64; Duration::from_secs_f64(seconds) } /// Returns the AY/YM chipset clock frequency. #[inline] pub fn clock_frequency(&self) -> f32 { self.chipset_frequency as f32 } /// Returns the number of AY/YM chipset clock cycles of a single music frame. pub fn frame_cycles(&self) -> f32 { self.clock_frequency() / self.frame_frequency as f32 } /// Calculates the timer interval in clock cycles, from the given `divisor`. pub fn timer_interval(&self, divisor: NonZeroU32) -> f32 { let divisor = divisor.get() as f32; self.clock_frequency() * divisor / MFP_TIMER_FREQUENCY as f32 } /// Returns the indicated sample data range in [YmSong::dd_samples] for the given `sample`. /// /// # Panics /// Panics if `sample` is not below [MAX_DD_SAMPLES]. pub fn sample_data_range(&self, sample: usize) -> Range<usize> { let end = self.dd_samples_ends[sample]; let start = match sample { 0 => 0, index => self.dd_samples_ends[index - 1] }; start..end } } impl YmFrame { /// Returns special effect control flags from register 1. pub fn fx0(&self) -> FxCtrlFlags { FxCtrlFlags::from_bits_retain(self.data[1]) } /// Returns special effect control flags from register 3. pub fn fx1(&self) -> FxCtrlFlags { FxCtrlFlags::from_bits_retain(self.data[3]) } /// Returns the value of the volume register for the indicated `chan`. /// /// The 2 lowest bits of `chan` indicate the voice channel: /// ```text /// b1 b0 voice channel /// 0 0 A /// 0 1 B /// 1 0 C /// 1 1 invalid (panics in debug mode) /// ``` pub fn vol(&self, chan: u8) -> u8 { let chan = chan & 3; debug_assert_ne!(chan, 3); self.data[(VOL_A_REG + chan) as usize] & 0x1f } /// Calculates the timer divisor for the special effect `fx0`. pub fn timer_divisor0(&self) -> Option<NonZeroU32> { calculate_timer_divisor(self.data[6], self.data[14]) } /// Calculates the timer divisor for the special effect `fx1`. pub fn timer_divisor1(&self) -> Option<NonZeroU32> { calculate_timer_divisor(self.data[8], self.data[15]) } } fn calculate_timer_divisor(prediv3: u8, div8: u8) -> Option<NonZeroU32> { let prediv = match prediv3 & 0b11100000 { 0b00000000 => 0, 0b00100000 => 4, 0b01000000 => 10, 0b01100000 => 16, 0b10000000 => 50, 0b10100000 => 64, 0b11000000 => 100, 0b11100000 => 200, _ => unreachable!() }; NonZeroU32::new(prediv * div8 as u32) }
{ self.chipset_frequency = chipset_frequency; self.frame_frequency = frame_frequency; self }
identifier_body
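The doc comments above define the effect frequency as `(2457600 / P) / F` with a 3-bit pre-divisor table, which is what `calculate_timer_divisor` encodes as a combined divisor. A standalone sketch that goes one step further and returns the resulting frequency; the `effect_frequency` helper is illustrative, not part of the crate:

```rust
// Sketch of the effect-frequency formula from the doc comment, using the
// same pre-divisor table (index 0 means "timer off").
const MFP_TIMER_FREQUENCY: u32 = 2_457_600;

fn effect_frequency(prediv_bits: u8, div8: u8) -> Option<u32> {
    let prediv = [0u32, 4, 10, 16, 50, 64, 100, 200][(prediv_bits & 0b111) as usize];
    let divisor = prediv.checked_mul(div8 as u32)?;
    if divisor == 0 {
        return None; // timer off, or a zero divisor byte
    }
    Some(MFP_TIMER_FREQUENCY / divisor)
}

fn main() {
    // pre-divisor code 0b001 selects 4; with F = 128 the effect runs at
    // (2457600 / 4) / 128 = 4800 Hz.
    assert_eq!(effect_frequency(0b001, 128), Some(4800));
    assert_eq!(effect_frequency(0b000, 128), None); // timer off
}
```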
ym.rs
use core::time::Duration; use core::num::NonZeroU32; use core::fmt; use core::ops::Range; use chrono::NaiveDateTime; pub mod flags; pub mod effects; mod parse; mod player; use flags::*; use effects::*; pub const MAX_DD_SAMPLES: usize = 32; pub const MFP_TIMER_FREQUENCY: u32 = 2_457_600; const DEFAULT_CHIPSET_FREQUENCY: u32 = 2_000_000; const DEFAULT_FRAME_FREQUENCY: u16 = 50; #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum YmVersion { Ym2, Ym3, Ym4, Ym5, Ym6, } impl YmVersion { /// The YM version identifier tag as a string (4 ascii characters). pub fn tag(self) -> &'static str { match self { YmVersion::Ym2 => "YM2!", YmVersion::Ym3 => "YM3!", YmVersion::Ym4 => "YM4!", YmVersion::Ym5 => "YM5!", YmVersion::Ym6 => "YM6!", } } } impl fmt::Display for YmVersion { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.tag().fmt(f) } } /// The **YM** music file. /// /// The YM-file consists of [YmFrame]s that represent the state of the AY/YM chipset registers and /// contain additional information about special effects. /// /// Depending on the [YmSong::version], special effects are encoded differently. #[derive(Debug, Clone)] pub struct YmSong { /// YM-file version. pub version: YmVersion, /// The last modification timestamp of the YM-file from the LHA envelope. pub created: Option<NaiveDateTime>, /// The song attributes. pub song_attrs: SongAttributes, /// The song title or a file name. pub title: String, /// The song author. pub author: String, /// The comment. pub comments: String, /// The number of cycles per second of the AY/YM chipset clock. pub chipset_frequency: u32, /// The number of frames played each second. pub frame_frequency: u16, /// The loop frame index. pub loop_frame: u32, /// The AY/YM state frames. pub frames: Box<[YmFrame]>, /// `DIGI-DRUM` samples. pub dd_samples: Box<[u8]>, /// `DIGI-DRUM` sample end indexes in [YmSong::dd_samples]. pub dd_samples_ends: [usize;MAX_DD_SAMPLES], cursor: usize, voice_effects: [(SidVoice, SinusSid, DigiDrum); 3], buzzer: SyncBuzzer, } /// This type represents the state of the AY/YM chipset registers and contains additional information /// about special effects. /// /// ```text /// X - AY/YM register data. /// S - Controls special effects. /// P - Frequency pre-divisor. /// F - Frequency divisor. /// - - Unused. /// ---------------------------------------------------------- /// b7 b6 b5 b4 b3 b2 b1 b0 Register description /// 0: X X X X X X X X Fine period voice A /// 1: S S S S X X X X Coarse period voice A /// 2: X X X X X X X X Fine period voice B /// 3: S S S S X X X X Coarse period voice B /// 4: X X X X X X X X Fine period voice C /// 5: - - - - X X X X Coarse period voice C /// 6: P P P X X X X X Noise period /// 7: X X X X X X X X Mixer control /// 8: P P P X X X X X Volume voice A /// 9: - - - X X X X X Volume voice B /// 10: - - - X X X X X Volume voice C /// 11: X X X X X X X X Envelope fine period /// 12: X X X X X X X X Envelope coarse period /// 13: x x x x X X X X Envelope shape /// ---------------------------------------------------------- /// virtual registers to store extra data for special effects: /// ---------------------------------------------------------- /// 14: F F F F F F F F Frequency divisor for S in 1 /// 15: F F F F F F F F Frequency divisor for S in 3 /// ``` /// /// The AY/YM `Envelope shape` register is modified only if the value of frame /// register 13 is not equal to `0xff`. /// /// # Special effects /// /// The frequency of a special effect is encoded as `(2457600 / P) / F`.
/// /// The divisor `F` is an unsigned 8-bit integer. /// /// The pre-divisor `P` is encoded as: /// /// |PPP| pre-divisor value| /// |---|------------------| /// |000| Timer off | /// |001| 4 | /// |010| 10 | /// |011| 16 | /// |100| 50 | /// |101| 64 | /// |110| 100 | /// |111| 200 | /// /// * The pre-divisor `P` in register 6 matches the effect controlled by register 1. /// * The divisor `F` in register 14 matches the effect controlled by register 1. /// * The pre-divisor `P` in register 8 matches the effect controlled by register 3. /// * The divisor `F` in register 15 matches the effect controlled by register 3. /// /// If an effect is active, the additional data resides in the `X` bits of the `Volume` register of /// the relevant voice: /// /// * For the [`SID voice`][SidVoice] and [`Sinus SID`][SinusSid] effects the 4 lowest `X` bits /// determine the effect's volume. /// * For the [`Sync Buzzer`][SyncBuzzer] the 4 lowest `X` bits determine the effect's `Envelope shape`. /// * For the [`DIGI-DRUM`][DigiDrum] effect the 5 `X` bits determine the played sample number. /// * The `DIGI-DRUM` sample plays until its end or until it is overridden by another effect. /// * All other effects are active only for the duration of a single frame. /// * When the `DIGI-DRUM` is active, the volume register from the frame for the relevant voice is /// ignored and the relevant voice mixer tone and noise bits are forced to be set. /// /// The control bits of special effects are interpreted differently depending on the YM-file version. /// /// ## YM6! /// /// The `S` bits in registers 1 and 3 control any two of the selectable effects: /// ```text /// b7 b6 b5 b4 /// - - 0 0 effect disabled /// - - 0 1 effect active on voice A /// - - 1 0 effect active on voice B /// - - 1 1 effect active on voice C /// 0 0 - - select SID voice effect /// 0 1 - - select DIGI-DRUM effect /// 1 0 - - select Sinus SID effect /// 1 1 - - select Sync Buzzer effect /// ``` /// /// ## YM4!/YM5! /// /// The `S` bits in register 1 control the `SID voice` effect. /// The `S` bits in register 3 control the `DIGI-DRUM` effect. /// ```text /// b7 b6 b5 b4 /// - - 0 0 effect disabled /// - - 0 1 effect active on voice A /// - - 1 0 effect active on voice B /// - - 1 1 effect active on voice C /// - 0 - - SID voice timer continues, ignored for DIGI-DRUM /// - 1 - - SID voice timer restarts, ignored for DIGI-DRUM /// ``` /// /// ## YM3! /// /// There are no special effects in this version. /// /// ## YM2! /// /// Only the `DIGI-DRUM` effect is recognized in this format. It is played on voice C and /// uses one of the 40 predefined samples. /// /// * The effect starts when the highest bit (7) of the `Volume voice C` register (10) is 1. /// * The sample number is taken from the lowest 7 bits of the `Volume voice C` register (10). /// * The effect frequency is calculated by `(2457600 / 4) / X`, where `X` is the unsigned 8-bit /// value stored in the register 12 of the frame. /// * The values of AY/YM chipset registers 11, 12 and 13 are only written if the value of the /// frame register 13 is not equal to `0xFF`. /// * The register 12 of the AY/YM chipset is always set to `0` in this format. /// * The register 13 of the AY/YM chipset is always set to `0x10` in this format. #[derive(Default, Debug, Clone, Copy)] pub struct YmFrame { /// Frame data. pub data: [u8; 16] } impl YmSong { /// Creates a new instance of `YmSong` from the given `frames` and other metadata. 
pub fn new( version: YmVersion, frames: Box<[YmFrame]>, loop_frame: u32, title: String, created: Option<NaiveDateTime> ) -> YmSong { YmSong { version, created, song_attrs: SongAttributes::default(), title, author: String::new(), comments: String::new(), chipset_frequency: DEFAULT_CHIPSET_FREQUENCY, frame_frequency: DEFAULT_FRAME_FREQUENCY, loop_frame, frames, dd_samples: Box::new([]), dd_samples_ends: [0usize;MAX_DD_SAMPLES], cursor: 0, voice_effects: Default::default(), buzzer: Default::default() } } /// Returns `YmSong` with the `author` and `comments` set from the given arguments. pub fn with_meta(mut self, author: String, comments: String) -> YmSong { self.author = author; self.comments = comments; self } /// Returns `YmSong` with the `song_attrs`, `dd_samples` and `dd_samples_ends` set from the given arguments. pub fn with_samples( mut self, song_attrs: SongAttributes, dd_samples: Box<[u8]>, dd_samples_ends: [usize;MAX_DD_SAMPLES] ) -> YmSong { self.song_attrs = song_attrs; self.dd_samples = dd_samples; self.dd_samples_ends = dd_samples_ends; self } /// Returns `YmSong` with the `chipset_frequency` and `frame_frequency` set from the given arguments. pub fn with_frequency(mut self, chipset_frequency: u32, frame_frequency: u16) -> YmSong { self.chipset_frequency = chipset_frequency; self.frame_frequency = frame_frequency; self } /// Returns the song duration. pub fn
(&self) -> Duration { let seconds = self.frames.len() as f64 / self.frame_frequency as f64; Duration::from_secs_f64(seconds) } /// Returns the AY/YM chipset clock frequency. #[inline] pub fn clock_frequency(&self) -> f32 { self.chipset_frequency as f32 } /// Returns the number of AY/YM chipset clock cycles of a single music frame. pub fn frame_cycles(&self) -> f32 { self.clock_frequency() / self.frame_frequency as f32 } /// Calculates the timer interval in clock cycles, from the given `divisor`. pub fn timer_interval(&self, divisor: NonZeroU32) -> f32 { let divisor = divisor.get() as f32; self.clock_frequency() as f32 * divisor / MFP_TIMER_FREQUENCY as f32 } /// Returns the indicated sample data range in the [YmSong::dd_samples] for the given `sample`. /// /// # Panics /// Panics if the `sample` value is not below [MAX_DD_SAMPLES]. pub fn sample_data_range(&self, sample: usize) -> Range<usize> { let end = self.dd_samples_ends[sample]; let start = match sample { 0 => 0, index => self.dd_samples_ends[index - 1] }; start..end } } impl YmFrame { /// Returns special effect control flags from the register 1. pub fn fx0(&self) -> FxCtrlFlags { FxCtrlFlags::from_bits_retain(self.data[1]) } /// Returns special effect control flags from the register 3. pub fn fx1(&self) -> FxCtrlFlags { FxCtrlFlags::from_bits_retain(self.data[3]) } /// Returns the value of the volume register for the indicated `chan`. /// /// The 2 lowest bits of `chan` indicate the voice channel: /// ```text /// b1 b0 voice channel /// 0 0 A /// 0 1 B /// 1 0 C /// 1 1 invalid (panics in debug mode) /// ``` pub fn vol(&self, chan: u8) -> u8 { let chan = chan & 3; debug_assert_ne!(chan, 3); self.data[(VOL_A_REG + chan) as usize] & 0x1f } /// Calculates the timer divisor for the special effect `fx0`. pub fn timer_divisor0(&self) -> Option<NonZeroU32> { calculate_timer_divisor(self.data[6], self.data[14]) } /// Calculates the timer divisor for the special effect `fx1`. pub fn timer_divisor1(&self) -> Option<NonZeroU32> { calculate_timer_divisor(self.data[8], self.data[15]) } } fn calculate_timer_divisor(prediv3: u8, div8: u8) -> Option<NonZeroU32> { let prediv = match prediv3 & 0b11100000 { 0b00000000 => 0, 0b00100000 => 4, 0b01000000 => 10, 0b01100000 => 16, 0b10000000 => 50, 0b10100000 => 64, 0b11000000 => 100, 0b11100000 => 200, _ => unreachable!() }; NonZeroU32::new(prediv * div8 as u32) }
song_duration
identifier_name
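The `(2457600 / P) / F` encoding documented above lends itself to a small worked example. The following standalone sketch decodes the three `PPP` bits per the table in the doc comment and computes the resulting effect frequency in Hz; `decode_prediv` and `effect_frequency` are illustrative names, not part of the crate.

```rust
// Standalone sketch of the effect-frequency encoding described above.
const MFP_TIMER_FREQUENCY: u32 = 2_457_600;

// Map the three high `PPP` bits to the pre-divisor table from the docs.
fn decode_prediv(prediv3: u8) -> Option<u32> {
    match (prediv3 >> 5) & 0b111 {
        0b000 => None, // timer off
        0b001 => Some(4),
        0b010 => Some(10),
        0b011 => Some(16),
        0b100 => Some(50),
        0b101 => Some(64),
        0b110 => Some(100),
        0b111 => Some(200),
        _ => unreachable!(),
    }
}

// Effect frequency in Hz: (2457600 / P) / F, or None when the timer is off.
fn effect_frequency(prediv3: u8, div8: u8) -> Option<f32> {
    let p = decode_prediv(prediv3)?;
    if div8 == 0 {
        return None;
    }
    Some(MFP_TIMER_FREQUENCY as f32 / p as f32 / div8 as f32)
}

fn main() {
    // PPP = 0b010 selects pre-divisor 10; F = 48: (2457600 / 10) / 48 = 5120 Hz.
    assert_eq!(effect_frequency(0b0100_0000, 48), Some(5120.0));
}
```

This mirrors what `calculate_timer_divisor` in the file above does, except that function returns the combined divisor `P * F` for the MFP timer rather than the final frequency.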
ym.rs
use core::time::Duration; use core::num::NonZeroU32; use core::fmt; use core::ops::Range; use chrono::NaiveDateTime; pub mod flags; pub mod effects; mod parse; mod player; use flags::*; use effects::*; pub const MAX_DD_SAMPLES: usize = 32; pub const MFP_TIMER_FREQUENCY: u32 = 2_457_600; const DEFAULT_CHIPSET_FREQUENCY: u32 = 2_000_000; const DEFAULT_FRAME_FREQUENCY: u16 = 50; #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum YmVersion { Ym2, Ym3, Ym4, Ym5, Ym6, } impl YmVersion { /// The YM version identifier tag as a string (4 ASCII characters). pub fn tag(self) -> &'static str { match self { YmVersion::Ym2 => "YM2!", YmVersion::Ym3 => "YM3!", YmVersion::Ym4 => "YM4!", YmVersion::Ym5 => "YM5!", YmVersion::Ym6 => "YM6!", } } } impl fmt::Display for YmVersion { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.tag().fmt(f) } } /// The **YM** music file. /// /// The YM-file consists of [YmFrame]s that represent the state of the AY/YM chipset registers and /// contain additional information about special effects. /// /// Depending on the [YmSong::version], special effects are encoded differently. #[derive(Debug, Clone)] pub struct YmSong { /// YM-file version. pub version: YmVersion, /// The last modification timestamp of the YM-file from the LHA envelope. pub created: Option<NaiveDateTime>, /// The song attributes. pub song_attrs: SongAttributes, /// The song title or a file name. pub title: String, /// The song author. pub author: String, /// The comment. pub comments: String, /// The number of cycles per second of the AY/YM chipset clock. pub chipset_frequency: u32, /// The number of frames played each second. pub frame_frequency: u16, /// The loop frame index. pub loop_frame: u32, /// The AY/YM state frames. pub frames: Box<[YmFrame]>, /// `DIGI-DRUM` samples. pub dd_samples: Box<[u8]>, /// `DIGI-DRUM` sample end indexes in [YmSong::dd_samples]. pub dd_samples_ends: [usize; MAX_DD_SAMPLES], cursor: usize, voice_effects: [(SidVoice, SinusSid, DigiDrum); 3], buzzer: SyncBuzzer, } /// This type represents the state of the AY/YM chipset registers and contains additional information /// about special effects. /// /// ```text /// X - AY/YM register data. /// S - Controls special effects. /// P - Frequency pre-divisor. /// F - Frequency divisor. /// - - Unused. /// ---------------------------------------------------------- /// b7 b6 b5 b4 b3 b2 b1 b0 Register description /// 0: X X X X X X X X Fine period voice A /// 1: S S S S X X X X Coarse period voice A /// 2: X X X X X X X X Fine period voice B /// 3: S S S S X X X X Coarse period voice B /// 4: X X X X X X X X Fine period voice C /// 5: - - - - X X X X Coarse period voice C /// 6: P P P X X X X X Noise period /// 7: X X X X X X X X Mixer control /// 8: P P P X X X X X Volume voice A /// 9: - - - X X X X X Volume voice B /// 10: - - - X X X X X Volume voice C /// 11: X X X X X X X X Envelope fine period /// 12: X X X X X X X X Envelope coarse period /// 13: x x x x X X X X Envelope shape /// ---------------------------------------------------------- /// virtual registers to store extra data for special effects: /// ---------------------------------------------------------- /// 14: F F F F F F F F Frequency divisor for S in 1 /// 15: F F F F F F F F Frequency divisor for S in 3 /// ``` /// /// The AY/YM `Envelope shape` register is modified only if the value of frame /// register 13 is not equal to `0xff`. /// /// # Special effects /// /// The frequency of a special effect is encoded as `(2457600 / P) / F`. 
/// /// The divisor `F` is an unsigned 8-bit integer. /// /// The pre-divisor `P` is encoded as: /// /// |PPP| pre-divisor value| /// |---|------------------| /// |000| Timer off | /// |001| 4 | /// |010| 10 | /// |011| 16 | /// |100| 50 | /// |101| 64 | /// |110| 100 | /// |111| 200 | /// /// * The pre-divisor `P` in register 6 matches the effect controlled by register 1. /// * The divisor `F` in register 14 matches the effect controlled by register 1. /// * The pre-divisor `P` in register 8 matches the effect controlled by register 3. /// * The divisor `F` in register 15 matches the effect controlled by register 3. /// /// If an effect is active, the additional data resides in the `X` bits of the `Volume` register of /// the relevant voice: /// /// * For the [`SID voice`][SidVoice] and [`Sinus SID`][SinusSid] effects the 4 lowest `X` bits /// determine the effect's volume. /// * For the [`Sync Buzzer`][SyncBuzzer] the 4 lowest `X` bits determine the effect's `Envelope shape`. /// * For the [`DIGI-DRUM`][DigiDrum] effect the 5 `X` bits determine the played sample number. /// * The `DIGI-DRUM` sample plays until its end or until it is overridden by another effect. /// * All other effects are active only for the duration of a single frame. /// * When the `DIGI-DRUM` is active, the volume register from the frame for the relevant voice is /// ignored and the relevant voice mixer tone and noise bits are forced to be set. /// /// The control bits of special effects are interpreted differently depending on the YM-file version. /// /// ## YM6! /// /// The `S` bits in registers 1 and 3 control any two of the selectable effects: /// ```text /// b7 b6 b5 b4 /// - - 0 0 effect disabled /// - - 0 1 effect active on voice A /// - - 1 0 effect active on voice B /// - - 1 1 effect active on voice C /// 0 0 - - select SID voice effect /// 0 1 - - select DIGI-DRUM effect /// 1 0 - - select Sinus SID effect /// 1 1 - - select Sync Buzzer effect /// ``` /// /// ## YM4!/YM5! /// /// The `S` bits in register 1 control the `SID voice` effect. /// The `S` bits in register 3 control the `DIGI-DRUM` effect. /// ```text /// b7 b6 b5 b4 /// - - 0 0 effect disabled /// - - 0 1 effect active on voice A /// - - 1 0 effect active on voice B /// - - 1 1 effect active on voice C /// - 0 - - SID voice timer continues, ignored for DIGI-DRUM /// - 1 - - SID voice timer restarts, ignored for DIGI-DRUM /// ``` /// /// ## YM3! /// /// There are no special effects in this version. /// /// ## YM2! /// /// Only the `DIGI-DRUM` effect is recognized in this format. It is played on voice C, and
/// uses one of the 40 predefined samples. /// /// * The effect starts when the highest bit (7) of the `Volume voice C` register (10) is 1. /// * The sample number is taken from the lowest 7 bits of the `Volume voice C` register (10). /// * The effect frequency is calculated by `(2457600 / 4) / X`, where `X` is the unsigned 8-bit /// value stored in the register 12 of the frame. /// * The values of AY/YM chipset registers 11, 12 and 13 are only written if the value of the /// frame register 13 is not equal to `0xFF`. /// * The register 12 of the AY/YM chipset is always set to `0` in this format. /// * The register 13 of the AY/YM chipset is always set to `0x10` in this format. #[derive(Default, Debug, Clone, Copy)] pub struct YmFrame { /// Frame data. pub data: [u8; 16] } impl YmSong { /// Creates a new instance of `YmSong` from the given `frames` and other metadata. pub fn new( version: YmVersion, frames: Box<[YmFrame]>, loop_frame: u32, title: String, created: Option<NaiveDateTime> ) -> YmSong { YmSong { version, created, song_attrs: SongAttributes::default(), title, author: String::new(), comments: String::new(), chipset_frequency: DEFAULT_CHIPSET_FREQUENCY, frame_frequency: DEFAULT_FRAME_FREQUENCY, loop_frame, frames, dd_samples: Box::new([]), dd_samples_ends: [0usize; MAX_DD_SAMPLES], cursor: 0, voice_effects: Default::default(), buzzer: Default::default() } } /// Returns `YmSong` with the `author` and `comments` set from the given arguments. pub fn with_meta(mut self, author: String, comments: String) -> YmSong { self.author = author; self.comments = comments; self } /// Returns `YmSong` with the `song_attrs`, `dd_samples` and `dd_samples_ends` set from the given arguments. pub fn with_samples( mut self, song_attrs: SongAttributes, dd_samples: Box<[u8]>, dd_samples_ends: [usize; MAX_DD_SAMPLES] ) -> YmSong { self.song_attrs = song_attrs; self.dd_samples = dd_samples; self.dd_samples_ends = dd_samples_ends; self } /// Returns `YmSong` with the `chipset_frequency` and `frame_frequency` set from the given arguments. pub fn with_frequency(mut self, chipset_frequency: u32, frame_frequency: u16) -> YmSong { self.chipset_frequency = chipset_frequency; self.frame_frequency = frame_frequency; self } /// Returns the song duration. pub fn song_duration(&self) -> Duration { let seconds = self.frames.len() as f64 / self.frame_frequency as f64; Duration::from_secs_f64(seconds) } /// Returns the AY/YM chipset clock frequency. #[inline] pub fn clock_frequency(&self) -> f32 { self.chipset_frequency as f32 } /// Returns the number of AY/YM chipset clock cycles of a single music frame. pub fn frame_cycles(&self) -> f32 { self.clock_frequency() / self.frame_frequency as f32 } /// Calculates the timer interval in clock cycles, from the given `divisor`. pub fn timer_interval(&self, divisor: NonZeroU32) -> f32 { let divisor = divisor.get() as f32; self.clock_frequency() as f32 * divisor / MFP_TIMER_FREQUENCY as f32 } /// Returns the indicated sample data range in the [YmSong::dd_samples] for the given `sample`. /// /// # Panics /// Panics if the `sample` value is not below [MAX_DD_SAMPLES]. pub fn sample_data_range(&self, sample: usize) -> Range<usize> { let end = self.dd_samples_ends[sample]; let start = match sample { 0 => 0, index => self.dd_samples_ends[index - 1] }; start..end } } impl YmFrame { /// Returns special effect control flags from the register 1. pub fn fx0(&self) -> FxCtrlFlags { FxCtrlFlags::from_bits_retain(self.data[1]) } /// Returns special effect control flags from the register 3. 
pub fn fx1(&self) -> FxCtrlFlags { FxCtrlFlags::from_bits_retain(self.data[3]) } /// Returns the value of the volume register for the indicated `chan`. /// /// The 2 lowest bits of `chan` indicate the voice channel: /// ```text /// b1 b0 voice channel /// 0 0 A /// 0 1 B /// 1 0 C /// 1 1 invalid (panics in debug mode) /// ``` pub fn vol(&self, chan: u8) -> u8 { let chan = chan & 3; debug_assert_ne!(chan, 3); self.data[(VOL_A_REG + chan) as usize] & 0x1f } /// Calculates the timer divisor for the special effect `fx0`. pub fn timer_divisor0(&self) -> Option<NonZeroU32> { calculate_timer_divisor(self.data[6], self.data[14]) } /// Calculates the timer divisor for the special effect `fx1`. pub fn timer_divisor1(&self) -> Option<NonZeroU32> { calculate_timer_divisor(self.data[8], self.data[15]) } } fn calculate_timer_divisor(prediv3: u8, div8: u8) -> Option<NonZeroU32> { let prediv = match prediv3 & 0b11100000 { 0b00000000 => 0, 0b00100000 => 4, 0b01000000 => 10, 0b01100000 => 16, 0b10000000 => 50, 0b10100000 => 64, 0b11000000 => 100, 0b11100000 => 200, _ => unreachable!() }; NonZeroU32::new(prediv * div8 as u32) }
random_line_split
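The builder-style constructors shown in this record (`new`, `with_meta`, `with_frequency`) compose naturally with `song_duration`. A brief usage sketch, assuming the crate's `YmSong`, `YmFrame` and `YmVersion` types are in scope; the frame data is empty filler, not a real YM dump:

```rust
use core::time::Duration;

fn main() {
    // 150 default (all-zero) frames stand in for real register dumps.
    let frames = vec![YmFrame::default(); 150].into_boxed_slice();
    let song = YmSong::new(YmVersion::Ym6, frames, 0, "demo".to_string(), None)
        .with_meta("author".to_string(), "comment".to_string())
        .with_frequency(2_000_000, 50); // 2 MHz chip clock, 50 frames per second
    // 150 frames at 50 frames per second play for 3 seconds.
    assert_eq!(song.song_duration(), Duration::from_secs(3));
}
```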
coprocessor.rs
// Copyright 2016 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. use std::sync::Arc; use std::{result, error}; use std::thread::{self, JoinHandle}; use std::sync::mpsc::{self, Sender, Receiver}; use mio::Token; use tipb::select::{self, SelectRequest, SelectResponse, Row}; use tipb::schema::IndexInfo; use protobuf::{Message as PbMsg, RepeatedField}; use byteorder::{BigEndian, ReadBytesExt}; use storage::{Engine, SnapshotStore, engine, txn, mvcc}; use kvproto::kvrpcpb::{Context, LockInfo}; use kvproto::msgpb::{MessageType, Message}; use kvproto::coprocessor::{Request, Response, KeyRange}; use kvproto::errorpb; use storage::Key; use util::codec::{Datum, table, datum}; use util::xeval::Evaluator; use server::{self, SendCh, Msg, ConnData}; pub const REQ_TYPE_SELECT: i64 = 101; pub const REQ_TYPE_INDEX: i64 = 102; const DEFAULT_ERROR_CODE: i32 = 1; quick_error! { #[derive(Debug)] pub enum Error { Region(err: errorpb::Error) { description("region related failure") display("region {:?}", err) } Locked(l: LockInfo) { description("key is locked") display("locked {:?}", l) } Other(err: Box<error::Error + Send + Sync>) { from() cause(err.as_ref()) description(err.description()) display("unknown error {:?}", err) } } } pub type Result<T> = result::Result<T, Error>; impl From<engine::Error> for Error { fn from(e: engine::Error) -> Error { match e { engine::Error::Request(e) => Error::Region(e), _ => Error::Other(box e), } } } impl From<txn::Error> for Error { fn from(e: txn::Error) -> Error { match e { txn::Error::Mvcc(mvcc::Error::KeyIsLocked { primary, ts, key }) => { let mut info = LockInfo::new(); info.set_primary_lock(primary); info.set_lock_version(ts); info.set_key(key); Error::Locked(info) } _ => Error::Other(box e), } } } pub struct RegionEndPoint { tx: Sender<EndPointMessage>, handle: Option<JoinHandle<()>>, } type ResponseHandler = Box<Fn(Response) -> ()>; #[derive(Debug)] enum EndPointMessage { Job(Request, Token, u64), Close, } fn msg_poller(engine: Arc<Box<Engine>>, rx: Receiver<EndPointMessage>, ch: SendCh) { info!("EndPoint started."); let end_point = SnapshotEndPoint::new(engine); loop { let msg = rx.recv(); if let Err(e) = msg { error!("failed to receive job: {:?}", e); break; } let msg = msg.unwrap(); debug!("recv req: {:?}", msg); match msg { EndPointMessage::Job(req, token, msg_id) => { handle_request(req, ch.clone(), token, msg_id, &end_point) } EndPointMessage::Close => break, } } info!("EndPoint closing."); } impl RegionEndPoint { pub fn new(engine: Arc<Box<Engine>>, ch: SendCh) -> RegionEndPoint { let (tx, rx) = mpsc::channel(); let builder = thread::Builder::new().name("EndPoint".to_owned()); let handle = builder.spawn(move || msg_poller(engine, rx, ch)).unwrap(); RegionEndPoint { tx: tx, handle: Some(handle), } } pub fn on_request(&self, req: Request, token: Token, msg_id: u64) -> server::Result<()> { box_try!(self.tx.send(EndPointMessage::Job(req, token, msg_id))); Ok(()) } pub fn stop(&mut self) { if self.handle.is_none() { return; } if let Err(e) = self.tx.send(EndPointMessage::Close) { error!("failed to ask the coprocessor to stop: 
{:?}", e); } if let Err(e) = self.handle.take().unwrap().join() { error!("failed to stop the coprocessor: {:?}", e); } } } fn handle_request(req: Request, ch: SendCh, token: Token, msg_id: u64, end_point: &SnapshotEndPoint) { let cb = box move |r| { let mut resp_msg = Message::new(); resp_msg.set_msg_type(MessageType::CopResp); resp_msg.set_cop_resp(r); if let Err(e) = ch.send(Msg::WriteData { token: token, data: ConnData::new(msg_id, resp_msg), }) { error!("send cop resp failed with token {:?}, msg id {}, err {:?}", token, msg_id, e); } }; match req.get_tp() { REQ_TYPE_SELECT | REQ_TYPE_INDEX => { let mut sel = SelectRequest::new(); if let Err(e) = sel.merge_from_bytes(req.get_data()) { on_error(box_err!(e), cb); return; } match end_point.handle_select(req, sel) { Ok(r) => cb(r), Err(e) => on_error(e, cb), } } t => on_error(box_err!("unsupported tp {}", t), cb), } } fn on_error(e: Error, cb: ResponseHandler) { let mut resp = Response::new(); match e { Error::Region(e) => resp.set_region_error(e), Error::Locked(info) => resp.set_locked(info), Error::Other(_) => resp.set_other_error(format!("{}", e)), } cb(resp) } pub struct SnapshotEndPoint { engine: Arc<Box<Engine>>, } impl SnapshotEndPoint { pub fn new(engine: Arc<Box<Engine>>) -> SnapshotEndPoint { // TODO: Spawn a new thread for handling requests asynchronously. SnapshotEndPoint { engine: engine } } fn new_snapshot<'a>(&'a self, ctx: &Context, start_ts: u64) -> Result<SnapshotStore<'a>> { let snapshot = try!(self.engine.snapshot(ctx)); Ok(SnapshotStore::new(snapshot, start_ts)) } } impl SnapshotEndPoint { pub fn handle_select(&self, mut req: Request, sel: SelectRequest) -> Result<Response> { let snap = try!(self.new_snapshot(req.get_context(), sel.get_start_ts())); let range = req.take_ranges().into_vec(); debug!("scanning range: {:?}", range); let res = if req.get_tp() == REQ_TYPE_SELECT { get_rows_from_sel(&snap, &sel, range) } else { get_rows_from_idx(&snap, &sel, range) }; let mut resp = Response::new(); let mut sel_resp = SelectResponse::new(); match res { Ok(rows) => sel_resp.set_rows(RepeatedField::from_vec(rows)), Err(e) => { if let Error::Other(_) = e { // should we handle locked here too? sel_resp.set_error(to_pb_error(&e)); // TODO add detail error resp.set_other_error(format!("{}", e)); } else { // other error should be handle by ti client. return Err(e); } } } let data = box_try!(sel_resp.write_to_bytes()); resp.set_data(data); Ok(resp) } } fn to_pb_error(err: &Error) -> select::Error { let mut e = select::Error::new(); e.set_code(DEFAULT_ERROR_CODE); e.set_msg(format!("{}", err)); e } fn get_rows_from_sel(snap: &SnapshotStore, sel: &SelectRequest, ranges: Vec<KeyRange>) -> Result<Vec<Row>> { let mut eval = Evaluator::default(); let mut rows = vec![]; for ran in ranges { let ran_rows = try!(get_rows_from_range(snap, sel, ran, &mut eval)); rows.extend(ran_rows); } Ok(rows) } fn prefix_next(key: &[u8]) -> Vec<u8> { let mut nk = key.to_vec(); if nk.is_empty() { nk.push(0); return nk; } let mut i = nk.len() - 1; loop { if nk[i] == 255 { nk[i] = 0; } else { nk[i] += 1; return nk; } if i == 0 { nk = key.to_vec(); nk.push(0); return nk; } i -= 1; } } /// `is_point` checks if the key range represents a point. 
fn is_point(range: &KeyRange) -> bool { range.get_end() == &*prefix_next(range.get_start()) } fn get_rows_from_range(snap: &SnapshotStore, sel: &SelectRequest, mut range: KeyRange, eval: &mut Evaluator) -> Result<Vec<Row>> { let mut rows = vec![]; if is_point(&range) { if let None = try!(snap.get(&Key::from_raw(range.get_start().to_vec()))) { return Ok(rows); } let h = box_try!(table::decode_handle(range.get_start())); if let Some(row) = try!(get_row_by_handle(snap, sel, h, eval)) { rows.push(row); } } else { let mut seek_key = range.take_start(); loop { trace!("seek {:?}", seek_key); let mut res = try!(snap.scan(Key::from_raw(seek_key), 1)); if res.is_empty() { debug!("no more data to scan."); break; } let (key, _) = try!(res.pop().unwrap()); if range.get_end() <= &key { debug!("reach end key: {:?} >= {:?}", key, range.get_end()); break; } let h = box_try!(table::decode_handle(&key)); if let Some(row) = try!(get_row_by_handle(snap, sel, h, eval)) { rows.push(row); } seek_key = prefix_next(&key); } } Ok(rows) } fn get_row_by_handle(snap: &SnapshotStore, sel: &SelectRequest, h: i64, eval: &mut Evaluator) -> Result<Option<Row>> { let tid = sel.get_table_info().get_table_id(); let columns = sel.get_table_info().get_columns(); let mut row = Row::new(); let handle = box_try!(datum::encode_value(&[Datum::I64(h)])); for col in columns { if col.get_pk_handle() { row.mut_data().extend(handle.clone()); } else { let raw_key = table::encode_column_key(tid, h, col.get_column_id()); let key = Key::from_raw(raw_key); match try!(snap.get(&key)) { None => return Err(box_err!("key {:?} does not exist", key)), Some(bs) => row.mut_data().extend(bs), } } } row.set_handle(handle); if !sel.has_field_where() { return Ok(Some(row)); } trace!("filtering row {:?}", row); if !row.get_data().is_empty() { let (datums, _) = box_try!(datum::decode(row.get_data())); for (c, d) in columns.iter().zip(datums) { eval.insert(c.get_column_id(), d); } } let res = box_try!(eval.eval(sel.get_field_where())); if let Datum::Null = res { trace!("got null, skip."); return Ok(None); } if box_try!(res.as_bool()) { trace!("pass."); return Ok(Some(row)); } trace!("got false, skip."); Ok(None) } fn get_rows_from_idx(snap: &SnapshotStore, sel: &SelectRequest, ranges: Vec<KeyRange>) -> Result<Vec<Row>> { let mut rows = vec![]; for r in ranges { let part = try!(get_idx_row_from_range(snap, sel.get_index_info(), r)); rows.extend(part); } Ok(rows) } fn get_idx_row_from_range(snap: &SnapshotStore, info: &IndexInfo, mut r: KeyRange) -> Result<Vec<Row>>
Datum::I64(h) }; let data = box_try!(datum::encode_value(&datums)); let handle_data = box_try!(datum::encode_value(&[handle])); let mut row = Row::new(); row.set_handle(handle_data); row.set_data(data); rows.push(row); seek_key = prefix_next(&key); } }
{ let mut rows = vec![]; let mut seek_key = r.take_start(); loop { trace!("seek {:?}", seek_key); let mut nk = try!(snap.scan(Key::from_raw(seek_key.clone()), 1)); if nk.is_empty() { debug!("no more data to scan"); return Ok(rows); } let (key, value) = try!(nk.pop().unwrap()); if r.get_end() <= &key { debug!("reach end key: {:?} >= {:?}", key, r.get_end()); return Ok(rows); } let mut datums = box_try!(table::decode_index_key(&key)); let handle = if datums.len() > info.get_columns().len() { datums.pop().unwrap() } else { let h = box_try!((&*value).read_i64::<BigEndian>());
identifier_body
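`prefix_next` above computes the smallest key strictly greater than the given one in byte order. A standalone restatement with a few assertions makes the carry behaviour explicit; this is an illustrative copy for demonstration, not the TiKV function itself:

```rust
// Increment the last byte, carrying through 0xff bytes; when every byte
// carries (or the key is empty), fall back to appending a zero byte.
fn prefix_next(key: &[u8]) -> Vec<u8> {
    let mut nk = key.to_vec();
    for i in (0..nk.len()).rev() {
        if nk[i] == 255 {
            nk[i] = 0;
        } else {
            nk[i] += 1;
            return nk;
        }
    }
    let mut nk = key.to_vec();
    nk.push(0);
    nk
}

fn main() {
    assert_eq!(prefix_next(b"ab"), b"ac".to_vec());              // simple increment
    assert_eq!(prefix_next(&[0x01, 0xff]), vec![0x02, 0x00]);    // carry one byte
    assert_eq!(prefix_next(&[0xff, 0xff]), vec![0xff, 0xff, 0]); // all bytes carry
    assert_eq!(prefix_next(b""), vec![0]);                       // empty key
}
```

The scan loops in this file use the result as the resume key after consuming one row, which is also why `is_point` can test `end == prefix_next(start)` to detect single-key ranges.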
coprocessor.rs
// Copyright 2016 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. use std::sync::Arc; use std::{result, error}; use std::thread::{self, JoinHandle}; use std::sync::mpsc::{self, Sender, Receiver}; use mio::Token; use tipb::select::{self, SelectRequest, SelectResponse, Row}; use tipb::schema::IndexInfo; use protobuf::{Message as PbMsg, RepeatedField}; use byteorder::{BigEndian, ReadBytesExt}; use storage::{Engine, SnapshotStore, engine, txn, mvcc}; use kvproto::kvrpcpb::{Context, LockInfo}; use kvproto::msgpb::{MessageType, Message}; use kvproto::coprocessor::{Request, Response, KeyRange}; use kvproto::errorpb; use storage::Key; use util::codec::{Datum, table, datum}; use util::xeval::Evaluator; use server::{self, SendCh, Msg, ConnData}; pub const REQ_TYPE_SELECT: i64 = 101; pub const REQ_TYPE_INDEX: i64 = 102; const DEFAULT_ERROR_CODE: i32 = 1; quick_error! { #[derive(Debug)] pub enum Error { Region(err: errorpb::Error) { description("region related failure") display("region {:?}", err) } Locked(l: LockInfo) { description("key is locked") display("locked {:?}", l) } Other(err: Box<error::Error + Send + Sync>) { from() cause(err.as_ref()) description(err.description()) display("unknown error {:?}", err) } } } pub type Result<T> = result::Result<T, Error>; impl From<engine::Error> for Error { fn from(e: engine::Error) -> Error { match e { engine::Error::Request(e) => Error::Region(e), _ => Error::Other(box e), } } } impl From<txn::Error> for Error { fn from(e: txn::Error) -> Error { match e { txn::Error::Mvcc(mvcc::Error::KeyIsLocked { primary, ts, key }) => { let mut info = LockInfo::new(); info.set_primary_lock(primary); info.set_lock_version(ts); info.set_key(key); Error::Locked(info) } _ => Error::Other(box e), } } } pub struct RegionEndPoint { tx: Sender<EndPointMessage>, handle: Option<JoinHandle<()>>, } type ResponseHandler = Box<Fn(Response) -> ()>; #[derive(Debug)] enum EndPointMessage { Job(Request, Token, u64), Close, } fn msg_poller(engine: Arc<Box<Engine>>, rx: Receiver<EndPointMessage>, ch: SendCh) { info!("EndPoint started."); let end_point = SnapshotEndPoint::new(engine); loop { let msg = rx.recv(); if let Err(e) = msg { error!("failed to receive job: {:?}", e); break; } let msg = msg.unwrap(); debug!("recv req: {:?}", msg); match msg { EndPointMessage::Job(req, token, msg_id) => { handle_request(req, ch.clone(), token, msg_id, &end_point) } EndPointMessage::Close => break, } } info!("EndPoint closing."); } impl RegionEndPoint { pub fn new(engine: Arc<Box<Engine>>, ch: SendCh) -> RegionEndPoint { let (tx, rx) = mpsc::channel(); let builder = thread::Builder::new().name("EndPoint".to_owned()); let handle = builder.spawn(move || msg_poller(engine, rx, ch)).unwrap(); RegionEndPoint { tx: tx, handle: Some(handle), } } pub fn on_request(&self, req: Request, token: Token, msg_id: u64) -> server::Result<()> { box_try!(self.tx.send(EndPointMessage::Job(req, token, msg_id))); Ok(()) } pub fn stop(&mut self) { if self.handle.is_none() { return; } if let Err(e) = self.tx.send(EndPointMessage::Close) { error!("failed to ask the coprocessor to stop: 
{:?}", e); } if let Err(e) = self.handle.take().unwrap().join() { error!("failed to stop the coprocessor: {:?}", e); } } } fn handle_request(req: Request, ch: SendCh, token: Token, msg_id: u64, end_point: &SnapshotEndPoint) { let cb = box move |r| { let mut resp_msg = Message::new(); resp_msg.set_msg_type(MessageType::CopResp); resp_msg.set_cop_resp(r); if let Err(e) = ch.send(Msg::WriteData { token: token, data: ConnData::new(msg_id, resp_msg), }) { error!("send cop resp failed with token {:?}, msg id {}, err {:?}", token, msg_id, e); } }; match req.get_tp() { REQ_TYPE_SELECT | REQ_TYPE_INDEX => { let mut sel = SelectRequest::new(); if let Err(e) = sel.merge_from_bytes(req.get_data()) { on_error(box_err!(e), cb); return; } match end_point.handle_select(req, sel) { Ok(r) => cb(r), Err(e) => on_error(e, cb), } } t => on_error(box_err!("unsupported tp {}", t), cb), } } fn on_error(e: Error, cb: ResponseHandler) { let mut resp = Response::new(); match e { Error::Region(e) => resp.set_region_error(e), Error::Locked(info) => resp.set_locked(info), Error::Other(_) => resp.set_other_error(format!("{}", e)), } cb(resp) } pub struct SnapshotEndPoint { engine: Arc<Box<Engine>>, } impl SnapshotEndPoint { pub fn new(engine: Arc<Box<Engine>>) -> SnapshotEndPoint { // TODO: Spawn a new thread for handling requests asynchronously. SnapshotEndPoint { engine: engine } } fn new_snapshot<'a>(&'a self, ctx: &Context, start_ts: u64) -> Result<SnapshotStore<'a>> { let snapshot = try!(self.engine.snapshot(ctx)); Ok(SnapshotStore::new(snapshot, start_ts)) } } impl SnapshotEndPoint { pub fn handle_select(&self, mut req: Request, sel: SelectRequest) -> Result<Response> { let snap = try!(self.new_snapshot(req.get_context(), sel.get_start_ts())); let range = req.take_ranges().into_vec(); debug!("scanning range: {:?}", range); let res = if req.get_tp() == REQ_TYPE_SELECT { get_rows_from_sel(&snap, &sel, range) } else { get_rows_from_idx(&snap, &sel, range) }; let mut resp = Response::new(); let mut sel_resp = SelectResponse::new(); match res { Ok(rows) => sel_resp.set_rows(RepeatedField::from_vec(rows)), Err(e) => { if let Error::Other(_) = e { // should we handle locked here too? sel_resp.set_error(to_pb_error(&e)); // TODO add detail error resp.set_other_error(format!("{}", e)); } else { // other error should be handle by ti client. return Err(e); } } } let data = box_try!(sel_resp.write_to_bytes()); resp.set_data(data); Ok(resp) } } fn to_pb_error(err: &Error) -> select::Error { let mut e = select::Error::new(); e.set_code(DEFAULT_ERROR_CODE); e.set_msg(format!("{}", err)); e } fn get_rows_from_sel(snap: &SnapshotStore, sel: &SelectRequest, ranges: Vec<KeyRange>) -> Result<Vec<Row>> { let mut eval = Evaluator::default(); let mut rows = vec![]; for ran in ranges { let ran_rows = try!(get_rows_from_range(snap, sel, ran, &mut eval)); rows.extend(ran_rows); } Ok(rows) } fn prefix_next(key: &[u8]) -> Vec<u8> { let mut nk = key.to_vec(); if nk.is_empty() { nk.push(0); return nk; } let mut i = nk.len() - 1; loop { if nk[i] == 255 { nk[i] = 0; } else { nk[i] += 1; return nk; } if i == 0 { nk = key.to_vec(); nk.push(0); return nk; } i -= 1; } } /// `is_point` checks if the key range represents a point. 
fn is_point(range: &KeyRange) -> bool { range.get_end() == &*prefix_next(range.get_start()) } fn get_rows_from_range(snap: &SnapshotStore, sel: &SelectRequest, mut range: KeyRange, eval: &mut Evaluator) -> Result<Vec<Row>> { let mut rows = vec![]; if is_point(&range) { if let None = try!(snap.get(&Key::from_raw(range.get_start().to_vec()))) { return Ok(rows); } let h = box_try!(table::decode_handle(range.get_start())); if let Some(row) = try!(get_row_by_handle(snap, sel, h, eval)) { rows.push(row); } } else { let mut seek_key = range.take_start(); loop { trace!("seek {:?}", seek_key); let mut res = try!(snap.scan(Key::from_raw(seek_key), 1)); if res.is_empty() { debug!("no more data to scan."); break; } let (key, _) = try!(res.pop().unwrap()); if range.get_end() <= &key { debug!("reach end key: {:?} >= {:?}", key, range.get_end()); break; } let h = box_try!(table::decode_handle(&key)); if let Some(row) = try!(get_row_by_handle(snap, sel, h, eval)) { rows.push(row); } seek_key = prefix_next(&key); } } Ok(rows) } fn get_row_by_handle(snap: &SnapshotStore, sel: &SelectRequest, h: i64, eval: &mut Evaluator) -> Result<Option<Row>> { let tid = sel.get_table_info().get_table_id(); let columns = sel.get_table_info().get_columns(); let mut row = Row::new(); let handle = box_try!(datum::encode_value(&[Datum::I64(h)])); for col in columns { if col.get_pk_handle() { row.mut_data().extend(handle.clone()); } else { let raw_key = table::encode_column_key(tid, h, col.get_column_id()); let key = Key::from_raw(raw_key); match try!(snap.get(&key)) { None => return Err(box_err!("key {:?} does not exist", key)), Some(bs) => row.mut_data().extend(bs), } } } row.set_handle(handle); if !sel.has_field_where() { return Ok(Some(row)); } trace!("filtering row {:?}", row); if !row.get_data().is_empty() { let (datums, _) = box_try!(datum::decode(row.get_data())); for (c, d) in columns.iter().zip(datums) { eval.insert(c.get_column_id(), d); } } let res = box_try!(eval.eval(sel.get_field_where())); if let Datum::Null = res { trace!("got null, skip."); return Ok(None); } if box_try!(res.as_bool()) { trace!("pass."); return Ok(Some(row)); } trace!("got false, skip."); Ok(None) } fn get_rows_from_idx(snap: &SnapshotStore, sel: &SelectRequest, ranges: Vec<KeyRange>) -> Result<Vec<Row>> { let mut rows = vec![]; for r in ranges { let part = try!(get_idx_row_from_range(snap, sel.get_index_info(), r)); rows.extend(part); } Ok(rows) } fn get_idx_row_from_range(snap: &SnapshotStore, info: &IndexInfo, mut r: KeyRange) -> Result<Vec<Row>> { let mut rows = vec![]; let mut seek_key = r.take_start(); loop { trace!("seek {:?}", seek_key); let mut nk = try!(snap.scan(Key::from_raw(seek_key.clone()), 1)); if nk.is_empty() { debug!("no more data to scan"); return Ok(rows); } let (key, value) = try!(nk.pop().unwrap()); if r.get_end() <= &key { debug!("reach end key: {:?} >= {:?}", key, r.get_end());
} else { let h = box_try!((&*value).read_i64::<BigEndian>()); Datum::I64(h) }; let data = box_try!(datum::encode_value(&datums)); let handle_data = box_try!(datum::encode_value(&[handle])); let mut row = Row::new(); row.set_handle(handle_data); row.set_data(data); rows.push(row); seek_key = prefix_next(&key); } }
return Ok(rows); } let mut datums = box_try!(table::decode_index_key(&key)); let handle = if datums.len() > info.get_columns().len() { datums.pop().unwrap()
random_line_split
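One detail of `get_idx_row_from_range` worth isolating is the handle fallback: when the decoded index key does not embed the row handle, the handle is read from the value as a big-endian i64. A minimal sketch using the same `byteorder` calls as the file above; the helper name is hypothetical:

```rust
use byteorder::{BigEndian, ReadBytesExt};

// Read a row handle stored as a big-endian i64 at the start of a value slice.
fn handle_from_value(mut value: &[u8]) -> std::io::Result<i64> {
    value.read_i64::<BigEndian>()
}

fn main() {
    let value = 42i64.to_be_bytes();
    assert_eq!(handle_from_value(&value).unwrap(), 42);
}
```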
coprocessor.rs
// Copyright 2016 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. use std::sync::Arc; use std::{result, error}; use std::thread::{self, JoinHandle}; use std::sync::mpsc::{self, Sender, Receiver}; use mio::Token; use tipb::select::{self, SelectRequest, SelectResponse, Row}; use tipb::schema::IndexInfo; use protobuf::{Message as PbMsg, RepeatedField}; use byteorder::{BigEndian, ReadBytesExt}; use storage::{Engine, SnapshotStore, engine, txn, mvcc}; use kvproto::kvrpcpb::{Context, LockInfo}; use kvproto::msgpb::{MessageType, Message}; use kvproto::coprocessor::{Request, Response, KeyRange}; use kvproto::errorpb; use storage::Key; use util::codec::{Datum, table, datum}; use util::xeval::Evaluator; use server::{self, SendCh, Msg, ConnData}; pub const REQ_TYPE_SELECT: i64 = 101; pub const REQ_TYPE_INDEX: i64 = 102; const DEFAULT_ERROR_CODE: i32 = 1; quick_error! { #[derive(Debug)] pub enum Error { Region(err: errorpb::Error) { description("region related failure") display("region {:?}", err) } Locked(l: LockInfo) { description("key is locked") display("locked {:?}", l) } Other(err: Box<error::Error + Send + Sync>) { from() cause(err.as_ref()) description(err.description()) display("unknown error {:?}", err) } } } pub type Result<T> = result::Result<T, Error>; impl From<engine::Error> for Error { fn from(e: engine::Error) -> Error { match e { engine::Error::Request(e) => Error::Region(e), _ => Error::Other(box e), } } } impl From<txn::Error> for Error { fn from(e: txn::Error) -> Error { match e { txn::Error::Mvcc(mvcc::Error::KeyIsLocked { primary, ts, key }) => { let mut info = LockInfo::new(); info.set_primary_lock(primary); info.set_lock_version(ts); info.set_key(key); Error::Locked(info) } _ => Error::Other(box e), } } } pub struct RegionEndPoint { tx: Sender<EndPointMessage>, handle: Option<JoinHandle<()>>, } type ResponseHandler = Box<Fn(Response) -> ()>; #[derive(Debug)] enum EndPointMessage { Job(Request, Token, u64), Close, } fn msg_poller(engine: Arc<Box<Engine>>, rx: Receiver<EndPointMessage>, ch: SendCh) { info!("EndPoint started."); let end_point = SnapshotEndPoint::new(engine); loop { let msg = rx.recv(); if let Err(e) = msg { error!("failed to receive job: {:?}", e); break; } let msg = msg.unwrap(); debug!("recv req: {:?}", msg); match msg { EndPointMessage::Job(req, token, msg_id) => { handle_request(req, ch.clone(), token, msg_id, &end_point) } EndPointMessage::Close => break, } } info!("EndPoint closing."); } impl RegionEndPoint { pub fn new(engine: Arc<Box<Engine>>, ch: SendCh) -> RegionEndPoint { let (tx, rx) = mpsc::channel(); let builder = thread::Builder::new().name("EndPoint".to_owned()); let handle = builder.spawn(move || msg_poller(engine, rx, ch)).unwrap(); RegionEndPoint { tx: tx, handle: Some(handle), } } pub fn on_request(&self, req: Request, token: Token, msg_id: u64) -> server::Result<()> { box_try!(self.tx.send(EndPointMessage::Job(req, token, msg_id))); Ok(()) } pub fn stop(&mut self) { if self.handle.is_none() { return; } if let Err(e) = self.tx.send(EndPointMessage::Close) { error!("failed to ask the coprocessor to stop: 
{:?}", e); } if let Err(e) = self.handle.take().unwrap().join() { error!("failed to stop the coprocessor: {:?}", e); } } } fn
(req: Request, ch: SendCh, token: Token, msg_id: u64, end_point: &SnapshotEndPoint) { let cb = box move |r| { let mut resp_msg = Message::new(); resp_msg.set_msg_type(MessageType::CopResp); resp_msg.set_cop_resp(r); if let Err(e) = ch.send(Msg::WriteData { token: token, data: ConnData::new(msg_id, resp_msg), }) { error!("send cop resp failed with token {:?}, msg id {}, err {:?}", token, msg_id, e); } }; match req.get_tp() { REQ_TYPE_SELECT | REQ_TYPE_INDEX => { let mut sel = SelectRequest::new(); if let Err(e) = sel.merge_from_bytes(req.get_data()) { on_error(box_err!(e), cb); return; } match end_point.handle_select(req, sel) { Ok(r) => cb(r), Err(e) => on_error(e, cb), } } t => on_error(box_err!("unsupported tp {}", t), cb), } } fn on_error(e: Error, cb: ResponseHandler) { let mut resp = Response::new(); match e { Error::Region(e) => resp.set_region_error(e), Error::Locked(info) => resp.set_locked(info), Error::Other(_) => resp.set_other_error(format!("{}", e)), } cb(resp) } pub struct SnapshotEndPoint { engine: Arc<Box<Engine>>, } impl SnapshotEndPoint { pub fn new(engine: Arc<Box<Engine>>) -> SnapshotEndPoint { // TODO: Spawn a new thread for handling requests asynchronously. SnapshotEndPoint { engine: engine } } fn new_snapshot<'a>(&'a self, ctx: &Context, start_ts: u64) -> Result<SnapshotStore<'a>> { let snapshot = try!(self.engine.snapshot(ctx)); Ok(SnapshotStore::new(snapshot, start_ts)) } } impl SnapshotEndPoint { pub fn handle_select(&self, mut req: Request, sel: SelectRequest) -> Result<Response> { let snap = try!(self.new_snapshot(req.get_context(), sel.get_start_ts())); let range = req.take_ranges().into_vec(); debug!("scanning range: {:?}", range); let res = if req.get_tp() == REQ_TYPE_SELECT { get_rows_from_sel(&snap, &sel, range) } else { get_rows_from_idx(&snap, &sel, range) }; let mut resp = Response::new(); let mut sel_resp = SelectResponse::new(); match res { Ok(rows) => sel_resp.set_rows(RepeatedField::from_vec(rows)), Err(e) => { if let Error::Other(_) = e { // should we handle locked here too? sel_resp.set_error(to_pb_error(&e)); // TODO add detail error resp.set_other_error(format!("{}", e)); } else { // other errors should be handled by the ti client. return Err(e); } } } let data = box_try!(sel_resp.write_to_bytes()); resp.set_data(data); Ok(resp) } } fn to_pb_error(err: &Error) -> select::Error { let mut e = select::Error::new(); e.set_code(DEFAULT_ERROR_CODE); e.set_msg(format!("{}", err)); e } fn get_rows_from_sel(snap: &SnapshotStore, sel: &SelectRequest, ranges: Vec<KeyRange>) -> Result<Vec<Row>> { let mut eval = Evaluator::default(); let mut rows = vec![]; for ran in ranges { let ran_rows = try!(get_rows_from_range(snap, sel, ran, &mut eval)); rows.extend(ran_rows); } Ok(rows) } fn prefix_next(key: &[u8]) -> Vec<u8> { let mut nk = key.to_vec(); if nk.is_empty() { nk.push(0); return nk; } let mut i = nk.len() - 1; loop { if nk[i] == 255 { nk[i] = 0; } else { nk[i] += 1; return nk; } if i == 0 { nk = key.to_vec(); nk.push(0); return nk; } i -= 1; } } /// `is_point` checks if the key range represents a point. 
fn is_point(range: &KeyRange) -> bool { range.get_end() == &*prefix_next(range.get_start()) } fn get_rows_from_range(snap: &SnapshotStore, sel: &SelectRequest, mut range: KeyRange, eval: &mut Evaluator) -> Result<Vec<Row>> { let mut rows = vec![]; if is_point(&range) { if let None = try!(snap.get(&Key::from_raw(range.get_start().to_vec()))) { return Ok(rows); } let h = box_try!(table::decode_handle(range.get_start())); if let Some(row) = try!(get_row_by_handle(snap, sel, h, eval)) { rows.push(row); } } else { let mut seek_key = range.take_start(); loop { trace!("seek {:?}", seek_key); let mut res = try!(snap.scan(Key::from_raw(seek_key), 1)); if res.is_empty() { debug!("no more data to scan."); break; } let (key, _) = try!(res.pop().unwrap()); if range.get_end() <= &key { debug!("reach end key: {:?} >= {:?}", key, range.get_end()); break; } let h = box_try!(table::decode_handle(&key)); if let Some(row) = try!(get_row_by_handle(snap, sel, h, eval)) { rows.push(row); } seek_key = prefix_next(&key); } } Ok(rows) } fn get_row_by_handle(snap: &SnapshotStore, sel: &SelectRequest, h: i64, eval: &mut Evaluator) -> Result<Option<Row>> { let tid = sel.get_table_info().get_table_id(); let columns = sel.get_table_info().get_columns(); let mut row = Row::new(); let handle = box_try!(datum::encode_value(&[Datum::I64(h)])); for col in columns { if col.get_pk_handle() { row.mut_data().extend(handle.clone()); } else { let raw_key = table::encode_column_key(tid, h, col.get_column_id()); let key = Key::from_raw(raw_key); match try!(snap.get(&key)) { None => return Err(box_err!("key {:?} does not exist", key)), Some(bs) => row.mut_data().extend(bs), } } } row.set_handle(handle); if !sel.has_field_where() { return Ok(Some(row)); } trace!("filtering row {:?}", row); if !row.get_data().is_empty() { let (datums, _) = box_try!(datum::decode(row.get_data())); for (c, d) in columns.iter().zip(datums) { eval.insert(c.get_column_id(), d); } } let res = box_try!(eval.eval(sel.get_field_where())); if let Datum::Null = res { trace!("got null, skip."); return Ok(None); } if box_try!(res.as_bool()) { trace!("pass."); return Ok(Some(row)); } trace!("got false, skip."); Ok(None) } fn get_rows_from_idx(snap: &SnapshotStore, sel: &SelectRequest, ranges: Vec<KeyRange>) -> Result<Vec<Row>> { let mut rows = vec![]; for r in ranges { let part = try!(get_idx_row_from_range(snap, sel.get_index_info(), r)); rows.extend(part); } Ok(rows) } fn get_idx_row_from_range(snap: &SnapshotStore, info: &IndexInfo, mut r: KeyRange) -> Result<Vec<Row>> { let mut rows = vec![]; let mut seek_key = r.take_start(); loop { trace!("seek {:?}", seek_key); let mut nk = try!(snap.scan(Key::from_raw(seek_key.clone()), 1)); if nk.is_empty() { debug!("no more data to scan"); return Ok(rows); } let (key, value) = try!(nk.pop().unwrap()); if r.get_end() <= &key { debug!("reach end key: {:?} >= {:?}", key, r.get_end()); return Ok(rows); } let mut datums = box_try!(table::decode_index_key(&key)); let handle = if datums.len() > info.get_columns().len() { datums.pop().unwrap() } else { let h = box_try!((&*value).read_i64::<BigEndian>()); Datum::I64(h) }; let data = box_try!(datum::encode_value(&datums)); let handle_data = box_try!(datum::encode_value(&[handle])); let mut row = Row::new(); row.set_handle(handle_data); row.set_data(data); rows.push(row); seek_key = prefix_next(&key); } }
handle_request
identifier_name
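The `RegionEndPoint`/`msg_poller` pairing in the records above is a conventional owned-worker pattern: a thread drains an mpsc channel, handles `Job` messages, and exits on `Close`, while `stop()` joins it. A stripped-down model of that design, with a placeholder payload instead of the real `Request`/`Token` types:

```rust
use std::sync::mpsc::{self, Sender};
use std::thread::{self, JoinHandle};

enum Msg {
    Job(u64), // placeholder for (Request, Token, msg_id)
    Close,
}

struct EndPoint {
    tx: Sender<Msg>,
    handle: Option<JoinHandle<()>>,
}

impl EndPoint {
    fn new() -> EndPoint {
        let (tx, rx) = mpsc::channel();
        // The worker owns the receiver; dropping every sender would also end it.
        let handle = thread::Builder::new()
            .name("EndPoint".to_owned())
            .spawn(move || {
                while let Ok(msg) = rx.recv() {
                    match msg {
                        Msg::Job(id) => println!("handling job {}", id),
                        Msg::Close => break,
                    }
                }
            })
            .unwrap();
        EndPoint { tx, handle: Some(handle) }
    }

    fn stop(&mut self) {
        let _ = self.tx.send(Msg::Close);
        if let Some(h) = self.handle.take() {
            let _ = h.join();
        }
    }
}

fn main() {
    let mut ep = EndPoint::new();
    ep.tx.send(Msg::Job(1)).unwrap();
    ep.stop();
}
```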
server.rs
// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Grin server implementation, glues the different parts of the system (mostly //! the peer-to-peer server, the blockchain and the transaction pool) and acts //! as a facade. use std::fs::File; use std::io::prelude::*; use std::path::Path; use std::sync::{mpsc, Arc}; use std::{convert::TryInto, fs}; use std::{ thread::{self, JoinHandle}, time::{self, Duration}, }; use fs2::FileExt; use walkdir::WalkDir; use crate::api; use crate::api::TLSConfig; use crate::chain::{self, SyncState, SyncStatus}; use crate::common::adapters::{ ChainToPoolAndNetAdapter, NetToChainAdapter, PoolToChainAdapter, PoolToNetAdapter, }; use crate::common::hooks::{init_chain_hooks, init_net_hooks}; use crate::common::stats::{ ChainStats, DiffBlock, DiffStats, PeerStats, ServerStateInfo, ServerStats, TxStats, }; use crate::common::types::{Error, ServerConfig, StratumServerConfig}; use crate::core::core::hash::{Hashed, ZERO_HASH}; use crate::core::ser::ProtocolVersion; use crate::core::{consensus, genesis, global, pow}; use crate::grin::{dandelion_monitor, seed, sync}; use crate::mining::stratumserver; use crate::mining::test_miner::Miner; use crate::p2p; use crate::p2p::types::{Capabilities, PeerAddr}; use crate::pool; use crate::util::file::get_first_line; use crate::util::{RwLock, StopState}; use futures::channel::oneshot; use grin_util::logger::LogEntry; /// Arcified thread-safe TransactionPool with type parameters used by server components pub type ServerTxPool = Arc<RwLock<pool::TransactionPool<PoolToChainAdapter, PoolToNetAdapter>>>; /// Grin server holding internal structures. pub struct Server { /// server config pub config: ServerConfig, /// handle to our network server pub p2p: Arc<p2p::Server>, /// data store access pub chain: Arc<chain::Chain>, /// in-memory transaction pool pub tx_pool: ServerTxPool, /// Whether we're currently syncing pub sync_state: Arc<SyncState>, /// To be passed around to collect stats and info state_info: ServerStateInfo, /// Stop flag pub stop_state: Arc<StopState>, /// Maintain a lock_file so we do not run multiple Grin nodes from same dir. lock_file: Arc<File>, connect_thread: Option<JoinHandle<()>>, sync_thread: JoinHandle<()>, dandelion_thread: JoinHandle<()>, } impl Server { /// Instantiates and starts a new server. 
Optionally takes a callback /// for the server to send an Arc copy of itself, to allow another process /// to poll info about the server status pub fn start<F>( config: ServerConfig, logs_rx: Option<mpsc::Receiver<LogEntry>>, mut info_callback: F, stop_state: Option<Arc<StopState>>, api_chan: &'static mut (oneshot::Sender<()>, oneshot::Receiver<()>), ) -> Result<(), Error> where F: FnMut(Server, Option<mpsc::Receiver<LogEntry>>), { let mining_config = config.stratum_mining_config.clone(); let enable_test_miner = config.run_test_miner; let test_miner_wallet_url = config.test_miner_wallet_url.clone(); let serv = Server::new(config, stop_state, api_chan)?; if let Some(c) = mining_config { let enable_stratum_server = c.enable_stratum_server; if let Some(s) = enable_stratum_server { if s { { let mut stratum_stats = serv.state_info.stratum_stats.write(); stratum_stats.is_enabled = true; } serv.start_stratum_server(c); } } } if let Some(s) = enable_test_miner { if s { serv.start_test_miner(test_miner_wallet_url, serv.stop_state.clone()); } } info_callback(serv, logs_rx); Ok(()) } // Exclusive (advisory) lock_file to ensure we do not run multiple // instances of grin server from the same dir. // This uses fs2 and should be safe cross-platform unless somebody abuses the file itself. fn one_grin_at_a_time(config: &ServerConfig) -> Result<Arc<File>, Error> { let path = Path::new(&config.db_root); fs::create_dir_all(&path)?; let path = path.join("grin.lock"); let lock_file = fs::OpenOptions::new() .read(true) .write(true) .create(true) .open(&path)?; lock_file.try_lock_exclusive().map_err(|e| { let mut stderr = std::io::stderr(); writeln!( &mut stderr, "Failed to lock {:?} (grin server already running?)", path ) .expect("Could not write to stderr"); e })?; Ok(Arc::new(lock_file)) } /// Instantiates a new server associated with the provided future reactor. pub fn new( config: ServerConfig, stop_state: Option<Arc<StopState>>, api_chan: &'static mut (oneshot::Sender<()>, oneshot::Receiver<()>), ) -> Result<Server, Error> { // Obtain our lock_file or fail immediately with an error. let lock_file = Server::one_grin_at_a_time(&config)?; // Defaults to None (optional) in config file. // This translates to false here. 
let archive_mode = match config.archive_mode { None => false, Some(b) => b, }; let stop_state = if stop_state.is_some() { stop_state.unwrap() } else { Arc::new(StopState::new()) }; let pool_adapter = Arc::new(PoolToChainAdapter::new()); let pool_net_adapter = Arc::new(PoolToNetAdapter::new(config.dandelion_config.clone())); let tx_pool = Arc::new(RwLock::new(pool::TransactionPool::new( config.pool_config.clone(), pool_adapter.clone(), pool_net_adapter.clone(), ))); let sync_state = Arc::new(SyncState::new()); let chain_adapter = Arc::new(ChainToPoolAndNetAdapter::new( tx_pool.clone(), init_chain_hooks(&config), )); let genesis = match config.chain_type { global::ChainTypes::AutomatedTesting => pow::mine_genesis_block().unwrap(), global::ChainTypes::UserTesting => pow::mine_genesis_block().unwrap(), global::ChainTypes::Testnet => genesis::genesis_test(), global::ChainTypes::Mainnet => genesis::genesis_main(), }; info!("Starting server, genesis block: {}", genesis.hash()); let shared_chain = Arc::new(chain::Chain::init( config.db_root.clone(), chain_adapter.clone(), genesis.clone(), pow::verify_size, archive_mode, )?); pool_adapter.set_chain(shared_chain.clone()); let net_adapter = Arc::new(NetToChainAdapter::new( sync_state.clone(), shared_chain.clone(), tx_pool.clone(), config.clone(), init_net_hooks(&config), )); // Initialize our capabilities. // Currently either "default" or with optional "archive_mode" (block history) support enabled. let capabilities = if let Some(true) = config.archive_mode { Capabilities::default() | Capabilities::BLOCK_HIST } else { Capabilities::default() }; debug!("Capabilities: {:?}", capabilities); let p2p_server = Arc::new(p2p::Server::new( &config.db_root, capabilities, config.p2p_config.clone(), net_adapter.clone(), genesis.hash(), stop_state.clone(), )?); // Initialize various adapters with our dynamic set of connected peers. chain_adapter.init(p2p_server.peers.clone()); pool_net_adapter.init(p2p_server.peers.clone()); net_adapter.init(p2p_server.peers.clone()); let mut connect_thread = None; if config.p2p_config.seeding_type != p2p::Seeding::Programmatic { let seed_list = match config.p2p_config.seeding_type { p2p::Seeding::None => { warn!("No seed configured, will stay solo until connected to"); seed::predefined_seeds(vec![]) } p2p::Seeding::List => match &config.p2p_config.seeds { Some(seeds) => seed::predefined_seeds(seeds.peers.clone()), None => { return Err(Error::Configuration( "Seeds must be configured for seeding type List".to_owned(), )); } }, p2p::Seeding::DNSSeed => seed::default_dns_seeds(), _ => unreachable!(), }; connect_thread = Some(seed::connect_and_monitor( p2p_server.clone(), seed_list, config.p2p_config.clone(), stop_state.clone(), )?); } // Defaults to None (optional) in config file. // This translates to false here so we do not skip by default. 
let skip_sync_wait = config.skip_sync_wait.unwrap_or(false); sync_state.update(SyncStatus::AwaitingPeers(!skip_sync_wait)); let sync_thread = sync::run_sync( sync_state.clone(), p2p_server.peers.clone(), shared_chain.clone(), stop_state.clone(), )?; let p2p_inner = p2p_server.clone(); let _ = thread::Builder::new() .name("p2p-server".to_string()) .spawn(move || { if let Err(e) = p2p_inner.listen() { error!("P2P server failed with error: {:?}", e); } })?; info!("Starting rest apis at: {}", &config.api_http_addr); let api_secret = get_first_line(config.api_secret_path.clone()); let foreign_api_secret = get_first_line(config.foreign_api_secret_path.clone()); let tls_conf = match config.tls_certificate_file.clone() { None => None, Some(file) => { let key = match config.tls_certificate_key.clone() { Some(k) => k, None => { let msg = "Private key for certificate is not set".to_string(); return Err(Error::ArgumentError(msg)); } }; Some(TLSConfig::new(file, key)) } }; api::node_apis( &config.api_http_addr, shared_chain.clone(), tx_pool.clone(), p2p_server.peers.clone(), sync_state.clone(), api_secret, foreign_api_secret, tls_conf, api_chan, stop_state.clone(), )?; info!("Starting dandelion monitor: {}", &config.api_http_addr); let dandelion_thread = dandelion_monitor::monitor_transactions( config.dandelion_config.clone(), tx_pool.clone(), pool_net_adapter, stop_state.clone(), )?; warn!("Grin server started."); Ok(Server { config, p2p: p2p_server, chain: shared_chain, tx_pool, sync_state, state_info: ServerStateInfo { ..Default::default() }, stop_state, lock_file, connect_thread, sync_thread, dandelion_thread, }) } /// Asks the server to connect to a peer at the provided network address. pub fn connect_peer(&self, addr: PeerAddr) -> Result<(), Error> { self.p2p.connect(addr)?; Ok(()) } /// Ping all peers, mostly useful for tests to have connected peers share /// their heights pub fn
(&self) -> Result<(), Error> { let head = self.chain.head()?; self.p2p.peers.check_all(head.total_difficulty, head.height); Ok(()) } /// Number of peers pub fn peer_count(&self) -> u32 { self.p2p .peers .iter() .connected() .count() .try_into() .unwrap() } /// Start a minimal "stratum" mining service on a separate thread pub fn start_stratum_server(&self, config: StratumServerConfig) { let proof_size = global::proofsize(); let sync_state = self.sync_state.clone(); let mut stratum_server = stratumserver::StratumServer::new( config, self.chain.clone(), self.tx_pool.clone(), self.state_info.stratum_stats.clone(), ); let _ = thread::Builder::new() .name("stratum_server".to_string()) .spawn(move || { stratum_server.run_loop(proof_size, sync_state); }); } /// Start mining for blocks internally on a separate thread. Relies on /// internal miner, and should only be used for automated testing. Burns /// reward if wallet_listener_url is 'None' pub fn start_test_miner( &self, wallet_listener_url: Option<String>, stop_state: Arc<StopState>, ) { info!("start_test_miner - start",); let sync_state = self.sync_state.clone(); let config_wallet_url = match wallet_listener_url.clone() { Some(u) => u, None => String::from("http://127.0.0.1:13415"), }; let config = StratumServerConfig { attempt_time_per_block: 60, burn_reward: false, enable_stratum_server: None, stratum_server_addr: None, wallet_listener_url: config_wallet_url, minimum_share_difficulty: 1, }; let mut miner = Miner::new( config, self.chain.clone(), self.tx_pool.clone(), stop_state, sync_state, ); miner.set_debug_output_id(format!("Port {}", self.config.p2p_config.port)); let _ = thread::Builder::new() .name("test_miner".to_string()) .spawn(move || miner.run_loop(wallet_listener_url)); } /// The chain head pub fn head(&self) -> Result<chain::Tip, Error> { self.chain.head().map_err(|e| e.into()) } /// The head of the block header chain pub fn header_head(&self) -> Result<chain::Tip, Error> { self.chain.header_head().map_err(|e| e.into()) } /// The p2p layer protocol version for this node. pub fn protocol_version() -> ProtocolVersion { ProtocolVersion::local() } /// Returns a set of stats about this server. This and the ServerStats /// structure /// can be updated over time to include any information needed by tests or /// other consumers pub fn get_server_stats(&self) -> Result<ServerStats, Error> { let stratum_stats = self.state_info.stratum_stats.read().clone(); // Fill out stats on our current difficulty calculation // TODO: check the overhead of calculating this again isn't too much // could return it from next_difficulty, but would rather keep consensus // code clean. This may be handy for testing but not really needed // for release let diff_stats = { let last_blocks: Vec<consensus::HeaderDifficultyInfo> = global::difficulty_data_to_vector(self.chain.difficulty_iter()?) 
.into_iter() .collect(); let tip_height = self.head()?.height as i64; let mut height = tip_height as i64 - last_blocks.len() as i64 + 1; let diff_entries: Vec<DiffBlock> = last_blocks .windows(2) .map(|pair| { let prev = &pair[0]; let next = &pair[1]; height += 1; let block_hash = next.hash.unwrap_or(ZERO_HASH); DiffBlock { block_height: height, block_hash, difficulty: next.difficulty.to_num(), time: next.timestamp, duration: next.timestamp - prev.timestamp, secondary_scaling: next.secondary_scaling, is_secondary: next.is_secondary, } }) .collect(); let block_time_sum = diff_entries.iter().fold(0, |sum, t| sum + t.duration); let block_diff_sum = diff_entries.iter().fold(0, |sum, d| sum + d.difficulty); DiffStats { height: height as u64, last_blocks: diff_entries, average_block_time: block_time_sum / (consensus::DMA_WINDOW - 1), average_difficulty: block_diff_sum / (consensus::DMA_WINDOW - 1), window_size: consensus::DMA_WINDOW, } }; let peer_stats = self .p2p .peers .iter() .connected() .into_iter() .map(|p| PeerStats::from_peer(&p)) .collect(); // Updating TUI stats should not block any other processing so only attempt to // acquire various read locks with a timeout. let read_timeout = Duration::from_millis(500); let tx_stats = self.tx_pool.try_read_for(read_timeout).map(|pool| TxStats { tx_pool_size: pool.txpool.size(), tx_pool_kernels: pool.txpool.kernel_count(), stem_pool_size: pool.stempool.size(), stem_pool_kernels: pool.stempool.kernel_count(), }); let head = self.chain.head_header()?; let head_stats = ChainStats { latest_timestamp: head.timestamp, height: head.height, last_block_h: head.hash(), total_difficulty: head.total_difficulty(), }; let header_head = self.chain.header_head()?; let header = self.chain.get_block_header(&header_head.hash())?; let header_stats = ChainStats { latest_timestamp: header.timestamp, height: header.height, last_block_h: header.hash(), total_difficulty: header.total_difficulty(), }; let disk_usage_bytes = WalkDir::new(&self.config.db_root) .min_depth(1) .max_depth(3) .into_iter() .filter_map(|entry| entry.ok()) .filter_map(|entry| entry.metadata().ok()) .filter(|metadata| metadata.is_file()) .fold(0, |acc, m| acc + m.len()); let disk_usage_gb = format!("{:.*}", 3, (disk_usage_bytes as f64 / 1_000_000_000_f64)); Ok(ServerStats { peer_count: self.peer_count(), chain_stats: head_stats, header_stats: header_stats, sync_status: self.sync_state.status(), disk_usage_gb: disk_usage_gb, stratum_stats: stratum_stats, peer_stats: peer_stats, diff_stats: diff_stats, tx_stats: tx_stats, }) } /// Stop the server. pub fn stop(self) { { self.sync_state.update(SyncStatus::Shutdown); self.stop_state.stop(); if let Some(connect_thread) = self.connect_thread { match connect_thread.join() { Err(e) => error!("failed to join to connect_and_monitor thread: {:?}", e), Ok(_) => info!("connect_and_monitor thread stopped"), } } else { info!("No active connect_and_monitor thread") } match self.sync_thread.join() { Err(e) => error!("failed to join to sync thread: {:?}", e), Ok(_) => info!("sync thread stopped"), } match self.dandelion_thread.join() { Err(e) => error!("failed to join to dandelion_monitor thread: {:?}", e), Ok(_) => info!("dandelion_monitor thread stopped"), } } // this call is blocking and makes sure all peers stop, however // we can't be sure that we stopped a listener blocked on accept, so we don't join the p2p thread self.p2p.stop(); let _ = self.lock_file.unlock(); warn!("Shutdown complete"); } /// Pause the p2p server. 
pub fn pause(&self) { self.stop_state.pause(); thread::sleep(time::Duration::from_secs(1)); self.p2p.pause(); } /// Resume p2p server. /// TODO - We appear not to resume the p2p server (peer connections) here? pub fn resume(&self) { self.stop_state.resume(); } /// Stops the test miner without stopping the p2p layer pub fn stop_test_miner(&self, stop: Arc<StopState>) { stop.stop(); info!("stop_test_miner - stop",); } }
ping_peers
identifier_name
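The `one_grin_at_a_time` helper in the `server.rs` code above guards against two nodes sharing a data directory by taking an exclusive advisory lock on a file via the `fs2` crate (which the file itself imports). Below is a minimal, self-contained sketch of that pattern; the directory layout and the `instance.lock` file name are made up for illustration, not the server's actual paths.

```rust
use std::fs::{self, File};
use std::io;
use std::path::Path;

use fs2::FileExt;

/// Take an exclusive advisory lock on a file inside `dir`.
/// A second process calling this on the same directory gets an
/// immediate error instead of blocking, mirroring the
/// `try_lock_exclusive` call in `one_grin_at_a_time`.
fn acquire_instance_lock(dir: &Path) -> io::Result<File> {
    fs::create_dir_all(dir)?;
    let lock_path = dir.join("instance.lock"); // hypothetical file name
    let lock_file = fs::OpenOptions::new()
        .read(true)
        .write(true)
        .create(true)
        .open(&lock_path)?;
    lock_file.try_lock_exclusive()?;
    Ok(lock_file)
}
```

The lock is advisory and lives only as long as the returned handle, which is presumably why the server keeps `lock_file: Arc<File>` in the `Server` struct for its whole lifetime and only unlocks it during `stop`.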
server.rs
// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Grin server implementation, glues the different parts of the system (mostly //! the peer-to-peer server, the blockchain and the transaction pool) and acts //! as a facade. use std::fs::File; use std::io::prelude::*; use std::path::Path; use std::sync::{mpsc, Arc}; use std::{convert::TryInto, fs}; use std::{ thread::{self, JoinHandle}, time::{self, Duration}, }; use fs2::FileExt; use walkdir::WalkDir; use crate::api; use crate::api::TLSConfig; use crate::chain::{self, SyncState, SyncStatus}; use crate::common::adapters::{ ChainToPoolAndNetAdapter, NetToChainAdapter, PoolToChainAdapter, PoolToNetAdapter, }; use crate::common::hooks::{init_chain_hooks, init_net_hooks}; use crate::common::stats::{ ChainStats, DiffBlock, DiffStats, PeerStats, ServerStateInfo, ServerStats, TxStats, }; use crate::common::types::{Error, ServerConfig, StratumServerConfig}; use crate::core::core::hash::{Hashed, ZERO_HASH}; use crate::core::ser::ProtocolVersion; use crate::core::{consensus, genesis, global, pow}; use crate::grin::{dandelion_monitor, seed, sync}; use crate::mining::stratumserver; use crate::mining::test_miner::Miner; use crate::p2p; use crate::p2p::types::{Capabilities, PeerAddr}; use crate::pool; use crate::util::file::get_first_line; use crate::util::{RwLock, StopState}; use futures::channel::oneshot; use grin_util::logger::LogEntry; /// Arcified thread-safe TransactionPool with type parameters used by server components pub type ServerTxPool = Arc<RwLock<pool::TransactionPool<PoolToChainAdapter, PoolToNetAdapter>>>; /// Grin server holding internal structures. pub struct Server { /// server config pub config: ServerConfig, /// handle to our network server pub p2p: Arc<p2p::Server>, /// data store access pub chain: Arc<chain::Chain>, /// in-memory transaction pool pub tx_pool: ServerTxPool, /// Whether we're currently syncing pub sync_state: Arc<SyncState>, /// To be passed around to collect stats and info state_info: ServerStateInfo, /// Stop flag pub stop_state: Arc<StopState>, /// Maintain a lock_file so we do not run multiple Grin nodes from same dir. lock_file: Arc<File>, connect_thread: Option<JoinHandle<()>>, sync_thread: JoinHandle<()>, dandelion_thread: JoinHandle<()>, } impl Server { /// Instantiates and starts a new server. 
Optionally takes a callback /// for the server to send an ARC copy of itself, to allow another process /// to poll info about the server status pub fn start<F>( config: ServerConfig, logs_rx: Option<mpsc::Receiver<LogEntry>>, mut info_callback: F, stop_state: Option<Arc<StopState>>, api_chan: &'static mut (oneshot::Sender<()>, oneshot::Receiver<()>), ) -> Result<(), Error> where F: FnMut(Server, Option<mpsc::Receiver<LogEntry>>), { let mining_config = config.stratum_mining_config.clone(); let enable_test_miner = config.run_test_miner; let test_miner_wallet_url = config.test_miner_wallet_url.clone(); let serv = Server::new(config, stop_state, api_chan)?; if let Some(c) = mining_config { let enable_stratum_server = c.enable_stratum_server; if let Some(s) = enable_stratum_server { if s { { let mut stratum_stats = serv.state_info.stratum_stats.write(); stratum_stats.is_enabled = true; } serv.start_stratum_server(c); } } } if let Some(s) = enable_test_miner { if s { serv.start_test_miner(test_miner_wallet_url, serv.stop_state.clone()); } } info_callback(serv, logs_rx); Ok(()) } // Exclusive (advisory) lock_file to ensure we do not run multiple // instance of grin server from the same dir. // This uses fs2 and should be safe cross-platform unless somebody abuses the file itself. fn one_grin_at_a_time(config: &ServerConfig) -> Result<Arc<File>, Error> { let path = Path::new(&config.db_root); fs::create_dir_all(&path)?; let path = path.join("grin.lock"); let lock_file = fs::OpenOptions::new() .read(true) .write(true) .create(true) .open(&path)?; lock_file.try_lock_exclusive().map_err(|e| { let mut stderr = std::io::stderr(); writeln!( &mut stderr, "Failed to lock {:?} (grin server already running?)", path ) .expect("Could not write to stderr"); e })?; Ok(Arc::new(lock_file)) } /// Instantiates a new server associated with the provided future reactor. pub fn new( config: ServerConfig, stop_state: Option<Arc<StopState>>, api_chan: &'static mut (oneshot::Sender<()>, oneshot::Receiver<()>), ) -> Result<Server, Error> { // Obtain our lock_file or fail immediately with an error. let lock_file = Server::one_grin_at_a_time(&config)?; // Defaults to None (optional) in config file. // This translates to false here. 
let archive_mode = match config.archive_mode { None => false, Some(b) => b, }; let stop_state = if stop_state.is_some() { stop_state.unwrap() } else { Arc::new(StopState::new()) }; let pool_adapter = Arc::new(PoolToChainAdapter::new()); let pool_net_adapter = Arc::new(PoolToNetAdapter::new(config.dandelion_config.clone())); let tx_pool = Arc::new(RwLock::new(pool::TransactionPool::new( config.pool_config.clone(), pool_adapter.clone(), pool_net_adapter.clone(), ))); let sync_state = Arc::new(SyncState::new()); let chain_adapter = Arc::new(ChainToPoolAndNetAdapter::new( tx_pool.clone(), init_chain_hooks(&config), )); let genesis = match config.chain_type { global::ChainTypes::AutomatedTesting => pow::mine_genesis_block().unwrap(), global::ChainTypes::UserTesting => pow::mine_genesis_block().unwrap(), global::ChainTypes::Testnet => genesis::genesis_test(), global::ChainTypes::Mainnet => genesis::genesis_main(), }; info!("Starting server, genesis block: {}", genesis.hash()); let shared_chain = Arc::new(chain::Chain::init( config.db_root.clone(), chain_adapter.clone(), genesis.clone(), pow::verify_size, archive_mode, )?); pool_adapter.set_chain(shared_chain.clone()); let net_adapter = Arc::new(NetToChainAdapter::new( sync_state.clone(), shared_chain.clone(), tx_pool.clone(), config.clone(), init_net_hooks(&config), )); // Initialize our capabilities. // Currently either "default" or with optional "archive_mode" (block history) support enabled. let capabilities = if let Some(true) = config.archive_mode { Capabilities::default() | Capabilities::BLOCK_HIST } else { Capabilities::default() }; debug!("Capabilities: {:?}", capabilities); let p2p_server = Arc::new(p2p::Server::new( &config.db_root, capabilities, config.p2p_config.clone(), net_adapter.clone(), genesis.hash(), stop_state.clone(), )?); // Initialize various adapters with our dynamic set of connected peers. chain_adapter.init(p2p_server.peers.clone()); pool_net_adapter.init(p2p_server.peers.clone()); net_adapter.init(p2p_server.peers.clone()); let mut connect_thread = None; if config.p2p_config.seeding_type!= p2p::Seeding::Programmatic { let seed_list = match config.p2p_config.seeding_type { p2p::Seeding::None => { warn!("No seed configured, will stay solo until connected to"); seed::predefined_seeds(vec![]) } p2p::Seeding::List => match &config.p2p_config.seeds { Some(seeds) => seed::predefined_seeds(seeds.peers.clone()), None => { return Err(Error::Configuration( "Seeds must be configured for seeding type List".to_owned(), )); } }, p2p::Seeding::DNSSeed => seed::default_dns_seeds(), _ => unreachable!(), }; connect_thread = Some(seed::connect_and_monitor( p2p_server.clone(), seed_list, config.p2p_config.clone(), stop_state.clone(), )?); } // Defaults to None (optional) in config file. // This translates to false here so we do not skip by default. 
let skip_sync_wait = config.skip_sync_wait.unwrap_or(false); sync_state.update(SyncStatus::AwaitingPeers(!skip_sync_wait)); let sync_thread = sync::run_sync( sync_state.clone(), p2p_server.peers.clone(), shared_chain.clone(), stop_state.clone(), )?; let p2p_inner = p2p_server.clone(); let _ = thread::Builder::new() .name("p2p-server".to_string()) .spawn(move || { if let Err(e) = p2p_inner.listen() { error!("P2P server failed with error: {:?}", e); } })?; info!("Starting rest apis at: {}", &config.api_http_addr); let api_secret = get_first_line(config.api_secret_path.clone()); let foreign_api_secret = get_first_line(config.foreign_api_secret_path.clone()); let tls_conf = match config.tls_certificate_file.clone() { None => None, Some(file) => { let key = match config.tls_certificate_key.clone() { Some(k) => k, None => { let msg = "Private key for certificate is not set".to_string(); return Err(Error::ArgumentError(msg)); } }; Some(TLSConfig::new(file, key)) } }; api::node_apis( &config.api_http_addr, shared_chain.clone(), tx_pool.clone(), p2p_server.peers.clone(), sync_state.clone(), api_secret, foreign_api_secret, tls_conf, api_chan, stop_state.clone(), )?; info!("Starting dandelion monitor: {}", &config.api_http_addr); let dandelion_thread = dandelion_monitor::monitor_transactions( config.dandelion_config.clone(), tx_pool.clone(), pool_net_adapter, stop_state.clone(), )?; warn!("Grin server started."); Ok(Server { config, p2p: p2p_server, chain: shared_chain, tx_pool, sync_state, state_info: ServerStateInfo { ..Default::default() }, stop_state, lock_file, connect_thread, sync_thread, dandelion_thread, }) } /// Asks the server to connect to a peer at the provided network address. pub fn connect_peer(&self, addr: PeerAddr) -> Result<(), Error> { self.p2p.connect(addr)?; Ok(()) } /// Ping all peers, mostly useful for tests to have connected peers share /// their heights pub fn ping_peers(&self) -> Result<(), Error> { let head = self.chain.head()?; self.p2p.peers.check_all(head.total_difficulty, head.height); Ok(()) } /// Number of peers pub fn peer_count(&self) -> u32 { self.p2p .peers .iter() .connected() .count() .try_into() .unwrap() } /// Start a minimal "stratum" mining service on a separate thread pub fn start_stratum_server(&self, config: StratumServerConfig) { let proof_size = global::proofsize(); let sync_state = self.sync_state.clone(); let mut stratum_server = stratumserver::StratumServer::new( config, self.chain.clone(), self.tx_pool.clone(), self.state_info.stratum_stats.clone(), ); let _ = thread::Builder::new() .name("stratum_server".to_string()) .spawn(move || { stratum_server.run_loop(proof_size, sync_state); }); } /// Start mining for blocks internally on a separate thread. Relies on /// internal miner, and should only be used for automated testing.
Burns /// reward if wallet_listener_url is 'None' pub fn start_test_miner( &self, wallet_listener_url: Option<String>, stop_state: Arc<StopState>, ) { info!("start_test_miner - start",); let sync_state = self.sync_state.clone(); let config_wallet_url = match wallet_listener_url.clone() { Some(u) => u, None => String::from("http://127.0.0.1:13415"), }; let config = StratumServerConfig { attempt_time_per_block: 60, burn_reward: false, enable_stratum_server: None, stratum_server_addr: None, wallet_listener_url: config_wallet_url, minimum_share_difficulty: 1, }; let mut miner = Miner::new( config, self.chain.clone(), self.tx_pool.clone(), stop_state, sync_state, ); miner.set_debug_output_id(format!("Port {}", self.config.p2p_config.port)); let _ = thread::Builder::new() .name("test_miner".to_string()) .spawn(move || miner.run_loop(wallet_listener_url)); } /// The chain head pub fn head(&self) -> Result<chain::Tip, Error>
/// The head of the block header chain pub fn header_head(&self) -> Result<chain::Tip, Error> { self.chain.header_head().map_err(|e| e.into()) } /// The p2p layer protocol version for this node. pub fn protocol_version() -> ProtocolVersion { ProtocolVersion::local() } /// Returns a set of stats about this server. This and the ServerStats /// structure /// can be updated over time to include any information needed by tests or /// other consumers pub fn get_server_stats(&self) -> Result<ServerStats, Error> { let stratum_stats = self.state_info.stratum_stats.read().clone(); // Fill out stats on our current difficulty calculation // TODO: check the overhead of calculating this again isn't too much // could return it from next_difficulty, but would rather keep consensus // code clean. This may be handy for testing but not really needed // for release let diff_stats = { let last_blocks: Vec<consensus::HeaderDifficultyInfo> = global::difficulty_data_to_vector(self.chain.difficulty_iter()?) .into_iter() .collect(); let tip_height = self.head()?.height as i64; let mut height = tip_height as i64 - last_blocks.len() as i64 + 1; let diff_entries: Vec<DiffBlock> = last_blocks .windows(2) .map(|pair| { let prev = &pair[0]; let next = &pair[1]; height += 1; let block_hash = next.hash.unwrap_or(ZERO_HASH); DiffBlock { block_height: height, block_hash, difficulty: next.difficulty.to_num(), time: next.timestamp, duration: next.timestamp - prev.timestamp, secondary_scaling: next.secondary_scaling, is_secondary: next.is_secondary, } }) .collect(); let block_time_sum = diff_entries.iter().fold(0, |sum, t| sum + t.duration); let block_diff_sum = diff_entries.iter().fold(0, |sum, d| sum + d.difficulty); DiffStats { height: height as u64, last_blocks: diff_entries, average_block_time: block_time_sum / (consensus::DMA_WINDOW - 1), average_difficulty: block_diff_sum / (consensus::DMA_WINDOW - 1), window_size: consensus::DMA_WINDOW, } }; let peer_stats = self .p2p .peers .iter() .connected() .into_iter() .map(|p| PeerStats::from_peer(&p)) .collect(); // Updating TUI stats should not block any other processing so only attempt to // acquire various read locks with a timeout. 
let read_timeout = Duration::from_millis(500); let tx_stats = self.tx_pool.try_read_for(read_timeout).map(|pool| TxStats { tx_pool_size: pool.txpool.size(), tx_pool_kernels: pool.txpool.kernel_count(), stem_pool_size: pool.stempool.size(), stem_pool_kernels: pool.stempool.kernel_count(), }); let head = self.chain.head_header()?; let head_stats = ChainStats { latest_timestamp: head.timestamp, height: head.height, last_block_h: head.hash(), total_difficulty: head.total_difficulty(), }; let header_head = self.chain.header_head()?; let header = self.chain.get_block_header(&header_head.hash())?; let header_stats = ChainStats { latest_timestamp: header.timestamp, height: header.height, last_block_h: header.hash(), total_difficulty: header.total_difficulty(), }; let disk_usage_bytes = WalkDir::new(&self.config.db_root) .min_depth(1) .max_depth(3) .into_iter() .filter_map(|entry| entry.ok()) .filter_map(|entry| entry.metadata().ok()) .filter(|metadata| metadata.is_file()) .fold(0, |acc, m| acc + m.len()); let disk_usage_gb = format!("{:.*}", 3, (disk_usage_bytes as f64 / 1_000_000_000_f64)); Ok(ServerStats { peer_count: self.peer_count(), chain_stats: head_stats, header_stats: header_stats, sync_status: self.sync_state.status(), disk_usage_gb: disk_usage_gb, stratum_stats: stratum_stats, peer_stats: peer_stats, diff_stats: diff_stats, tx_stats: tx_stats, }) } /// Stop the server. pub fn stop(self) { { self.sync_state.update(SyncStatus::Shutdown); self.stop_state.stop(); if let Some(connect_thread) = self.connect_thread { match connect_thread.join() { Err(e) => error!("failed to join to connect_and_monitor thread: {:?}", e), Ok(_) => info!("connect_and_monitor thread stopped"), } } else { info!("No active connect_and_monitor thread") } match self.sync_thread.join() { Err(e) => error!("failed to join to sync thread: {:?}", e), Ok(_) => info!("sync thread stopped"), } match self.dandelion_thread.join() { Err(e) => error!("failed to join to dandelion_monitor thread: {:?}", e), Ok(_) => info!("dandelion_monitor thread stopped"), } } // this call is blocking and makes sure all peers stop, however // we can't be sure that we stopped a listener blocked on accept, so we don't join the p2p thread self.p2p.stop(); let _ = self.lock_file.unlock(); warn!("Shutdown complete"); } /// Pause the p2p server. pub fn pause(&self) { self.stop_state.pause(); thread::sleep(time::Duration::from_secs(1)); self.p2p.pause(); } /// Resume p2p server. /// TODO - We appear not to resume the p2p server (peer connections) here? pub fn resume(&self) { self.stop_state.resume(); } /// Stops the test miner without stopping the p2p layer pub fn stop_test_miner(&self, stop: Arc<StopState>) { stop.stop(); info!("stop_test_miner - stop",); } }
{ self.chain.head().map_err(|e| e.into()) }
identifier_body
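The masked body in the row above, `{ self.chain.head().map_err(|e| e.into()) }`, relies on the standard `From`/`Into` conversion between error types: `?`-style error mapping works whenever a `From` impl bridges the two. A small self-contained sketch of the idiom, with hypothetical stand-in types rather than the real chain and server errors:

```rust
// `ChainError` and `ServerError` are hypothetical stand-ins, not the
// crate's real types.
#[derive(Debug)]
struct ChainError(String);

#[derive(Debug)]
enum ServerError {
    Chain(ChainError),
}

impl From<ChainError> for ServerError {
    fn from(e: ChainError) -> Self {
        ServerError::Chain(e)
    }
}

// Stand-in for `self.chain.head()`.
fn chain_head() -> Result<u64, ChainError> {
    Ok(42)
}

// Mirrors `self.chain.head().map_err(|e| e.into())`: the closure picks
// up the `From<ChainError> for ServerError` impl via `Into`.
fn head() -> Result<u64, ServerError> {
    chain_head().map_err(|e| e.into())
}

fn main() {
    assert_eq!(head().unwrap(), 42);
}
```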
server.rs
// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Grin server implementation, glues the different parts of the system (mostly //! the peer-to-peer server, the blockchain and the transaction pool) and acts //! as a facade. use std::fs::File; use std::io::prelude::*; use std::path::Path; use std::sync::{mpsc, Arc}; use std::{convert::TryInto, fs}; use std::{ thread::{self, JoinHandle}, time::{self, Duration}, }; use fs2::FileExt; use walkdir::WalkDir; use crate::api; use crate::api::TLSConfig; use crate::chain::{self, SyncState, SyncStatus}; use crate::common::adapters::{ ChainToPoolAndNetAdapter, NetToChainAdapter, PoolToChainAdapter, PoolToNetAdapter, }; use crate::common::hooks::{init_chain_hooks, init_net_hooks}; use crate::common::stats::{ ChainStats, DiffBlock, DiffStats, PeerStats, ServerStateInfo, ServerStats, TxStats, }; use crate::common::types::{Error, ServerConfig, StratumServerConfig}; use crate::core::core::hash::{Hashed, ZERO_HASH}; use crate::core::ser::ProtocolVersion; use crate::core::{consensus, genesis, global, pow}; use crate::grin::{dandelion_monitor, seed, sync}; use crate::mining::stratumserver; use crate::mining::test_miner::Miner; use crate::p2p; use crate::p2p::types::{Capabilities, PeerAddr}; use crate::pool; use crate::util::file::get_first_line; use crate::util::{RwLock, StopState}; use futures::channel::oneshot; use grin_util::logger::LogEntry; /// Arcified thread-safe TransactionPool with type parameters used by server components pub type ServerTxPool = Arc<RwLock<pool::TransactionPool<PoolToChainAdapter, PoolToNetAdapter>>>; /// Grin server holding internal structures. pub struct Server { /// server config pub config: ServerConfig, /// handle to our network server pub p2p: Arc<p2p::Server>, /// data store access pub chain: Arc<chain::Chain>, /// in-memory transaction pool pub tx_pool: ServerTxPool, /// Whether we're currently syncing pub sync_state: Arc<SyncState>, /// To be passed around to collect stats and info state_info: ServerStateInfo, /// Stop flag pub stop_state: Arc<StopState>, /// Maintain a lock_file so we do not run multiple Grin nodes from same dir. lock_file: Arc<File>, connect_thread: Option<JoinHandle<()>>, sync_thread: JoinHandle<()>, dandelion_thread: JoinHandle<()>, } impl Server { /// Instantiates and starts a new server. 
Optionally takes a callback /// for the server to send an ARC copy of itself, to allow another process /// to poll info about the server status pub fn start<F>( config: ServerConfig, logs_rx: Option<mpsc::Receiver<LogEntry>>, mut info_callback: F, stop_state: Option<Arc<StopState>>, api_chan: &'static mut (oneshot::Sender<()>, oneshot::Receiver<()>), ) -> Result<(), Error> where F: FnMut(Server, Option<mpsc::Receiver<LogEntry>>), { let mining_config = config.stratum_mining_config.clone(); let enable_test_miner = config.run_test_miner; let test_miner_wallet_url = config.test_miner_wallet_url.clone(); let serv = Server::new(config, stop_state, api_chan)?; if let Some(c) = mining_config { let enable_stratum_server = c.enable_stratum_server; if let Some(s) = enable_stratum_server { if s { { let mut stratum_stats = serv.state_info.stratum_stats.write(); stratum_stats.is_enabled = true; } serv.start_stratum_server(c); } } } if let Some(s) = enable_test_miner { if s { serv.start_test_miner(test_miner_wallet_url, serv.stop_state.clone()); } } info_callback(serv, logs_rx); Ok(()) } // Exclusive (advisory) lock_file to ensure we do not run multiple // instance of grin server from the same dir. // This uses fs2 and should be safe cross-platform unless somebody abuses the file itself. fn one_grin_at_a_time(config: &ServerConfig) -> Result<Arc<File>, Error> { let path = Path::new(&config.db_root); fs::create_dir_all(&path)?; let path = path.join("grin.lock"); let lock_file = fs::OpenOptions::new() .read(true) .write(true) .create(true) .open(&path)?; lock_file.try_lock_exclusive().map_err(|e| { let mut stderr = std::io::stderr(); writeln!( &mut stderr, "Failed to lock {:?} (grin server already running?)", path ) .expect("Could not write to stderr"); e })?; Ok(Arc::new(lock_file)) } /// Instantiates a new server associated with the provided future reactor. pub fn new( config: ServerConfig, stop_state: Option<Arc<StopState>>, api_chan: &'static mut (oneshot::Sender<()>, oneshot::Receiver<()>), ) -> Result<Server, Error> { // Obtain our lock_file or fail immediately with an error. let lock_file = Server::one_grin_at_a_time(&config)?; // Defaults to None (optional) in config file. // This translates to false here. 
let archive_mode = match config.archive_mode { None => false, Some(b) => b, }; let stop_state = if stop_state.is_some() { stop_state.unwrap() } else { Arc::new(StopState::new()) }; let pool_adapter = Arc::new(PoolToChainAdapter::new()); let pool_net_adapter = Arc::new(PoolToNetAdapter::new(config.dandelion_config.clone())); let tx_pool = Arc::new(RwLock::new(pool::TransactionPool::new( config.pool_config.clone(), pool_adapter.clone(), pool_net_adapter.clone(), ))); let sync_state = Arc::new(SyncState::new()); let chain_adapter = Arc::new(ChainToPoolAndNetAdapter::new( tx_pool.clone(), init_chain_hooks(&config), )); let genesis = match config.chain_type { global::ChainTypes::AutomatedTesting => pow::mine_genesis_block().unwrap(), global::ChainTypes::UserTesting => pow::mine_genesis_block().unwrap(), global::ChainTypes::Testnet => genesis::genesis_test(), global::ChainTypes::Mainnet => genesis::genesis_main(), }; info!("Starting server, genesis block: {}", genesis.hash()); let shared_chain = Arc::new(chain::Chain::init( config.db_root.clone(), chain_adapter.clone(), genesis.clone(), pow::verify_size, archive_mode, )?); pool_adapter.set_chain(shared_chain.clone()); let net_adapter = Arc::new(NetToChainAdapter::new( sync_state.clone(), shared_chain.clone(), tx_pool.clone(), config.clone(), init_net_hooks(&config), )); // Initialize our capabilities. // Currently either "default" or with optional "archive_mode" (block history) support enabled. let capabilities = if let Some(true) = config.archive_mode { Capabilities::default() | Capabilities::BLOCK_HIST } else { Capabilities::default() }; debug!("Capabilities: {:?}", capabilities); let p2p_server = Arc::new(p2p::Server::new( &config.db_root, capabilities, config.p2p_config.clone(), net_adapter.clone(), genesis.hash(), stop_state.clone(), )?); // Initialize various adapters with our dynamic set of connected peers. chain_adapter.init(p2p_server.peers.clone()); pool_net_adapter.init(p2p_server.peers.clone()); net_adapter.init(p2p_server.peers.clone()); let mut connect_thread = None; if config.p2p_config.seeding_type!= p2p::Seeding::Programmatic { let seed_list = match config.p2p_config.seeding_type { p2p::Seeding::None => { warn!("No seed configured, will stay solo until connected to"); seed::predefined_seeds(vec![]) } p2p::Seeding::List => match &config.p2p_config.seeds { Some(seeds) => seed::predefined_seeds(seeds.peers.clone()), None => { return Err(Error::Configuration( "Seeds must be configured for seeding type List".to_owned(), )); } }, p2p::Seeding::DNSSeed => seed::default_dns_seeds(), _ => unreachable!(), }; connect_thread = Some(seed::connect_and_monitor( p2p_server.clone(), seed_list, config.p2p_config.clone(), stop_state.clone(), )?); } // Defaults to None (optional) in config file. // This translates to false here so we do not skip by default. 
let skip_sync_wait = config.skip_sync_wait.unwrap_or(false); sync_state.update(SyncStatus::AwaitingPeers(!skip_sync_wait)); let sync_thread = sync::run_sync( sync_state.clone(), p2p_server.peers.clone(), shared_chain.clone(), stop_state.clone(), )?; let p2p_inner = p2p_server.clone(); let _ = thread::Builder::new() .name("p2p-server".to_string()) .spawn(move || { if let Err(e) = p2p_inner.listen() { error!("P2P server failed with error: {:?}", e); } })?; info!("Starting rest apis at: {}", &config.api_http_addr); let api_secret = get_first_line(config.api_secret_path.clone()); let foreign_api_secret = get_first_line(config.foreign_api_secret_path.clone()); let tls_conf = match config.tls_certificate_file.clone() { None => None, Some(file) => { let key = match config.tls_certificate_key.clone() { Some(k) => k, None => { let msg = "Private key for certificate is not set".to_string(); return Err(Error::ArgumentError(msg)); } }; Some(TLSConfig::new(file, key)) } }; api::node_apis( &config.api_http_addr, shared_chain.clone(), tx_pool.clone(), p2p_server.peers.clone(), sync_state.clone(), api_secret, foreign_api_secret, tls_conf, api_chan, stop_state.clone(), )?; info!("Starting dandelion monitor: {}", &config.api_http_addr); let dandelion_thread = dandelion_monitor::monitor_transactions( config.dandelion_config.clone(), tx_pool.clone(), pool_net_adapter, stop_state.clone(), )?; warn!("Grin server started."); Ok(Server { config, p2p: p2p_server, chain: shared_chain, tx_pool, sync_state, state_info: ServerStateInfo { ..Default::default() }, stop_state, lock_file, connect_thread, sync_thread, dandelion_thread, }) } /// Asks the server to connect to a peer at the provided network address. pub fn connect_peer(&self, addr: PeerAddr) -> Result<(), Error> { self.p2p.connect(addr)?; Ok(()) } /// Ping all peers, mostly useful for tests to have connected peers share /// their heights pub fn ping_peers(&self) -> Result<(), Error> { let head = self.chain.head()?; self.p2p.peers.check_all(head.total_difficulty, head.height); Ok(()) } /// Number of peers pub fn peer_count(&self) -> u32 { self.p2p .peers .iter() .connected() .count() .try_into() .unwrap() } /// Start a minimal "stratum" mining service on a separate thread pub fn start_stratum_server(&self, config: StratumServerConfig) { let proof_size = global::proofsize(); let sync_state = self.sync_state.clone(); let mut stratum_server = stratumserver::StratumServer::new( config, self.chain.clone(), self.tx_pool.clone(), self.state_info.stratum_stats.clone(), ); let _ = thread::Builder::new() .name("stratum_server".to_string()) .spawn(move || { stratum_server.run_loop(proof_size, sync_state); }); } /// Start mining for blocks internally on a separate thread. Relies on /// internal miner, and should only be used for automated testing.
Burns /// reward if wallet_listener_url is 'None' pub fn start_test_miner( &self, wallet_listener_url: Option<String>, stop_state: Arc<StopState>, ) { info!("start_test_miner - start",); let sync_state = self.sync_state.clone(); let config_wallet_url = match wallet_listener_url.clone() { Some(u) => u, None => String::from("http://127.0.0.1:13415"), }; let config = StratumServerConfig { attempt_time_per_block: 60, burn_reward: false, enable_stratum_server: None, stratum_server_addr: None, wallet_listener_url: config_wallet_url, minimum_share_difficulty: 1, }; let mut miner = Miner::new( config, self.chain.clone(), self.tx_pool.clone(), stop_state, sync_state, ); miner.set_debug_output_id(format!("Port {}", self.config.p2p_config.port)); let _ = thread::Builder::new() .name("test_miner".to_string()) .spawn(move || miner.run_loop(wallet_listener_url)); } /// The chain head pub fn head(&self) -> Result<chain::Tip, Error> { self.chain.head().map_err(|e| e.into()) } /// The head of the block header chain pub fn header_head(&self) -> Result<chain::Tip, Error> { self.chain.header_head().map_err(|e| e.into()) } /// The p2p layer protocol version for this node. pub fn protocol_version() -> ProtocolVersion { ProtocolVersion::local() } /// Returns a set of stats about this server. This and the ServerStats /// structure /// can be updated over time to include any information needed by tests or /// other consumers pub fn get_server_stats(&self) -> Result<ServerStats, Error> { let stratum_stats = self.state_info.stratum_stats.read().clone(); // Fill out stats on our current difficulty calculation // TODO: check the overhead of calculating this again isn't too much // could return it from next_difficulty, but would rather keep consensus // code clean. This may be handy for testing but not really needed // for release let diff_stats = { let last_blocks: Vec<consensus::HeaderDifficultyInfo> = global::difficulty_data_to_vector(self.chain.difficulty_iter()?) .into_iter() .collect(); let tip_height = self.head()?.height as i64; let mut height = tip_height as i64 - last_blocks.len() as i64 + 1; let diff_entries: Vec<DiffBlock> = last_blocks .windows(2) .map(|pair| { let prev = &pair[0]; let next = &pair[1]; height += 1; let block_hash = next.hash.unwrap_or(ZERO_HASH); DiffBlock { block_height: height, block_hash, difficulty: next.difficulty.to_num(), time: next.timestamp, duration: next.timestamp - prev.timestamp, secondary_scaling: next.secondary_scaling, is_secondary: next.is_secondary, } }) .collect(); let block_time_sum = diff_entries.iter().fold(0, |sum, t| sum + t.duration); let block_diff_sum = diff_entries.iter().fold(0, |sum, d| sum + d.difficulty); DiffStats { height: height as u64, last_blocks: diff_entries, average_block_time: block_time_sum / (consensus::DMA_WINDOW - 1), average_difficulty: block_diff_sum / (consensus::DMA_WINDOW - 1), window_size: consensus::DMA_WINDOW, } }; let peer_stats = self .p2p .peers .iter() .connected() .into_iter() .map(|p| PeerStats::from_peer(&p)) .collect(); // Updating TUI stats should not block any other processing so only attempt to // acquire various read locks with a timeout. 
let read_timeout = Duration::from_millis(500); let tx_stats = self.tx_pool.try_read_for(read_timeout).map(|pool| TxStats { tx_pool_size: pool.txpool.size(), tx_pool_kernels: pool.txpool.kernel_count(), stem_pool_size: pool.stempool.size(), stem_pool_kernels: pool.stempool.kernel_count(), }); let head = self.chain.head_header()?; let head_stats = ChainStats { latest_timestamp: head.timestamp, height: head.height, last_block_h: head.hash(), total_difficulty: head.total_difficulty(), }; let header_head = self.chain.header_head()?; let header = self.chain.get_block_header(&header_head.hash())?;
}; let disk_usage_bytes = WalkDir::new(&self.config.db_root) .min_depth(1) .max_depth(3) .into_iter() .filter_map(|entry| entry.ok()) .filter_map(|entry| entry.metadata().ok()) .filter(|metadata| metadata.is_file()) .fold(0, |acc, m| acc + m.len()); let disk_usage_gb = format!("{:.*}", 3, (disk_usage_bytes as f64 / 1_000_000_000_f64)); Ok(ServerStats { peer_count: self.peer_count(), chain_stats: head_stats, header_stats: header_stats, sync_status: self.sync_state.status(), disk_usage_gb: disk_usage_gb, stratum_stats: stratum_stats, peer_stats: peer_stats, diff_stats: diff_stats, tx_stats: tx_stats, }) } /// Stop the server. pub fn stop(self) { { self.sync_state.update(SyncStatus::Shutdown); self.stop_state.stop(); if let Some(connect_thread) = self.connect_thread { match connect_thread.join() { Err(e) => error!("failed to join to connect_and_monitor thread: {:?}", e), Ok(_) => info!("connect_and_monitor thread stopped"), } } else { info!("No active connect_and_monitor thread") } match self.sync_thread.join() { Err(e) => error!("failed to join to sync thread: {:?}", e), Ok(_) => info!("sync thread stopped"), } match self.dandelion_thread.join() { Err(e) => error!("failed to join to dandelion_monitor thread: {:?}", e), Ok(_) => info!("dandelion_monitor thread stopped"), } } // this call is blocking and makes sure all peers stop, however // we can't be sure that we stopped a listener blocked on accept, so we don't join the p2p thread self.p2p.stop(); let _ = self.lock_file.unlock(); warn!("Shutdown complete"); } /// Pause the p2p server. pub fn pause(&self) { self.stop_state.pause(); thread::sleep(time::Duration::from_secs(1)); self.p2p.pause(); } /// Resume p2p server. /// TODO - We appear not to resume the p2p server (peer connections) here? pub fn resume(&self) { self.stop_state.resume(); } /// Stops the test miner without stopping the p2p layer pub fn stop_test_miner(&self, stop: Arc<StopState>) { stop.stop(); info!("stop_test_miner - stop",); } }
let header_stats = ChainStats { latest_timestamp: header.timestamp, height: header.height, last_block_h: header.hash(), total_difficulty: header.total_difficulty(),
random_line_split
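In the `get_server_stats` body shown above, per-block durations come from walking consecutive header pairs with `windows(2)` and subtracting timestamps. A self-contained sketch of that technique on made-up data:

```rust
fn main() {
    // Hypothetical header timestamps in seconds; the real code pulls
    // these from `consensus::HeaderDifficultyInfo` entries.
    let timestamps: Vec<u64> = vec![1_000, 1_055, 1_130, 1_190];

    // Each window is a two-element slice of (previous, next), exactly as
    // in the `last_blocks.windows(2)` loop above.
    let durations: Vec<u64> = timestamps
        .windows(2)
        .map(|pair| pair[1] - pair[0])
        .collect();
    assert_eq!(durations, vec![55, 75, 60]);

    // The server divides by a fixed consensus window size
    // (`DMA_WINDOW - 1`); here we simply average over the gaps we have.
    let average = durations.iter().sum::<u64>() / durations.len() as u64;
    println!("average block time: {}s", average);
}
```

Note that `windows(2)` over `n` items yields `n - 1` pairs, which is why the averages in the source divide by `DMA_WINDOW - 1` rather than `DMA_WINDOW`.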
server.rs
// Copyright 2021 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Grin server implementation, glues the different parts of the system (mostly //! the peer-to-peer server, the blockchain and the transaction pool) and acts //! as a facade. use std::fs::File; use std::io::prelude::*; use std::path::Path; use std::sync::{mpsc, Arc}; use std::{convert::TryInto, fs}; use std::{ thread::{self, JoinHandle}, time::{self, Duration}, }; use fs2::FileExt; use walkdir::WalkDir; use crate::api; use crate::api::TLSConfig; use crate::chain::{self, SyncState, SyncStatus}; use crate::common::adapters::{ ChainToPoolAndNetAdapter, NetToChainAdapter, PoolToChainAdapter, PoolToNetAdapter, }; use crate::common::hooks::{init_chain_hooks, init_net_hooks}; use crate::common::stats::{ ChainStats, DiffBlock, DiffStats, PeerStats, ServerStateInfo, ServerStats, TxStats, }; use crate::common::types::{Error, ServerConfig, StratumServerConfig}; use crate::core::core::hash::{Hashed, ZERO_HASH}; use crate::core::ser::ProtocolVersion; use crate::core::{consensus, genesis, global, pow}; use crate::grin::{dandelion_monitor, seed, sync}; use crate::mining::stratumserver; use crate::mining::test_miner::Miner; use crate::p2p; use crate::p2p::types::{Capabilities, PeerAddr}; use crate::pool; use crate::util::file::get_first_line; use crate::util::{RwLock, StopState}; use futures::channel::oneshot; use grin_util::logger::LogEntry; /// Arcified thread-safe TransactionPool with type parameters used by server components pub type ServerTxPool = Arc<RwLock<pool::TransactionPool<PoolToChainAdapter, PoolToNetAdapter>>>; /// Grin server holding internal structures. pub struct Server { /// server config pub config: ServerConfig, /// handle to our network server pub p2p: Arc<p2p::Server>, /// data store access pub chain: Arc<chain::Chain>, /// in-memory transaction pool pub tx_pool: ServerTxPool, /// Whether we're currently syncing pub sync_state: Arc<SyncState>, /// To be passed around to collect stats and info state_info: ServerStateInfo, /// Stop flag pub stop_state: Arc<StopState>, /// Maintain a lock_file so we do not run multiple Grin nodes from same dir. lock_file: Arc<File>, connect_thread: Option<JoinHandle<()>>, sync_thread: JoinHandle<()>, dandelion_thread: JoinHandle<()>, } impl Server { /// Instantiates and starts a new server. 
Optionally takes a callback /// for the server to send an ARC copy of itself, to allow another process /// to poll info about the server status pub fn start<F>( config: ServerConfig, logs_rx: Option<mpsc::Receiver<LogEntry>>, mut info_callback: F, stop_state: Option<Arc<StopState>>, api_chan: &'static mut (oneshot::Sender<()>, oneshot::Receiver<()>), ) -> Result<(), Error> where F: FnMut(Server, Option<mpsc::Receiver<LogEntry>>), { let mining_config = config.stratum_mining_config.clone(); let enable_test_miner = config.run_test_miner; let test_miner_wallet_url = config.test_miner_wallet_url.clone(); let serv = Server::new(config, stop_state, api_chan)?; if let Some(c) = mining_config { let enable_stratum_server = c.enable_stratum_server; if let Some(s) = enable_stratum_server { if s { { let mut stratum_stats = serv.state_info.stratum_stats.write(); stratum_stats.is_enabled = true; } serv.start_stratum_server(c); } } } if let Some(s) = enable_test_miner { if s { serv.start_test_miner(test_miner_wallet_url, serv.stop_state.clone()); } } info_callback(serv, logs_rx); Ok(()) } // Exclusive (advisory) lock_file to ensure we do not run multiple // instance of grin server from the same dir. // This uses fs2 and should be safe cross-platform unless somebody abuses the file itself. fn one_grin_at_a_time(config: &ServerConfig) -> Result<Arc<File>, Error> { let path = Path::new(&config.db_root); fs::create_dir_all(&path)?; let path = path.join("grin.lock"); let lock_file = fs::OpenOptions::new() .read(true) .write(true) .create(true) .open(&path)?; lock_file.try_lock_exclusive().map_err(|e| { let mut stderr = std::io::stderr(); writeln!( &mut stderr, "Failed to lock {:?} (grin server already running?)", path ) .expect("Could not write to stderr"); e })?; Ok(Arc::new(lock_file)) } /// Instantiates a new server associated with the provided future reactor. pub fn new( config: ServerConfig, stop_state: Option<Arc<StopState>>, api_chan: &'static mut (oneshot::Sender<()>, oneshot::Receiver<()>), ) -> Result<Server, Error> { // Obtain our lock_file or fail immediately with an error. let lock_file = Server::one_grin_at_a_time(&config)?; // Defaults to None (optional) in config file. // This translates to false here. 
let archive_mode = match config.archive_mode { None => false, Some(b) => b, }; let stop_state = if stop_state.is_some() { stop_state.unwrap() } else { Arc::new(StopState::new()) }; let pool_adapter = Arc::new(PoolToChainAdapter::new()); let pool_net_adapter = Arc::new(PoolToNetAdapter::new(config.dandelion_config.clone())); let tx_pool = Arc::new(RwLock::new(pool::TransactionPool::new( config.pool_config.clone(), pool_adapter.clone(), pool_net_adapter.clone(), ))); let sync_state = Arc::new(SyncState::new()); let chain_adapter = Arc::new(ChainToPoolAndNetAdapter::new( tx_pool.clone(), init_chain_hooks(&config), )); let genesis = match config.chain_type { global::ChainTypes::AutomatedTesting => pow::mine_genesis_block().unwrap(), global::ChainTypes::UserTesting => pow::mine_genesis_block().unwrap(), global::ChainTypes::Testnet => genesis::genesis_test(), global::ChainTypes::Mainnet => genesis::genesis_main(), }; info!("Starting server, genesis block: {}", genesis.hash()); let shared_chain = Arc::new(chain::Chain::init( config.db_root.clone(), chain_adapter.clone(), genesis.clone(), pow::verify_size, archive_mode, )?); pool_adapter.set_chain(shared_chain.clone()); let net_adapter = Arc::new(NetToChainAdapter::new( sync_state.clone(), shared_chain.clone(), tx_pool.clone(), config.clone(), init_net_hooks(&config), )); // Initialize our capabilities. // Currently either "default" or with optional "archive_mode" (block history) support enabled. let capabilities = if let Some(true) = config.archive_mode { Capabilities::default() | Capabilities::BLOCK_HIST } else { Capabilities::default() }; debug!("Capabilities: {:?}", capabilities); let p2p_server = Arc::new(p2p::Server::new( &config.db_root, capabilities, config.p2p_config.clone(), net_adapter.clone(), genesis.hash(), stop_state.clone(), )?); // Initialize various adapters with our dynamic set of connected peers. chain_adapter.init(p2p_server.peers.clone()); pool_net_adapter.init(p2p_server.peers.clone()); net_adapter.init(p2p_server.peers.clone()); let mut connect_thread = None; if config.p2p_config.seeding_type!= p2p::Seeding::Programmatic { let seed_list = match config.p2p_config.seeding_type { p2p::Seeding::None => { warn!("No seed configured, will stay solo until connected to"); seed::predefined_seeds(vec![]) } p2p::Seeding::List => match &config.p2p_config.seeds { Some(seeds) => seed::predefined_seeds(seeds.peers.clone()), None =>
}, p2p::Seeding::DNSSeed => seed::default_dns_seeds(), _ => unreachable!(), }; connect_thread = Some(seed::connect_and_monitor( p2p_server.clone(), seed_list, config.p2p_config.clone(), stop_state.clone(), )?); } // Defaults to None (optional) in config file. // This translates to false here so we do not skip by default. let skip_sync_wait = config.skip_sync_wait.unwrap_or(false); sync_state.update(SyncStatus::AwaitingPeers(!skip_sync_wait)); let sync_thread = sync::run_sync( sync_state.clone(), p2p_server.peers.clone(), shared_chain.clone(), stop_state.clone(), )?; let p2p_inner = p2p_server.clone(); let _ = thread::Builder::new() .name("p2p-server".to_string()) .spawn(move || { if let Err(e) = p2p_inner.listen() { error!("P2P server failed with error: {:?}", e); } })?; info!("Starting rest apis at: {}", &config.api_http_addr); let api_secret = get_first_line(config.api_secret_path.clone()); let foreign_api_secret = get_first_line(config.foreign_api_secret_path.clone()); let tls_conf = match config.tls_certificate_file.clone() { None => None, Some(file) => { let key = match config.tls_certificate_key.clone() { Some(k) => k, None => { let msg = "Private key for certificate is not set".to_string(); return Err(Error::ArgumentError(msg)); } }; Some(TLSConfig::new(file, key)) } }; api::node_apis( &config.api_http_addr, shared_chain.clone(), tx_pool.clone(), p2p_server.peers.clone(), sync_state.clone(), api_secret, foreign_api_secret, tls_conf, api_chan, stop_state.clone(), )?; info!("Starting dandelion monitor: {}", &config.api_http_addr); let dandelion_thread = dandelion_monitor::monitor_transactions( config.dandelion_config.clone(), tx_pool.clone(), pool_net_adapter, stop_state.clone(), )?; warn!("Grin server started."); Ok(Server { config, p2p: p2p_server, chain: shared_chain, tx_pool, sync_state, state_info: ServerStateInfo { ..Default::default() }, stop_state, lock_file, connect_thread, sync_thread, dandelion_thread, }) } /// Asks the server to connect to a peer at the provided network address. pub fn connect_peer(&self, addr: PeerAddr) -> Result<(), Error> { self.p2p.connect(addr)?; Ok(()) } /// Ping all peers, mostly useful for tests to have connected peers share /// their heights pub fn ping_peers(&self) -> Result<(), Error> { let head = self.chain.head()?; self.p2p.peers.check_all(head.total_difficulty, head.height); Ok(()) } /// Number of peers pub fn peer_count(&self) -> u32 { self.p2p .peers .iter() .connected() .count() .try_into() .unwrap() } /// Start a minimal "stratum" mining service on a separate thread pub fn start_stratum_server(&self, config: StratumServerConfig) { let proof_size = global::proofsize(); let sync_state = self.sync_state.clone(); let mut stratum_server = stratumserver::StratumServer::new( config, self.chain.clone(), self.tx_pool.clone(), self.state_info.stratum_stats.clone(), ); let _ = thread::Builder::new() .name("stratum_server".to_string()) .spawn(move || { stratum_server.run_loop(proof_size, sync_state); }); } /// Start mining for blocks internally on a separate thread. Relies on /// internal miner, and should only be used for automated testing.
Burns /// reward if wallet_listener_url is 'None' pub fn start_test_miner( &self, wallet_listener_url: Option<String>, stop_state: Arc<StopState>, ) { info!("start_test_miner - start",); let sync_state = self.sync_state.clone(); let config_wallet_url = match wallet_listener_url.clone() { Some(u) => u, None => String::from("http://127.0.0.1:13415"), }; let config = StratumServerConfig { attempt_time_per_block: 60, burn_reward: false, enable_stratum_server: None, stratum_server_addr: None, wallet_listener_url: config_wallet_url, minimum_share_difficulty: 1, }; let mut miner = Miner::new( config, self.chain.clone(), self.tx_pool.clone(), stop_state, sync_state, ); miner.set_debug_output_id(format!("Port {}", self.config.p2p_config.port)); let _ = thread::Builder::new() .name("test_miner".to_string()) .spawn(move || miner.run_loop(wallet_listener_url)); } /// The chain head pub fn head(&self) -> Result<chain::Tip, Error> { self.chain.head().map_err(|e| e.into()) } /// The head of the block header chain pub fn header_head(&self) -> Result<chain::Tip, Error> { self.chain.header_head().map_err(|e| e.into()) } /// The p2p layer protocol version for this node. pub fn protocol_version() -> ProtocolVersion { ProtocolVersion::local() } /// Returns a set of stats about this server. This and the ServerStats /// structure /// can be updated over time to include any information needed by tests or /// other consumers pub fn get_server_stats(&self) -> Result<ServerStats, Error> { let stratum_stats = self.state_info.stratum_stats.read().clone(); // Fill out stats on our current difficulty calculation // TODO: check the overhead of calculating this again isn't too much // could return it from next_difficulty, but would rather keep consensus // code clean. This may be handy for testing but not really needed // for release let diff_stats = { let last_blocks: Vec<consensus::HeaderDifficultyInfo> = global::difficulty_data_to_vector(self.chain.difficulty_iter()?) .into_iter() .collect(); let tip_height = self.head()?.height as i64; let mut height = tip_height as i64 - last_blocks.len() as i64 + 1; let diff_entries: Vec<DiffBlock> = last_blocks .windows(2) .map(|pair| { let prev = &pair[0]; let next = &pair[1]; height += 1; let block_hash = next.hash.unwrap_or(ZERO_HASH); DiffBlock { block_height: height, block_hash, difficulty: next.difficulty.to_num(), time: next.timestamp, duration: next.timestamp - prev.timestamp, secondary_scaling: next.secondary_scaling, is_secondary: next.is_secondary, } }) .collect(); let block_time_sum = diff_entries.iter().fold(0, |sum, t| sum + t.duration); let block_diff_sum = diff_entries.iter().fold(0, |sum, d| sum + d.difficulty); DiffStats { height: height as u64, last_blocks: diff_entries, average_block_time: block_time_sum / (consensus::DMA_WINDOW - 1), average_difficulty: block_diff_sum / (consensus::DMA_WINDOW - 1), window_size: consensus::DMA_WINDOW, } }; let peer_stats = self .p2p .peers .iter() .connected() .into_iter() .map(|p| PeerStats::from_peer(&p)) .collect(); // Updating TUI stats should not block any other processing so only attempt to // acquire various read locks with a timeout. 
let read_timeout = Duration::from_millis(500); let tx_stats = self.tx_pool.try_read_for(read_timeout).map(|pool| TxStats { tx_pool_size: pool.txpool.size(), tx_pool_kernels: pool.txpool.kernel_count(), stem_pool_size: pool.stempool.size(), stem_pool_kernels: pool.stempool.kernel_count(), }); let head = self.chain.head_header()?; let head_stats = ChainStats { latest_timestamp: head.timestamp, height: head.height, last_block_h: head.hash(), total_difficulty: head.total_difficulty(), }; let header_head = self.chain.header_head()?; let header = self.chain.get_block_header(&header_head.hash())?; let header_stats = ChainStats { latest_timestamp: header.timestamp, height: header.height, last_block_h: header.hash(), total_difficulty: header.total_difficulty(), }; let disk_usage_bytes = WalkDir::new(&self.config.db_root) .min_depth(1) .max_depth(3) .into_iter() .filter_map(|entry| entry.ok()) .filter_map(|entry| entry.metadata().ok()) .filter(|metadata| metadata.is_file()) .fold(0, |acc, m| acc + m.len()); let disk_usage_gb = format!("{:.*}", 3, (disk_usage_bytes as f64 / 1_000_000_000_f64)); Ok(ServerStats { peer_count: self.peer_count(), chain_stats: head_stats, header_stats: header_stats, sync_status: self.sync_state.status(), disk_usage_gb: disk_usage_gb, stratum_stats: stratum_stats, peer_stats: peer_stats, diff_stats: diff_stats, tx_stats: tx_stats, }) } /// Stop the server. pub fn stop(self) { { self.sync_state.update(SyncStatus::Shutdown); self.stop_state.stop(); if let Some(connect_thread) = self.connect_thread { match connect_thread.join() { Err(e) => error!("failed to join to connect_and_monitor thread: {:?}", e), Ok(_) => info!("connect_and_monitor thread stopped"), } } else { info!("No active connect_and_monitor thread") } match self.sync_thread.join() { Err(e) => error!("failed to join to sync thread: {:?}", e), Ok(_) => info!("sync thread stopped"), } match self.dandelion_thread.join() { Err(e) => error!("failed to join to dandelion_monitor thread: {:?}", e), Ok(_) => info!("dandelion_monitor thread stopped"), } } // this call is blocking and makes sure all peers stop, however // we can't be sure that we stopped a listener blocked on accept, so we don't join the p2p thread self.p2p.stop(); let _ = self.lock_file.unlock(); warn!("Shutdown complete"); } /// Pause the p2p server. pub fn pause(&self) { self.stop_state.pause(); thread::sleep(time::Duration::from_secs(1)); self.p2p.pause(); } /// Resume p2p server. /// TODO - We appear not to resume the p2p server (peer connections) here? pub fn resume(&self) { self.stop_state.resume(); } /// Stops the test miner without stopping the p2p layer pub fn stop_test_miner(&self, stop: Arc<StopState>) { stop.stop(); info!("stop_test_miner - stop",); } }
{ return Err(Error::Configuration( "Seeds must be configured for seeding type List".to_owned(), )); }
conditional_block
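For reference, the difficulty statistics in the record above derive per-block durations by sliding a two-element window over consecutive headers with `windows(2)` and then averaging over the window size minus one. A minimal, self-contained sketch of that pattern; the function name and the timestamps are hypothetical sample data, not taken from a real chain:

// Sketch of the windowed-difference pattern used in get_server_stats above.
fn average_block_time(timestamps: &[u64]) -> u64 {
    let durations: Vec<u64> = timestamps
        .windows(2)
        .map(|pair| pair[1] - pair[0]) // duration between adjacent blocks
        .collect();
    // For n timestamps this divides by n - 1, mirroring the
    // `block_time_sum / (DMA_WINDOW - 1)` computation above.
    durations.iter().sum::<u64>() / durations.len() as u64
}

fn main() {
    let ts = [1_000u64, 1_060, 1_120, 1_180]; // blocks mined 60 seconds apart
    assert_eq!(average_block_time(&ts), 60);
}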
hkdf_test.rs
// Copyright 2020 The Tink-Rust Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // //////////////////////////////////////////////////////////////////////////////// use maplit::hashmap; use serde::Deserialize; use tink_core::Prf; use tink_prf::subtle::{validate_hkdf_prf_params, HkdfPrf}; use tink_proto::HashType; struct Rfc5869Test { hash: HashType, key: &'static str, salt: &'static str, info: &'static str, output_length: usize, okm: &'static str, } #[test] fn test_vectors_rfc5869() { // Test vectors from RFC 5869. let testvectors = [ Rfc5869Test{ hash: HashType::Sha256, key: "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b", salt: "000102030405060708090a0b0c", info: "f0f1f2f3f4f5f6f7f8f9", output_length: 42, okm: "3cb25f25faacd57a90434f64d0362f2a2d2d0a90cf1a5a4c5db02d56ecc4c5bf34007208d5b887185865", }, Rfc5869Test{ hash: HashType::Sha256, key: "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f", salt: "606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeaf", info: "b0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff", output_length: 82, okm: "b11e398dc80327a1c8e7f78c596a49344f012eda2d4efad8a050cc4c19afa97c59045a99cac7827271cb41c65e590e09da3275600c2f09b8367793a9aca3db71cc30c58179ec3e87c14c01d5c1f3434f1d87", }, Rfc5869Test{ hash: HashType::Sha256, key: "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b", salt: "", info: "", output_length: 42, okm: "8da4e775a563c18f715f802a063c5a31b8a11f5c5ee1879ec3454e5f3c738d2d9d201395faa4b61a96c8", }, Rfc5869Test{ hash: HashType::Sha1, key: "0b0b0b0b0b0b0b0b0b0b0b", salt: "000102030405060708090a0b0c", info: "f0f1f2f3f4f5f6f7f8f9", output_length: 42, okm: "085a01ea1b10f36933068b56efa5ad81a4f14b822f5b091568a9cdd4f155fda2c22e422478d305f3f896", }, Rfc5869Test{ hash: HashType::Sha1, key: "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f", salt: "606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeaf", info: "b0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff", output_length: 82, okm: "0bd770a74d1160f7c9f12cd5912a06ebff6adcae899d92191fe4305673ba2ffe8fa3f1a4e5ad79f3f334b3b202b2173c486ea37ce3d397ed034c7f9dfeb15c5e927336d0441f4c4300e2cff0d0900b52d3b4", }, Rfc5869Test{ hash: HashType::Sha1, key: "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b", salt: "", info: "", output_length: 42, okm: "0ac1af7002b3d761d1e55298da9d0506b9ae52057220a306e07b6b87e8df21d0ea00033de03984d34918", }, Rfc5869Test{ hash: HashType::Sha1, key: 
"0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c", salt: "", info: "", output_length: 42, okm: "2c91117204d745f3500d636a62f64f0ab3bae548aa53d423b0d1f27ebba6f5e5673a081d70cce7acfc48", }, ]; for v in testvectors.iter() { let key = hex::decode(v.key).expect("Could not decode key"); let salt = hex::decode(v.salt).expect("Could not decode salt"); let info = hex::decode(v.info).expect("Could not decode info"); let p = HkdfPrf::new(v.hash, &key, &salt).expect("Could not create HKDF object"); let output = p .compute_prf(&info, v.output_length) .expect("Error computing HKDF"); assert_eq!( hex::encode(output), v.okm, "Computation and test vector differ." ); } } #[derive(Debug, Deserialize)] struct HkdfTestData { #[serde(flatten)] pub suite: tink_tests::WycheproofSuite, #[serde(rename = "testGroups")] pub test_groups: Vec<HkdfTestGroup>, } #[derive(Debug, Deserialize)] struct HkdfTestGroup { #[serde(flatten)] pub group: tink_tests::WycheproofGroup, #[serde(rename = "keySize")] pub key_size: u32, pub tests: Vec<HkdfTestCase>, } #[derive(Debug, Deserialize)] struct HkdfTestCase { #[serde(flatten)] pub case: tink_tests::WycheproofCase, #[serde(with = "tink_tests::hex_string")] pub ikm: Vec<u8>, #[serde(with = "tink_tests::hex_string")] pub salt: Vec<u8>, #[serde(with = "tink_tests::hex_string")] pub info: Vec<u8>, pub size: usize, #[serde(with = "tink_tests::hex_string")] pub okm: Vec<u8>, } #[test] fn test_hkdf_prf_wycheproof_cases() { for hash in &[HashType::Sha1, HashType::Sha256, HashType::Sha512] { let hash_name = format!("{:?}", hash); let filename = format!("testvectors/hkdf_{}_test.json", hash_name.to_lowercase()); println!("wycheproof file '{}' hash {}", filename, hash_name); let bytes = tink_tests::wycheproof_data(&filename); let data: HkdfTestData = serde_json::from_slice(&bytes).unwrap(); for g in &data.test_groups { println!(" key info: key_size={}", g.key_size); for tc in &g.tests { println!( " case {} [{}] {}", tc.case.case_id, tc.case.result, tc.case.comment ); assert_eq!(tc.ikm.len() * 8, g.key_size as usize); let hkdf_prf = HkdfPrf::new(*hash, &tc.ikm, &tc.salt); let valid = tc.case.result == tink_tests::WycheproofResult::Valid; if valid && hkdf_prf.is_err() { panic!( "Could not create HKDF {:?} PRF for test case {} ({})", hash, tc.case.case_id, tc.case.comment ); } if!valid && hkdf_prf.is_err() { continue; } let res = match hkdf_prf.unwrap().compute_prf(&tc.info, tc.size) { Err(_) => { assert!( !valid, "Could not compute HKDF {:?} PRF for test case {} ({})", hash, tc.case.case_id, tc.case.comment ); continue; } Ok(r) => r, }; if valid { assert_eq!( res, tc.okm, "Computed HKDF {:?} PRF and expected for test case {} ({}) do not match", hash, tc.case.case_id, tc.case.comment ); } else { assert_ne!( res, tc.okm, "Computed HKDF {:?} PRF and invalid expected for test case {} ({}) match", hash, tc.case.case_id, tc.case.comment ); } } } } } #[test] fn test_hkdf_prf_hash() { assert!( HkdfPrf::new( HashType::Sha256, &[ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10 ], &[] ) .is_ok(), "Expected HkdfPrf::new to work with SHA256" ); assert!( HkdfPrf::new( HashType::Sha512, &[ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10 ], &[] ) .is_ok(), "Expected HkdfPrf::new to work with SHA512" ); assert!( HkdfPrf::new( HashType::Sha1, &[ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10 ], &[] ) .is_ok(), "Expected HkdfPrf::new to work with SHA1" ); assert!( HkdfPrf::new( 
HashType::UnknownHash, &[ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10 ], &[] ) .is_err(), "Expected HkdfPrf::new to fail with unknown hash" ); } #[test] fn test_hkdf_prf_salt()
&[0xaf, 0xfe, 0xc0, 0xff, 0xee] ) .is_ok(), "Expected HkdfPrf::new to work with salt" ); } #[test] fn test_hkdf_prf_output_length() { let testdata = hashmap! { HashType::Sha1 => 20, HashType::Sha256 => 32, HashType::Sha512 => 64, }; for (hash, length) in testdata { let prf = HkdfPrf::new( hash, &[ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, ], &[], ) .unwrap_or_else(|_| { panic!( "Expected HkdfPrf::new to work on 32 byte key with hash {:?}", hash ) }); // If overflow checks are enabled (which they are by default for tests), // this loop runs too slow, so only test every 10th length. let stride: usize = if cfg!(overflow_checks) { 1 } else { 10 }; for i in (0..=(length * 255)).step_by(stride) { let output = prf.compute_prf(&[0x01, 0x02], i).unwrap_or_else(|e| { panic!( "Expected to be able to compute HKDF {:?} PRF with {} output length: {:?}", hash, i, e ) }); assert_eq!( output.len(), i, "Expected HKDF {:?} PRF to compute {} bytes, got {}", hash, i, output.len() ); } for i in (length * 255 + 1)..(length * 255 + 100) { assert!( prf.compute_prf(&[0x01, 0x02], i).is_err(), "Expected to not be able to compute HKDF {:?} PRF with {} output length", hash, i ); } } } #[test] fn test_validate_hkdf_prf_params() { assert!( validate_hkdf_prf_params(HashType::Sha256, 32, &[]).is_ok(), "Unexpected error for valid HKDF PRF params" ); assert!( validate_hkdf_prf_params(HashType::Sha256, 32, &[0xaf, 0xfe, 0xc0, 0xff, 0xee]).is_ok(), "Unexpected error for salted valid HKDF PRF params" ); assert!( validate_hkdf_prf_params(HashType::Sha256, 4, &[]).is_err(), "Short key size not detected for HKDF PRF params" ); assert!( validate_hkdf_prf_params(HashType::UnknownHash, 32, &[]).is_err(), "Unknown hash function not detected for HKDF PRF params" ); assert!( validate_hkdf_prf_params(HashType::Sha1, 32, &[]).is_err(), "Weak hash function not detected for HKDF PRF params" ); }
{ assert!( HkdfPrf::new( HashType::Sha256, &[ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10 ], &[] ) .is_ok(), "Expected HkdfPrf::new to work with empty salt" ); assert!( HkdfPrf::new( HashType::Sha256, &[ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10 ],
identifier_body
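As context for the hkdf_test.rs records, here is a minimal sketch of the HkdfPrf API they exercise, wired to the first RFC 5869 SHA-256 vector from test_vectors_rfc5869. It assumes the tink_core, tink_prf, tink_proto, and hex crates shown in the test's imports; only the output length is asserted:

use tink_core::Prf;
use tink_prf::subtle::HkdfPrf;
use tink_proto::HashType;

fn main() {
    // IKM, salt, and info from the first RFC 5869 test vector above.
    let ikm = hex::decode("0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b").unwrap();
    let salt = hex::decode("000102030405060708090a0b0c").unwrap();
    let info = hex::decode("f0f1f2f3f4f5f6f7f8f9").unwrap();

    let prf = HkdfPrf::new(HashType::Sha256, &ikm, &salt).expect("valid HKDF parameters");
    // Request the 42-byte output used by the vector; any length up to
    // 255 * hash_len is accepted.
    let okm = prf.compute_prf(&info, 42).expect("output length in range");
    assert_eq!(okm.len(), 42);
}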
hkdf_test.rs
// Copyright 2020 The Tink-Rust Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // //////////////////////////////////////////////////////////////////////////////// use maplit::hashmap; use serde::Deserialize; use tink_core::Prf; use tink_prf::subtle::{validate_hkdf_prf_params, HkdfPrf}; use tink_proto::HashType; struct Rfc5869Test { hash: HashType, key: &'static str, salt: &'static str, info: &'static str, output_length: usize, okm: &'static str, } #[test] fn test_vectors_rfc5869() { // Test vectors from RFC 5869. let testvectors = [ Rfc5869Test{ hash: HashType::Sha256, key: "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b", salt: "000102030405060708090a0b0c", info: "f0f1f2f3f4f5f6f7f8f9", output_length: 42, okm: "3cb25f25faacd57a90434f64d0362f2a2d2d0a90cf1a5a4c5db02d56ecc4c5bf34007208d5b887185865", }, Rfc5869Test{ hash: HashType::Sha256, key: "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f", salt: "606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeaf", info: "b0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff", output_length: 82, okm: "b11e398dc80327a1c8e7f78c596a49344f012eda2d4efad8a050cc4c19afa97c59045a99cac7827271cb41c65e590e09da3275600c2f09b8367793a9aca3db71cc30c58179ec3e87c14c01d5c1f3434f1d87", }, Rfc5869Test{ hash: HashType::Sha256, key: "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b", salt: "", info: "", output_length: 42, okm: "8da4e775a563c18f715f802a063c5a31b8a11f5c5ee1879ec3454e5f3c738d2d9d201395faa4b61a96c8", }, Rfc5869Test{ hash: HashType::Sha1, key: "0b0b0b0b0b0b0b0b0b0b0b", salt: "000102030405060708090a0b0c", info: "f0f1f2f3f4f5f6f7f8f9", output_length: 42, okm: "085a01ea1b10f36933068b56efa5ad81a4f14b822f5b091568a9cdd4f155fda2c22e422478d305f3f896", }, Rfc5869Test{ hash: HashType::Sha1, key: "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f", salt: "606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeaf", info: "b0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff", output_length: 82, okm: "0bd770a74d1160f7c9f12cd5912a06ebff6adcae899d92191fe4305673ba2ffe8fa3f1a4e5ad79f3f334b3b202b2173c486ea37ce3d397ed034c7f9dfeb15c5e927336d0441f4c4300e2cff0d0900b52d3b4", }, Rfc5869Test{ hash: HashType::Sha1, key: "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b", salt: "", info: "", output_length: 42, okm: "0ac1af7002b3d761d1e55298da9d0506b9ae52057220a306e07b6b87e8df21d0ea00033de03984d34918", }, Rfc5869Test{ hash: HashType::Sha1, key: 
"0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c", salt: "", info: "", output_length: 42, okm: "2c91117204d745f3500d636a62f64f0ab3bae548aa53d423b0d1f27ebba6f5e5673a081d70cce7acfc48", }, ]; for v in testvectors.iter() { let key = hex::decode(v.key).expect("Could not decode key"); let salt = hex::decode(v.salt).expect("Could not decode salt"); let info = hex::decode(v.info).expect("Could not decode info"); let p = HkdfPrf::new(v.hash, &key, &salt).expect("Could not create HKDF object"); let output = p .compute_prf(&info, v.output_length) .expect("Error computing HKDF"); assert_eq!( hex::encode(output), v.okm, "Computation and test vector differ." ); } } #[derive(Debug, Deserialize)] struct HkdfTestData { #[serde(flatten)] pub suite: tink_tests::WycheproofSuite, #[serde(rename = "testGroups")] pub test_groups: Vec<HkdfTestGroup>, } #[derive(Debug, Deserialize)] struct HkdfTestGroup { #[serde(flatten)] pub group: tink_tests::WycheproofGroup, #[serde(rename = "keySize")] pub key_size: u32, pub tests: Vec<HkdfTestCase>, } #[derive(Debug, Deserialize)] struct HkdfTestCase { #[serde(flatten)] pub case: tink_tests::WycheproofCase, #[serde(with = "tink_tests::hex_string")] pub ikm: Vec<u8>, #[serde(with = "tink_tests::hex_string")] pub salt: Vec<u8>, #[serde(with = "tink_tests::hex_string")] pub info: Vec<u8>, pub size: usize, #[serde(with = "tink_tests::hex_string")] pub okm: Vec<u8>, } #[test] fn test_hkdf_prf_wycheproof_cases() { for hash in &[HashType::Sha1, HashType::Sha256, HashType::Sha512] { let hash_name = format!("{:?}", hash); let filename = format!("testvectors/hkdf_{}_test.json", hash_name.to_lowercase()); println!("wycheproof file '{}' hash {}", filename, hash_name); let bytes = tink_tests::wycheproof_data(&filename); let data: HkdfTestData = serde_json::from_slice(&bytes).unwrap(); for g in &data.test_groups { println!(" key info: key_size={}", g.key_size); for tc in &g.tests { println!( " case {} [{}] {}", tc.case.case_id, tc.case.result, tc.case.comment ); assert_eq!(tc.ikm.len() * 8, g.key_size as usize); let hkdf_prf = HkdfPrf::new(*hash, &tc.ikm, &tc.salt); let valid = tc.case.result == tink_tests::WycheproofResult::Valid; if valid && hkdf_prf.is_err() { panic!( "Could not create HKDF {:?} PRF for test case {} ({})", hash, tc.case.case_id, tc.case.comment ); } if!valid && hkdf_prf.is_err() { continue; } let res = match hkdf_prf.unwrap().compute_prf(&tc.info, tc.size) { Err(_) => { assert!( !valid, "Could not compute HKDF {:?} PRF for test case {} ({})", hash, tc.case.case_id, tc.case.comment ); continue; } Ok(r) => r, }; if valid { assert_eq!( res, tc.okm, "Computed HKDF {:?} PRF and expected for test case {} ({}) do not match", hash, tc.case.case_id, tc.case.comment ); } else { assert_ne!( res, tc.okm, "Computed HKDF {:?} PRF and invalid expected for test case {} ({}) match", hash, tc.case.case_id, tc.case.comment ); } } } } } #[test] fn test_hkdf_prf_hash() { assert!( HkdfPrf::new( HashType::Sha256, &[ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10 ], &[] ) .is_ok(), "Expected HkdfPrf::new to work with SHA256" ); assert!( HkdfPrf::new( HashType::Sha512, &[ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e,
&[] ) .is_ok(), "Expected HkdfPrf::new to work with SHA512" ); assert!( HkdfPrf::new( HashType::Sha1, &[ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10 ], &[] ) .is_ok(), "Expected HkdfPrf::new to work with SHA1" ); assert!( HkdfPrf::new( HashType::UnknownHash, &[ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10 ], &[] ) .is_err(), "Expected HkdfPrf::new to fail with unknown hash" ); } #[test] fn test_hkdf_prf_salt() { assert!( HkdfPrf::new( HashType::Sha256, &[ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10 ], &[] ) .is_ok(), "Expected HkdfPrf::new to work empty salt" ); assert!( HkdfPrf::new( HashType::Sha256, &[ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10 ], &[0xaf, 0xfe, 0xc0, 0xff, 0xee] ) .is_ok(), "Expected HkdfPrf::new to work with salt" ); } #[test] fn test_hkdf_prf_output_length() { let testdata = hashmap! { HashType::Sha1 => 20, HashType::Sha256 => 32, HashType::Sha512 => 64, }; for (hash, length) in testdata { let prf = HkdfPrf::new( hash, &[ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, ], &[], ) .unwrap_or_else(|_| { panic!( "Expected HkdfPrf::new to work on 32 byte key with hash {:?}", hash ) }); // If overflow checks are enabled (which they are by default for tests), // this loop runs too slow, so only test every 10th length. let stride: usize = if cfg!(overflow_checks) { 1 } else { 10 }; for i in (0..=(length * 255)).step_by(stride) { let output = prf.compute_prf(&[0x01, 0x02], i).unwrap_or_else(|e| { panic!( "Expected to be able to compute HKDF {:?} PRF with {} output length: {:?}", hash, i, e ) }); assert_eq!( output.len(), i, "Expected HKDF {:?} PRF to compute {} bytes, got {}", hash, i, output.len() ); } for i in (length * 255 + 1)..(length * 255 + 100) { assert!( prf.compute_prf(&[0x01, 0x02], i).is_err(), "Expected to not be able to compute HKDF {:?} PRF with {} output length", hash, i ); } } } #[test] fn test_validate_hkdf_prf_params() { assert!( validate_hkdf_prf_params(HashType::Sha256, 32, &[]).is_ok(), "Unexpected error for valid HKDF PRF params" ); assert!( validate_hkdf_prf_params(HashType::Sha256, 32, &[0xaf, 0xfe, 0xc0, 0xff, 0xee]).is_ok(), "Unexpected error for salted valid HKDF PRF params" ); assert!( validate_hkdf_prf_params(HashType::Sha256, 4, &[]).is_err(), "Short key size not detected for HKDF PRF params" ); assert!( validate_hkdf_prf_params(HashType::UnknownHash, 32, &[]).is_err(), "Unknown hash function not detected for HKDF PRF params" ); assert!( validate_hkdf_prf_params(HashType::Sha1, 32, &[]).is_err(), "Weak hash function not detected for HKDF PRF params" ); }
0x0f, 0x10 ],
random_line_split
hkdf_test.rs
// Copyright 2020 The Tink-Rust Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // //////////////////////////////////////////////////////////////////////////////// use maplit::hashmap; use serde::Deserialize; use tink_core::Prf; use tink_prf::subtle::{validate_hkdf_prf_params, HkdfPrf}; use tink_proto::HashType; struct Rfc5869Test { hash: HashType, key: &'static str, salt: &'static str, info: &'static str, output_length: usize, okm: &'static str, } #[test] fn test_vectors_rfc5869() { // Test vectors from RFC 5869. let testvectors = [ Rfc5869Test{ hash: HashType::Sha256, key: "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b", salt: "000102030405060708090a0b0c", info: "f0f1f2f3f4f5f6f7f8f9", output_length: 42, okm: "3cb25f25faacd57a90434f64d0362f2a2d2d0a90cf1a5a4c5db02d56ecc4c5bf34007208d5b887185865", }, Rfc5869Test{ hash: HashType::Sha256, key: "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f", salt: "606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeaf", info: "b0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff", output_length: 82, okm: "b11e398dc80327a1c8e7f78c596a49344f012eda2d4efad8a050cc4c19afa97c59045a99cac7827271cb41c65e590e09da3275600c2f09b8367793a9aca3db71cc30c58179ec3e87c14c01d5c1f3434f1d87", }, Rfc5869Test{ hash: HashType::Sha256, key: "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b", salt: "", info: "", output_length: 42, okm: "8da4e775a563c18f715f802a063c5a31b8a11f5c5ee1879ec3454e5f3c738d2d9d201395faa4b61a96c8", }, Rfc5869Test{ hash: HashType::Sha1, key: "0b0b0b0b0b0b0b0b0b0b0b", salt: "000102030405060708090a0b0c", info: "f0f1f2f3f4f5f6f7f8f9", output_length: 42, okm: "085a01ea1b10f36933068b56efa5ad81a4f14b822f5b091568a9cdd4f155fda2c22e422478d305f3f896", }, Rfc5869Test{ hash: HashType::Sha1, key: "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f", salt: "606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeaf", info: "b0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff", output_length: 82, okm: "0bd770a74d1160f7c9f12cd5912a06ebff6adcae899d92191fe4305673ba2ffe8fa3f1a4e5ad79f3f334b3b202b2173c486ea37ce3d397ed034c7f9dfeb15c5e927336d0441f4c4300e2cff0d0900b52d3b4", }, Rfc5869Test{ hash: HashType::Sha1, key: "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b", salt: "", info: "", output_length: 42, okm: "0ac1af7002b3d761d1e55298da9d0506b9ae52057220a306e07b6b87e8df21d0ea00033de03984d34918", }, Rfc5869Test{ hash: HashType::Sha1, key: 
"0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c", salt: "", info: "", output_length: 42, okm: "2c91117204d745f3500d636a62f64f0ab3bae548aa53d423b0d1f27ebba6f5e5673a081d70cce7acfc48", }, ]; for v in testvectors.iter() { let key = hex::decode(v.key).expect("Could not decode key"); let salt = hex::decode(v.salt).expect("Could not decode salt"); let info = hex::decode(v.info).expect("Could not decode info"); let p = HkdfPrf::new(v.hash, &key, &salt).expect("Could not create HKDF object"); let output = p .compute_prf(&info, v.output_length) .expect("Error computing HKDF"); assert_eq!( hex::encode(output), v.okm, "Computation and test vector differ." ); } } #[derive(Debug, Deserialize)] struct HkdfTestData { #[serde(flatten)] pub suite: tink_tests::WycheproofSuite, #[serde(rename = "testGroups")] pub test_groups: Vec<HkdfTestGroup>, } #[derive(Debug, Deserialize)] struct HkdfTestGroup { #[serde(flatten)] pub group: tink_tests::WycheproofGroup, #[serde(rename = "keySize")] pub key_size: u32, pub tests: Vec<HkdfTestCase>, } #[derive(Debug, Deserialize)] struct HkdfTestCase { #[serde(flatten)] pub case: tink_tests::WycheproofCase, #[serde(with = "tink_tests::hex_string")] pub ikm: Vec<u8>, #[serde(with = "tink_tests::hex_string")] pub salt: Vec<u8>, #[serde(with = "tink_tests::hex_string")] pub info: Vec<u8>, pub size: usize, #[serde(with = "tink_tests::hex_string")] pub okm: Vec<u8>, } #[test] fn test_hkdf_prf_wycheproof_cases() { for hash in &[HashType::Sha1, HashType::Sha256, HashType::Sha512] { let hash_name = format!("{:?}", hash); let filename = format!("testvectors/hkdf_{}_test.json", hash_name.to_lowercase()); println!("wycheproof file '{}' hash {}", filename, hash_name); let bytes = tink_tests::wycheproof_data(&filename); let data: HkdfTestData = serde_json::from_slice(&bytes).unwrap(); for g in &data.test_groups { println!(" key info: key_size={}", g.key_size); for tc in &g.tests { println!( " case {} [{}] {}", tc.case.case_id, tc.case.result, tc.case.comment ); assert_eq!(tc.ikm.len() * 8, g.key_size as usize); let hkdf_prf = HkdfPrf::new(*hash, &tc.ikm, &tc.salt); let valid = tc.case.result == tink_tests::WycheproofResult::Valid; if valid && hkdf_prf.is_err() { panic!( "Could not create HKDF {:?} PRF for test case {} ({})", hash, tc.case.case_id, tc.case.comment ); } if!valid && hkdf_prf.is_err() { continue; } let res = match hkdf_prf.unwrap().compute_prf(&tc.info, tc.size) { Err(_) => { assert!( !valid, "Could not compute HKDF {:?} PRF for test case {} ({})", hash, tc.case.case_id, tc.case.comment ); continue; } Ok(r) => r, }; if valid
else { assert_ne!( res, tc.okm, "Computed HKDF {:?} PRF and invalid expected for test case {} ({}) match", hash, tc.case.case_id, tc.case.comment ); } } } } } #[test] fn test_hkdf_prf_hash() { assert!( HkdfPrf::new( HashType::Sha256, &[ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10 ], &[] ) .is_ok(), "Expected HkdfPrf::new to work with SHA256" ); assert!( HkdfPrf::new( HashType::Sha512, &[ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10 ], &[] ) .is_ok(), "Expected HkdfPrf::new to work with SHA512" ); assert!( HkdfPrf::new( HashType::Sha1, &[ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10 ], &[] ) .is_ok(), "Expected HkdfPrf::new to work with SHA1" ); assert!( HkdfPrf::new( HashType::UnknownHash, &[ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10 ], &[] ) .is_err(), "Expected HkdfPrf::new to fail with unknown hash" ); } #[test] fn test_hkdf_prf_salt() { assert!( HkdfPrf::new( HashType::Sha256, &[ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10 ], &[] ) .is_ok(), "Expected HkdfPrf::new to work empty salt" ); assert!( HkdfPrf::new( HashType::Sha256, &[ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10 ], &[0xaf, 0xfe, 0xc0, 0xff, 0xee] ) .is_ok(), "Expected HkdfPrf::new to work with salt" ); } #[test] fn test_hkdf_prf_output_length() { let testdata = hashmap! { HashType::Sha1 => 20, HashType::Sha256 => 32, HashType::Sha512 => 64, }; for (hash, length) in testdata { let prf = HkdfPrf::new( hash, &[ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, ], &[], ) .unwrap_or_else(|_| { panic!( "Expected HkdfPrf::new to work on 32 byte key with hash {:?}", hash ) }); // If overflow checks are enabled (which they are by default for tests), // this loop runs too slow, so only test every 10th length. let stride: usize = if cfg!(overflow_checks) { 1 } else { 10 }; for i in (0..=(length * 255)).step_by(stride) { let output = prf.compute_prf(&[0x01, 0x02], i).unwrap_or_else(|e| { panic!( "Expected to be able to compute HKDF {:?} PRF with {} output length: {:?}", hash, i, e ) }); assert_eq!( output.len(), i, "Expected HKDF {:?} PRF to compute {} bytes, got {}", hash, i, output.len() ); } for i in (length * 255 + 1)..(length * 255 + 100) { assert!( prf.compute_prf(&[0x01, 0x02], i).is_err(), "Expected to not be able to compute HKDF {:?} PRF with {} output length", hash, i ); } } } #[test] fn test_validate_hkdf_prf_params() { assert!( validate_hkdf_prf_params(HashType::Sha256, 32, &[]).is_ok(), "Unexpected error for valid HKDF PRF params" ); assert!( validate_hkdf_prf_params(HashType::Sha256, 32, &[0xaf, 0xfe, 0xc0, 0xff, 0xee]).is_ok(), "Unexpected error for salted valid HKDF PRF params" ); assert!( validate_hkdf_prf_params(HashType::Sha256, 4, &[]).is_err(), "Short key size not detected for HKDF PRF params" ); assert!( validate_hkdf_prf_params(HashType::UnknownHash, 32, &[]).is_err(), "Unknown hash function not detected for HKDF PRF params" ); assert!( validate_hkdf_prf_params(HashType::Sha1, 32, &[]).is_err(), "Weak hash function not detected for HKDF PRF params" ); }
{ assert_eq!( res, tc.okm, "Computed HKDF {:?} PRF and expected for test case {} ({}) do not match", hash, tc.case.case_id, tc.case.comment ); }
conditional_block
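The conditional block captured by this record follows the usual Wycheproof convention: for a "valid" case the computed PRF output must equal the expected OKM, while for an "invalid" case it must differ. A small standalone sketch of that convention; the function name is illustrative, not part of the test suite:

// True when a computed output is consistent with a Wycheproof verdict:
// "valid" cases must match the expected bytes exactly, "invalid" must not.
fn matches_verdict(computed: &[u8], expected: &[u8], valid: bool) -> bool {
    if valid {
        computed == expected
    } else {
        computed != expected
    }
}

fn main() {
    assert!(matches_verdict(b"abc", b"abc", true));
    assert!(matches_verdict(b"abc", b"abd", false));
}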
hkdf_test.rs
// Copyright 2020 The Tink-Rust Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // //////////////////////////////////////////////////////////////////////////////// use maplit::hashmap; use serde::Deserialize; use tink_core::Prf; use tink_prf::subtle::{validate_hkdf_prf_params, HkdfPrf}; use tink_proto::HashType; struct Rfc5869Test { hash: HashType, key: &'static str, salt: &'static str, info: &'static str, output_length: usize, okm: &'static str, } #[test] fn test_vectors_rfc5869() { // Test vectors from RFC 5869. let testvectors = [ Rfc5869Test{ hash: HashType::Sha256, key: "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b", salt: "000102030405060708090a0b0c", info: "f0f1f2f3f4f5f6f7f8f9", output_length: 42, okm: "3cb25f25faacd57a90434f64d0362f2a2d2d0a90cf1a5a4c5db02d56ecc4c5bf34007208d5b887185865", }, Rfc5869Test{ hash: HashType::Sha256, key: "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f", salt: "606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeaf", info: "b0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff", output_length: 82, okm: "b11e398dc80327a1c8e7f78c596a49344f012eda2d4efad8a050cc4c19afa97c59045a99cac7827271cb41c65e590e09da3275600c2f09b8367793a9aca3db71cc30c58179ec3e87c14c01d5c1f3434f1d87", }, Rfc5869Test{ hash: HashType::Sha256, key: "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b", salt: "", info: "", output_length: 42, okm: "8da4e775a563c18f715f802a063c5a31b8a11f5c5ee1879ec3454e5f3c738d2d9d201395faa4b61a96c8", }, Rfc5869Test{ hash: HashType::Sha1, key: "0b0b0b0b0b0b0b0b0b0b0b", salt: "000102030405060708090a0b0c", info: "f0f1f2f3f4f5f6f7f8f9", output_length: 42, okm: "085a01ea1b10f36933068b56efa5ad81a4f14b822f5b091568a9cdd4f155fda2c22e422478d305f3f896", }, Rfc5869Test{ hash: HashType::Sha1, key: "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f", salt: "606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeaf", info: "b0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff", output_length: 82, okm: "0bd770a74d1160f7c9f12cd5912a06ebff6adcae899d92191fe4305673ba2ffe8fa3f1a4e5ad79f3f334b3b202b2173c486ea37ce3d397ed034c7f9dfeb15c5e927336d0441f4c4300e2cff0d0900b52d3b4", }, Rfc5869Test{ hash: HashType::Sha1, key: "0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b", salt: "", info: "", output_length: 42, okm: "0ac1af7002b3d761d1e55298da9d0506b9ae52057220a306e07b6b87e8df21d0ea00033de03984d34918", }, Rfc5869Test{ hash: HashType::Sha1, key: 
"0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c", salt: "", info: "", output_length: 42, okm: "2c91117204d745f3500d636a62f64f0ab3bae548aa53d423b0d1f27ebba6f5e5673a081d70cce7acfc48", }, ]; for v in testvectors.iter() { let key = hex::decode(v.key).expect("Could not decode key"); let salt = hex::decode(v.salt).expect("Could not decode salt"); let info = hex::decode(v.info).expect("Could not decode info"); let p = HkdfPrf::new(v.hash, &key, &salt).expect("Could not create HKDF object"); let output = p .compute_prf(&info, v.output_length) .expect("Error computing HKDF"); assert_eq!( hex::encode(output), v.okm, "Computation and test vector differ." ); } } #[derive(Debug, Deserialize)] struct HkdfTestData { #[serde(flatten)] pub suite: tink_tests::WycheproofSuite, #[serde(rename = "testGroups")] pub test_groups: Vec<HkdfTestGroup>, } #[derive(Debug, Deserialize)] struct HkdfTestGroup { #[serde(flatten)] pub group: tink_tests::WycheproofGroup, #[serde(rename = "keySize")] pub key_size: u32, pub tests: Vec<HkdfTestCase>, } #[derive(Debug, Deserialize)] struct HkdfTestCase { #[serde(flatten)] pub case: tink_tests::WycheproofCase, #[serde(with = "tink_tests::hex_string")] pub ikm: Vec<u8>, #[serde(with = "tink_tests::hex_string")] pub salt: Vec<u8>, #[serde(with = "tink_tests::hex_string")] pub info: Vec<u8>, pub size: usize, #[serde(with = "tink_tests::hex_string")] pub okm: Vec<u8>, } #[test] fn
() { for hash in &[HashType::Sha1, HashType::Sha256, HashType::Sha512] { let hash_name = format!("{:?}", hash); let filename = format!("testvectors/hkdf_{}_test.json", hash_name.to_lowercase()); println!("wycheproof file '{}' hash {}", filename, hash_name); let bytes = tink_tests::wycheproof_data(&filename); let data: HkdfTestData = serde_json::from_slice(&bytes).unwrap(); for g in &data.test_groups { println!(" key info: key_size={}", g.key_size); for tc in &g.tests { println!( " case {} [{}] {}", tc.case.case_id, tc.case.result, tc.case.comment ); assert_eq!(tc.ikm.len() * 8, g.key_size as usize); let hkdf_prf = HkdfPrf::new(*hash, &tc.ikm, &tc.salt); let valid = tc.case.result == tink_tests::WycheproofResult::Valid; if valid && hkdf_prf.is_err() { panic!( "Could not create HKDF {:?} PRF for test case {} ({})", hash, tc.case.case_id, tc.case.comment ); } if!valid && hkdf_prf.is_err() { continue; } let res = match hkdf_prf.unwrap().compute_prf(&tc.info, tc.size) { Err(_) => { assert!( !valid, "Could not compute HKDF {:?} PRF for test case {} ({})", hash, tc.case.case_id, tc.case.comment ); continue; } Ok(r) => r, }; if valid { assert_eq!( res, tc.okm, "Computed HKDF {:?} PRF and expected for test case {} ({}) do not match", hash, tc.case.case_id, tc.case.comment ); } else { assert_ne!( res, tc.okm, "Computed HKDF {:?} PRF and invalid expected for test case {} ({}) match", hash, tc.case.case_id, tc.case.comment ); } } } } } #[test] fn test_hkdf_prf_hash() { assert!( HkdfPrf::new( HashType::Sha256, &[ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10 ], &[] ) .is_ok(), "Expected HkdfPrf::new to work with SHA256" ); assert!( HkdfPrf::new( HashType::Sha512, &[ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10 ], &[] ) .is_ok(), "Expected HkdfPrf::new to work with SHA512" ); assert!( HkdfPrf::new( HashType::Sha1, &[ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10 ], &[] ) .is_ok(), "Expected HkdfPrf::new to work with SHA1" ); assert!( HkdfPrf::new( HashType::UnknownHash, &[ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10 ], &[] ) .is_err(), "Expected HkdfPrf::new to fail with unknown hash" ); } #[test] fn test_hkdf_prf_salt() { assert!( HkdfPrf::new( HashType::Sha256, &[ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10 ], &[] ) .is_ok(), "Expected HkdfPrf::new to work empty salt" ); assert!( HkdfPrf::new( HashType::Sha256, &[ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10 ], &[0xaf, 0xfe, 0xc0, 0xff, 0xee] ) .is_ok(), "Expected HkdfPrf::new to work with salt" ); } #[test] fn test_hkdf_prf_output_length() { let testdata = hashmap! { HashType::Sha1 => 20, HashType::Sha256 => 32, HashType::Sha512 => 64, }; for (hash, length) in testdata { let prf = HkdfPrf::new( hash, &[ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, ], &[], ) .unwrap_or_else(|_| { panic!( "Expected HkdfPrf::new to work on 32 byte key with hash {:?}", hash ) }); // If overflow checks are enabled (which they are by default for tests), // this loop runs too slow, so only test every 10th length. 
let stride: usize = if cfg!(overflow_checks) { 10 } else { 1 }; for i in (0..=(length * 255)).step_by(stride) { let output = prf.compute_prf(&[0x01, 0x02], i).unwrap_or_else(|e| { panic!( "Expected to be able to compute HKDF {:?} PRF with {} output length: {:?}", hash, i, e ) }); assert_eq!( output.len(), i, "Expected HKDF {:?} PRF to compute {} bytes, got {}", hash, i, output.len() ); } for i in (length * 255 + 1)..(length * 255 + 100) { assert!( prf.compute_prf(&[0x01, 0x02], i).is_err(), "Expected to not be able to compute HKDF {:?} PRF with {} output length", hash, i ); } } } #[test] fn test_validate_hkdf_prf_params() { assert!( validate_hkdf_prf_params(HashType::Sha256, 32, &[]).is_ok(), "Unexpected error for valid HKDF PRF params" ); assert!( validate_hkdf_prf_params(HashType::Sha256, 32, &[0xaf, 0xfe, 0xc0, 0xff, 0xee]).is_ok(), "Unexpected error for salted valid HKDF PRF params" ); assert!( validate_hkdf_prf_params(HashType::Sha256, 4, &[]).is_err(), "Short key size not detected for HKDF PRF params" ); assert!( validate_hkdf_prf_params(HashType::UnknownHash, 32, &[]).is_err(), "Unknown hash function not detected for HKDF PRF params" ); assert!( validate_hkdf_prf_params(HashType::Sha1, 32, &[]).is_err(), "Weak hash function not detected for HKDF PRF params" ); }
test_hkdf_prf_wycheproof_cases
identifier_name
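The output-length loops in test_hkdf_prf_output_length stop at 255 * hash_len bytes, which is the maximum HKDF-Expand can produce under RFC 5869 (at most 255 blocks of hash_len bytes each). A small helper expressing that bound, using the hash lengths from the test's hashmap (SHA-1: 20, SHA-256: 32, SHA-512: 64); the helper name is illustrative:

// Maximum HKDF output per RFC 5869.
fn max_hkdf_output(hash_len: usize) -> usize {
    255 * hash_len
}

fn main() {
    assert_eq!(max_hkdf_output(20), 5_100);  // SHA-1
    assert_eq!(max_hkdf_output(32), 8_160);  // SHA-256
    assert_eq!(max_hkdf_output(64), 16_320); // SHA-512
}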
config.rs
//! Tendermint configuration file types (with serde parsers/serializers) //! //! This module contains types which correspond to the following config files: //! //! - `config.toml`: `config::TendermintConfig` //! - `node_key.rs`: `config::node_key::NodeKey` //! - `priv_validator_key.rs`: `config::priv_validator_key::PrivValidatorKey` mod node_key; mod priv_validator_key; pub use self::{node_key::NodeKey, priv_validator_key::PrivValidatorKey}; use crate::{ abci::tag, error::{Error, ErrorKind}, genesis::Genesis, net, node, Moniker, Timeout, }; use serde::{de, de::Error as _, ser, Deserialize, Serialize}; use std::{ collections::BTreeMap, fmt, fs, path::{Path, PathBuf}, str::FromStr, }; /// Tendermint `config.toml` file #[derive(Clone, Debug, Deserialize, Serialize)] pub struct TendermintConfig { /// TCP or UNIX socket address of the ABCI application, /// or the name of an ABCI application compiled in with the Tendermint binary. pub proxy_app: net::Address, /// A custom human readable name for this node pub moniker: Moniker, /// If this node is many blocks behind the tip of the chain, FastSync /// allows them to catchup quickly by downloading blocks in parallel /// and verifying their commits pub fast_sync: bool, /// Database backend: `leveldb | memdb | cleveldb` pub db_backend: DbBackend, /// Database directory pub db_dir: PathBuf, /// Output level for logging, including package level options pub log_level: LogLevel, /// Output format: 'plain' (colored text) or 'json' pub log_format: LogFormat, /// Path to the JSON file containing the initial validator set and other meta data pub genesis_file: PathBuf, /// Path to the JSON file containing the private key to use as a validator in the consensus protocol pub priv_validator_key_file: Option<PathBuf>, /// Path to the JSON file containing the last sign state of a validator pub priv_validator_state_file: PathBuf, /// TCP or UNIX socket address for Tendermint to listen on for /// connections from an external PrivValidator process #[serde(deserialize_with = "deserialize_optional_value")] pub priv_validator_laddr: Option<net::Address>, /// Path to the JSON file containing the private key to use for node authentication in the p2p protocol pub node_key_file: PathBuf, /// Mechanism to connect to the ABCI application: socket | grpc pub abci: AbciMode, /// TCP or UNIX socket address for the profiling server to listen on #[serde(deserialize_with = "deserialize_optional_value")] pub prof_laddr: Option<net::Address>, /// If `true`, query the ABCI app on connecting to a new peer /// so the app can decide if we should keep the connection or not pub filter_peers: bool, /// rpc server configuration options pub rpc: RpcConfig, /// peer to peer configuration options pub p2p: P2PConfig, /// mempool configuration options pub mempool: MempoolConfig, /// consensus configuration options pub consensus: ConsensusConfig, /// transactions indexer configuration options pub tx_index: TxIndexConfig, /// instrumentation configuration options pub instrumentation: InstrumentationConfig, } impl TendermintConfig { /// Parse Tendermint `config.toml` pub fn parse_toml<T: AsRef<str>>(toml_string: T) -> Result<Self, Error> { Ok(toml::from_str(toml_string.as_ref())?) 
} /// Load `config.toml` from a file pub fn load_toml_file<P>(path: &P) -> Result<Self, Error> where P: AsRef<Path>, { let toml_string = fs::read_to_string(path).map_err(|e| { err!( ErrorKind::Parse, "couldn't open {}: {}", path.as_ref().display(), e ) })?; Self::parse_toml(toml_string) } /// Load `genesis.json` file from the configured location pub fn load_genesis_file(&self, home: impl AsRef<Path>) -> Result<Genesis, Error> { let path = home.as_ref().join(&self.genesis_file); let genesis_json = fs::read_to_string(&path) .map_err(|e| err!(ErrorKind::Parse, "couldn't open {}: {}", path.display(), e))?; Ok(serde_json::from_str(genesis_json.as_ref())?) } /// Load `node_key.json` file from the configured location pub fn load_node_key(&self, home: impl AsRef<Path>) -> Result<NodeKey, Error> { let path = home.as_ref().join(&self.node_key_file); NodeKey::load_json_file(&path) } } /// Database backend #[derive(Copy, Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)] pub enum DbBackend { /// LevelDB backend #[serde(rename = "leveldb")] LevelDb, /// MemDB backend #[serde(rename = "memdb")] MemDb, /// CLevelDB backend #[serde(rename = "cleveldb")] CLevelDb, } /// Loglevel configuration #[derive(Clone, Debug, Eq, PartialEq)] pub struct LogLevel(BTreeMap<String, String>); impl LogLevel { /// Get the setting for the given key pub fn get<S>(&self, key: S) -> Option<&str> where S: AsRef<str>, { self.0.get(key.as_ref()).map(AsRef::as_ref) } /// Iterate over the levels pub fn iter(&self) -> LogLevelIter<'_> { self.0.iter() } } /// Iterator over log levels pub type LogLevelIter<'a> = std::collections::btree_map::Iter<'a, String, String>; impl FromStr for LogLevel { type Err = Error; fn from_str(s: &str) -> Result<Self, Self::Err> { let mut levels = BTreeMap::new(); for level in s.split(',') { let parts = level.split(':').collect::<Vec<_>>(); if parts.len()!= 2 { return Err(err!(ErrorKind::Parse, "error parsing log level: {}", level)); } let key = parts[0].to_owned(); let value = parts[1].to_owned(); if levels.insert(key, value).is_some() { return Err(err!( ErrorKind::Parse, "duplicate log level setting for: {}", level )); } } Ok(LogLevel(levels)) } } impl fmt::Display for LogLevel { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { for (i, (k, v)) in self.0.iter().enumerate() { write!(f, "{}:{}", k, v)?; if i < self.0.len() - 1 { write!(f, ",")?; } } Ok(()) } } impl<'de> Deserialize<'de> for LogLevel { fn deserialize<D: de::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> { let levels = String::deserialize(deserializer)?; Ok(Self::from_str(&levels).map_err(|e| D::Error::custom(format!("{}", e)))?) 
} } impl Serialize for LogLevel { fn serialize<S: ser::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> { self.to_string().serialize(serializer) } } /// Logging format #[derive(Copy, Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)] pub enum LogFormat { /// Plain (colored text) #[serde(rename = "plain")] Plain, /// JSON #[serde(rename = "json")] Json, } /// Mechanism to connect to the ABCI application: socket | grpc #[derive(Copy, Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)] pub enum AbciMode { /// Socket #[serde(rename = "socket")] Socket, /// GRPC #[serde(rename = "grpc")] Grpc, } /// Tendermint `config.toml` file's `[rpc]` section #[derive(Clone, Debug, Deserialize, Serialize)] pub struct RpcConfig { /// TCP or UNIX socket address for the RPC server to listen on pub laddr: net::Address, /// A list of origins a cross-domain request can be executed from /// Default value `[]` disables cors support /// Use `["*"]` to allow any origin pub cors_allowed_origins: Vec<CorsOrigin>, /// A list of methods the client is allowed to use with cross-domain requests pub cors_allowed_methods: Vec<CorsMethod>, /// A list of non simple headers the client is allowed to use with cross-domain requests pub cors_allowed_headers: Vec<CorsHeader>, /// TCP or UNIX socket address for the gRPC server to listen on /// NOTE: This server only supports `/broadcast_tx_commit` #[serde(deserialize_with = "deserialize_optional_value")] pub grpc_laddr: Option<net::Address>, /// Maximum number of simultaneous GRPC connections. /// Does not include RPC (HTTP&WebSocket) connections. See `max_open_connections`. pub grpc_max_open_connections: u64, /// Activate unsafe RPC commands like `/dial_seeds` and `/unsafe_flush_mempool` #[serde(rename = "unsafe")] pub unsafe_commands: bool, /// Maximum number of simultaneous connections (including WebSocket). /// Does not include gRPC connections. See `grpc_max_open_connections`. pub max_open_connections: u64, /// Maximum number of unique clientIDs that can `/subscribe`. pub max_subscription_clients: u64, /// Maximum number of unique queries a given client can `/subscribe` to. pub max_subscriptions_per_client: u64, /// How long to wait for a tx to be committed during `/broadcast_tx_commit`. pub timeout_broadcast_tx_commit: Timeout, /// The name of a file containing certificate that is used to create the HTTPS server. #[serde(deserialize_with = "deserialize_optional_value")] pub tls_cert_file: Option<PathBuf>, /// The name of a file containing matching private key that is used to create the HTTPS server. #[serde(deserialize_with = "deserialize_optional_value")] pub tls_key_file: Option<PathBuf>, } /// Origin hosts allowed with CORS requests to the RPC API // TODO(tarcieri): parse and validate this string #[derive(Clone, Debug, Deserialize, Serialize)] pub struct CorsOrigin(String); impl AsRef<str> for CorsOrigin { fn as_ref(&self) -> &str { self.0.as_ref() } } impl fmt::Display for CorsOrigin { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", &self.0) } } /// HTTP methods allowed with CORS requests to the RPC API // TODO(tarcieri): parse and validate this string #[derive(Clone, Debug, Deserialize, Serialize)] pub struct
(String); impl AsRef<str> for CorsMethod { fn as_ref(&self) -> &str { self.0.as_ref() } } impl fmt::Display for CorsMethod { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", &self.0) } } /// HTTP headers allowed to be sent via CORS to the RPC API // TODO(tarcieri): parse and validate this string #[derive(Clone, Debug, Deserialize, Serialize)] pub struct CorsHeader(String); impl AsRef<str> for CorsHeader { fn as_ref(&self) -> &str { self.0.as_ref() } } impl fmt::Display for CorsHeader { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", &self.0) } } /// peer to peer configuration options #[derive(Clone, Debug, Deserialize, Serialize)] pub struct P2PConfig { /// Address to listen for incoming connections pub laddr: net::Address, /// Address to advertise to peers for them to dial /// If empty, will use the same port as the laddr, /// and will introspect on the listener or use UPnP /// to figure out the address. #[serde(deserialize_with = "deserialize_optional_value")] pub external_address: Option<net::Address>, /// Comma separated list of seed nodes to connect to #[serde( serialize_with = "serialize_comma_separated_list", deserialize_with = "deserialize_comma_separated_list" )] pub seeds: Vec<net::Address>, /// Comma separated list of nodes to keep persistent connections to #[serde( serialize_with = "serialize_comma_separated_list", deserialize_with = "deserialize_comma_separated_list" )] pub persistent_peers: Vec<net::Address>, /// UPNP port forwarding pub upnp: bool, /// Path to address book pub addr_book_file: PathBuf, /// Set `true` for strict address routability rules /// Set `false` for private or local networks pub addr_book_strict: bool, /// Maximum number of inbound peers pub max_num_inbound_peers: u64, /// Maximum number of outbound peers to connect to, excluding persistent peers pub max_num_outbound_peers: u64, /// Time to wait before flushing messages out on the connection pub flush_throttle_timeout: Timeout, /// Maximum size of a message packet payload, in bytes pub max_packet_msg_payload_size: u64, /// Rate at which packets can be sent, in bytes/second pub send_rate: TransferRate, /// Rate at which packets can be received, in bytes/second pub recv_rate: TransferRate, /// Set `true` to enable the peer-exchange reactor pub pex: bool, /// Seed mode, in which node constantly crawls the network and looks for /// peers. If another node asks it for addresses, it responds and disconnects. /// /// Does not work if the peer-exchange reactor is disabled. pub seed_mode: bool, /// Comma separated list of peer IDs to keep private (will not be gossiped to other peers) #[serde( serialize_with = "serialize_comma_separated_list", deserialize_with = "deserialize_comma_separated_list" )] pub private_peer_ids: Vec<node::Id>, /// Toggle to disable guard against peers connecting from the same ip. pub allow_duplicate_ip: bool, /// Handshake timeout pub handshake_timeout: Timeout, /// Timeout when dialing other peers pub dial_timeout: Timeout, } /// mempool configuration options #[derive(Clone, Debug, Deserialize, Serialize)] pub struct MempoolConfig { /// Recheck enabled pub recheck: bool, /// Broadcast enabled pub broadcast: bool, /// WAL dir #[serde(deserialize_with = "deserialize_optional_value")] pub wal_dir: Option<PathBuf>, /// Maximum number of transactions in the mempool pub size: u64, /// Limit the total size of all txs in the mempool. /// This only accounts for raw transactions (e.g. 
given 1MB transactions and /// `max_txs_bytes`=5MB, mempool will only accept 5 transactions). pub max_txs_bytes: u64, /// Size of the cache (used to filter transactions we saw earlier) in transactions pub cache_size: u64, } /// consensus configuration options #[derive(Clone, Debug, Deserialize, Serialize)] pub struct ConsensusConfig { /// Path to WAL file pub wal_file: PathBuf, /// Propose timeout pub timeout_propose: Timeout, /// Propose timeout delta pub timeout_propose_delta: Timeout, /// Prevote timeout pub timeout_prevote: Timeout, /// Prevote timeout delta pub timeout_prevote_delta: Timeout, /// Precommit timeout pub timeout_precommit: Timeout, /// Precommit timeout delta pub timeout_precommit_delta: Timeout, /// Commit timeout pub timeout_commit: Timeout, /// Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) pub skip_timeout_commit: bool, /// EmptyBlocks mode pub create_empty_blocks: bool, /// Interval between empty blocks pub create_empty_blocks_interval: Timeout, /// Reactor sleep duration pub peer_gossip_sleep_duration: Timeout, /// Reactor query sleep duration pub peer_query_maj23_sleep_duration: Timeout, } /// transactions indexer configuration options #[derive(Clone, Debug, Deserialize, Serialize)] pub struct TxIndexConfig { /// What indexer to use for transactions #[serde(default)] pub indexer: TxIndexer, /// Comma-separated list of tags to index (by default the only tag is `tx.hash`) // TODO(tarcieri): switch to `tendermint::abci::Tag` #[serde( serialize_with = "serialize_comma_separated_list", deserialize_with = "deserialize_comma_separated_list" )] pub index_tags: Vec<tag::Key>, /// When set to true, tells indexer to index all tags (predefined tags: /// `tx.hash`, `tx.height` and all tags from DeliverTx responses). pub index_all_tags: bool, } /// What indexer to use for transactions #[derive(Copy, Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)] pub enum TxIndexer { /// "null" // TODO(tarcieri): use an `Option` type here? #[serde(rename = "null")] Null, /// "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). #[serde(rename = "kv")] Kv, } impl Default for TxIndexer { fn default() -> TxIndexer { TxIndexer::Kv } } /// instrumentation configuration options #[derive(Clone, Debug, Deserialize, Serialize)] pub struct InstrumentationConfig { /// When `true`, Prometheus metrics are served under /metrics on /// PrometheusListenAddr. pub prometheus: bool, /// Address to listen for Prometheus collector(s) connections // TODO(tarcieri): parse to `tendermint::net::Addr` pub prometheus_listen_addr: String, /// Maximum number of simultaneous connections. 
pub max_open_connections: u64, /// Instrumentation namespace pub namespace: String, } /// Rate at which bytes can be sent/received #[derive(Copy, Clone, Debug, Deserialize, Serialize)] pub struct TransferRate(u64); impl TransferRate { /// Get the transfer rate in bytes per second pub fn bytes_per_sec(self) -> u64 { self.0 } } /// Deserialize `Option<T: FromStr>` where an empty string indicates `None` fn deserialize_optional_value<'de, D, T, E>(deserializer: D) -> Result<Option<T>, D::Error> where D: de::Deserializer<'de>, T: FromStr<Err = E>, E: fmt::Display, { let string = String::deserialize(deserializer)?; if string.is_empty() { return Ok(None); } string .parse() .map(Some) .map_err(|e| D::Error::custom(format!("{}", e))) } /// Deserialize a comma separated list of types that impl `FromStr` as a `Vec` fn deserialize_comma_separated_list<'de, D, T, E>(deserializer: D) -> Result<Vec<T>, D::Error> where D: de::Deserializer<'de>, T: FromStr<Err = E>, E: fmt::Display, { let mut result = vec![]; let string = String::deserialize(deserializer)?; if string.is_empty() { return Ok(result); } for item in string.split(',') { result.push( item.parse() .map_err(|e| D::Error::custom(format!("{}", e)))?, ); } Ok(result) } /// Serialize a comma separated list of types that impl `ToString` fn serialize_comma_separated_list<S, T>(list: &[T], serializer: S) -> Result<S::Ok, S::Error> where S: ser::Serializer, T: ToString, { let str_list = list.iter().map(|addr| addr.to_string()).collect::<Vec<_>>(); str_list.join(",").serialize(serializer) }
CorsMethod
identifier_name
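For context on the config.rs records above and below, here is a standalone sketch of the comma-separated "module:level" format that LogLevel::from_str parses. It reimplements the same two checks (wrong part count, duplicate keys) outside the config module purely for illustration; the function name is hypothetical:

use std::collections::BTreeMap;

// Parse "module:level" pairs separated by commas, rejecting malformed
// entries and duplicate module keys, as LogLevel::from_str does above.
fn parse_log_level(s: &str) -> Result<BTreeMap<String, String>, String> {
    let mut levels = BTreeMap::new();
    for level in s.split(',') {
        let parts: Vec<&str> = level.split(':').collect();
        if parts.len() != 2 {
            return Err(format!("error parsing log level: {}", level));
        }
        if levels.insert(parts[0].to_owned(), parts[1].to_owned()).is_some() {
            return Err(format!("duplicate log level setting for: {}", level));
        }
    }
    Ok(levels)
}

fn main() {
    let levels = parse_log_level("main:info,state:debug,*:error").unwrap();
    assert_eq!(levels.get("state").map(String::as_str), Some("debug"));
}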
config.rs
//! Tendermint configuration file types (with serde parsers/serializers) //! //! This module contains types which correspond to the following config files: //! //! - `config.toml`: `config::TendermintConfig` //! - `node_key.rs`: `config::node_key::NodeKey` //! - `priv_validator_key.rs`: `config::priv_validator_key::PrivValidatorKey` mod node_key; mod priv_validator_key; pub use self::{node_key::NodeKey, priv_validator_key::PrivValidatorKey}; use crate::{ abci::tag, error::{Error, ErrorKind}, genesis::Genesis, net, node, Moniker, Timeout, }; use serde::{de, de::Error as _, ser, Deserialize, Serialize}; use std::{ collections::BTreeMap, fmt, fs, path::{Path, PathBuf}, str::FromStr, }; /// Tendermint `config.toml` file #[derive(Clone, Debug, Deserialize, Serialize)] pub struct TendermintConfig { /// TCP or UNIX socket address of the ABCI application, /// or the name of an ABCI application compiled in with the Tendermint binary. pub proxy_app: net::Address, /// A custom human readable name for this node pub moniker: Moniker, /// If this node is many blocks behind the tip of the chain, FastSync /// allows them to catchup quickly by downloading blocks in parallel /// and verifying their commits pub fast_sync: bool, /// Database backend: `leveldb | memdb | cleveldb` pub db_backend: DbBackend, /// Database directory pub db_dir: PathBuf, /// Output level for logging, including package level options pub log_level: LogLevel, /// Output format: 'plain' (colored text) or 'json' pub log_format: LogFormat, /// Path to the JSON file containing the initial validator set and other meta data pub genesis_file: PathBuf, /// Path to the JSON file containing the private key to use as a validator in the consensus protocol pub priv_validator_key_file: Option<PathBuf>, /// Path to the JSON file containing the last sign state of a validator pub priv_validator_state_file: PathBuf, /// TCP or UNIX socket address for Tendermint to listen on for /// connections from an external PrivValidator process #[serde(deserialize_with = "deserialize_optional_value")] pub priv_validator_laddr: Option<net::Address>, /// Path to the JSON file containing the private key to use for node authentication in the p2p protocol pub node_key_file: PathBuf, /// Mechanism to connect to the ABCI application: socket | grpc pub abci: AbciMode, /// TCP or UNIX socket address for the profiling server to listen on #[serde(deserialize_with = "deserialize_optional_value")] pub prof_laddr: Option<net::Address>, /// If `true`, query the ABCI app on connecting to a new peer /// so the app can decide if we should keep the connection or not pub filter_peers: bool, /// rpc server configuration options pub rpc: RpcConfig, /// peer to peer configuration options pub p2p: P2PConfig, /// mempool configuration options pub mempool: MempoolConfig, /// consensus configuration options pub consensus: ConsensusConfig, /// transactions indexer configuration options pub tx_index: TxIndexConfig, /// instrumentation configuration options pub instrumentation: InstrumentationConfig, } impl TendermintConfig { /// Parse Tendermint `config.toml` pub fn parse_toml<T: AsRef<str>>(toml_string: T) -> Result<Self, Error> { Ok(toml::from_str(toml_string.as_ref())?) 
} /// Load `config.toml` from a file pub fn load_toml_file<P>(path: &P) -> Result<Self, Error> where P: AsRef<Path>, { let toml_string = fs::read_to_string(path).map_err(|e| { err!( ErrorKind::Parse, "couldn't open {}: {}", path.as_ref().display(), e ) })?; Self::parse_toml(toml_string) } /// Load `genesis.json` file from the configured location pub fn load_genesis_file(&self, home: impl AsRef<Path>) -> Result<Genesis, Error> { let path = home.as_ref().join(&self.genesis_file); let genesis_json = fs::read_to_string(&path) .map_err(|e| err!(ErrorKind::Parse, "couldn't open {}: {}", path.display(), e))?; Ok(serde_json::from_str(genesis_json.as_ref())?) } /// Load `node_key.json` file from the configured location pub fn load_node_key(&self, home: impl AsRef<Path>) -> Result<NodeKey, Error> { let path = home.as_ref().join(&self.node_key_file); NodeKey::load_json_file(&path) } } /// Database backend #[derive(Copy, Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)] pub enum DbBackend { /// LevelDB backend #[serde(rename = "leveldb")] LevelDb, /// MemDB backend #[serde(rename = "memdb")] MemDb, /// CLevelDB backend #[serde(rename = "cleveldb")] CLevelDb, } /// Loglevel configuration #[derive(Clone, Debug, Eq, PartialEq)] pub struct LogLevel(BTreeMap<String, String>); impl LogLevel { /// Get the setting for the given key pub fn get<S>(&self, key: S) -> Option<&str> where S: AsRef<str>, { self.0.get(key.as_ref()).map(AsRef::as_ref) } /// Iterate over the levels pub fn iter(&self) -> LogLevelIter<'_> { self.0.iter() } } /// Iterator over log levels pub type LogLevelIter<'a> = std::collections::btree_map::Iter<'a, String, String>; impl FromStr for LogLevel { type Err = Error; fn from_str(s: &str) -> Result<Self, Self::Err> { let mut levels = BTreeMap::new(); for level in s.split(',') { let parts = level.split(':').collect::<Vec<_>>(); if parts.len() != 2
let key = parts[0].to_owned(); let value = parts[1].to_owned(); if levels.insert(key, value).is_some() { return Err(err!( ErrorKind::Parse, "duplicate log level setting for: {}", level )); } } Ok(LogLevel(levels)) } } impl fmt::Display for LogLevel { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { for (i, (k, v)) in self.0.iter().enumerate() { write!(f, "{}:{}", k, v)?; if i < self.0.len() - 1 { write!(f, ",")?; } } Ok(()) } } impl<'de> Deserialize<'de> for LogLevel { fn deserialize<D: de::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> { let levels = String::deserialize(deserializer)?; Ok(Self::from_str(&levels).map_err(|e| D::Error::custom(format!("{}", e)))?) } } impl Serialize for LogLevel { fn serialize<S: ser::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> { self.to_string().serialize(serializer) } } /// Logging format #[derive(Copy, Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)] pub enum LogFormat { /// Plain (colored text) #[serde(rename = "plain")] Plain, /// JSON #[serde(rename = "json")] Json, } /// Mechanism to connect to the ABCI application: socket | grpc #[derive(Copy, Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)] pub enum AbciMode { /// Socket #[serde(rename = "socket")] Socket, /// GRPC #[serde(rename = "grpc")] Grpc, } /// Tendermint `config.toml` file's `[rpc]` section #[derive(Clone, Debug, Deserialize, Serialize)] pub struct RpcConfig { /// TCP or UNIX socket address for the RPC server to listen on pub laddr: net::Address, /// A list of origins a cross-domain request can be executed from /// Default value `[]` disables cors support /// Use `["*"]` to allow any origin pub cors_allowed_origins: Vec<CorsOrigin>, /// A list of methods the client is allowed to use with cross-domain requests pub cors_allowed_methods: Vec<CorsMethod>, /// A list of non simple headers the client is allowed to use with cross-domain requests pub cors_allowed_headers: Vec<CorsHeader>, /// TCP or UNIX socket address for the gRPC server to listen on /// NOTE: This server only supports `/broadcast_tx_commit` #[serde(deserialize_with = "deserialize_optional_value")] pub grpc_laddr: Option<net::Address>, /// Maximum number of simultaneous GRPC connections. /// Does not include RPC (HTTP&WebSocket) connections. See `max_open_connections`. pub grpc_max_open_connections: u64, /// Activate unsafe RPC commands like `/dial_seeds` and `/unsafe_flush_mempool` #[serde(rename = "unsafe")] pub unsafe_commands: bool, /// Maximum number of simultaneous connections (including WebSocket). /// Does not include gRPC connections. See `grpc_max_open_connections`. pub max_open_connections: u64, /// Maximum number of unique clientIDs that can `/subscribe`. pub max_subscription_clients: u64, /// Maximum number of unique queries a given client can `/subscribe` to. pub max_subscriptions_per_client: u64, /// How long to wait for a tx to be committed during `/broadcast_tx_commit`. pub timeout_broadcast_tx_commit: Timeout, /// The name of a file containing certificate that is used to create the HTTPS server. #[serde(deserialize_with = "deserialize_optional_value")] pub tls_cert_file: Option<PathBuf>, /// The name of a file containing matching private key that is used to create the HTTPS server. 
#[serde(deserialize_with = "deserialize_optional_value")] pub tls_key_file: Option<PathBuf>, } /// Origin hosts allowed with CORS requests to the RPC API // TODO(tarcieri): parse and validate this string #[derive(Clone, Debug, Deserialize, Serialize)] pub struct CorsOrigin(String); impl AsRef<str> for CorsOrigin { fn as_ref(&self) -> &str { self.0.as_ref() } } impl fmt::Display for CorsOrigin { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", &self.0) } } /// HTTP methods allowed with CORS requests to the RPC API // TODO(tarcieri): parse and validate this string #[derive(Clone, Debug, Deserialize, Serialize)] pub struct CorsMethod(String); impl AsRef<str> for CorsMethod { fn as_ref(&self) -> &str { self.0.as_ref() } } impl fmt::Display for CorsMethod { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", &self.0) } } /// HTTP headers allowed to be sent via CORS to the RPC API // TODO(tarcieri): parse and validate this string #[derive(Clone, Debug, Deserialize, Serialize)] pub struct CorsHeader(String); impl AsRef<str> for CorsHeader { fn as_ref(&self) -> &str { self.0.as_ref() } } impl fmt::Display for CorsHeader { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", &self.0) } } /// peer to peer configuration options #[derive(Clone, Debug, Deserialize, Serialize)] pub struct P2PConfig { /// Address to listen for incoming connections pub laddr: net::Address, /// Address to advertise to peers for them to dial /// If empty, will use the same port as the laddr, /// and will introspect on the listener or use UPnP /// to figure out the address. #[serde(deserialize_with = "deserialize_optional_value")] pub external_address: Option<net::Address>, /// Comma separated list of seed nodes to connect to #[serde( serialize_with = "serialize_comma_separated_list", deserialize_with = "deserialize_comma_separated_list" )] pub seeds: Vec<net::Address>, /// Comma separated list of nodes to keep persistent connections to #[serde( serialize_with = "serialize_comma_separated_list", deserialize_with = "deserialize_comma_separated_list" )] pub persistent_peers: Vec<net::Address>, /// UPNP port forwarding pub upnp: bool, /// Path to address book pub addr_book_file: PathBuf, /// Set `true` for strict address routability rules /// Set `false` for private or local networks pub addr_book_strict: bool, /// Maximum number of inbound peers pub max_num_inbound_peers: u64, /// Maximum number of outbound peers to connect to, excluding persistent peers pub max_num_outbound_peers: u64, /// Time to wait before flushing messages out on the connection pub flush_throttle_timeout: Timeout, /// Maximum size of a message packet payload, in bytes pub max_packet_msg_payload_size: u64, /// Rate at which packets can be sent, in bytes/second pub send_rate: TransferRate, /// Rate at which packets can be received, in bytes/second pub recv_rate: TransferRate, /// Set `true` to enable the peer-exchange reactor pub pex: bool, /// Seed mode, in which node constantly crawls the network and looks for /// peers. If another node asks it for addresses, it responds and disconnects. /// /// Does not work if the peer-exchange reactor is disabled. pub seed_mode: bool, /// Comma separated list of peer IDs to keep private (will not be gossiped to other peers) #[serde( serialize_with = "serialize_comma_separated_list", deserialize_with = "deserialize_comma_separated_list" )] pub private_peer_ids: Vec<node::Id>, /// Toggle to disable guard against peers connecting from the same ip. 
pub allow_duplicate_ip: bool, /// Handshake timeout pub handshake_timeout: Timeout, /// Timeout when dialing other peers pub dial_timeout: Timeout, } /// mempool configuration options #[derive(Clone, Debug, Deserialize, Serialize)] pub struct MempoolConfig { /// Recheck enabled pub recheck: bool, /// Broadcast enabled pub broadcast: bool, /// WAL dir #[serde(deserialize_with = "deserialize_optional_value")] pub wal_dir: Option<PathBuf>, /// Maximum number of transactions in the mempool pub size: u64, /// Limit the total size of all txs in the mempool. /// This only accounts for raw transactions (e.g. given 1MB transactions and /// `max_txs_bytes`=5MB, mempool will only accept 5 transactions). pub max_txs_bytes: u64, /// Size of the cache (used to filter transactions we saw earlier) in transactions pub cache_size: u64, } /// consensus configuration options #[derive(Clone, Debug, Deserialize, Serialize)] pub struct ConsensusConfig { /// Path to WAL file pub wal_file: PathBuf, /// Propose timeout pub timeout_propose: Timeout, /// Propose timeout delta pub timeout_propose_delta: Timeout, /// Prevote timeout pub timeout_prevote: Timeout, /// Prevote timeout delta pub timeout_prevote_delta: Timeout, /// Precommit timeout pub timeout_precommit: Timeout, /// Precommit timeout delta pub timeout_precommit_delta: Timeout, /// Commit timeout pub timeout_commit: Timeout, /// Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) pub skip_timeout_commit: bool, /// EmptyBlocks mode pub create_empty_blocks: bool, /// Interval between empty blocks pub create_empty_blocks_interval: Timeout, /// Reactor sleep duration pub peer_gossip_sleep_duration: Timeout, /// Reactor query sleep duration pub peer_query_maj23_sleep_duration: Timeout, } /// transactions indexer configuration options #[derive(Clone, Debug, Deserialize, Serialize)] pub struct TxIndexConfig { /// What indexer to use for transactions #[serde(default)] pub indexer: TxIndexer, /// Comma-separated list of tags to index (by default the only tag is `tx.hash`) // TODO(tarcieri): switch to `tendermint::abci::Tag` #[serde( serialize_with = "serialize_comma_separated_list", deserialize_with = "deserialize_comma_separated_list" )] pub index_tags: Vec<tag::Key>, /// When set to true, tells indexer to index all tags (predefined tags: /// `tx.hash`, `tx.height` and all tags from DeliverTx responses). pub index_all_tags: bool, } /// What indexer to use for transactions #[derive(Copy, Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)] pub enum TxIndexer { /// "null" // TODO(tarcieri): use an `Option` type here? #[serde(rename = "null")] Null, /// "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). #[serde(rename = "kv")] Kv, } impl Default for TxIndexer { fn default() -> TxIndexer { TxIndexer::Kv } } /// instrumentation configuration options #[derive(Clone, Debug, Deserialize, Serialize)] pub struct InstrumentationConfig { /// When `true`, Prometheus metrics are served under /metrics on /// PrometheusListenAddr. pub prometheus: bool, /// Address to listen for Prometheus collector(s) connections // TODO(tarcieri): parse to `tendermint::net::Addr` pub prometheus_listen_addr: String, /// Maximum number of simultaneous connections. 
pub max_open_connections: u64, /// Instrumentation namespace pub namespace: String, } /// Rate at which bytes can be sent/received #[derive(Copy, Clone, Debug, Deserialize, Serialize)] pub struct TransferRate(u64); impl TransferRate { /// Get the transfer rate in bytes per second pub fn bytes_per_sec(self) -> u64 { self.0 } } /// Deserialize `Option<T: FromStr>` where an empty string indicates `None` fn deserialize_optional_value<'de, D, T, E>(deserializer: D) -> Result<Option<T>, D::Error> where D: de::Deserializer<'de>, T: FromStr<Err = E>, E: fmt::Display, { let string = String::deserialize(deserializer)?; if string.is_empty() { return Ok(None); } string .parse() .map(Some) .map_err(|e| D::Error::custom(format!("{}", e))) } /// Deserialize a comma separated list of types that impl `FromStr` as a `Vec` fn deserialize_comma_separated_list<'de, D, T, E>(deserializer: D) -> Result<Vec<T>, D::Error> where D: de::Deserializer<'de>, T: FromStr<Err = E>, E: fmt::Display, { let mut result = vec![]; let string = String::deserialize(deserializer)?; if string.is_empty() { return Ok(result); } for item in string.split(',') { result.push( item.parse() .map_err(|e| D::Error::custom(format!("{}", e)))?, ); } Ok(result) } /// Serialize a comma separated list of types that impl `ToString` fn serialize_comma_separated_list<S, T>(list: &[T], serializer: S) -> Result<S::Ok, S::Error> where S: ser::Serializer, T: ToString, { let str_list = list.iter().map(|addr| addr.to_string()).collect::<Vec<_>>(); str_list.join(",").serialize(serializer) }
{ return Err(err!(ErrorKind::Parse, "error parsing log level: {}", level)); }
conditional_block
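A usage sketch for the `LogLevel` parser whose error branch fills the hole above (assumes `LogLevel` and `std::str::FromStr` are in scope):

// Well-formed input is a comma-separated list of `module:level` pairs.
let levels = LogLevel::from_str("main:info,p2p:debug,*:error").unwrap();
assert_eq!(levels.get("p2p"), Some("debug"));

// A bare "info" has no ':' separator, so parts.len() != 2 and the
// error branch from this record fires.
assert!(LogLevel::from_str("info").is_err());

// Repeating a key trips the duplicate check instead.
assert!(LogLevel::from_str("main:info,main:debug").is_err());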
config.rs
//! Tendermint configuration file types (with serde parsers/serializers) //! //! This module contains types which correspond to the following config files: //! //! - `config.toml`: `config::TendermintConfig` //! - `node_key.rs`: `config::node_key::NodeKey` //! - `priv_validator_key.rs`: `config::priv_validator_key::PrivValidatorKey` mod node_key; mod priv_validator_key; pub use self::{node_key::NodeKey, priv_validator_key::PrivValidatorKey}; use crate::{ abci::tag, error::{Error, ErrorKind}, genesis::Genesis, net, node, Moniker, Timeout, }; use serde::{de, de::Error as _, ser, Deserialize, Serialize}; use std::{ collections::BTreeMap, fmt, fs, path::{Path, PathBuf}, str::FromStr, }; /// Tendermint `config.toml` file #[derive(Clone, Debug, Deserialize, Serialize)] pub struct TendermintConfig { /// TCP or UNIX socket address of the ABCI application, /// or the name of an ABCI application compiled in with the Tendermint binary. pub proxy_app: net::Address, /// A custom human readable name for this node pub moniker: Moniker, /// If this node is many blocks behind the tip of the chain, FastSync /// allows them to catchup quickly by downloading blocks in parallel /// and verifying their commits pub fast_sync: bool, /// Database backend: `leveldb | memdb | cleveldb` pub db_backend: DbBackend, /// Database directory pub db_dir: PathBuf, /// Output level for logging, including package level options pub log_level: LogLevel, /// Output format: 'plain' (colored text) or 'json' pub log_format: LogFormat, /// Path to the JSON file containing the initial validator set and other meta data pub genesis_file: PathBuf, /// Path to the JSON file containing the private key to use as a validator in the consensus protocol pub priv_validator_key_file: Option<PathBuf>, /// Path to the JSON file containing the last sign state of a validator pub priv_validator_state_file: PathBuf, /// TCP or UNIX socket address for Tendermint to listen on for /// connections from an external PrivValidator process #[serde(deserialize_with = "deserialize_optional_value")] pub priv_validator_laddr: Option<net::Address>, /// Path to the JSON file containing the private key to use for node authentication in the p2p protocol pub node_key_file: PathBuf, /// Mechanism to connect to the ABCI application: socket | grpc pub abci: AbciMode, /// TCP or UNIX socket address for the profiling server to listen on #[serde(deserialize_with = "deserialize_optional_value")] pub prof_laddr: Option<net::Address>, /// If `true`, query the ABCI app on connecting to a new peer /// so the app can decide if we should keep the connection or not pub filter_peers: bool, /// rpc server configuration options pub rpc: RpcConfig, /// peer to peer configuration options pub p2p: P2PConfig, /// mempool configuration options pub mempool: MempoolConfig, /// consensus configuration options pub consensus: ConsensusConfig, /// transactions indexer configuration options pub tx_index: TxIndexConfig, /// instrumentation configuration options pub instrumentation: InstrumentationConfig, } impl TendermintConfig { /// Parse Tendermint `config.toml` pub fn parse_toml<T: AsRef<str>>(toml_string: T) -> Result<Self, Error> { Ok(toml::from_str(toml_string.as_ref())?) 
} /// Load `config.toml` from a file pub fn load_toml_file<P>(path: &P) -> Result<Self, Error> where P: AsRef<Path>, { let toml_string = fs::read_to_string(path).map_err(|e| { err!( ErrorKind::Parse, "couldn't open {}: {}", path.as_ref().display(), e ) })?; Self::parse_toml(toml_string) } /// Load `genesis.json` file from the configured location pub fn load_genesis_file(&self, home: impl AsRef<Path>) -> Result<Genesis, Error> { let path = home.as_ref().join(&self.genesis_file); let genesis_json = fs::read_to_string(&path) .map_err(|e| err!(ErrorKind::Parse, "couldn't open {}: {}", path.display(), e))?; Ok(serde_json::from_str(genesis_json.as_ref())?) } /// Load `node_key.json` file from the configured location pub fn load_node_key(&self, home: impl AsRef<Path>) -> Result<NodeKey, Error> { let path = home.as_ref().join(&self.node_key_file); NodeKey::load_json_file(&path) } } /// Database backend #[derive(Copy, Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)] pub enum DbBackend { /// LevelDB backend #[serde(rename = "leveldb")] LevelDb, /// MemDB backend #[serde(rename = "memdb")] MemDb, /// CLevelDB backend #[serde(rename = "cleveldb")] CLevelDb, } /// Loglevel configuration #[derive(Clone, Debug, Eq, PartialEq)] pub struct LogLevel(BTreeMap<String, String>); impl LogLevel { /// Get the setting for the given key pub fn get<S>(&self, key: S) -> Option<&str> where S: AsRef<str>, { self.0.get(key.as_ref()).map(AsRef::as_ref) } /// Iterate over the levels pub fn iter(&self) -> LogLevelIter<'_> { self.0.iter() } } /// Iterator over log levels pub type LogLevelIter<'a> = std::collections::btree_map::Iter<'a, String, String>; impl FromStr for LogLevel { type Err = Error; fn from_str(s: &str) -> Result<Self, Self::Err> { let mut levels = BTreeMap::new(); for level in s.split(',') { let parts = level.split(':').collect::<Vec<_>>(); if parts.len() != 2 { return Err(err!(ErrorKind::Parse, "error parsing log level: {}", level)); } let key = parts[0].to_owned(); let value = parts[1].to_owned(); if levels.insert(key, value).is_some() { return Err(err!( ErrorKind::Parse, "duplicate log level setting for: {}", level )); } } Ok(LogLevel(levels)) } } impl fmt::Display for LogLevel { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { for (i, (k, v)) in self.0.iter().enumerate() { write!(f, "{}:{}", k, v)?; if i < self.0.len() - 1 { write!(f, ",")?; } } Ok(()) } } impl<'de> Deserialize<'de> for LogLevel { fn deserialize<D: de::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> { let levels = String::deserialize(deserializer)?; Ok(Self::from_str(&levels).map_err(|e| D::Error::custom(format!("{}", e)))?) } } impl Serialize for LogLevel { fn serialize<S: ser::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> { self.to_string().serialize(serializer) } } /// Logging format #[derive(Copy, Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)] pub enum LogFormat { /// Plain (colored text) #[serde(rename = "plain")]
} /// Mechanism to connect to the ABCI application: socket | grpc #[derive(Copy, Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)] pub enum AbciMode { /// Socket #[serde(rename = "socket")] Socket, /// GRPC #[serde(rename = "grpc")] Grpc, } /// Tendermint `config.toml` file's `[rpc]` section #[derive(Clone, Debug, Deserialize, Serialize)] pub struct RpcConfig { /// TCP or UNIX socket address for the RPC server to listen on pub laddr: net::Address, /// A list of origins a cross-domain request can be executed from /// Default value `[]` disables cors support /// Use `["*"]` to allow any origin pub cors_allowed_origins: Vec<CorsOrigin>, /// A list of methods the client is allowed to use with cross-domain requests pub cors_allowed_methods: Vec<CorsMethod>, /// A list of non simple headers the client is allowed to use with cross-domain requests pub cors_allowed_headers: Vec<CorsHeader>, /// TCP or UNIX socket address for the gRPC server to listen on /// NOTE: This server only supports `/broadcast_tx_commit` #[serde(deserialize_with = "deserialize_optional_value")] pub grpc_laddr: Option<net::Address>, /// Maximum number of simultaneous GRPC connections. /// Does not include RPC (HTTP&WebSocket) connections. See `max_open_connections`. pub grpc_max_open_connections: u64, /// Activate unsafe RPC commands like `/dial_seeds` and `/unsafe_flush_mempool` #[serde(rename = "unsafe")] pub unsafe_commands: bool, /// Maximum number of simultaneous connections (including WebSocket). /// Does not include gRPC connections. See `grpc_max_open_connections`. pub max_open_connections: u64, /// Maximum number of unique clientIDs that can `/subscribe`. pub max_subscription_clients: u64, /// Maximum number of unique queries a given client can `/subscribe` to. pub max_subscriptions_per_client: u64, /// How long to wait for a tx to be committed during `/broadcast_tx_commit`. pub timeout_broadcast_tx_commit: Timeout, /// The name of a file containing certificate that is used to create the HTTPS server. #[serde(deserialize_with = "deserialize_optional_value")] pub tls_cert_file: Option<PathBuf>, /// The name of a file containing matching private key that is used to create the HTTPS server. 
#[serde(deserialize_with = "deserialize_optional_value")] pub tls_key_file: Option<PathBuf>, } /// Origin hosts allowed with CORS requests to the RPC API // TODO(tarcieri): parse and validate this string #[derive(Clone, Debug, Deserialize, Serialize)] pub struct CorsOrigin(String); impl AsRef<str> for CorsOrigin { fn as_ref(&self) -> &str { self.0.as_ref() } } impl fmt::Display for CorsOrigin { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", &self.0) } } /// HTTP methods allowed with CORS requests to the RPC API // TODO(tarcieri): parse and validate this string #[derive(Clone, Debug, Deserialize, Serialize)] pub struct CorsMethod(String); impl AsRef<str> for CorsMethod { fn as_ref(&self) -> &str { self.0.as_ref() } } impl fmt::Display for CorsMethod { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", &self.0) } } /// HTTP headers allowed to be sent via CORS to the RPC API // TODO(tarcieri): parse and validate this string #[derive(Clone, Debug, Deserialize, Serialize)] pub struct CorsHeader(String); impl AsRef<str> for CorsHeader { fn as_ref(&self) -> &str { self.0.as_ref() } } impl fmt::Display for CorsHeader { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", &self.0) } } /// peer to peer configuration options #[derive(Clone, Debug, Deserialize, Serialize)] pub struct P2PConfig { /// Address to listen for incoming connections pub laddr: net::Address, /// Address to advertise to peers for them to dial /// If empty, will use the same port as the laddr, /// and will introspect on the listener or use UPnP /// to figure out the address. #[serde(deserialize_with = "deserialize_optional_value")] pub external_address: Option<net::Address>, /// Comma separated list of seed nodes to connect to #[serde( serialize_with = "serialize_comma_separated_list", deserialize_with = "deserialize_comma_separated_list" )] pub seeds: Vec<net::Address>, /// Comma separated list of nodes to keep persistent connections to #[serde( serialize_with = "serialize_comma_separated_list", deserialize_with = "deserialize_comma_separated_list" )] pub persistent_peers: Vec<net::Address>, /// UPNP port forwarding pub upnp: bool, /// Path to address book pub addr_book_file: PathBuf, /// Set `true` for strict address routability rules /// Set `false` for private or local networks pub addr_book_strict: bool, /// Maximum number of inbound peers pub max_num_inbound_peers: u64, /// Maximum number of outbound peers to connect to, excluding persistent peers pub max_num_outbound_peers: u64, /// Time to wait before flushing messages out on the connection pub flush_throttle_timeout: Timeout, /// Maximum size of a message packet payload, in bytes pub max_packet_msg_payload_size: u64, /// Rate at which packets can be sent, in bytes/second pub send_rate: TransferRate, /// Rate at which packets can be received, in bytes/second pub recv_rate: TransferRate, /// Set `true` to enable the peer-exchange reactor pub pex: bool, /// Seed mode, in which node constantly crawls the network and looks for /// peers. If another node asks it for addresses, it responds and disconnects. /// /// Does not work if the peer-exchange reactor is disabled. pub seed_mode: bool, /// Comma separated list of peer IDs to keep private (will not be gossiped to other peers) #[serde( serialize_with = "serialize_comma_separated_list", deserialize_with = "deserialize_comma_separated_list" )] pub private_peer_ids: Vec<node::Id>, /// Toggle to disable guard against peers connecting from the same ip. 
pub allow_duplicate_ip: bool, /// Handshake timeout pub handshake_timeout: Timeout, /// Timeout when dialing other peers pub dial_timeout: Timeout, } /// mempool configuration options #[derive(Clone, Debug, Deserialize, Serialize)] pub struct MempoolConfig { /// Recheck enabled pub recheck: bool, /// Broadcast enabled pub broadcast: bool, /// WAL dir #[serde(deserialize_with = "deserialize_optional_value")] pub wal_dir: Option<PathBuf>, /// Maximum number of transactions in the mempool pub size: u64, /// Limit the total size of all txs in the mempool. /// This only accounts for raw transactions (e.g. given 1MB transactions and /// `max_txs_bytes`=5MB, mempool will only accept 5 transactions). pub max_txs_bytes: u64, /// Size of the cache (used to filter transactions we saw earlier) in transactions pub cache_size: u64, } /// consensus configuration options #[derive(Clone, Debug, Deserialize, Serialize)] pub struct ConsensusConfig { /// Path to WAL file pub wal_file: PathBuf, /// Propose timeout pub timeout_propose: Timeout, /// Propose timeout delta pub timeout_propose_delta: Timeout, /// Prevote timeout pub timeout_prevote: Timeout, /// Prevote timeout delta pub timeout_prevote_delta: Timeout, /// Precommit timeout pub timeout_precommit: Timeout, /// Precommit timeout delta pub timeout_precommit_delta: Timeout, /// Commit timeout pub timeout_commit: Timeout, /// Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) pub skip_timeout_commit: bool, /// EmptyBlocks mode pub create_empty_blocks: bool, /// Interval between empty blocks pub create_empty_blocks_interval: Timeout, /// Reactor sleep duration pub peer_gossip_sleep_duration: Timeout, /// Reactor query sleep duration pub peer_query_maj23_sleep_duration: Timeout, } /// transactions indexer configuration options #[derive(Clone, Debug, Deserialize, Serialize)] pub struct TxIndexConfig { /// What indexer to use for transactions #[serde(default)] pub indexer: TxIndexer, /// Comma-separated list of tags to index (by default the only tag is `tx.hash`) // TODO(tarcieri): switch to `tendermint::abci::Tag` #[serde( serialize_with = "serialize_comma_separated_list", deserialize_with = "deserialize_comma_separated_list" )] pub index_tags: Vec<tag::Key>, /// When set to true, tells indexer to index all tags (predefined tags: /// `tx.hash`, `tx.height` and all tags from DeliverTx responses). pub index_all_tags: bool, } /// What indexer to use for transactions #[derive(Copy, Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)] pub enum TxIndexer { /// "null" // TODO(tarcieri): use an `Option` type here? #[serde(rename = "null")] Null, /// "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). #[serde(rename = "kv")] Kv, } impl Default for TxIndexer { fn default() -> TxIndexer { TxIndexer::Kv } } /// instrumentation configuration options #[derive(Clone, Debug, Deserialize, Serialize)] pub struct InstrumentationConfig { /// When `true`, Prometheus metrics are served under /metrics on /// PrometheusListenAddr. pub prometheus: bool, /// Address to listen for Prometheus collector(s) connections // TODO(tarcieri): parse to `tendermint::net::Addr` pub prometheus_listen_addr: String, /// Maximum number of simultaneous connections. 
pub max_open_connections: u64, /// Instrumentation namespace pub namespace: String, } /// Rate at which bytes can be sent/received #[derive(Copy, Clone, Debug, Deserialize, Serialize)] pub struct TransferRate(u64); impl TransferRate { /// Get the transfer rate in bytes per second pub fn bytes_per_sec(self) -> u64 { self.0 } } /// Deserialize `Option<T: FromStr>` where an empty string indicates `None` fn deserialize_optional_value<'de, D, T, E>(deserializer: D) -> Result<Option<T>, D::Error> where D: de::Deserializer<'de>, T: FromStr<Err = E>, E: fmt::Display, { let string = String::deserialize(deserializer)?; if string.is_empty() { return Ok(None); } string .parse() .map(Some) .map_err(|e| D::Error::custom(format!("{}", e))) } /// Deserialize a comma separated list of types that impl `FromStr` as a `Vec` fn deserialize_comma_separated_list<'de, D, T, E>(deserializer: D) -> Result<Vec<T>, D::Error> where D: de::Deserializer<'de>, T: FromStr<Err = E>, E: fmt::Display, { let mut result = vec![]; let string = String::deserialize(deserializer)?; if string.is_empty() { return Ok(result); } for item in string.split(',') { result.push( item.parse() .map_err(|e| D::Error::custom(format!("{}", e)))?, ); } Ok(result) } /// Serialize a comma separated list of types that impl `ToString` fn serialize_comma_separated_list<S, T>(list: &[T], serializer: S) -> Result<S::Ok, S::Error> where S: ser::Serializer, T: ToString, { let str_list = list.iter().map(|addr| addr.to_string()).collect::<Vec<_>>(); str_list.join(",").serialize(serializer) }
Plain, /// JSON #[serde(rename = "json")] Json,
random_line_split
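A quick round-trip sketch for the `LogFormat` variants completed by the hole above, checking that the `#[serde(rename = ...)]` attributes map to the lowercase strings used in config.toml (serde_json is assumed available as a stand-in for the TOML layer):

// "plain" and "json" are the on-disk spellings of the two variants.
let fmt: LogFormat = serde_json::from_str("\"plain\"").unwrap();
assert_eq!(fmt, LogFormat::Plain);
assert_eq!(serde_json::to_string(&LogFormat::Json).unwrap(), "\"json\"");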
instream.rs
extern crate libsoundio_sys as raw; use super::error::*; use super::format::*; use super::sample::*; use super::util::*; use std::marker::PhantomData; use std::os::raw::{c_double, c_int}; use std::ptr; use std::slice; /// This is called when an instream has been read. The `InStreamUserData` struct is obtained /// from the stream.userdata, then the user-supplied callback is called with an `InStreamReader` /// object. pub extern "C" fn instream_read_callback( stream: *mut raw::SoundIoInStream, frame_count_min: c_int, frame_count_max: c_int, ) { // Use stream.userdata to get a reference to the InStreamUserData object. let raw_userdata_pointer = unsafe { (*stream).userdata as *mut InStreamUserData }; let userdata = unsafe { &mut (*raw_userdata_pointer) }; let mut stream_reader = InStreamReader { instream: userdata.instream, frame_count_min: frame_count_min as _, frame_count_max: frame_count_max as _, read_started: false, channel_areas: Vec::new(), frame_count: 0, phantom: PhantomData, }; (userdata.read_callback)(&mut stream_reader); } pub extern "C" fn instream_overflow_callback(stream: *mut raw::SoundIoInStream) { // Use stream.userdata to get a reference to the InStreamUserData object. let raw_userdata_pointer = unsafe { (*stream).userdata as *mut InStreamUserData }; let userdata = unsafe { &mut (*raw_userdata_pointer) }; if let Some(ref mut cb) = userdata.overflow_callback { cb(); } else { println!("Overflow!"); } } pub extern "C" fn instream_error_callback(stream: *mut raw::SoundIoInStream, err: c_int) { // Use stream.userdata to get a reference to the InStreamUserData object. let raw_userdata_pointer = unsafe { (*stream).userdata as *mut InStreamUserData }; let userdata = unsafe { &mut (*raw_userdata_pointer) }; if let Some(ref mut cb) = userdata.error_callback { cb(err.into()); } else { println!("Error: {}", Error::from(err)); } } /// InStream represents an input stream for recording. /// /// It is obtained from `Device` using `Device::open_instream()` and /// can be started and paused. pub struct InStream<'a> { pub userdata: Box<InStreamUserData<'a>>, // This is just here to say that InStream cannot outlive the Device it was created from. pub phantom: PhantomData<&'a ()>, } /// The callbacks required for an instream are stored in this object. We also store a pointer /// to the raw instream so that it can be passed to `InStreamReader` in the write callback. pub struct InStreamUserData<'a> { pub instream: *mut raw::SoundIoInStream, pub read_callback: Box<dyn FnMut(&mut InStreamReader) + 'a>, pub overflow_callback: Option<Box<dyn FnMut() + 'a>>, pub error_callback: Option<Box<dyn FnMut(Error) + 'a>>, } impl<'a> Drop for InStreamUserData<'a> { fn drop(&mut self) { unsafe { raw::soundio_instream_destroy(self.instream); } } } impl<'a> InStream<'a> { /// Starts the stream, returning `Ok(())` if it started successfully. Once /// started the read callback will be periodically called according to the /// requested latency. /// /// `start()` should only ever be called once on an `InStream`. /// Do not use `start()` to resume a stream after pausing it. Instead call `pause(false)`. /// /// # Errors /// /// * `Error::BackendDisconnected` /// * `Error::Streaming` /// * `Error::OpeningDevice` /// * `Error::SystemResources` /// pub fn start(&mut self) -> Result<()> { match unsafe { raw::soundio_instream_start(self.userdata.instream) } { 0 => Ok(()), x => Err(x.into()),
} // TODO: Can pause() be called from the read callback? /// If the underlying backend and device support pausing, this pauses the /// stream. The `read_callback()` may be called a few more times if /// the buffer is not full. /// /// Pausing might put the hardware into a low power state which is ideal if your /// software is silent for some time. /// /// This should not be called before `start()`. Pausing when already paused or /// unpausing when already unpaused has no effect and returns `Ok(())`. /// /// # Errors /// /// * `Error::BackendDisconnected` /// * `Error::Streaming` /// * `Error::IncompatibleDevice` - device does not support pausing/unpausing /// pub fn pause(&mut self, pause: bool) -> Result<()> { match unsafe { raw::soundio_instream_pause(self.userdata.instream, pause as i8) } { 0 => Ok(()), e => Err(e.into()), } } /// Returns the stream format. pub fn format(&self) -> Format { unsafe { (*self.userdata.instream).format.into() } } /// Sample rate is the number of frames per second. pub fn sample_rate(&self) -> i32 { unsafe { (*self.userdata.instream).sample_rate as _ } } /// Ignoring hardware latency, this is the number of seconds it takes for a /// captured sample to become available for reading. /// After you call `Device::open_instream()`, this value is replaced with the /// actual software latency, as near to this value as possible. /// /// A higher value means less CPU usage. Defaults to a large value, /// potentially upwards of 2 seconds. /// /// If the device has unknown software latency min and max values, you may /// still set this (in `Device::open_instream()`), but you might not /// get the value you requested. /// /// For PulseAudio, if you set this value to non-default, it sets /// `PA_STREAM_ADJUST_LATENCY` and is the value used for `fragsize`. /// For JACK, this value is always equal to /// `Device::software_latency().current`. pub fn software_latency(&self) -> f64 { unsafe { (*self.userdata.instream).software_latency as _ } } /// The name of the stream, which defaults to "SoundIoInStream". /// /// PulseAudio uses this for the stream name. /// JACK uses this for the client name of the client that connects when you /// open the stream. /// WASAPI uses this for the session display name. /// Must not contain a colon (":"). /// /// TODO: Currently there is no way to set this. pub fn name(&self) -> String { unsafe { utf8_to_string((*self.userdata.instream).name) } } /// The number of bytes per frame, equal to the number of bytes /// per sample, multiplied by the number of channels. pub fn bytes_per_frame(&self) -> i32 { unsafe { (*self.userdata.instream).bytes_per_frame as _ } } /// The number of bytes in a sample, e.g. 3 for `i24`. pub fn bytes_per_sample(&self) -> i32 { unsafe { (*self.userdata.instream).bytes_per_sample as _ } } } /// `InStreamReader` is passed to the read callback and can be used to read from the stream. /// /// You start by calling `begin_read()` and then you can read the samples. When the `InStreamReader` /// is dropped the samples are dropped. An error at that point is written to the console and ignored. /// pub struct InStreamReader<'a> { instream: *mut raw::SoundIoInStream, frame_count_min: usize, frame_count_max: usize, read_started: bool, // The memory area to read from - one for each channel. Populated after begin_read() channel_areas: Vec<raw::SoundIoChannelArea>, // The actual frame count. Populated after begin_read() frame_count: usize, // This cannot outlive the scope that it is spawned from (in the read callback).
phantom: PhantomData<&'a ()>, } impl<'a> InStreamReader<'a> { /// Start a read. You can only call this once per callback otherwise it panics. /// /// frame_count is the number of frames you want to read. It must be between /// frame_count_min and frame_count_max inclusive, or `begin_read()` will panic. /// /// It returns the number of frames you can actually read. The returned value /// will always be less than or equal to the provided value. /// /// # Errors /// /// * `Error::Invalid` /// * `frame_count` < `frame_count_min` or `frame_count` > `frame_count_max` /// * `Error::Streaming` /// * `Error::IncompatibleDevice` - in rare cases it might just now /// be discovered that the device uses non-byte-aligned access, in which /// case this error code is returned. /// pub fn begin_read(&mut self, frame_count: usize) -> Result<usize> { assert!( frame_count >= self.frame_count_min && frame_count <= self.frame_count_max, "frame_count out of range" ); let mut areas: *mut raw::SoundIoChannelArea = ptr::null_mut(); let mut actual_frame_count: c_int = frame_count as _; match unsafe { raw::soundio_instream_begin_read( self.instream, &mut areas as *mut _, &mut actual_frame_count as *mut _, ) } { 0 => { self.read_started = true; self.frame_count = actual_frame_count as _; // Return now if there are no frames to actually read. if actual_frame_count <= 0 { return Ok(0); } let cc = self.channel_count(); self.channel_areas = vec![ raw::SoundIoChannelArea { ptr: ptr::null_mut(), step: 0 }; cc ]; unsafe { self.channel_areas.copy_from_slice(slice::from_raw_parts::< raw::SoundIoChannelArea, >(areas, cc)); } Ok(actual_frame_count as _) } e => Err(e.into()), } } /// Commits the read that you began with `begin_read()`. /// /// Errors are currently just printed to the console and ignored. /// /// # Errors /// /// * `Error::Streaming` /// * `Error::Underflow` - an underflow caused this call to fail. You might /// also get an `overflow_callback()`, and you might not get /// this error code when an underflow occurs. Unlike `Error::Streaming`, /// the instream is still in a valid state and streaming can continue. pub fn end_read(&mut self) { if self.read_started { unsafe { match raw::soundio_instream_end_read(self.instream) { 0 => { self.read_started = false; } x => println!("Error ending instream: {}", Error::from(x)), } } } } /// Get the minimum frame count that you can call `begin_read()` with. /// Retrieve this value before calling `begin_read()` to ensure you read the correct number /// of frames. pub fn frame_count_min(&self) -> usize { self.frame_count_min } /// Get the maximum frame count that you can call `begin_read()` with. /// Retrieve this value before calling `begin_read()` to ensure you read the correct number /// of frames. pub fn frame_count_max(&self) -> usize { self.frame_count_max } /// Get the actual frame count returned by `begin_read()`. Panics if you haven't called /// `begin_read()` yet. pub fn frame_count(&self) -> usize { assert!(self.read_started); self.frame_count } /// Get latency in seconds due to software only, not including hardware. pub fn software_latency(&self) -> f64 { unsafe { (*self.instream).software_latency as _ } } /// Return the number of channels in this stream. Guaranteed to be at least 1. pub fn channel_count(&self) -> usize { unsafe { (*self.instream).layout.channel_count as _ } } /// Get the sample rate in Hertz.
pub fn sample_rate(&self) -> i32 { unsafe { (*self.instream).sample_rate as _ } } /// Obtain the number of seconds that the next frame of sound being /// captured will take to arrive in the buffer, plus the amount of time that is /// represented in the buffer. This includes both software and hardware latency. /// /// # Errors /// /// * `Error::Streaming` /// pub fn get_latency(&mut self) -> Result<f64> { let mut x: c_double = 0.0; match unsafe { raw::soundio_instream_get_latency(self.instream, &mut x as *mut c_double) } { 0 => Ok(x), e => Err(e.into()), } } /// Get the value of a sample. This panics if the `channel` or `frame` are /// out of range or if you haven't called `begin_read()` yet. /// /// If you request a different type from the actual one it will be converted. /// /// # Examples /// /// ``` /// fn read_callback(stream: &mut soundio::InStreamReader) { /// let frame_count_max = stream.frame_count_max(); /// stream.begin_read(frame_count_max).unwrap(); /// for c in 0..stream.channel_count() { /// for f in 0..stream.frame_count() { /// do_something_with(stream.sample::<i16>(c, f)); /// } /// } /// } /// # fn do_something_with(_: i16) { } /// ``` pub fn sample<T: Sample>(&self, channel: usize, frame: usize) -> T { assert!(self.read_started); assert!(channel < self.channel_count(), "Channel out of range"); assert!(frame < self.frame_count(), "Frame out of range"); unsafe { let ptr = self.channel_areas[channel] .ptr .add(frame * self.channel_areas[channel].step as usize) as *mut u8; match (*self.instream).format { raw::SoundIoFormat::SoundIoFormatS8 => T::from_i8(i8::from_raw_le(ptr)), raw::SoundIoFormat::SoundIoFormatU8 => T::from_u8(u8::from_raw_le(ptr)), raw::SoundIoFormat::SoundIoFormatS16LE => T::from_i16(i16::from_raw_le(ptr)), raw::SoundIoFormat::SoundIoFormatS16BE => T::from_i16(i16::from_raw_be(ptr)), raw::SoundIoFormat::SoundIoFormatU16LE => T::from_u16(u16::from_raw_le(ptr)), raw::SoundIoFormat::SoundIoFormatU16BE => T::from_u16(u16::from_raw_be(ptr)), raw::SoundIoFormat::SoundIoFormatS24LE => T::from_i24(i24::from_raw_le(ptr)), raw::SoundIoFormat::SoundIoFormatS24BE => T::from_i24(i24::from_raw_be(ptr)), raw::SoundIoFormat::SoundIoFormatU24LE => T::from_u24(u24::from_raw_le(ptr)), raw::SoundIoFormat::SoundIoFormatU24BE => T::from_u24(u24::from_raw_be(ptr)), raw::SoundIoFormat::SoundIoFormatS32LE => T::from_i32(i32::from_raw_le(ptr)), raw::SoundIoFormat::SoundIoFormatS32BE => T::from_i32(i32::from_raw_be(ptr)), raw::SoundIoFormat::SoundIoFormatU32LE => T::from_u32(u32::from_raw_le(ptr)), raw::SoundIoFormat::SoundIoFormatU32BE => T::from_u32(u32::from_raw_be(ptr)), raw::SoundIoFormat::SoundIoFormatFloat32LE => T::from_f32(f32::from_raw_le(ptr)), raw::SoundIoFormat::SoundIoFormatFloat32BE => T::from_f32(f32::from_raw_be(ptr)), raw::SoundIoFormat::SoundIoFormatFloat64LE => T::from_f64(f64::from_raw_le(ptr)), raw::SoundIoFormat::SoundIoFormatFloat64BE => T::from_f64(f64::from_raw_be(ptr)), _ => panic!("Unknown format"), } } } // TODO: To achieve speed *and* safety I can use iterators. That will be in a future API. } impl<'a> Drop for InStreamReader<'a> { /// This will drop all of the frames from when you called `begin_read()`. /// /// Errors are currently just printed to the console and ignored. /// /// # Errors /// /// * `Error::Streaming` fn drop(&mut self) { if self.read_started { unsafe { match raw::soundio_instream_end_read(self.instream) { 0 => {} x => println!("Error reading instream: {}", Error::from(x)), } } } } }
}
random_line_split
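A sketch of a complete read callback built on the `InStreamReader` API above, extending the crate's own doc example: read whatever is available and append it to an interleaved buffer. `buffer` is a hypothetical `Vec<f32>`, and `f32` is assumed to implement the crate's `Sample` trait.

fn read_callback(stream: &mut soundio::InStreamReader, buffer: &mut Vec<f32>) {
    // Ask for the maximum; begin_read() may grant fewer frames.
    let max = stream.frame_count_max();
    if let Err(e) = stream.begin_read(max) {
        println!("Error starting read: {}", e);
        return;
    }
    for f in 0..stream.frame_count() {
        for c in 0..stream.channel_count() {
            // sample() converts from the stream's native format to f32.
            buffer.push(stream.sample::<f32>(c, f));
        }
    }
    // Optional: dropping the reader would also end the read.
    stream.end_read();
}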
instream.rs
extern crate libsoundio_sys as raw; use super::error::*; use super::format::*; use super::sample::*; use super::util::*; use std::marker::PhantomData; use std::os::raw::{c_double, c_int}; use std::ptr; use std::slice; /// This is called when an instream has been read. The `InStreamUserData` struct is obtained /// from the stream.userdata, then the user-supplied callback is called with an `InStreamReader` /// object. pub extern "C" fn instream_read_callback( stream: *mut raw::SoundIoInStream, frame_count_min: c_int, frame_count_max: c_int, ) { // Use stream.userdata to get a reference to the InStreamUserData object. let raw_userdata_pointer = unsafe { (*stream).userdata as *mut InStreamUserData }; let userdata = unsafe { &mut (*raw_userdata_pointer) }; let mut stream_reader = InStreamReader { instream: userdata.instream, frame_count_min: frame_count_min as _, frame_count_max: frame_count_max as _, read_started: false, channel_areas: Vec::new(), frame_count: 0, phantom: PhantomData, }; (userdata.read_callback)(&mut stream_reader); } pub extern "C" fn instream_overflow_callback(stream: *mut raw::SoundIoInStream) { // Use stream.userdata to get a reference to the InStreamUserData object. let raw_userdata_pointer = unsafe { (*stream).userdata as *mut InStreamUserData }; let userdata = unsafe { &mut (*raw_userdata_pointer) }; if let Some(ref mut cb) = userdata.overflow_callback { cb(); } else { println!("Overflow!"); } } pub extern "C" fn instream_error_callback(stream: *mut raw::SoundIoInStream, err: c_int) { // Use stream.userdata to get a reference to the InStreamUserData object. let raw_userdata_pointer = unsafe { (*stream).userdata as *mut InStreamUserData }; let userdata = unsafe { &mut (*raw_userdata_pointer) }; if let Some(ref mut cb) = userdata.error_callback { cb(err.into()); } else { println!("Error: {}", Error::from(err)); } } /// InStream represents an input stream for recording. /// /// It is obtained from `Device` using `Device::open_instream()` and /// can be started and paused. pub struct InStream<'a> { pub userdata: Box<InStreamUserData<'a>>, // This is just here to say that InStream cannot outlive the Device it was created from. pub phantom: PhantomData<&'a ()>, } /// The callbacks required for an instream are stored in this object. We also store a pointer /// to the raw instream so that it can be passed to `InStreamReader` in the write callback. pub struct InStreamUserData<'a> { pub instream: *mut raw::SoundIoInStream, pub read_callback: Box<dyn FnMut(&mut InStreamReader) + 'a>, pub overflow_callback: Option<Box<dyn FnMut() + 'a>>, pub error_callback: Option<Box<dyn FnMut(Error) + 'a>>, } impl<'a> Drop for InStreamUserData<'a> { fn drop(&mut self) { unsafe { raw::soundio_instream_destroy(self.instream); } } } impl<'a> InStream<'a> { /// Starts the stream, returning `Ok(())` if it started successfully. Once /// started the read callback will be periodically called according to the /// requested latency. /// /// `start()` should only ever be called once on an `InStream`. /// Do not use `start()` to resume a stream after pausing it. Instead call `pause(false)`. /// /// # Errors /// /// * `Error::BackendDisconnected` /// * `Error::Streaming` /// * `Error::OpeningDevice` /// * `Error::SystemResources` /// pub fn start(&mut self) -> Result<()> { match unsafe { raw::soundio_instream_start(self.userdata.instream) } { 0 => Ok(()), x => Err(x.into()), } } // TODO: Can pause() be called from the read callback? 
/// If the underlying backend and device support pausing, this pauses the /// stream. The `read_callback()` may be called a few more times if /// the buffer is not full. /// /// Pausing might put the hardware into a low power state which is ideal if your /// software is silent for some time. /// /// This should not be called before `start()`. Pausing when already paused or /// unpausing when already unpaused has no effect and returns `Ok(())`. /// /// # Errors /// /// * `Error::BackendDisconnected` /// * `Error::Streaming` /// * `Error::IncompatibleDevice` - device does not support pausing/unpausing /// pub fn pause(&mut self, pause: bool) -> Result<()> { match unsafe { raw::soundio_instream_pause(self.userdata.instream, pause as i8) } { 0 => Ok(()), e => Err(e.into()), } } /// Returns the stream format. pub fn format(&self) -> Format { unsafe { (*self.userdata.instream).format.into() } } /// Sample rate is the number of frames per second. pub fn sample_rate(&self) -> i32 { unsafe { (*self.userdata.instream).sample_rate as _ } } /// Ignoring hardware latency, this is the number of seconds it takes for a /// captured sample to become available for reading. /// After you call `Device::open_instream()`, this value is replaced with the /// actual software latency, as near to this value as possible. /// /// A higher value means less CPU usage. Defaults to a large value, /// potentially upwards of 2 seconds. /// /// If the device has unknown software latency min and max values, you may /// still set this (in `Device::open_instream()`), but you might not /// get the value you requested. /// /// For PulseAudio, if you set this value to non-default, it sets /// `PA_STREAM_ADJUST_LATENCY` and is the value used for `fragsize`. /// For JACK, this value is always equal to /// `Device::software_latency().current`. pub fn software_latency(&self) -> f64 { unsafe { (*self.userdata.instream).software_latency as _ } } /// The name of the stream, which defaults to "SoundIoInStream". /// /// PulseAudio uses this for the stream name. /// JACK uses this for the client name of the client that connects when you /// open the stream. /// WASAPI uses this for the session display name. /// Must not contain a colon (":"). /// /// TODO: Currently there is no way to set this. pub fn name(&self) -> String { unsafe { utf8_to_string((*self.userdata.instream).name) } } /// The number of bytes per frame, equal to the number of bytes /// per sample, multiplied by the number of channels. pub fn bytes_per_frame(&self) -> i32 { unsafe { (*self.userdata.instream).bytes_per_frame as _ } } /// The number of bytes in a sample, e.g. 3 for `i24`. pub fn bytes_per_sample(&self) -> i32 { unsafe { (*self.userdata.instream).bytes_per_sample as _ } } } /// `InStreamReader` is passed to the read callback and can be used to read from the stream. /// /// You start by calling `begin_read()` and then you can read the samples. When the `InStreamReader` /// is dropped the samples are dropped. An error at that point is written to the console and ignored. /// pub struct InStreamReader<'a> { instream: *mut raw::SoundIoInStream, frame_count_min: usize, frame_count_max: usize, read_started: bool, // The memory area to read from - one for each channel. Populated after begin_read() channel_areas: Vec<raw::SoundIoChannelArea>, // The actual frame count. Populated after begin_read() frame_count: usize, // This cannot outlive the scope that it is spawned from (in the read callback).
phantom: PhantomData<&'a ()>, } impl<'a> InStreamReader<'a> { /// Start a read. You can only call this once per callback otherwise it panics. /// /// frame_count is the number of frames you want to read. It must be between /// frame_count_min and frame_count_max inclusive, or `begin_read()` will panic. /// /// It returns the number of frames you can actually read. The returned value /// will always be less than or equal to the provided value. /// /// # Errors /// /// * `Error::Invalid` /// * `frame_count` < `frame_count_min` or `frame_count` > `frame_count_max` /// * `Error::Streaming` /// * `Error::IncompatibleDevice` - in rare cases it might just now /// be discovered that the device uses non-byte-aligned access, in which /// case this error code is returned. /// pub fn begin_read(&mut self, frame_count: usize) -> Result<usize> { assert!( frame_count >= self.frame_count_min && frame_count <= self.frame_count_max, "frame_count out of range" ); let mut areas: *mut raw::SoundIoChannelArea = ptr::null_mut(); let mut actual_frame_count: c_int = frame_count as _; match unsafe { raw::soundio_instream_begin_read( self.instream, &mut areas as *mut _, &mut actual_frame_count as *mut _, ) } { 0 => { self.read_started = true; self.frame_count = actual_frame_count as _; // Return now if there are no frames to actually read. if actual_frame_count <= 0 { return Ok(0); } let cc = self.channel_count(); self.channel_areas = vec![ raw::SoundIoChannelArea { ptr: ptr::null_mut(), step: 0 }; cc ]; unsafe { self.channel_areas.copy_from_slice(slice::from_raw_parts::< raw::SoundIoChannelArea, >(areas, cc)); } Ok(actual_frame_count as _) } e => Err(e.into()), } } /// Commits the read that you began with `begin_read()`. /// /// Errors are currently just printed to the console and ignored. /// /// # Errors /// /// * `Error::Streaming` /// * `Error::Underflow` - an underflow caused this call to fail. You might /// also get an `overflow_callback()`, and you might not get /// this error code when an underflow occurs. Unlike `Error::Streaming`, /// the instream is still in a valid state and streaming can continue. pub fn end_read(&mut self) { if self.read_started { unsafe { match raw::soundio_instream_end_read(self.instream) { 0 => { self.read_started = false; } x => println!("Error ending instream: {}", Error::from(x)), } } } } /// Get the minimum frame count that you can call `begin_read()` with. /// Retrieve this value before calling `begin_read()` to ensure you read the correct number /// of frames. pub fn frame_count_min(&self) -> usize { self.frame_count_min } /// Get the maximum frame count that you can call `begin_read()` with. /// Retrieve this value before calling `begin_read()` to ensure you read the correct number /// of frames. pub fn frame_count_max(&self) -> usize { self.frame_count_max } /// Get the actual frame count returned by `begin_read()`. Panics if you haven't called /// `begin_read()` yet. pub fn frame_count(&self) -> usize
/// Get latency in seconds due to software only, not including hardware. pub fn software_latency(&self) -> f64 { unsafe { (*self.instream).software_latency as _ } } /// Return the number of channels in this stream. Guaranteed to be at least 1. pub fn channel_count(&self) -> usize { unsafe { (*self.instream).layout.channel_count as _ } } /// Get the sample rate in Hertz. pub fn sample_rate(&self) -> i32 { unsafe { (*self.instream).sample_rate as _ } } /// Obtain the number of seconds that the next frame of sound being /// captured will take to arrive in the buffer, plus the amount of time that is /// represented in the buffer. This includes both software and hardware latency. /// /// # Errors /// /// * `Error::Streaming` /// pub fn get_latency(&mut self) -> Result<f64> { let mut x: c_double = 0.0; match unsafe { raw::soundio_instream_get_latency(self.instream, &mut x as *mut c_double) } { 0 => Ok(x), e => Err(e.into()), } } /// Get the value of a sample. This panics if the `channel` or `frame` are /// out of range or if you haven't called `begin_read()` yet. /// /// If you request a different type from the actual one it will be converted. /// /// # Examples /// /// ``` /// fn read_callback(stream: &mut soundio::InStreamReader) { /// let frame_count_max = stream.frame_count_max(); /// stream.begin_read(frame_count_max).unwrap(); /// for c in 0..stream.channel_count() { /// for f in 0..stream.frame_count() { /// do_something_with(stream.sample::<i16>(c, f)); /// } /// } /// } /// # fn do_something_with(_: i16) { } /// ``` pub fn sample<T: Sample>(&self, channel: usize, frame: usize) -> T { assert!(self.read_started); assert!(channel < self.channel_count(), "Channel out of range"); assert!(frame < self.frame_count(), "Frame out of range"); unsafe { let ptr = self.channel_areas[channel] .ptr .add(frame * self.channel_areas[channel].step as usize) as *mut u8; match (*self.instream).format { raw::SoundIoFormat::SoundIoFormatS8 => T::from_i8(i8::from_raw_le(ptr)), raw::SoundIoFormat::SoundIoFormatU8 => T::from_u8(u8::from_raw_le(ptr)), raw::SoundIoFormat::SoundIoFormatS16LE => T::from_i16(i16::from_raw_le(ptr)), raw::SoundIoFormat::SoundIoFormatS16BE => T::from_i16(i16::from_raw_be(ptr)), raw::SoundIoFormat::SoundIoFormatU16LE => T::from_u16(u16::from_raw_le(ptr)), raw::SoundIoFormat::SoundIoFormatU16BE => T::from_u16(u16::from_raw_be(ptr)), raw::SoundIoFormat::SoundIoFormatS24LE => T::from_i24(i24::from_raw_le(ptr)), raw::SoundIoFormat::SoundIoFormatS24BE => T::from_i24(i24::from_raw_be(ptr)), raw::SoundIoFormat::SoundIoFormatU24LE => T::from_u24(u24::from_raw_le(ptr)), raw::SoundIoFormat::SoundIoFormatU24BE => T::from_u24(u24::from_raw_be(ptr)), raw::SoundIoFormat::SoundIoFormatS32LE => T::from_i32(i32::from_raw_le(ptr)), raw::SoundIoFormat::SoundIoFormatS32BE => T::from_i32(i32::from_raw_be(ptr)), raw::SoundIoFormat::SoundIoFormatU32LE => T::from_u32(u32::from_raw_le(ptr)), raw::SoundIoFormat::SoundIoFormatU32BE => T::from_u32(u32::from_raw_be(ptr)), raw::SoundIoFormat::SoundIoFormatFloat32LE => T::from_f32(f32::from_raw_le(ptr)), raw::SoundIoFormat::SoundIoFormatFloat32BE => T::from_f32(f32::from_raw_be(ptr)), raw::SoundIoFormat::SoundIoFormatFloat64LE => T::from_f64(f64::from_raw_le(ptr)), raw::SoundIoFormat::SoundIoFormatFloat64BE => T::from_f64(f64::from_raw_be(ptr)), _ => panic!("Unknown format"), } } } // TODO: To achieve speed *and* safety I can use iterators. That will be in a future API.
}

impl<'a> Drop for InStreamReader<'a> {
    /// This will drop all of the frames from when you called `begin_read()`.
    ///
    /// Errors are currently just printed to the console and ignored.
    ///
    /// # Errors
    ///
    /// * `Error::Streaming`
    fn drop(&mut self) {
        if self.read_started {
            unsafe {
                match raw::soundio_instream_end_read(self.instream) {
                    0 => {}
                    x => println!("Error ending instream: {}", Error::from(x)),
                }
            }
        }
    }
}
{ assert!(self.read_started); self.frame_count }
identifier_body
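A minimal usage sketch of the reader API above, following the pattern from `sample()`'s own doc example: request up to `frame_count_max()` frames, read interleaved samples, and let `Drop` commit the read. `collect_frames` and the `i16` target format are illustrative choices, not from the source.

fn collect_frames(stream: &mut InStreamReader) -> Vec<i16> {
    let want = stream.frame_count_max();
    let got = stream.begin_read(want).unwrap(); // got <= want
    let mut samples = Vec::with_capacity(got * stream.channel_count());
    for f in 0..got {
        for c in 0..stream.channel_count() {
            // sample() converts to i16 if the device format differs.
            samples.push(stream.sample::<i16>(c, f));
        }
    }
    samples // end_read() runs via Drop after the callback returns
}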
instream.rs
extern crate libsoundio_sys as raw; use super::error::*; use super::format::*; use super::sample::*; use super::util::*; use std::marker::PhantomData; use std::os::raw::{c_double, c_int}; use std::ptr; use std::slice; /// This is called when an instream has been read. The `InStreamUserData` struct is obtained /// from the stream.userdata, then the user-supplied callback is called with an `InStreamReader` /// object. pub extern "C" fn instream_read_callback( stream: *mut raw::SoundIoInStream, frame_count_min: c_int, frame_count_max: c_int, ) { // Use stream.userdata to get a reference to the InStreamUserData object. let raw_userdata_pointer = unsafe { (*stream).userdata as *mut InStreamUserData }; let userdata = unsafe { &mut (*raw_userdata_pointer) }; let mut stream_reader = InStreamReader { instream: userdata.instream, frame_count_min: frame_count_min as _, frame_count_max: frame_count_max as _, read_started: false, channel_areas: Vec::new(), frame_count: 0, phantom: PhantomData, }; (userdata.read_callback)(&mut stream_reader); } pub extern "C" fn instream_overflow_callback(stream: *mut raw::SoundIoInStream) { // Use stream.userdata to get a reference to the InStreamUserData object. let raw_userdata_pointer = unsafe { (*stream).userdata as *mut InStreamUserData }; let userdata = unsafe { &mut (*raw_userdata_pointer) }; if let Some(ref mut cb) = userdata.overflow_callback { cb(); } else { println!("Overflow!"); } } pub extern "C" fn instream_error_callback(stream: *mut raw::SoundIoInStream, err: c_int) { // Use stream.userdata to get a reference to the InStreamUserData object. let raw_userdata_pointer = unsafe { (*stream).userdata as *mut InStreamUserData }; let userdata = unsafe { &mut (*raw_userdata_pointer) }; if let Some(ref mut cb) = userdata.error_callback { cb(err.into()); } else { println!("Error: {}", Error::from(err)); } } /// InStream represents an input stream for recording. /// /// It is obtained from `Device` using `Device::open_instream()` and /// can be started and paused. pub struct InStream<'a> { pub userdata: Box<InStreamUserData<'a>>, // This is just here to say that InStream cannot outlive the Device it was created from. pub phantom: PhantomData<&'a ()>, } /// The callbacks required for an instream are stored in this object. We also store a pointer /// to the raw instream so that it can be passed to `InStreamReader` in the write callback. pub struct InStreamUserData<'a> { pub instream: *mut raw::SoundIoInStream, pub read_callback: Box<dyn FnMut(&mut InStreamReader) + 'a>, pub overflow_callback: Option<Box<dyn FnMut() + 'a>>, pub error_callback: Option<Box<dyn FnMut(Error) + 'a>>, } impl<'a> Drop for InStreamUserData<'a> { fn drop(&mut self) { unsafe { raw::soundio_instream_destroy(self.instream); } } } impl<'a> InStream<'a> { /// Starts the stream, returning `Ok(())` if it started successfully. Once /// started the read callback will be periodically called according to the /// requested latency. /// /// `start()` should only ever be called once on an `InStream`. /// Do not use `start()` to resume a stream after pausing it. Instead call `pause(false)`. /// /// # Errors /// /// * `Error::BackendDisconnected` /// * `Error::Streaming` /// * `Error::OpeningDevice` /// * `Error::SystemResources` /// pub fn
(&mut self) -> Result<()> { match unsafe { raw::soundio_instream_start(self.userdata.instream) } { 0 => Ok(()), x => Err(x.into()), } } // TODO: Can pause() be called from the read callback? /// If the underlying backend and device support pausing, this pauses the /// stream. The `write_callback()` may be called a few more times if /// the buffer is not full. /// /// Pausing might put the hardware into a low power state which is ideal if your /// software is silent for some time. /// /// This should not be called before `start()`. Pausing when already paused or /// unpausing when already unpaused has no effect and returns `Ok(())`. /// /// # Errors /// /// * `Error::BackendDisconnected` /// * `Error::Streaming` /// * `Error::IncompatibleDevice` - device does not support pausing/unpausing /// pub fn pause(&mut self, pause: bool) -> Result<()> { match unsafe { raw::soundio_instream_pause(self.userdata.instream, pause as i8) } { 0 => Ok(()), e => Err(e.into()), } } /// Returns the stream format. pub fn format(&self) -> Format { unsafe { (*self.userdata.instream).format.into() } } /// Sample rate is the number of frames per second. pub fn sample_rate(&self) -> i32 { unsafe { (*self.userdata.instream).sample_rate as _ } } /// Ignoring hardware latency, this is the number of seconds it takes for a /// captured sample to become available for reading. /// After you call `Device::open_instream()`, this value is replaced with the /// actual software latency, as near to this value as possible. /// /// A higher value means less CPU usage. Defaults to a large value, /// potentially upwards of 2 seconds. /// /// If the device has unknown software latency min and max values, you may /// still set this (in `Device::open_instream()`), but you might not /// get the value you requested. /// /// For PulseAudio, if you set this value to non-default, it sets /// `PA_STREAM_ADJUST_LATENCY` and is the value used for `fragsize`. /// For JACK, this value is always equal to /// `Device::software_latency().current`. pub fn software_latency(&self) -> f64 { unsafe { (*self.userdata.instream).software_latency as _ } } /// The name of the stream, which defaults to "SoundIoInStream". /// /// PulseAudio uses this for the stream name. /// JACK uses this for the client name of the client that connects when you /// open the stream. /// WASAPI uses this for the session display name. /// Must not contain a colon (":"). /// /// TODO: Currently there is no way to set this. pub fn name(&self) -> String { unsafe { utf8_to_string((*self.userdata.instream).name) } } /// The number of bytes per frame, equal to the number of bytes /// per sample, multiplied by the number of channels. pub fn bytes_per_frame(&self) -> i32 { unsafe { (*self.userdata.instream).bytes_per_frame as _ } } /// The number of bytes in a sample, e.g. 3 for `i24`. pub fn bytes_per_sample(&self) -> i32 { unsafe { (*self.userdata.instream).bytes_per_sample as _ } } } /// `InStreamReader` is passed to the read callback and can be used to read from the stream. /// /// You start by calling `begin_read()` and then you can read the samples. When the `InStreamReader` /// is dropped the samples are dropped. An error at that point is written to the console and ignored. /// pub struct InStreamReader<'a> { instream: *mut raw::SoundIoInStream, frame_count_min: usize, frame_count_max: usize, read_started: bool, // The memory area to write to - one for each channel. Populated after begin_read() channel_areas: Vec<raw::SoundIoChannelArea>, // The actual frame count. 
Populated after begin_read().
    frame_count: usize,
    // This cannot outlive the scope that it is spawned from (in the read callback).
    phantom: PhantomData<&'a ()>,
}

impl<'a> InStreamReader<'a> {
    /// Start a read. You can only call this once per callback, otherwise it panics.
    ///
    /// `frame_count` is the number of frames you want to read. It must be between
    /// `frame_count_min` and `frame_count_max` inclusive, or `begin_read()` will panic.
    ///
    /// It returns the number of frames you can actually read. The returned value
    /// will always be less than or equal to the provided value.
    ///
    /// # Errors
    ///
    /// * `Error::Invalid` - `frame_count` < `frame_count_min` or
    ///   `frame_count` > `frame_count_max`
    /// * `Error::Streaming`
    /// * `Error::IncompatibleDevice` - in rare cases it might just now
    ///   be discovered that the device uses non-byte-aligned access, in which
    ///   case this error code is returned.
    ///
    pub fn begin_read(&mut self, frame_count: usize) -> Result<usize> {
        assert!(
            frame_count >= self.frame_count_min && frame_count <= self.frame_count_max,
            "frame_count out of range"
        );

        let mut areas: *mut raw::SoundIoChannelArea = ptr::null_mut();
        let mut actual_frame_count: c_int = frame_count as _;

        match unsafe {
            raw::soundio_instream_begin_read(
                self.instream,
                &mut areas as *mut _,
                &mut actual_frame_count as *mut _,
            )
        } {
            0 => {
                self.read_started = true;
                self.frame_count = actual_frame_count as _;
                // Return now if there are no frames to actually read.
                if actual_frame_count <= 0 {
                    return Ok(0);
                }
                let cc = self.channel_count();
                self.channel_areas = vec![
                    raw::SoundIoChannelArea {
                        ptr: ptr::null_mut(),
                        step: 0
                    };
                    cc
                ];
                unsafe {
                    self.channel_areas.copy_from_slice(slice::from_raw_parts::<
                        raw::SoundIoChannelArea,
                    >(areas, cc));
                }
                Ok(actual_frame_count as _)
            }
            e => Err(e.into()),
        }
    }

    /// Commits the read that you began with `begin_read()`.
    ///
    /// Errors are currently just printed to the console and ignored.
    ///
    /// # Errors
    ///
    /// * `Error::Streaming`
    /// * `Error::Underflow` - an underflow caused this call to fail. You might
    ///   also get an `overflow_callback()`, and you might not get
    ///   this error code when an underflow occurs. Unlike `Error::Streaming`,
    ///   the instream is still in a valid state and streaming can continue.
    pub fn end_read(&mut self) {
        if self.read_started {
            unsafe {
                match raw::soundio_instream_end_read(self.instream) {
                    0 => {
                        self.read_started = false;
                    }
                    x => println!("Error ending instream: {}", Error::from(x)),
                }
            }
        }
    }

    /// Get the minimum frame count that you can call `begin_read()` with.
    /// Retrieve this value before calling `begin_read()` to ensure you read the correct number
    /// of frames.
    pub fn frame_count_min(&self) -> usize {
        self.frame_count_min
    }

    /// Get the maximum frame count that you can call `begin_read()` with.
    /// Retrieve this value before calling `begin_read()` to ensure you read the correct number
    /// of frames.
    pub fn frame_count_max(&self) -> usize {
        self.frame_count_max
    }

    /// Get the actual frame count available for reading, as returned by `begin_read()`.
    /// Panics if you haven't called `begin_read()` yet.
    pub fn frame_count(&self) -> usize {
        assert!(self.read_started);
        self.frame_count
    }

    /// Get latency in seconds due to software only, not including hardware.
    pub fn software_latency(&self) -> f64 {
        unsafe { (*self.instream).software_latency as _ }
    }

    /// Return the number of channels in this stream. Guaranteed to be at least 1.
    pub fn channel_count(&self) -> usize {
        unsafe { (*self.instream).layout.channel_count as _ }
    }

    /// Get the sample rate in Hertz.
pub fn sample_rate(&self) -> i32 {
        unsafe { (*self.instream).sample_rate as _ }
    }

    /// Obtain the number of seconds that the next frame of sound being
    /// captured will take to arrive in the buffer, plus the amount of time that is
    /// represented in the buffer. This includes both software and hardware latency.
    ///
    /// # Errors
    ///
    /// * `Error::Streaming`
    ///
    pub fn get_latency(&mut self) -> Result<f64> {
        let mut x: c_double = 0.0;
        match unsafe { raw::soundio_instream_get_latency(self.instream, &mut x as *mut c_double) } {
            0 => Ok(x),
            e => Err(e.into()),
        }
    }

    /// Get the value of a sample. This panics if the `channel` or `frame` are
    /// out of range or if you haven't called `begin_read()` yet.
    ///
    /// If you request a different type from the actual one it will be converted.
    ///
    /// # Examples
    ///
    /// ```
    /// fn read_callback(stream: &mut soundio::InStreamReader) {
    ///     let frame_count_max = stream.frame_count_max();
    ///     stream.begin_read(frame_count_max).unwrap();
    ///     for c in 0..stream.channel_count() {
    ///         for f in 0..stream.frame_count() {
    ///             do_something_with(stream.sample::<i16>(c, f));
    ///         }
    ///     }
    /// }
    /// # fn do_something_with(_: i16) { }
    /// ```
    pub fn sample<T: Sample>(&self, channel: usize, frame: usize) -> T {
        assert!(self.read_started);
        assert!(channel < self.channel_count(), "Channel out of range");
        assert!(frame < self.frame_count(), "Frame out of range");

        unsafe {
            let ptr = self.channel_areas[channel]
                .ptr
                .add(frame * self.channel_areas[channel].step as usize) as *mut u8;

            match (*self.instream).format {
                raw::SoundIoFormat::SoundIoFormatS8 => T::from_i8(i8::from_raw_le(ptr)),
                raw::SoundIoFormat::SoundIoFormatU8 => T::from_u8(u8::from_raw_le(ptr)),
                raw::SoundIoFormat::SoundIoFormatS16LE => T::from_i16(i16::from_raw_le(ptr)),
                raw::SoundIoFormat::SoundIoFormatS16BE => T::from_i16(i16::from_raw_be(ptr)),
                raw::SoundIoFormat::SoundIoFormatU16LE => T::from_u16(u16::from_raw_le(ptr)),
                raw::SoundIoFormat::SoundIoFormatU16BE => T::from_u16(u16::from_raw_be(ptr)),
                raw::SoundIoFormat::SoundIoFormatS24LE => T::from_i24(i24::from_raw_le(ptr)),
                raw::SoundIoFormat::SoundIoFormatS24BE => T::from_i24(i24::from_raw_be(ptr)),
                raw::SoundIoFormat::SoundIoFormatU24LE => T::from_u24(u24::from_raw_le(ptr)),
                raw::SoundIoFormat::SoundIoFormatU24BE => T::from_u24(u24::from_raw_be(ptr)),
                raw::SoundIoFormat::SoundIoFormatS32LE => T::from_i32(i32::from_raw_le(ptr)),
                raw::SoundIoFormat::SoundIoFormatS32BE => T::from_i32(i32::from_raw_be(ptr)),
                raw::SoundIoFormat::SoundIoFormatU32LE => T::from_u32(u32::from_raw_le(ptr)),
                raw::SoundIoFormat::SoundIoFormatU32BE => T::from_u32(u32::from_raw_be(ptr)),
                raw::SoundIoFormat::SoundIoFormatFloat32LE => T::from_f32(f32::from_raw_le(ptr)),
                raw::SoundIoFormat::SoundIoFormatFloat32BE => T::from_f32(f32::from_raw_be(ptr)),
                raw::SoundIoFormat::SoundIoFormatFloat64LE => T::from_f64(f64::from_raw_le(ptr)),
                raw::SoundIoFormat::SoundIoFormatFloat64BE => T::from_f64(f64::from_raw_be(ptr)),
                _ => panic!("Unknown format"),
            }
        }
    }

    // TODO: To achieve speed *and* safety I can use iterators. That will be in a future API.
}

impl<'a> Drop for InStreamReader<'a> {
    /// This will drop all of the frames from when you called `begin_read()`.
    ///
    /// Errors are currently just printed to the console and ignored.
    ///
    /// # Errors
    ///
    /// * `Error::Streaming`
    fn drop(&mut self) {
        if self.read_started {
            unsafe {
                match raw::soundio_instream_end_read(self.instream) {
                    0 => {}
                    x => println!("Error ending instream: {}", Error::from(x)),
                }
            }
        }
    }
}
start
identifier_name
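A minimal sketch of driving the `InStream` shown above, assuming `instream` came from `Device::open_instream()` as the doc comments state. `record` is an illustrative name and error handling is collapsed to `?`.

fn record(instream: &mut InStream) -> Result<()> {
    instream.start()?;      // begin periodic read callbacks
    instream.pause(true)?;  // low-power pause, where the backend supports it
    instream.pause(false)?; // resume; per the docs, do not call start() again
    Ok(())
}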
mod.rs
/// Toy x86_64 JIT use libc; use std::alloc::{alloc, dealloc, Layout}; use std::collections::BTreeMap; use std::convert::TryFrom; use std::ffi::c_void; use std::io::{Read, Write}; use std::mem::transmute; use std::ptr::write_bytes; use std::slice; mod x86; use crate::ir::Instruction; const PAGE_SIZE: usize = 4096; pub struct Program { contents: *mut u8, size: usize, } impl Program { pub fn new(size: usize) -> Self { // allocate some memory to write our instructions let size = size * PAGE_SIZE; let layout = Layout::from_size_align(size, PAGE_SIZE).unwrap(); let contents = unsafe { let raw = alloc(layout); write_bytes(raw, 0xc3, size); libc::mprotect(raw as *mut libc::c_void, size, libc::PROT_NONE); raw }; Program { contents, size } } pub fn into_sliceable(self) -> SliceableProgram { SliceableProgram::new(self) } pub fn into_callable(self) -> CallableProgram { CallableProgram::new(self) } } impl Drop for Program { fn drop(&mut self) { let layout = Layout::from_size_align(self.size, PAGE_SIZE).unwrap(); unsafe { dealloc(self.contents, layout); } } } pub struct SliceableProgram { program: Program, } impl SliceableProgram { pub fn new(program: Program) -> Self { unsafe { libc::mprotect( program.contents as *mut libc::c_void, program.size, libc::PROT_READ | libc::PROT_WRITE, ); } SliceableProgram { program } } pub fn as_slice(&self) -> &[u8] { unsafe { slice::from_raw_parts(self.program.contents, self.program.size) } } pub fn as_mut_slice(&mut self) -> &mut [u8] { unsafe { slice::from_raw_parts_mut(self.program.contents, self.program.size) } } pub fn lock(self) -> Program { unsafe { libc::mprotect( self.program.contents as *mut libc::c_void, self.program.size, libc::PROT_NONE, ); } self.program } } pub struct CallableProgram { program: Program, } impl CallableProgram { pub fn new(program: Program) -> Self { unsafe { libc::mprotect( program.contents as *mut libc::c_void, program.size, libc::PROT_READ | libc::PROT_EXEC, ); } CallableProgram { program } } pub fn as_function( &mut self, ) -> unsafe extern "C" fn( *mut u8, *mut c_void, *mut WriteWrapper, *mut c_void, *mut ReadWrapper, ) -> i32 { unsafe { transmute(self.program.contents) } } pub fn lock(self) -> Program { self.program } } #[derive(Debug)] struct JumpInfo { asm_offset: usize, target: usize, } pub fn
(instructions: &[Instruction]) -> Program { // we'll emit something that respects x86_64 system-v: // rdi (1st parameter): pointer to cell array // rsi (2nd parameter): pointer to output function // rdx (3rd parameter): pointer to WriteWrapper // rcx (4th parameter): pointer to input function // r8 (5th parameter): pointer to ReadWrapper let program = Program::new(8); let mut sliceable = program.into_sliceable(); let slice = sliceable.as_mut_slice(); let mut emitter = x86::Emitter::new(slice); // we receive a stack that's misaligned by 8 bytes at the start of the function // we always push on argument onto it and that aligns it :) // move arguments to saved registers // rsi -> rbp // rdx -> r12 // rcx -> r13 // r8 -> r14 emitter.push(x86::Register::Rbp); emitter.push(x86::Register::R12); emitter.push(x86::Register::R13); emitter.push(x86::Register::R14); emitter.mov64_reg(x86::Register::Rbp, x86::Register::Rsi); emitter.mov64_reg(x86::Register::R12, x86::Register::Rdx); emitter.mov64_reg(x86::Register::R13, x86::Register::Rcx); emitter.mov64_reg(x86::Register::R14, x86::Register::R8); let mut jumps = BTreeMap::new(); for (idx, instr) in instructions.iter().enumerate() { match instr { Instruction::IncrementPointer(inc) => { if inc.is_positive() { emitter.addu8_reg(x86::Register::Rdi, *inc as u8); } else if inc.is_negative() { emitter.subu8_reg(x86::Register::Rdi, -*inc as u8); } } Instruction::IncrementByte(inc) => { if inc.is_positive() { emitter.addu8_ptr(x86::Register::Rdi, *inc as u8); } else if inc.is_negative() { emitter.subu8_ptr(x86::Register::Rdi, -*inc as u8); } } Instruction::IncrementPointerAndByte(pointer_inc, byte_inc) => { if byte_inc.is_positive() { emitter.addu8_ptr_u8disp( x86::Register::Rdi, *pointer_inc as u8, *byte_inc as u8, ); } else if byte_inc.is_negative() { emitter.subu8_ptr_u8disp( x86::Register::Rdi, *pointer_inc as u8, -*byte_inc as u8, ); } if pointer_inc.is_positive() { emitter.addu8_reg(x86::Register::Rdi, *pointer_inc as u8); } else if pointer_inc.is_negative() { emitter.subu8_reg(x86::Register::Rdi, -*pointer_inc as u8); } } // The way I've implemented jumps is terribly hacky. 
I should probably find a better solution someday.
            Instruction::JumpBackwardsIfNotZero(jmp) => {
                emitter.cmpu8_ptr(x86::Register::Rdi, 0);
                let jumpinfo = JumpInfo {
                    target: idx - jmp,
                    asm_offset: emitter.index,
                };
                jumps.insert(idx, jumpinfo);
                // bogus temp value
                emitter.jneu32(42);
            }
            Instruction::JumpForwardsIfZero(jmp) => {
                emitter.cmpu8_ptr(x86::Register::Rdi, 0);
                let jumpinfo = JumpInfo {
                    target: idx + jmp,
                    asm_offset: emitter.index,
                };
                jumps.insert(idx, jumpinfo);
                // bogus temp value
                emitter.jeu32(42);
            }
            Instruction::OutputByte => {
                // move ptr to WriteWrapper to Rsi
                emitter.mov64_reg(x86::Register::Rsi, x86::Register::R12);
                emitter.push(x86::Register::Rdi);
                emitter.call64(x86::Register::Rbp);
                emitter.pop(x86::Register::Rdi);
            }
            Instruction::ReadByte => {
                // move ptr to ReadWrapper to Rsi
                emitter.mov64_reg(x86::Register::Rsi, x86::Register::R14);
                emitter.push(x86::Register::Rdi);
                emitter.call64(x86::Register::R13);
                emitter.pop(x86::Register::Rdi);
            }
        }
    }
    emitter.pop(x86::Register::R14);
    emitter.pop(x86::Register::R13);
    emitter.pop(x86::Register::R12);
    emitter.pop(x86::Register::Rbp);

    for jumpinfo in jumps.values() {
        let target = jumps.get(&jumpinfo.target).unwrap();
        // this is kinda nuts, but I'll try to explain
        // we encode jumps as x86 *near* (used to be short but brainfuck hates me) jumps
        // which are *six* bytes: two opcode bytes and four bytes of offset from the NEXT INSTRUCTION (I think?)
        // we do this indexing craziness to rewrite our offset to our target's next instruction offset
        // TODO: x86 jumps are hard. IIRC MIPS also does this. Check when I'm less sleepy and fix these comments
        let offset = (target.asm_offset as isize) - (jumpinfo.asm_offset as isize);
        let le_bytes = i32::try_from(offset)
            .expect("offset overflowed i32")
            .to_le_bytes();
        slice[jumpinfo.asm_offset + 2] = le_bytes[0];
        slice[jumpinfo.asm_offset + 3] = le_bytes[1];
        slice[jumpinfo.asm_offset + 4] = le_bytes[2];
        slice[jumpinfo.asm_offset + 5] = le_bytes[3];
    }

    sliceable.lock()
}

unsafe extern "C" fn write_trampoline(byte_ptr: *mut u8, wrapper_ptr: *mut WriteWrapper) {
    let wrapper = &*wrapper_ptr;
    let output = &mut *wrapper.write;
    let byte = *byte_ptr;
    output.write_all(&[byte]).unwrap();
}

unsafe extern "C" fn read_trampoline(byte_ptr: *mut u8, wrapper_ptr: *mut ReadWrapper) {
    let wrapper = &*wrapper_ptr;
    let input = &mut *wrapper.read;
    let slice = slice::from_raw_parts_mut(byte_ptr, 1);
    input.read_exact(slice).unwrap();
}

// I thought about a Wrapper<T>, but I'm not going to muck around with generics here
pub struct WriteWrapper {
    write: *mut dyn Write,
}

pub struct ReadWrapper {
    read: *mut dyn Read,
}

pub struct Vm {
    program: CallableProgram,
    cells: [u8; 30000],
}

impl Vm {
    pub fn new(program: Program) -> Self {
        Vm {
            program: program.into_callable(),
            cells: [0; 30000],
        }
    }

    pub fn vm_loop(&mut self, input: &mut dyn Read, output: &mut dyn Write) {
        let program = self.program.as_function();
        let mut out_wrapper = WriteWrapper {
            write: output as *const dyn Write as *mut dyn Write,
        };
        let mut in_wrapper = ReadWrapper {
            read: input as *const dyn Read as *mut dyn Read,
        };
        unsafe {
            program(
                self.cells.as_mut_ptr() as *mut u8,
                write_trampoline as *mut c_void,
                &mut out_wrapper as *mut WriteWrapper,
                read_trampoline as *mut c_void,
                &mut in_wrapper as *mut ReadWrapper,
            )
        };
    }
}
transform
identifier_name
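A sketch isolating the rel32 patch that the fix-up loop above performs. A near jcc is six bytes (two opcode bytes plus a four-byte offset relative to the next instruction), so when one jcc targets the instruction just past its partner jcc, the +6 cancels on both sides and the raw delta of the two asm_offsets is the value stored little-endian at bytes 2..6. `patch_rel32` is an illustrative helper, not from the source.

fn patch_rel32(code: &mut [u8], jump_at: usize, target_at: usize) {
    // rel32 = (target_at + 6) - (jump_at + 6) = target_at - jump_at
    let rel = (target_at as isize - jump_at as isize) as i32;
    // Skip the two opcode bytes, then write the offset little-endian.
    code[jump_at + 2..jump_at + 6].copy_from_slice(&rel.to_le_bytes());
}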
mod.rs
/// Toy x86_64 JIT use libc; use std::alloc::{alloc, dealloc, Layout}; use std::collections::BTreeMap; use std::convert::TryFrom; use std::ffi::c_void; use std::io::{Read, Write}; use std::mem::transmute; use std::ptr::write_bytes; use std::slice; mod x86; use crate::ir::Instruction; const PAGE_SIZE: usize = 4096; pub struct Program { contents: *mut u8, size: usize, } impl Program { pub fn new(size: usize) -> Self { // allocate some memory to write our instructions let size = size * PAGE_SIZE; let layout = Layout::from_size_align(size, PAGE_SIZE).unwrap(); let contents = unsafe { let raw = alloc(layout); write_bytes(raw, 0xc3, size); libc::mprotect(raw as *mut libc::c_void, size, libc::PROT_NONE); raw }; Program { contents, size } } pub fn into_sliceable(self) -> SliceableProgram { SliceableProgram::new(self) } pub fn into_callable(self) -> CallableProgram { CallableProgram::new(self) } } impl Drop for Program { fn drop(&mut self) { let layout = Layout::from_size_align(self.size, PAGE_SIZE).unwrap(); unsafe { dealloc(self.contents, layout); } } } pub struct SliceableProgram { program: Program, } impl SliceableProgram { pub fn new(program: Program) -> Self { unsafe { libc::mprotect( program.contents as *mut libc::c_void, program.size, libc::PROT_READ | libc::PROT_WRITE, ); } SliceableProgram { program } } pub fn as_slice(&self) -> &[u8]
pub fn as_mut_slice(&mut self) -> &mut [u8] { unsafe { slice::from_raw_parts_mut(self.program.contents, self.program.size) } } pub fn lock(self) -> Program { unsafe { libc::mprotect( self.program.contents as *mut libc::c_void, self.program.size, libc::PROT_NONE, ); } self.program } } pub struct CallableProgram { program: Program, } impl CallableProgram { pub fn new(program: Program) -> Self { unsafe { libc::mprotect( program.contents as *mut libc::c_void, program.size, libc::PROT_READ | libc::PROT_EXEC, ); } CallableProgram { program } } pub fn as_function( &mut self, ) -> unsafe extern "C" fn( *mut u8, *mut c_void, *mut WriteWrapper, *mut c_void, *mut ReadWrapper, ) -> i32 { unsafe { transmute(self.program.contents) } } pub fn lock(self) -> Program { self.program } } #[derive(Debug)] struct JumpInfo { asm_offset: usize, target: usize, } pub fn transform(instructions: &[Instruction]) -> Program { // we'll emit something that respects x86_64 system-v: // rdi (1st parameter): pointer to cell array // rsi (2nd parameter): pointer to output function // rdx (3rd parameter): pointer to WriteWrapper // rcx (4th parameter): pointer to input function // r8 (5th parameter): pointer to ReadWrapper let program = Program::new(8); let mut sliceable = program.into_sliceable(); let slice = sliceable.as_mut_slice(); let mut emitter = x86::Emitter::new(slice); // we receive a stack that's misaligned by 8 bytes at the start of the function // we always push on argument onto it and that aligns it :) // move arguments to saved registers // rsi -> rbp // rdx -> r12 // rcx -> r13 // r8 -> r14 emitter.push(x86::Register::Rbp); emitter.push(x86::Register::R12); emitter.push(x86::Register::R13); emitter.push(x86::Register::R14); emitter.mov64_reg(x86::Register::Rbp, x86::Register::Rsi); emitter.mov64_reg(x86::Register::R12, x86::Register::Rdx); emitter.mov64_reg(x86::Register::R13, x86::Register::Rcx); emitter.mov64_reg(x86::Register::R14, x86::Register::R8); let mut jumps = BTreeMap::new(); for (idx, instr) in instructions.iter().enumerate() { match instr { Instruction::IncrementPointer(inc) => { if inc.is_positive() { emitter.addu8_reg(x86::Register::Rdi, *inc as u8); } else if inc.is_negative() { emitter.subu8_reg(x86::Register::Rdi, -*inc as u8); } } Instruction::IncrementByte(inc) => { if inc.is_positive() { emitter.addu8_ptr(x86::Register::Rdi, *inc as u8); } else if inc.is_negative() { emitter.subu8_ptr(x86::Register::Rdi, -*inc as u8); } } Instruction::IncrementPointerAndByte(pointer_inc, byte_inc) => { if byte_inc.is_positive() { emitter.addu8_ptr_u8disp( x86::Register::Rdi, *pointer_inc as u8, *byte_inc as u8, ); } else if byte_inc.is_negative() { emitter.subu8_ptr_u8disp( x86::Register::Rdi, *pointer_inc as u8, -*byte_inc as u8, ); } if pointer_inc.is_positive() { emitter.addu8_reg(x86::Register::Rdi, *pointer_inc as u8); } else if pointer_inc.is_negative() { emitter.subu8_reg(x86::Register::Rdi, -*pointer_inc as u8); } } // The way I've implemented jumps is terribly hacky. 
I should probably find a better solution someday.
            Instruction::JumpBackwardsIfNotZero(jmp) => {
                emitter.cmpu8_ptr(x86::Register::Rdi, 0);
                let jumpinfo = JumpInfo {
                    target: idx - jmp,
                    asm_offset: emitter.index,
                };
                jumps.insert(idx, jumpinfo);
                // bogus temp value
                emitter.jneu32(42);
            }
            Instruction::JumpForwardsIfZero(jmp) => {
                emitter.cmpu8_ptr(x86::Register::Rdi, 0);
                let jumpinfo = JumpInfo {
                    target: idx + jmp,
                    asm_offset: emitter.index,
                };
                jumps.insert(idx, jumpinfo);
                // bogus temp value
                emitter.jeu32(42);
            }
            Instruction::OutputByte => {
                // move ptr to WriteWrapper to Rsi
                emitter.mov64_reg(x86::Register::Rsi, x86::Register::R12);
                emitter.push(x86::Register::Rdi);
                emitter.call64(x86::Register::Rbp);
                emitter.pop(x86::Register::Rdi);
            }
            Instruction::ReadByte => {
                // move ptr to ReadWrapper to Rsi
                emitter.mov64_reg(x86::Register::Rsi, x86::Register::R14);
                emitter.push(x86::Register::Rdi);
                emitter.call64(x86::Register::R13);
                emitter.pop(x86::Register::Rdi);
            }
        }
    }
    emitter.pop(x86::Register::R14);
    emitter.pop(x86::Register::R13);
    emitter.pop(x86::Register::R12);
    emitter.pop(x86::Register::Rbp);

    for jumpinfo in jumps.values() {
        let target = jumps.get(&jumpinfo.target).unwrap();
        // this is kinda nuts, but I'll try to explain
        // we encode jumps as x86 *near* (used to be short but brainfuck hates me) jumps
        // which are *six* bytes: two opcode bytes and four bytes of offset from the NEXT INSTRUCTION (I think?)
        // we do this indexing craziness to rewrite our offset to our target's next instruction offset
        // TODO: x86 jumps are hard. IIRC MIPS also does this. Check when I'm less sleepy and fix these comments
        let offset = (target.asm_offset as isize) - (jumpinfo.asm_offset as isize);
        let le_bytes = i32::try_from(offset)
            .expect("offset overflowed i32")
            .to_le_bytes();
        slice[jumpinfo.asm_offset + 2] = le_bytes[0];
        slice[jumpinfo.asm_offset + 3] = le_bytes[1];
        slice[jumpinfo.asm_offset + 4] = le_bytes[2];
        slice[jumpinfo.asm_offset + 5] = le_bytes[3];
    }

    sliceable.lock()
}

unsafe extern "C" fn write_trampoline(byte_ptr: *mut u8, wrapper_ptr: *mut WriteWrapper) {
    let wrapper = &*wrapper_ptr;
    let output = &mut *wrapper.write;
    let byte = *byte_ptr;
    output.write_all(&[byte]).unwrap();
}

unsafe extern "C" fn read_trampoline(byte_ptr: *mut u8, wrapper_ptr: *mut ReadWrapper) {
    let wrapper = &*wrapper_ptr;
    let input = &mut *wrapper.read;
    let slice = slice::from_raw_parts_mut(byte_ptr, 1);
    input.read_exact(slice).unwrap();
}

// I thought about a Wrapper<T>, but I'm not going to muck around with generics here
pub struct WriteWrapper {
    write: *mut dyn Write,
}

pub struct ReadWrapper {
    read: *mut dyn Read,
}

pub struct Vm {
    program: CallableProgram,
    cells: [u8; 30000],
}

impl Vm {
    pub fn new(program: Program) -> Self {
        Vm {
            program: program.into_callable(),
            cells: [0; 30000],
        }
    }

    pub fn vm_loop(&mut self, input: &mut dyn Read, output: &mut dyn Write) {
        let program = self.program.as_function();
        let mut out_wrapper = WriteWrapper {
            write: output as *const dyn Write as *mut dyn Write,
        };
        let mut in_wrapper = ReadWrapper {
            read: input as *const dyn Read as *mut dyn Read,
        };
        unsafe {
            program(
                self.cells.as_mut_ptr() as *mut u8,
                write_trampoline as *mut c_void,
                &mut out_wrapper as *mut WriteWrapper,
                read_trampoline as *mut c_void,
                &mut in_wrapper as *mut ReadWrapper,
            )
        };
    }
}
{ unsafe { slice::from_raw_parts(self.program.contents, self.program.size) } }
identifier_body
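A sketch of the W^X lifecycle the types above enforce: pages are PROT_NONE at rest, writable only through SliceableProgram, and executable only through CallableProgram, with 0xc3 (ret) padding so stray jumps return harmlessly. Only methods shown in the source are used; `lifecycle` is an illustrative name.

fn lifecycle() {
    let program = Program::new(1);                // one page, PROT_NONE, filled with ret
    let mut sliceable = program.into_sliceable(); // PROT_READ | PROT_WRITE
    sliceable.as_mut_slice()[0] = 0xc3;           // emit machine code here
    let program = sliceable.lock();               // back to PROT_NONE
    let _callable = program.into_callable();      // PROT_READ | PROT_EXEC
}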
mod.rs
/// Toy x86_64 JIT use libc; use std::alloc::{alloc, dealloc, Layout}; use std::collections::BTreeMap; use std::convert::TryFrom; use std::ffi::c_void; use std::io::{Read, Write}; use std::mem::transmute; use std::ptr::write_bytes; use std::slice; mod x86; use crate::ir::Instruction; const PAGE_SIZE: usize = 4096; pub struct Program { contents: *mut u8, size: usize, } impl Program { pub fn new(size: usize) -> Self { // allocate some memory to write our instructions let size = size * PAGE_SIZE; let layout = Layout::from_size_align(size, PAGE_SIZE).unwrap(); let contents = unsafe { let raw = alloc(layout); write_bytes(raw, 0xc3, size); libc::mprotect(raw as *mut libc::c_void, size, libc::PROT_NONE); raw }; Program { contents, size } } pub fn into_sliceable(self) -> SliceableProgram { SliceableProgram::new(self) } pub fn into_callable(self) -> CallableProgram { CallableProgram::new(self) } } impl Drop for Program { fn drop(&mut self) { let layout = Layout::from_size_align(self.size, PAGE_SIZE).unwrap(); unsafe { dealloc(self.contents, layout); } } } pub struct SliceableProgram { program: Program, } impl SliceableProgram { pub fn new(program: Program) -> Self { unsafe { libc::mprotect( program.contents as *mut libc::c_void, program.size, libc::PROT_READ | libc::PROT_WRITE, ); } SliceableProgram { program } } pub fn as_slice(&self) -> &[u8] { unsafe { slice::from_raw_parts(self.program.contents, self.program.size) }
unsafe { slice::from_raw_parts_mut(self.program.contents, self.program.size) } } pub fn lock(self) -> Program { unsafe { libc::mprotect( self.program.contents as *mut libc::c_void, self.program.size, libc::PROT_NONE, ); } self.program } } pub struct CallableProgram { program: Program, } impl CallableProgram { pub fn new(program: Program) -> Self { unsafe { libc::mprotect( program.contents as *mut libc::c_void, program.size, libc::PROT_READ | libc::PROT_EXEC, ); } CallableProgram { program } } pub fn as_function( &mut self, ) -> unsafe extern "C" fn( *mut u8, *mut c_void, *mut WriteWrapper, *mut c_void, *mut ReadWrapper, ) -> i32 { unsafe { transmute(self.program.contents) } } pub fn lock(self) -> Program { self.program } } #[derive(Debug)] struct JumpInfo { asm_offset: usize, target: usize, } pub fn transform(instructions: &[Instruction]) -> Program { // we'll emit something that respects x86_64 system-v: // rdi (1st parameter): pointer to cell array // rsi (2nd parameter): pointer to output function // rdx (3rd parameter): pointer to WriteWrapper // rcx (4th parameter): pointer to input function // r8 (5th parameter): pointer to ReadWrapper let program = Program::new(8); let mut sliceable = program.into_sliceable(); let slice = sliceable.as_mut_slice(); let mut emitter = x86::Emitter::new(slice); // we receive a stack that's misaligned by 8 bytes at the start of the function // we always push on argument onto it and that aligns it :) // move arguments to saved registers // rsi -> rbp // rdx -> r12 // rcx -> r13 // r8 -> r14 emitter.push(x86::Register::Rbp); emitter.push(x86::Register::R12); emitter.push(x86::Register::R13); emitter.push(x86::Register::R14); emitter.mov64_reg(x86::Register::Rbp, x86::Register::Rsi); emitter.mov64_reg(x86::Register::R12, x86::Register::Rdx); emitter.mov64_reg(x86::Register::R13, x86::Register::Rcx); emitter.mov64_reg(x86::Register::R14, x86::Register::R8); let mut jumps = BTreeMap::new(); for (idx, instr) in instructions.iter().enumerate() { match instr { Instruction::IncrementPointer(inc) => { if inc.is_positive() { emitter.addu8_reg(x86::Register::Rdi, *inc as u8); } else if inc.is_negative() { emitter.subu8_reg(x86::Register::Rdi, -*inc as u8); } } Instruction::IncrementByte(inc) => { if inc.is_positive() { emitter.addu8_ptr(x86::Register::Rdi, *inc as u8); } else if inc.is_negative() { emitter.subu8_ptr(x86::Register::Rdi, -*inc as u8); } } Instruction::IncrementPointerAndByte(pointer_inc, byte_inc) => { if byte_inc.is_positive() { emitter.addu8_ptr_u8disp( x86::Register::Rdi, *pointer_inc as u8, *byte_inc as u8, ); } else if byte_inc.is_negative() { emitter.subu8_ptr_u8disp( x86::Register::Rdi, *pointer_inc as u8, -*byte_inc as u8, ); } if pointer_inc.is_positive() { emitter.addu8_reg(x86::Register::Rdi, *pointer_inc as u8); } else if pointer_inc.is_negative() { emitter.subu8_reg(x86::Register::Rdi, -*pointer_inc as u8); } } // The way I've implemented jumps is terribly hacky. 
I should probably find a better solution someday Instruction::JumpBackwardsIfNotZero(jmp) => { emitter.cmpu8_ptr(x86::Register::Rdi, 0); let jumpinfo = JumpInfo { target: idx - jmp, asm_offset: emitter.index, }; jumps.insert(idx, jumpinfo); // bogus temp value emitter.jneu32(42); } Instruction::JumpForwardsIfZero(jmp) => { emitter.cmpu8_ptr(x86::Register::Rdi, 0); let jumpinfo = JumpInfo { target: idx + jmp, asm_offset: emitter.index, }; jumps.insert(idx, jumpinfo); // bogus temp value emitter.jeu32(42); } Instruction::OutputByte => { // move ptr to WriteWrapper to Rsi emitter.mov64_reg(x86::Register::Rsi, x86::Register::R12); emitter.push(x86::Register::Rdi); emitter.call64(x86::Register::Rbp); emitter.pop(x86::Register::Rdi); } Instruction::ReadByte => { // move ptr to ReadWrapper to Rsi emitter.mov64_reg(x86::Register::Rsi, x86::Register::R14); emitter.push(x86::Register::Rdi); emitter.call64(x86::Register::R13); emitter.pop(x86::Register::Rdi); } } } emitter.pop(x86::Register::R14); emitter.pop(x86::Register::R13); emitter.pop(x86::Register::R12); emitter.pop(x86::Register::Rbp); for jumpinfo in jumps.values() { let target = jumps.get(&jumpinfo.target).unwrap(); // this is kinda nuts, but I'll try to explain // we encode jumps as x86 *near* (used to be short but brainfuck hates me) jumps // which are *six* bytes: two opcodes and 7 bytes of offset from the NEXT INSTRUCTION (I think?) // we do this indexing crazyness to rewrite our offset to our target's next instruction offset // TODO: x86 jumps are hard. IIRC MIPS also does this. Check when I'm less sleepy and fix these comments let offset = (target.asm_offset as isize) - (jumpinfo.asm_offset as isize); let le_bytes = i32::try_from(offset) .expect("offset overflowed i32") .to_le_bytes(); slice[jumpinfo.asm_offset + 2] = le_bytes[0]; slice[jumpinfo.asm_offset + 3] = le_bytes[1]; slice[jumpinfo.asm_offset + 4] = le_bytes[2]; slice[jumpinfo.asm_offset + 5] = le_bytes[3]; } sliceable.lock() } unsafe extern "C" fn write_trampoline(byte_ptr: *mut u8, wrapper_ptr: *mut WriteWrapper) { let wrapper = &*wrapper_ptr; let output = &mut *wrapper.write; let byte = *byte_ptr; output.write_all(&[byte]).unwrap(); } unsafe extern "C" fn read_trampoline(byte_ptr: *mut u8, wrapper_ptr: *mut ReadWrapper) { let wrapper = &*wrapper_ptr; let input = &mut *wrapper.read; let slice = slice::from_raw_parts_mut(byte_ptr, 1); input.read_exact(slice).unwrap(); } // I thought about a Wrapper<T>, but I'm not going to muck aroung with generics here pub struct WriteWrapper { write: *mut dyn Write, } pub struct ReadWrapper { read: *mut dyn Read, } pub struct Vm { program: CallableProgram, cells: [u8; 30000], } impl Vm { pub fn new(program: Program) -> Self { Vm { program: program.into_callable(), cells: [0; 30000], } } pub fn vm_loop(&mut self, input: &mut dyn Read, output: &mut dyn Write) { let program = self.program.as_function(); let mut out_wrapper = WriteWrapper { write: output as *const dyn Write as *mut dyn Write, }; let mut in_wrapper = ReadWrapper { read: input as *const dyn Read as *mut dyn Read, }; unsafe { program( self.cells.as_mut_ptr() as *mut u8, write_trampoline as *mut c_void, &mut out_wrapper as *mut WriteWrapper, read_trampoline as *mut c_void, &mut in_wrapper as *mut ReadWrapper, ) }; } }
} pub fn as_mut_slice(&mut self) -> &mut [u8] {
random_line_split
file_system.rs
// Copyright 2021 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use once_cell::sync::OnceCell; use std::collections::hash_map::Entry; use std::collections::HashMap; use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::{Arc, Weak}; use super::*; use crate::auth::FsCred; use crate::lock::Mutex; use crate::task::Kernel; use crate::types::*; /// A file system that can be mounted in a namespace. pub struct FileSystem { root: OnceCell<DirEntryHandle>, next_inode: AtomicU64, ops: Box<dyn FileSystemOps>, /// The device ID of this filesystem. Returned in the st_dev field when stating an inode in /// this filesystem. pub dev_id: DeviceType, /// Whether DirEntries added to this filesystem should be considered permanent, instead of a /// cache of the backing storage. An example is tmpfs: the DirEntry tree *is* the backing /// storage, as opposed to ext4, which uses the DirEntry tree as a cache and removes unused /// nodes from it. pub permanent_entries: bool, /// A file-system global mutex to serialize rename operations. /// /// This mutex is useful because the invariants enforced during a rename /// operation involve many DirEntry objects. In the future, we might be /// able to remove this mutex, but we will need to think carefully about /// how rename operations can interleave. /// /// See DirEntry::rename. pub rename_mutex: Mutex<()>, /// The FsNode cache for this file system. /// /// When two directory entries are hard links to the same underlying inode, /// this cache lets us re-use the same FsNode object for both directory /// entries. /// /// Rather than calling FsNode::new directly, file systems should call /// FileSystem::get_or_create_node to see if the FsNode already exists in /// the cache. nodes: Mutex<HashMap<ino_t, Weak<FsNode>>>, /// DirEntryHandle cache for the filesystem. Currently only used by filesystems that set the /// permanent_entries flag, to store every node and make sure it doesn't get freed without /// being explicitly unlinked. entries: Mutex<HashMap<usize, DirEntryHandle>>, /// Hack meant to stand in for the fs_use_trans selinux feature. If set, this value will be set /// as the selinux label on any newly created inodes in the filesystem. pub selinux_context: OnceCell<FsString>, } impl FileSystem { /// Create a new filesystem. pub fn new(kernel: &Kernel, ops: impl FileSystemOps) -> FileSystemHandle { Self::new_internal(kernel, ops, false) } /// Create a new filesystem with the permanent_entries flag set. pub fn new_with_permanent_entries( kernel: &Kernel, ops: impl FileSystemOps, ) -> FileSystemHandle { Self::new_internal(kernel, ops, true) } /// Create a new filesystem and call set_root in one step. pub fn new_with_root( kernel: &Kernel, ops: impl FileSystemOps, root_node: FsNode, ) -> FileSystemHandle { let fs = Self::new_with_permanent_entries(kernel, ops); fs.set_root_node(root_node); fs } pub fn set_root(self: &FileSystemHandle, root: impl FsNodeOps) { self.set_root_node(FsNode::new_root(root)); } /// Set up the root of the filesystem. Must not be called more than once. 
pub fn set_root_node(self: &FileSystemHandle, mut root: FsNode) { if root.inode_num == 0 { root.inode_num = self.next_inode_num(); } root.set_fs(self); let root_node = Arc::new(root); self.nodes.lock().insert(root_node.inode_num, Arc::downgrade(&root_node)); let root = DirEntry::new(root_node, None, FsString::new()); assert!(self.root.set(root).is_ok(), "FileSystem::set_root can't be called more than once"); } fn new_internal( kernel: &Kernel, ops: impl FileSystemOps, permanent_entries: bool, ) -> FileSystemHandle { Arc::new(FileSystem { root: OnceCell::new(), next_inode: AtomicU64::new(1), ops: Box::new(ops), dev_id: kernel.device_registry.write().next_anonymous_dev_id(), permanent_entries, rename_mutex: Mutex::new(()), nodes: Mutex::new(HashMap::new()), entries: Mutex::new(HashMap::new()), selinux_context: OnceCell::new(), }) } /// The root directory entry of this file system. /// /// Panics if this file system does not have a root directory. pub fn root(&self) -> &DirEntryHandle { self.root.get().unwrap() } /// Get or create an FsNode for this file system. /// /// If inode_num is Some, then this function checks the node cache to /// determine whether this node is already open. If so, the function /// returns the existing FsNode. If not, the function calls the given /// create_fn function to create the FsNode. /// /// If inode_num is None, then this function assigns a new inode number /// and calls the given create_fn function to create the FsNode with the /// assigned number. /// /// Returns Err only if create_fn returns Err. pub fn get_or_create_node<F>( &self, inode_num: Option<ino_t>, create_fn: F, ) -> Result<FsNodeHandle, Errno> where F: FnOnce(ino_t) -> Result<FsNodeHandle, Errno>, { let inode_num = inode_num.unwrap_or_else(|| self.next_inode_num()); let mut nodes = self.nodes.lock(); match nodes.entry(inode_num) { Entry::Vacant(entry) => { let node = create_fn(inode_num)?; entry.insert(Arc::downgrade(&node)); Ok(node) } Entry::Occupied(mut entry) => { if let Some(node) = entry.get().upgrade() { return Ok(node); } let node = create_fn(inode_num)?; entry.insert(Arc::downgrade(&node)); Ok(node) } } } /// File systems that produce their own IDs for nodes should invoke this /// function. The ones who leave to this object to assign the IDs should /// call |create_node|. pub fn create_node_with_id( self: &Arc<Self>, ops: Box<dyn FsNodeOps>, id: ino_t, mode: FileMode, owner: FsCred, ) -> FsNodeHandle { if let Some(label) = self.selinux_context.get() { let _ = ops.set_xattr(b"security.selinux", label, XattrOp::Create); } let node = FsNode::new_uncached(ops, self, id, mode, owner); self.nodes.lock().insert(node.inode_num, Arc::downgrade(&node)); node } pub fn create_node( self: &Arc<Self>, ops: Box<dyn FsNodeOps>, mode: FileMode, owner: FsCred, ) -> FsNodeHandle { let inode_num = self.next_inode_num(); self.create_node_with_id(ops, inode_num, mode, owner) } pub fn create_node_with_ops( self: &Arc<Self>, ops: impl FsNodeOps, mode: FileMode, owner: FsCred, ) -> FsNodeHandle { self.create_node(Box::new(ops), mode, owner) } /// Remove the given FsNode from the node cache. /// /// Called from the Drop trait of FsNode. 
pub fn remove_node(&self, node: &mut FsNode) {
        let mut nodes = self.nodes.lock();
        if let Some(weak_node) = nodes.get(&node.inode_num) {
            if std::ptr::eq(weak_node.as_ptr(), node) {
                nodes.remove(&node.inode_num);
            }
        }
    }

    pub fn next_inode_num(&self) -> ino_t {
        assert!(!self.ops.generate_node_ids());
        self.next_inode.fetch_add(1, Ordering::Relaxed)
    }

    /// Move |renamed| that is at |old_name| in |old_parent| to |new_name| in |new_parent|,
    /// replacing |replaced|.
    /// If |replaced| exists and is a directory, this function must check that |renamed| is a
    /// directory and that |replaced| is empty.
    pub fn rename(
        &self,
        old_parent: &FsNodeHandle,
        old_name: &FsStr,
        new_parent: &FsNodeHandle,
        new_name: &FsStr,
        renamed: &FsNodeHandle,
        replaced: Option<&FsNodeHandle>,
    ) -> Result<(), Errno> {
        self.ops.rename(self, old_parent, old_name, new_parent, new_name, renamed, replaced)
    }

    /// Returns the `statfs` for this filesystem.
    ///
    /// Each `FileSystemOps` impl is expected to override this to return the specific statfs for
    /// the filesystem.
    ///
    /// Returns `ENOSYS` if the `FileSystemOps` don't implement `statfs`.
    pub fn statfs(&self) -> Result<statfs, Errno> {
        let mut stat = self.ops.statfs(self)?;
        if stat.f_frsize == 0
Ok(stat)
    }

    pub fn did_create_dir_entry(&self, entry: &DirEntryHandle) {
        if self.permanent_entries {
            self.entries.lock().insert(Arc::as_ptr(entry) as usize, entry.clone());
        }
    }

    pub fn will_destroy_dir_entry(&self, entry: &DirEntryHandle) {
        if self.permanent_entries {
            self.entries.lock().remove(&(Arc::as_ptr(entry) as usize));
        }
    }
}

/// The filesystem-implementation-specific data for FileSystem.
pub trait FileSystemOps: Send + Sync + 'static {
    /// Return information about this filesystem.
    ///
    /// A typical implementation looks like this:
    /// ```
    /// Ok(statfs::default(FILE_SYSTEM_MAGIC))
    /// ```
    /// or, if the filesystem wants to customize fields:
    /// ```
    /// Ok(statfs {
    ///     f_blocks: self.blocks,
    ///     ..statfs::default(FILE_SYSTEM_MAGIC)
    /// })
    /// ```
    fn statfs(&self, _fs: &FileSystem) -> Result<statfs, Errno>;

    /// Whether this file system generates its own node IDs.
    fn generate_node_ids(&self) -> bool {
        false
    }

    /// Rename the given node.
    ///
    /// The node to be renamed is passed as "renamed". It currently has
    /// old_name in old_parent. After the rename operation, it should have
    /// new_name in new_parent.
    ///
    /// If new_parent already has a child named new_name, that node is passed as
    /// "replaced". In that case, both "renamed" and "replaced" will be
    /// directories and the rename operation should succeed only if "replaced"
    /// is empty. The VFS will check that there are no children of "replaced" in
    /// the DirEntry cache, but the implementation of this function is
    /// responsible for checking that there are no children of replaced that are
    /// known only to the file system implementation (e.g., present on-disk but
    /// not in the DirEntry cache).
    fn rename(
        &self,
        _fs: &FileSystem,
        _old_parent: &FsNodeHandle,
        _old_name: &FsStr,
        _new_parent: &FsNodeHandle,
        _new_name: &FsStr,
        _renamed: &FsNodeHandle,
        _replaced: Option<&FsNodeHandle>,
    ) -> Result<(), Errno> {
        error!(EROFS)
    }
}

pub type FileSystemHandle = Arc<FileSystem>;
{ stat.f_frsize = stat.f_bsize as i64; }
conditional_block
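A sketch of the smallest `FileSystemOps` implementation the trait above permits: only `statfs()` is required, since `generate_node_ids()` and `rename()` have defaults. It reuses the `statfs::default(...)` pattern from the trait's own docs; `ExampleFs` and the magic constant (including its type) are assumptions, not part of the source.

// Hypothetical magic number; the concrete type expected by statfs::default is assumed.
const EXAMPLE_FS_MAGIC: u32 = 0x4558_4653;

struct ExampleFs;

impl FileSystemOps for ExampleFs {
    fn statfs(&self, _fs: &FileSystem) -> Result<statfs, Errno> {
        // Read-only defaults; rename() falls back to error!(EROFS).
        Ok(statfs::default(EXAMPLE_FS_MAGIC))
    }
}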
file_system.rs
// Copyright 2021 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use once_cell::sync::OnceCell; use std::collections::hash_map::Entry; use std::collections::HashMap; use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::{Arc, Weak}; use super::*; use crate::auth::FsCred; use crate::lock::Mutex; use crate::task::Kernel; use crate::types::*; /// A file system that can be mounted in a namespace. pub struct FileSystem { root: OnceCell<DirEntryHandle>, next_inode: AtomicU64, ops: Box<dyn FileSystemOps>, /// The device ID of this filesystem. Returned in the st_dev field when stating an inode in /// this filesystem. pub dev_id: DeviceType, /// Whether DirEntries added to this filesystem should be considered permanent, instead of a /// cache of the backing storage. An example is tmpfs: the DirEntry tree *is* the backing /// storage, as opposed to ext4, which uses the DirEntry tree as a cache and removes unused /// nodes from it. pub permanent_entries: bool, /// A file-system global mutex to serialize rename operations. /// /// This mutex is useful because the invariants enforced during a rename /// operation involve many DirEntry objects. In the future, we might be /// able to remove this mutex, but we will need to think carefully about /// how rename operations can interleave. /// /// See DirEntry::rename. pub rename_mutex: Mutex<()>, /// The FsNode cache for this file system. /// /// When two directory entries are hard links to the same underlying inode, /// this cache lets us re-use the same FsNode object for both directory /// entries. /// /// Rather than calling FsNode::new directly, file systems should call /// FileSystem::get_or_create_node to see if the FsNode already exists in /// the cache. nodes: Mutex<HashMap<ino_t, Weak<FsNode>>>, /// DirEntryHandle cache for the filesystem. Currently only used by filesystems that set the /// permanent_entries flag, to store every node and make sure it doesn't get freed without /// being explicitly unlinked. entries: Mutex<HashMap<usize, DirEntryHandle>>, /// Hack meant to stand in for the fs_use_trans selinux feature. If set, this value will be set /// as the selinux label on any newly created inodes in the filesystem. pub selinux_context: OnceCell<FsString>, } impl FileSystem { /// Create a new filesystem. pub fn new(kernel: &Kernel, ops: impl FileSystemOps) -> FileSystemHandle { Self::new_internal(kernel, ops, false) } /// Create a new filesystem with the permanent_entries flag set. pub fn new_with_permanent_entries( kernel: &Kernel, ops: impl FileSystemOps, ) -> FileSystemHandle { Self::new_internal(kernel, ops, true) } /// Create a new filesystem and call set_root in one step. pub fn new_with_root( kernel: &Kernel, ops: impl FileSystemOps, root_node: FsNode, ) -> FileSystemHandle { let fs = Self::new_with_permanent_entries(kernel, ops); fs.set_root_node(root_node); fs } pub fn set_root(self: &FileSystemHandle, root: impl FsNodeOps) { self.set_root_node(FsNode::new_root(root)); } /// Set up the root of the filesystem. Must not be called more than once. 
pub fn set_root_node(self: &FileSystemHandle, mut root: FsNode) { if root.inode_num == 0 { root.inode_num = self.next_inode_num(); } root.set_fs(self); let root_node = Arc::new(root); self.nodes.lock().insert(root_node.inode_num, Arc::downgrade(&root_node)); let root = DirEntry::new(root_node, None, FsString::new()); assert!(self.root.set(root).is_ok(), "FileSystem::set_root can't be called more than once"); } fn new_internal( kernel: &Kernel, ops: impl FileSystemOps, permanent_entries: bool, ) -> FileSystemHandle { Arc::new(FileSystem { root: OnceCell::new(), next_inode: AtomicU64::new(1), ops: Box::new(ops), dev_id: kernel.device_registry.write().next_anonymous_dev_id(), permanent_entries, rename_mutex: Mutex::new(()), nodes: Mutex::new(HashMap::new()), entries: Mutex::new(HashMap::new()), selinux_context: OnceCell::new(), }) } /// The root directory entry of this file system. /// /// Panics if this file system does not have a root directory. pub fn root(&self) -> &DirEntryHandle { self.root.get().unwrap() } /// Get or create an FsNode for this file system. /// /// If inode_num is Some, then this function checks the node cache to /// determine whether this node is already open. If so, the function /// returns the existing FsNode. If not, the function calls the given /// create_fn function to create the FsNode. /// /// If inode_num is None, then this function assigns a new inode number /// and calls the given create_fn function to create the FsNode with the /// assigned number. /// /// Returns Err only if create_fn returns Err. pub fn get_or_create_node<F>( &self, inode_num: Option<ino_t>, create_fn: F, ) -> Result<FsNodeHandle, Errno> where F: FnOnce(ino_t) -> Result<FsNodeHandle, Errno>, { let inode_num = inode_num.unwrap_or_else(|| self.next_inode_num()); let mut nodes = self.nodes.lock(); match nodes.entry(inode_num) { Entry::Vacant(entry) => { let node = create_fn(inode_num)?; entry.insert(Arc::downgrade(&node)); Ok(node) } Entry::Occupied(mut entry) => { if let Some(node) = entry.get().upgrade() { return Ok(node); } let node = create_fn(inode_num)?; entry.insert(Arc::downgrade(&node)); Ok(node) } } } /// File systems that produce their own IDs for nodes should invoke this /// function. The ones who leave to this object to assign the IDs should /// call |create_node|. pub fn create_node_with_id( self: &Arc<Self>, ops: Box<dyn FsNodeOps>, id: ino_t, mode: FileMode, owner: FsCred, ) -> FsNodeHandle { if let Some(label) = self.selinux_context.get() { let _ = ops.set_xattr(b"security.selinux", label, XattrOp::Create); } let node = FsNode::new_uncached(ops, self, id, mode, owner); self.nodes.lock().insert(node.inode_num, Arc::downgrade(&node)); node } pub fn create_node( self: &Arc<Self>, ops: Box<dyn FsNodeOps>, mode: FileMode, owner: FsCred, ) -> FsNodeHandle { let inode_num = self.next_inode_num(); self.create_node_with_id(ops, inode_num, mode, owner) } pub fn create_node_with_ops( self: &Arc<Self>, ops: impl FsNodeOps, mode: FileMode, owner: FsCred, ) -> FsNodeHandle { self.create_node(Box::new(ops), mode, owner) } /// Remove the given FsNode from the node cache. /// /// Called from the Drop trait of FsNode. 
pub fn remove_node(&self, node: &mut FsNode) {
        let mut nodes = self.nodes.lock();
        if let Some(weak_node) = nodes.get(&node.inode_num) {
            if std::ptr::eq(weak_node.as_ptr(), node) {
                nodes.remove(&node.inode_num);
            }
        }
    }

    pub fn next_inode_num(&self) -> ino_t {
        assert!(!self.ops.generate_node_ids());
        self.next_inode.fetch_add(1, Ordering::Relaxed)
    }

    /// Move |renamed| that is at |old_name| in |old_parent| to |new_name| in |new_parent|,
    /// replacing |replaced|.
    /// If |replaced| exists and is a directory, this function must check that |renamed| is a
    /// directory and that |replaced| is empty.
    pub fn rename(
        &self,
        old_parent: &FsNodeHandle,
        old_name: &FsStr,
        new_parent: &FsNodeHandle,
        new_name: &FsStr,
        renamed: &FsNodeHandle,
        replaced: Option<&FsNodeHandle>,
    ) -> Result<(), Errno> {
        self.ops.rename(self, old_parent, old_name, new_parent, new_name, renamed, replaced)
    }

    /// Returns the `statfs` for this filesystem.
/// Returns `ENOSYS` if the `FileSystemOps` don't implement `statfs`.
    pub fn statfs(&self) -> Result<statfs, Errno> {
        let mut stat = self.ops.statfs(self)?;
        if stat.f_frsize == 0 {
            stat.f_frsize = stat.f_bsize as i64;
        }
        Ok(stat)
    }

    pub fn did_create_dir_entry(&self, entry: &DirEntryHandle) {
        if self.permanent_entries {
            self.entries.lock().insert(Arc::as_ptr(entry) as usize, entry.clone());
        }
    }

    pub fn will_destroy_dir_entry(&self, entry: &DirEntryHandle) {
        if self.permanent_entries {
            self.entries.lock().remove(&(Arc::as_ptr(entry) as usize));
        }
    }
}

/// The filesystem-implementation-specific data for FileSystem.
pub trait FileSystemOps: Send + Sync + 'static {
    /// Return information about this filesystem.
    ///
    /// A typical implementation looks like this:
    /// ```
    /// Ok(statfs::default(FILE_SYSTEM_MAGIC))
    /// ```
    /// or, if the filesystem wants to customize fields:
    /// ```
    /// Ok(statfs {
    ///     f_blocks: self.blocks,
    ///     ..statfs::default(FILE_SYSTEM_MAGIC)
    /// })
    /// ```
    fn statfs(&self, _fs: &FileSystem) -> Result<statfs, Errno>;

    /// Whether this file system generates its own node IDs.
    fn generate_node_ids(&self) -> bool {
        false
    }

    /// Rename the given node.
    ///
    /// The node to be renamed is passed as "renamed". It currently has
    /// old_name in old_parent. After the rename operation, it should have
    /// new_name in new_parent.
    ///
    /// If new_parent already has a child named new_name, that node is passed as
    /// "replaced". In that case, both "renamed" and "replaced" will be
    /// directories and the rename operation should succeed only if "replaced"
    /// is empty. The VFS will check that there are no children of "replaced" in
    /// the DirEntry cache, but the implementation of this function is
    /// responsible for checking that there are no children of replaced that are
    /// known only to the file system implementation (e.g., present on-disk but
    /// not in the DirEntry cache).
    fn rename(
        &self,
        _fs: &FileSystem,
        _old_parent: &FsNodeHandle,
        _old_name: &FsStr,
        _new_parent: &FsNodeHandle,
        _new_name: &FsStr,
        _renamed: &FsNodeHandle,
        _replaced: Option<&FsNodeHandle>,
    ) -> Result<(), Errno> {
        error!(EROFS)
    }
}

pub type FileSystemHandle = Arc<FileSystem>;
/// /// Each `FileSystemOps` impl is expected to override this to return the specific statfs for /// the filesystem. ///
random_line_split
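A sketch of the inode-cache round trip that `get_or_create_node()`'s docs above describe: a hit returns the cached `FsNode`, a miss runs the closure and caches a `Weak` reference so `remove_node()` can evict it later. `cached_lookup` and `node_from_disk` are hypothetical names standing in for a real filesystem's constructor.

fn cached_lookup(fs: &FileSystem, ino: ino_t) -> Result<FsNodeHandle, Errno> {
    fs.get_or_create_node(Some(ino), |ino| {
        // Cache miss: build the node. Only a Weak ref is kept in the cache,
        // so dropping all strong handles lets the node be freed.
        node_from_disk(fs, ino)
    })
}

// Hypothetical placeholder for FsNodeOps-backed node creation.
fn node_from_disk(_fs: &FileSystem, _ino: ino_t) -> Result<FsNodeHandle, Errno> {
    unimplemented!()
}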
file_system.rs
// Copyright 2021 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use once_cell::sync::OnceCell; use std::collections::hash_map::Entry; use std::collections::HashMap; use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::{Arc, Weak}; use super::*; use crate::auth::FsCred; use crate::lock::Mutex; use crate::task::Kernel; use crate::types::*; /// A file system that can be mounted in a namespace. pub struct FileSystem { root: OnceCell<DirEntryHandle>, next_inode: AtomicU64, ops: Box<dyn FileSystemOps>, /// The device ID of this filesystem. Returned in the st_dev field when stating an inode in /// this filesystem. pub dev_id: DeviceType, /// Whether DirEntries added to this filesystem should be considered permanent, instead of a /// cache of the backing storage. An example is tmpfs: the DirEntry tree *is* the backing /// storage, as opposed to ext4, which uses the DirEntry tree as a cache and removes unused /// nodes from it. pub permanent_entries: bool, /// A file-system global mutex to serialize rename operations. /// /// This mutex is useful because the invariants enforced during a rename /// operation involve many DirEntry objects. In the future, we might be /// able to remove this mutex, but we will need to think carefully about /// how rename operations can interleave. /// /// See DirEntry::rename. pub rename_mutex: Mutex<()>, /// The FsNode cache for this file system. /// /// When two directory entries are hard links to the same underlying inode, /// this cache lets us re-use the same FsNode object for both directory /// entries. /// /// Rather than calling FsNode::new directly, file systems should call /// FileSystem::get_or_create_node to see if the FsNode already exists in /// the cache. nodes: Mutex<HashMap<ino_t, Weak<FsNode>>>, /// DirEntryHandle cache for the filesystem. Currently only used by filesystems that set the /// permanent_entries flag, to store every node and make sure it doesn't get freed without /// being explicitly unlinked. entries: Mutex<HashMap<usize, DirEntryHandle>>, /// Hack meant to stand in for the fs_use_trans selinux feature. If set, this value will be set /// as the selinux label on any newly created inodes in the filesystem. pub selinux_context: OnceCell<FsString>, } impl FileSystem { /// Create a new filesystem. pub fn new(kernel: &Kernel, ops: impl FileSystemOps) -> FileSystemHandle { Self::new_internal(kernel, ops, false) } /// Create a new filesystem with the permanent_entries flag set. pub fn new_with_permanent_entries( kernel: &Kernel, ops: impl FileSystemOps, ) -> FileSystemHandle { Self::new_internal(kernel, ops, true) } /// Create a new filesystem and call set_root in one step. pub fn new_with_root( kernel: &Kernel, ops: impl FileSystemOps, root_node: FsNode, ) -> FileSystemHandle { let fs = Self::new_with_permanent_entries(kernel, ops); fs.set_root_node(root_node); fs } pub fn set_root(self: &FileSystemHandle, root: impl FsNodeOps) { self.set_root_node(FsNode::new_root(root)); } /// Set up the root of the filesystem. Must not be called more than once. 
pub fn set_root_node(self: &FileSystemHandle, mut root: FsNode) { if root.inode_num == 0 { root.inode_num = self.next_inode_num(); } root.set_fs(self); let root_node = Arc::new(root); self.nodes.lock().insert(root_node.inode_num, Arc::downgrade(&root_node)); let root = DirEntry::new(root_node, None, FsString::new()); assert!(self.root.set(root).is_ok(), "FileSystem::set_root can't be called more than once"); } fn new_internal( kernel: &Kernel, ops: impl FileSystemOps, permanent_entries: bool, ) -> FileSystemHandle { Arc::new(FileSystem { root: OnceCell::new(), next_inode: AtomicU64::new(1), ops: Box::new(ops), dev_id: kernel.device_registry.write().next_anonymous_dev_id(), permanent_entries, rename_mutex: Mutex::new(()), nodes: Mutex::new(HashMap::new()), entries: Mutex::new(HashMap::new()), selinux_context: OnceCell::new(), }) } /// The root directory entry of this file system. /// /// Panics if this file system does not have a root directory. pub fn
(&self) -> &DirEntryHandle { self.root.get().unwrap() } /// Get or create an FsNode for this file system. /// /// If inode_num is Some, then this function checks the node cache to /// determine whether this node is already open. If so, the function /// returns the existing FsNode. If not, the function calls the given /// create_fn function to create the FsNode. /// /// If inode_num is None, then this function assigns a new inode number /// and calls the given create_fn function to create the FsNode with the /// assigned number. /// /// Returns Err only if create_fn returns Err. pub fn get_or_create_node<F>( &self, inode_num: Option<ino_t>, create_fn: F, ) -> Result<FsNodeHandle, Errno> where F: FnOnce(ino_t) -> Result<FsNodeHandle, Errno>, { let inode_num = inode_num.unwrap_or_else(|| self.next_inode_num()); let mut nodes = self.nodes.lock(); match nodes.entry(inode_num) { Entry::Vacant(entry) => { let node = create_fn(inode_num)?; entry.insert(Arc::downgrade(&node)); Ok(node) } Entry::Occupied(mut entry) => { if let Some(node) = entry.get().upgrade() { return Ok(node); } let node = create_fn(inode_num)?; entry.insert(Arc::downgrade(&node)); Ok(node) } } } /// File systems that produce their own IDs for nodes should invoke this /// function. Those that leave ID assignment to this object should /// call |create_node|. pub fn create_node_with_id( self: &Arc<Self>, ops: Box<dyn FsNodeOps>, id: ino_t, mode: FileMode, owner: FsCred, ) -> FsNodeHandle { if let Some(label) = self.selinux_context.get() { let _ = ops.set_xattr(b"security.selinux", label, XattrOp::Create); } let node = FsNode::new_uncached(ops, self, id, mode, owner); self.nodes.lock().insert(node.inode_num, Arc::downgrade(&node)); node } pub fn create_node( self: &Arc<Self>, ops: Box<dyn FsNodeOps>, mode: FileMode, owner: FsCred, ) -> FsNodeHandle { let inode_num = self.next_inode_num(); self.create_node_with_id(ops, inode_num, mode, owner) } pub fn create_node_with_ops( self: &Arc<Self>, ops: impl FsNodeOps, mode: FileMode, owner: FsCred, ) -> FsNodeHandle { self.create_node(Box::new(ops), mode, owner) } /// Remove the given FsNode from the node cache. /// /// Called from the Drop trait of FsNode. pub fn remove_node(&self, node: &mut FsNode) { let mut nodes = self.nodes.lock(); if let Some(weak_node) = nodes.get(&node.inode_num) { if std::ptr::eq(weak_node.as_ptr(), node) { nodes.remove(&node.inode_num); } } } pub fn next_inode_num(&self) -> ino_t { assert!(!self.ops.generate_node_ids()); self.next_inode.fetch_add(1, Ordering::Relaxed) } /// Move |renamed| that is at |old_name| in |old_parent| to |new_name| in |new_parent|, /// replacing |replaced|. /// If |replaced| exists and is a directory, this function must check that |renamed| is a /// directory and that |replaced| is empty. pub fn rename( &self, old_parent: &FsNodeHandle, old_name: &FsStr, new_parent: &FsNodeHandle, new_name: &FsStr, renamed: &FsNodeHandle, replaced: Option<&FsNodeHandle>, ) -> Result<(), Errno> { self.ops.rename(self, old_parent, old_name, new_parent, new_name, renamed, replaced) } /// Returns the `statfs` for this filesystem. /// /// Each `FileSystemOps` impl is expected to override this to return the specific statfs for /// the filesystem. /// /// Returns `ENOSYS` if the `FileSystemOps` don't implement `stat`.
pub fn statfs(&self) -> Result<statfs, Errno> { let mut stat = self.ops.statfs(self)?; if stat.f_frsize == 0 { stat.f_frsize = stat.f_bsize as i64; } Ok(stat) } pub fn did_create_dir_entry(&self, entry: &DirEntryHandle) { if self.permanent_entries { self.entries.lock().insert(Arc::as_ptr(entry) as usize, entry.clone()); } } pub fn will_destroy_dir_entry(&self, entry: &DirEntryHandle) { if self.permanent_entries { self.entries.lock().remove(&(Arc::as_ptr(entry) as usize)); } } } /// The filesystem-implementation-specific data for FileSystem. pub trait FileSystemOps: Send + Sync + 'static { /// Return information about this filesystem. /// /// A typical implementation looks like this: /// ``` /// Ok(statfs::default(FILE_SYSTEM_MAGIC)) /// ``` /// or, if the filesystem wants to customize fields: /// ``` /// Ok(statfs { /// f_blocks: self.blocks, /// ..statfs::default(FILE_SYSTEM_MAGIC) /// }) /// ``` fn statfs(&self, _fs: &FileSystem) -> Result<statfs, Errno>; /// Whether this file system generates its own node IDs. fn generate_node_ids(&self) -> bool { false } /// Rename the given node. /// /// The node to be renamed is passed as "renamed". It currently has /// old_name in old_parent. After the rename operation, it should have /// new_name in new_parent. /// /// If new_parent already has a child named new_name, that node is passed as /// "replaced". In that case, both "renamed" and "replaced" will be /// directories and the rename operation should succeed only if "replaced" /// is empty. The VFS will check that there are no children of "replaced" in /// the DirEntry cache, but the implementation of this function is /// responsible for checking that there are no children of replaced that are /// known only to the file system implementation (e.g., present on-disk but /// not in the DirEntry cache). fn rename( &self, _fs: &FileSystem, _old_parent: &FsNodeHandle, _old_name: &FsStr, _new_parent: &FsNodeHandle, _new_name: &FsStr, _renamed: &FsNodeHandle, _replaced: Option<&FsNodeHandle>, ) -> Result<(), Errno> { error!(EROFS) } } pub type FileSystemHandle = Arc<FileSystem>;
root
identifier_name
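The `root` accessor named above sits next to `get_or_create_node`, which keeps one `Weak<FsNode>` per inode so that hard links resolve to a single `FsNode`. A runnable sketch of that cache discipline using plain `std` types (`Node` is a hypothetical stand-in for `FsNode`):

```
// Sketch of the Weak-pointer node cache behind get_or_create_node:
// vacant or dead entries run create_fn; live entries are reused.
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::sync::{Arc, Weak};

struct Node(u64);

fn get_or_create(
    cache: &mut HashMap<u64, Weak<Node>>,
    ino: u64,
    create_fn: impl FnOnce(u64) -> Arc<Node>,
) -> Arc<Node> {
    match cache.entry(ino) {
        Entry::Vacant(e) => {
            let node = create_fn(ino);
            e.insert(Arc::downgrade(&node));
            node
        }
        Entry::Occupied(mut e) => {
            if let Some(node) = e.get().upgrade() {
                return node; // hard link: same inode, same node object
            }
            // The node was dropped; replace the dead Weak.
            let node = create_fn(ino);
            e.insert(Arc::downgrade(&node));
            node
        }
    }
}

fn main() {
    let mut cache = HashMap::new();
    let a = get_or_create(&mut cache, 7, |i| Arc::new(Node(i)));
    let b = get_or_create(&mut cache, 7, |_| unreachable!("should be cached"));
    assert!(Arc::ptr_eq(&a, &b));
    assert_eq!(a.0, 7);
}
```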
file_system.rs
// Copyright 2021 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use once_cell::sync::OnceCell; use std::collections::hash_map::Entry; use std::collections::HashMap; use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::{Arc, Weak}; use super::*; use crate::auth::FsCred; use crate::lock::Mutex; use crate::task::Kernel; use crate::types::*; /// A file system that can be mounted in a namespace. pub struct FileSystem { root: OnceCell<DirEntryHandle>, next_inode: AtomicU64, ops: Box<dyn FileSystemOps>, /// The device ID of this filesystem. Returned in the st_dev field when stating an inode in /// this filesystem. pub dev_id: DeviceType, /// Whether DirEntries added to this filesystem should be considered permanent, instead of a /// cache of the backing storage. An example is tmpfs: the DirEntry tree *is* the backing /// storage, as opposed to ext4, which uses the DirEntry tree as a cache and removes unused /// nodes from it. pub permanent_entries: bool, /// A file-system global mutex to serialize rename operations. /// /// This mutex is useful because the invariants enforced during a rename /// operation involve many DirEntry objects. In the future, we might be /// able to remove this mutex, but we will need to think carefully about /// how rename operations can interleave. /// /// See DirEntry::rename. pub rename_mutex: Mutex<()>, /// The FsNode cache for this file system. /// /// When two directory entries are hard links to the same underlying inode, /// this cache lets us re-use the same FsNode object for both directory /// entries. /// /// Rather than calling FsNode::new directly, file systems should call /// FileSystem::get_or_create_node to see if the FsNode already exists in /// the cache. nodes: Mutex<HashMap<ino_t, Weak<FsNode>>>, /// DirEntryHandle cache for the filesystem. Currently only used by filesystems that set the /// permanent_entries flag, to store every node and make sure it doesn't get freed without /// being explicitly unlinked. entries: Mutex<HashMap<usize, DirEntryHandle>>, /// Hack meant to stand in for the fs_use_trans selinux feature. If set, this value will be set /// as the selinux label on any newly created inodes in the filesystem. pub selinux_context: OnceCell<FsString>, } impl FileSystem { /// Create a new filesystem. pub fn new(kernel: &Kernel, ops: impl FileSystemOps) -> FileSystemHandle { Self::new_internal(kernel, ops, false) } /// Create a new filesystem with the permanent_entries flag set. pub fn new_with_permanent_entries( kernel: &Kernel, ops: impl FileSystemOps, ) -> FileSystemHandle { Self::new_internal(kernel, ops, true) } /// Create a new filesystem and call set_root in one step. pub fn new_with_root( kernel: &Kernel, ops: impl FileSystemOps, root_node: FsNode, ) -> FileSystemHandle { let fs = Self::new_with_permanent_entries(kernel, ops); fs.set_root_node(root_node); fs } pub fn set_root(self: &FileSystemHandle, root: impl FsNodeOps) { self.set_root_node(FsNode::new_root(root)); } /// Set up the root of the filesystem. Must not be called more than once. pub fn set_root_node(self: &FileSystemHandle, mut root: FsNode)
fn new_internal( kernel: &Kernel, ops: impl FileSystemOps, permanent_entries: bool, ) -> FileSystemHandle { Arc::new(FileSystem { root: OnceCell::new(), next_inode: AtomicU64::new(1), ops: Box::new(ops), dev_id: kernel.device_registry.write().next_anonymous_dev_id(), permanent_entries, rename_mutex: Mutex::new(()), nodes: Mutex::new(HashMap::new()), entries: Mutex::new(HashMap::new()), selinux_context: OnceCell::new(), }) } /// The root directory entry of this file system. /// /// Panics if this file system does not have a root directory. pub fn root(&self) -> &DirEntryHandle { self.root.get().unwrap() } /// Get or create an FsNode for this file system. /// /// If inode_num is Some, then this function checks the node cache to /// determine whether this node is already open. If so, the function /// returns the existing FsNode. If not, the function calls the given /// create_fn function to create the FsNode. /// /// If inode_num is None, then this function assigns a new inode number /// and calls the given create_fn function to create the FsNode with the /// assigned number. /// /// Returns Err only if create_fn returns Err. pub fn get_or_create_node<F>( &self, inode_num: Option<ino_t>, create_fn: F, ) -> Result<FsNodeHandle, Errno> where F: FnOnce(ino_t) -> Result<FsNodeHandle, Errno>, { let inode_num = inode_num.unwrap_or_else(|| self.next_inode_num()); let mut nodes = self.nodes.lock(); match nodes.entry(inode_num) { Entry::Vacant(entry) => { let node = create_fn(inode_num)?; entry.insert(Arc::downgrade(&node)); Ok(node) } Entry::Occupied(mut entry) => { if let Some(node) = entry.get().upgrade() { return Ok(node); } let node = create_fn(inode_num)?; entry.insert(Arc::downgrade(&node)); Ok(node) } } } /// File systems that produce their own IDs for nodes should invoke this /// function. Those that leave ID assignment to this object should /// call |create_node|. pub fn create_node_with_id( self: &Arc<Self>, ops: Box<dyn FsNodeOps>, id: ino_t, mode: FileMode, owner: FsCred, ) -> FsNodeHandle { if let Some(label) = self.selinux_context.get() { let _ = ops.set_xattr(b"security.selinux", label, XattrOp::Create); } let node = FsNode::new_uncached(ops, self, id, mode, owner); self.nodes.lock().insert(node.inode_num, Arc::downgrade(&node)); node } pub fn create_node( self: &Arc<Self>, ops: Box<dyn FsNodeOps>, mode: FileMode, owner: FsCred, ) -> FsNodeHandle { let inode_num = self.next_inode_num(); self.create_node_with_id(ops, inode_num, mode, owner) } pub fn create_node_with_ops( self: &Arc<Self>, ops: impl FsNodeOps, mode: FileMode, owner: FsCred, ) -> FsNodeHandle { self.create_node(Box::new(ops), mode, owner) } /// Remove the given FsNode from the node cache. /// /// Called from the Drop trait of FsNode. pub fn remove_node(&self, node: &mut FsNode) { let mut nodes = self.nodes.lock(); if let Some(weak_node) = nodes.get(&node.inode_num) { if std::ptr::eq(weak_node.as_ptr(), node) { nodes.remove(&node.inode_num); } } } pub fn next_inode_num(&self) -> ino_t { assert!(!self.ops.generate_node_ids()); self.next_inode.fetch_add(1, Ordering::Relaxed) } /// Move |renamed| that is at |old_name| in |old_parent| to |new_name| in |new_parent|, /// replacing |replaced|. /// If |replaced| exists and is a directory, this function must check that |renamed| is a /// directory and that |replaced| is empty.
pub fn rename( &self, old_parent: &FsNodeHandle, old_name: &FsStr, new_parent: &FsNodeHandle, new_name: &FsStr, renamed: &FsNodeHandle, replaced: Option<&FsNodeHandle>, ) -> Result<(), Errno> { self.ops.rename(self, old_parent, old_name, new_parent, new_name, renamed, replaced) } /// Returns the `statfs` for this filesystem. /// /// Each `FileSystemOps` impl is expected to override this to return the specific statfs for /// the filesystem. /// /// Returns `ENOSYS` if the `FileSystemOps` don't implement `stat`. pub fn statfs(&self) -> Result<statfs, Errno> { let mut stat = self.ops.statfs(self)?; if stat.f_frsize == 0 { stat.f_frsize = stat.f_bsize as i64; } Ok(stat) } pub fn did_create_dir_entry(&self, entry: &DirEntryHandle) { if self.permanent_entries { self.entries.lock().insert(Arc::as_ptr(entry) as usize, entry.clone()); } } pub fn will_destroy_dir_entry(&self, entry: &DirEntryHandle) { if self.permanent_entries { self.entries.lock().remove(&(Arc::as_ptr(entry) as usize)); } } } /// The filesystem-implementation-specific data for FileSystem. pub trait FileSystemOps: Send + Sync + 'static { /// Return information about this filesystem. /// /// A typical implementation looks like this: /// ``` /// Ok(statfs::default(FILE_SYSTEM_MAGIC)) /// ``` /// or, if the filesystem wants to customize fields: /// ``` /// Ok(statfs { /// f_blocks: self.blocks, /// ..statfs::default(FILE_SYSTEM_MAGIC) /// }) /// ``` fn statfs(&self, _fs: &FileSystem) -> Result<statfs, Errno>; /// Whether this file system generates its own node IDs. fn generate_node_ids(&self) -> bool { false } /// Rename the given node. /// /// The node to be renamed is passed as "renamed". It currently has /// old_name in old_parent. After the rename operation, it should have /// new_name in new_parent. /// /// If new_parent already has a child named new_name, that node is passed as /// "replaced". In that case, both "renamed" and "replaced" will be /// directories and the rename operation should succeed only if "replaced" /// is empty. The VFS will check that there are no children of "replaced" in /// the DirEntry cache, but the implementation of this function is /// responsible for checking that there are no children of replaced that are /// known only to the file system implementation (e.g., present on-disk but /// not in the DirEntry cache). fn rename( &self, _fs: &FileSystem, _old_parent: &FsNodeHandle, _old_name: &FsStr, _new_parent: &FsNodeHandle, _new_name: &FsStr, _renamed: &FsNodeHandle, _replaced: Option<&FsNodeHandle>, ) -> Result<(), Errno> { error!(EROFS) } } pub type FileSystemHandle = Arc<FileSystem>;
{ if root.inode_num == 0 { root.inode_num = self.next_inode_num(); } root.set_fs(self); let root_node = Arc::new(root); self.nodes.lock().insert(root_node.inode_num, Arc::downgrade(&root_node)); let root = DirEntry::new(root_node, None, FsString::new()); assert!(self.root.set(root).is_ok(), "FileSystem::set_root can't be called more than once"); }
identifier_body
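The `identifier_body` above is `set_root_node`, whose set-once invariant comes from `OnceCell::set` returning `Err` on any call after the first. A small sketch of just that mechanism (assumes the `once_cell` crate, which the file already imports):

```
// Sketch of the set-once behavior that set_root_node's assert! relies on.
use once_cell::sync::OnceCell;

fn main() {
    let root: OnceCell<&str> = OnceCell::new();
    assert!(root.set("root dir entry").is_ok());
    // A second set_root_node would trip the assertion in the code above.
    assert!(root.set("another root").is_err());
}
```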
arena.rs
use core::convert::TryFrom; use core::mem::{self, ManuallyDrop}; use core::ops::Deref; use core::pin::Pin; use pin_project::pin_project; use crate::list::*; use crate::lock::{Spinlock, SpinlockGuard}; use crate::pinned_array::IterPinMut; use crate::rc_cell::{RcCell, Ref, RefMut}; /// A homogeneous memory allocator, equipped with the box type representing an allocation. pub trait Arena: Sized { /// The value type of the allocator. type Data: ArenaObject; /// The guard type for arena. type Guard<'s>; /// Find or alloc. fn find_or_alloc_handle<C: Fn(&Self::Data) -> bool, N: FnOnce(&mut Self::Data)>( &self, c: C, n: N, ) -> Option<Ref<Self::Data>>; fn find_or_alloc<C: Fn(&Self::Data) -> bool, N: FnOnce(&mut Self::Data)>( &self, c: C, n: N, ) -> Option<Rc<Self>> { let inner = self.find_or_alloc_handle(c, n)?; // SAFETY: `inner` was allocated from `self`. Some(unsafe { Rc::from_unchecked(self, inner) }) } /// Fallible allocation. fn alloc_handle<F: FnOnce(&mut Self::Data)>(&self, f: F) -> Option<Ref<Self::Data>>; fn alloc<F: FnOnce(&mut Self::Data)>(&self, f: F) -> Option<Rc<Self>> { let inner = self.alloc_handle(f)?; // SAFETY: `inner` was allocated from `self`. Some(unsafe { Rc::from_unchecked(self, inner) }) } /// Duplicate a given handle, and increase the reference count. /// /// # Safety /// /// `handle` must be allocated from `self`. // TODO: If we wrap `ArrayPtr::r` with `SpinlockProtected`, then we can just use `clone` instead. unsafe fn dup(&self, handle: &Ref<Self::Data>) -> Ref<Self::Data>; /// Deallocate a given handle, and finalize the referred object if there are /// no more handles. /// /// # Safety /// /// `handle` must be allocated from `self`. // TODO: If we wrap `ArrayPtr::r` with `SpinlockProtected`, then we can just use `drop` instead. unsafe fn dealloc(&self, handle: Ref<Self::Data>); /// Temporarily releases the lock while calling `f`, and re-acquires the lock after `f` returned. /// /// # Safety /// /// The caller must be careful when calling this inside `ArenaObject::finalize`. /// If you use this while finalizing an `ArenaObject`, the `Arena`'s lock will be temporarily released, /// and hence, another thread may use `Arena::find_or_alloc` to obtain an `Rc` referring to the `ArenaObject` /// we are **currently finalizing**. Therefore, in this case, make sure no thread tries to `find_or_alloc` /// for an `ArenaObject` that may be under finalization. unsafe fn reacquire_after<'s, 'g: 's, F, R: 's>(guard: &'s mut Self::Guard<'g>, f: F) -> R where F: FnOnce() -> R; } pub trait ArenaObject { /// Finalizes the `ArenaObject`. /// This function is automatically called when the last `Rc` referring to this `ArenaObject` gets dropped. fn finalize<'s, A: Arena>(&'s mut self, guard: &'s mut A::Guard<'_>); } /// A homogeneous memory allocator equipped with reference counts. #[pin_project] pub struct ArrayArena<T, const CAPACITY: usize> { #[pin] entries: [RcCell<T>; CAPACITY], } #[pin_project] #[repr(C)] pub struct MruEntry<T> { #[pin] list_entry: ListEntry, #[pin] data: RcCell<T>, } /// A homogeneous memory allocator equipped with reference counts. #[pin_project] pub struct MruArena<T, const CAPACITY: usize> { #[pin] entries: [MruEntry<T>; CAPACITY], #[pin] list: List<MruEntry<T>>, } /// A thread-safe reference counted pointer, allocated from `A: Arena`. /// The data type is the same as `A::Data`. /// /// # Safety /// /// `inner` is allocated from `arena`. /// We can safely dereference `arena` until `inner` gets dropped, /// because we panic if the arena drops earlier than `inner`.
pub struct Rc<A: Arena> { arena: *const A, inner: ManuallyDrop<Ref<A::Data>>, } // `Rc` is `Send` because it does not impl `DerefMut`, // and when we access the inner `Arena`, we do it after acquiring `Arena`'s lock. // Also, `Rc` does not point to thread-local data. unsafe impl<T: Sync, A: Arena<Data = T>> Send for Rc<A> {} impl<T, const CAPACITY: usize> ArrayArena<T, CAPACITY> { // TODO(https://github.com/kaist-cp/rv6/issues/371): unsafe... pub const fn new(entries: [RcCell<T>; CAPACITY]) -> Self { Self { entries } } } impl<T: 'static + ArenaObject + Unpin, const CAPACITY: usize> Arena for Spinlock<ArrayArena<T, CAPACITY>> { type Data = T; type Guard<'s> = SpinlockGuard<'s, ArrayArena<T, CAPACITY>>; fn find_or_alloc_handle<C: Fn(&Self::Data) -> bool, N: FnOnce(&mut Self::Data)>( &self, c: C, n: N, ) -> Option<Ref<Self::Data>> { let mut guard = self.lock(); let this = guard.get_pin_mut().project(); let mut empty: Option<*mut RcCell<T>> = None; for entry in IterPinMut::from(this.entries) { if !entry.is_borrowed() { if empty.is_none() { empty = Some(entry.as_ref().get_ref() as *const _ as *mut _) } // Note: Do not use `break` here. // We must first search through all entries, and then alloc at empty // only if the entry we're looking for doesn't exist. } else if let Some(r) = entry.try_borrow() { // The entry is not under finalization. Check its data. if c(&r) { return Some(r); } } } empty.map(|cell_raw| { // SAFETY: `cell` is not referenced or borrowed. Also, it is already pinned. let mut cell = unsafe { Pin::new_unchecked(&mut *cell_raw) }; n(cell.as_mut().get_pin_mut().unwrap().get_mut()); cell.borrow() }) } fn alloc_handle<F: FnOnce(&mut Self::Data)>(&self, f: F) -> Option<Ref<Self::Data>> { let mut guard = self.lock(); let this = guard.get_pin_mut().project(); for mut entry in IterPinMut::from(this.entries) { if !entry.is_borrowed() { f(entry.as_mut().get_pin_mut().unwrap().get_mut()); return Some(entry.borrow()); } } None } unsafe fn dup(&self, handle: &Ref<Self::Data>) -> Ref<Self::Data> { let mut _this = self.lock(); handle.clone() } unsafe fn dealloc(&self, handle: Ref<Self::Data>) { let mut this = self.lock(); if let Ok(mut rm) = RefMut::<T>::try_from(handle) { rm.finalize::<Self>(&mut this); } } unsafe fn reacquire_after<'s, 'g: 's, F, R: 's>(guard: &'s mut Self::Guard<'g>, f: F) -> R where F: FnOnce() -> R, { guard.reacquire_after(f) } } impl<T> MruEntry<T> { // TODO(https://github.com/kaist-cp/rv6/issues/369) // A workaround for https://github.com/Gilnaa/memoffset/issues/49. // Assumes `list_entry` is located at the beginning of `MruEntry` // and `data` is located at `mem::size_of::<ListEntry>()`. const DATA_OFFSET: usize = mem::size_of::<ListEntry>(); const LIST_ENTRY_OFFSET: usize = 0; // const DATA_OFFSET: usize = offset_of!(MruEntry<T>, data); // const LIST_ENTRY_OFFSET: usize = offset_of!(MruEntry<T>, list_entry); pub const fn new(data: T) -> Self { Self { list_entry: unsafe { ListEntry::new() }, data: RcCell::new(data), } } /// For the `MruEntry<T>` that corresponds to the given `RefMut<T>`, we move it to the back of the list. /// /// # Safety /// /// Only use this if the given `RefMut<T>` was obtained from an `MruEntry<T>`, /// which is contained inside the `list`. unsafe fn finalize_entry(r: RefMut<T>, list: &List<MruEntry<T>>) { let ptr = (r.get_cell() as *const _ as usize - Self::DATA_OFFSET) as *mut MruEntry<T>; let entry = unsafe { &*ptr }; list.push_back(entry); } } // SAFETY: `MruEntry` owns a `ListEntry`.
unsafe impl<T> ListNode for MruEntry<T> { fn get_list_entry(&self) -> &ListEntry { &self.list_entry } fn from_list_entry(list_entry: *const ListEntry) -> *const Self
} impl<T, const CAPACITY: usize> MruArena<T, CAPACITY> { // TODO(https://github.com/kaist-cp/rv6/issues/371): unsafe... pub const fn new(entries: [MruEntry<T>; CAPACITY]) -> Self { Self { entries, list: unsafe { List::new() }, } } pub fn init(self: Pin<&mut Self>) { let mut this = self.project(); this.list.as_mut().init(); for mut entry in IterPinMut::from(this.entries) { entry.as_mut().project().list_entry.init(); this.list.push_front(&entry); } } } impl<T: 'static + ArenaObject + Unpin, const CAPACITY: usize> Arena for Spinlock<MruArena<T, CAPACITY>> { type Data = T; type Guard<'s> = SpinlockGuard<'s, MruArena<T, CAPACITY>>; fn find_or_alloc_handle<C: Fn(&Self::Data) -> bool, N: FnOnce(&mut Self::Data)>( &self, c: C, n: N, ) -> Option<Ref<Self::Data>> { let mut guard = self.lock(); let this = guard.get_pin_mut().project(); let mut empty: Option<*mut RcCell<T>> = None; // SAFETY: the whole `MruArena` is protected by a lock. for entry in unsafe { this.list.iter_pin_mut_unchecked() } { if !entry.data.is_borrowed() { empty = Some(&entry.data as *const _ as *mut _); } if let Some(r) = entry.data.try_borrow() { if c(&r) { return Some(r); } } } empty.map(|cell_raw| { // SAFETY: `cell` is not referenced or borrowed. Also, it is already pinned. let mut cell = unsafe { Pin::new_unchecked(&mut *cell_raw) }; n(cell.as_mut().get_pin_mut().unwrap().get_mut()); cell.borrow() }) } fn alloc_handle<F: FnOnce(&mut Self::Data)>(&self, f: F) -> Option<Ref<Self::Data>> { let mut guard = self.lock(); let this = guard.get_pin_mut().project(); // SAFETY: the whole `MruArena` is protected by a lock. for mut entry in unsafe { this.list.iter_pin_mut_unchecked().rev() } { if !entry.data.is_borrowed() { f(entry .as_mut() .project() .data .get_pin_mut() .unwrap() .get_mut()); return Some(entry.data.borrow()); } } None } unsafe fn dup(&self, handle: &Ref<Self::Data>) -> Ref<Self::Data> { let mut _this = self.lock(); handle.clone() } unsafe fn dealloc(&self, handle: Ref<Self::Data>) { let mut this = self.lock(); if let Ok(mut rm) = RefMut::<T>::try_from(handle) { rm.finalize::<Self>(&mut this); // SAFETY: the `handle` was obtained from an `MruEntry`, // which is contained inside `&this.list`. unsafe { MruEntry::finalize_entry(rm, &this.list) }; } } unsafe fn reacquire_after<'s, 'g: 's, F, R: 's>(guard: &'s mut Self::Guard<'g>, f: F) -> R where F: FnOnce() -> R, { guard.reacquire_after(f) } } impl<T, A: Arena<Data = T>> Rc<A> { /// # Safety /// /// `inner` must be allocated from `arena` pub unsafe fn from_unchecked(arena: &A, inner: Ref<T>) -> Self { let inner = ManuallyDrop::new(inner); Self { arena, inner } } /// Returns a reference to the arena that the `Rc` was allocated from. fn get_arena(&self) -> &A { // SAFETY: Safe because of `Rc`'s invariant. unsafe { &*self.arena } } } impl<T, A: Arena<Data = T>> Deref for Rc<A> { type Target = T; fn deref(&self) -> &T { self.inner.deref() } } impl<A: Arena> Drop for Rc<A> { fn drop(&mut self) { // SAFETY: `inner` was allocated from `arena`. unsafe { (&*self.arena).dealloc(ManuallyDrop::take(&mut self.inner)) }; } } impl<A: Arena> Clone for Rc<A> { fn clone(&self) -> Self { // SAFETY: `inner` was allocated from `arena`. let inner = ManuallyDrop::new(unsafe { self.get_arena().dup(&self.inner) }); Self { arena: self.arena, inner, } } }
{ (list_entry as *const _ as usize - Self::LIST_ENTRY_OFFSET) as *const Self }
identifier_body
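The `identifier_body` above belongs to `from_list_entry`, a container-of computation: because `MruEntry` is `#[repr(C)]` and `list_entry` sits at offset 0, a pointer to the field can be mapped back to the containing struct by subtracting the field offset. A standalone sketch with hypothetical stand-in types:

```
// Sketch of the container-of arithmetic in MruEntry::from_list_entry.
// `Entry`, `tag`, and `data` are hypothetical stand-ins.
#[repr(C)]
struct Entry {
    tag: u32,  // plays the role of list_entry (offset 0)
    data: u64, // plays the role of data
}

const TAG_OFFSET: usize = 0; // mirrors LIST_ENTRY_OFFSET

fn container_of(tag: *const u32) -> *const Entry {
    (tag as usize - TAG_OFFSET) as *const Entry
}

fn main() {
    let e = Entry { tag: 1, data: 42 };
    let back = container_of(&e.tag as *const u32);
    assert!(std::ptr::eq(back, &e as *const Entry));
    // SAFETY: `back` points at `e`, which is alive.
    unsafe { assert_eq!((*back).data, 42) };
}
```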
arena.rs
use core::convert::TryFrom; use core::mem::{self, ManuallyDrop}; use core::ops::Deref; use core::pin::Pin; use pin_project::pin_project; use crate::list::*; use crate::lock::{Spinlock, SpinlockGuard}; use crate::pinned_array::IterPinMut; use crate::rc_cell::{RcCell, Ref, RefMut}; /// A homogeneous memory allocator, equipped with the box type representing an allocation. pub trait Arena: Sized { /// The value type of the allocator. type Data: ArenaObject; /// The guard type for arena. type Guard<'s>; /// Find or alloc. fn find_or_alloc_handle<C: Fn(&Self::Data) -> bool, N: FnOnce(&mut Self::Data)>( &self, c: C, n: N, ) -> Option<Ref<Self::Data>>; fn find_or_alloc<C: Fn(&Self::Data) -> bool, N: FnOnce(&mut Self::Data)>( &self, c: C, n: N, ) -> Option<Rc<Self>> { let inner = self.find_or_alloc_handle(c, n)?; // SAFETY: `inner` was allocated from `self`. Some(unsafe { Rc::from_unchecked(self, inner) }) } /// Fallible allocation. fn alloc_handle<F: FnOnce(&mut Self::Data)>(&self, f: F) -> Option<Ref<Self::Data>>; fn alloc<F: FnOnce(&mut Self::Data)>(&self, f: F) -> Option<Rc<Self>> { let inner = self.alloc_handle(f)?; // SAFETY: `inner` was allocated from `self`. Some(unsafe { Rc::from_unchecked(self, inner) }) } /// Duplicate a given handle, and increase the reference count. /// /// # Safety /// /// `handle` must be allocated from `self`. // TODO: If we wrap `ArrayPtr::r` with `SpinlockProtected`, then we can just use `clone` instead. unsafe fn dup(&self, handle: &Ref<Self::Data>) -> Ref<Self::Data>; /// Deallocate a given handle, and finalize the referred object if there are /// no more handles. /// /// # Safety /// /// `handle` must be allocated from `self`. // TODO: If we wrap `ArrayPtr::r` with `SpinlockProtected`, then we can just use `drop` instead. unsafe fn dealloc(&self, handle: Ref<Self::Data>); /// Temporarily releases the lock while calling `f`, and re-acquires the lock after `f` returned. /// /// # Safety /// /// The caller must be careful when calling this inside `ArenaObject::finalize`. /// If you use this while finalizing an `ArenaObject`, the `Arena`'s lock will be temporarily released, /// and hence, another thread may use `Arena::find_or_alloc` to obtain an `Rc` referring to the `ArenaObject` /// we are **currently finalizing**. Therefore, in this case, make sure no thread tries to `find_or_alloc` /// for an `ArenaObject` that may be under finalization. unsafe fn reacquire_after<'s, 'g: 's, F, R: 's>(guard: &'s mut Self::Guard<'g>, f: F) -> R where F: FnOnce() -> R; } pub trait ArenaObject { /// Finalizes the `ArenaObject`. /// This function is automatically called when the last `Rc` referring to this `ArenaObject` gets dropped. fn finalize<'s, A: Arena>(&'s mut self, guard: &'s mut A::Guard<'_>); } /// A homogeneous memory allocator equipped with reference counts. #[pin_project] pub struct ArrayArena<T, const CAPACITY: usize> { #[pin] entries: [RcCell<T>; CAPACITY], } #[pin_project] #[repr(C)] pub struct MruEntry<T> { #[pin] list_entry: ListEntry, #[pin] data: RcCell<T>, } /// A homogeneous memory allocator equipped with reference counts. #[pin_project] pub struct MruArena<T, const CAPACITY: usize> { #[pin] entries: [MruEntry<T>; CAPACITY], #[pin] list: List<MruEntry<T>>, } /// A thread-safe reference counted pointer, allocated from `A: Arena`. /// The data type is the same as `A::Data`. /// /// # Safety /// /// `inner` is allocated from `arena`. /// We can safely dereference `arena` until `inner` gets dropped, /// because we panic if the arena drops earlier than `inner`.
pub struct Rc<A: Arena> { arena: *const A, inner: ManuallyDrop<Ref<A::Data>>, } // `Rc` is `Send` because it does not impl `DerefMut`, // and when we access the inner `Arena`, we do it after acquiring `Arena`'s lock. // Also, `Rc` does not point to thread-local data. unsafe impl<T: Sync, A: Arena<Data = T>> Send for Rc<A> {} impl<T, const CAPACITY: usize> ArrayArena<T, CAPACITY> { // TODO(https://github.com/kaist-cp/rv6/issues/371): unsafe... pub const fn new(entries: [RcCell<T>; CAPACITY]) -> Self { Self { entries } } } impl<T: 'static + ArenaObject + Unpin, const CAPACITY: usize> Arena for Spinlock<ArrayArena<T, CAPACITY>> { type Data = T; type Guard<'s> = SpinlockGuard<'s, ArrayArena<T, CAPACITY>>; fn find_or_alloc_handle<C: Fn(&Self::Data) -> bool, N: FnOnce(&mut Self::Data)>( &self, c: C, n: N, ) -> Option<Ref<Self::Data>> { let mut guard = self.lock(); let this = guard.get_pin_mut().project(); let mut empty: Option<*mut RcCell<T>> = None; for entry in IterPinMut::from(this.entries) { if !entry.is_borrowed() { if empty.is_none() { empty = Some(entry.as_ref().get_ref() as *const _ as *mut _) } // Note: Do not use `break` here. // We must first search through all entries, and then alloc at empty // only if the entry we're looking for doesn't exist. } else if let Some(r) = entry.try_borrow() { // The entry is not under finalization. Check its data. if c(&r)
} } empty.map(|cell_raw| { // SAFETY: `cell` is not referenced or borrowed. Also, it is already pinned. let mut cell = unsafe { Pin::new_unchecked(&mut *cell_raw) }; n(cell.as_mut().get_pin_mut().unwrap().get_mut()); cell.borrow() }) } fn alloc_handle<F: FnOnce(&mut Self::Data)>(&self, f: F) -> Option<Ref<Self::Data>> { let mut guard = self.lock(); let this = guard.get_pin_mut().project(); for mut entry in IterPinMut::from(this.entries) { if !entry.is_borrowed() { f(entry.as_mut().get_pin_mut().unwrap().get_mut()); return Some(entry.borrow()); } } None } unsafe fn dup(&self, handle: &Ref<Self::Data>) -> Ref<Self::Data> { let mut _this = self.lock(); handle.clone() } unsafe fn dealloc(&self, handle: Ref<Self::Data>) { let mut this = self.lock(); if let Ok(mut rm) = RefMut::<T>::try_from(handle) { rm.finalize::<Self>(&mut this); } } unsafe fn reacquire_after<'s, 'g: 's, F, R: 's>(guard: &'s mut Self::Guard<'g>, f: F) -> R where F: FnOnce() -> R, { guard.reacquire_after(f) } } impl<T> MruEntry<T> { // TODO(https://github.com/kaist-cp/rv6/issues/369) // A workaround for https://github.com/Gilnaa/memoffset/issues/49. // Assumes `list_entry` is located at the beginning of `MruEntry` // and `data` is located at `mem::size_of::<ListEntry>()`. const DATA_OFFSET: usize = mem::size_of::<ListEntry>(); const LIST_ENTRY_OFFSET: usize = 0; // const DATA_OFFSET: usize = offset_of!(MruEntry<T>, data); // const LIST_ENTRY_OFFSET: usize = offset_of!(MruEntry<T>, list_entry); pub const fn new(data: T) -> Self { Self { list_entry: unsafe { ListEntry::new() }, data: RcCell::new(data), } } /// For the `MruEntry<T>` that corresponds to the given `RefMut<T>`, we move it to the back of the list. /// /// # Safety /// /// Only use this if the given `RefMut<T>` was obtained from an `MruEntry<T>`, /// which is contained inside the `list`. unsafe fn finalize_entry(r: RefMut<T>, list: &List<MruEntry<T>>) { let ptr = (r.get_cell() as *const _ as usize - Self::DATA_OFFSET) as *mut MruEntry<T>; let entry = unsafe { &*ptr }; list.push_back(entry); } } // SAFETY: `MruEntry` owns a `ListEntry`. unsafe impl<T> ListNode for MruEntry<T> { fn get_list_entry(&self) -> &ListEntry { &self.list_entry } fn from_list_entry(list_entry: *const ListEntry) -> *const Self { (list_entry as *const _ as usize - Self::LIST_ENTRY_OFFSET) as *const Self } } impl<T, const CAPACITY: usize> MruArena<T, CAPACITY> { // TODO(https://github.com/kaist-cp/rv6/issues/371): unsafe... pub const fn new(entries: [MruEntry<T>; CAPACITY]) -> Self { Self { entries, list: unsafe { List::new() }, } } pub fn init(self: Pin<&mut Self>) { let mut this = self.project(); this.list.as_mut().init(); for mut entry in IterPinMut::from(this.entries) { entry.as_mut().project().list_entry.init(); this.list.push_front(&entry); } } } impl<T: 'static + ArenaObject + Unpin, const CAPACITY: usize> Arena for Spinlock<MruArena<T, CAPACITY>> { type Data = T; type Guard<'s> = SpinlockGuard<'s, MruArena<T, CAPACITY>>; fn find_or_alloc_handle<C: Fn(&Self::Data) -> bool, N: FnOnce(&mut Self::Data)>( &self, c: C, n: N, ) -> Option<Ref<Self::Data>> { let mut guard = self.lock(); let this = guard.get_pin_mut().project(); let mut empty: Option<*mut RcCell<T>> = None; // SAFETY: the whole `MruArena` is protected by a lock.
for entry in unsafe { this.list.iter_pin_mut_unchecked() } { if !entry.data.is_borrowed() { empty = Some(&entry.data as *const _ as *mut _); } if let Some(r) = entry.data.try_borrow() { if c(&r) { return Some(r); } } } empty.map(|cell_raw| { // SAFETY: `cell` is not referenced or borrowed. Also, it is already pinned. let mut cell = unsafe { Pin::new_unchecked(&mut *cell_raw) }; n(cell.as_mut().get_pin_mut().unwrap().get_mut()); cell.borrow() }) } fn alloc_handle<F: FnOnce(&mut Self::Data)>(&self, f: F) -> Option<Ref<Self::Data>> { let mut guard = self.lock(); let this = guard.get_pin_mut().project(); // SAFETY: the whole `MruArena` is protected by a lock. for mut entry in unsafe { this.list.iter_pin_mut_unchecked().rev() } { if !entry.data.is_borrowed() { f(entry .as_mut() .project() .data .get_pin_mut() .unwrap() .get_mut()); return Some(entry.data.borrow()); } } None } unsafe fn dup(&self, handle: &Ref<Self::Data>) -> Ref<Self::Data> { let mut _this = self.lock(); handle.clone() } unsafe fn dealloc(&self, handle: Ref<Self::Data>) { let mut this = self.lock(); if let Ok(mut rm) = RefMut::<T>::try_from(handle) { rm.finalize::<Self>(&mut this); // SAFETY: the `handle` was obtained from an `MruEntry`, // which is contained inside `&this.list`. unsafe { MruEntry::finalize_entry(rm, &this.list) }; } } unsafe fn reacquire_after<'s, 'g: 's, F, R: 's>(guard: &'s mut Self::Guard<'g>, f: F) -> R where F: FnOnce() -> R, { guard.reacquire_after(f) } } impl<T, A: Arena<Data = T>> Rc<A> { /// # Safety /// /// `inner` must be allocated from `arena` pub unsafe fn from_unchecked(arena: &A, inner: Ref<T>) -> Self { let inner = ManuallyDrop::new(inner); Self { arena, inner } } /// Returns a reference to the arena that the `Rc` was allocated from. fn get_arena(&self) -> &A { // SAFETY: Safe because of `Rc`'s invariant. unsafe { &*self.arena } } } impl<T, A: Arena<Data = T>> Deref for Rc<A> { type Target = T; fn deref(&self) -> &T { self.inner.deref() } } impl<A: Arena> Drop for Rc<A> { fn drop(&mut self) { // SAFETY: `inner` was allocated from `arena`. unsafe { (&*self.arena).dealloc(ManuallyDrop::take(&mut self.inner)) }; } } impl<A: Arena> Clone for Rc<A> { fn clone(&self) -> Self { // SAFETY: `inner` was allocated from `arena`. let inner = ManuallyDrop::new(unsafe { self.get_arena().dup(&self.inner) }); Self { arena: self.arena, inner, } } }
{ return Some(r); }
conditional_block
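The `conditional_block` above is the match arm inside `find_or_alloc_handle`, whose scan deliberately avoids `break`: it remembers the first free slot but keeps searching, and only allocates there if no existing entry matches. A simplified runnable sketch of that ordering, with a slice of `Option<u32>` standing in for the entry array:

```
// Sketch of find-or-alloc scan order: search everything first,
// then allocate at the remembered empty slot only on a miss.
fn find_or_alloc(slots: &mut [Option<u32>], wanted: u32) -> usize {
    let mut empty = None;
    for (i, slot) in slots.iter().enumerate() {
        match slot {
            None => {
                if empty.is_none() {
                    empty = Some(i); // do not break: a match may still follow
                }
            }
            Some(v) if *v == wanted => return i,
            Some(_) => {}
        }
    }
    let i = empty.expect("arena full");
    slots[i] = Some(wanted);
    i
}

fn main() {
    let mut slots = [None, Some(3), None];
    assert_eq!(find_or_alloc(&mut slots, 3), 1); // found past an earlier empty
    assert_eq!(find_or_alloc(&mut slots, 9), 0); // allocated at first empty
}
```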
arena.rs
use core::convert::TryFrom; use core::mem::{self, ManuallyDrop}; use core::ops::Deref; use core::pin::Pin; use pin_project::pin_project; use crate::list::*; use crate::lock::{Spinlock, SpinlockGuard}; use crate::pinned_array::IterPinMut; use crate::rc_cell::{RcCell, Ref, RefMut}; /// A homogeneous memory allocator, equipped with the box type representing an allocation. pub trait Arena: Sized { /// The value type of the allocator. type Data: ArenaObject; /// The guard type for arena. type Guard<'s>; /// Find or alloc. fn find_or_alloc_handle<C: Fn(&Self::Data) -> bool, N: FnOnce(&mut Self::Data)>( &self, c: C, n: N, ) -> Option<Ref<Self::Data>>; fn find_or_alloc<C: Fn(&Self::Data) -> bool, N: FnOnce(&mut Self::Data)>( &self, c: C, n: N, ) -> Option<Rc<Self>> { let inner = self.find_or_alloc_handle(c, n)?; // SAFETY: `inner` was allocated from `self`. Some(unsafe { Rc::from_unchecked(self, inner) }) } /// Fallible allocation. fn alloc_handle<F: FnOnce(&mut Self::Data)>(&self, f: F) -> Option<Ref<Self::Data>>; fn
<F: FnOnce(&mut Self::Data)>(&self, f: F) -> Option<Rc<Self>> { let inner = self.alloc_handle(f)?; // SAFETY: `inner` was allocated from `self`. Some(unsafe { Rc::from_unchecked(self, inner) }) } /// Duplicate a given handle, and increase the reference count. /// /// # Safety /// /// `handle` must be allocated from `self`. // TODO: If we wrap `ArrayPtr::r` with `SpinlockProtected`, then we can just use `clone` instead. unsafe fn dup(&self, handle: &Ref<Self::Data>) -> Ref<Self::Data>; /// Deallocate a given handle, and finalize the referred object if there are /// no more handles. /// /// # Safety /// /// `handle` must be allocated from `self`. // TODO: If we wrap `ArrayPtr::r` with `SpinlockProtected`, then we can just use `drop` instead. unsafe fn dealloc(&self, handle: Ref<Self::Data>); /// Temporarily releases the lock while calling `f`, and re-acquires the lock after `f` returned. /// /// # Safety /// /// The caller must be careful when calling this inside `ArenaObject::finalize`. /// If you use this while finalizing an `ArenaObject`, the `Arena`'s lock will be temporarily released, /// and hence, another thread may use `Arena::find_or_alloc` to obtain an `Rc` referring to the `ArenaObject` /// we are **currently finalizing**. Therefore, in this case, make sure no thread tries to `find_or_alloc` /// for an `ArenaObject` that may be under finalization. unsafe fn reacquire_after<'s, 'g: 's, F, R: 's>(guard: &'s mut Self::Guard<'g>, f: F) -> R where F: FnOnce() -> R; } pub trait ArenaObject { /// Finalizes the `ArenaObject`. /// This function is automatically called when the last `Rc` referring to this `ArenaObject` gets dropped. fn finalize<'s, A: Arena>(&'s mut self, guard: &'s mut A::Guard<'_>); } /// A homogeneous memory allocator equipped with reference counts. #[pin_project] pub struct ArrayArena<T, const CAPACITY: usize> { #[pin] entries: [RcCell<T>; CAPACITY], } #[pin_project] #[repr(C)] pub struct MruEntry<T> { #[pin] list_entry: ListEntry, #[pin] data: RcCell<T>, } /// A homogeneous memory allocator equipped with reference counts. #[pin_project] pub struct MruArena<T, const CAPACITY: usize> { #[pin] entries: [MruEntry<T>; CAPACITY], #[pin] list: List<MruEntry<T>>, } /// A thread-safe reference counted pointer, allocated from `A: Arena`. /// The data type is the same as `A::Data`. /// /// # Safety /// /// `inner` is allocated from `arena`. /// We can safely dereference `arena` until `inner` gets dropped, /// because we panic if the arena drops earlier than `inner`. pub struct Rc<A: Arena> { arena: *const A, inner: ManuallyDrop<Ref<A::Data>>, } // `Rc` is `Send` because it does not impl `DerefMut`, // and when we access the inner `Arena`, we do it after acquiring `Arena`'s lock. // Also, `Rc` does not point to thread-local data. unsafe impl<T: Sync, A: Arena<Data = T>> Send for Rc<A> {} impl<T, const CAPACITY: usize> ArrayArena<T, CAPACITY> { // TODO(https://github.com/kaist-cp/rv6/issues/371): unsafe...
pub const fn new(entries: [RcCell<T>; CAPACITY]) -> Self { Self { entries } } } impl<T: 'static + ArenaObject + Unpin, const CAPACITY: usize> Arena for Spinlock<ArrayArena<T, CAPACITY>> { type Data = T; type Guard<'s> = SpinlockGuard<'s, ArrayArena<T, CAPACITY>>; fn find_or_alloc_handle<C: Fn(&Self::Data) -> bool, N: FnOnce(&mut Self::Data)>( &self, c: C, n: N, ) -> Option<Ref<Self::Data>> { let mut guard = self.lock(); let this = guard.get_pin_mut().project(); let mut empty: Option<*mut RcCell<T>> = None; for entry in IterPinMut::from(this.entries) { if !entry.is_borrowed() { if empty.is_none() { empty = Some(entry.as_ref().get_ref() as *const _ as *mut _) } // Note: Do not use `break` here. // We must first search through all entries, and then alloc at empty // only if the entry we're looking for doesn't exist. } else if let Some(r) = entry.try_borrow() { // The entry is not under finalization. Check its data. if c(&r) { return Some(r); } } } empty.map(|cell_raw| { // SAFETY: `cell` is not referenced or borrowed. Also, it is already pinned. let mut cell = unsafe { Pin::new_unchecked(&mut *cell_raw) }; n(cell.as_mut().get_pin_mut().unwrap().get_mut()); cell.borrow() }) } fn alloc_handle<F: FnOnce(&mut Self::Data)>(&self, f: F) -> Option<Ref<Self::Data>> { let mut guard = self.lock(); let this = guard.get_pin_mut().project(); for mut entry in IterPinMut::from(this.entries) { if !entry.is_borrowed() { f(entry.as_mut().get_pin_mut().unwrap().get_mut()); return Some(entry.borrow()); } } None } unsafe fn dup(&self, handle: &Ref<Self::Data>) -> Ref<Self::Data> { let mut _this = self.lock(); handle.clone() } unsafe fn dealloc(&self, handle: Ref<Self::Data>) { let mut this = self.lock(); if let Ok(mut rm) = RefMut::<T>::try_from(handle) { rm.finalize::<Self>(&mut this); } } unsafe fn reacquire_after<'s, 'g: 's, F, R: 's>(guard: &'s mut Self::Guard<'g>, f: F) -> R where F: FnOnce() -> R, { guard.reacquire_after(f) } } impl<T> MruEntry<T> { // TODO(https://github.com/kaist-cp/rv6/issues/369) // A workaround for https://github.com/Gilnaa/memoffset/issues/49. // Assumes `list_entry` is located at the beginning of `MruEntry` // and `data` is located at `mem::size_of::<ListEntry>()`. const DATA_OFFSET: usize = mem::size_of::<ListEntry>(); const LIST_ENTRY_OFFSET: usize = 0; // const DATA_OFFSET: usize = offset_of!(MruEntry<T>, data); // const LIST_ENTRY_OFFSET: usize = offset_of!(MruEntry<T>, list_entry); pub const fn new(data: T) -> Self { Self { list_entry: unsafe { ListEntry::new() }, data: RcCell::new(data), } } /// For the `MruEntry<T>` that corresponds to the given `RefMut<T>`, we move it to the back of the list. /// /// # Safety /// /// Only use this if the given `RefMut<T>` was obtained from an `MruEntry<T>`, /// which is contained inside the `list`. unsafe fn finalize_entry(r: RefMut<T>, list: &List<MruEntry<T>>) { let ptr = (r.get_cell() as *const _ as usize - Self::DATA_OFFSET) as *mut MruEntry<T>; let entry = unsafe { &*ptr }; list.push_back(entry); } } // SAFETY: `MruEntry` owns a `ListEntry`. unsafe impl<T> ListNode for MruEntry<T> { fn get_list_entry(&self) -> &ListEntry { &self.list_entry } fn from_list_entry(list_entry: *const ListEntry) -> *const Self { (list_entry as *const _ as usize - Self::LIST_ENTRY_OFFSET) as *const Self } } impl<T, const CAPACITY: usize> MruArena<T, CAPACITY> { // TODO(https://github.com/kaist-cp/rv6/issues/371): unsafe...
pub const fn new(entries: [MruEntry<T>; CAPACITY]) -> Self { Self { entries, list: unsafe { List::new() }, } } pub fn init(self: Pin<&mut Self>) { let mut this = self.project(); this.list.as_mut().init(); for mut entry in IterPinMut::from(this.entries) { entry.as_mut().project().list_entry.init(); this.list.push_front(&entry); } } } impl<T: 'static + ArenaObject + Unpin, const CAPACITY: usize> Arena for Spinlock<MruArena<T, CAPACITY>> { type Data = T; type Guard<'s> = SpinlockGuard<'s, MruArena<T, CAPACITY>>; fn find_or_alloc_handle<C: Fn(&Self::Data) -> bool, N: FnOnce(&mut Self::Data)>( &self, c: C, n: N, ) -> Option<Ref<Self::Data>> { let mut guard = self.lock(); let this = guard.get_pin_mut().project(); let mut empty: Option<*mut RcCell<T>> = None; // SAFETY: the whole `MruArena` is protected by a lock. for entry in unsafe { this.list.iter_pin_mut_unchecked() } { if !entry.data.is_borrowed() { empty = Some(&entry.data as *const _ as *mut _); } if let Some(r) = entry.data.try_borrow() { if c(&r) { return Some(r); } } } empty.map(|cell_raw| { // SAFETY: `cell` is not referenced or borrowed. Also, it is already pinned. let mut cell = unsafe { Pin::new_unchecked(&mut *cell_raw) }; n(cell.as_mut().get_pin_mut().unwrap().get_mut()); cell.borrow() }) } fn alloc_handle<F: FnOnce(&mut Self::Data)>(&self, f: F) -> Option<Ref<Self::Data>> { let mut guard = self.lock(); let this = guard.get_pin_mut().project(); // SAFETY: the whole `MruArena` is protected by a lock. for mut entry in unsafe { this.list.iter_pin_mut_unchecked().rev() } { if !entry.data.is_borrowed() { f(entry .as_mut() .project() .data .get_pin_mut() .unwrap() .get_mut()); return Some(entry.data.borrow()); } } None } unsafe fn dup(&self, handle: &Ref<Self::Data>) -> Ref<Self::Data> { let mut _this = self.lock(); handle.clone() } unsafe fn dealloc(&self, handle: Ref<Self::Data>) { let mut this = self.lock(); if let Ok(mut rm) = RefMut::<T>::try_from(handle) { rm.finalize::<Self>(&mut this); // SAFETY: the `handle` was obtained from an `MruEntry`, // which is contained inside `&this.list`. unsafe { MruEntry::finalize_entry(rm, &this.list) }; } } unsafe fn reacquire_after<'s, 'g: 's, F, R: 's>(guard: &'s mut Self::Guard<'g>, f: F) -> R where F: FnOnce() -> R, { guard.reacquire_after(f) } } impl<T, A: Arena<Data = T>> Rc<A> { /// # Safety /// /// `inner` must be allocated from `arena` pub unsafe fn from_unchecked(arena: &A, inner: Ref<T>) -> Self { let inner = ManuallyDrop::new(inner); Self { arena, inner } } /// Returns a reference to the arena that the `Rc` was allocated from. fn get_arena(&self) -> &A { // SAFETY: Safe because of `Rc`'s invariant. unsafe { &*self.arena } } } impl<T, A: Arena<Data = T>> Deref for Rc<A> { type Target = T; fn deref(&self) -> &T { self.inner.deref() } } impl<A: Arena> Drop for Rc<A> { fn drop(&mut self) { // SAFETY: `inner` was allocated from `arena`. unsafe { (&*self.arena).dealloc(ManuallyDrop::take(&mut self.inner)) }; } } impl<A: Arena> Clone for Rc<A> { fn clone(&self) -> Self { // SAFETY: `inner` was allocated from `arena`. let inner = ManuallyDrop::new(unsafe { self.get_arena().dup(&self.inner) }); Self { arena: self.arena, inner, } } }
alloc
identifier_name
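The `identifier_name` above is `alloc`; its `MruArena` counterpart `alloc_handle` walks the intrusive list in reverse, so the entry at the back is claimed first, while `finalize_entry` pushes freed entries to the back. A loose sketch of that recency discipline using a `VecDeque` in place of the intrusive list:

```
// Sketch of MruArena's list discipline: allocate from the back,
// return freed entries to the back.
use std::collections::VecDeque;

fn main() {
    let mut list: VecDeque<&str> = VecDeque::from(["a", "b", "c"]);
    // alloc_handle scans from the back of the list.
    let victim = list.pop_back().unwrap();
    assert_eq!(victim, "c");
    // finalize_entry pushes a freed entry to the back for reuse.
    list.push_back("freed");
    assert_eq!(list.back(), Some(&"freed"));
}
```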
arena.rs
use core::convert::TryFrom; use core::mem::{self, ManuallyDrop}; use core::ops::Deref; use core::pin::Pin; use pin_project::pin_project; use crate::list::*; use crate::lock::{Spinlock, SpinlockGuard}; use crate::pinned_array::IterPinMut; use crate::rc_cell::{RcCell, Ref, RefMut}; /// A homogeneous memory allocator, equipped with the box type representing an allocation. pub trait Arena: Sized { /// The value type of the allocator. type Data: ArenaObject; /// The guard type for arena. type Guard<'s>; /// Find or alloc. fn find_or_alloc_handle<C: Fn(&Self::Data) -> bool, N: FnOnce(&mut Self::Data)>( &self, c: C, n: N, ) -> Option<Ref<Self::Data>>; fn find_or_alloc<C: Fn(&Self::Data) -> bool, N: FnOnce(&mut Self::Data)>( &self, c: C, n: N, ) -> Option<Rc<Self>> { let inner = self.find_or_alloc_handle(c, n)?; // SAFETY: `inner` was allocated from `self`. Some(unsafe { Rc::from_unchecked(self, inner) }) } /// Fallible allocation. fn alloc_handle<F: FnOnce(&mut Self::Data)>(&self, f: F) -> Option<Ref<Self::Data>>; fn alloc<F: FnOnce(&mut Self::Data)>(&self, f: F) -> Option<Rc<Self>> { let inner = self.alloc_handle(f)?; // SAFETY: `inner` was allocated from `self`. Some(unsafe { Rc::from_unchecked(self, inner) }) } /// Duplicate a given handle, and increase the reference count. /// /// # Safety /// /// `handle` must be allocated from `self`. // TODO: If we wrap `ArrayPtr::r` with `SpinlockProtected`, then we can just use `clone` instead. unsafe fn dup(&self, handle: &Ref<Self::Data>) -> Ref<Self::Data>; /// Deallocate a given handle, and finalize the referred object if there are /// no more handles. /// /// # Safety /// /// `handle` must be allocated from `self`. // TODO: If we wrap `ArrayPtr::r` with `SpinlockProtected`, then we can just use `drop` instead. unsafe fn dealloc(&self, handle: Ref<Self::Data>); /// Temporarily releases the lock while calling `f`, and re-acquires the lock after `f` returned. /// /// # Safety /// /// The caller must be careful when calling this inside `ArenaObject::finalize`. /// If you use this while finalizing an `ArenaObject`, the `Arena`'s lock will be temporarily released, /// and hence, another thread may use `Arena::find_or_alloc` to obtain an `Rc` referring to the `ArenaObject` /// we are **currently finalizing**. Therefore, in this case, make sure no thread tries to `find_or_alloc` /// for an `ArenaObject` that may be under finalization. unsafe fn reacquire_after<'s, 'g: 's, F, R: 's>(guard: &'s mut Self::Guard<'g>, f: F) -> R where F: FnOnce() -> R; } pub trait ArenaObject { /// Finalizes the `ArenaObject`. /// This function is automatically called when the last `Rc` referring to this `ArenaObject` gets dropped. fn finalize<'s, A: Arena>(&'s mut self, guard: &'s mut A::Guard<'_>); } /// A homogeneous memory allocator equipped with reference counts. #[pin_project] pub struct ArrayArena<T, const CAPACITY: usize> { #[pin] entries: [RcCell<T>; CAPACITY], } #[pin_project] #[repr(C)] pub struct MruEntry<T> { #[pin] list_entry: ListEntry, #[pin] data: RcCell<T>, } /// A homogeneous memory allocator equipped with reference counts. #[pin_project] pub struct MruArena<T, const CAPACITY: usize> { #[pin] entries: [MruEntry<T>; CAPACITY], #[pin] list: List<MruEntry<T>>, } /// A thread-safe reference counted pointer, allocated from `A: Arena`. /// The data type is the same as `A::Data`. /// /// # Safety /// /// `inner` is allocated from `arena`. /// We can safely dereference `arena` until `inner` gets dropped, /// because we panic if the arena drops earlier than `inner`.
pub struct Rc<A: Arena> { arena: *const A, inner: ManuallyDrop<Ref<A::Data>>, } // `Rc` is `Send` because it does not impl `DerefMut`, // and when we access the inner `Arena`, we do it after acquiring `Arena`'s lock. // Also, `Rc` does not point to thread-local data. unsafe impl<T: Sync, A: Arena<Data = T>> Send for Rc<A> {} impl<T, const CAPACITY: usize> ArrayArena<T, CAPACITY> { // TODO(https://github.com/kaist-cp/rv6/issues/371): unsafe... pub const fn new(entries: [RcCell<T>; CAPACITY]) -> Self { Self { entries } } } impl<T: 'static + ArenaObject + Unpin, const CAPACITY: usize> Arena for Spinlock<ArrayArena<T, CAPACITY>> { type Data = T; type Guard<'s> = SpinlockGuard<'s, ArrayArena<T, CAPACITY>>; fn find_or_alloc_handle<C: Fn(&Self::Data) -> bool, N: FnOnce(&mut Self::Data)>( &self, c: C, n: N, ) -> Option<Ref<Self::Data>> { let mut guard = self.lock(); let this = guard.get_pin_mut().project(); let mut empty: Option<*mut RcCell<T>> = None; for entry in IterPinMut::from(this.entries) { if !entry.is_borrowed() { if empty.is_none() { empty = Some(entry.as_ref().get_ref() as *const _ as *mut _) } // Note: Do not use `break` here. // We must first search through all entries, and then alloc at empty // only if the entry we're looking for doesn't exist. } else if let Some(r) = entry.try_borrow() { // The entry is not under finalization. Check its data. if c(&r) { return Some(r); } } } empty.map(|cell_raw| { // SAFETY: `cell` is not referenced or borrowed. Also, it is already pinned. let mut cell = unsafe { Pin::new_unchecked(&mut *cell_raw) }; n(cell.as_mut().get_pin_mut().unwrap().get_mut()); cell.borrow() }) } fn alloc_handle<F: FnOnce(&mut Self::Data)>(&self, f: F) -> Option<Ref<Self::Data>> { let mut guard = self.lock(); let this = guard.get_pin_mut().project(); for mut entry in IterPinMut::from(this.entries) { if !entry.is_borrowed() { f(entry.as_mut().get_pin_mut().unwrap().get_mut()); return Some(entry.borrow()); } } None } unsafe fn dup(&self, handle: &Ref<Self::Data>) -> Ref<Self::Data> { let mut _this = self.lock(); handle.clone() } unsafe fn dealloc(&self, handle: Ref<Self::Data>) { let mut this = self.lock(); if let Ok(mut rm) = RefMut::<T>::try_from(handle) { rm.finalize::<Self>(&mut this); } } unsafe fn reacquire_after<'s, 'g: 's, F, R: 's>(guard: &'s mut Self::Guard<'g>, f: F) -> R where F: FnOnce() -> R,
impl<T> MruEntry<T> { // TODO(https://github.com/kaist-cp/rv6/issues/369) // A workaround for https://github.com/Gilnaa/memoffset/issues/49. // Assumes `list_entry` is located at the beginning of `MruEntry` // and `data` is located at `mem::size_of::<ListEntry>()`. const DATA_OFFSET: usize = mem::size_of::<ListEntry>(); const LIST_ENTRY_OFFSET: usize = 0; // const DATA_OFFSET: usize = offset_of!(MruEntry<T>, data); // const LIST_ENTRY_OFFSET: usize = offset_of!(MruEntry<T>, list_entry); pub const fn new(data: T) -> Self { Self { list_entry: unsafe { ListEntry::new() }, data: RcCell::new(data), } } /// For the `MruEntry<T>` that corresponds to the given `RefMut<T>`, we move it to the back of the list. /// /// # Safety /// /// Only use this if the given `RefMut<T>` was obtained from an `MruEntry<T>`, /// which is contained inside the `list`. unsafe fn finalize_entry(r: RefMut<T>, list: &List<MruEntry<T>>) { let ptr = (r.get_cell() as *const _ as usize - Self::DATA_OFFSET) as *mut MruEntry<T>; let entry = unsafe { &*ptr }; list.push_back(entry); } } // SAFETY: `MruEntry` owns a `ListEntry`. unsafe impl<T> ListNode for MruEntry<T> { fn get_list_entry(&self) -> &ListEntry { &self.list_entry } fn from_list_entry(list_entry: *const ListEntry) -> *const Self { (list_entry as *const _ as usize - Self::LIST_ENTRY_OFFSET) as *const Self } } impl<T, const CAPACITY: usize> MruArena<T, CAPACITY> { // TODO(https://github.com/kaist-cp/rv6/issues/371): unsafe... pub const fn new(entries: [MruEntry<T>; CAPACITY]) -> Self { Self { entries, list: unsafe { List::new() }, } } pub fn init(self: Pin<&mut Self>) { let mut this = self.project(); this.list.as_mut().init(); for mut entry in IterPinMut::from(this.entries) { entry.as_mut().project().list_entry.init(); this.list.push_front(&entry); } } } impl<T: 'static + ArenaObject + Unpin, const CAPACITY: usize> Arena for Spinlock<MruArena<T, CAPACITY>> { type Data = T; type Guard<'s> = SpinlockGuard<'s, MruArena<T, CAPACITY>>; fn find_or_alloc_handle<C: Fn(&Self::Data) -> bool, N: FnOnce(&mut Self::Data)>( &self, c: C, n: N, ) -> Option<Ref<Self::Data>> { let mut guard = self.lock(); let this = guard.get_pin_mut().project(); let mut empty: Option<*mut RcCell<T>> = None; // SAFETY: the whole `MruArena` is protected by a lock. for entry in unsafe { this.list.iter_pin_mut_unchecked() } { if !entry.data.is_borrowed() { empty = Some(&entry.data as *const _ as *mut _); } if let Some(r) = entry.data.try_borrow() { if c(&r) { return Some(r); } } } empty.map(|cell_raw| { // SAFETY: `cell` is not referenced or borrowed. Also, it is already pinned. let mut cell = unsafe { Pin::new_unchecked(&mut *cell_raw) }; n(cell.as_mut().get_pin_mut().unwrap().get_mut()); cell.borrow() }) } fn alloc_handle<F: FnOnce(&mut Self::Data)>(&self, f: F) -> Option<Ref<Self::Data>> { let mut guard = self.lock(); let this = guard.get_pin_mut().project(); // SAFETY: the whole `MruArena` is protected by a lock. for mut entry in unsafe { this.list.iter_pin_mut_unchecked().rev() } { if !entry.data.is_borrowed() { f(entry .as_mut() .project() .data .get_pin_mut() .unwrap() .get_mut()); return Some(entry.data.borrow()); } } None } unsafe fn dup(&self, handle: &Ref<Self::Data>) -> Ref<Self::Data> { let mut _this = self.lock(); handle.clone() } unsafe fn dealloc(&self, handle: Ref<Self::Data>) { let mut this = self.lock(); if let Ok(mut rm) = RefMut::<T>::try_from(handle) { rm.finalize::<Self>(&mut this); // SAFETY: the `handle` was obtained from an `MruEntry`, // which is contained inside `&this.list`.
unsafe { MruEntry::finalize_entry(rm, &this.list) }; } } unsafe fn reacquire_after<'s, 'g: 's, F, R: 's>(guard: &'s mut Self::Guard<'g>, f: F) -> R where F: FnOnce() -> R, { guard.reacquire_after(f) } } impl<T, A: Arena<Data = T>> Rc<A> { /// # Safety /// /// `inner` must be allocated from `arena` pub unsafe fn from_unchecked(arena: &A, inner: Ref<T>) -> Self { let inner = ManuallyDrop::new(inner); Self { arena, inner } } /// Returns a reference to the arena that the `Rc` was allocated from. fn get_arena(&self) -> &A { // SAFETY: Safe because of `Rc`'s invariant. unsafe { &*self.arena } } } impl<T, A: Arena<Data = T>> Deref for Rc<A> { type Target = T; fn deref(&self) -> &T { self.inner.deref() } } impl<A: Arena> Drop for Rc<A> { fn drop(&mut self) { // SAFETY: `inner` was allocated from `arena`. unsafe { (&*self.arena).dealloc(ManuallyDrop::take(&mut self.inner)) }; } } impl<A: Arena> Clone for Rc<A> { fn clone(&self) -> Self { // SAFETY: `inner` was allocated from `arena`. let inner = ManuallyDrop::new(unsafe { self.get_arena().dup(&self.inner) }); Self { arena: self.arena, inner, } } }
{ guard.reacquire_after(f) } }
random_line_split
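Both find_or_alloc_handle bodies above hinge on the subtlety the comment calls out: the loop must scan every entry before allocating, otherwise a caller could grab an empty slot even though a matching entry exists further along, creating a duplicate. Below is a minimal sketch of that two-phase scan, using a plain std::sync::Mutex over an option array instead of rv6's Spinlock/RcCell/Pin machinery; SlotArena and find_or_alloc are hypothetical names, not the sample's API.

use std::sync::Mutex;

/// Hypothetical, simplified stand-in for `ArrayArena`: each slot holds a
/// reference count and a value.
struct SlotArena<T, const N: usize> {
    slots: Mutex<[Option<(usize, T)>; N]>,
}

impl<T: Clone + PartialEq, const N: usize> SlotArena<T, N> {
    fn new() -> Self {
        Self { slots: Mutex::new(std::array::from_fn(|_| None)) }
    }

    /// Two-phase scan, mirroring `find_or_alloc_handle`: search *all* slots
    /// for a match first; allocate into the remembered free slot only if the
    /// value is nowhere in the arena.
    fn find_or_alloc(&self, want: &T) -> Option<usize> {
        let mut slots = self.slots.lock().unwrap();
        let mut empty = None;
        for (i, slot) in slots.iter_mut().enumerate() {
            match slot {
                Some((rc, v)) if *v == *want => {
                    *rc += 1; // found: bump the refcount, hand out the slot
                    return Some(i);
                }
                None if empty.is_none() => empty = Some(i), // remember first free slot
                _ => {}
            }
        }
        // No match anywhere: allocate in the remembered free slot, if any.
        empty.map(|i| {
            slots[i] = Some((1, want.clone()));
            i
        })
    }
}

fn main() {
    let arena: SlotArena<u32, 4> = SlotArena::new();
    let a = arena.find_or_alloc(&7).unwrap();
    let b = arena.find_or_alloc(&7).unwrap();
    assert_eq!(a, b); // the second lookup reuses the existing slot, no duplicate
}

The whole scan happens under one lock acquisition, which is what lets the no-duplicates argument go through; the sample gets the same guarantee from its SpinlockGuard.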
driver.rs
use std::sync::Arc; use std::sync::Mutex; use std::sync::MutexGuard; use std::fs; use catt_core::util::always_lock; use catt_core::binding::Binding; use catt_core::binding::Notification; use catt_core::value::Value; use catt_core::item::Item as CItem; use openzwave as ozw; use openzwave::manager::Manager; use openzwave::options::Options; use openzwave::value_classes::value_id::ValueID; use openzwave::value_classes::value_id::ValueGenre; use openzwave::value_classes::value_id::ValueType; use openzwave::notification::Notification as ZWaveNotification; use openzwave::notification::NotificationType; use openzwave::notification::ControllerState; use serial_ports::{ListPortInfo, ListPorts}; use serial_ports::ListPortType::UsbPort; use tokio_core::reactor::Handle; use tokio_core::channel::channel; use tokio_core::channel::Sender; use tokio_core::channel::Receiver; use config::Config; use errors::*; use item::Item; use device::DB; #[cfg(windows)] fn get_default_devices() -> Vec<String> { vec!["\\\\.\\COM6".to_owned()] } #[cfg(unix)] fn is_usb_zwave_device(port: &ListPortInfo) -> bool { let default_usb_devices = [// VID PID // ----- ----- (0x0658, 0x0200), // Aeotech Z-Stick Gen-5 (0x0658, 0x0280), // UZB1 (0x10c4, 0xea60) /* Aeotech Z-Stick S2 */]; // Is it one of the vid/pids in the table? if let UsbPort(ref info) = port.port_type { default_usb_devices.contains(&(info.vid, info.pid)) } else { false } } #[cfg(unix)] fn get_default_devices() -> Vec<String> { // Enumerate all of the serial devices and see if any of them match our // known VID:PID. let mut ports: Vec<String> = Vec::new(); let usb_ports: Vec<String> = ListPorts::new() .iter() .filter(|port| is_usb_zwave_device(port)) .map(|port| port.device.to_string_lossy().into_owned()) .collect(); ports.extend(usb_ports); if ports.is_empty() { // The following is only included temporarily until we can get a more // comprehensive list of VIDs and PIDs. error!("[OpenzwaveStateful] Unable to locate ZWave USB dongle. The following VID:PIDs \ were found:"); for port in ListPorts::new().iter() { if let UsbPort(ref info) = port.port_type
} // The following should be removed, once we have all of the devices captured using the above let default_devices = ["/dev/cu.usbserial", // MacOS X (presumably) "/dev/cu.SLAB_USBtoUART", // MacOS X (Aeotech Z-Stick S2) "/dev/cu.usbmodem14211", // Yoric (Aeotech Z-Stick Gen-5) "/dev/cu.usbmodem1421", // Isabel (UZB Static Controller) "/dev/ttyUSB0", // Linux (Aeotech Z-Stick S2) "/dev/ttyACM0" /* Linux (Aeotech Z-Stick Gen-5) */]; if let Some(default_device) = default_devices.iter() .find(|device_name| fs::metadata(device_name).is_ok()) .map(|&str| str.to_owned()) { ports.push(default_device); } } ports } #[derive(Clone)] pub struct ZWave { #[allow(dead_code)] ozw_manager: Arc<Mutex<ozw::manager::Manager>>, // TODO improve this system - ideally, we should hide these behind another struct // so that only one call is needed to update both. items: Arc<Mutex<DB>>, } impl ZWave { pub fn new(handle: &Handle, cfg: &Config) -> Result<(ZWave, Receiver<Notification<Item>>)> { let cfg = cfg.clone(); let mut manager = { let config_path = match cfg.sys_config { Some(ref path) => path.as_ref(), None => "/etc/openzwave", }; let user_path = match cfg.user_config { Some(ref path) => path.as_ref(), None => "./config", }; let opts = Options::create(config_path, user_path, "--SaveConfiguration true --DumpTriggerLevel 0 \ --ConsoleOutput false")?; ozw::manager::Manager::create(opts)? }; let devices = cfg.port.clone().map(|p| vec![p]).unwrap_or(get_default_devices()); for device in devices { fs::File::open(&device)?; manager.add_driver(&device)?; } let manager = Arc::new(Mutex::new(manager)); let items = Arc::new(Mutex::new(Default::default())); let (tx, rx) = channel(handle)?; let driver = ZWave { ozw_manager: manager.clone(), items: items, }; let watcher = Watcher { cfg: cfg, driver: driver.clone(), output: Mutex::new(tx), }; always_lock(manager.lock()).add_watcher(watcher)?; Ok((driver, rx)) } pub fn get_manager(&self) -> MutexGuard<Manager> { always_lock(self.ozw_manager.lock()) } } impl Binding for ZWave { type Config = Config; type Error = Error; type Item = Item; fn new(handle: &Handle, cfg: &Self::Config) -> Result<(Self, Receiver<Notification<Item>>)> { ZWave::new(handle, cfg) } fn get_value(&self, name: &str) -> Option<Item> { always_lock(self.items.lock()).get_item(&String::from(name)).map(|i| i.clone()) } } struct Watcher { driver: ZWave, cfg: Config, output: Mutex<Sender<Notification<Item>>>, } impl Watcher { fn get_out(&self) -> MutexGuard<Sender<Notification<Item>>> { ::catt_core::util::always_lock(self.output.lock()) } } impl ozw::manager::NotificationWatcher for Watcher { fn on_notification(&self, zwave_notification: &ZWaveNotification) { let notification: Notification<Item> = match zwave_notification.get_type() { NotificationType::Type_DriverReady => { let home_id = zwave_notification.get_home_id(); let controller = Item::controller(&format!("zwave_{}_Controller", home_id), self.driver.clone(), home_id); always_lock(self.driver.items.lock()) .add_item(controller.get_name(), controller.clone()); let _ = self.get_out().send(Notification::Added(controller.clone())); Notification::Changed(controller) } NotificationType::Type_AllNodesQueried | NotificationType::Type_AwakeNodesQueried | NotificationType::Type_AllNodesQueriedSomeDead => { debug!("Controller ready"); // self.driver.ozw_manager.write_configs(); return; } NotificationType::Type_ValueAdded => { let v = zwave_notification.get_value_id(); if !should_expose(v) { return; } let mut db = always_lock(self.driver.items.lock()); let (name,
exists) = match self.cfg.lookup_device(v) { Some(name) => { let exists = if let Some(_) = db.get_name(&v) { warn!("duplicate match found for {}", name); true } else { false }; (name, exists) } None => { if self.cfg.expose_unbound.unwrap_or(true) { if let Some(name) = db.get_name(&v) { warn!("duplicate match found for unconfigured {}", name); (name.clone(), true) } else { (format!("zwave_{}_{}_{}", v.get_home_id(), v.get_node_id(), v.get_label()), false) } } else { debug!("no configured devices matched {}", v); return; } } }; let item = if !exists { debug!("adding value {} to db", name); db.add_value(name.clone(), v) } else { Item::item(&name, v) }; Notification::Added(item) } NotificationType::Type_ValueChanged => { let v = zwave_notification.get_value_id(); if !should_expose(v) { return; } let db = always_lock(self.driver.items.lock()); let name = match db.get_name(&v) { Some(n) => n, None => return, }; let item = Item::item(&name, v); debug!("value {} changed: {:?}", item.get_name(), item.get_value()); Notification::Changed(item) } NotificationType::Type_ValueRemoved => { let v = zwave_notification.get_value_id(); if !should_expose(v) { return; } let mut db = always_lock(self.driver.items.lock()); let name = match db.get_name(&v) { Some(n) => n.clone(), None => return, }; debug!("removing value {} from db", name); Notification::Removed(match db.remove_value(v) { Some(it) => it, None => Item::item(&name, v), }) } // TODO new implementation for this // ZWaveNotification::Generic(s) => { // if s.contains("Type_DriverRemoved") { // warn!("controller removed! shutting down."); // ::std::process::exit(1); // } // return; // } NotificationType::Type_ControllerCommand => { let home_id = zwave_notification.get_home_id(); let db_name = format!("zwave_{}_Controller", home_id); let controller = match self.driver.get_value(&db_name) { Some(c) => c, None => { debug!("controller not found in item db"); return; } }; let state = match ControllerState::from_u8(zwave_notification.get_event() .unwrap()) { Some(s) => s, None => { debug!("unknown controller state: {}", zwave_notification.get_event().unwrap()); return; } }; match state { ControllerState::Completed => { let _ = controller.set_value(Value::String("idle".into())); } ControllerState::Failed => { let _ = controller.set_value(Value::String("failed".into())); } ControllerState::Starting => {} _ => { debug!("unhandled controller state: {:?}", state); return; } } Notification::Changed(controller) } _ => { debug!("unmatched notification: {}", zwave_notification); return; } }; match self.get_out().send(notification) { Ok(_) => {} Err(e) => { warn!("zwave notification send error: {}", e); return; } } } } fn should_expose(v: ValueID) -> bool { match v.get_genre() { ValueGenre::ValueGenre_Basic | ValueGenre::ValueGenre_User => {} _ => return false, } match v.get_type() { ValueType::ValueType_Bool | ValueType::ValueType_Byte | ValueType::ValueType_Decimal | ValueType::ValueType_Int | ValueType::ValueType_Short | ValueType::ValueType_String | ValueType::ValueType_Raw => {} _ => return false, } true }
{ error!("[OpenzwaveStateful] {:04x}:{:04x} {}", info.vid, info.pid, port.device.display()); }
conditional_block
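The conditional_block masked in this sample is the error! branch that logs every enumerated USB port when no known dongle matched. The detection it complements is a plain membership test on (vendor id, product id) pairs; here is a self-contained sketch under the assumption that the port list is already enumerated (hard-coded below, where the driver uses serial_ports::ListPorts):

/// Known Z-Wave USB dongles as (vendor id, product id) pairs, copied from
/// the sample's table.
const KNOWN_DONGLES: [(u16, u16); 3] = [
    (0x0658, 0x0200), // Aeotech Z-Stick Gen-5
    (0x0658, 0x0280), // UZB1
    (0x10c4, 0xea60), // Aeotech Z-Stick S2
];

fn is_zwave_dongle(vid: u16, pid: u16) -> bool {
    KNOWN_DONGLES.contains(&(vid, pid))
}

fn main() {
    // Stand-in for an enumerated port list: (device path, vid, pid).
    let ports = [
        ("/dev/ttyACM0", 0x0658u16, 0x0200u16),
        ("/dev/ttyUSB3", 0x0403, 0x6001),
    ];
    for (dev, vid, pid) in ports {
        if is_zwave_dongle(vid, pid) {
            println!("candidate Z-Wave controller: {}", dev);
        } else {
            // Mirrors the masked block: report what was seen instead.
            eprintln!("{:04x}:{:04x} {}", vid, pid, dev);
        }
    }
}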
driver.rs
use std::sync::Arc; use std::sync::Mutex; use std::sync::MutexGuard; use std::fs; use catt_core::util::always_lock; use catt_core::binding::Binding; use catt_core::binding::Notification; use catt_core::value::Value; use catt_core::item::Item as CItem; use openzwave as ozw; use openzwave::manager::Manager; use openzwave::options::Options; use openzwave::value_classes::value_id::ValueID; use openzwave::value_classes::value_id::ValueGenre; use openzwave::value_classes::value_id::ValueType; use openzwave::notification::Notification as ZWaveNotification; use openzwave::notification::NotificationType; use openzwave::notification::ControllerState; use serial_ports::{ListPortInfo, ListPorts}; use serial_ports::ListPortType::UsbPort; use tokio_core::reactor::Handle; use tokio_core::channel::channel; use tokio_core::channel::Sender; use tokio_core::channel::Receiver; use config::Config; use errors::*; use item::Item; use device::DB; #[cfg(windows)] fn get_default_devices() -> Vec<String> { vec!["\\\\.\\COM6".to_owned()] } #[cfg(unix)] fn is_usb_zwave_device(port: &ListPortInfo) -> bool { let default_usb_devices = [// VID PID // ----- ----- (0x0658, 0x0200), // Aeotech Z-Stick Gen-5 (0x0658, 0x0280), // UZB1 (0x10c4, 0xea60) /* Aeotech Z-Stick S2 */]; // Is it one of the vid/pids in the table? if let UsbPort(ref info) = port.port_type { default_usb_devices.contains(&(info.vid, info.pid)) } else { false } } #[cfg(unix)] fn get_default_devices() -> Vec<String> { // Enumerate all of the serial devices and see if any of them match our // known VID:PID. let mut ports: Vec<String> = Vec::new(); let usb_ports: Vec<String> = ListPorts::new() .iter() .filter(|port| is_usb_zwave_device(port)) .map(|port| port.device.to_string_lossy().into_owned()) .collect(); ports.extend(usb_ports); if ports.is_empty() { // The following is only included temporarily until we can get a more // comprehensive list of VIDs and PIDs. error!("[OpenzwaveStateful] Unable to locate ZWave USB dongle. The following VID:PIDs \ were found:"); for port in ListPorts::new().iter() { if let UsbPort(ref info) = port.port_type { error!("[OpenzwaveStateful] {:04x}:{:04x} {}", info.vid, info.pid, port.device.display()); } } // The following should be removed, once we have all of the devices captured using the above let default_devices = ["/dev/cu.usbserial", // MacOS X (presumably) "/dev/cu.SLAB_USBtoUART", // MacOS X (Aeotech Z-Stick S2) "/dev/cu.usbmodem14211", // Yoric (Aeotech Z-Stick Gen-5) "/dev/cu.usbmodem1421", // Isabel (UZB Static Controller) "/dev/ttyUSB0", // Linux (Aeotech Z-Stick S2) "/dev/ttyACM0" /* Linux (Aeotech Z-Stick Gen-5) */]; if let Some(default_device) = default_devices.iter() .find(|device_name| fs::metadata(device_name).is_ok()) .map(|&str| str.to_owned()) { ports.push(default_device); } } ports } #[derive(Clone)] pub struct ZWave { #[allow(dead_code)] ozw_manager: Arc<Mutex<ozw::manager::Manager>>, // TODO improve this system - ideally, we should hide these behind another struct // so that only one call is needed to update both. items: Arc<Mutex<DB>>, } impl ZWave { pub fn
(handle: &Handle, cfg: &Config) -> Result<(ZWave, Receiver<Notification<Item>>)> { let cfg = cfg.clone(); let mut manager = { let config_path = match cfg.sys_config { Some(ref path) => path.as_ref(), None => "/etc/openzwave", }; let user_path = match cfg.user_config { Some(ref path) => path.as_ref(), None => "./config", }; let opts = Options::create(config_path, user_path, "--SaveConfiguration true --DumpTriggerLevel 0 \ --ConsoleOutput false")?; ozw::manager::Manager::create(opts)? }; let devices = cfg.port.clone().map(|p| vec![p]).unwrap_or(get_default_devices()); for device in devices { fs::File::open(&device)?; manager.add_driver(&device)?; } let manager = Arc::new(Mutex::new(manager)); let items = Arc::new(Mutex::new(Default::default())); let (tx, rx) = channel(handle)?; let driver = ZWave { ozw_manager: manager.clone(), items: items, }; let watcher = Watcher { cfg: cfg, driver: driver.clone(), output: Mutex::new(tx), }; always_lock(manager.lock()).add_watcher(watcher)?; Ok((driver, rx)) } pub fn get_manager(&self) -> MutexGuard<Manager> { always_lock(self.ozw_manager.lock()) } } impl Binding for ZWave { type Config = Config; type Error = Error; type Item = Item; fn new(handle: &Handle, cfg: &Self::Config) -> Result<(Self, Receiver<Notification<Item>>)> { ZWave::new(handle, cfg) } fn get_value(&self, name: &str) -> Option<Item> { always_lock(self.items.lock()).get_item(&String::from(name)).map(|i| i.clone()) } } struct Watcher { driver: ZWave, cfg: Config, output: Mutex<Sender<Notification<Item>>>, } impl Watcher { fn get_out(&self) -> MutexGuard<Sender<Notification<Item>>> { ::catt_core::util::always_lock(self.output.lock()) } } impl ozw::manager::NotificationWatcher for Watcher { fn on_notification(&self, zwave_notification: &ZWaveNotification) { let notification: Notification<Item> = match zwave_notification.get_type() { NotificationType::Type_DriverReady => { let home_id = zwave_notification.get_home_id(); let controller = Item::controller(&format!("zwave_{}_Controller", home_id), self.driver.clone(), home_id); always_lock(self.driver.items.lock()) .add_item(controller.get_name(), controller.clone()); let _ = self.get_out().send(Notification::Added(controller.clone())); Notification::Changed(controller) } NotificationType::Type_AllNodesQueried | NotificationType::Type_AwakeNodesQueried | NotificationType::Type_AllNodesQueriedSomeDead => { debug!("Controller ready"); // self.driver.ozw_manager.write_configs(); return; } NotificationType::Type_ValueAdded => { let v = zwave_notification.get_value_id(); if !should_expose(v) { return; } let mut db = always_lock(self.driver.items.lock()); let (name, exists) = match self.cfg.lookup_device(v) { Some(name) => { let exists = if let Some(_) = db.get_name(&v) { warn!("duplicate match found for {}", name); true } else { false }; (name, exists) } None => { if self.cfg.expose_unbound.unwrap_or(true) { if let Some(name) = db.get_name(&v) { warn!("duplicate match found for unconfigured {}", name); (name.clone(), true) } else { (format!("zwave_{}_{}_{}", v.get_home_id(), v.get_node_id(), v.get_label()), false) } } else { debug!("no configured devices matched {}", v); return; } } }; let item = if !exists { debug!("adding value {} to db", name); db.add_value(name.clone(), v) } else { Item::item(&name, v) }; Notification::Added(item) } NotificationType::Type_ValueChanged => { let v = zwave_notification.get_value_id(); if !should_expose(v) { return; } let db = always_lock(self.driver.items.lock()); let name = match db.get_name(&v) { Some(n) => n, None =>
return, }; let item = Item::item(&name, v); debug!("value {} changed: {:?}", item.get_name(), item.get_value()); Notification::Changed(item) } NotificationType::Type_ValueRemoved => { let v = zwave_notification.get_value_id(); if !should_expose(v) { return; } let mut db = always_lock(self.driver.items.lock()); let name = match db.get_name(&v) { Some(n) => n.clone(), None => return, }; debug!("removing value {} from db", name); Notification::Removed(match db.remove_value(v) { Some(it) => it, None => Item::item(&name, v), }) } // TODO new implementation for this // ZWaveNotification::Generic(s) => { // if s.contains("Type_DriverRemoved") { // warn!("controller removed! shutting down."); // ::std::process::exit(1); // } // return; // } NotificationType::Type_ControllerCommand => { let home_id = zwave_notification.get_home_id(); let db_name = format!("zwave_{}_Controller", home_id); let controller = match self.driver.get_value(&db_name) { Some(c) => c, None => { debug!("controller not found in item db"); return; } }; let state = match ControllerState::from_u8(zwave_notification.get_event() .unwrap()) { Some(s) => s, None => { debug!("unknown controller state: {}", zwave_notification.get_event().unwrap()); return; } }; match state { ControllerState::Completed => { let _ = controller.set_value(Value::String("idle".into())); } ControllerState::Failed => { let _ = controller.set_value(Value::String("failed".into())); } ControllerState::Starting => {} _ => { debug!("unhandled controller state: {:?}", state); return; } } Notification::Changed(controller) } _ => { debug!("unmatched notification: {}", zwave_notification); return; } }; match self.get_out().send(notification) { Ok(_) => {} Err(e) => { warn!("zwave notification send error: {}", e); return; } } } } fn should_expose(v: ValueID) -> bool { match v.get_genre() { ValueGenre::ValueGenre_Basic | ValueGenre::ValueGenre_User => {} _ => return false, } match v.get_type() { ValueType::ValueType_Bool | ValueType::ValueType_Byte | ValueType::ValueType_Decimal | ValueType::ValueType_Int | ValueType::ValueType_Short | ValueType::ValueType_String | ValueType::ValueType_Raw => {} _ => return false, } true }
new
identifier_name
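The identifier_name target here is new: a constructor that returns the driver together with the receiving end of a notification channel, so the caller subscribes in the same call that builds the binding. A sketch of that construct-and-subscribe shape using std::sync::mpsc in place of tokio_core's channel; Driver and Event are hypothetical, not the crate's types.

use std::sync::mpsc::{channel, Receiver, Sender};

#[derive(Debug)]
#[allow(dead_code)]
enum Event {
    Added(String),
    Changed(String),
}

struct Driver {
    tx: Sender<Event>,
}

impl Driver {
    /// Build the driver and hand back the receiver in one call, as
    /// `ZWave::new` does with its `Receiver<Notification<Item>>`.
    fn new() -> (Driver, Receiver<Event>) {
        let (tx, rx) = channel();
        (Driver { tx }, rx)
    }

    fn emit(&self, e: Event) {
        let _ = self.tx.send(e); // receiver may be gone; ignore, as the sample does
    }
}

fn main() {
    let (driver, rx) = Driver::new();
    driver.emit(Event::Added("zwave_1_Controller".into()));
    println!("{:?}", rx.recv().unwrap());
}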
driver.rs
use std::sync::Arc; use std::sync::Mutex; use std::sync::MutexGuard; use std::fs; use catt_core::util::always_lock; use catt_core::binding::Binding; use catt_core::binding::Notification; use catt_core::value::Value; use catt_core::item::Item as CItem; use openzwave as ozw; use openzwave::manager::Manager; use openzwave::options::Options; use openzwave::value_classes::value_id::ValueID; use openzwave::value_classes::value_id::ValueGenre; use openzwave::value_classes::value_id::ValueType; use openzwave::notification::Notification as ZWaveNotification; use openzwave::notification::NotificationType; use openzwave::notification::ControllerState; use serial_ports::{ListPortInfo, ListPorts}; use serial_ports::ListPortType::UsbPort; use tokio_core::reactor::Handle; use tokio_core::channel::channel; use tokio_core::channel::Sender; use tokio_core::channel::Receiver; use config::Config; use errors::*; use item::Item; use device::DB; #[cfg(windows)] fn get_default_devices() -> Vec<String> { vec!["\\\\.\\COM6".to_owned()] } #[cfg(unix)] fn is_usb_zwave_device(port: &ListPortInfo) -> bool { let default_usb_devices = [// VID PID // ----- ----- (0x0658, 0x0200), // Aeotech Z-Stick Gen-5 (0x0658, 0x0280), // UZB1 (0x10c4, 0xea60) /* Aeotech Z-Stick S2 */]; // Is it one of the vid/pids in the table? if let UsbPort(ref info) = port.port_type { default_usb_devices.contains(&(info.vid, info.pid)) } else { false } } #[cfg(unix)] fn get_default_devices() -> Vec<String> { // Enumerate all of the serial devices and see if any of them match our // known VID:PID. let mut ports: Vec<String> = Vec::new(); let usb_ports: Vec<String> = ListPorts::new() .iter() .filter(|port| is_usb_zwave_device(port)) .map(|port| port.device.to_string_lossy().into_owned()) .collect(); ports.extend(usb_ports); if ports.is_empty() { // The following is only included temporarily until we can get a more // comprehensive list of VIDs and PIDs. error!("[OpenzwaveStateful] Unable to locate ZWave USB dongle. The following VID:PIDs \ were found:"); for port in ListPorts::new().iter() { if let UsbPort(ref info) = port.port_type { error!("[OpenzwaveStateful] {:04x}:{:04x} {}", info.vid, info.pid, port.device.display()); } } // The following should be removed, once we have all of the devices captured using the above let default_devices = ["/dev/cu.usbserial", // MacOS X (presumably) "/dev/cu.SLAB_USBtoUART", // MacOS X (Aeotech Z-Stick S2) "/dev/cu.usbmodem14211", // Yoric (Aeotech Z-Stick Gen-5) "/dev/cu.usbmodem1421", // Isabel (UZB Static Controller) "/dev/ttyUSB0", // Linux (Aeotech Z-Stick S2) "/dev/ttyACM0" /* Linux (Aeotech Z-Stick Gen-5) */]; if let Some(default_device) = default_devices.iter() .find(|device_name| fs::metadata(device_name).is_ok()) .map(|&str| str.to_owned()) { ports.push(default_device); } } ports } #[derive(Clone)] pub struct ZWave { #[allow(dead_code)] ozw_manager: Arc<Mutex<ozw::manager::Manager>>, // TODO improve this system - ideally, we should hide these behind another struct // so that only one call is needed to update both. 
items: Arc<Mutex<DB>>, } impl ZWave { pub fn new(handle: &Handle, cfg: &Config) -> Result<(ZWave, Receiver<Notification<Item>>)> { let cfg = cfg.clone(); let mut manager = { let config_path = match cfg.sys_config { Some(ref path) => path.as_ref(), None => "/etc/openzwave", }; let user_path = match cfg.user_config { Some(ref path) => path.as_ref(), None => "./config", }; let opts = Options::create(config_path, user_path, "--SaveConfiguration true --DumpTriggerLevel 0 \ --ConsoleOutput false")?; ozw::manager::Manager::create(opts)? }; let devices = cfg.port.clone().map(|p| vec![p]).unwrap_or(get_default_devices()); for device in devices { fs::File::open(&device)?; manager.add_driver(&device)?; } let manager = Arc::new(Mutex::new(manager)); let items = Arc::new(Mutex::new(Default::default())); let (tx, rx) = channel(handle)?; let driver = ZWave { ozw_manager: manager.clone(), items: items, }; let watcher = Watcher { cfg: cfg, driver: driver.clone(), output: Mutex::new(tx), }; always_lock(manager.lock()).add_watcher(watcher)?; Ok((driver, rx)) } pub fn get_manager(&self) -> MutexGuard<Manager>
} impl Binding for ZWave { type Config = Config; type Error = Error; type Item = Item; fn new(handle: &Handle, cfg: &Self::Config) -> Result<(Self, Receiver<Notification<Item>>)> { ZWave::new(handle, cfg) } fn get_value(&self, name: &str) -> Option<Item> { always_lock(self.items.lock()).get_item(&String::from(name)).map(|i| i.clone()) } } struct Watcher { driver: ZWave, cfg: Config, output: Mutex<Sender<Notification<Item>>>, } impl Watcher { fn get_out(&self) -> MutexGuard<Sender<Notification<Item>>> { ::catt_core::util::always_lock(self.output.lock()) } } impl ozw::manager::NotificationWatcher for Watcher { fn on_notification(&self, zwave_notification: &ZWaveNotification) { let notification: Notification<Item> = match zwave_notification.get_type() { NotificationType::Type_DriverReady => { let home_id = zwave_notification.get_home_id(); let controller = Item::controller(&format!("zwave_{}_Controller", home_id), self.driver.clone(), home_id); always_lock(self.driver.items.lock()) .add_item(controller.get_name(), controller.clone()); let _ = self.get_out().send(Notification::Added(controller.clone())); Notification::Changed(controller) } NotificationType::Type_AllNodesQueried | NotificationType::Type_AwakeNodesQueried | NotificationType::Type_AllNodesQueriedSomeDead => { debug!("Controller ready"); // self.driver.ozw_manager.write_configs(); return; } NotificationType::Type_ValueAdded => { let v = zwave_notification.get_value_id(); if !should_expose(v) { return; } let mut db = always_lock(self.driver.items.lock()); let (name, exists) = match self.cfg.lookup_device(v) { Some(name) => { let exists = if let Some(_) = db.get_name(&v) { warn!("duplicate match found for {}", name); true } else { false }; (name, exists) } None => { if self.cfg.expose_unbound.unwrap_or(true) { if let Some(name) = db.get_name(&v) { warn!("duplicate match found for unconfigured {}", name); (name.clone(), true) } else { (format!("zwave_{}_{}_{}", v.get_home_id(), v.get_node_id(), v.get_label()), false) } } else { debug!("no configured devices matched {}", v); return; } } }; let item = if !exists { debug!("adding value {} to db", name); db.add_value(name.clone(), v) } else { Item::item(&name, v) }; Notification::Added(item) } NotificationType::Type_ValueChanged => { let v = zwave_notification.get_value_id(); if !should_expose(v) { return; } let db = always_lock(self.driver.items.lock()); let name = match db.get_name(&v) { Some(n) => n, None => return, }; let item = Item::item(&name, v); debug!("value {} changed: {:?}", item.get_name(), item.get_value()); Notification::Changed(item) } NotificationType::Type_ValueRemoved => { let v = zwave_notification.get_value_id(); if !should_expose(v) { return; } let mut db = always_lock(self.driver.items.lock()); let name = match db.get_name(&v) { Some(n) => n.clone(), None => return, }; debug!("removing value {} from db", name); Notification::Removed(match db.remove_value(v) { Some(it) => it, None => Item::item(&name, v), }) } // TODO new implementation for this // ZWaveNotification::Generic(s) => { // if s.contains("Type_DriverRemoved") { // warn!("controller removed!
shutting down."); // ::std::process::exit(1); // } // return; // } NotificationType::Type_ControllerCommand => { let home_id = zwave_notification.get_home_id(); let db_name = format!("zwave_{}_Controller", home_id); let controller = match self.driver.get_value(&db_name) { Some(c) => c, None => { debug!("controller not found in item db"); return; } }; let state = match ControllerState::from_u8(zwave_notification.get_event() .unwrap()) { Some(s) => s, None => { debug!("unknown controller state: {}", zwave_notification.get_event().unwrap()); return; } }; match state { ControllerState::Completed => { let _ = controller.set_value(Value::String("idle".into())); } ControllerState::Failed => { let _ = controller.set_value(Value::String("failed".into())); } ControllerState::Starting => {} _ => { debug!("unhandled controller state: {:?}", state); return; } } Notification::Changed(controller) } _ => { debug!("unmatched notification: {}", zwave_notification); return; } }; match self.get_out().send(notification) { Ok(_) => {} Err(e) => { warn!("zwave notification send error: {}", e); return; } } } } fn should_expose(v: ValueID) -> bool { match v.get_genre() { ValueGenre::ValueGenre_Basic | ValueGenre::ValueGenre_User => {} _ => return false, } match v.get_type() { ValueType::ValueType_Bool | ValueType::ValueType_Byte | ValueType::ValueType_Decimal | ValueType::ValueType_Int | ValueType::ValueType_Short | ValueType::ValueType_String | ValueType::ValueType_Raw => {} _ => return false, } true }
{ always_lock(self.ozw_manager.lock()) }
identifier_body
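The masked identifier_body is get_manager's single expression: lock the shared Manager and return the guard itself, so the borrow checker ties every use of the manager to the lifetime of the lock. A sketch of that guard-returning accessor over std::sync; Shared is a hypothetical name, and always_lock is approximated with unwrap, which panics on a poisoned lock instead of ignoring poisoning.

use std::sync::{Arc, Mutex, MutexGuard};

struct Shared {
    inner: Arc<Mutex<Vec<u32>>>,
}

impl Shared {
    /// Return the guard directly: callers cannot forget to lock, and the
    /// lock is released exactly when the guard goes out of scope.
    fn get(&self) -> MutexGuard<'_, Vec<u32>> {
        self.inner.lock().unwrap()
    }
}

fn main() {
    let s = Shared { inner: Arc::new(Mutex::new(vec![1, 2])) };
    s.get().push(3); // the temporary guard lives for this statement only
    assert_eq!(s.get().len(), 3);
}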
driver.rs
use std::sync::Arc; use std::sync::Mutex; use std::sync::MutexGuard; use std::fs; use catt_core::util::always_lock; use catt_core::binding::Binding; use catt_core::binding::Notification; use catt_core::value::Value; use catt_core::item::Item as CItem; use openzwave as ozw; use openzwave::manager::Manager; use openzwave::options::Options; use openzwave::value_classes::value_id::ValueID; use openzwave::value_classes::value_id::ValueGenre; use openzwave::value_classes::value_id::ValueType; use openzwave::notification::Notification as ZWaveNotification; use openzwave::notification::NotificationType; use openzwave::notification::ControllerState; use serial_ports::{ListPortInfo, ListPorts}; use serial_ports::ListPortType::UsbPort; use tokio_core::reactor::Handle; use tokio_core::channel::channel; use tokio_core::channel::Sender; use tokio_core::channel::Receiver; use config::Config; use errors::*; use item::Item; use device::DB; #[cfg(windows)] fn get_default_devices() -> Vec<String> { vec!["\\\\.\\COM6".to_owned()] } #[cfg(unix)] fn is_usb_zwave_device(port: &ListPortInfo) -> bool { let default_usb_devices = [// VID PID // ----- ----- (0x0658, 0x0200), // Aeotech Z-Stick Gen-5 (0x0658, 0x0280), // UZB1 (0x10c4, 0xea60) /* Aeotech Z-Stick S2 */]; // Is it one of the vid/pids in the table? if let UsbPort(ref info) = port.port_type { default_usb_devices.contains(&(info.vid, info.pid)) } else { false } } #[cfg(unix)] fn get_default_devices() -> Vec<String> { // Enumerate all of the serial devices and see if any of them match our // known VID:PID. let mut ports: Vec<String> = Vec::new(); let usb_ports: Vec<String> = ListPorts::new() .iter() .filter(|port| is_usb_zwave_device(port)) .map(|port| port.device.to_string_lossy().into_owned()) .collect(); ports.extend(usb_ports); if ports.is_empty() { // The following is only included temporarily until we can get a more // comprehensive list of VIDs and PIDs. error!("[OpenzwaveStateful] Unable to locate ZWave USB dongle. The following VID:PIDs \ were found:"); for port in ListPorts::new().iter() { if let UsbPort(ref info) = port.port_type { error!("[OpenzwaveStateful] {:04x}:{:04x} {}", info.vid, info.pid, port.device.display()); } } // The following should be removed, once we have all of the devices captured using the above let default_devices = ["/dev/cu.usbserial", // MacOS X (presumably) "/dev/cu.SLAB_USBtoUART", // MacOS X (Aeotech Z-Stick S2) "/dev/cu.usbmodem14211", // Yoric (Aeotech Z-Stick Gen-5) "/dev/cu.usbmodem1421", // Isabel (UZB Static Controller) "/dev/ttyUSB0", // Linux (Aeotech Z-Stick S2) "/dev/ttyACM0" /* Linux (Aeotech Z-Stick Gen-5) */]; if let Some(default_device) = default_devices.iter() .find(|device_name| fs::metadata(device_name).is_ok()) .map(|&str| str.to_owned()) { ports.push(default_device); } } ports } #[derive(Clone)] pub struct ZWave { #[allow(dead_code)] ozw_manager: Arc<Mutex<ozw::manager::Manager>>, // TODO improve this system - ideally, we should hide these behind another struct // so that only one call is needed to update both. 
items: Arc<Mutex<DB>>, } impl ZWave { pub fn new(handle: &Handle, cfg: &Config) -> Result<(ZWave, Receiver<Notification<Item>>)> { let cfg = cfg.clone(); let mut manager = { let config_path = match cfg.sys_config { Some(ref path) => path.as_ref(), None => "/etc/openzwave", }; let user_path = match cfg.user_config { Some(ref path) => path.as_ref(), None => "./config", }; let opts = Options::create(config_path, user_path, "--SaveConfiguration true --DumpTriggerLevel 0 \ --ConsoleOutput false")?; ozw::manager::Manager::create(opts)? }; let devices = cfg.port.clone().map(|p| vec![p]).unwrap_or(get_default_devices()); for device in devices { fs::File::open(&device)?; manager.add_driver(&device)?; } let manager = Arc::new(Mutex::new(manager)); let items = Arc::new(Mutex::new(Default::default())); let (tx, rx) = channel(handle)?; let driver = ZWave { ozw_manager: manager.clone(), items: items, }; let watcher = Watcher { cfg: cfg, driver: driver.clone(), output: Mutex::new(tx), }; always_lock(manager.lock()).add_watcher(watcher)?; Ok((driver, rx)) } pub fn get_manager(&self) -> MutexGuard<Manager> { always_lock(self.ozw_manager.lock()) } } impl Binding for ZWave { type Config = Config; type Error = Error; type Item = Item; fn new(handle: &Handle, cfg: &Self::Config) -> Result<(Self, Receiver<Notification<Item>>)> { ZWave::new(handle, cfg) } fn get_value(&self, name: &str) -> Option<Item> { always_lock(self.items.lock()).get_item(&String::from(name)).map(|i| i.clone()) } } struct Watcher { driver: ZWave, cfg: Config, output: Mutex<Sender<Notification<Item>>>, } impl Watcher { fn get_out(&self) -> MutexGuard<Sender<Notification<Item>>> { ::catt_core::util::always_lock(self.output.lock()) } } impl ozw::manager::NotificationWatcher for Watcher { fn on_notification(&self, zwave_notification: &ZWaveNotification) { let notification: Notification<Item> = match zwave_notification.get_type() { NotificationType::Type_DriverReady => { let home_id = zwave_notification.get_home_id(); let controller = Item::controller(&format!("zwave_{}_Controller", home_id), self.driver.clone(), home_id); always_lock(self.driver.items.lock()) .add_item(controller.get_name(), controller.clone()); let _ = self.get_out().send(Notification::Added(controller.clone())); Notification::Changed(controller) } NotificationType::Type_AllNodesQueried | NotificationType::Type_AwakeNodesQueried | NotificationType::Type_AllNodesQueriedSomeDead => { debug!("Controller ready"); // self.driver.ozw_manager.write_configs(); return; } NotificationType::Type_ValueAdded => { let v = zwave_notification.get_value_id(); if !should_expose(v) { return; } let mut db = always_lock(self.driver.items.lock()); let (name, exists) = match self.cfg.lookup_device(v) { Some(name) => { let exists = if let Some(_) = db.get_name(&v) { warn!("duplicate match found for {}", name); true } else { false }; (name, exists) } None => { if self.cfg.expose_unbound.unwrap_or(true) { if let Some(name) = db.get_name(&v) { warn!("duplicate match found for unconfigured {}", name); (name.clone(), true) } else { (format!("zwave_{}_{}_{}", v.get_home_id(), v.get_node_id(), v.get_label()), false) } } else { debug!("no configured devices matched {}", v); return; } } }; let item = if !exists { debug!("adding value {} to db", name); db.add_value(name.clone(), v) } else { Item::item(&name, v) }; Notification::Added(item) } NotificationType::Type_ValueChanged => { let v = zwave_notification.get_value_id(); if !should_expose(v) { return; } let db = always_lock(self.driver.items.lock()); let name
= match db.get_name(&v) { Some(n) => n, None => return, }; let item = Item::item(&name, v); debug!("value {} changed: {:?}", item.get_name(), item.get_value()); Notification::Changed(item) } NotificationType::Type_ValueRemoved => { let v = zwave_notification.get_value_id(); if !should_expose(v) { return; } let mut db = always_lock(self.driver.items.lock()); let name = match db.get_name(&v) { Some(n) => n.clone(), None => return, }; debug!("removing value {} from db", name); Notification::Removed(match db.remove_value(v) { Some(it) => it, None => Item::item(&name, v), }) } // TODO new implementation for this // ZWaveNotification::Generic(s) => { // if s.contains("Type_DriverRemoved") { // warn!("controller removed! shutting down."); // ::std::process::exit(1); // } // return; // } NotificationType::Type_ControllerCommand => { let home_id = zwave_notification.get_home_id(); let db_name = format!("zwave_{}_Controller", home_id); let controller = match self.driver.get_value(&db_name) { Some(c) => c, None => { debug!("controller not found in item db"); return; } }; let state = match ControllerState::from_u8(zwave_notification.get_event() .unwrap()) {
debug!("unknown controller state: {}", zwave_notification.get_event().unwrap()); return; } }; match state { ControllerState::Completed => { let _ = controller.set_value(Value::String("idle".into())); } ControllerState::Failed => { let _ = controller.set_value(Value::String("failed".into())); } ControllerState::Starting => {} _ => { debug!("unhandled controller state: {:?}", state); return; } } Notification::Changed(controller) } _ => { debug!("unmatched notification: {}", zwave_notification); return; } }; match self.get_out().send(notification) { Ok(_) => {} Err(e) => { warn!("zwave notification send error: {}", e); return; } } } } fn should_expose(v: ValueID) -> bool { match v.get_genre() { ValueGenre::ValueGenre_Basic | ValueGenre::ValueGenre_User => {} _ => return false, } match v.get_type() { ValueType::ValueType_Bool | ValueType::ValueType_Byte | ValueType::ValueType_Decimal | ValueType::ValueType_Int | ValueType::ValueType_Short | ValueType::ValueType_String | ValueType::ValueType_Raw => {} _ => return false, } true }
Some(s) => s, None => {
random_line_split
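should_expose, repeated at the end of every driver.rs sample, is a pure whitelist: a value passes only if its genre is Basic or User and its type is one of a fixed set of primitives. A reduced sketch of that double-match shape; Genre and Kind are hypothetical stand-ins for openzwave's ValueGenre and ValueType.

#[derive(Clone, Copy)]
#[allow(dead_code)]
enum Genre { Basic, User, System }

#[derive(Clone, Copy)]
#[allow(dead_code)]
enum Kind { Bool, Byte, Int, String, Button }

/// Expose a value only if both its genre and its type are whitelisted,
/// mirroring the two sequential `match` blocks in `should_expose`.
fn should_expose(genre: Genre, kind: Kind) -> bool {
    match genre {
        Genre::Basic | Genre::User => {}
        _ => return false,
    }
    match kind {
        Kind::Bool | Kind::Byte | Kind::Int | Kind::String => {}
        _ => return false,
    }
    true
}

fn main() {
    assert!(should_expose(Genre::User, Kind::Bool));
    assert!(!should_expose(Genre::System, Kind::Bool)); // genre filtered out
    assert!(!should_expose(Genre::User, Kind::Button)); // type filtered out
}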