file_name (large_string, lengths 4-69) | prefix (large_string, lengths 0-26.7k) | suffix (large_string, lengths 0-24.8k) | middle (large_string, lengths 0-2.12k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
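Each row stores one Rust source file split into three text columns: concatenating `prefix`, `middle`, and `suffix` in that order reconstructs the original file, while `fim_type` records how the `middle` span was carved out (`identifier_name`, `identifier_body`, `random_line_split`, ...). The sketch below is illustrative only and not part of the dataset itself; the miniature row it uses is hypothetical, mirroring the `identifier_name` example further down.

```rust
// Illustrative sketch (assumption: not shipped with this dataset).
// A fill-in-the-middle model sees `prefix` and `suffix` and must predict `middle`;
// gluing the three columns back together yields the original source file.
fn reassemble(prefix: &str, middle: &str, suffix: &str) -> String {
    let mut file = String::with_capacity(prefix.len() + middle.len() + suffix.len());
    file.push_str(prefix);
    file.push_str(middle);
    file.push_str(suffix);
    file
}

fn main() {
    // Hypothetical miniature row with fim_type = "identifier_name".
    let prefix = "pub unsafe fn ";
    let middle = "collect_requester_info";
    let suffix = "(pid: u32) -> u32 { 0 }";
    assert_eq!(
        reassemble(prefix, middle, suffix),
        "pub unsafe fn collect_requester_info(pid: u32) -> u32 { 0 }"
    );
}
```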
utils.rs | use std::ffi::{c_void, CStr};
use std::mem::{MaybeUninit, size_of_val, transmute};
use std::path::{Path, PathBuf};
use std::ptr::{null, null_mut};
use itertools::Itertools;
use log::error;
use ntapi::ntpebteb::PEB;
use ntapi::ntpsapi::{NtQueryInformationProcess, PROCESS_BASIC_INFORMATION, ProcessBasicInformation};
use ntapi::ntrtl::RTL_USER_PROCESS_PARAMETERS;
use winapi::shared::guiddef::GUID;
use winapi::shared::minwindef::{BOOL, DWORD, FALSE, LPARAM, TRUE};
use winapi::shared::ntdef::{HANDLE, UNICODE_STRING};
use winapi::shared::ntstatus::STATUS_SUCCESS;
use winapi::shared::windef::HWND;
use winapi::um::combaseapi::CoTaskMemFree;
use winapi::um::errhandlingapi::GetLastError;
use winapi::um::handleapi::CloseHandle;
use winapi::um::memoryapi::ReadProcessMemory;
use winapi::um::processthreadsapi::OpenProcess;
use winapi::um::psapi::GetModuleFileNameExW;
use winapi::um::shlobj::SHGetKnownFolderPath;
use winapi::um::winbase::{FORMAT_MESSAGE_ALLOCATE_BUFFER, FORMAT_MESSAGE_FROM_SYSTEM, FormatMessageA, LocalFree};
use winapi::um::winnt::{LANG_USER_DEFAULT, LPSTR, PROCESS_QUERY_LIMITED_INFORMATION, PROCESS_VM_READ, PWSTR};
use winapi::um::winuser::{EnumWindows, GetWindowTextLengthW, GetWindowTextW, GetWindowThreadProcessId, IsWindowVisible};
use winapi::um::winver::{GetFileVersionInfoSizeW, GetFileVersionInfoW, VerQueryValueW};
use wrapperrs::Error;
use crate::agent::RequesterInfo;
use crate::config::Config;
use crate::utils::Finally;
use super::process_describers::describe;
pub trait StrExt {
fn to_utf16_null(&self) -> Vec<u16>;
}
impl StrExt for &str {
fn to_utf16_null(&self) -> Vec<u16> {
let mut v: Vec<_> = self.encode_utf16().collect();
v.push(0);
v
}
}
pub fn check_error() -> wrapperrs::Result<()> {
format_error(unsafe { GetLastError() })
}
pub fn format_error(err: u32) -> wrapperrs::Result<()> {
unsafe {
if err == 0 {
return Ok(());
}
let msg_ptr: LPSTR = null_mut();
FormatMessageA(
FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM,
null(),
err as u32,
LANG_USER_DEFAULT as u32,
transmute(&msg_ptr),
0,
null_mut(),
);
let msg = CStr::from_ptr(msg_ptr).to_str().unwrap();
let err = wrapperrs::Error::new(&format!("(win32) {}", &msg[..msg.len() - 2]));
LocalFree(msg_ptr as *mut c_void);
Err(err.into())
}
}
pub unsafe fn close_handle(handle: *mut c_void) -> impl Drop {
Finally::new(move || { CloseHandle(handle); })
}
pub fn get_known_folder(folder_id: GUID) -> PathBuf {
unsafe {
let mut wstr: PWSTR = null_mut();
SHGetKnownFolderPath(&folder_id, 0, null_mut(), &mut wstr);
let length = (0..).into_iter()
.take_while(|i| wstr.offset(*i).read() != 0)
.count();
let str = String::from_utf16(
std::slice::from_raw_parts(wstr, length)).unwrap();
CoTaskMemFree(wstr as *mut c_void);
PathBuf::from(str)
}
}
pub unsafe fn get_executable_from_pid(pid: u32) -> wrapperrs::Result<PathBuf> |
pub unsafe fn get_executable_description(exe: &Path) -> Result<String, ()> {
let exe_utf16 = exe.to_str().unwrap().to_utf16_null();
let mut handle: DWORD = 0;
let size = GetFileVersionInfoSizeW(exe_utf16.as_ptr(), &mut handle);
if size == 0 {
error!("GetFileVersionInfoSizeW, err={}, exe={}", GetLastError(), exe.to_str().unwrap());
return Err(());
}
let mut data = vec![0u8; size as _];
if GetFileVersionInfoW(exe_utf16.as_ptr(), 0, data.len() as _,
data.as_mut_ptr() as _) == 0 {
error!("GetFileVersionInfoW, err={}, exe={}", GetLastError(), exe.to_str().unwrap());
return Err(());
}
let mut data_ptr: *mut DWORD = null_mut();
let mut size: u32 = 0;
if VerQueryValueW(data.as_ptr() as _,
r"\VarFileInfo\Translation".to_utf16_null().as_ptr(),
&mut *(&mut data_ptr as *mut _ as *mut *mut _), &mut size as _) == 0 {
error!("VerQueryValueW (translation), err={}, exe={}", GetLastError(), exe.to_str().unwrap());
return Err(());
}
let language = *data_ptr;
let lang_id = language & 0xffff;
let code_page = language >> 16 & 0xffff;
let mut data_ptr: *mut u16 = null_mut();
let mut size: u32 = 0;
let query = format!(r"\StringFileInfo\{:0>4x}{:0>4x}\FileDescription", lang_id, code_page);
if VerQueryValueW(data.as_ptr() as _, query.as_str().to_utf16_null().as_ptr(),
&mut *(&mut data_ptr as *mut _ as *mut *mut _),
&mut size as _) == 0 {
let err = GetLastError();
// 1813 - FileDescription resource type not found
if err != 1813 {
error!("VerQueryValueW (file description), err={}, exe={}, query={}", err,
exe.to_str().unwrap(), query);
}
return Err(());
};
let data: Vec<_> = (0..).step_by(2)
.map(|offset| data_ptr.offset(offset / 2).read())
.take_while(|c| *c != 0)
.collect();
Ok(String::from_utf16(&data).unwrap())
}
pub unsafe fn get_parent_pid(pid: u32) -> u32 {
let process = OpenProcess(PROCESS_QUERY_LIMITED_INFORMATION, FALSE, pid);
if process == null_mut() {
return 0;
}
let _close_process = close_handle(process);
let mut info: PROCESS_BASIC_INFORMATION = MaybeUninit::zeroed().assume_init();
if NtQueryInformationProcess(process, ProcessBasicInformation, &mut info as *mut _ as _,
size_of_val(&info) as _, null_mut()) != STATUS_SUCCESS {
return 0;
}
info.InheritedFromUniqueProcessId as _
}
pub unsafe fn find_primary_window(process_id: u32) -> Option<HWND> {
struct Data {
process_id: u32,
windows: Vec<HWND>,
}
unsafe extern "system" fn window_proc(hwnd: HWND, lparam: LPARAM) -> BOOL {
let data = &mut *(lparam as *mut Data);
let mut process_id = 0;
GetWindowThreadProcessId(hwnd, &mut process_id);
if process_id == data.process_id {
data.windows.push(hwnd);
};
TRUE
}
let mut data = Data {
process_id,
windows: Vec::new(),
};
EnumWindows(Some(window_proc), &mut data as *mut _ as _);
if data.windows.is_empty() {
return None;
};
data.windows
.iter()
.find(|&&hwnd| IsWindowVisible(hwnd) == TRUE)
.or_else(|| data.windows.first())
.copied()
}
pub unsafe fn get_window_text(win: HWND) -> Result<String, ()> {
let mut title = vec![0; (GetWindowTextLengthW(win) + 1) as _];
let length = GetWindowTextW(win, title.as_mut_ptr(), title.len() as _);
if length > 0 {
Ok(String::from_utf16(&title[..length as _]).unwrap())
} else {
Err(())
}
}
pub unsafe fn get_process_command_line(pid: u32) -> Result<String, ()> {
let process = OpenProcess(PROCESS_QUERY_LIMITED_INFORMATION | PROCESS_VM_READ,
FALSE, pid);
if process == null_mut() {
return Err(());
}
let _close_process = close_handle(process);
let mut info: PROCESS_BASIC_INFORMATION = MaybeUninit::zeroed().assume_init();
let res = NtQueryInformationProcess(process, ProcessBasicInformation,
&mut info as *mut _ as _,
size_of_val(&info) as u32, null_mut());
if res != STATUS_SUCCESS {
return Err(());
}
unsafe fn read_process<T>(process: HANDLE, addr: *mut c_void) -> std::result::Result<T, ()> {
let mut dst: T = MaybeUninit::zeroed().assume_init();
if ReadProcessMemory(process, addr, &mut dst as *mut _ as _, size_of_val(&dst),
null_mut()) == 0 {
dbg!(GetLastError());
Err(())
} else {
Ok(dst)
}
}
unsafe fn read_process_unicode_string(process: HANDLE, s: UNICODE_STRING)
-> std::result::Result<String, ()> {
let mut buffer = vec![0u16; (s.Length / 2) as _];
if ReadProcessMemory(process, s.Buffer as _, buffer.as_mut_ptr() as _,
s.Length as _, null_mut()) == 0 {
dbg!(GetLastError());
return Err(());
}
Ok(String::from_utf16(&buffer).unwrap())
}
if let Ok(command_line) = (|| -> std::result::Result<_, ()> {
let peb: PEB = read_process(process, info.PebBaseAddress as _)?;
let parameters: RTL_USER_PROCESS_PARAMETERS = read_process(process,
peb.ProcessParameters as _)?;
read_process_unicode_string(process, parameters.CommandLine)
})() {
Ok(command_line)
} else {
Err(())
}
}
pub unsafe fn collect_requester_info(_config: &Config, mut pid: u32)
-> wrapperrs::Result<RequesterInfo> {
let mut process_stack = Vec::new();
while pid != 0 {
let window = find_primary_window(pid);
process_stack.push((pid, window));
pid = get_parent_pid(pid);
}
let main_process = process_stack.iter()
.find(|(_, window)| match *window {
Some(window) if IsWindowVisible(window) == TRUE => true,
_ => false
})
.or_else(|| process_stack.iter()
.find(|(_, window)| window.is_some()))
.or(process_stack.first())
.unwrap();
let short = describe(main_process.0, main_process.1)?.0;
let long = process_stack
.iter()
.filter_map(|(pid, window)| describe(*pid, *window)
.map(|(_, long)| long)
.ok())
.intersperse("\n\n".into())
.collect::<String>();
Ok(RequesterInfo {
description_short: short,
description_long: long,
})
}
| {
let process = OpenProcess(PROCESS_QUERY_LIMITED_INFORMATION, FALSE, pid);
if process == null_mut() {
return Err(Error::new("OpenProcess").into());
};
let _close_process = close_handle(process);
let mut name = [0u16; 32 * 1024];
let length = GetModuleFileNameExW(process, null_mut(), name.as_mut_ptr(), name.len() as _);
if length == 0 {
Err(Error::new("GetModuleFileNameExW").into())
} else {
Ok(PathBuf::from(String::from_utf16(&name[..length as _]).unwrap()))
}
} | identifier_body |
utils.rs | use std::ffi::{c_void, CStr};
use std::mem::{MaybeUninit, size_of_val, transmute}; |
use itertools::Itertools;
use log::error;
use ntapi::ntpebteb::PEB;
use ntapi::ntpsapi::{NtQueryInformationProcess, PROCESS_BASIC_INFORMATION, ProcessBasicInformation};
use ntapi::ntrtl::RTL_USER_PROCESS_PARAMETERS;
use winapi::shared::guiddef::GUID;
use winapi::shared::minwindef::{BOOL, DWORD, FALSE, LPARAM, TRUE};
use winapi::shared::ntdef::{HANDLE, UNICODE_STRING};
use winapi::shared::ntstatus::STATUS_SUCCESS;
use winapi::shared::windef::HWND;
use winapi::um::combaseapi::CoTaskMemFree;
use winapi::um::errhandlingapi::GetLastError;
use winapi::um::handleapi::CloseHandle;
use winapi::um::memoryapi::ReadProcessMemory;
use winapi::um::processthreadsapi::OpenProcess;
use winapi::um::psapi::GetModuleFileNameExW;
use winapi::um::shlobj::SHGetKnownFolderPath;
use winapi::um::winbase::{FORMAT_MESSAGE_ALLOCATE_BUFFER, FORMAT_MESSAGE_FROM_SYSTEM, FormatMessageA, LocalFree};
use winapi::um::winnt::{LANG_USER_DEFAULT, LPSTR, PROCESS_QUERY_LIMITED_INFORMATION, PROCESS_VM_READ, PWSTR};
use winapi::um::winuser::{EnumWindows, GetWindowTextLengthW, GetWindowTextW, GetWindowThreadProcessId, IsWindowVisible};
use winapi::um::winver::{GetFileVersionInfoSizeW, GetFileVersionInfoW, VerQueryValueW};
use wrapperrs::Error;
use crate::agent::RequesterInfo;
use crate::config::Config;
use crate::utils::Finally;
use super::process_describers::describe;
pub trait StrExt {
fn to_utf16_null(&self) -> Vec<u16>;
}
impl StrExt for &str {
fn to_utf16_null(&self) -> Vec<u16> {
let mut v: Vec<_> = self.encode_utf16().collect();
v.push(0);
v
}
}
pub fn check_error() -> wrapperrs::Result<()> {
format_error(unsafe { GetLastError() })
}
pub fn format_error(err: u32) -> wrapperrs::Result<()> {
unsafe {
if err == 0 {
return Ok(());
}
let msg_ptr: LPSTR = null_mut();
FormatMessageA(
FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM,
null(),
err as u32,
LANG_USER_DEFAULT as u32,
transmute(&msg_ptr),
0,
null_mut(),
);
let msg = CStr::from_ptr(msg_ptr).to_str().unwrap();
let err = wrapperrs::Error::new(&format!("(win32) {}", &msg[..msg.len() - 2]));
LocalFree(msg_ptr as *mut c_void);
Err(err.into())
}
}
pub unsafe fn close_handle(handle: *mut c_void) -> impl Drop {
Finally::new(move || { CloseHandle(handle); })
}
pub fn get_known_folder(folder_id: GUID) -> PathBuf {
unsafe {
let mut wstr: PWSTR = null_mut();
SHGetKnownFolderPath(&folder_id, 0, null_mut(), &mut wstr);
let length = (0..).into_iter()
.take_while(|i| wstr.offset(*i).read() != 0)
.count();
let str = String::from_utf16(
std::slice::from_raw_parts(wstr, length)).unwrap();
CoTaskMemFree(wstr as *mut c_void);
PathBuf::from(str)
}
}
pub unsafe fn get_executable_from_pid(pid: u32) -> wrapperrs::Result<PathBuf> {
let process = OpenProcess(PROCESS_QUERY_LIMITED_INFORMATION, FALSE, pid);
if process == null_mut() {
return Err(Error::new("OpenProcess").into());
};
let _close_process = close_handle(process);
let mut name = [0u16; 32 * 1024];
let length = GetModuleFileNameExW(process, null_mut(), name.as_mut_ptr(), name.len() as _);
if length == 0 {
Err(Error::new("GetModuleFileNameExW").into())
} else {
Ok(PathBuf::from(String::from_utf16(&name[..length as _]).unwrap()))
}
}
pub unsafe fn get_executable_description(exe: &Path) -> Result<String, ()> {
let exe_utf16 = exe.to_str().unwrap().to_utf16_null();
let mut handle: DWORD = 0;
let size = GetFileVersionInfoSizeW(exe_utf16.as_ptr(), &mut handle);
if size == 0 {
error!("GetFileVersionInfoSizeW, err={}, exe={}", GetLastError(), exe.to_str().unwrap());
return Err(());
}
let mut data = vec![0u8; size as _];
if GetFileVersionInfoW(exe_utf16.as_ptr(), 0, data.len() as _,
data.as_mut_ptr() as _) == 0 {
error!("GetFileVersionInfoW, err={}, exe={}", GetLastError(), exe.to_str().unwrap());
return Err(());
}
let mut data_ptr: *mut DWORD = null_mut();
let mut size: u32 = 0;
if VerQueryValueW(data.as_ptr() as _,
r"\VarFileInfo\Translation".to_utf16_null().as_ptr(),
&mut *(&mut data_ptr as *mut _ as *mut *mut _), &mut size as _) == 0 {
error!("VerQueryValueW (translation), err={}, exe={}", GetLastError(), exe.to_str().unwrap());
return Err(());
}
let language = *data_ptr;
let lang_id = language & 0xffff;
let code_page = language >> 16 & 0xffff;
let mut data_ptr: *mut u16 = null_mut();
let mut size: u32 = 0;
let query = format!(r"\StringFileInfo\{:0>4x}{:0>4x}\FileDescription", lang_id, code_page);
if VerQueryValueW(data.as_ptr() as _, query.as_str().to_utf16_null().as_ptr(),
&mut *(&mut data_ptr as *mut _ as *mut *mut _),
&mut size as _) == 0 {
let err = GetLastError();
// 1813 - FileDescription resource type not found
if err != 1813 {
error!("VerQueryValueW (file description), err={}, exe={}, query={}", err,
exe.to_str().unwrap(), query);
}
return Err(());
};
let data: Vec<_> = (0..).step_by(2)
.map(|offset| data_ptr.offset(offset / 2).read())
.take_while(|c| *c != 0)
.collect();
Ok(String::from_utf16(&data).unwrap())
}
pub unsafe fn get_parent_pid(pid: u32) -> u32 {
let process = OpenProcess(PROCESS_QUERY_LIMITED_INFORMATION, FALSE, pid);
if process == null_mut() {
return 0;
}
let _close_process = close_handle(process);
let mut info: PROCESS_BASIC_INFORMATION = MaybeUninit::zeroed().assume_init();
if NtQueryInformationProcess(process, ProcessBasicInformation, &mut info as *mut _ as _,
size_of_val(&info) as _, null_mut()) != STATUS_SUCCESS {
return 0;
}
info.InheritedFromUniqueProcessId as _
}
pub unsafe fn find_primary_window(process_id: u32) -> Option<HWND> {
struct Data {
process_id: u32,
windows: Vec<HWND>,
}
unsafe extern "system" fn window_proc(hwnd: HWND, lparam: LPARAM) -> BOOL {
let data = &mut *(lparam as *mut Data);
let mut process_id = 0;
GetWindowThreadProcessId(hwnd, &mut process_id);
if process_id == data.process_id {
data.windows.push(hwnd);
};
TRUE
}
let mut data = Data {
process_id,
windows: Vec::new(),
};
EnumWindows(Some(window_proc), &mut data as *mut _ as _);
if data.windows.is_empty() {
return None;
};
data.windows
.iter()
.find(|&&hwnd| IsWindowVisible(hwnd) == TRUE)
.or_else(|| data.windows.first())
.copied()
}
pub unsafe fn get_window_text(win: HWND) -> Result<String, ()> {
let mut title = vec![0; (GetWindowTextLengthW(win) + 1) as _];
let length = GetWindowTextW(win, title.as_mut_ptr(), title.len() as _);
if length > 0 {
Ok(String::from_utf16(&title[..length as _]).unwrap())
} else {
Err(())
}
}
pub unsafe fn get_process_command_line(pid: u32) -> Result<String, ()> {
let process = OpenProcess(PROCESS_QUERY_LIMITED_INFORMATION | PROCESS_VM_READ,
FALSE, pid);
if process == null_mut() {
return Err(());
}
let _close_process = close_handle(process);
let mut info: PROCESS_BASIC_INFORMATION = MaybeUninit::zeroed().assume_init();
let res = NtQueryInformationProcess(process, ProcessBasicInformation,
&mut info as *mut _ as _,
size_of_val(&info) as u32, null_mut());
if res != STATUS_SUCCESS {
return Err(());
}
unsafe fn read_process<T>(process: HANDLE, addr: *mut c_void) -> std::result::Result<T, ()> {
let mut dst: T = MaybeUninit::zeroed().assume_init();
if ReadProcessMemory(process, addr, &mut dst as *mut _ as _, size_of_val(&dst),
null_mut()) == 0 {
dbg!(GetLastError());
Err(())
} else {
Ok(dst)
}
}
unsafe fn read_process_unicode_string(process: HANDLE, s: UNICODE_STRING)
-> std::result::Result<String, ()> {
let mut buffer = vec![0u16; (s.Length / 2) as _];
if ReadProcessMemory(process, s.Buffer as _, buffer.as_mut_ptr() as _,
s.Length as _, null_mut()) == 0 {
dbg!(GetLastError());
return Err(());
}
Ok(String::from_utf16(&buffer).unwrap())
}
if let Ok(command_line) = (|| -> std::result::Result<_, ()> {
let peb: PEB = read_process(process, info.PebBaseAddress as _)?;
let parameters: RTL_USER_PROCESS_PARAMETERS = read_process(process,
peb.ProcessParameters as _)?;
read_process_unicode_string(process, parameters.CommandLine)
})() {
Ok(command_line)
} else {
Err(())
}
}
pub unsafe fn collect_requester_info(_config: &Config, mut pid: u32)
-> wrapperrs::Result<RequesterInfo> {
let mut process_stack = Vec::new();
while pid != 0 {
let window = find_primary_window(pid);
process_stack.push((pid, window));
pid = get_parent_pid(pid);
}
let main_process = process_stack.iter()
.find(|(_, window)| match *window {
Some(window) if IsWindowVisible(window) == TRUE => true,
_ => false
})
.or_else(|| process_stack.iter()
.find(|(_, window)| window.is_some()))
.or(process_stack.first())
.unwrap();
let short = describe(main_process.0, main_process.1)?.0;
let long = process_stack
.iter()
.filter_map(|(pid, window)| describe(*pid, *window)
.map(|(_, long)| long)
.ok())
.intersperse("\n\n".into())
.collect::<String>();
Ok(RequesterInfo {
description_short: short,
description_long: long,
})
} | use std::path::{Path, PathBuf};
use std::ptr::{null, null_mut}; | random_line_split |
utils.rs | use std::ffi::{c_void, CStr};
use std::mem::{MaybeUninit, size_of_val, transmute};
use std::path::{Path, PathBuf};
use std::ptr::{null, null_mut};
use itertools::Itertools;
use log::error;
use ntapi::ntpebteb::PEB;
use ntapi::ntpsapi::{NtQueryInformationProcess, PROCESS_BASIC_INFORMATION, ProcessBasicInformation};
use ntapi::ntrtl::RTL_USER_PROCESS_PARAMETERS;
use winapi::shared::guiddef::GUID;
use winapi::shared::minwindef::{BOOL, DWORD, FALSE, LPARAM, TRUE};
use winapi::shared::ntdef::{HANDLE, UNICODE_STRING};
use winapi::shared::ntstatus::STATUS_SUCCESS;
use winapi::shared::windef::HWND;
use winapi::um::combaseapi::CoTaskMemFree;
use winapi::um::errhandlingapi::GetLastError;
use winapi::um::handleapi::CloseHandle;
use winapi::um::memoryapi::ReadProcessMemory;
use winapi::um::processthreadsapi::OpenProcess;
use winapi::um::psapi::GetModuleFileNameExW;
use winapi::um::shlobj::SHGetKnownFolderPath;
use winapi::um::winbase::{FORMAT_MESSAGE_ALLOCATE_BUFFER, FORMAT_MESSAGE_FROM_SYSTEM, FormatMessageA, LocalFree};
use winapi::um::winnt::{LANG_USER_DEFAULT, LPSTR, PROCESS_QUERY_LIMITED_INFORMATION, PROCESS_VM_READ, PWSTR};
use winapi::um::winuser::{EnumWindows, GetWindowTextLengthW, GetWindowTextW, GetWindowThreadProcessId, IsWindowVisible};
use winapi::um::winver::{GetFileVersionInfoSizeW, GetFileVersionInfoW, VerQueryValueW};
use wrapperrs::Error;
use crate::agent::RequesterInfo;
use crate::config::Config;
use crate::utils::Finally;
use super::process_describers::describe;
pub trait StrExt {
fn to_utf16_null(&self) -> Vec<u16>;
}
impl StrExt for &str {
fn to_utf16_null(&self) -> Vec<u16> {
let mut v: Vec<_> = self.encode_utf16().collect();
v.push(0);
v
}
}
pub fn check_error() -> wrapperrs::Result<()> {
format_error(unsafe { GetLastError() })
}
pub fn format_error(err: u32) -> wrapperrs::Result<()> {
unsafe {
if err == 0 {
return Ok(());
}
let msg_ptr: LPSTR = null_mut();
FormatMessageA(
FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM,
null(),
err as u32,
LANG_USER_DEFAULT as u32,
transmute(&msg_ptr),
0,
null_mut(),
);
let msg = CStr::from_ptr(msg_ptr).to_str().unwrap();
let err = wrapperrs::Error::new(&format!("(win32) {}", &msg[..msg.len() - 2]));
LocalFree(msg_ptr as *mut c_void);
Err(err.into())
}
}
pub unsafe fn close_handle(handle: *mut c_void) -> impl Drop {
Finally::new(move || { CloseHandle(handle); })
}
pub fn get_known_folder(folder_id: GUID) -> PathBuf {
unsafe {
let mut wstr: PWSTR = null_mut();
SHGetKnownFolderPath(&folder_id, 0, null_mut(), &mut wstr);
let length = (0..).into_iter()
.take_while(|i| wstr.offset(*i).read() != 0)
.count();
let str = String::from_utf16(
std::slice::from_raw_parts(wstr, length)).unwrap();
CoTaskMemFree(wstr as *mut c_void);
PathBuf::from(str)
}
}
pub unsafe fn get_executable_from_pid(pid: u32) -> wrapperrs::Result<PathBuf> {
let process = OpenProcess(PROCESS_QUERY_LIMITED_INFORMATION, FALSE, pid);
if process == null_mut() {
return Err(Error::new("OpenProcess").into());
};
let _close_process = close_handle(process);
let mut name = [0u16; 32 * 1024];
let length = GetModuleFileNameExW(process, null_mut(), name.as_mut_ptr(), name.len() as _);
if length == 0 {
Err(Error::new("GetModuleFileNameExW").into())
} else {
Ok(PathBuf::from(String::from_utf16(&name[..length as _]).unwrap()))
}
}
pub unsafe fn get_executable_description(exe: &Path) -> Result<String, ()> {
let exe_utf16 = exe.to_str().unwrap().to_utf16_null();
let mut handle: DWORD = 0;
let size = GetFileVersionInfoSizeW(exe_utf16.as_ptr(), &mut handle);
if size == 0 {
error!("GetFileVersionInfoSizeW, err={}, exe={}", GetLastError(), exe.to_str().unwrap());
return Err(());
}
let mut data = vec![0u8; size as _];
if GetFileVersionInfoW(exe_utf16.as_ptr(), 0, data.len() as _,
data.as_mut_ptr() as _) == 0 {
error!("GetFileVersionInfoW, err={}, exe={}", GetLastError(), exe.to_str().unwrap());
return Err(());
}
let mut data_ptr: *mut DWORD = null_mut();
let mut size: u32 = 0;
if VerQueryValueW(data.as_ptr() as _,
r"\VarFileInfo\Translation".to_utf16_null().as_ptr(),
&mut *(&mut data_ptr as *mut _ as *mut *mut _), &mut size as _) == 0 {
error!("VerQueryValueW (translation), err={}, exe={}", GetLastError(), exe.to_str().unwrap());
return Err(());
}
let language = *data_ptr;
let lang_id = language & 0xffff;
let code_page = language >> 16 & 0xffff;
let mut data_ptr: *mut u16 = null_mut();
let mut size: u32 = 0;
let query = format!(r"\StringFileInfo\{:0>4x}{:0>4x}\FileDescription", lang_id, code_page);
if VerQueryValueW(data.as_ptr() as _, query.as_str().to_utf16_null().as_ptr(),
&mut *(&mut data_ptr as *mut _ as *mut *mut _),
&mut size as _) == 0 {
let err = GetLastError();
// 1813 - FileDescription resource type not found
if err != 1813 {
error!("VerQueryValueW (file description), err={}, exe={}, query={}", err,
exe.to_str().unwrap(), query);
}
return Err(());
};
let data: Vec<_> = (0..).step_by(2)
.map(|offset| data_ptr.offset(offset / 2).read())
.take_while(|c| *c != 0)
.collect();
Ok(String::from_utf16(&data).unwrap())
}
pub unsafe fn get_parent_pid(pid: u32) -> u32 {
let process = OpenProcess(PROCESS_QUERY_LIMITED_INFORMATION, FALSE, pid);
if process == null_mut() {
return 0;
}
let _close_process = close_handle(process);
let mut info: PROCESS_BASIC_INFORMATION = MaybeUninit::zeroed().assume_init();
if NtQueryInformationProcess(process, ProcessBasicInformation, &mut info as *mut _ as _,
size_of_val(&info) as _, null_mut()) != STATUS_SUCCESS {
return 0;
}
info.InheritedFromUniqueProcessId as _
}
pub unsafe fn find_primary_window(process_id: u32) -> Option<HWND> {
struct Data {
process_id: u32,
windows: Vec<HWND>,
}
unsafe extern "system" fn window_proc(hwnd: HWND, lparam: LPARAM) -> BOOL {
let data = &mut *(lparam as *mut Data);
let mut process_id = 0;
GetWindowThreadProcessId(hwnd, &mut process_id);
if process_id == data.process_id {
data.windows.push(hwnd);
};
TRUE
}
let mut data = Data {
process_id,
windows: Vec::new(),
};
EnumWindows(Some(window_proc), &mut data as *mut _ as _);
if data.windows.is_empty() {
return None;
};
data.windows
.iter()
.find(|&&hwnd| IsWindowVisible(hwnd) == TRUE)
.or_else(|| data.windows.first())
.copied()
}
pub unsafe fn get_window_text(win: HWND) -> Result<String, ()> {
let mut title = vec![0; (GetWindowTextLengthW(win) + 1) as _];
let length = GetWindowTextW(win, title.as_mut_ptr(), title.len() as _);
if length > 0 {
Ok(String::from_utf16(&title[..length as _]).unwrap())
} else {
Err(())
}
}
pub unsafe fn get_process_command_line(pid: u32) -> Result<String, ()> {
let process = OpenProcess(PROCESS_QUERY_LIMITED_INFORMATION | PROCESS_VM_READ,
FALSE, pid);
if process == null_mut() {
return Err(());
}
let _close_process = close_handle(process);
let mut info: PROCESS_BASIC_INFORMATION = MaybeUninit::zeroed().assume_init();
let res = NtQueryInformationProcess(process, ProcessBasicInformation,
&mut info as *mut _ as _,
size_of_val(&info) as u32, null_mut());
if res != STATUS_SUCCESS {
return Err(());
}
unsafe fn read_process<T>(process: HANDLE, addr: *mut c_void) -> std::result::Result<T, ()> {
let mut dst: T = MaybeUninit::zeroed().assume_init();
if ReadProcessMemory(process, addr, &mut dst as *mut _ as _, size_of_val(&dst),
null_mut()) == 0 {
dbg!(GetLastError());
Err(())
} else {
Ok(dst)
}
}
unsafe fn read_process_unicode_string(process: HANDLE, s: UNICODE_STRING)
-> std::result::Result<String, ()> {
let mut buffer = vec![0u16; (s.Length / 2) as _];
if ReadProcessMemory(process, s.Buffer as _, buffer.as_mut_ptr() as _,
s.Length as _, null_mut()) == 0 {
dbg!(GetLastError());
return Err(());
}
Ok(String::from_utf16(&buffer).unwrap())
}
if let Ok(command_line) = (|| -> std::result::Result<_, ()> {
let peb: PEB = read_process(process, info.PebBaseAddress as _)?;
let parameters: RTL_USER_PROCESS_PARAMETERS = read_process(process,
peb.ProcessParameters as _)?;
read_process_unicode_string(process, parameters.CommandLine)
})() {
Ok(command_line)
} else {
Err(())
}
}
pub unsafe fn | (_config: &Config, mut pid: u32)
-> wrapperrs::Result<RequesterInfo> {
let mut process_stack = Vec::new();
while pid != 0 {
let window = find_primary_window(pid);
process_stack.push((pid, window));
pid = get_parent_pid(pid);
}
let main_process = process_stack.iter()
.find(|(_, window)| match *window {
Some(window) if IsWindowVisible(window) == TRUE => true,
_ => false
})
.or_else(|| process_stack.iter()
.find(|(_, window)| window.is_some()))
.or(process_stack.first())
.unwrap();
let short = describe(main_process.0, main_process.1)?.0;
let long = process_stack
.iter()
.filter_map(|(pid, window)| describe(*pid, *window)
.map(|(_, long)| long)
.ok())
.intersperse("\n\n".into())
.collect::<String>();
Ok(RequesterInfo {
description_short: short,
description_long: long,
})
}
| collect_requester_info | identifier_name |
mod.rs | Debug, serde::Serialize, serde::Deserialize)]
enum Msg {
SetupMsg(SetupMsg),
CommMsg(CommMsg),
}
// Control messages exchanged during the setup phase only
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
enum SetupMsg {
MyPortInfo(MyPortInfo),
LeaderWave { wave_leader: ConnectorId },
LeaderAnnounce { tree_leader: ConnectorId },
YouAreMyParent,
}
// Control message particular to the communication phase.
// as such, it's annotated with a round_index
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
struct CommMsg {
round_index: usize,
contents: CommMsgContents,
}
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
enum CommMsgContents {
SendPayload(SendPayloadMsg),
CommCtrl(CommCtrlMsg),
}
// Connector <-> connector control messages for use in the communication phase
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
enum CommCtrlMsg {
Suggest { suggestion: Decision }, // child->parent
Announce { decision: Decision }, // parent->child
}
// Speculative payload message, communicating the value for the given
// port's message predicated on the given speculative variable assignments.
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
struct SendPayloadMsg {
predicate: Predicate,
payload: Payload,
}
// Return result of `Predicate::assignment_union`, communicating the contents
// of the predicate which represents the (consistent) union of their mappings,
// if it exists (no variable mapped distinctly by the input predicates)
#[derive(Debug, PartialEq)]
enum AssignmentUnionResult {
FormerNotLatter,
LatterNotFormer, | }
// One of two endpoints for a control channel with a connector on either end.
// The underlying transport is TCP, so we use an inbox buffer to allow
// discrete payload receipt.
struct NetEndpoint {
inbox: Vec<u8>,
stream: TcpStream,
}
// Datastructure used during the setup phase representing a NetEndpoint TO BE SETUP
#[derive(Debug, Clone)]
struct NetEndpointSetup {
getter_for_incoming: PortId,
sock_addr: SocketAddr,
endpoint_polarity: EndpointPolarity,
}
// Datastructure used during the setup phase representing a UdpEndpoint TO BE SETUP
#[derive(Debug, Clone)]
struct UdpEndpointSetup {
getter_for_incoming: PortId,
local_addr: SocketAddr,
peer_addr: SocketAddr,
}
// NetEndpoint annotated with the ID of the port that receives payload
// messages received through the endpoint. This approach assumes that NetEndpoints
// DO NOT multiplex port->port channels, and so a mapping such as this is possible.
// As a result, the messages themselves don't need to carry the PortID with them.
#[derive(Debug)]
struct NetEndpointExt {
net_endpoint: NetEndpoint,
getter_for_incoming: PortId,
}
// Endpoint for a "raw" UDP endpoint. Corresponds to the "Udp Mediator Component"
// described in the literature.
// It acts as an endpoint by receiving messages via the poller etc. (managed by EndpointManager),
// It acts as a native component by managing a (speculative) set of payload messages (an outbox,
// protecting the peer on the other side of the network).
#[derive(Debug)]
struct UdpEndpointExt {
sock: UdpSocket, // already bound and connected
received_this_round: bool,
outgoing_payloads: HashMap<Predicate, Payload>,
getter_for_incoming: PortId,
}
// Meta-data for the connector: its role in the consensus tree.
#[derive(Debug)]
struct Neighborhood {
parent: Option<usize>,
children: VecSet<usize>,
}
// Manages the connector's ID, and manages allocations for connector/port IDs.
#[derive(Debug, Clone)]
struct IdManager {
connector_id: ConnectorId,
port_suffix_stream: U32Stream,
component_suffix_stream: U32Stream,
}
// Newtype wrapper around a byte buffer, used for UDP mediators to receive incoming datagrams.
struct IoByteBuffer {
byte_vec: Vec<u8>,
}
// A generator of speculative variables. Created on-demand during the synchronous round
// by the IdManager.
#[derive(Debug)]
struct SpecVarStream {
connector_id: ConnectorId,
port_suffix_stream: U32Stream,
}
// Manages the messy state of the various endpoints, pollers, buffers, etc.
#[derive(Debug)]
struct EndpointManager {
// invariants:
// 1. net and udp endpoints are registered with poll with tokens computed with TargetToken::into
// 2. Events is empty
poll: Poll,
events: Events,
delayed_messages: Vec<(usize, Msg)>,
undelayed_messages: Vec<(usize, Msg)>, // ready to yield
net_endpoint_store: EndpointStore<NetEndpointExt>,
udp_endpoint_store: EndpointStore<UdpEndpointExt>,
io_byte_buffer: IoByteBuffer,
}
// A storage of endpoints, which keeps track of which components have raised
// an event during poll(), signifying that they need to be checked for new incoming data
#[derive(Debug)]
struct EndpointStore<T> {
endpoint_exts: Vec<T>,
polled_undrained: VecSet<usize>,
}
// The information associated with a port identifier, designed for local storage.
#[derive(Clone, Debug)]
struct PortInfo {
owner: ComponentId,
peer: Option<PortId>,
polarity: Polarity,
route: Route,
}
// Similar to `PortInfo`, but designed for communication during the setup procedure.
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
struct MyPortInfo {
polarity: Polarity,
port: PortId,
owner: ComponentId,
}
// Newtype around port info map, allowing the implementation of some
// useful methods
#[derive(Default, Debug, Clone)]
struct PortInfoMap {
// invariant: self.invariant_preserved()
// `owned` is redundant information, allowing for fast lookup
// of a component's owned ports (which occurs during the sync round a lot)
map: HashMap<PortId, PortInfo>,
owned: HashMap<ComponentId, HashSet<PortId>>,
}
// A convenient substructure for containing port info and the ID manager.
// Houses the bulk of the connector's persistent state between rounds.
// It turns out several situations require access to both things.
#[derive(Debug, Clone)]
struct IdAndPortState {
port_info: PortInfoMap,
id_manager: IdManager,
}
// A component's communication-phase-specific data
#[derive(Debug)]
struct ConnectorCommunication {
round_index: usize,
endpoint_manager: EndpointManager,
neighborhood: Neighborhood,
native_batches: Vec<NativeBatch>,
round_result: Result<Option<RoundEndedNative>, SyncError>,
}
// A component's data common to both setup and communication phases
#[derive(Debug)]
struct ConnectorUnphased {
proto_description: Arc<ProtocolDescription>,
proto_components: HashMap<ComponentId, ComponentState>,
logger: Box<dyn Logger>,
ips: IdAndPortState,
native_component_id: ComponentId,
}
// A connector's phase-specific data
#[derive(Debug)]
enum ConnectorPhased {
Setup(Box<ConnectorSetup>),
Communication(Box<ConnectorCommunication>),
}
// A connector's setup-phase-specific data
#[derive(Debug)]
struct ConnectorSetup {
net_endpoint_setups: Vec<NetEndpointSetup>,
udp_endpoint_setups: Vec<UdpEndpointSetup>,
}
// A newtype wrapper for a map from speculative variable to speculative value
// A missing mapping corresponds with "unspecified".
#[derive(Default, Clone, Eq, PartialEq, Hash, serde::Serialize, serde::Deserialize)]
struct Predicate {
assigned: BTreeMap<SpecVar, SpecVal>,
}
// Identifies a child of this connector in the _solution tree_.
// Each connector creates its own local solutions for the consensus procedure during `sync`,
// from the solutions of its children. Those children are either locally-managed components,
// (which are leaves in the solution tree), or other connectors reachable through the given
// network endpoint (which are internal nodes in the solution tree).
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
enum SubtreeId {
LocalComponent(ComponentId),
NetEndpoint { index: usize },
}
// An accumulation of the connector's knowledge of all (a) the local solutions its children
// in the solution tree have found, and (b) its own solutions derivable from those of its children.
// This structure starts off each round with an empty set, and accumulates solutions as they are found
// by local components, or received over the network in control messages.
// IMPORTANT: solutions, once found, don't go away until the end of the round. That is to
// say that these sets GROW until the round is over, and all solutions are reset.
#[derive(Debug)]
struct SolutionStorage {
// invariant: old_local U new_local solutions are those that can be created from
// the UNION of one element from each set in `subtree_solution`.
// invariant is maintained by potentially populating new_local whenever subtree_solutions is populated.
old_local: HashSet<Predicate>, // already sent to this connector's parent OR decided
new_local: HashSet<Predicate>, // not yet sent to this connector's parent OR decided
// this pair acts as SubtreeId -> HashSet<Predicate> which is friendlier to iteration
subtree_solutions: Vec<HashSet<Predicate>>,
subtree_id_to_index: HashMap<SubtreeId, usize>,
}
// Stores the transient data of a synchronous round.
// Some of it is for bookkeeping, and the rest is a temporary mirror of fields of
// `ConnectorUnphased`, such that any changes are safely contained within RoundCtx,
// and can be undone if the round fails.
struct RoundCtx {
solution_storage: SolutionStorage,
spec_var_stream: SpecVarStream,
payload_inbox: Vec<(PortId, SendPayloadMsg)>,
deadline: Option<Instant>,
ips: IdAndPortState,
}
// A trait intended to limit the access of the ConnectorUnphased structure
// such that we don't accidentally modify any important component/port data
// while the results of the round are undecided. Why? Any actions during Connector::sync
// are _speculative_ until the round is decided, and we need a safe way of rolling
// back any changes.
trait CuUndecided {
fn logger(&mut self) -> &mut dyn Logger;
fn proto_description(&self) -> &ProtocolDescription;
fn native_component_id(&self) -> ComponentId;
fn logger_and_protocol_description(&mut self) -> (&mut dyn Logger, &ProtocolDescription);
fn logger_and_protocol_components(
&mut self,
) -> (&mut dyn Logger, &mut HashMap<ComponentId, ComponentState>);
}
// Represents a set of synchronous port operations that the native component
// has described as an "option" for completing during the synchronous rounds.
// Operations contained here succeed together or not at all.
// A native with N=2+ batches are expressing an N-way nondeterministic choice
#[derive(Debug, Default)]
struct NativeBatch {
// invariant: putters' and getters' polarities respected
to_put: HashMap<PortId, Payload>,
to_get: HashSet<PortId>,
}
// Parallels a mio::Token type, but more clearly communicates
// the way it identifies the evented structure it corresponds to.
// See runtime/setup for methods converting between TokenTarget and mio::Token
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
enum TokenTarget {
NetEndpoint { index: usize },
UdpEndpoint { index: usize },
}
// Returned by the endpoint manager as a result of comm_recv, telling the connector what happened,
// such that it can know when to continue polling, and when to block.
enum CommRecvOk {
TimeoutWithoutNew,
NewPayloadMsgs,
NewControlMsg { net_index: usize, msg: CommCtrlMsg },
}
////////////////
fn err_would_block(err: &std::io::Error) -> bool {
err.kind() == std::io::ErrorKind::WouldBlock
}
impl<T: std::cmp::Ord> VecSet<T> {
fn new(mut vec: Vec<T>) -> Self {
// establish the invariant
vec.sort();
vec.dedup();
Self { vec }
}
fn contains(&self, element: &T) -> bool {
self.vec.binary_search(element).is_ok()
}
// Insert the given element. Returns whether it was already present.
fn insert(&mut self, element: T) -> bool {
match self.vec.binary_search(&element) {
Ok(_) => false,
Err(index) => {
self.vec.insert(index, element);
true
}
}
}
fn iter(&self) -> std::slice::Iter<T> {
self.vec.iter()
}
fn pop(&mut self) -> Option<T> {
self.vec.pop()
}
}
impl PortInfoMap {
fn ports_owned_by(&self, owner: ComponentId) -> impl Iterator<Item = &PortId> {
self.owned.get(&owner).into_iter().flat_map(HashSet::iter)
}
fn spec_var_for(&self, port: PortId) -> SpecVar {
// Every port maps to a speculative variable
// Two distinct ports map to the same variable
// IFF they are two ends of the same logical channel.
let info = self.map.get(&port).unwrap();
SpecVar(match info.polarity {
Getter => port,
Putter => info.peer.unwrap(),
})
}
fn invariant_preserved(&self) -> bool {
// for every port P with some owner O,
// P is in O's owned set
for (port, info) in self.map.iter() {
match self.owned.get(&info.owner) {
Some(set) if set.contains(port) => {}
_ => {
println!("{:#?}\n WITH port {:?}", self, port);
return false;
}
}
}
// for every port P owned by every owner O,
// P's owner is O
for (&owner, set) in self.owned.iter() {
for port in set {
match self.map.get(port) {
Some(info) if info.owner == owner => {}
_ => {
println!("{:#?}\n WITH owner {:?} port {:?}", self, owner, port);
return false;
}
}
}
}
true
}
}
impl SpecVarStream {
fn next(&mut self) -> SpecVar {
let phantom_port: PortId =
Id { connector_id: self.connector_id, u32_suffix: self.port_suffix_stream.next() }
.into();
SpecVar(phantom_port)
}
}
impl IdManager {
fn new(connector_id: ConnectorId) -> Self {
Self {
connector_id,
port_suffix_stream: Default::default(),
component_suffix_stream: Default::default(),
}
}
fn new_spec_var_stream(&self) -> SpecVarStream {
// Spec var stream starts where the current port_id stream ends, with gap of SKIP_N.
// This gap is entirely unnecessary (i.e. 0 is fine)
// Its purpose is only to make SpecVars easier to spot in logs.
// E.g. spot the spec var: { v0_0, v1_2, v1_103 }
const SKIP_N: u32 = 100;
let port_suffix_stream = self.port_suffix_stream.clone().n_skipped(SKIP_N);
SpecVarStream { connector_id: self.connector_id, port_suffix_stream }
}
fn new_port_id(&mut self) -> PortId {
Id { connector_id: self.connector_id, u32_suffix: self.port_suffix_stream.next() }.into()
}
fn new_component_id(&mut self) -> ComponentId {
Id { connector_id: self.connector_id, u32_suffix: self.component_suffix_stream.next() }
.into()
}
}
impl Drop for Connector {
fn drop(&mut self) {
log!(self.unphased.logger(), "Connector dropping. Goodbye!");
}
}
// Given a slice of ports, return the first, if any, port is present repeatedly
fn duplicate_port(slice: &[PortId]) -> Option<PortId> {
let mut vec = Vec::with_capacity(slice.len());
for port in slice.iter() {
match vec.binary_search(port) {
Err(index) => vec.insert(index, *port),
Ok(_) => return Some(*port),
}
}
None
}
impl Connector {
/// Generate a random connector identifier from the system's source of randomness.
pub fn random_id() -> ConnectorId {
type Bytes8 = [u8; std::mem::size_of::<ConnectorId>()];
unsafe {
let mut bytes = std::mem::MaybeUninit::<Bytes8>::uninit();
// getrandom is the canonical crate for a small, secure rng
getrandom::getrandom(&mut *bytes.as_mut_ptr()).unwrap();
// safe! representations of all valid Byte8 values are valid ConnectorId values
std::mem::transmute::<_, _>(bytes.assume_init())
}
}
/// Returns true iff the connector is in connected state, i.e., it's setup phase is complete,
/// and it is ready to participate in synchronous rounds of communication.
pub fn is_connected(&self) -> bool {
// If designed for Rust usage, connectors would be exposed as an enum type from the start.
// consequently, this "phased" business would also include connector variants and this would
// get a lot closer to the connector impl. itself.
// Instead, the C-oriented implementation doesn't distinguish connector states as types,
// and distinguish them as enum variants instead
match self.phased {
ConnectorPhased::Setup(..) => false,
ConnectorPhased::Communication(..) => true,
}
}
/// Enables the connector's current logger to be swapped out for another
pub fn swap_logger(&mut self, mut new_logger: Box<dyn Logger>) -> Box<dyn Logger> {
std::mem::swap(&mut self.unphased.logger, &mut new_logger);
new_logger
}
/// Access the connector's current logger
pub fn get_logger(&mut self) -> &mut dyn Logger {
&mut *self.unphased.logger
}
/// Create a new synchronous channel, returning its ends as a pair of ports,
/// with polarity output, input respectively. Available during either setup/communication phase.
/// # Panics
/// This function panics if the connector's (large) port id space is exhausted.
pub fn new_port_pair(&mut self) -> [PortId; 2] {
let cu = &mut self.unphased;
// adds two new associated ports, related to each other, and exposed to the native
let mut new_cid = || cu.ips.id_manager.new_port_id();
// allocate two fresh port identifiers
let [o, i] = [new_cid(), new_cid()];
// store info for each:
// - they are each others' peers
// - they are owned by a local component with id `cid`
// - polarity putter, getter respectively
cu.ips.port_info.map.insert(
o,
PortInfo {
route: Route::LocalComponent,
peer: Some(i),
owner: cu.native_component_id,
polarity: Putter,
},
);
cu.ips.port_info.map.insert(
i,
PortInfo {
route: Route::LocalComponent,
peer: Some(o),
owner: cu.native_component_id,
polarity: Getter,
},
);
cu.ips
.port_info
.owned
.entry(cu.native_component_id)
.or_default()
.extend([o, i].iter().copied());
log!(cu.logger, "Added port pair (out->in) {:?} -> {:?}", o, i);
[o, i]
}
/// Instantiates a new component for the connector runtime to manage, and passing
/// the given set of ports from the interface of the native component, to that of the
/// newly created component (passing their ownership).
/// # Errors
/// Error is returned if the moved ports are not owned by the native component,
/// if the given component name is not defined in the connector's protocol,
/// the given sequence of ports contains a duplicate port,
/// or if the component is unfit for instantiation with the given port sequence.
/// # Panics
/// This function panics if the connector's (large) component id space is exhausted.
pub fn add_component(
&mut self,
module_name: &[u8],
identifier: &[u8],
ports: &[PortId],
) -> Result<(), AddComponentError> {
// Check for error cases first before modifying `cu`
use AddComponentError as Ace;
let cu = &self.unphased;
if let Some(port) = duplicate_port(ports) {
return Err(Ace::DuplicatePort(port));
}
let expected_polarities = cu.proto_description.component_polarities(module_name, identifier)?;
if expected_polarities.len() != ports.len() {
return Err(Ace::WrongNumberOfParamaters { expected: expected_polarities.len() });
}
for (&expected_polarity, &port) in expected_polarities.iter().zip(ports.iter()) {
let info = cu.ips.port_info.map.get(&port).ok_or(Ace::UnknownPort(port))?;
if info.owner != cu.native_component_id {
return Err(Ace::UnknownPort(port));
}
if info.polarity != expected_polarity {
return Err(Ace::WrongPortPolarity { port, expected_polarity });
}
}
// No errors! Time to modify `cu`
// create a new component and identifier
let Connector { phased, unphased: cu } = self;
let new_cid = cu.ips.id_manager.new_component_id();
cu.proto_components.insert(new_cid, cu.proto_description.new_component(module_name, identifier, ports));
// update the ownership of moved ports
for port in ports.iter() {
match cu.ips.port_info.map.get_mut(port) {
Some(port_info) => port_info.owner = new_cid,
None => unreachable!(),
}
}
if let Some(set) = cu.ips.port_info.owned.get_mut(&cu.native_component_id) {
set.retain(|x| !ports.contains(x));
}
let moved_port_set: HashSet<PortId> = ports.iter().copied().collect();
if let ConnectorPhased::Communication(comm) = phased {
// Preserve invariant: batches only reason about native's ports.
// Remove batch puts/gets for moved ports.
for batch in comm.native_batches.iter_mut() {
batch.to_put.retain(|port, _| !moved_port_set.contains(port));
batch.to_get.retain(|port| !moved_port_set.contains(port));
}
}
cu.ips.port_info.owned.insert(new_cid, moved_port_set);
Ok(())
}
}
impl Predicate {
#[inline]
pub fn singleton(k: SpecVar, v: SpecVal) -> Self {
Self::default().inserted(k, v)
}
#[inline]
pub fn inserted(mut self, k: SpecVar, v: SpecVal) -> Self {
self.assigned.insert(k, v);
self
}
// Return true whether `self` is a subset of `maybe_superset`
pub fn assigns_subset(&self, maybe_superset: &Self) -> bool {
for (var, val) in self.assigned.iter() {
match maybe_superset.assigned.get(var) {
Some(val2) if val2 == val => {}
_ => return false, // var unmapped, or mapped differently
}
}
// `maybe_superset` mirrored all my assignments!
true
}
/// Given the two predicates {self, other}, return that whose
/// assignments are the union of those of both.
fn assignment_union(&self, other: &Self) -> AssignmentUnionResult {
use AssignmentUnionResult as Aur;
// iterators over assignments of both predicates. Rely on SORTED ordering of BTreeMap's keys.
let [mut s_it, mut o_it | Equivalent,
New(Predicate),
Nonexistant, | random_line_split |
mod.rs | , serde::Serialize, serde::Deserialize)]
enum Msg {
SetupMsg(SetupMsg),
CommMsg(CommMsg),
}
// Control messages exchanged during the setup phase only
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
enum SetupMsg {
MyPortInfo(MyPortInfo),
LeaderWave { wave_leader: ConnectorId },
LeaderAnnounce { tree_leader: ConnectorId },
YouAreMyParent,
}
// Control message particular to the communication phase.
// as such, it's annotated with a round_index
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
struct CommMsg {
round_index: usize,
contents: CommMsgContents,
}
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
enum CommMsgContents {
SendPayload(SendPayloadMsg),
CommCtrl(CommCtrlMsg),
}
// Connector <-> connector control messages for use in the communication phase
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
enum CommCtrlMsg {
Suggest { suggestion: Decision }, // child->parent
Announce { decision: Decision }, // parent->child
}
// Speculative payload message, communicating the value for the given
// port's message predicated on the given speculative variable assignments.
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
struct SendPayloadMsg {
predicate: Predicate,
payload: Payload,
}
// Return result of `Predicate::assignment_union`, communicating the contents
// of the predicate which represents the (consistent) union of their mappings,
// if it exists (no variable mapped distinctly by the input predicates)
#[derive(Debug, PartialEq)]
enum AssignmentUnionResult {
FormerNotLatter,
LatterNotFormer,
Equivalent,
New(Predicate),
Nonexistant,
}
// One of two endpoints for a control channel with a connector on either end.
// The underlying transport is TCP, so we use an inbox buffer to allow
// discrete payload receipt.
struct NetEndpoint {
inbox: Vec<u8>,
stream: TcpStream,
}
// Datastructure used during the setup phase representing a NetEndpoint TO BE SETUP
#[derive(Debug, Clone)]
struct NetEndpointSetup {
getter_for_incoming: PortId,
sock_addr: SocketAddr,
endpoint_polarity: EndpointPolarity,
}
// Datastructure used during the setup phase representing a UdpEndpoint TO BE SETUP
#[derive(Debug, Clone)]
struct UdpEndpointSetup {
getter_for_incoming: PortId,
local_addr: SocketAddr,
peer_addr: SocketAddr,
}
// NetEndpoint annotated with the ID of the port that receives payload
// messages received through the endpoint. This approach assumes that NetEndpoints
// DO NOT multiplex port->port channels, and so a mapping such as this is possible.
// As a result, the messages themselves don't need to carry the PortID with them.
#[derive(Debug)]
struct NetEndpointExt {
net_endpoint: NetEndpoint,
getter_for_incoming: PortId,
}
// Endpoint for a "raw" UDP endpoint. Corresponds to the "Udp Mediator Component"
// described in the literature.
// It acts as an endpoint by receiving messages via the poller etc. (managed by EndpointManager),
// It acts as a native component by managing a (speculative) set of payload messages (an outbox,
// protecting the peer on the other side of the network).
#[derive(Debug)]
struct UdpEndpointExt {
sock: UdpSocket, // already bound and connected
received_this_round: bool,
outgoing_payloads: HashMap<Predicate, Payload>,
getter_for_incoming: PortId,
}
// Meta-data for the connector: its role in the consensus tree.
#[derive(Debug)]
struct Neighborhood {
parent: Option<usize>,
children: VecSet<usize>,
}
// Manages the connector's ID, and manages allocations for connector/port IDs.
#[derive(Debug, Clone)]
struct IdManager {
connector_id: ConnectorId,
port_suffix_stream: U32Stream,
component_suffix_stream: U32Stream,
}
// Newtype wrapper around a byte buffer, used for UDP mediators to receive incoming datagrams.
struct IoByteBuffer {
byte_vec: Vec<u8>,
}
// A generator of speculative variables. Created on-demand during the synchronous round
// by the IdManager.
#[derive(Debug)]
struct SpecVarStream {
connector_id: ConnectorId,
port_suffix_stream: U32Stream,
}
// Manages the messy state of the various endpoints, pollers, buffers, etc.
#[derive(Debug)]
struct EndpointManager {
// invariants:
// 1. net and udp endpoints are registered with poll with tokens computed with TargetToken::into
// 2. Events is empty
poll: Poll,
events: Events,
delayed_messages: Vec<(usize, Msg)>,
undelayed_messages: Vec<(usize, Msg)>, // ready to yield
net_endpoint_store: EndpointStore<NetEndpointExt>,
udp_endpoint_store: EndpointStore<UdpEndpointExt>,
io_byte_buffer: IoByteBuffer,
}
// A storage of endpoints, which keeps track of which components have raised
// an event during poll(), signifying that they need to be checked for new incoming data
#[derive(Debug)]
struct EndpointStore<T> {
endpoint_exts: Vec<T>,
polled_undrained: VecSet<usize>,
}
// The information associated with a port identifier, designed for local storage.
#[derive(Clone, Debug)]
struct PortInfo {
owner: ComponentId,
peer: Option<PortId>,
polarity: Polarity,
route: Route,
}
// Similar to `PortInfo`, but designed for communication during the setup procedure.
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
struct MyPortInfo {
polarity: Polarity,
port: PortId,
owner: ComponentId,
}
// Newtype around port info map, allowing the implementation of some
// useful methods
#[derive(Default, Debug, Clone)]
struct PortInfoMap {
// invariant: self.invariant_preserved()
// `owned` is redundant information, allowing for fast lookup
// of a component's owned ports (which occurs during the sync round a lot)
map: HashMap<PortId, PortInfo>,
owned: HashMap<ComponentId, HashSet<PortId>>,
}
// A convenient substructure for containing port info and the ID manager.
// Houses the bulk of the connector's persistent state between rounds.
// It turns out several situations require access to both things.
#[derive(Debug, Clone)]
struct IdAndPortState {
port_info: PortInfoMap,
id_manager: IdManager,
}
// A component's communication-phase-specific data
#[derive(Debug)]
struct ConnectorCommunication {
round_index: usize,
endpoint_manager: EndpointManager,
neighborhood: Neighborhood,
native_batches: Vec<NativeBatch>,
round_result: Result<Option<RoundEndedNative>, SyncError>,
}
// A component's data common to both setup and communication phases
#[derive(Debug)]
struct ConnectorUnphased {
proto_description: Arc<ProtocolDescription>,
proto_components: HashMap<ComponentId, ComponentState>,
logger: Box<dyn Logger>,
ips: IdAndPortState,
native_component_id: ComponentId,
}
// A connector's phase-specific data
#[derive(Debug)]
enum ConnectorPhased {
Setup(Box<ConnectorSetup>),
Communication(Box<ConnectorCommunication>),
}
// A connector's setup-phase-specific data
#[derive(Debug)]
struct ConnectorSetup {
net_endpoint_setups: Vec<NetEndpointSetup>,
udp_endpoint_setups: Vec<UdpEndpointSetup>,
}
// A newtype wrapper for a map from speculative variable to speculative value
// A missing mapping corresponds with "unspecified".
#[derive(Default, Clone, Eq, PartialEq, Hash, serde::Serialize, serde::Deserialize)]
struct Predicate {
assigned: BTreeMap<SpecVar, SpecVal>,
}
// Identifies a child of this connector in the _solution tree_.
// Each connector creates its own local solutions for the consensus procedure during `sync`,
// from the solutions of its children. Those children are either locally-managed components,
// (which are leaves in the solution tree), or other connectors reachable through the given
// network endpoint (which are internal nodes in the solution tree).
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
enum SubtreeId {
LocalComponent(ComponentId),
NetEndpoint { index: usize },
}
// An accumulation of the connector's knowledge of all (a) the local solutions its children
// in the solution tree have found, and (b) its own solutions derivable from those of its children.
// This structure starts off each round with an empty set, and accumulates solutions as they are found
// by local components, or received over the network in control messages.
// IMPORTANT: solutions, once found, don't go away until the end of the round. That is to
// say that these sets GROW until the round is over, and all solutions are reset.
#[derive(Debug)]
struct SolutionStorage {
// invariant: old_local U new_local solutions are those that can be created from
// the UNION of one element from each set in `subtree_solution`.
// invariant is maintained by potentially populating new_local whenever subtree_solutions is populated.
old_local: HashSet<Predicate>, // already sent to this connector's parent OR decided
new_local: HashSet<Predicate>, // not yet sent to this connector's parent OR decided
// this pair acts as SubtreeId -> HashSet<Predicate> which is friendlier to iteration
subtree_solutions: Vec<HashSet<Predicate>>,
subtree_id_to_index: HashMap<SubtreeId, usize>,
}
// Stores the transient data of a synchronous round.
// Some of it is for bookkeeping, and the rest is a temporary mirror of fields of
// `ConnectorUnphased`, such that any changes are safely contained within RoundCtx,
// and can be undone if the round fails.
struct RoundCtx {
solution_storage: SolutionStorage,
spec_var_stream: SpecVarStream,
payload_inbox: Vec<(PortId, SendPayloadMsg)>,
deadline: Option<Instant>,
ips: IdAndPortState,
}
// A trait intended to limit the access of the ConnectorUnphased structure
// such that we don't accidentally modify any important component/port data
// while the results of the round are undecided. Why? Any actions during Connector::sync
// are _speculative_ until the round is decided, and we need a safe way of rolling
// back any changes.
trait CuUndecided {
fn logger(&mut self) -> &mut dyn Logger;
fn proto_description(&self) -> &ProtocolDescription;
fn native_component_id(&self) -> ComponentId;
fn logger_and_protocol_description(&mut self) -> (&mut dyn Logger, &ProtocolDescription);
fn logger_and_protocol_components(
&mut self,
) -> (&mut dyn Logger, &mut HashMap<ComponentId, ComponentState>);
}
// Represents a set of synchronous port operations that the native component
// has described as an "option" for completing during the synchronous rounds.
// Operations contained here succeed together or not at all.
// A native with N=2+ batches is expressing an N-way nondeterministic choice
#[derive(Debug, Default)]
struct NativeBatch {
// invariant: putters' and getters' polarities respected
to_put: HashMap<PortId, Payload>,
to_get: HashSet<PortId>,
}
// Parallels a mio::Token type, but more clearly communicates
// the way it identifies the evented structure it corresponds to.
// See runtime/setup for methods converting between TokenTarget and mio::Token
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
enum TokenTarget {
NetEndpoint { index: usize },
UdpEndpoint { index: usize },
}
// Returned by the endpoint manager as a result of comm_recv, telling the connector what happened,
// such that it can know when to continue polling, and when to block.
enum CommRecvOk {
TimeoutWithoutNew,
NewPayloadMsgs,
NewControlMsg { net_index: usize, msg: CommCtrlMsg },
}
////////////////
fn err_would_block(err: &std::io::Error) -> bool {
err.kind() == std::io::ErrorKind::WouldBlock
}
impl<T: std::cmp::Ord> VecSet<T> {
fn new(mut vec: Vec<T>) -> Self {
// establish the invariant
vec.sort();
vec.dedup();
Self { vec }
}
fn contains(&self, element: &T) -> bool {
self.vec.binary_search(element).is_ok()
}
// Insert the given element. Returns true if it was newly inserted, i.e., not already present.
fn insert(&mut self, element: T) -> bool {
match self.vec.binary_search(&element) {
Ok(_) => false,
Err(index) => {
self.vec.insert(index, element);
true
}
}
}
fn iter(&self) -> std::slice::Iter<T> {
self.vec.iter()
}
fn pop(&mut self) -> Option<T> {
self.vec.pop()
}
}
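// A minimal usage sketch (illustrative only): the sorted-and-deduplicated invariant
// established by `new` is what makes the binary searches in `contains`/`insert`
// valid, and `pop` removes the greatest element.
#[cfg(test)]
mod vec_set_sketch {
use super::*;
#[test]
fn insert_reports_newness() {
let mut set = VecSet::new(vec![3, 1, 3]); // sorted + deduped => [1, 3]
assert!(set.contains(&1));
assert!(set.insert(2)); // 2 was absent => true
assert!(!set.insert(2)); // 2 is present now => false
assert_eq!(set.pop(), Some(3)); // pops the greatest element
}
}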
impl PortInfoMap {
fn ports_owned_by(&self, owner: ComponentId) -> impl Iterator<Item = &PortId> {
self.owned.get(&owner).into_iter().flat_map(HashSet::iter)
}
fn spec_var_for(&self, port: PortId) -> SpecVar {
// Every port maps to a speculative variable
// Two distinct ports map to the same variable
// IFF they are two ends of the same logical channel.
let info = self.map.get(&port).unwrap();
SpecVar(match info.polarity {
Getter => port,
Putter => info.peer.unwrap(),
})
}
fn invariant_preserved(&self) -> bool {
// for every port P with some owner O,
// P is in O's owned set
for (port, info) in self.map.iter() {
match self.owned.get(&info.owner) {
Some(set) if set.contains(port) => {}
_ => {
println!("{:#?}\n WITH port {:?}", self, port);
return false;
}
}
}
// for every port P owned by every owner O,
// P's owner is O
for (&owner, set) in self.owned.iter() {
for port in set {
match self.map.get(port) {
Some(info) if info.owner == owner => {}
_ => {
println!("{:#?}\n WITH owner {:?} port {:?}", self, owner, port);
return false;
}
}
}
}
true
}
}
impl SpecVarStream {
fn next(&mut self) -> SpecVar {
let phantom_port: PortId =
Id { connector_id: self.connector_id, u32_suffix: self.port_suffix_stream.next() }
.into();
SpecVar(phantom_port)
}
}
impl IdManager {
fn new(connector_id: ConnectorId) -> Self {
Self {
connector_id,
port_suffix_stream: Default::default(),
component_suffix_stream: Default::default(),
}
}
fn new_spec_var_stream(&self) -> SpecVarStream {
// Spec var stream starts where the current port_id stream ends, with a gap of SKIP_N.
// This gap is entirely unnecessary (i.e. 0 is fine);
// its purpose is only to make SpecVars easier to spot in logs.
// E.g. spot the spec var: { v0_0, v1_2, v1_103 }
const SKIP_N: u32 = 100;
let port_suffix_stream = self.port_suffix_stream.clone().n_skipped(SKIP_N);
SpecVarStream { connector_id: self.connector_id, port_suffix_stream }
}
fn new_port_id(&mut self) -> PortId {
Id { connector_id: self.connector_id, u32_suffix: self.port_suffix_stream.next() }.into()
}
fn new_component_id(&mut self) -> ComponentId {
Id { connector_id: self.connector_id, u32_suffix: self.component_suffix_stream.next() }
.into()
}
}
impl Drop for Connector {
fn drop(&mut self) {
log!(self.unphased.logger(), "Connector dropping. Goodbye!");
}
}
// Given a slice of ports, return the first port, if any, that occurs more than once in the slice
fn duplicate_port(slice: &[PortId]) -> Option<PortId> {
let mut vec = Vec::with_capacity(slice.len());
for port in slice.iter() {
match vec.binary_search(port) {
Err(index) => vec.insert(index, *port),
Ok(_) => return Some(*port),
}
}
None
}
impl Connector {
/// Generate a random connector identifier from the system's source of randomness.
pub fn random_id() -> ConnectorId {
type Bytes8 = [u8; std::mem::size_of::<ConnectorId>()];
unsafe {
let mut bytes = std::mem::MaybeUninit::<Bytes8>::uninit();
// getrandom is the canonical crate for a small, secure rng
getrandom::getrandom(&mut *bytes.as_mut_ptr()).unwrap();
// safe! representations of all valid Bytes8 values are valid ConnectorId values
std::mem::transmute::<_, _>(bytes.assume_init())
}
}
/// Returns true iff the connector is in the connected state, i.e., its setup phase is complete,
/// and it is ready to participate in synchronous rounds of communication.
pub fn is_connected(&self) -> bool {
// If designed for Rust usage, connectors would be exposed as an enum type from the start.
// Consequently, this "phased" business would also include connector variants and this would
// get a lot closer to the connector impl. itself.
// Instead, the C-oriented implementation doesn't distinguish connector states as types,
// but distinguishes them as enum variants.
match self.phased {
ConnectorPhased::Setup(..) => false,
ConnectorPhased::Communication(..) => true,
}
}
/// Enables the connector's current logger to be swapped out for another
pub fn swap_logger(&mut self, mut new_logger: Box<dyn Logger>) -> Box<dyn Logger> {
std::mem::swap(&mut self.unphased.logger, &mut new_logger);
new_logger
}
/// Access the connector's current logger
pub fn get_logger(&mut self) -> &mut dyn Logger {
&mut *self.unphased.logger
}
/// Create a new synchronous channel, returning its ends as a pair of ports,
/// with polarity output, input respectively. Available during either setup/communication phase.
/// # Panics
/// This function panics if the connector's (large) port id space is exhausted.
pub fn new_port_pair(&mut self) -> [PortId; 2] {
let cu = &mut self.unphased;
// adds two new associated ports, related to each other, and exposed to the native
let mut new_cid = || cu.ips.id_manager.new_port_id();
// allocate two fresh port identifiers
let [o, i] = [new_cid(), new_cid()];
// store info for each:
// - they are each others' peers
// - they are owned by the native component
// - polarity putter, getter respectively
cu.ips.port_info.map.insert(
o,
PortInfo {
route: Route::LocalComponent,
peer: Some(i),
owner: cu.native_component_id,
polarity: Putter,
},
);
cu.ips.port_info.map.insert(
i,
PortInfo {
route: Route::LocalComponent,
peer: Some(o),
owner: cu.native_component_id,
polarity: Getter,
},
);
cu.ips
.port_info
.owned
.entry(cu.native_component_id)
.or_default()
.extend([o, i].iter().copied());
log!(cu.logger, "Added port pair (out->in) {:?} -> {:?}", o, i);
[o, i]
}
/// Instantiates a new component for the connector runtime to manage, passing
/// the given set of ports from the interface of the native component to that of the
/// newly created component (transferring their ownership).
/// # Errors
/// Error is returned if the moved ports are not owned by the native component,
/// if the given component name is not defined in the connector's protocol,
/// the given sequence of ports contains a duplicate port,
/// or if the component is unfit for instantiation with the given port sequence.
/// # Panics
/// This function panics if the connector's (large) component id space is exhausted.
pub fn add_component(
&mut self,
module_name: &[u8],
identifier: &[u8],
ports: &[PortId],
) -> Result<(), AddComponentError> {
// Check for error cases first before modifying `cu`
use AddComponentError as Ace;
let cu = &self.unphased;
if let Some(port) = duplicate_port(ports) {
return Err(Ace::DuplicatePort(port));
}
let expected_polarities = cu.proto_description.component_polarities(module_name, identifier)?;
if expected_polarities.len()!= ports.len() {
return Err(Ace::WrongNumberOfParamaters { expected: expected_polarities.len() });
}
for (&expected_polarity, &port) in expected_polarities.iter().zip(ports.iter()) {
let info = cu.ips.port_info.map.get(&port).ok_or(Ace::UnknownPort(port))?;
if info.owner!= cu.native_component_id {
return Err(Ace::UnknownPort(port));
}
if info.polarity!= expected_polarity |
}
// No errors! Time to modify `cu`
// create a new component and identifier
let Connector { phased, unphased: cu } = self;
let new_cid = cu.ips.id_manager.new_component_id();
cu.proto_components.insert(new_cid, cu.proto_description.new_component(module_name, identifier, ports));
// update the ownership of moved ports
for port in ports.iter() {
match cu.ips.port_info.map.get_mut(port) {
Some(port_info) => port_info.owner = new_cid,
None => unreachable!(),
}
}
if let Some(set) = cu.ips.port_info.owned.get_mut(&cu.native_component_id) {
set.retain(|x|!ports.contains(x));
}
let moved_port_set: HashSet<PortId> = ports.iter().copied().collect();
if let ConnectorPhased::Communication(comm) = phased {
// Preserve invariant: batches only reason about native's ports.
// Remove batch puts/gets for moved ports.
for batch in comm.native_batches.iter_mut() {
batch.to_put.retain(|port, _|!moved_port_set.contains(port));
batch.to_get.retain(|port|!moved_port_set.contains(port));
}
}
cu.ips.port_info.owned.insert(new_cid, moved_port_set);
Ok(())
}
}
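// Illustrative sketch (not part of the runtime): typical native-side use of the two
// APIs above. The module/component names are hypothetical and assume the configured
// protocol defines a component that takes exactly these two ports with matching
// polarities; ownership of both ports leaves the native component only if
// `add_component` returns Ok.
#[allow(dead_code)]
fn example_spawn_component(connector: &mut Connector) -> Result<(), AddComponentError> {
let [putter, getter] = connector.new_port_pair();
connector.add_component(b"example_module", b"forward", &[getter, putter])
}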
impl Predicate {
#[inline]
pub fn singleton(k: SpecVar, v: SpecVal) -> Self {
Self::default().inserted(k, v)
}
#[inline]
pub fn inserted(mut self, k: SpecVar, v: SpecVal) -> Self {
self.assigned.insert(k, v);
self
}
// Return true iff `self` is a subset of `maybe_superset`
pub fn assigns_subset(&self, maybe_superset: &Self) -> bool {
for (var, val) in self.assigned.iter() {
match maybe_superset.assigned.get(var) {
Some(val2) if val2 == val => {}
_ => return false, // var unmapped, or mapped differently
}
}
// `maybe_superset` mirrored all my assignments!
true
}
/// Given the two predicates {self, other}, return that whose
/// assignments are the union of those of both.
fn assignment_union(&self, other: &Self) -> AssignmentUnionResult {
use AssignmentUnionResult as Aur;
// iterators over assignments of both predicates. Rely on SORTED ordering of BTreeMap's keys.
let [mut s_it, mut o | {
return Err(Ace::WrongPortPolarity { port, expected_polarity });
} | conditional_block |
mod.rs | if the round fails.
struct RoundCtx {
solution_storage: SolutionStorage,
spec_var_stream: SpecVarStream,
payload_inbox: Vec<(PortId, SendPayloadMsg)>,
deadline: Option<Instant>,
ips: IdAndPortState,
}
// A trait intended to limit the access of the ConnectorUnphased structure
// such that we don't accidentally modify any important component/port data
// while the results of the round are undecided. Why? Any actions during Connector::sync
// are _speculative_ until the round is decided, and we need a safe way of rolling
// back any changes.
trait CuUndecided {
fn logger(&mut self) -> &mut dyn Logger;
fn proto_description(&self) -> &ProtocolDescription;
fn native_component_id(&self) -> ComponentId;
fn logger_and_protocol_description(&mut self) -> (&mut dyn Logger, &ProtocolDescription);
fn logger_and_protocol_components(
&mut self,
) -> (&mut dyn Logger, &mut HashMap<ComponentId, ComponentState>);
}
// Represents a set of synchronous port operations that the native component
// has described as an "option" for completing during the synchronous rounds.
// Operations contained here succeed together or not at all.
// A native with N=2+ batches is expressing an N-way nondeterministic choice
#[derive(Debug, Default)]
struct NativeBatch {
// invariant: putters' and getters' polarities respected
to_put: HashMap<PortId, Payload>,
to_get: HashSet<PortId>,
}
// Parallels a mio::Token type, but more clearly communicates
// the way it identifies the evented structure it corresponds to.
// See runtime/setup for methods converting between TokenTarget and mio::Token
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
enum TokenTarget {
NetEndpoint { index: usize },
UdpEndpoint { index: usize },
}
// Returned by the endpoint manager as a result of comm_recv, telling the connector what happened,
// such that it can know when to continue polling, and when to block.
enum CommRecvOk {
TimeoutWithoutNew,
NewPayloadMsgs,
NewControlMsg { net_index: usize, msg: CommCtrlMsg },
}
////////////////
fn err_would_block(err: &std::io::Error) -> bool {
err.kind() == std::io::ErrorKind::WouldBlock
}
impl<T: std::cmp::Ord> VecSet<T> {
fn new(mut vec: Vec<T>) -> Self {
// establish the invariant
vec.sort();
vec.dedup();
Self { vec }
}
fn contains(&self, element: &T) -> bool {
self.vec.binary_search(element).is_ok()
}
// Insert the given element. Returns true if it was newly inserted, i.e., not already present.
fn insert(&mut self, element: T) -> bool {
match self.vec.binary_search(&element) {
Ok(_) => false,
Err(index) => {
self.vec.insert(index, element);
true
}
}
}
fn iter(&self) -> std::slice::Iter<T> {
self.vec.iter()
}
fn pop(&mut self) -> Option<T> {
self.vec.pop()
}
}
impl PortInfoMap {
fn ports_owned_by(&self, owner: ComponentId) -> impl Iterator<Item = &PortId> {
self.owned.get(&owner).into_iter().flat_map(HashSet::iter)
}
fn spec_var_for(&self, port: PortId) -> SpecVar {
// Every port maps to a speculative variable
// Two distinct ports map to the same variable
// IFF they are two ends of the same logical channel.
let info = self.map.get(&port).unwrap();
SpecVar(match info.polarity {
Getter => port,
Putter => info.peer.unwrap(),
})
}
fn invariant_preserved(&self) -> bool {
// for every port P with some owner O,
// P is in O's owned set
for (port, info) in self.map.iter() {
match self.owned.get(&info.owner) {
Some(set) if set.contains(port) => {}
_ => {
println!("{:#?}\n WITH port {:?}", self, port);
return false;
}
}
}
// for every port P owned by every owner O,
// P's owner is O
for (&owner, set) in self.owned.iter() {
for port in set {
match self.map.get(port) {
Some(info) if info.owner == owner => {}
_ => {
println!("{:#?}\n WITH owner {:?} port {:?}", self, owner, port);
return false;
}
}
}
}
true
}
}
impl SpecVarStream {
fn next(&mut self) -> SpecVar {
let phantom_port: PortId =
Id { connector_id: self.connector_id, u32_suffix: self.port_suffix_stream.next() }
.into();
SpecVar(phantom_port)
}
}
impl IdManager {
fn new(connector_id: ConnectorId) -> Self {
Self {
connector_id,
port_suffix_stream: Default::default(),
component_suffix_stream: Default::default(),
}
}
fn new_spec_var_stream(&self) -> SpecVarStream {
// Spec var stream starts where the current port_id stream ends, with a gap of SKIP_N.
// This gap is entirely unnecessary (i.e. 0 is fine);
// its purpose is only to make SpecVars easier to spot in logs.
// E.g. spot the spec var: { v0_0, v1_2, v1_103 }
const SKIP_N: u32 = 100;
let port_suffix_stream = self.port_suffix_stream.clone().n_skipped(SKIP_N);
SpecVarStream { connector_id: self.connector_id, port_suffix_stream }
}
fn new_port_id(&mut self) -> PortId {
Id { connector_id: self.connector_id, u32_suffix: self.port_suffix_stream.next() }.into()
}
fn new_component_id(&mut self) -> ComponentId {
Id { connector_id: self.connector_id, u32_suffix: self.component_suffix_stream.next() }
.into()
}
}
impl Drop for Connector {
fn drop(&mut self) {
log!(self.unphased.logger(), "Connector dropping. Goodbye!");
}
}
// Given a slice of ports, return the first port, if any, that occurs more than once in the slice
fn duplicate_port(slice: &[PortId]) -> Option<PortId> {
let mut vec = Vec::with_capacity(slice.len());
for port in slice.iter() {
match vec.binary_search(port) {
Err(index) => vec.insert(index, *port),
Ok(_) => return Some(*port),
}
}
None
}
impl Connector {
/// Generate a random connector identifier from the system's source of randomness.
pub fn random_id() -> ConnectorId {
type Bytes8 = [u8; std::mem::size_of::<ConnectorId>()];
unsafe {
let mut bytes = std::mem::MaybeUninit::<Bytes8>::uninit();
// getrandom is the canonical crate for a small, secure rng
getrandom::getrandom(&mut *bytes.as_mut_ptr()).unwrap();
// safe! representations of all valid Bytes8 values are valid ConnectorId values
std::mem::transmute::<_, _>(bytes.assume_init())
}
}
/// Returns true iff the connector is in the connected state, i.e., its setup phase is complete,
/// and it is ready to participate in synchronous rounds of communication.
pub fn is_connected(&self) -> bool {
// If designed for Rust usage, connectors would be exposed as an enum type from the start.
// Consequently, this "phased" business would also include connector variants and this would
// get a lot closer to the connector impl. itself.
// Instead, the C-oriented implementation doesn't distinguish connector states as types,
// but distinguishes them as enum variants.
match self.phased {
ConnectorPhased::Setup(..) => false,
ConnectorPhased::Communication(..) => true,
}
}
/// Enables the connector's current logger to be swapped out for another
pub fn swap_logger(&mut self, mut new_logger: Box<dyn Logger>) -> Box<dyn Logger> {
std::mem::swap(&mut self.unphased.logger, &mut new_logger);
new_logger
}
/// Access the connector's current logger
pub fn get_logger(&mut self) -> &mut dyn Logger {
&mut *self.unphased.logger
}
/// Create a new synchronous channel, returning its ends as a pair of ports,
/// with polarity output, input respectively. Available during either setup/communication phase.
/// # Panics
/// This function panics if the connector's (large) port id space is exhausted.
pub fn new_port_pair(&mut self) -> [PortId; 2] {
let cu = &mut self.unphased;
// adds two new associated ports, related to each other, and exposed to the native
let mut new_cid = || cu.ips.id_manager.new_port_id();
// allocate two fresh port identifiers
let [o, i] = [new_cid(), new_cid()];
// store info for each:
// - they are each others' peers
// - they are owned by the native component
// - polarity putter, getter respectively
cu.ips.port_info.map.insert(
o,
PortInfo {
route: Route::LocalComponent,
peer: Some(i),
owner: cu.native_component_id,
polarity: Putter,
},
);
cu.ips.port_info.map.insert(
i,
PortInfo {
route: Route::LocalComponent,
peer: Some(o),
owner: cu.native_component_id,
polarity: Getter,
},
);
cu.ips
.port_info
.owned
.entry(cu.native_component_id)
.or_default()
.extend([o, i].iter().copied());
log!(cu.logger, "Added port pair (out->in) {:?} -> {:?}", o, i);
[o, i]
}
/// Instantiates a new component for the connector runtime to manage, passing
/// the given set of ports from the interface of the native component to that of the
/// newly created component (transferring their ownership).
/// # Errors
/// Error is returned if the moved ports are not owned by the native component,
/// if the given component name is not defined in the connector's protocol,
/// the given sequence of ports contains a duplicate port,
/// or if the component is unfit for instantiation with the given port sequence.
/// # Panics
/// This function panics if the connector's (large) component id space is exhausted.
pub fn add_component(
&mut self,
module_name: &[u8],
identifier: &[u8],
ports: &[PortId],
) -> Result<(), AddComponentError> {
// Check for error cases first before modifying `cu`
use AddComponentError as Ace;
let cu = &self.unphased;
if let Some(port) = duplicate_port(ports) {
return Err(Ace::DuplicatePort(port));
}
let expected_polarities = cu.proto_description.component_polarities(module_name, identifier)?;
if expected_polarities.len()!= ports.len() {
return Err(Ace::WrongNumberOfParamaters { expected: expected_polarities.len() });
}
for (&expected_polarity, &port) in expected_polarities.iter().zip(ports.iter()) {
let info = cu.ips.port_info.map.get(&port).ok_or(Ace::UnknownPort(port))?;
if info.owner!= cu.native_component_id {
return Err(Ace::UnknownPort(port));
}
if info.polarity!= expected_polarity {
return Err(Ace::WrongPortPolarity { port, expected_polarity });
}
}
// No errors! Time to modify `cu`
// create a new component and identifier
let Connector { phased, unphased: cu } = self;
let new_cid = cu.ips.id_manager.new_component_id();
cu.proto_components.insert(new_cid, cu.proto_description.new_component(module_name, identifier, ports));
// update the ownership of moved ports
for port in ports.iter() {
match cu.ips.port_info.map.get_mut(port) {
Some(port_info) => port_info.owner = new_cid,
None => unreachable!(),
}
}
if let Some(set) = cu.ips.port_info.owned.get_mut(&cu.native_component_id) {
set.retain(|x|!ports.contains(x));
}
let moved_port_set: HashSet<PortId> = ports.iter().copied().collect();
if let ConnectorPhased::Communication(comm) = phased {
// Preserve invariant: batches only reason about native's ports.
// Remove batch puts/gets for moved ports.
for batch in comm.native_batches.iter_mut() {
batch.to_put.retain(|port, _|!moved_port_set.contains(port));
batch.to_get.retain(|port|!moved_port_set.contains(port));
}
}
cu.ips.port_info.owned.insert(new_cid, moved_port_set);
Ok(())
}
}
impl Predicate {
#[inline]
pub fn singleton(k: SpecVar, v: SpecVal) -> Self {
Self::default().inserted(k, v)
}
#[inline]
pub fn inserted(mut self, k: SpecVar, v: SpecVal) -> Self {
self.assigned.insert(k, v);
self
}
// Return true iff `self` is a subset of `maybe_superset`
pub fn assigns_subset(&self, maybe_superset: &Self) -> bool {
for (var, val) in self.assigned.iter() {
match maybe_superset.assigned.get(var) {
Some(val2) if val2 == val => {}
_ => return false, // var unmapped, or mapped differently
}
}
// `maybe_superset` mirrored all my assignments!
true
}
/// Given the two predicates {self, other}, return that whose
/// assignments are the union of those of both.
fn assignment_union(&self, other: &Self) -> AssignmentUnionResult {
use AssignmentUnionResult as Aur;
// iterators over assignments of both predicates. Rely on SORTED ordering of BTreeMap's keys.
let [mut s_it, mut o_it] = [self.assigned.iter(), other.assigned.iter()];
let [mut s, mut o] = [s_it.next(), o_it.next()];
// populate lists of assignments in self but not other and vice versa.
// do this by incrementally unfolding the iterators, keeping an eye
// on the ordering between the head elements [s, o].
// whenever s < o, other is certainly missing element `s`, etc.
let [mut s_not_o, mut o_not_s] = [vec![], vec![]];
loop {
match [s, o] {
[None, None] => break, // both iterators are empty
[None, Some(x)] => {
// self's iterator is empty.
// all remaining elements are in other but not self
o_not_s.push(x);
o_not_s.extend(o_it);
break;
}
[Some(x), None] => {
// other's iterator is empty.
// all remaining elements are in self but not other
s_not_o.push(x);
s_not_o.extend(s_it);
break;
}
[Some((sid, sb)), Some((oid, ob))] => {
if sid < oid {
// o is missing this element
s_not_o.push((sid, sb));
s = s_it.next();
} else if sid > oid {
// s is missing this element
o_not_s.push((oid, ob));
o = o_it.next();
} else if sb!= ob {
assert_eq!(sid, oid);
// both predicates assign the variable but differ on the value
// No predicate exists which satisfies both!
return Aur::Nonexistant;
} else {
// both predicates assign the variable to the same value
s = s_it.next();
o = o_it.next();
}
}
}
}
// Observed zero inconsistencies. A unified predicate exists...
match [s_not_o.is_empty(), o_not_s.is_empty()] {
[true, true] => Aur::Equivalent, //... equivalent to both.
[false, true] => Aur::FormerNotLatter, //... equivalent to self.
[true, false] => Aur::LatterNotFormer, //... equivalent to other.
[false, false] => {
//... which is the union of the predicates' assignments but
// is equivalent to neither self nor other.
let mut new = self.clone();
for (&id, &b) in o_not_s {
new.assigned.insert(id, b);
}
Aur::New(new)
}
}
}
// Compute the union of the assignments of the two given predicates, if it exists.
// It doesn't exist if there is some value which the predicates assign to different values.
pub(crate) fn union_with(&self, other: &Self) -> Option<Self> {
let mut res = self.clone();
for (&channel_id, &assignment_1) in other.assigned.iter() {
match res.assigned.insert(channel_id, assignment_1) {
Some(assignment_2) if assignment_1!= assignment_2 => return None,
_ => {}
}
}
Some(res)
}
pub(crate) fn query(&self, var: SpecVar) -> Option<SpecVal> {
self.assigned.get(&var).copied()
}
}
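// Worked example for `assignment_union`/`union_with` above (illustrative only;
// v0, v1, TRUE, FALSE are schematic stand-ins for SpecVar/SpecVal values, which in
// the real runtime come from the IdManager and the protocol's firing constants):
//   p = { v0=TRUE }        q = { v0=TRUE, v1=FALSE }        r = { v0=FALSE }
//   p.assignment_union(&q) => LatterNotFormer   (q already equals the union)
//   q.assignment_union(&p) => FormerNotLatter
//   p.assignment_union(&r) => Nonexistant       (they disagree on v0)
//   p.union_with(&q) => Some(predicate equal to q)    p.union_with(&r) => None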
impl RoundCtx {
// remove an arbitrary buffered message, along with the ID of the getter who receives it
fn getter_pop(&mut self) -> Option<(PortId, SendPayloadMsg)> {
self.payload_inbox.pop()
}
// buffer a message along with the ID of the getter who receives it
fn getter_push(&mut self, getter: PortId, msg: SendPayloadMsg) {
self.payload_inbox.push((getter, msg));
}
// buffer a message given the ID of the putter who sent it; it is stored under the putter's peer (the getter)
fn putter_push(&mut self, cu: &mut impl CuUndecided, putter: PortId, msg: SendPayloadMsg) {
if let Some(getter) = self.ips.port_info.map.get(&putter).unwrap().peer {
log!(cu.logger(), "Putter add (putter:{:?} => getter:{:?})", putter, getter);
self.getter_push(getter, msg);
} else {
log!(cu.logger(), "Putter {:?} has no known peer!", putter);
panic!("Putter {:?} has no known peer!", putter);
}
}
}
impl<T: Debug + std::cmp::Ord> Debug for VecSet<T> {
fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
f.debug_set().entries(self.vec.iter()).finish()
}
}
impl Debug for Predicate {
fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
struct Assignment<'a>((&'a SpecVar, &'a SpecVal));
impl Debug for Assignment<'_> {
fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
write!(f, "{:?}={:?}", (self.0).0, (self.0).1)
}
}
f.debug_set().entries(self.assigned.iter().map(Assignment)).finish()
}
}
impl IdParts for SpecVar {
fn id_parts(self) -> (ConnectorId, U32Suffix) {
self.0.id_parts()
}
}
impl Debug for SpecVar {
fn | fmt | identifier_name |
|
mod.rs | , serde::Serialize, serde::Deserialize)]
enum Msg {
SetupMsg(SetupMsg),
CommMsg(CommMsg),
}
// Control messages exchanged during the setup phase only
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
enum SetupMsg {
MyPortInfo(MyPortInfo),
LeaderWave { wave_leader: ConnectorId },
LeaderAnnounce { tree_leader: ConnectorId },
YouAreMyParent,
}
// Control message particular to the communication phase.
// as such, it's annotated with a round_index
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
struct CommMsg {
round_index: usize,
contents: CommMsgContents,
}
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
enum CommMsgContents {
SendPayload(SendPayloadMsg),
CommCtrl(CommCtrlMsg),
}
// Connector <-> connector control messages for use in the communication phase
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
enum CommCtrlMsg {
Suggest { suggestion: Decision }, // child->parent
Announce { decision: Decision }, // parent->child
}
// Speculative payload message, communicating the value for the given
// port's message predicated on the given speculative variable assignments.
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
struct SendPayloadMsg {
predicate: Predicate,
payload: Payload,
}
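// Illustrative example (shape only): a payload sent during round 3 travels over a
// net endpoint as the nesting
//   Msg::CommMsg(CommMsg {
//       round_index: 3,
//       contents: CommMsgContents::SendPayload(SendPayloadMsg { predicate, payload }),
//   })
// while consensus suggestions and announcements use CommMsgContents::CommCtrl instead.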
// Return result of `Predicate::assignment_union`, communicating the contents
// of the predicate which represents the (consistent) union of their mappings,
// if it exists (no variable mapped distinctly by the input predicates)
#[derive(Debug, PartialEq)]
enum AssignmentUnionResult {
FormerNotLatter,
LatterNotFormer,
Equivalent,
New(Predicate),
Nonexistant,
}
// One of two endpoints for a control channel with a connector on either end.
// The underlying transport is TCP, so we use an inbox buffer to allow
// discrete payload receipt.
struct NetEndpoint {
inbox: Vec<u8>,
stream: TcpStream,
}
// Datastructure used during the setup phase representing a NetEndpoint TO BE SETUP
#[derive(Debug, Clone)]
struct NetEndpointSetup {
getter_for_incoming: PortId,
sock_addr: SocketAddr,
endpoint_polarity: EndpointPolarity,
}
// Datastructure used during the setup phase representing a UdpEndpoint TO BE SETUP
#[derive(Debug, Clone)]
struct UdpEndpointSetup {
getter_for_incoming: PortId,
local_addr: SocketAddr,
peer_addr: SocketAddr,
}
// NetEndpoint annotated with the ID of the port that receives payload
// messages received through the endpoint. This approach assumes that NetEndpoints
// DO NOT multiplex port->port channels, and so a mapping such as this is possible.
// As a result, the messages themselves don't need to carry the PortID with them.
#[derive(Debug)]
struct NetEndpointExt {
net_endpoint: NetEndpoint,
getter_for_incoming: PortId,
}
// Endpoint for a "raw" UDP endpoint. Corresponds to the "Udp Mediator Component"
// described in the literature.
// It acts as an endpoint by receiving messages via the poller etc. (managed by EndpointManager),
// It acts as a native component by managing a (speculative) set of payload messages (an outbox,
// protecting the peer on the other side of the network).
#[derive(Debug)]
struct UdpEndpointExt {
sock: UdpSocket, // already bound and connected
received_this_round: bool,
outgoing_payloads: HashMap<Predicate, Payload>,
getter_for_incoming: PortId,
}
// Meta-data for the connector: its role in the consensus tree.
#[derive(Debug)]
struct Neighborhood {
parent: Option<usize>,
children: VecSet<usize>,
}
// Manages the connector's ID, and manages allocations for component/port IDs.
#[derive(Debug, Clone)]
struct IdManager {
connector_id: ConnectorId,
port_suffix_stream: U32Stream,
component_suffix_stream: U32Stream,
}
// Newtype wrapper around a byte buffer, used for UDP mediators to receive incoming datagrams.
struct IoByteBuffer {
byte_vec: Vec<u8>,
}
// A generator of speculative variables. Created on-demand during the synchronous round
// by the IdManager.
#[derive(Debug)]
struct SpecVarStream {
connector_id: ConnectorId,
port_suffix_stream: U32Stream,
}
// Manages the messy state of the various endpoints, pollers, buffers, etc.
#[derive(Debug)]
struct EndpointManager {
// invariants:
// 1. net and udp endpoints are registered with poll with tokens computed with TokenTarget::into
// 2. Events is empty
poll: Poll,
events: Events,
delayed_messages: Vec<(usize, Msg)>,
undelayed_messages: Vec<(usize, Msg)>, // ready to yield
net_endpoint_store: EndpointStore<NetEndpointExt>,
udp_endpoint_store: EndpointStore<UdpEndpointExt>,
io_byte_buffer: IoByteBuffer,
}
// A storage of endpoints, which keeps track of which components have raised
// an event during poll(), signifying that they need to be checked for new incoming data
#[derive(Debug)]
struct EndpointStore<T> {
endpoint_exts: Vec<T>,
polled_undrained: VecSet<usize>,
}
// The information associated with a port identifier, designed for local storage.
#[derive(Clone, Debug)]
struct PortInfo {
owner: ComponentId,
peer: Option<PortId>,
polarity: Polarity,
route: Route,
}
// Similar to `PortInfo`, but designed for communication during the setup procedure.
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
struct MyPortInfo {
polarity: Polarity,
port: PortId,
owner: ComponentId,
}
// Newtype around port info map, allowing the implementation of some
// useful methods
#[derive(Default, Debug, Clone)]
struct PortInfoMap {
// invariant: self.invariant_preserved()
// `owned` is redundant information, allowing for fast lookup
// of a component's owned ports (which occurs during the sync round a lot)
map: HashMap<PortId, PortInfo>,
owned: HashMap<ComponentId, HashSet<PortId>>,
}
// A convenient substructure for containing port info and the ID manager.
// Houses the bulk of the connector's persistent state between rounds.
// It turns out several situations require access to both things.
#[derive(Debug, Clone)]
struct IdAndPortState {
port_info: PortInfoMap,
id_manager: IdManager,
}
// A connector's communication-phase-specific data
#[derive(Debug)]
struct ConnectorCommunication {
round_index: usize,
endpoint_manager: EndpointManager,
neighborhood: Neighborhood,
native_batches: Vec<NativeBatch>,
round_result: Result<Option<RoundEndedNative>, SyncError>,
}
// A component's data common to both setup and communication phases
#[derive(Debug)]
struct ConnectorUnphased {
proto_description: Arc<ProtocolDescription>,
proto_components: HashMap<ComponentId, ComponentState>,
logger: Box<dyn Logger>,
ips: IdAndPortState,
native_component_id: ComponentId,
}
// A connector's phase-specific data
#[derive(Debug)]
enum ConnectorPhased {
Setup(Box<ConnectorSetup>),
Communication(Box<ConnectorCommunication>),
}
// A connector's setup-phase-specific data
#[derive(Debug)]
struct ConnectorSetup {
net_endpoint_setups: Vec<NetEndpointSetup>,
udp_endpoint_setups: Vec<UdpEndpointSetup>,
}
// A newtype wrapper for a map from speculative variable to speculative value
// A missing mapping corresponds with "unspecified".
#[derive(Default, Clone, Eq, PartialEq, Hash, serde::Serialize, serde::Deserialize)]
struct Predicate {
assigned: BTreeMap<SpecVar, SpecVal>,
}
// Identifies a child of this connector in the _solution tree_.
// Each connector creates its own local solutions for the consensus procedure during `sync`,
// from the solutions of its children. Those children are either locally-managed components,
// (which are leaves in the solution tree), or other connectors reachable through the given
// network endpoint (which are internal nodes in the solution tree).
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
enum SubtreeId {
LocalComponent(ComponentId),
NetEndpoint { index: usize },
}
// An accumulation of the connector's knowledge of all (a) the local solutions its children
// in the solution tree have found, and (b) its own solutions derivable from those of its children.
// This structure starts off each round with an empty set, and accumulates solutions as they are found
// by local components, or received over the network in control messages.
// IMPORTANT: solutions, once found, don't go away until the end of the round. That is to
// say that these sets GROW until the round is over, at which point all solutions are reset.
#[derive(Debug)]
struct SolutionStorage {
// invariant: old_local U new_local solutions are those that can be created from
// the UNION of one element from each set in `subtree_solution`.
// invariant is maintained by potentially populating new_local whenever subtree_solutions is populated.
old_local: HashSet<Predicate>, // already sent to this connector's parent OR decided
new_local: HashSet<Predicate>, // not yet sent to this connector's parent OR decided
// this pair acts as SubtreeId -> HashSet<Predicate> which is friendlier to iteration
subtree_solutions: Vec<HashSet<Predicate>>,
subtree_id_to_index: HashMap<SubtreeId, usize>,
}
// Stores the transient data of a synchronous round.
// Some of it is for bookkeeping, and the rest is a temporary mirror of fields of
// `ConnectorUnphased`, such that any changes are safely contained within RoundCtx,
// and can be undone if the round fails.
struct RoundCtx {
solution_storage: SolutionStorage,
spec_var_stream: SpecVarStream,
payload_inbox: Vec<(PortId, SendPayloadMsg)>,
deadline: Option<Instant>,
ips: IdAndPortState,
}
// A trait intended to limit the access of the ConnectorUnphased structure
// such that we don't accidentally modify any important component/port data
// while the results of the round are undecided. Why? Any actions during Connector::sync
// are _speculative_ until the round is decided, and we need a safe way of rolling
// back any changes.
trait CuUndecided {
fn logger(&mut self) -> &mut dyn Logger;
fn proto_description(&self) -> &ProtocolDescription;
fn native_component_id(&self) -> ComponentId;
fn logger_and_protocol_description(&mut self) -> (&mut dyn Logger, &ProtocolDescription);
fn logger_and_protocol_components(
&mut self,
) -> (&mut dyn Logger, &mut HashMap<ComponentId, ComponentState>);
}
// Represents a set of synchronous port operations that the native component
// has described as an "option" for completing during the synchronous rounds.
// Operations contained here succeed together or not at all.
// A native with N=2+ batches is expressing an N-way nondeterministic choice
#[derive(Debug, Default)]
struct NativeBatch {
// invariant: putters' and getters' polarities respected
to_put: HashMap<PortId, Payload>,
to_get: HashSet<PortId>,
}
// Parallels a mio::Token type, but more clearly communicates
// the way it identifies the evented structure it corresponds to.
// See runtime/setup for methods converting between TokenTarget and mio::Token
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
enum TokenTarget {
NetEndpoint { index: usize },
UdpEndpoint { index: usize },
}
// Returned by the endpoint manager as a result of comm_recv, telling the connector what happened,
// such that it can know when to continue polling, and when to block.
enum CommRecvOk {
TimeoutWithoutNew,
NewPayloadMsgs,
NewControlMsg { net_index: usize, msg: CommCtrlMsg },
}
////////////////
fn err_would_block(err: &std::io::Error) -> bool {
err.kind() == std::io::ErrorKind::WouldBlock
}
impl<T: std::cmp::Ord> VecSet<T> {
fn new(mut vec: Vec<T>) -> Self {
// establish the invariant
vec.sort();
vec.dedup();
Self { vec }
}
fn contains(&self, element: &T) -> bool {
self.vec.binary_search(element).is_ok()
}
// Insert the given element. Returns true if it was newly inserted, i.e., not already present.
fn insert(&mut self, element: T) -> bool {
match self.vec.binary_search(&element) {
Ok(_) => false,
Err(index) => {
self.vec.insert(index, element);
true
}
}
}
fn iter(&self) -> std::slice::Iter<T> {
self.vec.iter()
}
fn pop(&mut self) -> Option<T> {
self.vec.pop()
}
}
impl PortInfoMap {
fn ports_owned_by(&self, owner: ComponentId) -> impl Iterator<Item = &PortId> {
self.owned.get(&owner).into_iter().flat_map(HashSet::iter)
}
fn spec_var_for(&self, port: PortId) -> SpecVar {
// Every port maps to a speculative variable
// Two distinct ports map to the same variable
// IFF they are two ends of the same logical channel.
let info = self.map.get(&port).unwrap();
SpecVar(match info.polarity {
Getter => port,
Putter => info.peer.unwrap(),
})
}
fn invariant_preserved(&self) -> bool {
// for every port P with some owner O,
// P is in O's owned set
for (port, info) in self.map.iter() {
match self.owned.get(&info.owner) {
Some(set) if set.contains(port) => {}
_ => {
println!("{:#?}\n WITH port {:?}", self, port);
return false;
}
}
}
// for every port P owned by every owner O,
// P's owner is O
for (&owner, set) in self.owned.iter() {
for port in set {
match self.map.get(port) {
Some(info) if info.owner == owner => {}
_ => {
println!("{:#?}\n WITH owner {:?} port {:?}", self, owner, port);
return false;
}
}
}
}
true
}
}
impl SpecVarStream {
fn next(&mut self) -> SpecVar {
let phantom_port: PortId =
Id { connector_id: self.connector_id, u32_suffix: self.port_suffix_stream.next() }
.into();
SpecVar(phantom_port)
}
}
impl IdManager {
fn new(connector_id: ConnectorId) -> Self |
fn new_spec_var_stream(&self) -> SpecVarStream {
// Spec var stream starts where the current port_id stream ends, with a gap of SKIP_N.
// This gap is entirely unnecessary (i.e. 0 is fine);
// its purpose is only to make SpecVars easier to spot in logs.
// E.g. spot the spec var: { v0_0, v1_2, v1_103 }
const SKIP_N: u32 = 100;
let port_suffix_stream = self.port_suffix_stream.clone().n_skipped(SKIP_N);
SpecVarStream { connector_id: self.connector_id, port_suffix_stream }
}
fn new_port_id(&mut self) -> PortId {
Id { connector_id: self.connector_id, u32_suffix: self.port_suffix_stream.next() }.into()
}
fn new_component_id(&mut self) -> ComponentId {
Id { connector_id: self.connector_id, u32_suffix: self.component_suffix_stream.next() }
.into()
}
}
impl Drop for Connector {
fn drop(&mut self) {
log!(self.unphased.logger(), "Connector dropping. Goodbye!");
}
}
// Given a slice of ports, return the first port, if any, that occurs more than once in the slice
fn duplicate_port(slice: &[PortId]) -> Option<PortId> {
let mut vec = Vec::with_capacity(slice.len());
for port in slice.iter() {
match vec.binary_search(port) {
Err(index) => vec.insert(index, *port),
Ok(_) => return Some(*port),
}
}
None
}
impl Connector {
/// Generate a random connector identifier from the system's source of randomness.
pub fn random_id() -> ConnectorId {
type Bytes8 = [u8; std::mem::size_of::<ConnectorId>()];
unsafe {
let mut bytes = std::mem::MaybeUninit::<Bytes8>::uninit();
// getrandom is the canonical crate for a small, secure rng
getrandom::getrandom(&mut *bytes.as_mut_ptr()).unwrap();
// safe! representations of all valid Bytes8 values are valid ConnectorId values
std::mem::transmute::<_, _>(bytes.assume_init())
}
}
/// Returns true iff the connector is in the connected state, i.e., its setup phase is complete,
/// and it is ready to participate in synchronous rounds of communication.
pub fn is_connected(&self) -> bool {
// If designed for Rust usage, connectors would be exposed as an enum type from the start.
// Consequently, this "phased" business would also include connector variants and this would
// get a lot closer to the connector impl. itself.
// Instead, the C-oriented implementation doesn't distinguish connector states as types,
// but distinguishes them as enum variants.
match self.phased {
ConnectorPhased::Setup(..) => false,
ConnectorPhased::Communication(..) => true,
}
}
/// Enables the connector's current logger to be swapped out for another
pub fn swap_logger(&mut self, mut new_logger: Box<dyn Logger>) -> Box<dyn Logger> {
std::mem::swap(&mut self.unphased.logger, &mut new_logger);
new_logger
}
/// Access the connector's current logger
pub fn get_logger(&mut self) -> &mut dyn Logger {
&mut *self.unphased.logger
}
/// Create a new synchronous channel, returning its ends as a pair of ports,
/// with polarity output, input respectively. Available during either setup/communication phase.
/// # Panics
/// This function panics if the connector's (large) port id space is exhausted.
pub fn new_port_pair(&mut self) -> [PortId; 2] {
let cu = &mut self.unphased;
// adds two new associated ports, related to each other, and exposed to the native
let mut new_cid = || cu.ips.id_manager.new_port_id();
// allocate two fresh port identifiers
let [o, i] = [new_cid(), new_cid()];
// store info for each:
// - they are each others' peers
// - they are owned by the native component
// - polarity putter, getter respectively
cu.ips.port_info.map.insert(
o,
PortInfo {
route: Route::LocalComponent,
peer: Some(i),
owner: cu.native_component_id,
polarity: Putter,
},
);
cu.ips.port_info.map.insert(
i,
PortInfo {
route: Route::LocalComponent,
peer: Some(o),
owner: cu.native_component_id,
polarity: Getter,
},
);
cu.ips
.port_info
.owned
.entry(cu.native_component_id)
.or_default()
.extend([o, i].iter().copied());
log!(cu.logger, "Added port pair (out->in) {:?} -> {:?}", o, i);
[o, i]
}
/// Instantiates a new component for the connector runtime to manage, passing
/// the given set of ports from the interface of the native component to that of the
/// newly created component (transferring their ownership).
/// # Errors
/// Error is returned if the moved ports are not owned by the native component,
/// if the given component name is not defined in the connector's protocol,
/// the given sequence of ports contains a duplicate port,
/// or if the component is unfit for instantiation with the given port sequence.
/// # Panics
/// This function panics if the connector's (large) component id space is exhausted.
pub fn add_component(
&mut self,
module_name: &[u8],
identifier: &[u8],
ports: &[PortId],
) -> Result<(), AddComponentError> {
// Check for error cases first before modifying `cu`
use AddComponentError as Ace;
let cu = &self.unphased;
if let Some(port) = duplicate_port(ports) {
return Err(Ace::DuplicatePort(port));
}
let expected_polarities = cu.proto_description.component_polarities(module_name, identifier)?;
if expected_polarities.len()!= ports.len() {
return Err(Ace::WrongNumberOfParamaters { expected: expected_polarities.len() });
}
for (&expected_polarity, &port) in expected_polarities.iter().zip(ports.iter()) {
let info = cu.ips.port_info.map.get(&port).ok_or(Ace::UnknownPort(port))?;
if info.owner!= cu.native_component_id {
return Err(Ace::UnknownPort(port));
}
if info.polarity!= expected_polarity {
return Err(Ace::WrongPortPolarity { port, expected_polarity });
}
}
// No errors! Time to modify `cu`
// create a new component and identifier
let Connector { phased, unphased: cu } = self;
let new_cid = cu.ips.id_manager.new_component_id();
cu.proto_components.insert(new_cid, cu.proto_description.new_component(module_name, identifier, ports));
// update the ownership of moved ports
for port in ports.iter() {
match cu.ips.port_info.map.get_mut(port) {
Some(port_info) => port_info.owner = new_cid,
None => unreachable!(),
}
}
if let Some(set) = cu.ips.port_info.owned.get_mut(&cu.native_component_id) {
set.retain(|x|!ports.contains(x));
}
let moved_port_set: HashSet<PortId> = ports.iter().copied().collect();
if let ConnectorPhased::Communication(comm) = phased {
// Preserve invariant: batches only reason about native's ports.
// Remove batch puts/gets for moved ports.
for batch in comm.native_batches.iter_mut() {
batch.to_put.retain(|port, _|!moved_port_set.contains(port));
batch.to_get.retain(|port|!moved_port_set.contains(port));
}
}
cu.ips.port_info.owned.insert(new_cid, moved_port_set);
Ok(())
}
}
impl Predicate {
#[inline]
pub fn singleton(k: SpecVar, v: SpecVal) -> Self {
Self::default().inserted(k, v)
}
#[inline]
pub fn inserted(mut self, k: SpecVar, v: SpecVal) -> Self {
self.assigned.insert(k, v);
self
}
// Return true iff `self` is a subset of `maybe_superset`
pub fn assigns_subset(&self, maybe_superset: &Self) -> bool {
for (var, val) in self.assigned.iter() {
match maybe_superset.assigned.get(var) {
Some(val2) if val2 == val => {}
_ => return false, // var unmapped, or mapped differently
}
}
// `maybe_superset` mirrored all my assignments!
true
}
/// Given the two predicates {self, other}, return that whose
/// assignments are the union of those of both.
fn assignment_union(&self, other: &Self) -> AssignmentUnionResult {
use AssignmentUnionResult as Aur;
// iterators over assignments of both predicates. Rely on SORTED ordering of BTreeMap's keys.
let [mut s_it, mut o | {
Self {
connector_id,
port_suffix_stream: Default::default(),
component_suffix_stream: Default::default(),
}
} | identifier_body |
obj.rs | libcalls that relocations are applied
/// against.
///
/// Note that this isn't typically used. It's only used for SSE-disabled
/// builds without SIMD on x86_64 right now.
libcall_symbols: HashMap<LibCall, SymbolId>,
ctrl_plane: ControlPlane,
}
impl<'a> ModuleTextBuilder<'a> {
/// Creates a new builder for the text section of an executable.
///
/// The `.text` section will be appended to the specified `obj` along with
/// any unwinding or such information as necessary. Functions are added by
/// calling `append_func` once per function, and `finish` must be called after
/// the last function has been appended.
pub fn new(
obj: &'a mut Object<'static>,
compiler: &'a dyn Compiler,
text: Box<dyn TextSectionBuilder>,
) -> Self {
// Entire code (functions and trampolines) will be placed
// in the ".text" section.
let text_section = obj.add_section(
obj.segment_name(StandardSegment::Text).to_vec(),
TEXT_SECTION_NAME.to_vec(),
SectionKind::Text,
);
Self {
compiler,
obj,
text_section,
unwind_info: Default::default(),
text,
libcall_symbols: HashMap::default(),
ctrl_plane: ControlPlane::default(),
}
}
/// Appends the `func` specified named `name` to this object.
///
/// The `resolve_reloc_target` closure is used to resolve a relocation
/// target to an adjacent function which has already been added or will be
/// added to this object. The argument is the relocation target specified | ///
/// Returns the symbol associated with the function as well as the range
/// that the function resides within the text section.
pub fn append_func(
&mut self,
name: &str,
compiled_func: &'a CompiledFunction<impl CompiledFuncEnv>,
resolve_reloc_target: impl Fn(FuncIndex) -> usize,
) -> (SymbolId, Range<u64>) {
let body = compiled_func.buffer.data();
let alignment = compiled_func.alignment;
let body_len = body.len() as u64;
let off = self
.text
.append(true, &body, alignment, &mut self.ctrl_plane);
let symbol_id = self.obj.add_symbol(Symbol {
name: name.as_bytes().to_vec(),
value: off,
size: body_len,
kind: SymbolKind::Text,
scope: SymbolScope::Compilation,
weak: false,
section: SymbolSection::Section(self.text_section),
flags: SymbolFlags::None,
});
if let Some(info) = compiled_func.unwind_info() {
self.unwind_info.push(off, body_len, info);
}
for r in compiled_func.relocations() {
match r.reloc_target {
// Relocations against user-defined functions means that this is
// a relocation against a module-local function, typically a
// call between functions. The `text` field is given priority to
// resolve this relocation before we actually emit an object
// file, but if it can't handle it then we pass through the
// relocation.
RelocationTarget::UserFunc(index) => {
let target = resolve_reloc_target(index);
if self
.text
.resolve_reloc(off + u64::from(r.offset), r.reloc, r.addend, target)
{
continue;
}
// At this time it's expected that all relocations are
// handled by `text.resolve_reloc`, and anything that isn't
// handled is a bug in `text.resolve_reloc` or something
// transitively there. If truly necessary, though, then this
// loop could also be updated to forward the relocation to
// the final object file as well.
panic!(
"unresolved relocation could not be processed against \
{index:?}: {r:?}"
);
}
// Relocations against libcalls are not common at this time and
// are only used in non-default configurations that disable wasm
// SIMD, disable SSE features, and for wasm modules that still
// use floating point operations.
//
// Currently these relocations are all expected to be absolute
// 8-byte relocations so that's asserted here and then encoded
// directly into the object as a normal object relocation. This
// is processed at module load time to resolve the relocations.
RelocationTarget::LibCall(call) => {
let symbol = *self.libcall_symbols.entry(call).or_insert_with(|| {
self.obj.add_symbol(Symbol {
name: libcall_name(call).as_bytes().to_vec(),
value: 0,
size: 0,
kind: SymbolKind::Text,
scope: SymbolScope::Linkage,
weak: false,
section: SymbolSection::Undefined,
flags: SymbolFlags::None,
})
});
let (encoding, kind, size) = match r.reloc {
Reloc::Abs8 => (
object::RelocationEncoding::Generic,
object::RelocationKind::Absolute,
8,
),
other => unimplemented!("unimplemented relocation kind {other:?}"),
};
self.obj
.add_relocation(
self.text_section,
object::write::Relocation {
symbol,
size,
kind,
encoding,
offset: off + u64::from(r.offset),
addend: r.addend,
},
)
.unwrap();
}
};
}
(symbol_id, off..off + body_len)
}
/// Forces "veneers" to be used for inter-function calls in the text
/// section which means that in-bounds optimized addresses are never used.
///
/// This is only useful for debugging cranelift itself and typically this
/// option is disabled.
pub fn force_veneers(&mut self) {
self.text.force_veneers();
}
/// Appends the specified amount of bytes of padding into the text section.
///
/// This is only useful when fuzzing and/or debugging cranelift itself and
/// for production scenarios `padding` is 0 and this function does nothing.
pub fn append_padding(&mut self, padding: usize) {
if padding == 0 {
return;
}
self.text
.append(false, &vec![0; padding], 1, &mut self.ctrl_plane);
}
/// Indicates that the text section has been written completely and this
/// will finish appending it to the original object.
///
/// Note that this will also write out the unwind information sections if
/// necessary.
pub fn finish(mut self) {
// Finish up the text section now that we're done adding functions.
let text = self.text.finish(&mut self.ctrl_plane);
self.obj
.section_mut(self.text_section)
.set_data(text, self.compiler.page_size_align());
// Append the unwind information for all our functions, if necessary.
self.unwind_info
.append_section(self.compiler, self.obj, self.text_section);
}
}
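// Illustrative sketch (not part of wasmtime): the intended calling sequence for
// `ModuleTextBuilder`. The `funcs` slice, its symbol names, and the index-based
// relocation resolution are hypothetical; what matters is that every function goes
// through `append_func` (with a closure resolving module-local call targets to the
// indices of other appended functions) and that `finish` runs last so the text and
// unwind sections are actually written into `obj`.
#[allow(dead_code)]
fn emit_functions_sketch<'a, E: CompiledFuncEnv>(
obj: &'a mut Object<'static>,
compiler: &'a dyn Compiler,
text: Box<dyn TextSectionBuilder>,
funcs: &'a [(String, CompiledFunction<E>)],
) {
let mut builder = ModuleTextBuilder::new(obj, compiler, text);
for (name, func) in funcs {
// Assume call targets are identified by their position in `funcs`.
let (_symbol, _range) =
builder.append_func(name, func, |idx: FuncIndex| idx.as_u32() as usize);
}
builder.finish();
}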
/// Builder used to create unwind information for a set of functions added to a
/// text section.
#[derive(Default)]
struct UnwindInfoBuilder<'a> {
windows_xdata: Vec<u8>,
windows_pdata: Vec<RUNTIME_FUNCTION>,
systemv_unwind_info: Vec<(u64, &'a systemv::UnwindInfo)>,
}
// This is a mirror of `RUNTIME_FUNCTION` in the Windows API, but defined here
// to ensure everything is always `u32` and to have it available on all
// platforms. Note that all of these specifiers here are relative to a "base
// address" which we define as the base of where the text section is eventually
// loaded.
#[allow(non_camel_case_types)]
struct RUNTIME_FUNCTION {
begin: u32,
end: u32,
unwind_address: u32,
}
impl<'a> UnwindInfoBuilder<'a> {
/// Pushes the unwind information for a function into this builder.
///
/// The function being described must be located at `function_offset` within
/// the text section itself, and the function's size is specified by
/// `function_len`.
///
/// The `info` should come from Cranelift, and is handled here depending on
/// its flavor.
fn push(&mut self, function_offset: u64, function_len: u64, info: &'a UnwindInfo) {
match info {
// Windows unwind information is stored in two locations:
//
// * First is the actual unwinding information which is stored
// in the `.xdata` section. This is where `info`'s emitted
// information will go into.
// * Second are pointers to connect all this unwind information,
// stored in the `.pdata` section. The `.pdata` section is an
// array of `RUNTIME_FUNCTION` structures.
//
// Due to how these will be loaded at runtime the `.pdata` isn't
// actually assembled byte-wise here. Instead that's deferred to
// happen later during `write_windows_unwind_info` which will apply
// a further offset to `unwind_address`.
UnwindInfo::WindowsX64(info) => {
let unwind_size = info.emit_size();
let mut unwind_info = vec![0; unwind_size];
info.emit(&mut unwind_info);
// `.xdata` entries are always 4-byte aligned
//
// FIXME: in theory we could "intern" the `unwind_info` value
// here within the `.xdata` section. Most of our unwind
// information for functions is probably pretty similar in which
// case the `.xdata` could be quite small and `.pdata` could
// have multiple functions point to the same unwinding
// information.
while self.windows_xdata.len() % 4!= 0 {
self.windows_xdata.push(0x00);
}
let unwind_address = self.windows_xdata.len();
self.windows_xdata.extend_from_slice(&unwind_info);
// Record a `RUNTIME_FUNCTION` which this will point to.
self.windows_pdata.push(RUNTIME_FUNCTION {
begin: u32::try_from(function_offset).unwrap(),
end: u32::try_from(function_offset + function_len).unwrap(),
unwind_address: u32::try_from(unwind_address).unwrap(),
});
}
// System-V is different enough that we just record the unwinding
// information to get processed at a later time.
UnwindInfo::SystemV(info) => {
self.systemv_unwind_info.push((function_offset, info));
}
_ => panic!("some unwind info isn't handled here"),
}
}
/// Appends the unwind information section, if any, to the `obj` specified.
///
/// This function must be called immediately after the text section was
/// added to a builder. The unwind information section must trail the text
/// section immediately.
///
/// The `text_section`'s section identifier is passed into this function.
fn append_section(
&self,
compiler: &dyn Compiler,
obj: &mut Object<'_>,
text_section: SectionId,
) {
// This write will align the text section to a page boundary and then
// return the offset at that point. This gives us the full size of the
// text section at that point, after alignment.
let text_section_size =
obj.append_section_data(text_section, &[], compiler.page_size_align());
if self.windows_xdata.len() > 0 {
assert!(self.systemv_unwind_info.len() == 0);
// The `.xdata` section must come first to be just-after the `.text`
// section for the reasons documented in `write_windows_unwind_info`
// below.
let segment = obj.segment_name(StandardSegment::Data).to_vec();
let xdata_id = obj.add_section(segment, b".xdata".to_vec(), SectionKind::ReadOnlyData);
let segment = obj.segment_name(StandardSegment::Data).to_vec();
let pdata_id = obj.add_section(segment, b".pdata".to_vec(), SectionKind::ReadOnlyData);
self.write_windows_unwind_info(obj, xdata_id, pdata_id, text_section_size);
}
if self.systemv_unwind_info.len() > 0 {
let segment = obj.segment_name(StandardSegment::Data).to_vec();
let section_id =
obj.add_section(segment, b".eh_frame".to_vec(), SectionKind::ReadOnlyData);
self.write_systemv_unwind_info(compiler, obj, section_id, text_section_size)
}
}
/// This function appends a nonstandard section to the object which is only
/// used during `CodeMemory::publish`.
///
/// This custom section effectively stores a `[RUNTIME_FUNCTION; N]` into
/// the object file itself. This way registration of unwind info can simply
/// pass this slice to the OS itself and there's no need to recalculate
/// anything on the other end of loading a module from a precompiled object.
///
/// Support for reading this is in `crates/jit/src/unwind/winx64.rs`.
fn write_windows_unwind_info(
&self,
obj: &mut Object<'_>,
xdata_id: SectionId,
pdata_id: SectionId,
text_section_size: u64,
) {
// Currently the binary format supported here only supports
// little-endian for x86_64, or at least that's all where it's tested.
// This may need updates for other platforms.
assert_eq!(obj.architecture(), Architecture::X86_64);
// Append the `.xdata` section, or the actual unwinding information
// codes and such which were built as we found unwind information for
// functions.
obj.append_section_data(xdata_id, &self.windows_xdata, 4);
// Next append the `.pdata` section, or the array of `RUNTIME_FUNCTION`
// structures stored in the binary.
//
// This memory will be passed at runtime to `RtlAddFunctionTable` which
// takes a "base address" and the entries within `RUNTIME_FUNCTION` are
// all relative to this base address. The base address we pass is the
// address of the text section itself so all the pointers here must be
// text-section-relative. The `begin` and `end` fields for the function
// it describes are already text-section-relative, but the
// `unwind_address` field needs to be updated here since the value
// stored right now is `xdata`-section-relative. We know that the
// `xdata` section follows the `.text` section so the
// `text_section_size` is added in to calculate the final
// `.text`-section-relative address of the unwind information.
let mut pdata = Vec::with_capacity(self.windows_pdata.len() * 3 * 4);
for info in self.windows_pdata.iter() {
pdata.extend_from_slice(&info.begin.to_le_bytes());
pdata.extend_from_slice(&info.end.to_le_bytes());
let address = text_section_size + u64::from(info.unwind_address);
let address = u32::try_from(address).unwrap();
pdata.extend_from_slice(&address.to_le_bytes());
}
obj.append_section_data(pdata_id, &pdata, 4);
}
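// Worked example (added for clarity, hypothetical numbers): for a function at text
// offset 0x40 with length 0x20 whose unwind info was written at `.xdata` offset 0x10,
// and a page-aligned text section of 0x1000 bytes, the serialized `.pdata` entry is
// the twelve little-endian bytes
//   40 00 00 00  60 00 00 00  10 10 00 00
// i.e. begin = 0x40, end = 0x60, unwind_address = 0x1000 + 0x10 = 0x1010.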
/// This function appends a nonstandard section to the object which is only
/// used during `CodeMemory::publish`.
///
/// This will generate a `.eh_frame` section, but not one that can be
/// naively loaded. The goal of this section is that we can create the
/// section once here and never again does it need to change. To describe
/// dynamically loaded functions though each individual FDE needs to talk
/// about the function's absolute address that it's referencing. Naturally
/// we don't actually know the function's absolute address when we're
/// creating an object here.
///
/// To solve this problem the FDE address encoding mode is set to
/// `DW_EH_PE_pcrel`. This means that the actual effective address that the
/// FDE describes is relative to the address of the FDE itself. By
/// leveraging this relative-ness we can assume that the relative distance
/// between the FDE and the function it describes is constant, which should
/// allow us to generate an FDE ahead-of-time here.
///
/// For now this assumes that all the code of functions will start at a
/// page-aligned address when loaded into memory. The eh_frame encoded here
/// then assumes that the text section is itself page aligned to its size
/// and the eh_frame will follow just after the text section. This means
/// that the relative offset we're using here is the FDE going backwards
/// into the text section itself.
///
/// Note that the library we're using to create the FDEs, `gimli`, doesn't
/// actually encode addresses relative to the FDE itself. Instead the
/// addresses are encoded relative to the start of the `.eh_frame` section.
/// This makes it much easier for us where we provide the relative offset
/// from the start of `.eh_frame` to the function in the text section, which
/// given our layout basically means the offset of the function in the text
/// section from the end of the text section.
///
/// A final note is that the reason we page-align the text section's size is
/// so the .eh_frame lives on a separate page from the text section itself.
/// This allows `.eh_frame` to have different virtual memory permissions,
/// such as being purely read-only instead of read/execute like the code
/// bits.
fn write_systemv_unwind_info(
&self,
compiler: &dyn Compiler,
obj: &mut Object<'_>,
section_id: SectionId,
text_section_size: u64,
) {
let mut cie = compiler
.create_systemv_cie()
.expect("must be able to create a CIE for system-v unwind info");
let mut table = FrameTable::default();
cie.fde_address_encoding = gimli::constants::DW_EH_PE_pcrel;
let cie_id = table.add_cie(cie);
for (text_section_off, unwind_info) in self.systemv_unwind_info.iter() {
let backwards_off = text_section_size - text_section_off;
let actual_offset = -i64::try_from(backwards_off).unwrap();
// Note that gimli wants an unsigned 64-bit integer here, but
// unwinders just use this constant for a relative addition with the
// address of the FDE, which means that the sign doesn't actually
// matter.
let fde = unwind_info.to_fde(Address::Constant(actual_offset as | /// within `CompiledFunction` and the return value must be an index where
/// the target will be defined by the `n`th call to `append_func`. | random_line_split |
obj.rs | calls that relocations are applied
/// against.
///
/// Note that this isn't typically used. It's only used for SSE-disabled
/// builds without SIMD on x86_64 right now.
libcall_symbols: HashMap<LibCall, SymbolId>,
ctrl_plane: ControlPlane,
}
impl<'a> ModuleTextBuilder<'a> {
/// Creates a new builder for the text section of an executable.
///
/// The `.text` section will be appended to the specified `obj` along with
/// any unwinding or such information as necessary. The `num_funcs`
/// parameter indicates the number of times the `append_func` function will
/// be called. The `finish` function will panic if this contract is not met.
pub fn new(
obj: &'a mut Object<'static>,
compiler: &'a dyn Compiler,
text: Box<dyn TextSectionBuilder>,
) -> Self {
// Entire code (functions and trampolines) will be placed
// in the ".text" section.
let text_section = obj.add_section(
obj.segment_name(StandardSegment::Text).to_vec(),
TEXT_SECTION_NAME.to_vec(),
SectionKind::Text,
);
Self {
compiler,
obj,
text_section,
unwind_info: Default::default(),
text,
libcall_symbols: HashMap::default(),
ctrl_plane: ControlPlane::default(),
}
}
/// Appends the `func` specified named `name` to this object.
///
/// The `resolve_reloc_target` closure is used to resolve a relocation
/// target to an adjacent function which has already been added or will be
/// added to this object. The argument is the relocation target specified
/// within `CompiledFunction` and the return value must be an index where
/// the target will be defined by the `n`th call to `append_func`.
///
/// Returns the symbol associated with the function as well as the range
/// that the function resides within the text section.
pub fn append_func(
&mut self,
name: &str,
compiled_func: &'a CompiledFunction<impl CompiledFuncEnv>,
resolve_reloc_target: impl Fn(FuncIndex) -> usize,
) -> (SymbolId, Range<u64>) | self.unwind_info.push(off, body_len, info);
}
for r in compiled_func.relocations() {
match r.reloc_target {
// Relocations against user-defined functions means that this is
// a relocation against a module-local function, typically a
// call between functions. The `text` field is given priority to
// resolve this relocation before we actually emit an object
// file, but if it can't handle it then we pass through the
// relocation.
RelocationTarget::UserFunc(index) => {
let target = resolve_reloc_target(index);
if self
.text
.resolve_reloc(off + u64::from(r.offset), r.reloc, r.addend, target)
{
continue;
}
// At this time it's expected that all relocations are
// handled by `text.resolve_reloc`, and anything that isn't
// handled is a bug in `text.resolve_reloc` or something
// transitively there. If truly necessary, though, then this
// loop could also be updated to forward the relocation to
// the final object file as well.
panic!(
"unresolved relocation could not be processed against \
{index:?}: {r:?}"
);
}
// Relocations against libcalls are not common at this time and
// are only used in non-default configurations that disable wasm
// SIMD, disable SSE features, and for wasm modules that still
// use floating point operations.
//
// Currently these relocations are all expected to be absolute
// 8-byte relocations so that's asserted here and then encoded
// directly into the object as a normal object relocation. This
// is processed at module load time to resolve the relocations.
RelocationTarget::LibCall(call) => {
let symbol = *self.libcall_symbols.entry(call).or_insert_with(|| {
self.obj.add_symbol(Symbol {
name: libcall_name(call).as_bytes().to_vec(),
value: 0,
size: 0,
kind: SymbolKind::Text,
scope: SymbolScope::Linkage,
weak: false,
section: SymbolSection::Undefined,
flags: SymbolFlags::None,
})
});
let (encoding, kind, size) = match r.reloc {
Reloc::Abs8 => (
object::RelocationEncoding::Generic,
object::RelocationKind::Absolute,
8,
),
other => unimplemented!("unimplemented relocation kind {other:?}"),
};
self.obj
.add_relocation(
self.text_section,
object::write::Relocation {
symbol,
size,
kind,
encoding,
offset: off + u64::from(r.offset),
addend: r.addend,
},
)
.unwrap();
}
};
}
(symbol_id, off..off + body_len)
}
/// Forces "veneers" to be used for inter-function calls in the text
/// section which means that in-bounds optimized addresses are never used.
///
/// This is only useful for debugging cranelift itself and typically this
/// option is disabled.
pub fn force_veneers(&mut self) {
self.text.force_veneers();
}
/// Appends the specified amount of bytes of padding into the text section.
///
/// This is only useful when fuzzing and/or debugging cranelift itself and
/// for production scenarios `padding` is 0 and this function does nothing.
pub fn append_padding(&mut self, padding: usize) {
if padding == 0 {
return;
}
self.text
.append(false, &vec![0; padding], 1, &mut self.ctrl_plane);
}
/// Indicates that the text section has been written completely and this
/// will finish appending it to the original object.
///
/// Note that this will also write out the unwind information sections if
/// necessary.
pub fn finish(mut self) {
// Finish up the text section now that we're done adding functions.
let text = self.text.finish(&mut self.ctrl_plane);
self.obj
.section_mut(self.text_section)
.set_data(text, self.compiler.page_size_align());
// Append the unwind information for all our functions, if necessary.
self.unwind_info
.append_section(self.compiler, self.obj, self.text_section);
}
}
/// Builder used to create unwind information for a set of functions added to a
/// text section.
#[derive(Default)]
struct UnwindInfoBuilder<'a> {
windows_xdata: Vec<u8>,
windows_pdata: Vec<RUNTIME_FUNCTION>,
systemv_unwind_info: Vec<(u64, &'a systemv::UnwindInfo)>,
}
// This is a mirror of `RUNTIME_FUNCTION` in the Windows API, but defined here
// to ensure everything is always `u32` and to have it available on all
// platforms. Note that all of these specifiers here are relative to a "base
// address" which we define as the base of where the text section is eventually
// loaded.
#[allow(non_camel_case_types)]
struct RUNTIME_FUNCTION {
begin: u32,
end: u32,
unwind_address: u32,
}
impl<'a> UnwindInfoBuilder<'a> {
/// Pushes the unwind information for a function into this builder.
///
/// The function being described must be located at `function_offset` within
/// the text section itself, and the function's size is specified by
/// `function_len`.
///
/// The `info` should come from Cranelift and is handled here depending on
/// its flavor.
fn push(&mut self, function_offset: u64, function_len: u64, info: &'a UnwindInfo) {
match info {
// Windows unwind information is stored in two locations:
//
// * First is the actual unwinding information which is stored
// in the `.xdata` section. This is where `info`'s emitted
// information will go into.
// * Second are pointers to connect all this unwind information,
// stored in the `.pdata` section. The `.pdata` section is an
// array of `RUNTIME_FUNCTION` structures.
//
// Due to how these will be loaded at runtime the `.pdata` isn't
// actually assembled byte-wise here. Instead that's deferred to
// happen later during `write_windows_unwind_info` which will apply
// a further offset to `unwind_address`.
UnwindInfo::WindowsX64(info) => {
let unwind_size = info.emit_size();
let mut unwind_info = vec![0; unwind_size];
info.emit(&mut unwind_info);
// `.xdata` entries are always 4-byte aligned
//
// FIXME: in theory we could "intern" the `unwind_info` value
// here within the `.xdata` section. Most of our unwind
// information for functions is probably pretty similar in which
// case the `.xdata` could be quite small and `.pdata` could
// have multiple functions point to the same unwinding
// information.
while self.windows_xdata.len() % 4 != 0 {
self.windows_xdata.push(0x00);
}
let unwind_address = self.windows_xdata.len();
self.windows_xdata.extend_from_slice(&unwind_info);
// Record a `RUNTIME_FUNCTION` which this will point to.
self.windows_pdata.push(RUNTIME_FUNCTION {
begin: u32::try_from(function_offset).unwrap(),
end: u32::try_from(function_offset + function_len).unwrap(),
unwind_address: u32::try_from(unwind_address).unwrap(),
});
}
// System-V is different enough that we just record the unwinding
// information to get processed at a later time.
UnwindInfo::SystemV(info) => {
self.systemv_unwind_info.push((function_offset, info));
}
_ => panic!("some unwind info isn't handled here"),
}
}
/// Appends the unwind information section, if any, to the `obj` specified.
///
/// This function must be called immediately after the text section was
/// added to a builder. The unwind information section must trail the text
/// section immediately.
///
/// The `text_section`'s section identifier is passed into this function.
fn append_section(
&self,
compiler: &dyn Compiler,
obj: &mut Object<'_>,
text_section: SectionId,
) {
// This write will align the text section to a page boundary and then
// return the offset at that point. This gives us the full size of the
// text section at that point, after alignment.
let text_section_size =
obj.append_section_data(text_section, &[], compiler.page_size_align());
if self.windows_xdata.len() > 0 {
assert!(self.systemv_unwind_info.len() == 0);
// The `.xdata` section must come first to be just-after the `.text`
// section for the reasons documented in `write_windows_unwind_info`
// below.
let segment = obj.segment_name(StandardSegment::Data).to_vec();
let xdata_id = obj.add_section(segment, b".xdata".to_vec(), SectionKind::ReadOnlyData);
let segment = obj.segment_name(StandardSegment::Data).to_vec();
let pdata_id = obj.add_section(segment, b".pdata".to_vec(), SectionKind::ReadOnlyData);
self.write_windows_unwind_info(obj, xdata_id, pdata_id, text_section_size);
}
if self.systemv_unwind_info.len() > 0 {
let segment = obj.segment_name(StandardSegment::Data).to_vec();
let section_id =
obj.add_section(segment, b".eh_frame".to_vec(), SectionKind::ReadOnlyData);
self.write_systemv_unwind_info(compiler, obj, section_id, text_section_size)
}
}
/// This function appends a nonstandard section to the object which is only
/// used during `CodeMemory::publish`.
///
/// This custom section effectively stores a `[RUNTIME_FUNCTION; N]` into
/// the object file itself. This way registration of unwind info can simply
/// pass this slice to the OS itself and there's no need to recalculate
/// anything on the other end of loading a module from a precompiled object.
///
/// Support for reading this is in `crates/jit/src/unwind/winx64.rs`.
fn write_windows_unwind_info(
&self,
obj: &mut Object<'_>,
xdata_id: SectionId,
pdata_id: SectionId,
text_section_size: u64,
) {
// Currently the binary format supported here only supports
// little-endian for x86_64, or at least that's all where it's tested.
// This may need updates for other platforms.
assert_eq!(obj.architecture(), Architecture::X86_64);
// Append the `.xdata` section, or the actual unwinding information
// codes and such which were built as we found unwind information for
// functions.
obj.append_section_data(xdata_id, &self.windows_xdata, 4);
// Next append the `.pdata` section, or the array of `RUNTIME_FUNCTION`
// structures stored in the binary.
//
// This memory will be passed at runtime to `RtlAddFunctionTable` which
// takes a "base address" and the entries within `RUNTIME_FUNCTION` are
// all relative to this base address. The base address we pass is the
// address of the text section itself so all the pointers here must be
// text-section-relative. The `begin` and `end` fields for the function
// it describes are already text-section-relative, but the
// `unwind_address` field needs to be updated here since the value
// stored right now is `xdata`-section-relative. We know that the
// `xdata` section follows the `.text` section so the
// `text_section_size` is added in to calculate the final
// `.text`-section-relative address of the unwind information.
let mut pdata = Vec::with_capacity(self.windows_pdata.len() * 3 * 4);
for info in self.windows_pdata.iter() {
pdata.extend_from_slice(&info.begin.to_le_bytes());
pdata.extend_from_slice(&info.end.to_le_bytes());
let address = text_section_size + u64::from(info.unwind_address);
let address = u32::try_from(address).unwrap();
pdata.extend_from_slice(&address.to_le_bytes());
}
obj.append_section_data(pdata_id, &pdata, 4);
}
/// This function appends a nonstandard section to the object which is only
/// used during `CodeMemory::publish`.
///
/// This will generate a `.eh_frame` section, but not one that can be
/// naively loaded. The goal of this section is that we can create the
/// section once here and never again does it need to change. To describe
/// dynamically loaded functions though each individual FDE needs to talk
/// about the function's absolute address that it's referencing. Naturally
/// we don't actually know the function's absolute address when we're
/// creating an object here.
///
/// To solve this problem the FDE address encoding mode is set to
/// `DW_EH_PE_pcrel`. This means that the actual effective address that the
/// FDE describes is relative to the address of the FDE itself. By
/// leveraging this relative-ness we can assume that the relative distance
/// between the FDE and the function it describes is constant, which should
/// allow us to generate an FDE ahead-of-time here.
///
/// For now this assumes that all the code of functions will start at a
/// page-aligned address when loaded into memory. The eh_frame encoded here
/// then assumes that the text section is itself page aligned to its size
/// and the eh_frame will follow just after the text section. This means
/// that the relative offset we're using here is the FDE going backwards
/// into the text section itself.
///
/// Note that the library we're using to create the FDEs, `gimli`, doesn't
/// actually encode addresses relative to the FDE itself. Instead the
/// addresses are encoded relative to the start of the `.eh_frame` section.
/// This makes it much easier for us where we provide the relative offset
/// from the start of `.eh_frame` to the function in the text section, which
/// given our layout basically means the offset of the function in the text
/// section from the end of the text section.
///
/// A final note is that the reason we page-align the text section's size is
/// so the .eh_frame lives on a separate page from the text section itself.
/// This allows `.eh_frame` to have different virtual memory permissions,
/// such as being purely read-only instead of read/execute like the code
/// bits.
fn write_systemv_unwind_info(
&self,
compiler: &dyn Compiler,
obj: &mut Object<'_>,
section_id: SectionId,
text_section_size: u64,
) {
let mut cie = compiler
.create_systemv_cie()
.expect("must be able to create a CIE for system-v unwind info");
let mut table = FrameTable::default();
cie.fde_address_encoding = gimli::constants::DW_EH_PE_pcrel;
let cie_id = table.add_cie(cie);
for (text_section_off, unwind_info) in self.systemv_unwind_info.iter() {
let backwards_off = text_section_size - text_section_off;
let actual_offset = -i64::try_from(backwards_off).unwrap();
// Note that gimli wants an unsigned 64-bit integer here, but
// unwinders just use this constant for a relative addition with the
// address of the FDE, which means that the sign doesn't actually
// matter.
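// Worked example (added commentary, hypothetical numbers): for a function starting
// at text offset 0x200 in a text section padded to 0x1000 bytes, `backwards_off` is
// 0x1000 - 0x200 = 0xe00 and `actual_offset` is -0xe00, i.e. the FDE points backwards
// from the start of `.eh_frame` into the text section.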
let fde = unwind_info.to_fde(Address::Constant(actual_ | {
let body = compiled_func.buffer.data();
let alignment = compiled_func.alignment;
let body_len = body.len() as u64;
let off = self
.text
.append(true, &body, alignment, &mut self.ctrl_plane);
let symbol_id = self.obj.add_symbol(Symbol {
name: name.as_bytes().to_vec(),
value: off,
size: body_len,
kind: SymbolKind::Text,
scope: SymbolScope::Compilation,
weak: false,
section: SymbolSection::Section(self.text_section),
flags: SymbolFlags::None,
});
if let Some(info) = compiled_func.unwind_info() { | identifier_body |
obj.rs | calls that relocations are applied
/// against.
///
/// Note that this isn't typically used. It's only used for SSE-disabled
/// builds without SIMD on x86_64 right now.
libcall_symbols: HashMap<LibCall, SymbolId>,
ctrl_plane: ControlPlane,
}
impl<'a> ModuleTextBuilder<'a> {
/// Creates a new builder for the text section of an executable.
///
/// The `.text` section will be appended to the specified `obj` along with
/// any unwinding or such information as necessary. The `num_funcs`
/// parameter indicates the number of times the `append_func` function will
/// be called. The `finish` function will panic if this contract is not met.
pub fn new(
obj: &'a mut Object<'static>,
compiler: &'a dyn Compiler,
text: Box<dyn TextSectionBuilder>,
) -> Self {
// Entire code (functions and trampolines) will be placed
// in the ".text" section.
let text_section = obj.add_section(
obj.segment_name(StandardSegment::Text).to_vec(),
TEXT_SECTION_NAME.to_vec(),
SectionKind::Text,
);
Self {
compiler,
obj,
text_section,
unwind_info: Default::default(),
text,
libcall_symbols: HashMap::default(),
ctrl_plane: ControlPlane::default(),
}
}
/// Appends the `func` specified named `name` to this object.
///
/// The `resolve_reloc_target` closure is used to resolve a relocation
/// target to an adjacent function which has already been added or will be
/// added to this object. The argument is the relocation target specified
/// within `CompiledFunction` and the return value must be an index where
/// the target will be defined by the `n`th call to `append_func`.
///
/// Returns the symbol associated with the function as well as the range
/// that the function resides within the text section.
pub fn append_func(
&mut self,
name: &str,
compiled_func: &'a CompiledFunction<impl CompiledFuncEnv>,
resolve_reloc_target: impl Fn(FuncIndex) -> usize,
) -> (SymbolId, Range<u64>) {
let body = compiled_func.buffer.data();
let alignment = compiled_func.alignment;
let body_len = body.len() as u64;
let off = self
.text
.append(true, &body, alignment, &mut self.ctrl_plane);
let symbol_id = self.obj.add_symbol(Symbol {
name: name.as_bytes().to_vec(),
value: off,
size: body_len,
kind: SymbolKind::Text,
scope: SymbolScope::Compilation,
weak: false,
section: SymbolSection::Section(self.text_section),
flags: SymbolFlags::None,
});
if let Some(info) = compiled_func.unwind_info() {
self.unwind_info.push(off, body_len, info);
}
for r in compiled_func.relocations() {
match r.reloc_target {
// Relocations against user-defined functions means that this is
// a relocation against a module-local function, typically a
// call between functions. The `text` field is given priority to
// resolve this relocation before we actually emit an object
// file, but if it can't handle it then we pass through the
// relocation.
RelocationTarget::UserFunc(index) => {
let target = resolve_reloc_target(index);
if self
.text
.resolve_reloc(off + u64::from(r.offset), r.reloc, r.addend, target)
{
continue;
}
// At this time it's expected that all relocations are
// handled by `text.resolve_reloc`, and anything that isn't
// handled is a bug in `text.resolve_reloc` or something
// transitively there. If truly necessary, though, then this
// loop could also be updated to forward the relocation to
// the final object file as well.
panic!(
"unresolved relocation could not be processed against \
{index:?}: {r:?}"
);
}
// Relocations against libcalls are not common at this time and
// are only used in non-default configurations that disable wasm
// SIMD, disable SSE features, and for wasm modules that still
// use floating point operations.
//
// Currently these relocations are all expected to be absolute
// 8-byte relocations so that's asserted here and then encoded
// directly into the object as a normal object relocation. This
// is processed at module load time to resolve the relocations.
RelocationTarget::LibCall(call) => {
let symbol = *self.libcall_symbols.entry(call).or_insert_with(|| {
self.obj.add_symbol(Symbol {
name: libcall_name(call).as_bytes().to_vec(),
value: 0,
size: 0,
kind: SymbolKind::Text,
scope: SymbolScope::Linkage,
weak: false,
section: SymbolSection::Undefined,
flags: SymbolFlags::None,
})
});
let (encoding, kind, size) = match r.reloc {
Reloc::Abs8 => (
object::RelocationEncoding::Generic,
object::RelocationKind::Absolute,
8,
),
other => unimplemented!("unimplemented relocation kind {other:?}"),
};
self.obj
.add_relocation(
self.text_section,
object::write::Relocation {
symbol,
size,
kind,
encoding,
offset: off + u64::from(r.offset),
addend: r.addend,
},
)
.unwrap();
}
};
}
(symbol_id, off..off + body_len)
}
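// Hypothetical usage sketch (added commentary, not from the original source): a
// caller that appends module functions in `FuncIndex` order could resolve local call
// relocations with a closure mapping the index straight to its emission position,
// e.g.
//   let (sym, range) = builder.append_func("_wasm_function_3", &func, |idx| idx.as_u32() as usize);
// where `builder`, `func` and the symbol name are stand-ins supplied by the caller.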
/// Forces "veneers" to be used for inter-function calls in the text
/// section which means that in-bounds optimized addresses are never used.
///
/// This is only useful for debugging cranelift itself and typically this
/// option is disabled.
pub fn | (&mut self) {
self.text.force_veneers();
}
/// Appends the specified amount of bytes of padding into the text section.
///
/// This is only useful when fuzzing and/or debugging cranelift itself and
/// for production scenarios `padding` is 0 and this function does nothing.
pub fn append_padding(&mut self, padding: usize) {
if padding == 0 {
return;
}
self.text
.append(false, &vec![0; padding], 1, &mut self.ctrl_plane);
}
/// Indicates that the text section has been written completely and this
/// will finish appending it to the original object.
///
/// Note that this will also write out the unwind information sections if
/// necessary.
pub fn finish(mut self) {
// Finish up the text section now that we're done adding functions.
let text = self.text.finish(&mut self.ctrl_plane);
self.obj
.section_mut(self.text_section)
.set_data(text, self.compiler.page_size_align());
// Append the unwind information for all our functions, if necessary.
self.unwind_info
.append_section(self.compiler, self.obj, self.text_section);
}
}
/// Builder used to create unwind information for a set of functions added to a
/// text section.
#[derive(Default)]
struct UnwindInfoBuilder<'a> {
windows_xdata: Vec<u8>,
windows_pdata: Vec<RUNTIME_FUNCTION>,
systemv_unwind_info: Vec<(u64, &'a systemv::UnwindInfo)>,
}
// This is a mirror of `RUNTIME_FUNCTION` in the Windows API, but defined here
// to ensure everything is always `u32` and to have it available on all
// platforms. Note that all of these specifiers here are relative to a "base
// address" which we define as the base of where the text section is eventually
// loaded.
#[allow(non_camel_case_types)]
struct RUNTIME_FUNCTION {
begin: u32,
end: u32,
unwind_address: u32,
}
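// Added note: the three `u32` fields mirror the Windows x64 RUNTIME_FUNCTION layout
// (BeginAddress, EndAddress, UnwindInfoAddress), so each serialized entry occupies
// 12 bytes; defining the mirror locally keeps it usable when cross-compiling from
// non-Windows hosts, as the comment above explains.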
impl<'a> UnwindInfoBuilder<'a> {
/// Pushes the unwind information for a function into this builder.
///
/// The function being described must be located at `function_offset` within
/// the text section itself, and the function's size is specified by
/// `function_len`.
///
/// The `info` should come from Cranelift and is handled here depending on
/// its flavor.
fn push(&mut self, function_offset: u64, function_len: u64, info: &'a UnwindInfo) {
match info {
// Windows unwind information is stored in two locations:
//
// * First is the actual unwinding information which is stored
// in the `.xdata` section. This is where `info`'s emitted
// information will go into.
// * Second are pointers to connect all this unwind information,
// stored in the `.pdata` section. The `.pdata` section is an
// array of `RUNTIME_FUNCTION` structures.
//
// Due to how these will be loaded at runtime the `.pdata` isn't
// actually assembled byte-wise here. Instead that's deferred to
// happen later during `write_windows_unwind_info` which will apply
// a further offset to `unwind_address`.
UnwindInfo::WindowsX64(info) => {
let unwind_size = info.emit_size();
let mut unwind_info = vec![0; unwind_size];
info.emit(&mut unwind_info);
// `.xdata` entries are always 4-byte aligned
//
// FIXME: in theory we could "intern" the `unwind_info` value
// here within the `.xdata` section. Most of our unwind
// information for functions is probably pretty similar in which
// case the `.xdata` could be quite small and `.pdata` could
// have multiple functions point to the same unwinding
// information.
while self.windows_xdata.len() % 4 != 0 {
self.windows_xdata.push(0x00);
}
let unwind_address = self.windows_xdata.len();
self.windows_xdata.extend_from_slice(&unwind_info);
// Record a `RUNTIME_FUNCTION` which this will point to.
self.windows_pdata.push(RUNTIME_FUNCTION {
begin: u32::try_from(function_offset).unwrap(),
end: u32::try_from(function_offset + function_len).unwrap(),
unwind_address: u32::try_from(unwind_address).unwrap(),
});
}
// System-V is different enough that we just record the unwinding
// information to get processed at a later time.
UnwindInfo::SystemV(info) => {
self.systemv_unwind_info.push((function_offset, info));
}
_ => panic!("some unwind info isn't handled here"),
}
}
/// Appends the unwind information section, if any, to the `obj` specified.
///
/// This function must be called immediately after the text section was
/// added to a builder. The unwind information section must trail the text
/// section immediately.
///
/// The `text_section`'s section identifier is passed into this function.
fn append_section(
&self,
compiler: &dyn Compiler,
obj: &mut Object<'_>,
text_section: SectionId,
) {
// This write will align the text section to a page boundary and then
// return the offset at that point. This gives us the full size of the
// text section at that point, after alignment.
let text_section_size =
obj.append_section_data(text_section, &[], compiler.page_size_align());
if self.windows_xdata.len() > 0 {
assert!(self.systemv_unwind_info.len() == 0);
// The `.xdata` section must come first to be just-after the `.text`
// section for the reasons documented in `write_windows_unwind_info`
// below.
let segment = obj.segment_name(StandardSegment::Data).to_vec();
let xdata_id = obj.add_section(segment, b".xdata".to_vec(), SectionKind::ReadOnlyData);
let segment = obj.segment_name(StandardSegment::Data).to_vec();
let pdata_id = obj.add_section(segment, b".pdata".to_vec(), SectionKind::ReadOnlyData);
self.write_windows_unwind_info(obj, xdata_id, pdata_id, text_section_size);
}
if self.systemv_unwind_info.len() > 0 {
let segment = obj.segment_name(StandardSegment::Data).to_vec();
let section_id =
obj.add_section(segment, b".eh_frame".to_vec(), SectionKind::ReadOnlyData);
self.write_systemv_unwind_info(compiler, obj, section_id, text_section_size)
}
}
/// This function appends a nonstandard section to the object which is only
/// used during `CodeMemory::publish`.
///
/// This custom section effectively stores a `[RUNTIME_FUNCTION; N]` into
/// the object file itself. This way registration of unwind info can simply
/// pass this slice to the OS itself and there's no need to recalculate
/// anything on the other end of loading a module from a precompiled object.
///
/// Support for reading this is in `crates/jit/src/unwind/winx64.rs`.
fn write_windows_unwind_info(
&self,
obj: &mut Object<'_>,
xdata_id: SectionId,
pdata_id: SectionId,
text_section_size: u64,
) {
// Currently the binary format supported here only supports
// little-endian for x86_64, or at least that's all where it's tested.
// This may need updates for other platforms.
assert_eq!(obj.architecture(), Architecture::X86_64);
// Append the `.xdata` section, or the actual unwinding information
// codes and such which were built as we found unwind information for
// functions.
obj.append_section_data(xdata_id, &self.windows_xdata, 4);
// Next append the `.pdata` section, or the array of `RUNTIME_FUNCTION`
// structures stored in the binary.
//
// This memory will be passed at runtime to `RtlAddFunctionTable` which
// takes a "base address" and the entries within `RUNTIME_FUNCTION` are
// all relative to this base address. The base address we pass is the
// address of the text section itself so all the pointers here must be
// text-section-relative. The `begin` and `end` fields for the function
// it describes are already text-section-relative, but the
// `unwind_address` field needs to be updated here since the value
// stored right now is `xdata`-section-relative. We know that the
// `xdata` section follows the `.text` section so the
// `text_section_size` is added in to calculate the final
// `.text`-section-relative address of the unwind information.
let mut pdata = Vec::with_capacity(self.windows_pdata.len() * 3 * 4);
for info in self.windows_pdata.iter() {
pdata.extend_from_slice(&info.begin.to_le_bytes());
pdata.extend_from_slice(&info.end.to_le_bytes());
let address = text_section_size + u64::from(info.unwind_address);
let address = u32::try_from(address).unwrap();
pdata.extend_from_slice(&address.to_le_bytes());
}
obj.append_section_data(pdata_id, &pdata, 4);
}
/// This function appends a nonstandard section to the object which is only
/// used during `CodeMemory::publish`.
///
/// This will generate a `.eh_frame` section, but not one that can be
/// naively loaded. The goal of this section is that we can create the
/// section once here and never again does it need to change. To describe
/// dynamically loaded functions though each individual FDE needs to talk
/// about the function's absolute address that it's referencing. Naturally
/// we don't actually know the function's absolute address when we're
/// creating an object here.
///
/// To solve this problem the FDE address encoding mode is set to
/// `DW_EH_PE_pcrel`. This means that the actual effective address that the
/// FDE describes is relative to the address of the FDE itself. By
/// leveraging this relative-ness we can assume that the relative distance
/// between the FDE and the function it describes is constant, which should
/// allow us to generate an FDE ahead-of-time here.
///
/// For now this assumes that all the code of functions will start at a
/// page-aligned address when loaded into memory. The eh_frame encoded here
/// then assumes that the text section is itself page aligned to its size
/// and the eh_frame will follow just after the text section. This means
/// that the relative offset we're using here is the FDE going backwards
/// into the text section itself.
///
/// Note that the library we're using to create the FDEs, `gimli`, doesn't
/// actually encode addresses relative to the FDE itself. Instead the
/// addresses are encoded relative to the start of the `.eh_frame` section.
/// This makes it much easier for us where we provide the relative offset
/// from the start of `.eh_frame` to the function in the text section, which
/// given our layout basically means the offset of the function in the text
/// section from the end of the text section.
///
/// A final note is that the reason we page-align the text section's size is
/// so the .eh_frame lives on a separate page from the text section itself.
/// This allows `.eh_frame` to have different virtual memory permissions,
/// such as being purely read-only instead of read/execute like the code
/// bits.
fn write_systemv_unwind_info(
&self,
compiler: &dyn Compiler,
obj: &mut Object<'_>,
section_id: SectionId,
text_section_size: u64,
) {
let mut cie = compiler
.create_systemv_cie()
.expect("must be able to create a CIE for system-v unwind info");
let mut table = FrameTable::default();
cie.fde_address_encoding = gimli::constants::DW_EH_PE_pcrel;
let cie_id = table.add_cie(cie);
for (text_section_off, unwind_info) in self.systemv_unwind_info.iter() {
let backwards_off = text_section_size - text_section_off;
let actual_offset = -i64::try_from(backwards_off).unwrap();
// Note that gimli wants an unsigned 64-bit integer here, but
// unwinders just use this constant for a relative addition with the
// address of the FDE, which means that the sign doesn't actually
// matter.
let fde = unwind_info.to_fde(Address::Constant(actual_ | force_veneers | identifier_name |
obj.rs | calls that relocations are applied
/// against.
///
/// Note that this isn't typically used. It's only used for SSE-disabled
/// builds without SIMD on x86_64 right now.
libcall_symbols: HashMap<LibCall, SymbolId>,
ctrl_plane: ControlPlane,
}
impl<'a> ModuleTextBuilder<'a> {
/// Creates a new builder for the text section of an executable.
///
/// The `.text` section will be appended to the specified `obj` along with
/// any unwinding or such information as necessary. The `num_funcs`
/// parameter indicates the number of times the `append_func` function will
/// be called. The `finish` function will panic if this contract is not met.
pub fn new(
obj: &'a mut Object<'static>,
compiler: &'a dyn Compiler,
text: Box<dyn TextSectionBuilder>,
) -> Self {
// Entire code (functions and trampolines) will be placed
// in the ".text" section.
let text_section = obj.add_section(
obj.segment_name(StandardSegment::Text).to_vec(),
TEXT_SECTION_NAME.to_vec(),
SectionKind::Text,
);
Self {
compiler,
obj,
text_section,
unwind_info: Default::default(),
text,
libcall_symbols: HashMap::default(),
ctrl_plane: ControlPlane::default(),
}
}
/// Appends the `func` specified named `name` to this object.
///
/// The `resolve_reloc_target` closure is used to resolve a relocation
/// target to an adjacent function which has already been added or will be
/// added to this object. The argument is the relocation target specified
/// within `CompiledFunction` and the return value must be an index where
/// the target will be defined by the `n`th call to `append_func`.
///
/// Returns the symbol associated with the function as well as the range
/// that the function resides within the text section.
pub fn append_func(
&mut self,
name: &str,
compiled_func: &'a CompiledFunction<impl CompiledFuncEnv>,
resolve_reloc_target: impl Fn(FuncIndex) -> usize,
) -> (SymbolId, Range<u64>) {
let body = compiled_func.buffer.data();
let alignment = compiled_func.alignment;
let body_len = body.len() as u64;
let off = self
.text
.append(true, &body, alignment, &mut self.ctrl_plane);
let symbol_id = self.obj.add_symbol(Symbol {
name: name.as_bytes().to_vec(),
value: off,
size: body_len,
kind: SymbolKind::Text,
scope: SymbolScope::Compilation,
weak: false,
section: SymbolSection::Section(self.text_section),
flags: SymbolFlags::None,
});
if let Some(info) = compiled_func.unwind_info() |
for r in compiled_func.relocations() {
match r.reloc_target {
// Relocations against user-defined functions means that this is
// a relocation against a module-local function, typically a
// call between functions. The `text` field is given priority to
// resolve this relocation before we actually emit an object
// file, but if it can't handle it then we pass through the
// relocation.
RelocationTarget::UserFunc(index) => {
let target = resolve_reloc_target(index);
if self
.text
.resolve_reloc(off + u64::from(r.offset), r.reloc, r.addend, target)
{
continue;
}
// At this time it's expected that all relocations are
// handled by `text.resolve_reloc`, and anything that isn't
// handled is a bug in `text.resolve_reloc` or something
// transitively there. If truly necessary, though, then this
// loop could also be updated to forward the relocation to
// the final object file as well.
panic!(
"unresolved relocation could not be processed against \
{index:?}: {r:?}"
);
}
// Relocations against libcalls are not common at this time and
// are only used in non-default configurations that disable wasm
// SIMD, disable SSE features, and for wasm modules that still
// use floating point operations.
//
// Currently these relocations are all expected to be absolute
// 8-byte relocations so that's asserted here and then encoded
// directly into the object as a normal object relocation. This
// is processed at module load time to resolve the relocations.
RelocationTarget::LibCall(call) => {
let symbol = *self.libcall_symbols.entry(call).or_insert_with(|| {
self.obj.add_symbol(Symbol {
name: libcall_name(call).as_bytes().to_vec(),
value: 0,
size: 0,
kind: SymbolKind::Text,
scope: SymbolScope::Linkage,
weak: false,
section: SymbolSection::Undefined,
flags: SymbolFlags::None,
})
});
let (encoding, kind, size) = match r.reloc {
Reloc::Abs8 => (
object::RelocationEncoding::Generic,
object::RelocationKind::Absolute,
8,
),
other => unimplemented!("unimplemented relocation kind {other:?}"),
};
self.obj
.add_relocation(
self.text_section,
object::write::Relocation {
symbol,
size,
kind,
encoding,
offset: off + u64::from(r.offset),
addend: r.addend,
},
)
.unwrap();
}
};
}
(symbol_id, off..off + body_len)
}
/// Forces "veneers" to be used for inter-function calls in the text
/// section which means that in-bounds optimized addresses are never used.
///
/// This is only useful for debugging cranelift itself and typically this
/// option is disabled.
pub fn force_veneers(&mut self) {
self.text.force_veneers();
}
/// Appends the specified amount of bytes of padding into the text section.
///
/// This is only useful when fuzzing and/or debugging cranelift itself and
/// for production scenarios `padding` is 0 and this function does nothing.
pub fn append_padding(&mut self, padding: usize) {
if padding == 0 {
return;
}
self.text
.append(false, &vec![0; padding], 1, &mut self.ctrl_plane);
}
/// Indicates that the text section has been written completely and this
/// will finish appending it to the original object.
///
/// Note that this will also write out the unwind information sections if
/// necessary.
pub fn finish(mut self) {
// Finish up the text section now that we're done adding functions.
let text = self.text.finish(&mut self.ctrl_plane);
self.obj
.section_mut(self.text_section)
.set_data(text, self.compiler.page_size_align());
// Append the unwind information for all our functions, if necessary.
self.unwind_info
.append_section(self.compiler, self.obj, self.text_section);
}
}
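// Hypothetical end-to-end sketch (added commentary, not from the original source):
// the intended call sequence is construct, append every function, then finish, e.g.
//   let mut builder = ModuleTextBuilder::new(&mut obj, compiler, text_builder);
//   for (name, func) in funcs { builder.append_func(name, func, &resolve); }
//   builder.finish();
// where `obj`, `compiler`, `text_builder`, `funcs` and `resolve` are caller-provided
// stand-ins.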
/// Builder used to create unwind information for a set of functions added to a
/// text section.
#[derive(Default)]
struct UnwindInfoBuilder<'a> {
windows_xdata: Vec<u8>,
windows_pdata: Vec<RUNTIME_FUNCTION>,
systemv_unwind_info: Vec<(u64, &'a systemv::UnwindInfo)>,
}
// This is a mirror of `RUNTIME_FUNCTION` in the Windows API, but defined here
// to ensure everything is always `u32` and to have it available on all
// platforms. Note that all of these specifiers here are relative to a "base
// address" which we define as the base of where the text section is eventually
// loaded.
#[allow(non_camel_case_types)]
struct RUNTIME_FUNCTION {
begin: u32,
end: u32,
unwind_address: u32,
}
impl<'a> UnwindInfoBuilder<'a> {
/// Pushes the unwind information for a function into this builder.
///
/// The function being described must be located at `function_offset` within
/// the text section itself, and the function's size is specified by
/// `function_len`.
///
/// The `info` should come from Cranelift and is handled here depending on
/// its flavor.
fn push(&mut self, function_offset: u64, function_len: u64, info: &'a UnwindInfo) {
match info {
// Windows unwind information is stored in two locations:
//
// * First is the actual unwinding information which is stored
// in the `.xdata` section. This is where `info`'s emitted
// information will go into.
// * Second are pointers to connect all this unwind information,
// stored in the `.pdata` section. The `.pdata` section is an
// array of `RUNTIME_FUNCTION` structures.
//
// Due to how these will be loaded at runtime the `.pdata` isn't
// actually assembled byte-wise here. Instead that's deferred to
// happen later during `write_windows_unwind_info` which will apply
// a further offset to `unwind_address`.
UnwindInfo::WindowsX64(info) => {
let unwind_size = info.emit_size();
let mut unwind_info = vec![0; unwind_size];
info.emit(&mut unwind_info);
// `.xdata` entries are always 4-byte aligned
//
// FIXME: in theory we could "intern" the `unwind_info` value
// here within the `.xdata` section. Most of our unwind
// information for functions is probably pretty similar in which
// case the `.xdata` could be quite small and `.pdata` could
// have multiple functions point to the same unwinding
// information.
while self.windows_xdata.len() % 4 != 0 {
self.windows_xdata.push(0x00);
}
let unwind_address = self.windows_xdata.len();
self.windows_xdata.extend_from_slice(&unwind_info);
// Record a `RUNTIME_FUNCTION` which this will point to.
self.windows_pdata.push(RUNTIME_FUNCTION {
begin: u32::try_from(function_offset).unwrap(),
end: u32::try_from(function_offset + function_len).unwrap(),
unwind_address: u32::try_from(unwind_address).unwrap(),
});
}
// System-V is different enough that we just record the unwinding
// information to get processed at a later time.
UnwindInfo::SystemV(info) => {
self.systemv_unwind_info.push((function_offset, info));
}
_ => panic!("some unwind info isn't handled here"),
}
}
/// Appends the unwind information section, if any, to the `obj` specified.
///
/// This function must be called immediately after the text section was
/// added to a builder. The unwind information section must trail the text
/// section immediately.
///
/// The `text_section`'s section identifier is passed into this function.
fn append_section(
&self,
compiler: &dyn Compiler,
obj: &mut Object<'_>,
text_section: SectionId,
) {
// This write will align the text section to a page boundary and then
// return the offset at that point. This gives us the full size of the
// text section at that point, after alignment.
let text_section_size =
obj.append_section_data(text_section, &[], compiler.page_size_align());
if self.windows_xdata.len() > 0 {
assert!(self.systemv_unwind_info.len() == 0);
// The `.xdata` section must come first to be just-after the `.text`
// section for the reasons documented in `write_windows_unwind_info`
// below.
let segment = obj.segment_name(StandardSegment::Data).to_vec();
let xdata_id = obj.add_section(segment, b".xdata".to_vec(), SectionKind::ReadOnlyData);
let segment = obj.segment_name(StandardSegment::Data).to_vec();
let pdata_id = obj.add_section(segment, b".pdata".to_vec(), SectionKind::ReadOnlyData);
self.write_windows_unwind_info(obj, xdata_id, pdata_id, text_section_size);
}
if self.systemv_unwind_info.len() > 0 {
let segment = obj.segment_name(StandardSegment::Data).to_vec();
let section_id =
obj.add_section(segment, b".eh_frame".to_vec(), SectionKind::ReadOnlyData);
self.write_systemv_unwind_info(compiler, obj, section_id, text_section_size)
}
}
/// This function appends a nonstandard section to the object which is only
/// used during `CodeMemory::publish`.
///
/// This custom section effectively stores a `[RUNTIME_FUNCTION; N]` into
/// the object file itself. This way registration of unwind info can simply
/// pass this slice to the OS itself and there's no need to recalculate
/// anything on the other end of loading a module from a precompiled object.
///
/// Support for reading this is in `crates/jit/src/unwind/winx64.rs`.
fn write_windows_unwind_info(
&self,
obj: &mut Object<'_>,
xdata_id: SectionId,
pdata_id: SectionId,
text_section_size: u64,
) {
// Currently the binary format supported here only supports
// little-endian for x86_64, or at least that's all where it's tested.
// This may need updates for other platforms.
assert_eq!(obj.architecture(), Architecture::X86_64);
// Append the `.xdata` section, or the actual unwinding information
// codes and such which were built as we found unwind information for
// functions.
obj.append_section_data(xdata_id, &self.windows_xdata, 4);
// Next append the `.pdata` section, or the array of `RUNTIME_FUNCTION`
// structures stored in the binary.
//
// This memory will be passed at runtime to `RtlAddFunctionTable` which
// takes a "base address" and the entries within `RUNTIME_FUNCTION` are
// all relative to this base address. The base address we pass is the
// address of the text section itself so all the pointers here must be
// text-section-relative. The `begin` and `end` fields for the function
// it describes are already text-section-relative, but the
// `unwind_address` field needs to be updated here since the value
// stored right now is `xdata`-section-relative. We know that the
// `xdata` section follows the `.text` section so the
// `text_section_size` is added in to calculate the final
// `.text`-section-relative address of the unwind information.
let mut pdata = Vec::with_capacity(self.windows_pdata.len() * 3 * 4);
for info in self.windows_pdata.iter() {
pdata.extend_from_slice(&info.begin.to_le_bytes());
pdata.extend_from_slice(&info.end.to_le_bytes());
let address = text_section_size + u64::from(info.unwind_address);
let address = u32::try_from(address).unwrap();
pdata.extend_from_slice(&address.to_le_bytes());
}
obj.append_section_data(pdata_id, &pdata, 4);
}
/// This function appends a nonstandard section to the object which is only
/// used during `CodeMemory::publish`.
///
/// This will generate a `.eh_frame` section, but not one that can be
/// naively loaded. The goal of this section is that we can create the
/// section once here and never again does it need to change. To describe
/// dynamically loaded functions though each individual FDE needs to talk
/// about the function's absolute address that it's referencing. Naturally
/// we don't actually know the function's absolute address when we're
/// creating an object here.
///
/// To solve this problem the FDE address encoding mode is set to
/// `DW_EH_PE_pcrel`. This means that the actual effective address that the
/// FDE describes is relative to the address of the FDE itself. By
/// leveraging this relative-ness we can assume that the relative distance
/// between the FDE and the function it describes is constant, which should
/// allow us to generate an FDE ahead-of-time here.
///
/// For now this assumes that all the code of functions will start at a
/// page-aligned address when loaded into memory. The eh_frame encoded here
/// then assumes that the text section is itself page aligned to its size
/// and the eh_frame will follow just after the text section. This means
/// that the relative offset we're using here is the FDE going backwards
/// into the text section itself.
///
/// Note that the library we're using to create the FDEs, `gimli`, doesn't
/// actually encode addresses relative to the FDE itself. Instead the
/// addresses are encoded relative to the start of the `.eh_frame` section.
/// This makes it much easier for us where we provide the relative offset
/// from the start of `.eh_frame` to the function in the text section, which
/// given our layout basically means the offset of the function in the text
/// section from the end of the text section.
///
/// A final note is that the reason we page-align the text section's size is
/// so the .eh_frame lives on a separate page from the text section itself.
/// This allows `.eh_frame` to have different virtual memory permissions,
/// such as being purely read-only instead of read/execute like the code
/// bits.
fn write_systemv_unwind_info(
&self,
compiler: &dyn Compiler,
obj: &mut Object<'_>,
section_id: SectionId,
text_section_size: u64,
) {
let mut cie = compiler
.create_systemv_cie()
.expect("must be able to create a CIE for system-v unwind info");
let mut table = FrameTable::default();
cie.fde_address_encoding = gimli::constants::DW_EH_PE_pcrel;
let cie_id = table.add_cie(cie);
for (text_section_off, unwind_info) in self.systemv_unwind_info.iter() {
let backwards_off = text_section_size - text_section_off;
let actual_offset = -i64::try_from(backwards_off).unwrap();
// Note that gimli wants an unsigned 64-bit integer here, but
// unwinders just use this constant for a relative addition with the
// address of the FDE, which means that the sign doesn't actually
// matter.
let fde = unwind_info.to_fde(Address::Constant(actual_ | {
self.unwind_info.push(off, body_len, info);
} | conditional_block |
util.rs | use libc;
use std::ffi::CStr;
use std::io;
use std::net::SocketAddr;
use std::net::TcpStream as StdTcpStream;
use std::sync::atomic::Ordering;
use std::time::{Duration, Instant};
use bytes::{BufMut, BytesMut};
use futures::future::Either;
use futures::sync::mpsc::Sender;
use futures::{Async, Future, IntoFuture, Poll, Sink, Stream};
use net2::TcpBuilder;
use resolve::resolver;
use slog::{info, o, warn, Drain, Logger};
use tokio::executor::current_thread::spawn;
use tokio::net::TcpListener;
use tokio::timer::{Delay, Interval};
use crate::task::Task;
use crate::Float;
use crate::{AGG_ERRORS, DROPS, EGRESS, INGRESS, INGRESS_METRICS, PARSE_ERRORS, PEER_ERRORS};
use bioyino_metric::{name::MetricName, Metric, MetricType};
use crate::{ConsensusState, CONSENSUS_STATE, IS_LEADER};
pub fn prepare_log(root: &'static str) -> Logger {
// Set logging
let decorator = slog_term::TermDecorator::new().build();
let drain = slog_term::FullFormat::new(decorator).build().fuse();
let filter = slog::LevelFilter::new(drain, slog::Level::Trace).fuse();
let drain = slog_async::Async::new(filter).build().fuse();
slog::Logger::root(drain, o!("program"=>"test", "test"=>root))
}
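// Illustrative usage (added commentary): `prepare_log` is aimed at tests and small
// binaries; something like `let log = prepare_log("my_test"); info!(log, "started");`
// prints to the terminal with the fields program="test" and test="my_test" attached.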
pub fn try_resolve(s: &str) -> SocketAddr {
s.parse().unwrap_or_else(|_| {
// for names that fail to parse we try to resolve them via DNS
let mut split = s.split(':');
let host = split.next().unwrap(); // Split always has first element
let port = split.next().expect("port not found");
let port = port.parse().expect("bad port value");
let first_ip = resolver::resolve_host(host)
.unwrap_or_else(|_| panic!("failed resolving {:}", &host))
.next()
.expect("at least one IP address required");
SocketAddr::new(first_ip, port)
})
}
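// A minimal usage sketch added for illustration (not from the original source):
// literal "ip:port" strings parse directly and never touch DNS, so the check below
// is safe to run offline; hostname inputs would go through `resolve_host` instead.
#[cfg(test)]
mod try_resolve_usage {
    use super::try_resolve;

    #[test]
    fn parses_literal_address() {
        let addr = try_resolve("127.0.0.1:8125");
        assert_eq!(addr.port(), 8125);
        assert!(addr.is_ipv4());
    }
}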
pub fn bound_stream(addr: &SocketAddr) -> Result<StdTcpStream, io::Error> {
let builder = TcpBuilder::new_v4()?;
builder.bind(addr)?;
builder.to_tcp_stream()
}
pub fn reusing_listener(addr: &SocketAddr) -> Result<TcpListener, io::Error> {
let builder = TcpBuilder::new_v4()?;
builder.reuse_address(true)?;
builder.bind(addr)?;
// backlog parameter will be limited by SOMAXCONN on Linux, which is usually set to 128
let listener = builder.listen(65536)?;
listener.set_nonblocking(true)?;
TcpListener::from_std(listener, &tokio::reactor::Handle::default())
}
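// Added note: `reuse_address(true)` sets SO_REUSEADDR before binding, so a restarted
// process can rebind the port while old sockets sit in TIME_WAIT; the requested
// backlog of 65536 is clamped by the kernel (net.core.somaxconn on Linux), which is
// what the comment above refers to.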
// TODO impl this correctly and use instead of try_resolve
// PROFIT: gives libnss-aware behaviour
/*
fn _try_resolve_nss(name: &str) {
use std::io;
use std::ffi::CString;
use std::ptr::{null_mut, null};
use libc::*;
let domain= CString::new(Vec::from(name)).unwrap().into_raw();
let mut result: *mut addrinfo = null_mut();
let success = unsafe {
getaddrinfo(domain, null_mut(), null(), &mut result)
};
if success != 0 {
// let errno = unsafe { *__errno_location() };
println!("{:?}", io::Error::last_os_error());
} else {
let mut cur = result;
while cur != null_mut() {
unsafe{
println!("LEN {:?}", (*result).ai_addrlen);
println!("DATA {:?}", (*(*result).ai_addr).sa_data);
cur = (*result).ai_next;
}
}
}
}
*/
/// Get hostname. Copypasted from some crate
pub fn get_hostname() -> Option<String> {
let len = 255;
let mut buf = Vec::<u8>::with_capacity(len);
let ptr = buf.as_mut_ptr() as *mut libc::c_char;
unsafe {
if libc::gethostname(ptr, len as libc::size_t) != 0 {
return None;
}
Some(CStr::from_ptr(ptr).to_string_lossy().into_owned())
}
}
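// A small check added for illustration (not part of the original source); it assumes
// a POSIX-like host where gethostname(2) succeeds and returns a non-empty name.
#[cfg(test)]
mod get_hostname_check {
    use super::get_hostname;

    #[test]
    fn returns_a_name() {
        let name = get_hostname().expect("gethostname should succeed");
        assert!(!name.is_empty());
    }
}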
pub fn switch_leader(acquired: bool, log: &Logger) {
let should_set = {
let state = &*CONSENSUS_STATE.lock().unwrap();
// only set leader when consensus is enabled
state == &ConsensusState::Enabled
};
if should_set {
let is_leader = IS_LEADER.load(Ordering::SeqCst);
if is_leader != acquired {
warn!(log, "leader state change: {} -> {}", is_leader, acquired);
}
IS_LEADER.store(acquired, Ordering::SeqCst);
}
}
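// Illustrative sketch, not part of the original file: announcing a leadership change with a
// test logger. The `true` flag and the logger tag are example values; the call only takes
// effect while consensus is in the Enabled state.
#[allow(dead_code)]
fn switch_leader_example() {
    let log = prepare_log("leader-example");
    switch_leader(true, &log);
}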
#[cfg(test)]
pub(crate) fn new_test_graphite_name(s: &'static str) -> MetricName {
let mut intermediate = Vec::new();
intermediate.resize(9000, 0u8);
let mode = bioyino_metric::name::TagFormat::Graphite;
MetricName::new(s.into(), mode, &mut intermediate).unwrap()
}
// A future to send own stats. Never gets ready.
pub struct OwnStats {
interval: u64,
prefix: String,
timer: Interval,
chan: Sender<Task>,
log: Logger,
}
impl OwnStats {
pub fn new(interval: u64, prefix: String, chan: Sender<Task>, log: Logger) -> Self {
let log = log.new(o!("source"=>"stats"));
let now = Instant::now();
let dur = Duration::from_millis(if interval < 100 { 1000 } else { interval }); // exclude too small intervals
Self {
interval,
prefix,
timer: Interval::new(now + dur, dur),
chan,
log,
}
}
pub fn get_stats(&mut self) {
let mut buf = BytesMut::with_capacity((self.prefix.len() + 10) * 7); // 10 is suffix len, 7 is number of metrics
macro_rules! add_metric {
($global:ident, $value:ident, $suffix:expr) => {
let $value = $global.swap(0, Ordering::Relaxed) as Float;
if self.interval > 0 {
buf.put(&self.prefix);
buf.put(".");
buf.put(&$suffix);
let name = MetricName::new_untagged(buf.take());
let metric = Metric::new($value, MetricType::Counter, None, None).unwrap();
let log = self.log.clone();
let sender = self
.chan
.clone()
.send(Task::AddMetric(name, metric))
.map(|_| ())
.map_err(move |_| warn!(log, "stats future could not send metric to task"));
spawn(sender);
}
};
};
add_metric!(EGRESS, egress, "egress");
add_metric!(INGRESS, ingress, "ingress");
add_metric!(INGRESS_METRICS, ingress_m, "ingress-metric");
add_metric!(AGG_ERRORS, agr_errors, "agg-error");
add_metric!(PARSE_ERRORS, parse_errors, "parse-error");
add_metric!(PEER_ERRORS, peer_errors, "peer-error");
add_metric!(DROPS, drops, "drop");
if self.interval > 0 {
let s_interval = self.interval as f64 / 1000f64;
info!(self.log, "stats";
"egress" => format!("{:2}", egress / s_interval),
"ingress" => format!("{:2}", ingress / s_interval),
"ingress-m" => format!("{:2}", ingress_m / s_interval),
"a-err" => format!("{:2}", agr_errors / s_interval),
"p-err" => format!("{:2}", parse_errors / s_interval),
"pe-err" => format!("{:2}", peer_errors / s_interval),
"drops" => format!("{:2}", drops / s_interval),
);
}
}
}
impl Future for OwnStats {
type Item = ();
type Error = ();
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
loop {
match self.timer.poll() {
Ok(Async::Ready(Some(_))) => {
self.get_stats();
}
Ok(Async::Ready(None)) => unreachable!(),
Ok(Async::NotReady) => return Ok(Async::NotReady),
Err(_) => return Err(()),
}
}
}
}
#[derive(Clone, Debug)]
/// Builder for `BackoffRetry`, delays are specified in milliseconds
pub struct BackoffRetryBuilder {
pub delay: u64,
pub delay_mul: f32,
pub delay_max: u64,
pub retries: usize,
}
impl Default for BackoffRetryBuilder {
fn default() -> Self {
Self {
delay: 500,
delay_mul: 2f32,
delay_max: 10000,
retries: 25,
}
}
}
impl BackoffRetryBuilder {
pub fn spawn<F>(self, action: F) -> BackoffRetry<F>
where
F: IntoFuture + Clone,
{
let inner = Either::A(action.clone().into_future());
BackoffRetry { action, inner, options: self }
}
}
/// TCP client that is able to reconnect with customizable settings
pub struct | <F: IntoFuture> {
action: F,
inner: Either<F::Future, Delay>,
options: BackoffRetryBuilder,
}
impl<F> Future for BackoffRetry<F>
where
F: IntoFuture + Clone,
{
type Item = F::Item;
type Error = Option<F::Error>;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
loop {
let (rotate_f, rotate_t) = match self.inner {
// we are polling a future currently
Either::A(ref mut future) => match future.poll() {
Ok(Async::Ready(item)) => {
return Ok(Async::Ready(item));
}
Ok(Async::NotReady) => return Ok(Async::NotReady),
Err(e) => {
if self.options.retries == 0 {
return Err(Some(e));
} else {
(true, false)
}
}
},
Either::B(ref mut timer) => {
match timer.poll() {
// we are waiting for the delay
Ok(Async::Ready(())) => (false, true),
Ok(Async::NotReady) => return Ok(Async::NotReady),
Err(_) => unreachable!(), // timer should not return error
}
}
};
if rotate_f {
self.options.retries -= 1;
let delay = self.options.delay as f32 * self.options.delay_mul;
let delay = if delay <= self.options.delay_max as f32 {
delay as u64
} else {
self.options.delay_max as u64
};
let delay = Delay::new(Instant::now() + Duration::from_millis(delay));
self.inner = Either::B(delay);
} else if rotate_t {
self.inner = Either::A(self.action.clone().into_future());
}
}
}
}
| BackoffRetry | identifier_name |
util.rs | use libc;
use std::ffi::CStr;
use std::io;
use std::net::SocketAddr;
use std::net::TcpStream as StdTcpStream;
use std::sync::atomic::Ordering;
use std::time::{Duration, Instant};
use bytes::{BufMut, BytesMut};
use futures::future::Either;
use futures::sync::mpsc::Sender;
use futures::{Async, Future, IntoFuture, Poll, Sink, Stream};
use net2::TcpBuilder;
use resolve::resolver;
use slog::{info, o, warn, Drain, Logger};
use tokio::executor::current_thread::spawn;
use tokio::net::TcpListener;
use tokio::timer::{Delay, Interval};
use crate::task::Task;
use crate::Float;
use crate::{AGG_ERRORS, DROPS, EGRESS, INGRESS, INGRESS_METRICS, PARSE_ERRORS, PEER_ERRORS};
use bioyino_metric::{name::MetricName, Metric, MetricType};
use crate::{ConsensusState, CONSENSUS_STATE, IS_LEADER};
pub fn prepare_log(root: &'static str) -> Logger {
// Set logging
let decorator = slog_term::TermDecorator::new().build();
let drain = slog_term::FullFormat::new(decorator).build().fuse();
let filter = slog::LevelFilter::new(drain, slog::Level::Trace).fuse();
let drain = slog_async::Async::new(filter).build().fuse();
slog::Logger::root(drain, o!("program"=>"test", "test"=>root))
}
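// Illustrative sketch, not part of the original file: building a terminal logger for a test
// and emitting one record through it. The tag "my-test" is an arbitrary example value.
#[allow(dead_code)]
fn prepare_log_example() {
    let log = prepare_log("my-test");
    info!(log, "logger ready");
}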
pub fn try_resolve(s: &str) -> SocketAddr {
s.parse().unwrap_or_else(|_| {
// for name that have failed to be parsed we try to resolve it via DNS
let mut split = s.split(':');
let host = split.next().unwrap(); // Split always has first element
let port = split.next().expect("port not found");
let port = port.parse().expect("bad port value");
let first_ip = resolver::resolve_host(host)
.unwrap_or_else(|_| panic!("failed resolving {:}", &host))
.next()
.expect("at least one IP address required");
SocketAddr::new(first_ip, port)
})
}
pub fn bound_stream(addr: &SocketAddr) -> Result<StdTcpStream, io::Error> {
let builder = TcpBuilder::new_v4()?;
builder.bind(addr)?;
builder.to_tcp_stream()
}
pub fn reusing_listener(addr: &SocketAddr) -> Result<TcpListener, io::Error> {
let builder = TcpBuilder::new_v4()?;
builder.reuse_address(true)?;
builder.bind(addr)?;
// backlog parameter will be limited by SOMAXCONN on Linux, which is usually set to 128
let listener = builder.listen(65536)?;
listener.set_nonblocking(true)?;
TcpListener::from_std(listener, &tokio::reactor::Handle::default())
}
// TODO impl this correctly and use instead of try_resolve
// PROFIT: gives libnss-aware behaviour
/*
fn _try_resolve_nss(name: &str) {
use std::io;
use std::ffi::CString;
use std::ptr::{null_mut, null};
use libc::*;
let domain= CString::new(Vec::from(name)).unwrap().into_raw();
let mut result: *mut addrinfo = null_mut();
let success = unsafe {
getaddrinfo(domain, null_mut(), null(), &mut result)
};
if success != 0 {
// let errno = unsafe { *__errno_location() };
println!("{:?}", io::Error::last_os_error());
} else {
let mut cur = result;
while cur != null_mut() {
unsafe{
println!("LEN {:?}", (*result).ai_addrlen);
println!("DATA {:?}", (*(*result).ai_addr).sa_data);
cur = (*result).ai_next;
}
}
}
}
*/
/// Get hostname. Copypasted from some crate
pub fn get_hostname() -> Option<String> {
let len = 255;
let mut buf = Vec::<u8>::with_capacity(len);
let ptr = buf.as_mut_ptr() as *mut libc::c_char;
unsafe {
if libc::gethostname(ptr, len as libc::size_t) != 0 {
return None;
}
Some(CStr::from_ptr(ptr).to_string_lossy().into_owned())
}
}
pub fn switch_leader(acquired: bool, log: &Logger) {
let should_set = {
let state = &*CONSENSUS_STATE.lock().unwrap();
// only set leader when consensus is enabled
state == &ConsensusState::Enabled
};
if should_set {
let is_leader = IS_LEADER.load(Ordering::SeqCst);
if is_leader != acquired {
warn!(log, "leader state change: {} -> {}", is_leader, acquired);
}
IS_LEADER.store(acquired, Ordering::SeqCst);
}
}
#[cfg(test)]
pub(crate) fn new_test_graphite_name(s: &'static str) -> MetricName {
let mut intermediate = Vec::new();
intermediate.resize(9000, 0u8);
let mode = bioyino_metric::name::TagFormat::Graphite;
MetricName::new(s.into(), mode, &mut intermediate).unwrap()
}
// A future to send own stats. Never gets ready.
pub struct OwnStats {
interval: u64,
prefix: String,
timer: Interval,
chan: Sender<Task>,
log: Logger,
}
impl OwnStats {
pub fn new(interval: u64, prefix: String, chan: Sender<Task>, log: Logger) -> Self {
let log = log.new(o!("source"=>"stats"));
let now = Instant::now();
let dur = Duration::from_millis(if interval < 100 { 1000 } else { interval }); // exclude too small intervals
Self {
interval,
prefix,
timer: Interval::new(now + dur, dur),
chan,
log,
}
}
pub fn get_stats(&mut self) {
let mut buf = BytesMut::with_capacity((self.prefix.len() + 10) * 7); // 10 is suffix len, 7 is number of metrics
macro_rules! add_metric {
($global:ident, $value:ident, $suffix:expr) => {
let $value = $global.swap(0, Ordering::Relaxed) as Float;
if self.interval > 0 {
buf.put(&self.prefix);
buf.put(".");
buf.put(&$suffix);
let name = MetricName::new_untagged(buf.take());
let metric = Metric::new($value, MetricType::Counter, None, None).unwrap();
let log = self.log.clone();
let sender = self
.chan
.clone()
.send(Task::AddMetric(name, metric))
.map(|_| ())
.map_err(move |_| warn!(log, "stats future could not send metric to task"));
spawn(sender);
}
};
};
add_metric!(EGRESS, egress, "egress");
add_metric!(INGRESS, ingress, "ingress"); | if self.interval > 0 {
let s_interval = self.interval as f64 / 1000f64;
info!(self.log, "stats";
"egress" => format!("{:2}", egress / s_interval),
"ingress" => format!("{:2}", ingress / s_interval),
"ingress-m" => format!("{:2}", ingress_m / s_interval),
"a-err" => format!("{:2}", agr_errors / s_interval),
"p-err" => format!("{:2}", parse_errors / s_interval),
"pe-err" => format!("{:2}", peer_errors / s_interval),
"drops" => format!("{:2}", drops / s_interval),
);
}
}
}
impl Future for OwnStats {
type Item = ();
type Error = ();
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
loop {
match self.timer.poll() {
Ok(Async::Ready(Some(_))) => {
self.get_stats();
}
Ok(Async::Ready(None)) => unreachable!(),
Ok(Async::NotReady) => return Ok(Async::NotReady),
Err(_) => return Err(()),
}
}
}
}
#[derive(Clone, Debug)]
/// Builder for `BackoffRetry`, delays are specified in milliseconds
pub struct BackoffRetryBuilder {
pub delay: u64,
pub delay_mul: f32,
pub delay_max: u64,
pub retries: usize,
}
impl Default for BackoffRetryBuilder {
fn default() -> Self {
Self {
delay: 500,
delay_mul: 2f32,
delay_max: 10000,
retries: 25,
}
}
}
impl BackoffRetryBuilder {
pub fn spawn<F>(self, action: F) -> BackoffRetry<F>
where
F: IntoFuture + Clone,
{
let inner = Either::A(action.clone().into_future());
BackoffRetry { action, inner, options: self }
}
}
/// TCP client that is able to reconnect with customizable settings
pub struct BackoffRetry<F: IntoFuture> {
action: F,
inner: Either<F::Future, Delay>,
options: BackoffRetryBuilder,
}
impl<F> Future for BackoffRetry<F>
where
F: IntoFuture + Clone,
{
type Item = F::Item;
type Error = Option<F::Error>;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
loop {
let (rotate_f, rotate_t) = match self.inner {
// we are polling a future currently
Either::A(ref mut future) => match future.poll() {
Ok(Async::Ready(item)) => {
return Ok(Async::Ready(item));
}
Ok(Async::NotReady) => return Ok(Async::NotReady),
Err(e) => {
if self.options.retries == 0 {
return Err(Some(e));
} else {
(true, false)
}
}
},
Either::B(ref mut timer) => {
match timer.poll() {
// we are waiting for the delay
Ok(Async::Ready(())) => (false, true),
Ok(Async::NotReady) => return Ok(Async::NotReady),
Err(_) => unreachable!(), // timer should not return error
}
}
};
if rotate_f {
self.options.retries -= 1;
let delay = self.options.delay as f32 * self.options.delay_mul;
let delay = if delay <= self.options.delay_max as f32 {
delay as u64
} else {
self.options.delay_max as u64
};
let delay = Delay::new(Instant::now() + Duration::from_millis(delay));
self.inner = Either::B(delay);
} else if rotate_t {
self.inner = Either::A(self.action.clone().into_future());
}
}
}
} | add_metric!(INGRESS_METRICS, ingress_m, "ingress-metric");
add_metric!(AGG_ERRORS, agr_errors, "agg-error");
add_metric!(PARSE_ERRORS, parse_errors, "parse-error");
add_metric!(PEER_ERRORS, peer_errors, "peer-error");
add_metric!(DROPS, drops, "drop"); | random_line_split |
util.rs | use libc;
use std::ffi::CStr;
use std::io;
use std::net::SocketAddr;
use std::net::TcpStream as StdTcpStream;
use std::sync::atomic::Ordering;
use std::time::{Duration, Instant};
use bytes::{BufMut, BytesMut};
use futures::future::Either;
use futures::sync::mpsc::Sender;
use futures::{Async, Future, IntoFuture, Poll, Sink, Stream};
use net2::TcpBuilder;
use resolve::resolver;
use slog::{info, o, warn, Drain, Logger};
use tokio::executor::current_thread::spawn;
use tokio::net::TcpListener;
use tokio::timer::{Delay, Interval};
use crate::task::Task;
use crate::Float;
use crate::{AGG_ERRORS, DROPS, EGRESS, INGRESS, INGRESS_METRICS, PARSE_ERRORS, PEER_ERRORS};
use bioyino_metric::{name::MetricName, Metric, MetricType};
use crate::{ConsensusState, CONSENSUS_STATE, IS_LEADER};
pub fn prepare_log(root: &'static str) -> Logger {
// Set logging
let decorator = slog_term::TermDecorator::new().build();
let drain = slog_term::FullFormat::new(decorator).build().fuse();
let filter = slog::LevelFilter::new(drain, slog::Level::Trace).fuse();
let drain = slog_async::Async::new(filter).build().fuse();
slog::Logger::root(drain, o!("program"=>"test", "test"=>root))
}
pub fn try_resolve(s: &str) -> SocketAddr {
s.parse().unwrap_or_else(|_| {
// for name that have failed to be parsed we try to resolve it via DNS
let mut split = s.split(':');
let host = split.next().unwrap(); // Split always has first element
let port = split.next().expect("port not found");
let port = port.parse().expect("bad port value");
let first_ip = resolver::resolve_host(host)
.unwrap_or_else(|_| panic!("failed resolving {:}", &host))
.next()
.expect("at least one IP address required");
SocketAddr::new(first_ip, port)
})
}
pub fn bound_stream(addr: &SocketAddr) -> Result<StdTcpStream, io::Error> {
let builder = TcpBuilder::new_v4()?;
builder.bind(addr)?;
builder.to_tcp_stream()
}
pub fn reusing_listener(addr: &SocketAddr) -> Result<TcpListener, io::Error> {
let builder = TcpBuilder::new_v4()?;
builder.reuse_address(true)?;
builder.bind(addr)?;
// backlog parameter will be limited by SOMAXCONN on Linux, which is usually set to 128
let listener = builder.listen(65536)?;
listener.set_nonblocking(true)?;
TcpListener::from_std(listener, &tokio::reactor::Handle::default())
}
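// Illustrative sketch, not part of the original file: binding a SO_REUSEADDR listener on an
// example address. The IP and port are arbitrary placeholder values.
#[allow(dead_code)]
fn reusing_listener_example() -> Result<TcpListener, io::Error> {
    let addr = try_resolve("127.0.0.1:8126");
    reusing_listener(&addr)
}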
// TODO impl this correctly and use instead of try_resolve
// PROFIT: gives libnss-aware behaviour
/*
fn _try_resolve_nss(name: &str) {
use std::io;
use std::ffi::CString;
use std::ptr::{null_mut, null};
use libc::*;
let domain= CString::new(Vec::from(name)).unwrap().into_raw();
let mut result: *mut addrinfo = null_mut();
let success = unsafe {
getaddrinfo(domain, null_mut(), null(), &mut result)
};
if success != 0 {
// let errno = unsafe { *__errno_location() };
println!("{:?}", io::Error::last_os_error());
} else {
let mut cur = result;
while cur != null_mut() {
unsafe{
println!("LEN {:?}", (*result).ai_addrlen);
println!("DATA {:?}", (*(*result).ai_addr).sa_data);
cur = (*result).ai_next;
}
}
}
}
*/
/// Get hostname. Copypasted from some crate
pub fn get_hostname() -> Option<String> {
let len = 255;
let mut buf = Vec::<u8>::with_capacity(len);
let ptr = buf.as_mut_ptr() as *mut libc::c_char;
unsafe {
if libc::gethostname(ptr, len as libc::size_t) != 0 {
return None;
}
Some(CStr::from_ptr(ptr).to_string_lossy().into_owned())
}
}
pub fn switch_leader(acquired: bool, log: &Logger) {
let should_set = {
let state = &*CONSENSUS_STATE.lock().unwrap();
// only set leader when consensus is enabled
state == &ConsensusState::Enabled
};
if should_set |
}
#[cfg(test)]
pub(crate) fn new_test_graphite_name(s: &'static str) -> MetricName {
let mut intermediate = Vec::new();
intermediate.resize(9000, 0u8);
let mode = bioyino_metric::name::TagFormat::Graphite;
MetricName::new(s.into(), mode, &mut intermediate).unwrap()
}
// A future to send own stats. Never gets ready.
pub struct OwnStats {
interval: u64,
prefix: String,
timer: Interval,
chan: Sender<Task>,
log: Logger,
}
impl OwnStats {
pub fn new(interval: u64, prefix: String, chan: Sender<Task>, log: Logger) -> Self {
let log = log.new(o!("source"=>"stats"));
let now = Instant::now();
let dur = Duration::from_millis(if interval < 100 { 1000 } else { interval }); // exclude too small intervals
Self {
interval,
prefix,
timer: Interval::new(now + dur, dur),
chan,
log,
}
}
pub fn get_stats(&mut self) {
let mut buf = BytesMut::with_capacity((self.prefix.len() + 10) * 7); // 10 is suffix len, 7 is number of metrics
macro_rules! add_metric {
($global:ident, $value:ident, $suffix:expr) => {
let $value = $global.swap(0, Ordering::Relaxed) as Float;
if self.interval > 0 {
buf.put(&self.prefix);
buf.put(".");
buf.put(&$suffix);
let name = MetricName::new_untagged(buf.take());
let metric = Metric::new($value, MetricType::Counter, None, None).unwrap();
let log = self.log.clone();
let sender = self
.chan
.clone()
.send(Task::AddMetric(name, metric))
.map(|_| ())
.map_err(move |_| warn!(log, "stats future could not send metric to task"));
spawn(sender);
}
};
};
add_metric!(EGRESS, egress, "egress");
add_metric!(INGRESS, ingress, "ingress");
add_metric!(INGRESS_METRICS, ingress_m, "ingress-metric");
add_metric!(AGG_ERRORS, agr_errors, "agg-error");
add_metric!(PARSE_ERRORS, parse_errors, "parse-error");
add_metric!(PEER_ERRORS, peer_errors, "peer-error");
add_metric!(DROPS, drops, "drop");
if self.interval > 0 {
let s_interval = self.interval as f64 / 1000f64;
info!(self.log, "stats";
"egress" => format!("{:2}", egress / s_interval),
"ingress" => format!("{:2}", ingress / s_interval),
"ingress-m" => format!("{:2}", ingress_m / s_interval),
"a-err" => format!("{:2}", agr_errors / s_interval),
"p-err" => format!("{:2}", parse_errors / s_interval),
"pe-err" => format!("{:2}", peer_errors / s_interval),
"drops" => format!("{:2}", drops / s_interval),
);
}
}
}
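// Illustrative sketch, not part of the original file: wiring OwnStats to a task channel and
// driving it on the current-thread executor. The interval, prefix and channel capacity are
// arbitrary example values, and `spawn` assumes a current-thread runtime is already active.
#[allow(dead_code)]
fn own_stats_example() {
    let (tx, _rx) = futures::sync::mpsc::channel::<Task>(64);
    let log = prepare_log("own-stats-example");
    let stats = OwnStats::new(10_000, "stats.example".to_string(), tx, log);
    spawn(stats);
}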
impl Future for OwnStats {
type Item = ();
type Error = ();
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
loop {
match self.timer.poll() {
Ok(Async::Ready(Some(_))) => {
self.get_stats();
}
Ok(Async::Ready(None)) => unreachable!(),
Ok(Async::NotReady) => return Ok(Async::NotReady),
Err(_) => return Err(()),
}
}
}
}
#[derive(Clone, Debug)]
/// Builder for `BackoffRetry`, delays are specified in milliseconds
pub struct BackoffRetryBuilder {
pub delay: u64,
pub delay_mul: f32,
pub delay_max: u64,
pub retries: usize,
}
impl Default for BackoffRetryBuilder {
fn default() -> Self {
Self {
delay: 500,
delay_mul: 2f32,
delay_max: 10000,
retries: 25,
}
}
}
impl BackoffRetryBuilder {
pub fn spawn<F>(self, action: F) -> BackoffRetry<F>
where
F: IntoFuture + Clone,
{
let inner = Either::A(action.clone().into_future());
BackoffRetry { action, inner, options: self }
}
}
/// TCP client that is able to reconnect with customizable settings
pub struct BackoffRetry<F: IntoFuture> {
action: F,
inner: Either<F::Future, Delay>,
options: BackoffRetryBuilder,
}
impl<F> Future for BackoffRetry<F>
where
F: IntoFuture + Clone,
{
type Item = F::Item;
type Error = Option<F::Error>;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
loop {
let (rotate_f, rotate_t) = match self.inner {
// we are polling a future currently
Either::A(ref mut future) => match future.poll() {
Ok(Async::Ready(item)) => {
return Ok(Async::Ready(item));
}
Ok(Async::NotReady) => return Ok(Async::NotReady),
Err(e) => {
if self.options.retries == 0 {
return Err(Some(e));
} else {
(true, false)
}
}
},
Either::B(ref mut timer) => {
match timer.poll() {
// we are waiting for the delay
Ok(Async::Ready(())) => (false, true),
Ok(Async::NotReady) => return Ok(Async::NotReady),
Err(_) => unreachable!(), // timer should not return error
}
}
};
if rotate_f {
self.options.retries -= 1;
let delay = self.options.delay as f32 * self.options.delay_mul;
let delay = if delay <= self.options.delay_max as f32 {
delay as u64
} else {
self.options.delay_max as u64
};
let delay = Delay::new(Instant::now() + Duration::from_millis(delay));
self.inner = Either::B(delay);
} else if rotate_t {
self.inner = Either::A(self.action.clone().into_future());
}
}
}
}
| {
let is_leader = IS_LEADER.load(Ordering::SeqCst);
if is_leader != acquired {
warn!(log, "leader state change: {} -> {}", is_leader, acquired);
}
IS_LEADER.store(acquired, Ordering::SeqCst);
} | conditional_block |
util.rs | use libc;
use std::ffi::CStr;
use std::io;
use std::net::SocketAddr;
use std::net::TcpStream as StdTcpStream;
use std::sync::atomic::Ordering;
use std::time::{Duration, Instant};
use bytes::{BufMut, BytesMut};
use futures::future::Either;
use futures::sync::mpsc::Sender;
use futures::{Async, Future, IntoFuture, Poll, Sink, Stream};
use net2::TcpBuilder;
use resolve::resolver;
use slog::{info, o, warn, Drain, Logger};
use tokio::executor::current_thread::spawn;
use tokio::net::TcpListener;
use tokio::timer::{Delay, Interval};
use crate::task::Task;
use crate::Float;
use crate::{AGG_ERRORS, DROPS, EGRESS, INGRESS, INGRESS_METRICS, PARSE_ERRORS, PEER_ERRORS};
use bioyino_metric::{name::MetricName, Metric, MetricType};
use crate::{ConsensusState, CONSENSUS_STATE, IS_LEADER};
pub fn prepare_log(root: &'static str) -> Logger {
// Set logging
let decorator = slog_term::TermDecorator::new().build();
let drain = slog_term::FullFormat::new(decorator).build().fuse();
let filter = slog::LevelFilter::new(drain, slog::Level::Trace).fuse();
let drain = slog_async::Async::new(filter).build().fuse();
slog::Logger::root(drain, o!("program"=>"test", "test"=>root))
}
pub fn try_resolve(s: &str) -> SocketAddr {
s.parse().unwrap_or_else(|_| {
// for name that have failed to be parsed we try to resolve it via DNS
let mut split = s.split(':');
let host = split.next().unwrap(); // Split always has first element
let port = split.next().expect("port not found");
let port = port.parse().expect("bad port value");
let first_ip = resolver::resolve_host(host)
.unwrap_or_else(|_| panic!("failed resolving {:}", &host))
.next()
.expect("at least one IP address required");
SocketAddr::new(first_ip, port)
})
}
pub fn bound_stream(addr: &SocketAddr) -> Result<StdTcpStream, io::Error> {
let builder = TcpBuilder::new_v4()?;
builder.bind(addr)?;
builder.to_tcp_stream()
}
pub fn reusing_listener(addr: &SocketAddr) -> Result<TcpListener, io::Error> {
let builder = TcpBuilder::new_v4()?;
builder.reuse_address(true)?;
builder.bind(addr)?;
// backlog parameter will be limited by SOMAXCONN on Linux, which is usually set to 128
let listener = builder.listen(65536)?;
listener.set_nonblocking(true)?;
TcpListener::from_std(listener, &tokio::reactor::Handle::default())
}
// TODO impl this correctly and use instead of try_resolve
// PROFIT: gives libnss-aware behaviour
/*
fn _try_resolve_nss(name: &str) {
use std::io;
use std::ffi::CString;
use std::ptr::{null_mut, null};
use libc::*;
let domain= CString::new(Vec::from(name)).unwrap().into_raw();
let mut result: *mut addrinfo = null_mut();
let success = unsafe {
getaddrinfo(domain, null_mut(), null(), &mut result)
};
if success != 0 {
// let errno = unsafe { *__errno_location() };
println!("{:?}", io::Error::last_os_error());
} else {
let mut cur = result;
while cur != null_mut() {
unsafe{
println!("LEN {:?}", (*result).ai_addrlen);
println!("DATA {:?}", (*(*result).ai_addr).sa_data);
cur = (*result).ai_next;
}
}
}
}
*/
/// Get hostname. Copypasted from some crate
pub fn get_hostname() -> Option<String> {
let len = 255;
let mut buf = Vec::<u8>::with_capacity(len);
let ptr = buf.as_mut_ptr() as *mut libc::c_char;
unsafe {
if libc::gethostname(ptr, len as libc::size_t) != 0 {
return None;
}
Some(CStr::from_ptr(ptr).to_string_lossy().into_owned())
}
}
pub fn switch_leader(acquired: bool, log: &Logger) {
let should_set = {
let state = &*CONSENSUS_STATE.lock().unwrap();
// only set leader when consensus is enabled
state == &ConsensusState::Enabled
};
if should_set {
let is_leader = IS_LEADER.load(Ordering::SeqCst);
if is_leader != acquired {
warn!(log, "leader state change: {} -> {}", is_leader, acquired);
}
IS_LEADER.store(acquired, Ordering::SeqCst);
}
}
#[cfg(test)]
pub(crate) fn new_test_graphite_name(s: &'static str) -> MetricName {
let mut intermediate = Vec::new();
intermediate.resize(9000, 0u8);
let mode = bioyino_metric::name::TagFormat::Graphite;
MetricName::new(s.into(), mode, &mut intermediate).unwrap()
}
// A future to send own stats. Never gets ready.
pub struct OwnStats {
interval: u64,
prefix: String,
timer: Interval,
chan: Sender<Task>,
log: Logger,
}
impl OwnStats {
pub fn new(interval: u64, prefix: String, chan: Sender<Task>, log: Logger) -> Self {
let log = log.new(o!("source"=>"stats"));
let now = Instant::now();
let dur = Duration::from_millis(if interval < 100 { 1000 } else { interval }); // exclude too small intervals
Self {
interval,
prefix,
timer: Interval::new(now + dur, dur),
chan,
log,
}
}
pub fn get_stats(&mut self) {
let mut buf = BytesMut::with_capacity((self.prefix.len() + 10) * 7); // 10 is suffix len, 7 is number of metrics
macro_rules! add_metric {
($global:ident, $value:ident, $suffix:expr) => {
let $value = $global.swap(0, Ordering::Relaxed) as Float;
if self.interval > 0 {
buf.put(&self.prefix);
buf.put(".");
buf.put(&$suffix);
let name = MetricName::new_untagged(buf.take());
let metric = Metric::new($value, MetricType::Counter, None, None).unwrap();
let log = self.log.clone();
let sender = self
.chan
.clone()
.send(Task::AddMetric(name, metric))
.map(|_| ())
.map_err(move |_| warn!(log, "stats future could not send metric to task"));
spawn(sender);
}
};
};
add_metric!(EGRESS, egress, "egress");
add_metric!(INGRESS, ingress, "ingress");
add_metric!(INGRESS_METRICS, ingress_m, "ingress-metric");
add_metric!(AGG_ERRORS, agr_errors, "agg-error");
add_metric!(PARSE_ERRORS, parse_errors, "parse-error");
add_metric!(PEER_ERRORS, peer_errors, "peer-error");
add_metric!(DROPS, drops, "drop");
if self.interval > 0 {
let s_interval = self.interval as f64 / 1000f64;
info!(self.log, "stats";
"egress" => format!("{:2}", egress / s_interval),
"ingress" => format!("{:2}", ingress / s_interval),
"ingress-m" => format!("{:2}", ingress_m / s_interval),
"a-err" => format!("{:2}", agr_errors / s_interval),
"p-err" => format!("{:2}", parse_errors / s_interval),
"pe-err" => format!("{:2}", peer_errors / s_interval),
"drops" => format!("{:2}", drops / s_interval),
);
}
}
}
impl Future for OwnStats {
type Item = ();
type Error = ();
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
loop {
match self.timer.poll() {
Ok(Async::Ready(Some(_))) => {
self.get_stats();
}
Ok(Async::Ready(None)) => unreachable!(),
Ok(Async::NotReady) => return Ok(Async::NotReady),
Err(_) => return Err(()),
}
}
}
}
#[derive(Clone, Debug)]
/// Builder for `BackoffRetry`, delays are specified in milliseconds
pub struct BackoffRetryBuilder {
pub delay: u64,
pub delay_mul: f32,
pub delay_max: u64,
pub retries: usize,
}
impl Default for BackoffRetryBuilder {
fn default() -> Self |
}
impl BackoffRetryBuilder {
pub fn spawn<F>(self, action: F) -> BackoffRetry<F>
where
F: IntoFuture + Clone,
{
let inner = Either::A(action.clone().into_future());
BackoffRetry { action, inner, options: self }
}
}
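// Illustrative sketch, not part of the original file: retrying a cloneable future with
// exponential backoff. The delays and retry count are example values, and
// `futures::future::ok` stands in for a real fallible action such as a reconnect attempt.
#[allow(dead_code)]
fn backoff_retry_example() {
    let retry = BackoffRetryBuilder {
        delay: 250,
        delay_mul: 1.5f32,
        delay_max: 5_000,
        retries: 10,
    }
    .spawn(futures::future::ok::<(), ()>(()));
    spawn(retry.map(|_| ()).map_err(|_| ()));
}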
/// TCP client that is able to reconnect with customizable settings
pub struct BackoffRetry<F: IntoFuture> {
action: F,
inner: Either<F::Future, Delay>,
options: BackoffRetryBuilder,
}
impl<F> Future for BackoffRetry<F>
where
F: IntoFuture + Clone,
{
type Item = F::Item;
type Error = Option<F::Error>;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
loop {
let (rotate_f, rotate_t) = match self.inner {
// we are polling a future currently
Either::A(ref mut future) => match future.poll() {
Ok(Async::Ready(item)) => {
return Ok(Async::Ready(item));
}
Ok(Async::NotReady) => return Ok(Async::NotReady),
Err(e) => {
if self.options.retries == 0 {
return Err(Some(e));
} else {
(true, false)
}
}
},
Either::B(ref mut timer) => {
match timer.poll() {
// we are waiting for the delay
Ok(Async::Ready(())) => (false, true),
Ok(Async::NotReady) => return Ok(Async::NotReady),
Err(_) => unreachable!(), // timer should not return error
}
}
};
if rotate_f {
self.options.retries -= 1;
let delay = self.options.delay as f32 * self.options.delay_mul;
let delay = if delay <= self.options.delay_max as f32 {
delay as u64
} else {
self.options.delay_max as u64
};
let delay = Delay::new(Instant::now() + Duration::from_millis(delay));
self.inner = Either::B(delay);
} else if rotate_t {
self.inner = Either::A(self.action.clone().into_future());
}
}
}
}
| {
Self {
delay: 500,
delay_mul: 2f32,
delay_max: 10000,
retries: 25,
}
} | identifier_body |
listing.rs | use actix_web::http::StatusCode;
use actix_web::{fs, http, Body, FromRequest, HttpRequest, HttpResponse, Query, Result};
use bytesize::ByteSize;
use futures::stream::once;
use htmlescape::encode_minimal as escape_html_entity;
use percent_encoding::{utf8_percent_encode, DEFAULT_ENCODE_SET};
use serde::Deserialize;
use std::io;
use std::path::{Path, PathBuf};
use std::time::SystemTime;
use strum_macros::{Display, EnumString};
use crate::archive::CompressionMethod;
use crate::errors::{self, ContextualError};
use crate::renderer;
use crate::themes::ColorScheme;
/// Query parameters
#[derive(Deserialize)]
pub struct QueryParameters {
pub path: Option<PathBuf>,
pub sort: Option<SortingMethod>,
pub order: Option<SortingOrder>,
pub theme: Option<ColorScheme>,
download: Option<CompressionMethod>,
}
/// Available sorting methods
#[derive(Deserialize, Clone, EnumString, Display, Copy)]
#[serde(rename_all = "snake_case")]
#[strum(serialize_all = "snake_case")]
pub enum SortingMethod {
/// Sort by name
Name,
/// Sort by size
Size,
/// Sort by last modification date (natural sort: follows alphanumerical order)
Date,
}
/// Available sorting orders
#[derive(Deserialize, Clone, EnumString, Display, Copy)]
pub enum SortingOrder {
/// Ascending order
#[serde(alias = "asc")]
#[strum(serialize = "asc")]
Ascending,
/// Descending order
#[serde(alias = "desc")]
#[strum(serialize = "desc")]
Descending,
}
#[derive(PartialEq)]
/// Possible entry types
pub enum EntryType {
/// Entry is a directory
Directory,
/// Entry is a file
File,
/// Entry is a symlink
Symlink,
}
/// Entry
pub struct Entry {
/// Name of the entry
pub name: String,
/// Type of the entry
pub entry_type: EntryType,
/// URL of the entry
pub link: String,
/// Size in byte of the entry. Only available for EntryType::File
pub size: Option<bytesize::ByteSize>,
/// Last modification date
pub last_modification_date: Option<SystemTime>,
}
impl Entry {
fn new(
name: String,
entry_type: EntryType,
link: String,
size: Option<bytesize::ByteSize>,
last_modification_date: Option<SystemTime>,
) -> Self {
Entry {
name,
entry_type,
link,
size,
last_modification_date,
}
}
/// Returns whether the entry is a directory
pub fn | (&self) -> bool {
self.entry_type == EntryType::Directory
}
/// Returns whether the entry is a file
pub fn is_file(&self) -> bool {
self.entry_type == EntryType::File
}
/// Returns whether the entry is a symlink
pub fn is_symlink(&self) -> bool {
self.entry_type == EntryType::Symlink
}
// Returns whether the entry is a video
pub fn is_video(&self) -> bool {
let video_extensions = vec!["mp4", "ogv", "avi", "mkv"];
self.entry_type == EntryType::File && self.extension()
.map(|ext| video_extensions.contains(&ext.as_str()))
.unwrap_or(false)
}
// Returns whether the entry is an audio file
pub fn is_audio(&self) -> bool {
let audio_extensions = vec!["ogg", "mp3", "aac", "flac", "wav", "m4a"];
self.entry_type == EntryType::File && self.extension()
.map(|ext| audio_extensions.contains(&ext.as_str()))
.unwrap_or(false)
}
fn extension(&self) -> Option<String> {
std::path::PathBuf::from(&self.name).extension().and_then(|s| s.to_str()).map(|s| s.to_string())
}
}
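// Illustrative sketch, not part of the original file: constructing an Entry by hand and
// checking the media-type helpers. The file name, URL and size are arbitrary example values.
#[allow(dead_code)]
fn entry_example() {
    let entry = Entry::new(
        "clip.mp4".to_string(),
        EntryType::File,
        "/clip.mp4".to_string(),
        Some(ByteSize::b(1024)),
        None,
    );
    assert!(entry.is_file() && entry.is_video() && !entry.is_audio());
}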
pub fn file_handler(req: &HttpRequest<crate::MiniserveConfig>) -> Result<fs::NamedFile> {
let path = &req.state().path;
Ok(fs::NamedFile::open(path)?)
}
/// Lists a directory and renders an HTML file accordingly
/// Adapted from https://docs.rs/actix-web/0.7.13/src/actix_web/fs.rs.html#564
#[allow(clippy::identity_conversion)]
pub fn directory_listing<S>(
dir: &fs::Directory,
req: &HttpRequest<S>,
skip_symlinks: bool,
file_upload: bool,
random_route: Option<String>,
default_color_scheme: ColorScheme,
upload_route: String,
) -> Result<HttpResponse, io::Error> {
let serve_path = req.path();
let base = Path::new(serve_path);
let random_route = format!("/{}", random_route.unwrap_or_default());
let is_root = base.parent().is_none() || req.path() == random_route;
let page_parent = base.parent().map(|p| p.display().to_string());
let current_dir = match base.strip_prefix(random_route) {
Ok(c_d) => Path::new("/").join(c_d),
Err(_) => base.to_path_buf(),
};
let query_params = extract_query_parameters(req);
let mut entries: Vec<Entry> = Vec::new();
for entry in dir.path.read_dir()? {
if dir.is_visible(&entry) {
let entry = entry?;
let p = match entry.path().strip_prefix(&dir.path) {
Ok(p) => base.join(p),
Err(_) => continue,
};
// show file url as relative to static path
let file_url =
utf8_percent_encode(&p.to_string_lossy(), DEFAULT_ENCODE_SET).to_string();
// escape_html_entity replaces " with &quot;, & with &amp;, ' with &#x27;, < with &lt; and > with &gt;
let file_name = escape_html_entity(&entry.file_name().to_string_lossy());
// if file is a directory, add '/' to the end of the name
if let Ok(metadata) = entry.metadata() {
if skip_symlinks && metadata.file_type().is_symlink() {
continue;
}
let last_modification_date = match metadata.modified() {
Ok(date) => Some(date),
Err(_) => None,
};
if metadata.file_type().is_symlink() {
entries.push(Entry::new(
file_name,
EntryType::Symlink,
file_url,
None,
last_modification_date,
));
} else if metadata.is_dir() {
entries.push(Entry::new(
file_name,
EntryType::Directory,
file_url,
None,
last_modification_date,
));
} else {
entries.push(Entry::new(
file_name,
EntryType::File,
file_url,
Some(ByteSize::b(metadata.len())),
last_modification_date,
));
}
} else {
continue;
}
}
}
if let Some(sorting_method) = query_params.sort {
match sorting_method {
SortingMethod::Name => entries
.sort_by(|e1, e2| alphanumeric_sort::compare_str(e1.name.clone(), e2.name.clone())),
SortingMethod::Size => entries.sort_by(|e1, e2| {
// If we can't get the size of the entry (directory for instance)
// let's consider it's 0b
e2.size
.unwrap_or_else(|| ByteSize::b(0))
.cmp(&e1.size.unwrap_or_else(|| ByteSize::b(0)))
}),
SortingMethod::Date => entries.sort_by(|e1, e2| {
// If, for some reason, we can't get the last modification date of an entry
// let's consider it was modified on UNIX_EPOCH (01/01/1970 00:00:00)
e2.last_modification_date
.unwrap_or(SystemTime::UNIX_EPOCH)
.cmp(&e1.last_modification_date.unwrap_or(SystemTime::UNIX_EPOCH))
}),
};
} else {
// Sort in alphanumeric order by default
entries.sort_by(|e1, e2| alphanumeric_sort::compare_str(e1.name.clone(), e2.name.clone()))
}
if let Some(sorting_order) = query_params.order {
if let SortingOrder::Descending = sorting_order {
entries.reverse()
}
}
let color_scheme = query_params.theme.unwrap_or(default_color_scheme);
if let Some(compression_method) = &query_params.download {
log::info!(
"Creating an archive ({extension}) of {path}...",
extension = compression_method.extension(),
path = &dir.path.display().to_string()
);
match compression_method.create_archive(&dir.path, skip_symlinks) {
Ok((filename, content)) => {
log::info!("{file} successfully created!", file = &filename);
Ok(HttpResponse::Ok()
.content_type(compression_method.content_type())
.content_encoding(compression_method.content_encoding())
.header("Content-Transfer-Encoding", "binary")
.header(
"Content-Disposition",
format!("attachment; filename={:?}", filename),
)
.chunked()
.body(Body::Streaming(Box::new(once(Ok(content))))))
}
Err(err) => {
errors::log_error_chain(err.to_string());
Ok(HttpResponse::Ok()
.status(http::StatusCode::INTERNAL_SERVER_ERROR)
.body(
renderer::render_error(
&err.to_string(),
StatusCode::INTERNAL_SERVER_ERROR,
serve_path,
query_params.sort,
query_params.order,
color_scheme,
default_color_scheme,
false,
true,
)
.into_string(),
))
}
}
} else {
Ok(HttpResponse::Ok()
.content_type("text/html; charset=utf-8")
.body(
renderer::page(
serve_path,
entries,
is_root,
page_parent,
query_params.sort,
query_params.order,
default_color_scheme,
color_scheme,
file_upload,
&upload_route,
&current_dir.display().to_string(),
)
.into_string(),
))
}
}
pub fn extract_query_parameters<S>(req: &HttpRequest<S>) -> QueryParameters {
match Query::<QueryParameters>::extract(req) {
Ok(query) => QueryParameters {
sort: query.sort,
order: query.order,
download: query.download.clone(),
theme: query.theme,
path: query.path.clone(),
},
Err(e) => {
let err = ContextualError::ParseError("query parameters".to_string(), e.to_string());
errors::log_error_chain(err.to_string());
QueryParameters {
sort: None,
order: None,
download: None,
theme: None,
path: None,
}
}
}
}
| is_dir | identifier_name |
listing.rs | use actix_web::http::StatusCode;
use actix_web::{fs, http, Body, FromRequest, HttpRequest, HttpResponse, Query, Result};
use bytesize::ByteSize;
use futures::stream::once;
use htmlescape::encode_minimal as escape_html_entity;
use percent_encoding::{utf8_percent_encode, DEFAULT_ENCODE_SET};
use serde::Deserialize;
use std::io;
use std::path::{Path, PathBuf};
use std::time::SystemTime;
use strum_macros::{Display, EnumString};
use crate::archive::CompressionMethod;
use crate::errors::{self, ContextualError};
use crate::renderer;
use crate::themes::ColorScheme;
/// Query parameters
#[derive(Deserialize)]
pub struct QueryParameters {
pub path: Option<PathBuf>,
pub sort: Option<SortingMethod>,
pub order: Option<SortingOrder>,
pub theme: Option<ColorScheme>,
download: Option<CompressionMethod>,
}
/// Available sorting methods
#[derive(Deserialize, Clone, EnumString, Display, Copy)]
#[serde(rename_all = "snake_case")]
#[strum(serialize_all = "snake_case")]
pub enum SortingMethod {
/// Sort by name
Name,
/// Sort by size
Size,
/// Sort by last modification date (natural sort: follows alphanumerical order)
Date,
}
/// Available sorting orders
#[derive(Deserialize, Clone, EnumString, Display, Copy)]
pub enum SortingOrder {
/// Ascending order
#[serde(alias = "asc")]
#[strum(serialize = "asc")]
Ascending,
/// Descending order
#[serde(alias = "desc")]
#[strum(serialize = "desc")]
Descending,
}
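// Illustrative sketch, not part of the original file: both enums derive strum's EnumString,
// so raw query values can be parsed with str::parse / from_str. The literals "date" and
// "desc" are example inputs matching the serializations declared above.
#[allow(dead_code)]
fn sorting_parse_example() {
    use std::str::FromStr;
    assert!(SortingMethod::from_str("date").is_ok());
    assert!(SortingOrder::from_str("desc").is_ok());
}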
#[derive(PartialEq)]
/// Possible entry types
pub enum EntryType {
/// Entry is a directory
Directory,
/// Entry is a file
File,
/// Entry is a symlink
Symlink,
}
| /// Type of the entry
pub entry_type: EntryType,
/// URL of the entry
pub link: String,
/// Size in byte of the entry. Only available for EntryType::File
pub size: Option<bytesize::ByteSize>,
/// Last modification date
pub last_modification_date: Option<SystemTime>,
}
impl Entry {
fn new(
name: String,
entry_type: EntryType,
link: String,
size: Option<bytesize::ByteSize>,
last_modification_date: Option<SystemTime>,
) -> Self {
Entry {
name,
entry_type,
link,
size,
last_modification_date,
}
}
/// Returns whether the entry is a directory
pub fn is_dir(&self) -> bool {
self.entry_type == EntryType::Directory
}
/// Returns whether the entry is a file
pub fn is_file(&self) -> bool {
self.entry_type == EntryType::File
}
/// Returns whether the entry is a symlink
pub fn is_symlink(&self) -> bool {
self.entry_type == EntryType::Symlink
}
// Returns whether the entry is a video
pub fn is_video(&self) -> bool {
let video_extensions = vec!["mp4", "ogv", "avi", "mkv"];
self.entry_type == EntryType::File && self.extension()
.map(|ext| video_extensions.contains(&ext.as_str()))
.unwrap_or(false)
}
// Returns whether the entry is an audio file
pub fn is_audio(&self) -> bool {
let audio_extensions = vec!["ogg", "mp3", "aac", "flac", "wav", "m4a"];
self.entry_type == EntryType::File && self.extension()
.map(|ext| audio_extensions.contains(&ext.as_str()))
.unwrap_or(false)
}
fn extension(&self) -> Option<String> {
std::path::PathBuf::from(&self.name).extension().and_then(|s| s.to_str()).map(|s| s.to_string())
}
}
pub fn file_handler(req: &HttpRequest<crate::MiniserveConfig>) -> Result<fs::NamedFile> {
let path = &req.state().path;
Ok(fs::NamedFile::open(path)?)
}
/// Lists a directory and renders an HTML file accordingly
/// Adapted from https://docs.rs/actix-web/0.7.13/src/actix_web/fs.rs.html#564
#[allow(clippy::identity_conversion)]
pub fn directory_listing<S>(
dir: &fs::Directory,
req: &HttpRequest<S>,
skip_symlinks: bool,
file_upload: bool,
random_route: Option<String>,
default_color_scheme: ColorScheme,
upload_route: String,
) -> Result<HttpResponse, io::Error> {
let serve_path = req.path();
let base = Path::new(serve_path);
let random_route = format!("/{}", random_route.unwrap_or_default());
let is_root = base.parent().is_none() || req.path() == random_route;
let page_parent = base.parent().map(|p| p.display().to_string());
let current_dir = match base.strip_prefix(random_route) {
Ok(c_d) => Path::new("/").join(c_d),
Err(_) => base.to_path_buf(),
};
let query_params = extract_query_parameters(req);
let mut entries: Vec<Entry> = Vec::new();
for entry in dir.path.read_dir()? {
if dir.is_visible(&entry) {
let entry = entry?;
let p = match entry.path().strip_prefix(&dir.path) {
Ok(p) => base.join(p),
Err(_) => continue,
};
// show file url as relative to static path
let file_url =
utf8_percent_encode(&p.to_string_lossy(), DEFAULT_ENCODE_SET).to_string();
// escape_html_entity replaces " with &quot;, & with &amp;, ' with &#x27;, < with &lt; and > with &gt;
let file_name = escape_html_entity(&entry.file_name().to_string_lossy());
// if file is a directory, add '/' to the end of the name
if let Ok(metadata) = entry.metadata() {
if skip_symlinks && metadata.file_type().is_symlink() {
continue;
}
let last_modification_date = match metadata.modified() {
Ok(date) => Some(date),
Err(_) => None,
};
if metadata.file_type().is_symlink() {
entries.push(Entry::new(
file_name,
EntryType::Symlink,
file_url,
None,
last_modification_date,
));
} else if metadata.is_dir() {
entries.push(Entry::new(
file_name,
EntryType::Directory,
file_url,
None,
last_modification_date,
));
} else {
entries.push(Entry::new(
file_name,
EntryType::File,
file_url,
Some(ByteSize::b(metadata.len())),
last_modification_date,
));
}
} else {
continue;
}
}
}
if let Some(sorting_method) = query_params.sort {
match sorting_method {
SortingMethod::Name => entries
.sort_by(|e1, e2| alphanumeric_sort::compare_str(e1.name.clone(), e2.name.clone())),
SortingMethod::Size => entries.sort_by(|e1, e2| {
// If we can't get the size of the entry (directory for instance)
// let's consider it's 0b
e2.size
.unwrap_or_else(|| ByteSize::b(0))
.cmp(&e1.size.unwrap_or_else(|| ByteSize::b(0)))
}),
SortingMethod::Date => entries.sort_by(|e1, e2| {
// If, for some reason, we can't get the last modification date of an entry
// let's consider it was modified on UNIX_EPOCH (01/01/1970 00:00:00)
e2.last_modification_date
.unwrap_or(SystemTime::UNIX_EPOCH)
.cmp(&e1.last_modification_date.unwrap_or(SystemTime::UNIX_EPOCH))
}),
};
} else {
// Sort in alphanumeric order by default
entries.sort_by(|e1, e2| alphanumeric_sort::compare_str(e1.name.clone(), e2.name.clone()))
}
if let Some(sorting_order) = query_params.order {
if let SortingOrder::Descending = sorting_order {
entries.reverse()
}
}
let color_scheme = query_params.theme.unwrap_or(default_color_scheme);
if let Some(compression_method) = &query_params.download {
log::info!(
"Creating an archive ({extension}) of {path}...",
extension = compression_method.extension(),
path = &dir.path.display().to_string()
);
match compression_method.create_archive(&dir.path, skip_symlinks) {
Ok((filename, content)) => {
log::info!("{file} successfully created!", file = &filename);
Ok(HttpResponse::Ok()
.content_type(compression_method.content_type())
.content_encoding(compression_method.content_encoding())
.header("Content-Transfer-Encoding", "binary")
.header(
"Content-Disposition",
format!("attachment; filename={:?}", filename),
)
.chunked()
.body(Body::Streaming(Box::new(once(Ok(content))))))
}
Err(err) => {
errors::log_error_chain(err.to_string());
Ok(HttpResponse::Ok()
.status(http::StatusCode::INTERNAL_SERVER_ERROR)
.body(
renderer::render_error(
&err.to_string(),
StatusCode::INTERNAL_SERVER_ERROR,
serve_path,
query_params.sort,
query_params.order,
color_scheme,
default_color_scheme,
false,
true,
)
.into_string(),
))
}
}
} else {
Ok(HttpResponse::Ok()
.content_type("text/html; charset=utf-8")
.body(
renderer::page(
serve_path,
entries,
is_root,
page_parent,
query_params.sort,
query_params.order,
default_color_scheme,
color_scheme,
file_upload,
&upload_route,
&current_dir.display().to_string(),
)
.into_string(),
))
}
}
pub fn extract_query_parameters<S>(req: &HttpRequest<S>) -> QueryParameters {
match Query::<QueryParameters>::extract(req) {
Ok(query) => QueryParameters {
sort: query.sort,
order: query.order,
download: query.download.clone(),
theme: query.theme,
path: query.path.clone(),
},
Err(e) => {
let err = ContextualError::ParseError("query parameters".to_string(), e.to_string());
errors::log_error_chain(err.to_string());
QueryParameters {
sort: None,
order: None,
download: None,
theme: None,
path: None,
}
}
}
} | /// Entry
pub struct Entry {
/// Name of the entry
pub name: String,
| random_line_split |
app.rs | //! Contains the main types a user needs to interact with to configure and run a skulpin app
use crate::skia_safe;
use crate::winit;
use super::app_control::AppControl;
use super::input_state::InputState;
use super::time_state::TimeState;
use super::util::PeriodicEvent;
use skulpin_renderer::LogicalSize;
use skulpin_renderer::Size;
use skulpin_renderer::RendererBuilder;
use skulpin_renderer::CoordinateSystem;
use skulpin_renderer::CoordinateSystemHelper;
use skulpin_renderer::ValidationMode;
use skulpin_renderer::rafx::api::RafxError;
use crate::rafx::api::RafxExtents2D;
/// Represents an error from creating the renderer
#[derive(Debug)]
pub enum AppError {
RafxError(skulpin_renderer::rafx::api::RafxError),
WinitError(winit::error::OsError),
}
impl std::error::Error for AppError {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
match *self {
AppError::RafxError(ref e) => Some(e),
AppError::WinitError(ref e) => Some(e),
}
}
}
impl core::fmt::Display for AppError { | match *self {
AppError::RafxError(ref e) => e.fmt(fmt),
AppError::WinitError(ref e) => e.fmt(fmt),
}
}
}
impl From<RafxError> for AppError {
fn from(result: RafxError) -> Self {
AppError::RafxError(result)
}
}
impl From<winit::error::OsError> for AppError {
fn from(result: winit::error::OsError) -> Self {
AppError::WinitError(result)
}
}
pub struct AppUpdateArgs<'a, 'b, 'c> {
pub app_control: &'a mut AppControl,
pub input_state: &'b InputState,
pub time_state: &'c TimeState,
}
pub struct AppDrawArgs<'a, 'b, 'c, 'd> {
pub app_control: &'a AppControl,
pub input_state: &'b InputState,
pub time_state: &'c TimeState,
pub canvas: &'d mut skia_safe::Canvas,
pub coordinate_system_helper: CoordinateSystemHelper,
}
/// A skulpin app requires implementing the AppHandler. A separate update and draw call must be
/// implemented.
///
/// `update` is called when winit provides a `winit::event::Event::MainEventsCleared` message
///
/// `draw` is called when winit provides a `winit::event::RedrawRequested` message
///
/// I would recommend putting general logic you always want to run in the `update` and just
/// rendering code in the `draw`.
pub trait AppHandler {
/// Called frequently, this is the intended place to put non-rendering logic
fn update(
&mut self,
update_args: AppUpdateArgs,
);
/// Called frequently, this is the intended place to put drawing code
fn draw(
&mut self,
draw_args: AppDrawArgs,
);
fn fatal_error(
&mut self,
error: &AppError,
);
}
/// Used to configure the app behavior and create the app
pub struct AppBuilder {
inner_size: Size,
window_title: String,
renderer_builder: RendererBuilder,
}
impl Default for AppBuilder {
fn default() -> Self {
AppBuilder::new()
}
}
impl AppBuilder {
/// Construct the app builder initialized with default options
pub fn new() -> Self {
AppBuilder {
inner_size: LogicalSize::new(900, 600).into(),
window_title: "Skulpin".to_string(),
renderer_builder: RendererBuilder::new(),
}
}
/// Specifies the inner size of the window. Both physical and logical coordinates are accepted.
pub fn inner_size<S: Into<Size>>(
mut self,
inner_size: S,
) -> Self {
self.inner_size = inner_size.into();
self
}
/// Specifies the title that the window will be created with
pub fn window_title<T: Into<String>>(
mut self,
window_title: T,
) -> Self {
self.window_title = window_title.into();
self
}
/// Determine the coordinate system to use for the canvas. This can be overridden by using the
/// canvas sizer passed into the draw callback
pub fn coordinate_system(
mut self,
coordinate_system: CoordinateSystem,
) -> Self {
self.renderer_builder = self.renderer_builder.coordinate_system(coordinate_system);
self
}
/// Set the validation mode in rafx. For skulpin, this essentially means turning the vulkan
/// debug layers on/off.
pub fn validation_mode(
mut self,
validation_mode: ValidationMode,
) -> Self {
self.renderer_builder = self.renderer_builder.validation_mode(validation_mode);
self
}
/// Start the app. `app_handler` must be an implementation of [skulpin::app::AppHandler].
/// This does not return because winit does not return. For consistency, we use the
/// fatal_error() callback on the passed in AppHandler.
pub fn run<T: 'static + AppHandler>(
self,
app_handler: T,
) -> ! {
App::run(
app_handler,
self.inner_size,
self.window_title.clone(),
self.renderer_builder,
)
}
}
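// Illustrative sketch, not part of the original file: a do-nothing AppHandler wired up
// through AppBuilder. The window title and size are arbitrary example values.
struct ExampleApp;

impl AppHandler for ExampleApp {
    fn update(&mut self, _update_args: AppUpdateArgs) {}
    fn draw(&mut self, _draw_args: AppDrawArgs) {}
    fn fatal_error(&mut self, error: &AppError) {
        println!("fatal error: {}", error);
    }
}

#[allow(dead_code)]
fn run_example_app() -> ! {
    AppBuilder::new()
        .window_title("example window")
        .inner_size(LogicalSize::new(640, 480))
        .run(ExampleApp)
}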
/// Constructed by `AppBuilder` which immediately calls `run`.
pub struct App {}
impl App {
/// Runs the app. This is called by `AppBuilder::run`. This does not return because winit does
/// not return. For consistency, we use the fatal_error() callback on the passed in AppHandler.
pub fn run<T: 'static + AppHandler>(
mut app_handler: T,
inner_size: Size,
window_title: String,
renderer_builder: RendererBuilder,
) -> ! {
// Create the event loop
let event_loop = winit::event_loop::EventLoop::<()>::with_user_event();
let winit_size = match inner_size {
Size::Physical(physical_size) => winit::dpi::Size::Physical(
winit::dpi::PhysicalSize::new(physical_size.width, physical_size.height),
),
Size::Logical(logical_size) => winit::dpi::Size::Logical(winit::dpi::LogicalSize::new(
logical_size.width as f64,
logical_size.height as f64,
)),
};
// Create a single window
let window_result = winit::window::WindowBuilder::new()
.with_title(window_title)
.with_inner_size(winit_size)
.build(&event_loop);
let window = match window_result {
Ok(window) => window,
Err(e) => {
warn!("Passing WindowBuilder::build() error to app {}", e);
let app_error = e.into();
app_handler.fatal_error(&app_error);
// Exiting in this way is consistent with how we will exit if we fail within the
// input loop
std::process::exit(0);
}
};
let mut app_control = AppControl::default();
let mut time_state = TimeState::new();
let mut input_state = InputState::new(&window);
let window_size = window.inner_size();
let window_extents = RafxExtents2D {
width: window_size.width,
height: window_size.height,
};
let renderer_result = renderer_builder.build(&window, window_extents);
let mut renderer = match renderer_result {
Ok(renderer) => renderer,
Err(e) => {
warn!("Passing RendererBuilder::build() error to app {}", e);
let app_error = e.into();
app_handler.fatal_error(&app_error);
// Exiting in this way is consistent with how we will exit if we fail within the
// input loop
std::process::exit(0);
}
};
// To print fps once per second
let mut print_fps_event = PeriodicEvent::default();
// Pass control of this thread to winit until the app terminates. If this app wants to quit,
// the update loop should send the appropriate event via the channel
event_loop.run(move |event, window_target, control_flow| {
input_state.handle_winit_event(&mut app_control, &event, window_target);
match event {
winit::event::Event::MainEventsCleared => {
time_state.update();
if print_fps_event.try_take_event(
time_state.current_instant(),
std::time::Duration::from_secs(1),
) {
debug!("fps: {}", time_state.updates_per_second());
}
app_handler.update(AppUpdateArgs {
app_control: &mut app_control,
input_state: &input_state,
time_state: &time_state,
});
// Call this to mark the start of the next frame (i.e. "key just down" will return false)
input_state.end_frame();
// Queue a RedrawRequested event.
window.request_redraw();
}
winit::event::Event::RedrawRequested(_window_id) => {
let window_size = window.inner_size();
let window_extents = RafxExtents2D {
width: window_size.width,
height: window_size.height,
};
if let Err(e) = renderer.draw(
window_extents,
window.scale_factor(),
|canvas, coordinate_system_helper| {
app_handler.draw(AppDrawArgs {
app_control: &app_control,
input_state: &input_state,
time_state: &time_state,
canvas,
coordinate_system_helper,
});
},
) {
warn!("Passing Renderer::draw() error to app {}", e);
app_handler.fatal_error(&e.into());
app_control.enqueue_terminate_process();
}
}
_ => {}
}
if app_control.should_terminate_process() {
*control_flow = winit::event_loop::ControlFlow::Exit
}
});
}
} | fn fmt(
&self,
fmt: &mut core::fmt::Formatter,
) -> core::fmt::Result { | random_line_split |
app.rs | //! Contains the main types a user needs to interact with to configure and run a skulpin app
use crate::skia_safe;
use crate::winit;
use super::app_control::AppControl;
use super::input_state::InputState;
use super::time_state::TimeState;
use super::util::PeriodicEvent;
use skulpin_renderer::LogicalSize;
use skulpin_renderer::Size;
use skulpin_renderer::RendererBuilder;
use skulpin_renderer::CoordinateSystem;
use skulpin_renderer::CoordinateSystemHelper;
use skulpin_renderer::ValidationMode;
use skulpin_renderer::rafx::api::RafxError;
use crate::rafx::api::RafxExtents2D;
/// Represents an error from creating the renderer
#[derive(Debug)]
pub enum AppError {
RafxError(skulpin_renderer::rafx::api::RafxError),
WinitError(winit::error::OsError),
}
impl std::error::Error for AppError {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
match *self {
AppError::RafxError(ref e) => Some(e),
AppError::WinitError(ref e) => Some(e),
}
}
}
impl core::fmt::Display for AppError {
fn fmt(
&self,
fmt: &mut core::fmt::Formatter,
) -> core::fmt::Result {
match *self {
AppError::RafxError(ref e) => e.fmt(fmt),
AppError::WinitError(ref e) => e.fmt(fmt),
}
}
}
impl From<RafxError> for AppError {
fn from(result: RafxError) -> Self {
AppError::RafxError(result)
}
}
impl From<winit::error::OsError> for AppError {
fn from(result: winit::error::OsError) -> Self {
AppError::WinitError(result)
}
}
pub struct AppUpdateArgs<'a, 'b, 'c> {
pub app_control: &'a mut AppControl,
pub input_state: &'b InputState,
pub time_state: &'c TimeState,
}
pub struct AppDrawArgs<'a, 'b, 'c, 'd> {
pub app_control: &'a AppControl,
pub input_state: &'b InputState,
pub time_state: &'c TimeState,
pub canvas: &'d mut skia_safe::Canvas,
pub coordinate_system_helper: CoordinateSystemHelper,
}
/// A skulpin app requires implementing the AppHandler. A separate update and draw call must be
/// implemented.
///
/// `update` is called when winit provides a `winit::event::Event::MainEventsCleared` message
///
/// `draw` is called when winit provides a `winit::event::RedrawRequested` message
///
/// I would recommend putting general logic you always want to run in the `update` and just
/// rendering code in the `draw`.
pub trait AppHandler {
/// Called frequently, this is the intended place to put non-rendering logic
fn update(
&mut self,
update_args: AppUpdateArgs,
);
/// Called frequently, this is the intended place to put drawing code
fn draw(
&mut self,
draw_args: AppDrawArgs,
);
fn fatal_error(
&mut self,
error: &AppError,
);
}
/// Used to configure the app behavior and create the app
pub struct AppBuilder {
inner_size: Size,
window_title: String,
renderer_builder: RendererBuilder,
}
impl Default for AppBuilder {
fn default() -> Self {
AppBuilder::new()
}
}
impl AppBuilder {
/// Construct the app builder initialized with default options
pub fn new() -> Self {
AppBuilder {
inner_size: LogicalSize::new(900, 600).into(),
window_title: "Skulpin".to_string(),
renderer_builder: RendererBuilder::new(),
}
}
/// Specifies the inner size of the window. Both physical and logical coordinates are accepted.
pub fn inner_size<S: Into<Size>>(
mut self,
inner_size: S,
) -> Self {
self.inner_size = inner_size.into();
self
}
/// Specifies the title that the window will be created with
pub fn window_title<T: Into<String>>(
mut self,
window_title: T,
) -> Self {
self.window_title = window_title.into();
self
}
/// Determine the coordinate system to use for the canvas. This can be overridden by using the
/// canvas sizer passed into the draw callback
pub fn coordinate_system(
mut self,
coordinate_system: CoordinateSystem,
) -> Self {
self.renderer_builder = self.renderer_builder.coordinate_system(coordinate_system);
self
}
/// Set the validation mode in rafx. For skulpin, this essentially means turning the vulkan
/// debug layers on/off.
pub fn validation_mode(
mut self,
validation_mode: ValidationMode,
) -> Self {
self.renderer_builder = self.renderer_builder.validation_mode(validation_mode);
self
}
/// Start the app. `app_handler` must be an implementation of [skulpin::app::AppHandler].
/// This does not return because winit does not return. For consistency, we use the
/// fatal_error() callback on the passed in AppHandler.
pub fn run<T:'static + AppHandler>(
self,
app_handler: T,
) ->! {
App::run(
app_handler,
self.inner_size,
self.window_title.clone(),
self.renderer_builder,
)
}
}
/// Constructed by `AppBuilder` which immediately calls `run`.
pub struct App {}
impl App {
/// Runs the app. This is called by `AppBuilder::run`. This does not return because winit does
/// not return. For consistency, we use the fatal_error() callback on the passed in AppHandler.
pub fn run<T:'static + AppHandler>(
mut app_handler: T,
inner_size: Size,
window_title: String,
renderer_builder: RendererBuilder,
) ->! {
// Create the event loop
let event_loop = winit::event_loop::EventLoop::<()>::with_user_event();
let winit_size = match inner_size {
Size::Physical(physical_size) => winit::dpi::Size::Physical(
winit::dpi::PhysicalSize::new(physical_size.width, physical_size.height),
),
Size::Logical(logical_size) => winit::dpi::Size::Logical(winit::dpi::LogicalSize::new(
logical_size.width as f64,
logical_size.height as f64,
)),
};
// Create a single window
let window_result = winit::window::WindowBuilder::new()
.with_title(window_title)
.with_inner_size(winit_size)
.build(&event_loop);
let window = match window_result {
Ok(window) => window,
Err(e) => {
warn!("Passing WindowBuilder::build() error to app {}", e);
let app_error = e.into();
app_handler.fatal_error(&app_error);
// Exiting in this way is consistent with how we will exit if we fail within the
// input loop
std::process::exit(0);
}
};
let mut app_control = AppControl::default();
let mut time_state = TimeState::new();
let mut input_state = InputState::new(&window);
let window_size = window.inner_size();
let window_extents = RafxExtents2D {
width: window_size.width,
height: window_size.height,
};
let renderer_result = renderer_builder.build(&window, window_extents);
let mut renderer = match renderer_result {
Ok(renderer) => renderer,
Err(e) => {
warn!("Passing RendererBuilder::build() error to app {}", e);
let app_error = e.into();
app_handler.fatal_error(&app_error);
// Exiting in this way is consistent with how we will exit if we fail within the
// input loop
std::process::exit(0);
}
};
// To print fps once per second
let mut print_fps_event = PeriodicEvent::default();
// Pass control of this thread to winit until the app terminates. If this app wants to quit,
// the update loop should send the appropriate event via the channel
event_loop.run(move |event, window_target, control_flow| {
input_state.handle_winit_event(&mut app_control, &event, window_target);
match event {
winit::event::Event::MainEventsCleared => {
time_state.update();
if print_fps_event.try_take_event(
time_state.current_instant(),
std::time::Duration::from_secs(1),
) {
debug!("fps: {}", time_state.updates_per_second());
}
app_handler.update(AppUpdateArgs {
app_control: &mut app_control,
input_state: &input_state,
time_state: &time_state,
});
// Call this to mark the start of the next frame (i.e. "key just down" will return false)
input_state.end_frame();
// Queue a RedrawRequested event.
window.request_redraw();
}
winit::event::Event::RedrawRequested(_window_id) => {
let window_size = window.inner_size();
let window_extents = RafxExtents2D {
width: window_size.width,
height: window_size.height,
};
if let Err(e) = renderer.draw(
window_extents,
window.scale_factor(),
|canvas, coordinate_system_helper| {
app_handler.draw(AppDrawArgs {
app_control: &app_control,
input_state: &input_state,
time_state: &time_state,
canvas,
coordinate_system_helper,
});
},
) {
warn!("Passing Renderer::draw() error to app {}", e);
app_handler.fatal_error(&e.into());
app_control.enqueue_terminate_process();
}
}
_ => {}
}
if app_control.should_terminate_process() {
*control_flow = winit::event_loop::ControlFlow::Exit
}
});
}
}
| {} | conditional_block |
app.rs | //! Contains the main types a user needs to interact with to configure and run a skulpin app
use crate::skia_safe;
use crate::winit;
use super::app_control::AppControl;
use super::input_state::InputState;
use super::time_state::TimeState;
use super::util::PeriodicEvent;
use skulpin_renderer::LogicalSize;
use skulpin_renderer::Size;
use skulpin_renderer::RendererBuilder;
use skulpin_renderer::CoordinateSystem;
use skulpin_renderer::CoordinateSystemHelper;
use skulpin_renderer::ValidationMode;
use skulpin_renderer::rafx::api::RafxError;
use crate::rafx::api::RafxExtents2D;
/// Represents an error from creating the renderer
#[derive(Debug)]
pub enum AppError {
RafxError(skulpin_renderer::rafx::api::RafxError),
WinitError(winit::error::OsError),
}
impl std::error::Error for AppError {
fn source(&self) -> Option<&(dyn std::error::Error +'static)> {
match *self {
AppError::RafxError(ref e) => Some(e),
AppError::WinitError(ref e) => Some(e),
}
}
}
impl core::fmt::Display for AppError {
fn fmt(
&self,
fmt: &mut core::fmt::Formatter,
) -> core::fmt::Result {
match *self {
AppError::RafxError(ref e) => e.fmt(fmt),
AppError::WinitError(ref e) => e.fmt(fmt),
}
}
}
impl From<RafxError> for AppError {
fn from(result: RafxError) -> Self {
AppError::RafxError(result)
}
}
impl From<winit::error::OsError> for AppError {
fn from(result: winit::error::OsError) -> Self {
AppError::WinitError(result)
}
}
pub struct AppUpdateArgs<'a, 'b, 'c> {
pub app_control: &'a mut AppControl,
pub input_state: &'b InputState,
pub time_state: &'c TimeState,
}
pub struct AppDrawArgs<'a, 'b, 'c, 'd> {
pub app_control: &'a AppControl,
pub input_state: &'b InputState,
pub time_state: &'c TimeState,
pub canvas: &'d mut skia_safe::Canvas,
pub coordinate_system_helper: CoordinateSystemHelper,
}
/// A skulpin app requires implementing the AppHandler. A separate update and draw call must be
/// implemented.
///
/// `update` is called when winit provides a `winit::event::Event::MainEventsCleared` message
///
/// `draw` is called when winit provides a `winit::event::Event::RedrawRequested` message
///
/// I would recommend putting general logic you always want to run in the `update` and just
/// rendering code in the `draw`.
pub trait AppHandler {
/// Called frequently, this is the intended place to put non-rendering logic
fn update(
&mut self,
update_args: AppUpdateArgs,
);
/// Called frequently, this is the intended place to put drawing code
fn draw(
&mut self,
draw_args: AppDrawArgs,
);
fn fatal_error(
&mut self,
error: &AppError,
);
}
/// Used to configure the app behavior and create the app
pub struct AppBuilder {
inner_size: Size,
window_title: String,
renderer_builder: RendererBuilder,
}
impl Default for AppBuilder {
fn default() -> Self {
AppBuilder::new()
}
}
impl AppBuilder {
/// Construct the app builder initialized with default options
pub fn new() -> Self {
AppBuilder {
inner_size: LogicalSize::new(900, 600).into(),
window_title: "Skulpin".to_string(),
renderer_builder: RendererBuilder::new(),
}
}
/// Specifies the inner size of the window. Both physical and logical coordinates are accepted.
pub fn inner_size<S: Into<Size>>(
mut self,
inner_size: S,
) -> Self {
self.inner_size = inner_size.into();
self
}
/// Specifies the title that the window will be created with
pub fn window_title<T: Into<String>>(
mut self,
window_title: T,
) -> Self {
self.window_title = window_title.into();
self
}
/// Determine the coordinate system to use for the canvas. This can be overridden by using the
/// canvas sizer passed into the draw callback
pub fn coordinate_system(
mut self,
coordinate_system: CoordinateSystem,
) -> Self {
self.renderer_builder = self.renderer_builder.coordinate_system(coordinate_system);
self
}
/// Set the validation mode in rafx. For skulpin, this essentially means turning the vulkan
/// debug layers on/off.
pub fn validation_mode(
mut self,
validation_mode: ValidationMode,
) -> Self {
self.renderer_builder = self.renderer_builder.validation_mode(validation_mode);
self
}
/// Start the app. `app_handler` must be an implementation of [skulpin::app::AppHandler].
/// This does not return because winit does not return. For consistency, we use the
/// fatal_error() callback on the passed in AppHandler.
pub fn run<T:'static + AppHandler>(
self,
app_handler: T,
) ->! {
App::run(
app_handler,
self.inner_size,
self.window_title.clone(),
self.renderer_builder,
)
}
}
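// --- Illustrative usage sketch (not part of the original source): configuring and
// starting an app with the builder above. The handler is any type implementing
// `AppHandler`; `run` hands control to winit and never returns.
fn example_main(handler: impl AppHandler + 'static) -> ! {
    AppBuilder::new()
        .window_title("Skulpin example")
        .inner_size(LogicalSize::new(900, 600))
        .run(handler)
}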
/// Constructed by `AppBuilder` which immediately calls `run`.
pub struct App {}
impl App {
/// Runs the app. This is called by `AppBuilder::run`. This does not return because winit does
/// not return. For consistency, we use the fatal_error() callback on the passed in AppHandler.
pub fn run<T:'static + AppHandler>(
mut app_handler: T,
inner_size: Size,
window_title: String,
renderer_builder: RendererBuilder,
) ->! {
// Create the event loop
let event_loop = winit::event_loop::EventLoop::<()>::with_user_event();
let winit_size = match inner_size {
Size::Physical(physical_size) => winit::dpi::Size::Physical(
winit::dpi::PhysicalSize::new(physical_size.width, physical_size.height),
),
Size::Logical(logical_size) => winit::dpi::Size::Logical(winit::dpi::LogicalSize::new(
logical_size.width as f64,
logical_size.height as f64,
)),
};
// Create a single window
let window_result = winit::window::WindowBuilder::new()
.with_title(window_title)
.with_inner_size(winit_size)
.build(&event_loop);
let window = match window_result {
Ok(window) => window,
Err(e) => {
warn!("Passing WindowBuilder::build() error to app {}", e);
let app_error = e.into();
app_handler.fatal_error(&app_error);
// Exiting in this way is consistent with how we will exit if we fail within the
// input loop
std::process::exit(0);
}
};
let mut app_control = AppControl::default();
let mut time_state = TimeState::new();
let mut input_state = InputState::new(&window);
let window_size = window.inner_size();
let window_extents = RafxExtents2D {
width: window_size.width,
height: window_size.height,
};
let renderer_result = renderer_builder.build(&window, window_extents);
let mut renderer = match renderer_result {
Ok(renderer) => renderer,
Err(e) => {
warn!("Passing RendererBuilder::build() error to app {}", e);
let app_error = e.into();
app_handler.fatal_error(&app_error);
// Exiting in this way is consistent with how we will exit if we fail within the
// input loop
std::process::exit(0);
}
};
// To print fps once per second
let mut print_fps_event = PeriodicEvent::default();
// Pass control of this thread to winit until the app terminates. If this app wants to quit,
// the update loop should send the appropriate event via the channel
event_loop.run(move |event, window_target, control_flow| {
input_state.handle_winit_event(&mut app_control, &event, window_target);
match event {
winit::event::Event::MainEventsCleared => {
time_state.update();
if print_fps_event.try_take_event(
time_state.current_instant(),
std::time::Duration::from_secs(1),
) {
debug!("fps: {}", time_state.updates_per_second());
}
app_handler.update(AppUpdateArgs {
app_control: &mut app_control,
input_state: &input_state,
time_state: &time_state,
});
// Call this to mark the start of the next frame (i.e. "key just down" will return false)
input_state.end_frame();
// Queue a RedrawRequested event.
window.request_redraw();
}
winit::event::Event::RedrawRequested(_window_id) => {
let window_size = window.inner_size();
let window_extents = RafxExtents2D {
width: window_size.width,
height: window_size.height,
};
if let Err(e) = renderer.draw(
window_extents,
window.scale_factor(),
|canvas, coordinate_system_helper| {
app_handler.draw(AppDrawArgs {
app_control: &app_control,
input_state: &input_state,
time_state: &time_state,
canvas,
coordinate_system_helper,
});
},
) {
warn!("Passing Renderer::draw() error to app {}", e);
app_handler.fatal_error(&e.into());
app_control.enqueue_terminate_process();
}
}
_ => {}
}
if app_control.should_terminate_process() {
*control_flow = winit::event_loop::ControlFlow::Exit
}
});
}
}
neexe.rs | use bitflags::bitflags;
use byteorder::{ByteOrder, LE};
use custom_error::custom_error;
use crate::util::read_pascal_string;
use enum_primitive::*;
use nom::{apply, count, do_parse, le_u8, le_u16, le_u32, named, named_args, tag, take};
macro_rules! try_parse (
($result: expr, $error: expr) => (match $result {
Ok((_, result)) => result,
Err(_) => { return Err($error); }
})
);
custom_error!{pub ParseError
NotMZ = "invalid MZ header",
NotNE = "invalid NE header",
SegmentHeader{ segment_number: u16 } = "invalid segment {segment_number} header",
SelfLoadHeader = "invalid self-load header"
}
named!(parse_ne_offset<u16>,
do_parse!(
tag!("MZ") >>
take!(58) >>
ne_offset: le_u16 >>
(ne_offset)
)
);
bitflags!(pub struct NEFlags: u16 {
const SINGLE_DATA = 0x0001;
const MULTIPLE_DATA = 0x0002;
const GLOBAL_INIT = 0x0004;
const PROTECTED_MODE = 0x0008;
// There seems to be some disagreement as to what these high nibble bits
// mean, but they are sometimes set so they should probably not be ignored
const WIN32S = 0x0010;
const INST_286 = 0x0020;
const INST_386 = 0x0040;
const INST_X87 = 0x0080;
const FULLSCREEN = 0x0100;
const CONSOLE = 0x0200;
const GUI = 0x0300;
const SELF_LOAD = 0x0800;
const LINKER_ERROR = 0x2000;
const CALL_WEP = 0x4000;
const LIB_MODULE = 0x8000;
});
#[derive(Clone, Debug)]
pub struct NEHeader {
pub linker_major_version: u8,
pub linker_minor_version: u8,
pub entry_table_offset: u16,
pub entry_table_size: u16,
pub crc: u32,
pub flags: NEFlags,
pub auto_data_segment_index: u16,
pub heap_size: u16,
pub stack_size: u16,
pub entry_point: u32,
pub init_stack_pointer: u32,
pub num_segments: u16,
pub num_imports: u16,
pub non_resident_table_size: u16,
pub segment_table_offset: u16, // bytes, from start of NEHeader
pub resource_table_offset: u16,
pub names_table_offset: u16,
pub module_table_offset: u16,
pub import_names_table_offset: u16,
pub non_resident_table_offset: u32,
pub num_movable_entry_point: u16,
pub alignment_shift_count: u16, // 1 << alignment_shift_count = logical sector
pub num_resources: u16,
pub target_os: u8,
pub os2_flags: u8,
pub thunk_offset: u16,
pub segment_thunk_offset: u16,
pub min_code_swap_size: u16,
pub win_version_minor: u8,
pub win_version_major: u8,
}
bitflags!(pub struct NESegmentFlags: u16 {
const CODE = 0x0000;
const DATA = 0x0001;
const MOVABLE = 0x0010;
const PRELOAD = 0x0040;
const HAS_RELOC = 0x0100;
const PRIORITY = 0xF000;
});
named!(read_ne_header<NEHeader>,
do_parse!(
tag!("NE") >>
linker_major_version: le_u8 >>
linker_minor_version: le_u8 >>
entry_table_offset: le_u16 >> // relative to beginning of header
entry_table_size: le_u16 >> // bytes
crc: le_u32 >>
flags: le_u16 >>
auto_data_segment_index: le_u16 >>
heap_size: le_u16 >>
stack_size: le_u16 >>
entry_point: le_u32 >> // cs:ip
init_stack_pointer: le_u32 >> // ss:sp
num_segments: le_u16 >>
num_imports: le_u16 >>
non_resident_table_size: le_u16 >>
segment_table_offset: le_u16 >>
resource_table_offset: le_u16 >>
names_table_offset: le_u16 >>
module_table_offset: le_u16 >>
import_names_table_offset: le_u16 >>
non_resident_table_offset: le_u32 >>
num_movable_entry_point: le_u16 >>
alignment_shift_count: le_u16 >>
num_resources: le_u16 >>
target_os: le_u8 >>
os2_flags: le_u8 >>
thunk_offset: le_u16 >>
segment_thunk_offset: le_u16 >>
min_code_swap_size: le_u16 >>
win_version_minor: le_u8 >>
win_version_major: le_u8 >>
(NEHeader {
linker_major_version,
linker_minor_version,
entry_table_offset,
entry_table_size,
crc,
flags: NEFlags::from_bits_truncate(flags),
auto_data_segment_index,
heap_size,
stack_size,
entry_point,
init_stack_pointer,
num_segments,
num_imports,
non_resident_table_size,
segment_table_offset,
resource_table_offset,
names_table_offset,
module_table_offset,
import_names_table_offset,
non_resident_table_offset,
num_movable_entry_point,
alignment_shift_count,
num_resources,
target_os,
os2_flags,
thunk_offset,
segment_thunk_offset,
min_code_swap_size,
win_version_minor,
win_version_major
})
)
);
#[derive(Clone, Debug)]
pub struct NESegmentEntry {
pub offset: u32, // bytes
pub data_size: u32, // bytes
pub flags: NESegmentFlags,
pub alloc_size: u32, // bytes
}
named_args!(parse_segment_header(offset_shift: u16)<NESegmentEntry>,
do_parse!(
offset: le_u16 >>
data_size: le_u16 >>
flags: le_u16 >>
alloc_size: le_u16 >>
(NESegmentEntry {
offset: u32::from(offset) << offset_shift,
data_size: if data_size == 0 { 0x10000 } else { data_size.into() },
flags: NESegmentFlags::from_bits_truncate(flags),
alloc_size: if alloc_size == 0 { 0x10000 } else { alloc_size.into() }
})
)
);
named_args!(parse_segments(offset_shift: u16, num_segments: u16)<Vec<NESegmentEntry> >,
count!(apply!(parse_segment_header, offset_shift), num_segments as usize)
);
bitflags!(pub struct NEResourceFlags: u16 {
const MOVABLE = 0x10;
const PURE = 0x20;
const PRELOAD = 0x40;
});
enum_from_primitive! {
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum NEPredefinedResourceKind {
Cursor = 1,
Bitmap = 2,
Icon = 3,
Menu = 4,
Dialog = 5,
StringTable = 6,
FontDirectory = 7,
FontResource = 8,
AcceleratorTable = 9,
RawData = 10,
MessageTable = 11,
GroupCursor = 12,
GroupIcon = 14,
// NameTable: https://hackernoon.com/win3mu-part-5-windows-3-executable-files-c2affeec0e5
NameTable = 15,
Version = 16,
DlgInclude = 17,
PlugPlay = 19,
VXD = 20,
AnimatedCursor = 21,
AnimatedIcon = 22,
HTML = 23,
Manifest = 24,
}
}
#[derive(Clone, Debug)]
pub enum NEResourceId {
Integer(u16),
String(String),
}
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum NEResourceKind {
Predefined(NEPredefinedResourceKind),
Integer(u16),
String(String),
}
#[derive(Clone, Debug)]
pub struct NEResourceEntry {
pub kind: NEResourceKind,
pub id: NEResourceId,
pub offset: u32, // bytes
pub length: u32, // bytes
pub flags: NEResourceFlags,
}
named_args!(read_resource<'a>(resource_table: &'a [u8], kind: NEResourceKind, offset_shift: u16)<NEResourceEntry>,
do_parse!(
offset: le_u16 >> // in sectors
length: le_u16 >> // in sectors
flags: le_u16 >>
id: le_u16 >>
/* reserved */ le_u32 >>
(NEResourceEntry {
offset: u32::from(offset) << offset_shift,
length: u32::from(length) << offset_shift,
flags: NEResourceFlags::from_bits_truncate(flags),
kind,
id: if id & 0x8000!= 0 {
NEResourceId::Integer(id & 0x7fff)
} else {
NEResourceId::String(read_pascal_string(&resource_table[id as usize..]).unwrap().1)
}
})
)
);
enum_from_primitive! {
#[derive(Clone, Debug)]
pub enum NESegmentRelocationSourceKind {
LoByte = 0,
Segment = 2,
FarAddr = 3,
Offset = 5,
}
}
#[derive(Clone, Debug)]
pub struct NESelfLoadHeader {
pub boot_app_offset: u32,
pub load_app_seg_offset: u32,
}
named!(read_selfload_header<NESelfLoadHeader>,
do_parse!(
tag!("A0") >>
take!(2) >> // reserved
boot_app_offset: le_u32 >> // segment:offset
load_app_seg_offset: le_u32 >> // segment:offset
take!(4) >> // reserved
take!(4) >> // mem alloc
take!(4) >> // ordinal resolve
take!(4) >> // exit
take!(2 * 4) >> // reserved
take!(4) >> // set owner
(NESelfLoadHeader {
boot_app_offset,
load_app_seg_offset
})
)
);
const SEGMENT_HEADER_SIZE: u16 = 8;
const FIXUP_SIZE: u16 = 8;
pub struct NEExecutable<'a> {
input: &'a [u8],
header: NEHeader,
header_offset: u16,
// A raw header slice is stored to make it easier to resolve offsets which
// are relative to the start of the NE header
raw_header: &'a [u8],
}
pub struct NEResourcesIterator<'a> {
table: &'a [u8],
index: usize,
table_kind: NEResourceKind,
offset_shift: u16,
block_index: u16,
block_len: u16,
finished: bool,
}
impl<'a> NEResourcesIterator<'a> {
pub fn new(table: &'a [u8]) -> NEResourcesIterator<'a> {
let offset_shift = LE::read_u16(table);
let mut iterator = NEResourcesIterator {
table,
index: 2,
table_kind: NEResourceKind::Integer(0xffff),
offset_shift,
block_index: 0,
block_len: 0,
finished: false,
};
iterator.load_next_block();
iterator
}
fn load_next_block(&mut self) {
let id = LE::read_u16(&self.table[self.index..]);
self.finished = id == 0;
if!self.finished {
self.table_kind = if id & 0x8000!= 0 {
let id = id & 0x7fff;
if let Some(kind) = NEPredefinedResourceKind::from_u16(id) {
NEResourceKind::Predefined(kind)
} else {
NEResourceKind::Integer(id)
}
} else {
NEResourceKind::String(read_pascal_string(&self.table[self.index + id as usize..]).unwrap().1)
};
self.block_index = 0;
self.block_len = LE::read_u16(&self.table[self.index + 2..]);
self.index += 8;
}
}
}
impl<'a> Iterator for NEResourcesIterator<'a> {
type Item = NEResourceEntry;
fn next(&mut self) -> Option<Self::Item> {
if self.block_index == self.block_len {
self.load_next_block();
}
if self.finished {
None
} else {
let (_, header) = read_resource(&self.table[self.index..], self.table, self.table_kind.clone(), self.offset_shift).unwrap();
self.index += 12;
self.block_index += 1;
Some(header)
}
}
}
impl<'a> NEExecutable<'a> {
pub fn new(input: &'a [u8]) -> Result<Self, ParseError> {
let header_offset = try_parse!(parse_ne_offset(input), ParseError::NotMZ);
let raw_header = &input[header_offset as usize..];
let header = try_parse!(read_ne_header(raw_header), ParseError::NotNE);
Ok(NEExecutable {
input,
header,
header_offset, // TODO: Get rid of this
raw_header
})
}
pub fn raw_data(&self) -> &'a [u8] {
self.input
}
pub fn header_offset(&self) -> usize {
self.header_offset as usize
}
pub fn name(&self) -> Option<String> {
if self.header.non_resident_table_size == 0 {
None
} else {
let ne_non_resident_table = &self.input[self.header.non_resident_table_offset as usize..];
match read_pascal_string(&ne_non_resident_table) {
Ok((_, name)) => Some(name),
Err(_) => None
}
}
}
pub fn header(&self) -> &NEHeader {
&self.header
}
pub fn selfload_header(&self) -> Result<Option<(NESelfLoadHeader, &[u8])>, ParseError> {
if self.header.flags.contains(NEFlags::SELF_LOAD) {
Ok(Some(self.selfload_header_impl()?))
} else {
Ok(None)
}
}
/// # Arguments
/// * segment_number - 1-indexed segment number
pub fn segment_header(&self, segment_number: u16) -> Result<NESegmentEntry, ParseError> {
assert!(segment_number != 0 && segment_number <= self.header.num_segments, "segment number {} is out of range", segment_number);
let offset = self.header.segment_table_offset + ((segment_number - 1) * SEGMENT_HEADER_SIZE);
match parse_segment_header(&self.raw_header[offset as usize..], self.header.alignment_shift_count) {
Ok((_, header)) => Ok(header),
Err(_) => Err(ParseError::SegmentHeader{ segment_number })
}
}
/// # Arguments
/// * segment_number - 1-indexed segment number
pub fn segment_data(&self, segment_number: u16) -> Result<&[u8], ParseError> {
let header = self.segment_header(segment_number)?;
let data = &self.input[header.offset as usize..];
let mut size = header.data_size as usize;
if header.flags.contains(NESegmentFlags::HAS_RELOC) {
let fixup_table_size = LE::read_u16(&data[size..]) as usize * FIXUP_SIZE as usize;
size += fixup_table_size;
}
Ok(&data[..size])
}
pub fn resource_table_alignment_shift(&self) -> Option<u16> {
if let Some(table) = self.resource_table_data() {
Some(LE::read_u16(table))
} else {
None
}
}
pub fn resource_table_data(&self) -> Option<&[u8]> {
if self.has_resource_table() {
Some(&self.raw_header[self.header.resource_table_offset as usize..])
} else {
None
}
}
pub fn iter_resources(&self) -> NEResourcesIterator {
if self.has_resource_table() {
NEResourcesIterator::new(&self.raw_header[self.header.resource_table_offset as usize..])
} else {
NEResourcesIterator {
table: self.raw_header,
index: 0,
table_kind: NEResourceKind::Integer(0xffff),
offset_shift: 0,
block_index: 1,
block_len: 0,
finished: true
}
}
}
pub fn has_resource_table(&self) -> bool {
// In DIRAPI.DLL from Director for Windows, the resource table offset
// is non-zero but there is no resource table; the resource table offset
// and names table offset are identical.
self.header.resource_table_offset!= 0 && self.header.resource_table_offset!= self.header.names_table_offset
}
fn selfload_header_impl(&self) -> Result<(NESelfLoadHeader, &[u8]), ParseError> {
let segment_data = self.segment_data(1)?;
match read_selfload_header(segment_data) {
Ok(header) => Ok((header.1, header.0)),
Err(_) => Err(ParseError::SelfLoadHeader)
}
}
}
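// --- Illustrative sketch (not part of the original source): parsing an NE executable
// from raw bytes and listing its resources with the API defined above.
fn dump_resources(bytes: &[u8]) -> Result<(), ParseError> {
    let exe = NEExecutable::new(bytes)?;
    if let Some(name) = exe.name() {
        println!("module: {}", name);
    }
    for resource in exe.iter_resources() {
        // Offsets and lengths have already been shifted into byte units.
        println!("{:?} {:?}: {} bytes at offset {:#x}",
            resource.kind, resource.id, resource.length, resource.offset);
    }
    Ok(())
}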
neexe.rs | use bitflags::bitflags;
use byteorder::{ByteOrder, LE};
use custom_error::custom_error;
use crate::util::read_pascal_string;
use enum_primitive::*;
use nom::{apply, count, do_parse, le_u8, le_u16, le_u32, named, named_args, tag, take};
macro_rules! try_parse (
($result: expr, $error: expr) => (match $result {
Ok((_, result)) => result,
Err(_) => { return Err($error); }
})
);
custom_error!{pub ParseError
NotMZ = "invalid MZ header",
NotNE = "invalid NE header",
SegmentHeader{ segment_number: u16 } = "invalid segment {segment_number} header",
SelfLoadHeader = "invalid self-load header"
}
named!(parse_ne_offset<u16>,
do_parse!(
tag!("MZ") >>
take!(58) >>
ne_offset: le_u16 >>
(ne_offset)
)
);
bitflags!(pub struct NEFlags: u16 {
const SINGLE_DATA = 0x0001;
const MULTIPLE_DATA = 0x0002;
const GLOBAL_INIT = 0x0004;
const PROTECTED_MODE = 0x0008;
// There seems to be some disagreement as to what these high nibble bits
// mean, but they are sometimes set so they should probably not be ignored
const WIN32S = 0x0010;
const INST_286 = 0x0020;
const INST_386 = 0x0040;
const INST_X87 = 0x0080;
const FULLSCREEN = 0x0100;
const CONSOLE = 0x0200;
const GUI = 0x0300;
const SELF_LOAD = 0x0800;
const LINKER_ERROR = 0x2000;
const CALL_WEP = 0x4000;
const LIB_MODULE = 0x8000;
});
#[derive(Clone, Debug)]
pub struct NEHeader {
pub linker_major_version: u8,
pub linker_minor_version: u8,
pub entry_table_offset: u16,
pub entry_table_size: u16,
pub crc: u32,
pub flags: NEFlags,
pub auto_data_segment_index: u16,
pub heap_size: u16,
pub stack_size: u16,
pub entry_point: u32,
pub init_stack_pointer: u32,
pub num_segments: u16,
pub num_imports: u16,
pub non_resident_table_size: u16,
pub segment_table_offset: u16, // bytes, from start of NEHeader
pub resource_table_offset: u16,
pub names_table_offset: u16,
pub module_table_offset: u16,
pub import_names_table_offset: u16,
pub non_resident_table_offset: u32,
pub num_movable_entry_point: u16,
pub alignment_shift_count: u16, // 1 << alignment_shift_count = logical sector
pub num_resources: u16,
pub target_os: u8,
pub os2_flags: u8,
pub thunk_offset: u16,
pub segment_thunk_offset: u16,
pub min_code_swap_size: u16,
pub win_version_minor: u8,
pub win_version_major: u8,
}
bitflags!(pub struct NESegmentFlags: u16 {
const CODE = 0x0000;
const DATA = 0x0001;
const MOVABLE = 0x0010;
const PRELOAD = 0x0040;
const HAS_RELOC = 0x0100;
const PRIORITY = 0xF000;
});
named!(read_ne_header<NEHeader>,
do_parse!(
tag!("NE") >>
linker_major_version: le_u8 >>
linker_minor_version: le_u8 >>
entry_table_offset: le_u16 >> // relative to beginning of header
entry_table_size: le_u16 >> // bytes
crc: le_u32 >>
flags: le_u16 >>
auto_data_segment_index: le_u16 >>
heap_size: le_u16 >>
stack_size: le_u16 >>
entry_point: le_u32 >> // cs:ip
init_stack_pointer: le_u32 >> // ss:sp
num_segments: le_u16 >>
num_imports: le_u16 >>
non_resident_table_size: le_u16 >>
segment_table_offset: le_u16 >>
resource_table_offset: le_u16 >>
names_table_offset: le_u16 >>
module_table_offset: le_u16 >>
import_names_table_offset: le_u16 >>
non_resident_table_offset: le_u32 >>
num_movable_entry_point: le_u16 >>
alignment_shift_count: le_u16 >>
num_resources: le_u16 >>
target_os: le_u8 >>
os2_flags: le_u8 >>
thunk_offset: le_u16 >>
segment_thunk_offset: le_u16 >>
min_code_swap_size: le_u16 >>
win_version_minor: le_u8 >>
win_version_major: le_u8 >>
(NEHeader {
linker_major_version,
linker_minor_version,
entry_table_offset,
entry_table_size,
crc,
flags: NEFlags::from_bits_truncate(flags),
auto_data_segment_index,
heap_size,
stack_size,
entry_point,
init_stack_pointer,
num_segments,
num_imports,
non_resident_table_size,
segment_table_offset,
resource_table_offset,
names_table_offset,
module_table_offset,
import_names_table_offset,
non_resident_table_offset,
num_movable_entry_point,
alignment_shift_count,
num_resources,
target_os,
os2_flags,
thunk_offset,
segment_thunk_offset,
min_code_swap_size,
win_version_minor,
win_version_major
})
)
);
#[derive(Clone, Debug)]
pub struct NESegmentEntry {
pub offset: u32, // bytes
pub data_size: u32, // bytes
pub flags: NESegmentFlags,
pub alloc_size: u32, // bytes
}
named_args!(parse_segment_header(offset_shift: u16)<NESegmentEntry>,
do_parse!(
offset: le_u16 >>
data_size: le_u16 >>
flags: le_u16 >>
alloc_size: le_u16 >>
(NESegmentEntry {
offset: u32::from(offset) << offset_shift,
data_size: if data_size == 0 { 0x10000 } else { data_size.into() },
flags: NESegmentFlags::from_bits_truncate(flags),
alloc_size: if alloc_size == 0 { 0x10000 } else { alloc_size.into() }
})
)
);
named_args!(parse_segments(offset_shift: u16, num_segments: u16)<Vec<NESegmentEntry> >,
count!(apply!(parse_segment_header, offset_shift), num_segments as usize)
);
bitflags!(pub struct NEResourceFlags: u16 {
const MOVABLE = 0x10;
const PURE = 0x20;
const PRELOAD = 0x40;
});
enum_from_primitive! {
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum NEPredefinedResourceKind {
Cursor = 1,
Bitmap = 2,
Icon = 3,
Menu = 4,
Dialog = 5,
StringTable = 6,
FontDirectory = 7,
FontResource = 8,
AcceleratorTable = 9,
RawData = 10,
MessageTable = 11,
GroupCursor = 12,
GroupIcon = 14,
// NameTable: https://hackernoon.com/win3mu-part-5-windows-3-executable-files-c2affeec0e5
NameTable = 15,
Version = 16,
DlgInclude = 17,
PlugPlay = 19,
VXD = 20,
AnimatedCursor = 21,
AnimatedIcon = 22,
HTML = 23,
Manifest = 24,
}
}
#[derive(Clone, Debug)]
pub enum NEResourceId {
Integer(u16),
String(String),
}
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum NEResourceKind {
Predefined(NEPredefinedResourceKind),
Integer(u16),
String(String),
}
#[derive(Clone, Debug)]
pub struct NEResourceEntry {
pub kind: NEResourceKind,
pub id: NEResourceId,
pub offset: u32, // bytes
pub length: u32, // bytes
pub flags: NEResourceFlags,
}
named_args!(read_resource<'a>(resource_table: &'a [u8], kind: NEResourceKind, offset_shift: u16)<NEResourceEntry>,
do_parse!(
offset: le_u16 >> // in sectors
length: le_u16 >> // in sectors
flags: le_u16 >>
id: le_u16 >>
/* reserved */ le_u32 >>
(NEResourceEntry {
offset: u32::from(offset) << offset_shift,
length: u32::from(length) << offset_shift,
flags: NEResourceFlags::from_bits_truncate(flags),
kind,
id: if id & 0x8000!= 0 {
NEResourceId::Integer(id & 0x7fff)
} else {
NEResourceId::String(read_pascal_string(&resource_table[id as usize..]).unwrap().1)
}
})
)
);
enum_from_primitive! {
#[derive(Clone, Debug)]
pub enum NESegmentRelocationSourceKind {
LoByte = 0,
Segment = 2,
FarAddr = 3,
Offset = 5,
}
}
#[derive(Clone, Debug)]
pub struct NESelfLoadHeader {
pub boot_app_offset: u32,
pub load_app_seg_offset: u32,
}
named!(read_selfload_header<NESelfLoadHeader>,
do_parse!(
tag!("A0") >>
take!(2) >> // reserved
boot_app_offset: le_u32 >> // segment:offset
load_app_seg_offset: le_u32 >> // segment:offset
take!(4) >> // reserved
take!(4) >> // mem alloc
take!(4) >> // ordinal resolve
take!(4) >> // exit
take!(2 * 4) >> // reserved
take!(4) >> // set owner
(NESelfLoadHeader {
boot_app_offset,
load_app_seg_offset
})
)
);
const SEGMENT_HEADER_SIZE: u16 = 8;
const FIXUP_SIZE: u16 = 8;
pub struct NEExecutable<'a> {
input: &'a [u8],
header: NEHeader,
header_offset: u16,
// A raw header slice is stored to make it easier to resolve offsets which
// are relative to the start of the NE header
raw_header: &'a [u8],
}
pub struct NEResourcesIterator<'a> {
table: &'a [u8],
index: usize,
table_kind: NEResourceKind,
offset_shift: u16,
block_index: u16,
block_len: u16,
finished: bool,
}
impl<'a> NEResourcesIterator<'a> {
pub fn new(table: &'a [u8]) -> NEResourcesIterator<'a> {
let offset_shift = LE::read_u16(table);
let mut iterator = NEResourcesIterator {
table,
index: 2,
table_kind: NEResourceKind::Integer(0xffff),
offset_shift,
block_index: 0,
block_len: 0,
finished: false,
};
iterator.load_next_block();
iterator
}
fn load_next_block(&mut self) {
let id = LE::read_u16(&self.table[self.index..]);
self.finished = id == 0;
if!self.finished {
self.table_kind = if id & 0x8000!= 0 {
let id = id & 0x7fff;
if let Some(kind) = NEPredefinedResourceKind::from_u16(id) {
NEResourceKind::Predefined(kind)
} else {
NEResourceKind::Integer(id)
}
} else {
NEResourceKind::String(read_pascal_string(&self.table[self.index + id as usize..]).unwrap().1)
};
self.block_index = 0;
self.block_len = LE::read_u16(&self.table[self.index + 2..]);
self.index += 8;
}
}
}
impl<'a> Iterator for NEResourcesIterator<'a> {
type Item = NEResourceEntry;
fn next(&mut self) -> Option<Self::Item> {
if self.block_index == self.block_len {
self.load_next_block();
}
if self.finished {
None
} else {
let (_, header) = read_resource(&self.table[self.index..], self.table, self.table_kind.clone(), self.offset_shift).unwrap();
self.index += 12;
self.block_index += 1;
Some(header)
}
}
}
impl<'a> NEExecutable<'a> {
pub fn new(input: &'a [u8]) -> Result<Self, ParseError> {
let header_offset = try_parse!(parse_ne_offset(input), ParseError::NotMZ);
let raw_header = &input[header_offset as usize..];
let header = try_parse!(read_ne_header(raw_header), ParseError::NotNE);
Ok(NEExecutable {
input,
header,
header_offset, // TODO: Get rid of this
raw_header
})
}
pub fn raw_data(&self) -> &'a [u8] {
self.input
}
pub fn header_offset(&self) -> usize {
self.header_offset as usize
}
pub fn name(&self) -> Option<String> {
if self.header.non_resident_table_size == 0 {
None
} else {
let ne_non_resident_table = &self.input[self.header.non_resident_table_offset as usize..];
match read_pascal_string(&ne_non_resident_table) {
Ok((_, name)) => Some(name),
Err(_) => None
}
}
}
pub fn header(&self) -> &NEHeader {
&self.header
}
pub fn selfload_header(&self) -> Result<Option<(NESelfLoadHeader, &[u8])>, ParseError> {
if self.header.flags.contains(NEFlags::SELF_LOAD) {
Ok(Some(self.selfload_header_impl()?))
} else {
Ok(None)
}
}
/// # Arguments
/// * segment_number - 1-indexed segment number
pub fn segment_header(&self, segment_number: u16) -> Result<NESegmentEntry, ParseError> {
assert!(segment_number != 0 && segment_number <= self.header.num_segments, "segment number {} is out of range", segment_number);
let offset = self.header.segment_table_offset + ((segment_number - 1) * SEGMENT_HEADER_SIZE);
match parse_segment_header(&self.raw_header[offset as usize..], self.header.alignment_shift_count) {
Ok((_, header)) => Ok(header),
Err(_) => Err(ParseError::SegmentHeader{ segment_number })
}
}
/// # Arguments
/// * segment_number - 1-indexed segment number
pub fn segment_data(&self, segment_number: u16) -> Result<&[u8], ParseError> {
let header = self.segment_header(segment_number)?;
let data = &self.input[header.offset as usize..];
let mut size = header.data_size as usize;
if header.flags.contains(NESegmentFlags::HAS_RELOC) {
let fixup_table_size = LE::read_u16(&data[size..]) as usize * FIXUP_SIZE as usize;
size += fixup_table_size;
}
Ok(&data[..size])
}
pub fn resource_table_alignment_shift(&self) -> Option<u16> {
if let Some(table) = self.resource_table_data() {
Some(LE::read_u16(table))
} else {
None
}
}
pub fn resource_table_data(&self) -> Option<&[u8]> {
if self.has_resource_table() {
Some(&self.raw_header[self.header.resource_table_offset as usize..])
} else {
None
}
}
pub fn iter_resources(&self) -> NEResourcesIterator {
if self.has_resource_table() {
NEResourcesIterator::new(&self.raw_header[self.header.resource_table_offset as usize..])
} else {
NEResourcesIterator {
table: self.raw_header,
index: 0,
table_kind: NEResourceKind::Integer(0xffff),
offset_shift: 0,
block_index: 1,
block_len: 0,
finished: true
}
}
}
pub fn has_resource_table(&self) -> bool {
// In DIRAPI.DLL from Director for Windows, the resource table offset
// is non-zero but there is no resource table; the resource table offset
// and names table offset are identical.
self.header.resource_table_offset!= 0 && self.header.resource_table_offset!= self.header.names_table_offset
}
fn selfload_header_impl(&self) -> Result<(NESelfLoadHeader, &[u8]), ParseError> {
let segment_data = self.segment_data(1)?;
match read_selfload_header(segment_data) {
Ok(header) => Ok((header.1, header.0)),
Err(_) => Err(ParseError::SelfLoadHeader)
}
}
}
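// --- Illustrative sketch (not part of the original source): copying the automatic
// data segment, whose 1-indexed number is taken from the NE header. A real caller
// would first check that the index is non-zero.
fn auto_data_segment(exe: &NEExecutable) -> Result<Vec<u8>, ParseError> {
    let index = exe.header().auto_data_segment_index;
    Ok(exe.segment_data(index)?.to_vec())
}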
neexe.rs | use bitflags::bitflags;
use byteorder::{ByteOrder, LE};
use custom_error::custom_error;
use crate::util::read_pascal_string;
use enum_primitive::*;
use nom::{apply, count, do_parse, le_u8, le_u16, le_u32, named, named_args, tag, take};
macro_rules! try_parse (
($result: expr, $error: expr) => (match $result {
Ok((_, result)) => result,
Err(_) => { return Err($error); }
})
);
custom_error!{pub ParseError
NotMZ = "invalid MZ header",
NotNE = "invalid NE header",
SegmentHeader{ segment_number: u16 } = "invalid segment {segment_number} header",
SelfLoadHeader = "invalid self-load header"
}
named!(parse_ne_offset<u16>,
do_parse!(
tag!("MZ") >>
take!(58) >>
ne_offset: le_u16 >>
(ne_offset)
)
);
bitflags!(pub struct NEFlags: u16 {
const SINGLE_DATA = 0x0001;
const MULTIPLE_DATA = 0x0002;
const GLOBAL_INIT = 0x0004;
const PROTECTED_MODE = 0x0008;
// There seems to be some disagreement as to what these high nibble bits
// mean, but they are sometimes set so they should probably not be ignored
const WIN32S = 0x0010;
const INST_286 = 0x0020;
const INST_386 = 0x0040;
const INST_X87 = 0x0080;
const FULLSCREEN = 0x0100;
const CONSOLE = 0x0200;
const GUI = 0x0300;
const SELF_LOAD = 0x0800;
const LINKER_ERROR = 0x2000;
const CALL_WEP = 0x4000;
const LIB_MODULE = 0x8000;
});
#[derive(Clone, Debug)]
pub struct NEHeader {
pub linker_major_version: u8,
pub linker_minor_version: u8,
pub entry_table_offset: u16,
pub entry_table_size: u16,
pub crc: u32,
pub flags: NEFlags,
pub auto_data_segment_index: u16,
pub heap_size: u16,
pub stack_size: u16,
pub entry_point: u32,
pub init_stack_pointer: u32,
pub num_segments: u16,
pub num_imports: u16,
pub non_resident_table_size: u16,
pub segment_table_offset: u16, // bytes, from start of NEHeader
pub resource_table_offset: u16,
pub names_table_offset: u16,
pub module_table_offset: u16,
pub import_names_table_offset: u16,
pub non_resident_table_offset: u32,
pub num_movable_entry_point: u16,
pub alignment_shift_count: u16, // 1 << alignment_shift_count = logical sector
pub num_resources: u16,
pub target_os: u8,
pub os2_flags: u8,
pub thunk_offset: u16,
pub segment_thunk_offset: u16,
pub min_code_swap_size: u16,
pub win_version_minor: u8,
pub win_version_major: u8,
}
bitflags!(pub struct NESegmentFlags: u16 {
const CODE = 0x0000;
const DATA = 0x0001;
const MOVABLE = 0x0010;
const PRELOAD = 0x0040;
const HAS_RELOC = 0x0100;
const PRIORITY = 0xF000;
});
named!(read_ne_header<NEHeader>,
do_parse!(
tag!("NE") >>
linker_major_version: le_u8 >>
linker_minor_version: le_u8 >>
entry_table_offset: le_u16 >> // relative to beginning of header
entry_table_size: le_u16 >> // bytes
crc: le_u32 >>
flags: le_u16 >>
auto_data_segment_index: le_u16 >>
heap_size: le_u16 >>
stack_size: le_u16 >>
entry_point: le_u32 >> // cs:ip
init_stack_pointer: le_u32 >> // ss:sp
num_segments: le_u16 >>
num_imports: le_u16 >>
non_resident_table_size: le_u16 >>
segment_table_offset: le_u16 >>
resource_table_offset: le_u16 >>
names_table_offset: le_u16 >>
module_table_offset: le_u16 >>
import_names_table_offset: le_u16 >>
non_resident_table_offset: le_u32 >>
num_movable_entry_point: le_u16 >>
alignment_shift_count: le_u16 >>
num_resources: le_u16 >>
target_os: le_u8 >>
os2_flags: le_u8 >>
thunk_offset: le_u16 >>
segment_thunk_offset: le_u16 >>
min_code_swap_size: le_u16 >>
win_version_minor: le_u8 >>
win_version_major: le_u8 >>
(NEHeader {
linker_major_version,
linker_minor_version,
entry_table_offset,
entry_table_size,
crc,
flags: NEFlags::from_bits_truncate(flags),
auto_data_segment_index,
heap_size,
stack_size,
entry_point,
init_stack_pointer,
num_segments,
num_imports,
non_resident_table_size,
segment_table_offset,
resource_table_offset,
names_table_offset,
module_table_offset,
import_names_table_offset,
non_resident_table_offset,
num_movable_entry_point,
alignment_shift_count,
num_resources,
target_os,
os2_flags,
thunk_offset,
segment_thunk_offset,
min_code_swap_size,
win_version_minor,
win_version_major
})
)
);
#[derive(Clone, Debug)]
pub struct NESegmentEntry {
pub offset: u32, // bytes
pub data_size: u32, // bytes
pub flags: NESegmentFlags,
pub alloc_size: u32, // bytes
}
named_args!(parse_segment_header(offset_shift: u16)<NESegmentEntry>,
do_parse!(
offset: le_u16 >>
data_size: le_u16 >>
flags: le_u16 >>
alloc_size: le_u16 >>
(NESegmentEntry {
offset: u32::from(offset) << offset_shift,
data_size: if data_size == 0 { 0x10000 } else { data_size.into() },
flags: NESegmentFlags::from_bits_truncate(flags),
alloc_size: if alloc_size == 0 { 0x10000 } else { alloc_size.into() }
})
)
);
named_args!(parse_segments(offset_shift: u16, num_segments: u16)<Vec<NESegmentEntry> >,
count!(apply!(parse_segment_header, offset_shift), num_segments as usize)
);
bitflags!(pub struct NEResourceFlags: u16 {
const MOVABLE = 0x10;
const PURE = 0x20;
const PRELOAD = 0x40;
});
enum_from_primitive! {
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum NEPredefinedResourceKind {
Cursor = 1,
Bitmap = 2,
Icon = 3,
Menu = 4,
Dialog = 5,
StringTable = 6,
FontDirectory = 7,
FontResource = 8,
AcceleratorTable = 9,
RawData = 10,
MessageTable = 11,
GroupCursor = 12,
GroupIcon = 14,
// NameTable: https://hackernoon.com/win3mu-part-5-windows-3-executable-files-c2affeec0e5
NameTable = 15,
Version = 16,
DlgInclude = 17,
PlugPlay = 19,
VXD = 20,
AnimatedCursor = 21,
AnimatedIcon = 22,
HTML = 23,
Manifest = 24,
}
}
#[derive(Clone, Debug)]
pub enum NEResourceId {
Integer(u16),
String(String),
}
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum NEResourceKind {
Predefined(NEPredefinedResourceKind),
Integer(u16),
String(String),
}
#[derive(Clone, Debug)]
pub struct NEResourceEntry {
pub kind: NEResourceKind,
pub id: NEResourceId,
pub offset: u32, // bytes
pub length: u32, // bytes
pub flags: NEResourceFlags,
}
named_args!(read_resource<'a>(resource_table: &'a [u8], kind: NEResourceKind, offset_shift: u16)<NEResourceEntry>,
do_parse!(
offset: le_u16 >> // in sectors
length: le_u16 >> // in sectors
flags: le_u16 >>
id: le_u16 >>
/* reserved */ le_u32 >>
(NEResourceEntry {
offset: u32::from(offset) << offset_shift,
length: u32::from(length) << offset_shift,
flags: NEResourceFlags::from_bits_truncate(flags),
kind,
id: if id & 0x8000!= 0 {
NEResourceId::Integer(id & 0x7fff)
} else {
NEResourceId::String(read_pascal_string(&resource_table[id as usize..]).unwrap().1)
}
})
)
);
enum_from_primitive! {
#[derive(Clone, Debug)]
pub enum NESegmentRelocationSourceKind {
LoByte = 0,
Segment = 2,
FarAddr = 3,
Offset = 5,
}
}
#[derive(Clone, Debug)]
pub struct NESelfLoadHeader {
pub boot_app_offset: u32,
pub load_app_seg_offset: u32,
}
named!(read_selfload_header<NESelfLoadHeader>,
do_parse!(
tag!("A0") >>
take!(2) >> // reserved
boot_app_offset: le_u32 >> // segment:offset
load_app_seg_offset: le_u32 >> // segment:offset
take!(4) >> // reserved
take!(4) >> // mem alloc
take!(4) >> // ordinal resolve
take!(4) >> // exit
take!(2 * 4) >> // reserved
take!(4) >> // set owner
(NESelfLoadHeader {
boot_app_offset,
load_app_seg_offset
})
)
);
const SEGMENT_HEADER_SIZE: u16 = 8;
const FIXUP_SIZE: u16 = 8;
pub struct NEExecutable<'a> {
input: &'a [u8],
header: NEHeader,
header_offset: u16,
// A raw header slice is stored to make it easier to resolve offsets which
// are relative to the start of the NE header
raw_header: &'a [u8],
}
pub struct NEResourcesIterator<'a> {
table: &'a [u8],
index: usize,
table_kind: NEResourceKind,
offset_shift: u16,
block_index: u16,
block_len: u16,
finished: bool,
}
impl<'a> NEResourcesIterator<'a> {
pub fn new(table: &'a [u8]) -> NEResourcesIterator<'a> {
let offset_shift = LE::read_u16(table);
let mut iterator = NEResourcesIterator {
table,
index: 2,
table_kind: NEResourceKind::Integer(0xffff),
offset_shift,
block_index: 0,
block_len: 0,
finished: false,
};
iterator.load_next_block();
iterator
}
fn load_next_block(&mut self) {
let id = LE::read_u16(&self.table[self.index..]);
self.finished = id == 0;
if!self.finished {
self.table_kind = if id & 0x8000!= 0 {
let id = id & 0x7fff;
if let Some(kind) = NEPredefinedResourceKind::from_u16(id) {
NEResourceKind::Predefined(kind)
} else {
NEResourceKind::Integer(id)
}
} else {
NEResourceKind::String(read_pascal_string(&self.table[self.index + id as usize..]).unwrap().1)
};
self.block_index = 0;
self.block_len = LE::read_u16(&self.table[self.index + 2..]);
self.index += 8;
}
}
}
impl<'a> Iterator for NEResourcesIterator<'a> {
type Item = NEResourceEntry;
fn next(&mut self) -> Option<Self::Item> {
if self.block_index == self.block_len {
self.load_next_block();
}
if self.finished {
None
} else {
let (_, header) = read_resource(&self.table[self.index..], self.table, self.table_kind.clone(), self.offset_shift).unwrap();
self.index += 12;
self.block_index += 1;
Some(header)
}
}
}
impl<'a> NEExecutable<'a> {
pub fn new(input: &'a [u8]) -> Result<Self, ParseError> {
let header_offset = try_parse!(parse_ne_offset(input), ParseError::NotMZ);
let raw_header = &input[header_offset as usize..];
let header = try_parse!(read_ne_header(raw_header), ParseError::NotNE);
Ok(NEExecutable {
input,
header,
header_offset, // TODO: Get rid of this
raw_header
})
}
pub fn raw_data(&self) -> &'a [u8] {
self.input
}
pub fn header_offset(&self) -> usize {
self.header_offset as usize
}
pub fn name(&self) -> Option<String> {
if self.header.non_resident_table_size == 0 {
None
} else {
let ne_non_resident_table = &self.input[self.header.non_resident_table_offset as usize..];
match read_pascal_string(&ne_non_resident_table) {
Ok((_, name)) => Some(name),
Err(_) => None
}
}
}
pub fn header(&self) -> &NEHeader {
&self.header
}
pub fn selfload_header(&self) -> Result<Option<(NESelfLoadHeader, &[u8])>, ParseError> {
if self.header.flags.contains(NEFlags::SELF_LOAD) {
Ok(Some(self.selfload_header_impl()?))
} else {
Ok(None)
}
}
/// # Arguments
/// * segment_number - 1-indexed segment number
pub fn segment_header(&self, segment_number: u16) -> Result<NESegmentEntry, ParseError> {
assert!(segment_number != 0 && segment_number <= self.header.num_segments, "segment number {} is out of range", segment_number);
let offset = self.header.segment_table_offset + ((segment_number - 1) * SEGMENT_HEADER_SIZE);
match parse_segment_header(&self.raw_header[offset as usize..], self.header.alignment_shift_count) {
Ok((_, header)) => Ok(header),
Err(_) => Err(ParseError::SegmentHeader{ segment_number })
}
}
/// # Arguments
/// * segment_number - 1-indexed segment number
pub fn segment_data(&self, segment_number: u16) -> Result<&[u8], ParseError> {
let header = self.segment_header(segment_number)?;
let data = &self.input[header.offset as usize..];
let mut size = header.data_size as usize;
if header.flags.contains(NESegmentFlags::HAS_RELOC) {
let fixup_table_size = LE::read_u16(&data[size..]) as usize * FIXUP_SIZE as usize;
size += fixup_table_size;
}
Ok(&data[..size])
}
pub fn resource_table_alignment_shift(&self) -> Option<u16> {
if let Some(table) = self.resource_table_data() {
Some(LE::read_u16(table))
} else {
None
}
}
pub fn resource_table_data(&self) -> Option<&[u8]> {
if self.has_resource_table() {
Some(&self.raw_header[self.header.resource_table_offset as usize..])
} else {
None
}
}
pub fn iter_resources(&self) -> NEResourcesIterator {
if self.has_resource_table() {
NEResourcesIterator::new(&self.raw_header[self.header.resource_table_offset as usize..])
} else {
NEResourcesIterator {
table: self.raw_header,
index: 0,
table_kind: NEResourceKind::Integer(0xffff),
offset_shift: 0,
block_index: 1,
block_len: 0,
finished: true
}
}
}
pub fn has_resource_table(&self) -> bool {
// In DIRAPI.DLL from Director for Windows, the resource table offset
// is non-zero but there is no resource table; the resource table offset
// and names table offset are identical.
self.header.resource_table_offset!= 0 && self.header.resource_table_offset!= self.header.names_table_offset
}
fn selfload_header_impl(&self) -> Result<(NESelfLoadHeader, &[u8]), ParseError> {
let segment_data = self.segment_data(1)?;
match read_selfload_header(segment_data) {
Ok(header) => Ok((header.1, header.0)),
Err(_) => Err(ParseError::SelfLoadHeader)
}
}
}
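// --- Illustrative sketch (not part of the original source): reporting whether a
// module is self-loading and, if so, where its boot entry point lives.
fn describe_selfload(exe: &NEExecutable) {
    match exe.selfload_header() {
        Ok(Some((header, _first_segment))) => {
            println!("self-loading, boot entry at {:#x}", header.boot_app_offset);
        }
        Ok(None) => println!("not a self-loading module"),
        Err(e) => println!("could not parse self-load header: {}", e),
    }
}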
| {
NEResourcesIterator::new(&self.raw_header[self.header.resource_table_offset as usize..])
} | conditional_block |
hashed.rs | //! Implementation using ordered keys with hashes and robin hood hashing.
use std::default::Default;
use timely_sort::Unsigned;
use ::hashable::{Hashable, HashOrdered};
use super::{Trie, Cursor, Builder, MergeBuilder, TupleBuilder};
const MINIMUM_SHIFT : usize = 4;
const BLOAT_FACTOR : f64 = 1.1;
// I would like the trie entries to look like (Key, usize), where a usize equal to the
// previous entry indicates that the location is empty. This would let us always use the
// prior location to determine lower bounds, rather than double up upper and lower bounds
// in Entry.
//
// It might also be good to optimistically build the hash map in place. We can do this by
// upper bounding the number of keys, allocating and placing as if this many, and then
// drawing down the allocation and placements if many keys collided or cancelled.
/// A level of the trie, with keys and offsets into a lower layer.
///
/// If keys[i].1 == 0 then entry i should
/// be ignored. This is our version of `Option<(K, usize)>`, which comes at the cost
/// of requiring `K: Default` to populate empty keys.
///
/// Each region of this layer is an independent immutable RHH map, whose size should
/// equal something like `(1 << i) + i` for some value of `i`. The first `(1 << i)`
/// elements are where we expect to find keys, and the remaining `i` are for spill-over
/// due to collisions near the end of the first region.
///
/// We might do something like "if X or fewer elements, just use an ordered list".
#[derive(Debug)]
pub struct HashedLayer<K: HashOrdered, L> {
/// Keys and offsets for the keys.
pub keys: Vec<Entry<K>>, // track upper and lower bounds, because trickery is hard.
/// A lower layer containing ranges of values.
pub vals: L,
}
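// --- Illustrative sketch (not part of the original source): the slot a key is aimed
// at is derived from the top `shift` bits of its hash, mirroring the placement logic
// in `HashedBuilder::boundary` below; collisions spill forward into the extra slots,
// robin-hood style.
fn example_slot<K: Hashable>(key: &K, shift: usize) -> usize {
    (key.hashed().as_u64() >> ((<K as Hashable>::Output::bytes() * 8) - shift)) as usize
}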
impl<K: HashOrdered, L> HashedLayer<K, L> {
fn _entry_valid(&self, index: usize) -> bool { self.keys[index].is_some() }
fn lower(&self, index: usize) -> usize { self.keys[index].get_lower() }
fn upper(&self, index: usize) -> usize { self.keys[index].get_upper() }
}
impl<K: Clone+HashOrdered+Default, L: Trie> Trie for HashedLayer<K, L> {
type Item = (K, L::Item);
type Cursor = HashedCursor<L>;
type MergeBuilder = HashedBuilder<K, L::MergeBuilder>;
type TupleBuilder = HashedBuilder<K, L::TupleBuilder>;
fn keys(&self) -> usize { self.keys.len() }
fn tuples(&self) -> usize { self.vals.tuples() }
fn cursor_from(&self, lower: usize, upper: usize) -> Self::Cursor {
if lower < upper {
let mut shift = 0;
while upper - lower >= (1 << shift) {
shift += 1;
}
shift -= 1;
let mut pos = lower; // set self.pos to something valid.
while pos < upper &&!self.keys[pos].is_some() {
pos += 1;
}
HashedCursor {
shift: shift,
bounds: (lower, upper),
pos: pos,
// keys: owned_self.clone().map(|x| &x.keys[..]),
child: self.vals.cursor_from(self.keys[pos].get_lower(), self.keys[pos].get_upper())
}
}
else {
HashedCursor {
shift: 0,
bounds: (0, 0),
pos: 0,
// keys: owned_self.clone().map(|x| &x.keys[..]), // &self.keys,
child: self.vals.cursor_from(0, 0),
}
}
}
}
/// An entry in hash tables.
#[derive(Debug, Clone)]
pub struct Entry<K: HashOrdered> {
/// The contained key.
key: K,
lower1: u32,
upper1: u32,
}
impl<K: HashOrdered> Entry<K> {
fn new(key: K, lower: usize, upper: usize) -> Self {
Entry {
key: key,
lower1: lower as u32,
upper1: upper as u32,
}
}
// fn for_cmp(&self) -> (K::Output, &K) { (self.key.hashed(), &self.key) }
fn is_some(&self) -> bool { self.upper1!= 0 }
fn empty() -> Self where K: Default { Self::new(Default::default(), 0, 0) }
fn get_lower(&self) -> usize { self.lower1 as usize}
fn get_upper(&self) -> usize { self.upper1 as usize}
fn _set_lower(&mut self, x: usize) { self.lower1 = x as u32; }
fn set_upper(&mut self, x: usize) { self.upper1 = x as u32; }
}
/// Assembles a layer of this trie.
pub struct HashedBuilder<K: HashOrdered, L> {
temp: Vec<Entry<K>>, // staging for building; densely packed here and then re-laid out in self.keys.
/// Entries in the hash map.
pub keys: Vec<Entry<K>>, // keys and offs co-located because we expect to find the right answers fast.
/// A builder for the layer below.
pub vals: L,
}
impl<K: HashOrdered+Clone+Default, L> HashedBuilder<K, L> {
#[inline]
fn _lower(&self, index: usize) -> usize {
self.keys[index].get_lower()
}
#[inline]
fn _upper(&self, index: usize) -> usize {
self.keys[index].get_upper()
}
}
impl<K: HashOrdered+Clone+Default, L: Builder> Builder for HashedBuilder<K, L> {
type Trie = HashedLayer<K, L::Trie>;
/// Looks at the contents of self.temp and extends self.keys appropriately.
///
/// This is where the "hash map" structure is produced. Up until this point, all (key, usize) pairs were
/// committed to self.temp, where they awaited layout. That now happens here.
fn boundary(&mut self) -> usize {
/// self.temp *should* be sorted by (hash, key); let's check!
debug_assert!((1.. self.temp.len()).all(|i| self.temp[i-1].key < self.temp[i].key));
let boundary = self.vals.boundary();
if self.temp.len() > 0 {
// push doesn't know the length at the end; must write it
if!self.temp[self.temp.len()-1].is_some() {
let pos = self.temp.len()-1;
self.temp[pos].set_upper(boundary);
}
// having densely packed everything, we now want to extend the allocation and rewrite the contents
// so that their spacing is in line with how robin hood hashing works.
let lower = self.keys.len();
if self.temp.len() < (1 << MINIMUM_SHIFT) {
self.keys.extend(self.temp.drain(..));
}
else {
let target = (BLOAT_FACTOR * (self.temp.len() as f64)) as u64;
let mut shift = MINIMUM_SHIFT;
while (1 << shift) < target {
shift += 1;
}
self.keys.reserve(1 << shift);
// now going to start pushing things in to self.keys
let mut cursor: usize = 0; // <-- current write pos in self.keys.
for entry in self.temp.drain(..) {
// acquire top `shift` bits from `key.hashed()`
let target = (entry.key.hashed().as_u64() >> ((<K as Hashable>::Output::bytes() * 8) - shift)) as usize;
debug_assert!(target < (1 << shift));
while cursor < target {
// filling with bogus stuff
self.keys.push(Entry::empty());
cursor += 1;
}
self.keys.push(entry);
cursor += 1;
}
// fill out the space, if not full.
while cursor < (1 << shift) {
self.keys.push(Entry::empty());
cursor += 1;
}
// assert that we haven't doubled the allocation (would confuse the "what is shift?" logic)
assert!((self.keys.len() - lower) < (2 << shift));
}
}
self.keys.len()
}
#[inline(never)]
fn done(mut self) -> Self::Trie {
self.boundary();
self.keys.shrink_to_fit();
let vals = self.vals.done();
if vals.tuples() > 0 {
assert!(self.keys.len() > 0);
}
HashedLayer {
keys: self.keys,
vals: vals,
}
}
}
impl<K: HashOrdered+Clone+Default, L: MergeBuilder> MergeBuilder for HashedBuilder<K, L> {
fn with_capacity(other1: &Self::Trie, other2: &Self::Trie) -> Self {
HashedBuilder {
temp: Vec::new(),
keys: Vec::with_capacity(other1.keys() + other2.keys()),
vals: L::with_capacity(&other1.vals, &other2.vals),
}
}
/// Copies fully formed ranges (note plural) of keys from another trie.
///
/// While the ranges are fully formed, the offsets in them are relative to the other trie, and
/// must be corrected. These keys must be moved immediately to self.keys, as there is no info
/// about boundaries between them, and we are unable to lay out the info any differently.
fn copy_range(&mut self, other: &Self::Trie, lower: usize, upper: usize) {
if lower < upper |
}
fn push_merge(&mut self, other1: (&Self::Trie, usize, usize), other2: (&Self::Trie, usize, usize)) -> usize {
// just rebinding names to clarify code.
let (trie1, mut lower1, upper1) = other1;
let (trie2, mut lower2, upper2) = other2;
debug_assert!(upper1 <= trie1.keys.len());
debug_assert!(upper2 <= trie2.keys.len());
self.temp.reserve((upper1 - lower1) + (upper2 - lower2));
while lower1 < trie1.keys.len() && !trie1.keys[lower1].is_some() { lower1 += 1; }
while lower2 < trie2.keys.len() && !trie2.keys[lower2].is_some() { lower2 += 1; }
// while both mergees are still active
while lower1 < upper1 && lower2 < upper2 {
debug_assert!(trie1.keys[lower1].is_some());
debug_assert!(trie2.keys[lower2].is_some());
match trie1.keys[lower1].key.cmp(&trie2.keys[lower2].key) {
::std::cmp::Ordering::Less => {
lower1 += self.push_while_less(trie1, lower1, upper1, &trie2.keys[lower2].key);
}
::std::cmp::Ordering::Equal => {
let lower = self.vals.boundary();
let upper = self.vals.push_merge(
(&trie1.vals, trie1.lower(lower1), trie1.upper(lower1)),
(&trie2.vals, trie2.lower(lower2), trie2.upper(lower2))
);
if upper > lower {
self.temp.push(Entry::new(trie1.keys[lower1].key.clone(), lower, upper));
}
lower1 += 1;
lower2 += 1;
while lower1 < trie1.keys.len() && !trie1.keys[lower1].is_some() { lower1 += 1; }
while lower2 < trie2.keys.len() && !trie2.keys[lower2].is_some() { lower2 += 1; }
}
::std::cmp::Ordering::Greater => {
lower2 += self.push_while_less(trie2, lower2, upper2, &trie1.keys[lower1].key);
}
}
}
if lower1 < upper1 { self.push_all(trie1, lower1, upper1); }
if lower2 < upper2 { self.push_all(trie2, lower2, upper2); }
self.boundary()
}
}
impl<K: HashOrdered+Clone+Default, L: TupleBuilder> TupleBuilder for HashedBuilder<K, L> {
type Item = (K, L::Item);
fn new() -> Self { HashedBuilder { temp: Vec::new(), keys: Vec::new(), vals: L::new() } }
fn with_capacity(cap: usize) -> Self {
HashedBuilder {
temp: Vec::with_capacity(cap),
keys: Vec::with_capacity(cap),
vals: L::with_capacity(cap),
}
}
#[inline]
fn push_tuple(&mut self, (key, val): (K, L::Item)) {
// we build up self.temp, and rely on self.boundary() to drain self.temp.
let temp_len = self.temp.len();
if temp_len == 0 || self.temp[temp_len-1].key != key {
if temp_len > 0 { debug_assert!(self.temp[temp_len-1].key < key); }
let boundary = self.vals.boundary();
if temp_len > 0 {
self.temp[temp_len-1].set_upper(boundary);
}
self.temp.push(Entry::new(key, boundary, 0)); // this should be fixed by boundary?
}
self.vals.push_tuple(val);
}
}
impl<K: HashOrdered+Clone+Default, L: MergeBuilder> HashedBuilder<K, L> {
/// Moves other stuff into self.temp. Returns number of element consumed.
fn push_while_less(&mut self, other: &HashedLayer<K, L::Trie>, lower: usize, upper: usize, vs: &K) -> usize {
let other_basis = other.lower(lower); // from where in `other` the offsets do start.
let self_basis = self.vals.boundary(); // from where in `self` the offsets must start.
let mut bound = 0; // tracks largest value of upper
let mut index = lower;
// let vs_hashed = vs.hashed();
// stop if overrun, or if we find a valid element >= our target.
while index < upper && !(other.keys[index].is_some() && &other.keys[index].key >= vs) {
if other.upper(index) != 0 {
if bound < other.upper(index) { bound = other.upper(index); }
debug_assert!(other.lower(index) < other.upper(index));
let lower = (other.lower(index) + self_basis) - other_basis;
let upper = (other.upper(index) + self_basis) - other_basis;
self.temp.push(Entry::new(other.keys[index].key.clone(), lower, upper));
}
index += 1;
}
debug_assert!(bound > 0);
self.vals.copy_range(&other.vals, other.lower(lower), bound);
index - lower
}
fn push_all(&mut self, other: &HashedLayer<K, L::Trie>, lower: usize, upper: usize) {
debug_assert!(lower < upper);
debug_assert!(upper <= other.keys.len());
let other_basis = other.lower(lower); // from where in `other` the offsets do start.
let self_basis = self.vals.boundary(); // from where in `self` the offsets must start.
let mut bound = 0; // tracks largest value of upper
for index in lower .. upper {
if other.upper(index) != 0 {
if bound < other.upper(index) { bound = other.upper(index); }
let lower = (other.lower(index) + self_basis) - other_basis;
let upper = (other.upper(index) + self_basis) - other_basis;
self.temp.push(Entry::new(other.keys[index].key.clone(), lower, upper));
}
}
debug_assert!(bound > 0);
self.vals.copy_range(&other.vals, other.lower(lower), bound);
}
}
/// A cursor with a child cursor that is updated as we move.
#[derive(Debug)]
pub struct HashedCursor<L: Trie> {
shift: usize, // amount by which to shift hashes.
bounds: (usize, usize), // bounds of slice of self.keys.
pos: usize, // <-- current cursor position.
/// A cursor for the layer below this one.
pub child: L::Cursor,
}
impl<K: HashOrdered, L: Trie> Cursor<HashedLayer<K, L>> for HashedCursor<L> {
type Key = K;
fn key<'a>(&self, storage: &'a HashedLayer<K, L>) -> &'a Self::Key { &storage.keys[self.pos].key }
fn step(&mut self, storage: &HashedLayer<K, L>) {
// look for next valid entry
self.pos += 1;
while self.pos < self.bounds.1 && !storage.keys[self.pos].is_some() {
self.pos += 1;
}
if self.valid(storage) {
let child_lower = storage.keys[self.pos].get_lower();
let child_upper = storage.keys[self.pos].get_upper();
self.child.reposition(&storage.vals, child_lower, child_upper);
}
else {
self.pos = self.bounds.1;
}
}
#[inline(never)]
fn seek(&mut self, storage: &HashedLayer<K, L>, key: &Self::Key) {
// leap to where the key *should* be, or at least be soon after.
// let key_hash = key.hashed();
// only update position if shift is large. otherwise leave it alone.
if self.shift >= MINIMUM_SHIFT {
let target = (key.hashed().as_u64() >> ((K::Output::bytes() * 8) - self.shift)) as usize;
self.pos = target;
}
// scan forward until we find a valid entry >= (key_hash, key)
while self.pos < self.bounds.1 && (!storage.keys[self.pos].is_some() || &storage.keys[self.pos].key < key) {
self.pos += 1;
}
// self.pos should now either
// (i) have self.pos == self.bounds.1 (and be invalid) or
// (ii) point at a valid entry with (entry_hash, entry) >= (key_hash, key).
if self.valid(storage) {
self.child.reposition(&storage.vals, storage.keys[self.pos].get_lower(), storage.keys[self.pos].get_upper());
}
}
fn valid(&self, _storage: &HashedLayer<K, L>) -> bool { self.pos < self.bounds.1 }
fn rewind(&mut self, storage: &HashedLayer<K, L>) {
self.pos = self.bounds.0;
if self.valid(storage) {
self.child.reposition(&storage.vals, storage.keys[self.pos].get_lower(), storage.keys[self.pos].get_upper());
}
}
fn reposition(&mut self, storage: &HashedLayer<K, L>, lower: usize, upper: usize) {
// sort out what the shift is.
// should be just before the first power of two strictly containing (lower, upper].
self.shift = 0;
while upper - lower >= (1 << self.shift) {
self.shift += 1;
}
self.shift -= 1;
self.bounds = (lower, upper);
self.pos = lower; // set self.pos to something valid.
while self.pos < self.bounds.1 && !storage.keys[self.pos].is_some() {
self.pos += 1;
}
if self.valid(storage) {
self.child.reposition(&storage.vals, storage.keys[self.pos].get_lower(), storage.keys[self.pos].get_upper());
}
}
}
| {
let other_basis = other.lower(lower); // from where in `other` the offsets do start.
let self_basis = self.vals.boundary(); // from where in `self` the offsets must start.
for index in lower .. upper {
let other_entry = &other.keys[index];
let new_entry = if other_entry.is_some() {
Entry::new(
other_entry.key.clone(),
(other_entry.get_lower() + self_basis) - other_basis,
(other_entry.get_upper() + self_basis) - other_basis,
)
}
else { Entry::empty() };
self.keys.push(new_entry);
}
self.vals.copy_range(&other.vals, other.lower(lower), other.upper(upper-1));
self.boundary(); // <-- perhaps unnecessary, but ...
} | conditional_block |
hashed.rs | //! Implementation using ordered keys with hashes and robin hood hashing.
use std::default::Default;
use timely_sort::Unsigned;
use ::hashable::{Hashable, HashOrdered};
use super::{Trie, Cursor, Builder, MergeBuilder, TupleBuilder};
const MINIMUM_SHIFT : usize = 4;
const BLOAT_FACTOR : f64 = 1.1;
// I would like the trie entries to look like (Key, usize), where a usize equal to the
// previous entry indicates that the location is empty. This would let us always use the
// prior location to determine lower bounds, rather than double up upper and lower bounds
// in Entry.
//
// It might also be good to optimistically build the hash map in place. We can do this by
// upper bounding the number of keys, allocating and placing as if this many, and then
// drawing down the allocation and placements if many keys collided or cancelled.
/// A level of the trie, with keys and offsets into a lower layer.
///
/// If keys[i].1 == 0 then entry i should
/// be ignored. This is our version of `Option<(K, usize)>`, which comes at the cost
/// of requiring `K: Default` to populate empty keys.
///
/// Each region of this layer is an independent immutable RHH map, whose size should
/// equal something like `(1 << i) + i` for some value of `i`. The first `(1 << i)`
/// elements are where we expect to find keys, and the remaining `i` are for spill-over
/// due to collisions near the end of the first region.
///
/// We might do something like "if X or fewer elements, just use an ordered list".
#[derive(Debug)]
pub struct HashedLayer<K: HashOrdered, L> {
/// Keys and offsets for the keys.
pub keys: Vec<Entry<K>>, // track upper and lower bounds, because trickery is hard.
/// A lower layer containing ranges of values.
pub vals: L,
}
impl<K: HashOrdered, L> HashedLayer<K, L> {
fn _entry_valid(&self, index: usize) -> bool { self.keys[index].is_some() }
fn lower(&self, index: usize) -> usize { self.keys[index].get_lower() }
fn upper(&self, index: usize) -> usize { self.keys[index].get_upper() }
}
impl<K: Clone+HashOrdered+Default, L: Trie> Trie for HashedLayer<K, L> {
type Item = (K, L::Item);
type Cursor = HashedCursor<L>;
type MergeBuilder = HashedBuilder<K, L::MergeBuilder>;
type TupleBuilder = HashedBuilder<K, L::TupleBuilder>;
fn keys(&self) -> usize { self.keys.len() }
fn tuples(&self) -> usize { self.vals.tuples() }
fn cursor_from(&self, lower: usize, upper: usize) -> Self::Cursor {
if lower < upper {
let mut shift = 0;
while upper - lower >= (1 << shift) {
shift += 1;
}
shift -= 1;
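// shift is now floor(log2(upper - lower)), i.e. the power-of-two size this region was laid out with.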
let mut pos = lower; // set self.pos to something valid.
while pos < upper && !self.keys[pos].is_some() {
pos += 1;
}
HashedCursor {
shift: shift,
bounds: (lower, upper),
pos: pos,
// keys: owned_self.clone().map(|x| &x.keys[..]),
child: self.vals.cursor_from(self.keys[pos].get_lower(), self.keys[pos].get_upper())
}
}
else {
HashedCursor {
shift: 0,
bounds: (0, 0),
pos: 0,
// keys: owned_self.clone().map(|x| &x.keys[..]), // &self.keys,
child: self.vals.cursor_from(0, 0),
}
}
}
}
/// An entry in hash tables.
#[derive(Debug, Clone)]
pub struct Entry<K: HashOrdered> {
/// The contained key.
key: K,
lower1: u32,
upper1: u32,
}
impl<K: HashOrdered> Entry<K> {
fn new(key: K, lower: usize, upper: usize) -> Self {
Entry {
key: key,
lower1: lower as u32,
upper1: upper as u32,
}
}
// fn for_cmp(&self) -> (K::Output, &K) { (self.key.hashed(), &self.key) }
fn is_some(&self) -> bool { self.upper1 != 0 }
fn empty() -> Self where K: Default { Self::new(Default::default(), 0, 0) }
fn get_lower(&self) -> usize { self.lower1 as usize }
fn get_upper(&self) -> usize { self.upper1 as usize }
fn _set_lower(&mut self, x: usize) { self.lower1 = x as u32; }
fn set_upper(&mut self, x: usize) { self.upper1 = x as u32; }
}
/// Assembles a layer of this
pub struct HashedBuilder<K: HashOrdered, L> {
temp: Vec<Entry<K>>, // staging for building; densely packed here and then re-laid out in self.keys.
/// Entries in the hash map.
pub keys: Vec<Entry<K>>, // keys and offs co-located because we expect to find the right answers fast.
/// A builder for the layer below.
pub vals: L,
}
impl<K: HashOrdered+Clone+Default, L> HashedBuilder<K, L> {
#[inline]
fn _lower(&self, index: usize) -> usize {
self.keys[index].get_lower()
}
#[inline]
fn _upper(&self, index: usize) -> usize {
self.keys[index].get_upper()
}
}
impl<K: HashOrdered+Clone+Default, L: Builder> Builder for HashedBuilder<K, L> {
type Trie = HashedLayer<K, L::Trie>;
/// Looks at the contents of self.temp and extends self.keys appropriately.
///
/// This is where the "hash map" structure is produced. Up until this point, all (key, usize) pairs were
/// committed to self.temp, where they awaited layout. That now happens here.
fn boundary(&mut self) -> usize {
// self.temp *should* be sorted by (hash, key); let's check!
debug_assert!((1 .. self.temp.len()).all(|i| self.temp[i-1].key < self.temp[i].key));
let boundary = self.vals.boundary();
if self.temp.len() > 0 {
// push doesn't know the length at the end; must write it
if !self.temp[self.temp.len()-1].is_some() {
let pos = self.temp.len()-1;
self.temp[pos].set_upper(boundary);
}
// having densely packed everything, we now want to extend the allocation and rewrite the contents
// so that their spacing is in line with how robin hood hashing works.
let lower = self.keys.len();
if self.temp.len() < (1 << MINIMUM_SHIFT) {
self.keys.extend(self.temp.drain(..));
}
else {
let target = (BLOAT_FACTOR * (self.temp.len() as f64)) as u64;
let mut shift = MINIMUM_SHIFT;
while (1 << shift) < target {
shift += 1;
}
self.keys.reserve(1 << shift);
// now going to start pushing things in to self.keys
let mut cursor: usize = 0; // <-- current write pos in self.keys.
for entry in self.temp.drain(..) {
// acquire top `shift` bits from `key.hashed()`
let target = (entry.key.hashed().as_u64() >> ((<K as Hashable>::Output::bytes() * 8) - shift)) as usize;
debug_assert!(target < (1 << shift));
while cursor < target {
// filling with bogus stuff
self.keys.push(Entry::empty());
cursor += 1;
}
self.keys.push(entry);
cursor += 1;
}
// fill out the space, if not full.
while cursor < (1 << shift) {
self.keys.push(Entry::empty());
cursor += 1;
}
// assert that we haven't doubled the allocation (would confuse the "what is shift?" logic)
assert!((self.keys.len() - lower) < (2 << shift));
}
}
self.keys.len()
}
#[inline(never)]
fn done(mut self) -> Self::Trie {
self.boundary();
self.keys.shrink_to_fit();
let vals = self.vals.done();
if vals.tuples() > 0 {
assert!(self.keys.len() > 0);
}
HashedLayer {
keys: self.keys,
vals: vals,
}
}
}
impl<K: HashOrdered+Clone+Default, L: MergeBuilder> MergeBuilder for HashedBuilder<K, L> {
fn with_capacity(other1: &Self::Trie, other2: &Self::Trie) -> Self {
HashedBuilder {
temp: Vec::new(),
keys: Vec::with_capacity(other1.keys() + other2.keys()),
vals: L::with_capacity(&other1.vals, &other2.vals),
}
}
/// Copies fully formed ranges (note plural) of keys from another trie.
///
/// While the ranges are fully formed, the offsets in them are relative to the other trie, and
/// must be corrected. These keys must be moved immediately to self.keys, as there is no info
/// about boundaries between them, and we are unable to lay out the info any differently.
fn copy_range(&mut self, other: &Self::Trie, lower: usize, upper: usize) {
if lower < upper {
let other_basis = other.lower(lower); // from where in `other` the offsets do start.
let self_basis = self.vals.boundary(); // from where in `self` the offsets must start.
for index in lower .. upper {
let other_entry = &other.keys[index];
let new_entry = if other_entry.is_some() {
Entry::new(
other_entry.key.clone(),
(other_entry.get_lower() + self_basis) - other_basis,
(other_entry.get_upper() + self_basis) - other_basis,
)
}
else { Entry::empty() };
self.keys.push(new_entry);
}
self.vals.copy_range(&other.vals, other.lower(lower), other.upper(upper-1));
self.boundary(); // <-- perhaps unnecessary, but...
}
}
fn push_merge(&mut self, other1: (&Self::Trie, usize, usize), other2: (&Self::Trie, usize, usize)) -> usize {
// just rebinding names to clarify code.
let (trie1, mut lower1, upper1) = other1;
let (trie2, mut lower2, upper2) = other2;
debug_assert!(upper1 <= trie1.keys.len());
debug_assert!(upper2 <= trie2.keys.len());
self.temp.reserve((upper1 - lower1) + (upper2 - lower2));
while lower1 < trie1.keys.len() && !trie1.keys[lower1].is_some() { lower1 += 1; }
while lower2 < trie2.keys.len() && !trie2.keys[lower2].is_some() { lower2 += 1; }
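// lower1/lower2 now point at real (non-empty) entries, or have run past their regions.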
// while both mergees are still active
while lower1 < upper1 && lower2 < upper2 {
debug_assert!(trie1.keys[lower1].is_some());
debug_assert!(trie2.keys[lower2].is_some());
match trie1.keys[lower1].key.cmp(&trie2.keys[lower2].key) {
::std::cmp::Ordering::Less => {
lower1 += self.push_while_less(trie1, lower1, upper1, &trie2.keys[lower2].key);
}
::std::cmp::Ordering::Equal => {
let lower = self.vals.boundary();
let upper = self.vals.push_merge(
(&trie1.vals, trie1.lower(lower1), trie1.upper(lower1)),
(&trie2.vals, trie2.lower(lower2), trie2.upper(lower2))
);
if upper > lower {
self.temp.push(Entry::new(trie1.keys[lower1].key.clone(), lower, upper));
}
lower1 += 1;
lower2 += 1;
while lower1 < trie1.keys.len() && !trie1.keys[lower1].is_some() { lower1 += 1; }
while lower2 < trie2.keys.len() && !trie2.keys[lower2].is_some() { lower2 += 1; }
}
::std::cmp::Ordering::Greater => {
lower2 += self.push_while_less(trie2, lower2, upper2, &trie1.keys[lower1].key);
}
}
}
if lower1 < upper1 { self.push_all(trie1, lower1, upper1); }
if lower2 < upper2 { self.push_all(trie2, lower2, upper2); }
self.boundary()
}
}
impl<K: HashOrdered+Clone+Default, L: TupleBuilder> TupleBuilder for HashedBuilder<K, L> {
type Item = (K, L::Item);
fn new() -> Self { HashedBuilder { temp: Vec::new(), keys: Vec::new(), vals: L::new() } }
fn with_capacity(cap: usize) -> Self |
#[inline]
fn push_tuple(&mut self, (key, val): (K, L::Item)) {
// we build up self.temp, and rely on self.boundary() to drain self.temp.
let temp_len = self.temp.len();
if temp_len == 0 || self.temp[temp_len-1].key != key {
if temp_len > 0 { debug_assert!(self.temp[temp_len-1].key < key); }
let boundary = self.vals.boundary();
if temp_len > 0 {
self.temp[temp_len-1].set_upper(boundary);
}
self.temp.push(Entry::new(key, boundary, 0)); // this should be fixed by boundary?
}
self.vals.push_tuple(val);
}
}
impl<K: HashOrdered+Clone+Default, L: MergeBuilder> HashedBuilder<K, L> {
/// Moves other stuff into self.temp. Returns number of element consumed.
fn push_while_less(&mut self, other: &HashedLayer<K, L::Trie>, lower: usize, upper: usize, vs: &K) -> usize {
let other_basis = other.lower(lower); // from where in `other` the offsets do start.
let self_basis = self.vals.boundary(); // from where in `self` the offsets must start.
let mut bound = 0; // tracks largest value of upper
let mut index = lower;
// let vs_hashed = vs.hashed();
// stop if overrun, or if we find a valid element >= our target.
while index < upper && !(other.keys[index].is_some() && &other.keys[index].key >= vs) {
if other.upper(index) != 0 {
if bound < other.upper(index) { bound = other.upper(index); }
debug_assert!(other.lower(index) < other.upper(index));
let lower = (other.lower(index) + self_basis) - other_basis;
let upper = (other.upper(index) + self_basis) - other_basis;
self.temp.push(Entry::new(other.keys[index].key.clone(), lower, upper));
}
index += 1;
}
debug_assert!(bound > 0);
self.vals.copy_range(&other.vals, other.lower(lower), bound);
index - lower
}
fn push_all(&mut self, other: &HashedLayer<K, L::Trie>, lower: usize, upper: usize) {
debug_assert!(lower < upper);
debug_assert!(upper <= other.keys.len());
let other_basis = other.lower(lower); // from where in `other` the offsets do start.
let self_basis = self.vals.boundary(); // from where in `self` the offsets must start.
let mut bound = 0; // tracks largest value of upper
for index in lower .. upper {
if other.upper(index) != 0 {
if bound < other.upper(index) { bound = other.upper(index); }
let lower = (other.lower(index) + self_basis) - other_basis;
let upper = (other.upper(index) + self_basis) - other_basis;
self.temp.push(Entry::new(other.keys[index].key.clone(), lower, upper));
}
}
debug_assert!(bound > 0);
self.vals.copy_range(&other.vals, other.lower(lower), bound);
}
}
/// A cursor with a child cursor that is updated as we move.
#[derive(Debug)]
pub struct HashedCursor<L: Trie> {
shift: usize, // amount by which to shift hashes.
bounds: (usize, usize), // bounds of slice of self.keys.
pos: usize, // <-- current cursor position.
/// A cursor for the layer below this one.
pub child: L::Cursor,
}
impl<K: HashOrdered, L: Trie> Cursor<HashedLayer<K, L>> for HashedCursor<L> {
type Key = K;
fn key<'a>(&self, storage: &'a HashedLayer<K, L>) -> &'a Self::Key { &storage.keys[self.pos].key }
fn step(&mut self, storage: &HashedLayer<K, L>) {
// look for next valid entry
self.pos += 1;
while self.pos < self.bounds.1 && !storage.keys[self.pos].is_some() {
self.pos += 1;
}
if self.valid(storage) {
let child_lower = storage.keys[self.pos].get_lower();
let child_upper = storage.keys[self.pos].get_upper();
self.child.reposition(&storage.vals, child_lower, child_upper);
}
else {
self.pos = self.bounds.1;
}
}
#[inline(never)]
fn seek(&mut self, storage: &HashedLayer<K, L>, key: &Self::Key) {
// leap to where the key *should* be, or at least be soon after.
// let key_hash = key.hashed();
// only update position if shift is large. otherwise leave it alone.
if self.shift >= MINIMUM_SHIFT {
let target = (key.hashed().as_u64() >> ((K::Output::bytes() * 8) - self.shift)) as usize;
self.pos = target;
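// `target` is the slot the key's top hash bits map to; the scan below walks forward from there.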
}
// scan forward until we find a valid entry >= (key_hash, key)
while self.pos < self.bounds.1 && (!storage.keys[self.pos].is_some() || &storage.keys[self.pos].key < key) {
self.pos += 1;
}
// self.pos should now either
// (i) have self.pos == self.bounds.1 (and be invalid) or
// (ii) point at a valid entry with (entry_hash, entry) >= (key_hash, key).
if self.valid(storage) {
self.child.reposition(&storage.vals, storage.keys[self.pos].get_lower(), storage.keys[self.pos].get_upper());
}
}
fn valid(&self, _storage: &HashedLayer<K, L>) -> bool { self.pos < self.bounds.1 }
fn rewind(&mut self, storage: &HashedLayer<K, L>) {
self.pos = self.bounds.0;
if self.valid(storage) {
self.child.reposition(&storage.vals, storage.keys[self.pos].get_lower(), storage.keys[self.pos].get_upper());
}
}
fn reposition(&mut self, storage: &HashedLayer<K, L>, lower: usize, upper: usize) {
// sort out what the shift is.
// should be just before the first power of two strictly containing (lower, upper].
self.shift = 0;
while upper - lower >= (1 << self.shift) {
self.shift += 1;
}
self.shift -= 1;
self.bounds = (lower, upper);
self.pos = lower; // set self.pos to something valid.
while self.pos < self.bounds.1 && !storage.keys[self.pos].is_some() {
self.pos += 1;
}
if self.valid(storage) {
self.child.reposition(&storage.vals, storage.keys[self.pos].get_lower(), storage.keys[self.pos].get_upper());
}
}
}
| {
HashedBuilder {
temp: Vec::with_capacity(cap),
keys: Vec::with_capacity(cap),
vals: L::with_capacity(cap),
}
} | identifier_body |
hashed.rs | //! Implementation using ordered keys with hashes and robin hood hashing.
use std::default::Default;
use timely_sort::Unsigned;
use ::hashable::{Hashable, HashOrdered};
use super::{Trie, Cursor, Builder, MergeBuilder, TupleBuilder};
const MINIMUM_SHIFT : usize = 4;
const BLOAT_FACTOR : f64 = 1.1;
// I would like the trie entries to look like (Key, usize), where a usize equal to the
// previous entry indicates that the location is empty. This would let us always use the
// prior location to determine lower bounds, rather than double up upper and lower bounds
// in Entry.
//
// It might also be good to optimistically build the hash map in place. We can do this by
// upper bounding the number of keys, allocating and placing as if this many, and then
// drawing down the allocation and placements if many keys collided or cancelled.
/// A level of the trie, with keys and offsets into a lower layer.
///
/// If keys[i].1 == 0 then entry i should
/// be ignored. This is our version of `Option<(K, usize)>`, which comes at the cost
/// of requiring `K: Default` to populate empty keys.
///
/// Each region of this layer is an independent immutable RHH map, whose size should
/// equal something like `(1 << i) + i` for some value of `i`. The first `(1 << i)`
/// elements are where we expect to find keys, and the remaining `i` are for spill-over
/// due to collisions near the end of the first region.
///
/// We might do something like "if X or fewer elements, just use an ordered list".
#[derive(Debug)]
pub struct HashedLayer<K: HashOrdered, L> {
/// Keys and offsets for the keys.
pub keys: Vec<Entry<K>>, // track upper and lower bounds, because trickery is hard.
/// A lower layer containing ranges of values.
pub vals: L,
}
impl<K: HashOrdered, L> HashedLayer<K, L> {
fn _entry_valid(&self, index: usize) -> bool { self.keys[index].is_some() }
fn lower(&self, index: usize) -> usize { self.keys[index].get_lower() }
fn upper(&self, index: usize) -> usize { self.keys[index].get_upper() }
}
impl<K: Clone+HashOrdered+Default, L: Trie> Trie for HashedLayer<K, L> {
type Item = (K, L::Item);
type Cursor = HashedCursor<L>;
type MergeBuilder = HashedBuilder<K, L::MergeBuilder>;
type TupleBuilder = HashedBuilder<K, L::TupleBuilder>;
fn keys(&self) -> usize { self.keys.len() }
fn tuples(&self) -> usize { self.vals.tuples() }
fn cursor_from(&self, lower: usize, upper: usize) -> Self::Cursor {
if lower < upper {
let mut shift = 0;
while upper - lower >= (1 << shift) {
shift += 1;
}
shift -= 1;
let mut pos = lower; // set self.pos to something valid.
while pos < upper && !self.keys[pos].is_some() {
pos += 1;
}
HashedCursor {
shift: shift,
bounds: (lower, upper),
pos: pos,
// keys: owned_self.clone().map(|x| &x.keys[..]),
child: self.vals.cursor_from(self.keys[pos].get_lower(), self.keys[pos].get_upper())
}
}
else {
HashedCursor {
shift: 0,
bounds: (0, 0),
pos: 0,
// keys: owned_self.clone().map(|x| &x.keys[..]), // &self.keys,
child: self.vals.cursor_from(0, 0),
}
}
}
}
/// An entry in hash tables.
#[derive(Debug, Clone)]
pub struct Entry<K: HashOrdered> {
/// The contained key.
key: K,
lower1: u32,
upper1: u32,
}
impl<K: HashOrdered> Entry<K> {
fn new(key: K, lower: usize, upper: usize) -> Self {
Entry {
key: key,
lower1: lower as u32,
upper1: upper as u32,
}
}
// fn for_cmp(&self) -> (K::Output, &K) { (self.key.hashed(), &self.key) }
fn is_some(&self) -> bool { self.upper1 != 0 }
fn empty() -> Self where K: Default { Self::new(Default::default(), 0, 0) }
fn get_lower(&self) -> usize { self.lower1 as usize }
fn get_upper(&self) -> usize { self.upper1 as usize }
fn _set_lower(&mut self, x: usize) { self.lower1 = x as u32; }
fn set_upper(&mut self, x: usize) { self.upper1 = x as u32; }
}
/// Assembles a layer of this
pub struct HashedBuilder<K: HashOrdered, L> {
temp: Vec<Entry<K>>, // staging for building; densely packed here and then re-laid out in self.keys.
/// Entries in the hash map.
pub keys: Vec<Entry<K>>, // keys and offs co-located because we expect to find the right answers fast.
/// A builder for the layer below.
pub vals: L,
}
impl<K: HashOrdered+Clone+Default, L> HashedBuilder<K, L> {
#[inline]
fn _lower(&self, index: usize) -> usize {
self.keys[index].get_lower()
}
#[inline]
fn _upper(&self, index: usize) -> usize {
self.keys[index].get_upper()
}
}
impl<K: HashOrdered+Clone+Default, L: Builder> Builder for HashedBuilder<K, L> {
type Trie = HashedLayer<K, L::Trie>;
/// Looks at the contents of self.temp and extends self.keys appropriately.
///
/// This is where the "hash map" structure is produced. Up until this point, all (key, usize) pairs were
/// committed to self.temp, where they awaited layout. That now happens here.
fn boundary(&mut self) -> usize {
// self.temp *should* be sorted by (hash, key); let's check!
debug_assert!((1 .. self.temp.len()).all(|i| self.temp[i-1].key < self.temp[i].key));
let boundary = self.vals.boundary();
if self.temp.len() > 0 {
// push doesn't know the length at the end; must write it
if !self.temp[self.temp.len()-1].is_some() {
let pos = self.temp.len()-1;
self.temp[pos].set_upper(boundary);
}
// having densely packed everything, we now want to extend the allocation and rewrite the contents
// so that their spacing is in line with how robin hood hashing works.
let lower = self.keys.len();
if self.temp.len() < (1 << MINIMUM_SHIFT) {
self.keys.extend(self.temp.drain(..));
}
else {
let target = (BLOAT_FACTOR * (self.temp.len() as f64)) as u64;
let mut shift = MINIMUM_SHIFT;
while (1 << shift) < target {
shift += 1;
}
self.keys.reserve(1 << shift);
// now going to start pushing things in to self.keys
let mut cursor: usize = 0; // <-- current write pos in self.keys.
for entry in self.temp.drain(..) {
// acquire top `shift` bits from `key.hashed()`
let target = (entry.key.hashed().as_u64() >> ((<K as Hashable>::Output::bytes() * 8) - shift)) as usize;
debug_assert!(target < (1 << shift));
while cursor < target {
// filling with bogus stuff
self.keys.push(Entry::empty());
cursor += 1;
}
self.keys.push(entry);
cursor += 1;
}
// fill out the space, if not full.
while cursor < (1 << shift) {
self.keys.push(Entry::empty());
cursor += 1;
}
// assert that we haven't doubled the allocation (would confuse the "what is shift?" logic)
assert!((self.keys.len() - lower) < (2 << shift));
}
}
self.keys.len()
}
#[inline(never)]
fn done(mut self) -> Self::Trie {
self.boundary();
self.keys.shrink_to_fit();
let vals = self.vals.done();
if vals.tuples() > 0 {
assert!(self.keys.len() > 0);
}
HashedLayer {
keys: self.keys,
vals: vals,
}
}
}
impl<K: HashOrdered+Clone+Default, L: MergeBuilder> MergeBuilder for HashedBuilder<K, L> {
fn | (other1: &Self::Trie, other2: &Self::Trie) -> Self {
HashedBuilder {
temp: Vec::new(),
keys: Vec::with_capacity(other1.keys() + other2.keys()),
vals: L::with_capacity(&other1.vals, &other2.vals),
}
}
/// Copies fully formed ranges (note plural) of keys from another trie.
///
/// While the ranges are fully formed, the offsets in them are relative to the other trie, and
/// must be corrected. These keys must be moved immediately to self.keys, as there is no info
/// about boundaries between them, and we are unable to lay out the info any differently.
fn copy_range(&mut self, other: &Self::Trie, lower: usize, upper: usize) {
if lower < upper {
let other_basis = other.lower(lower); // from where in `other` the offsets do start.
let self_basis = self.vals.boundary(); // from where in `self` the offsets must start.
for index in lower .. upper {
let other_entry = &other.keys[index];
let new_entry = if other_entry.is_some() {
Entry::new(
other_entry.key.clone(),
(other_entry.get_lower() + self_basis) - other_basis,
(other_entry.get_upper() + self_basis) - other_basis,
)
}
else { Entry::empty() };
self.keys.push(new_entry);
}
self.vals.copy_range(&other.vals, other.lower(lower), other.upper(upper-1));
self.boundary(); // <-- perhaps unnecessary, but...
}
}
fn push_merge(&mut self, other1: (&Self::Trie, usize, usize), other2: (&Self::Trie, usize, usize)) -> usize {
// just rebinding names to clarify code.
let (trie1, mut lower1, upper1) = other1;
let (trie2, mut lower2, upper2) = other2;
debug_assert!(upper1 <= trie1.keys.len());
debug_assert!(upper2 <= trie2.keys.len());
self.temp.reserve((upper1 - lower1) + (upper2 - lower2));
while lower1 < trie1.keys.len() && !trie1.keys[lower1].is_some() { lower1 += 1; }
while lower2 < trie2.keys.len() && !trie2.keys[lower2].is_some() { lower2 += 1; }
// while both mergees are still active
while lower1 < upper1 && lower2 < upper2 {
debug_assert!(trie1.keys[lower1].is_some());
debug_assert!(trie2.keys[lower2].is_some());
match trie1.keys[lower1].key.cmp(&trie2.keys[lower2].key) {
::std::cmp::Ordering::Less => {
lower1 += self.push_while_less(trie1, lower1, upper1, &trie2.keys[lower2].key);
}
::std::cmp::Ordering::Equal => {
let lower = self.vals.boundary();
let upper = self.vals.push_merge(
(&trie1.vals, trie1.lower(lower1), trie1.upper(lower1)),
(&trie2.vals, trie2.lower(lower2), trie2.upper(lower2))
);
if upper > lower {
self.temp.push(Entry::new(trie1.keys[lower1].key.clone(), lower, upper));
}
lower1 += 1;
lower2 += 1;
while lower1 < trie1.keys.len() && !trie1.keys[lower1].is_some() { lower1 += 1; }
while lower2 < trie2.keys.len() && !trie2.keys[lower2].is_some() { lower2 += 1; }
}
::std::cmp::Ordering::Greater => {
lower2 += self.push_while_less(trie2, lower2, upper2, &trie1.keys[lower1].key);
}
}
}
if lower1 < upper1 { self.push_all(trie1, lower1, upper1); }
if lower2 < upper2 { self.push_all(trie2, lower2, upper2); }
self.boundary()
}
}
impl<K: HashOrdered+Clone+Default, L: TupleBuilder> TupleBuilder for HashedBuilder<K, L> {
type Item = (K, L::Item);
fn new() -> Self { HashedBuilder { temp: Vec::new(), keys: Vec::new(), vals: L::new() } }
fn with_capacity(cap: usize) -> Self {
HashedBuilder {
temp: Vec::with_capacity(cap),
keys: Vec::with_capacity(cap),
vals: L::with_capacity(cap),
}
}
#[inline]
fn push_tuple(&mut self, (key, val): (K, L::Item)) {
// we build up self.temp, and rely on self.boundary() to drain self.temp.
let temp_len = self.temp.len();
if temp_len == 0 || self.temp[temp_len-1].key != key {
if temp_len > 0 { debug_assert!(self.temp[temp_len-1].key < key); }
let boundary = self.vals.boundary();
if temp_len > 0 {
self.temp[temp_len-1].set_upper(boundary);
}
self.temp.push(Entry::new(key, boundary, 0)); // this should be fixed by boundary?
}
self.vals.push_tuple(val);
}
}
impl<K: HashOrdered+Clone+Default, L: MergeBuilder> HashedBuilder<K, L> {
/// Moves other stuff into self.temp. Returns number of element consumed.
fn push_while_less(&mut self, other: &HashedLayer<K, L::Trie>, lower: usize, upper: usize, vs: &K) -> usize {
let other_basis = other.lower(lower); // from where in `other` the offsets do start.
let self_basis = self.vals.boundary(); // from where in `self` the offsets must start.
let mut bound = 0; // tracks largest value of upper
let mut index = lower;
// let vs_hashed = vs.hashed();
// stop if overrun, or if we find a valid element >= our target.
while index < upper && !(other.keys[index].is_some() && &other.keys[index].key >= vs) {
if other.upper(index) != 0 {
if bound < other.upper(index) { bound = other.upper(index); }
debug_assert!(other.lower(index) < other.upper(index));
let lower = (other.lower(index) + self_basis) - other_basis;
let upper = (other.upper(index) + self_basis) - other_basis;
self.temp.push(Entry::new(other.keys[index].key.clone(), lower, upper));
}
index += 1;
}
debug_assert!(bound > 0);
self.vals.copy_range(&other.vals, other.lower(lower), bound);
index - lower
}
fn push_all(&mut self, other: &HashedLayer<K, L::Trie>, lower: usize, upper: usize) {
debug_assert!(lower < upper);
debug_assert!(upper <= other.keys.len());
let other_basis = other.lower(lower); // from where in `other` the offsets do start.
let self_basis = self.vals.boundary(); // from where in `self` the offsets must start.
let mut bound = 0; // tracks largest value of upper
for index in lower .. upper {
if other.upper(index) != 0 {
if bound < other.upper(index) { bound = other.upper(index); }
let lower = (other.lower(index) + self_basis) - other_basis;
let upper = (other.upper(index) + self_basis) - other_basis;
self.temp.push(Entry::new(other.keys[index].key.clone(), lower, upper));
}
}
debug_assert!(bound > 0);
self.vals.copy_range(&other.vals, other.lower(lower), bound);
}
}
/// A cursor with a child cursor that is updated as we move.
#[derive(Debug)]
pub struct HashedCursor<L: Trie> {
shift: usize, // amount by which to shift hashes.
bounds: (usize, usize), // bounds of slice of self.keys.
pos: usize, // <-- current cursor position.
/// A cursor for the layer below this one.
pub child: L::Cursor,
}
impl<K: HashOrdered, L: Trie> Cursor<HashedLayer<K, L>> for HashedCursor<L> {
type Key = K;
fn key<'a>(&self, storage: &'a HashedLayer<K, L>) -> &'a Self::Key { &storage.keys[self.pos].key }
fn step(&mut self, storage: &HashedLayer<K, L>) {
// look for next valid entry
self.pos += 1;
while self.pos < self.bounds.1 && !storage.keys[self.pos].is_some() {
self.pos += 1;
}
if self.valid(storage) {
let child_lower = storage.keys[self.pos].get_lower();
let child_upper = storage.keys[self.pos].get_upper();
self.child.reposition(&storage.vals, child_lower, child_upper);
}
else {
self.pos = self.bounds.1;
}
}
#[inline(never)]
fn seek(&mut self, storage: &HashedLayer<K, L>, key: &Self::Key) {
// leap to where the key *should* be, or at least be soon after.
// let key_hash = key.hashed();
// only update position if shift is large. otherwise leave it alone.
if self.shift >= MINIMUM_SHIFT {
let target = (key.hashed().as_u64() >> ((K::Output::bytes() * 8) - self.shift)) as usize;
self.pos = target;
}
// scan forward until we find a valid entry >= (key_hash, key)
while self.pos < self.bounds.1 && (!storage.keys[self.pos].is_some() || &storage.keys[self.pos].key < key) {
self.pos += 1;
}
// self.pos should now either
// (i) have self.pos == self.bounds.1 (and be invalid) or
// (ii) point at a valid entry with (entry_hash, entry) >= (key_hash, key).
if self.valid(storage) {
self.child.reposition(&storage.vals, storage.keys[self.pos].get_lower(), storage.keys[self.pos].get_upper());
}
}
fn valid(&self, _storage: &HashedLayer<K, L>) -> bool { self.pos < self.bounds.1 }
fn rewind(&mut self, storage: &HashedLayer<K, L>) {
self.pos = self.bounds.0;
if self.valid(storage) {
self.child.reposition(&storage.vals, storage.keys[self.pos].get_lower(), storage.keys[self.pos].get_upper());
}
}
fn reposition(&mut self, storage: &HashedLayer<K, L>, lower: usize, upper: usize) {
// sort out what the shift is.
// should be just before the first power of two strictly containing (lower, upper].
self.shift = 0;
while upper - lower >= (1 << self.shift) {
self.shift += 1;
}
self.shift -= 1;
self.bounds = (lower, upper);
self.pos = lower; // set self.pos to something valid.
while self.pos < self.bounds.1 && !storage.keys[self.pos].is_some() {
self.pos += 1;
}
if self.valid(storage) {
self.child.reposition(&storage.vals, storage.keys[self.pos].get_lower(), storage.keys[self.pos].get_upper());
}
}
}
| with_capacity | identifier_name |
hashed.rs | //! Implementation using ordered keys with hashes and robin hood hashing.
use std::default::Default;
use timely_sort::Unsigned;
use ::hashable::{Hashable, HashOrdered};
use super::{Trie, Cursor, Builder, MergeBuilder, TupleBuilder};
const MINIMUM_SHIFT : usize = 4;
const BLOAT_FACTOR : f64 = 1.1;
// I would like the trie entries to look like (Key, usize), where a usize equal to the
// previous entry indicates that the location is empty. This would let us always use the
// prior location to determine lower bounds, rather than double up upper and lower bounds
// in Entry.
//
// It might also be good to optimistically build the hash map in place. We can do this by
// upper bounding the number of keys, allocating and placing as if this many, and then
// drawing down the allocation and placements if many keys collided or cancelled.
/// A level of the trie, with keys and offsets into a lower layer.
///
/// If keys[i].1 == 0 then entry i should
/// be ignored. This is our version of `Option<(K, usize)>`, which comes at the cost
/// of requiring `K: Default` to populate empty keys.
///
/// Each region of this layer is an independent immutable RHH map, whose size should
/// equal something like `(1 << i) + i` for some value of `i`. The first `(1 << i)`
/// elements are where we expect to find keys, and the remaining `i` are for spill-over
/// due to collisions near the end of the first region.
///
/// We might do something like "if X or fewer elements, just use an ordered list".
#[derive(Debug)]
pub struct HashedLayer<K: HashOrdered, L> {
/// Keys and offsets for the keys.
pub keys: Vec<Entry<K>>, // track upper and lower bounds, because trickery is hard.
/// A lower layer containing ranges of values.
pub vals: L,
}
impl<K: HashOrdered, L> HashedLayer<K, L> {
fn _entry_valid(&self, index: usize) -> bool { self.keys[index].is_some() }
fn lower(&self, index: usize) -> usize { self.keys[index].get_lower() }
fn upper(&self, index: usize) -> usize { self.keys[index].get_upper() }
}
impl<K: Clone+HashOrdered+Default, L: Trie> Trie for HashedLayer<K, L> {
type Item = (K, L::Item);
type Cursor = HashedCursor<L>;
type MergeBuilder = HashedBuilder<K, L::MergeBuilder>;
type TupleBuilder = HashedBuilder<K, L::TupleBuilder>;
fn keys(&self) -> usize { self.keys.len() }
fn tuples(&self) -> usize { self.vals.tuples() }
fn cursor_from(&self, lower: usize, upper: usize) -> Self::Cursor {
if lower < upper {
let mut shift = 0;
while upper - lower >= (1 << shift) {
shift += 1;
}
shift -= 1;
let mut pos = lower; // set self.pos to something valid.
while pos < upper && !self.keys[pos].is_some() {
pos += 1;
}
HashedCursor {
shift: shift,
bounds: (lower, upper),
pos: pos,
// keys: owned_self.clone().map(|x| &x.keys[..]),
child: self.vals.cursor_from(self.keys[pos].get_lower(), self.keys[pos].get_upper())
}
}
else {
HashedCursor {
shift: 0,
bounds: (0, 0),
pos: 0,
// keys: owned_self.clone().map(|x| &x.keys[..]), // &self.keys,
child: self.vals.cursor_from(0, 0),
}
}
}
}
/// An entry in hash tables.
#[derive(Debug, Clone)]
pub struct Entry<K: HashOrdered> {
/// The contained key.
key: K,
lower1: u32,
upper1: u32,
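// `upper1 == 0` doubles as the empty-slot marker; see `is_some` below.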
}
impl<K: HashOrdered> Entry<K> {
fn new(key: K, lower: usize, upper: usize) -> Self {
Entry {
key: key,
lower1: lower as u32,
upper1: upper as u32,
}
}
// fn for_cmp(&self) -> (K::Output, &K) { (self.key.hashed(), &self.key) }
fn is_some(&self) -> bool { self.upper1 != 0 }
fn empty() -> Self where K: Default { Self::new(Default::default(), 0, 0) }
fn get_lower(&self) -> usize { self.lower1 as usize }
fn get_upper(&self) -> usize { self.upper1 as usize }
fn _set_lower(&mut self, x: usize) { self.lower1 = x as u32; }
fn set_upper(&mut self, x: usize) { self.upper1 = x as u32; }
}
/// Assembles a layer of this
pub struct HashedBuilder<K: HashOrdered, L> {
temp: Vec<Entry<K>>, // staging for building; densely packed here and then re-laid out in self.keys.
/// Entries in the hash map.
pub keys: Vec<Entry<K>>, // keys and offs co-located because we expect to find the right answers fast.
/// A builder for the layer below.
pub vals: L,
}
impl<K: HashOrdered+Clone+Default, L> HashedBuilder<K, L> {
#[inline]
fn _lower(&self, index: usize) -> usize {
self.keys[index].get_lower()
}
#[inline]
fn _upper(&self, index: usize) -> usize {
self.keys[index].get_upper()
}
}
impl<K: HashOrdered+Clone+Default, L: Builder> Builder for HashedBuilder<K, L> {
type Trie = HashedLayer<K, L::Trie>;
/// Looks at the contents of self.temp and extends self.keys appropriately.
///
/// This is where the "hash map" structure is produced. Up until this point, all (key, usize) pairs were
/// committed to self.temp, where they awaited layout. That now happens here.
fn boundary(&mut self) -> usize {
// self.temp *should* be sorted by (hash, key); let's check!
debug_assert!((1 .. self.temp.len()).all(|i| self.temp[i-1].key < self.temp[i].key));
let boundary = self.vals.boundary();
if self.temp.len() > 0 {
// push doesn't know the length at the end; must write it
if !self.temp[self.temp.len()-1].is_some() {
let pos = self.temp.len()-1;
self.temp[pos].set_upper(boundary);
}
// having densely packed everything, we now want to extend the allocation and rewrite the contents
// so that their spacing is in line with how robin hood hashing works.
let lower = self.keys.len();
if self.temp.len() < (1 << MINIMUM_SHIFT) {
self.keys.extend(self.temp.drain(..));
}
else {
let target = (BLOAT_FACTOR * (self.temp.len() as f64)) as u64;
let mut shift = MINIMUM_SHIFT;
while (1 << shift) < target {
shift += 1;
}
self.keys.reserve(1 << shift);
// now going to start pushing things in to self.keys
let mut cursor: usize = 0; // <-- current write pos in self.keys.
for entry in self.temp.drain(..) {
// acquire top `shift` bits from `key.hashed()`
let target = (entry.key.hashed().as_u64() >> ((<K as Hashable>::Output::bytes() * 8) - shift)) as usize;
debug_assert!(target < (1 << shift));
while cursor < target {
// filling with bogus stuff
self.keys.push(Entry::empty());
cursor += 1;
}
self.keys.push(entry);
cursor += 1;
}
// fill out the space, if not full.
while cursor < (1 << shift) {
self.keys.push(Entry::empty());
cursor += 1;
}
// assert that we haven't doubled the allocation (would confuse the "what is shift?" logic)
assert!((self.keys.len() - lower) < (2 << shift));
}
}
self.keys.len()
}
#[inline(never)]
fn done(mut self) -> Self::Trie {
self.boundary();
self.keys.shrink_to_fit();
let vals = self.vals.done();
if vals.tuples() > 0 {
assert!(self.keys.len() > 0);
}
HashedLayer {
keys: self.keys,
vals: vals,
}
}
}
impl<K: HashOrdered+Clone+Default, L: MergeBuilder> MergeBuilder for HashedBuilder<K, L> {
fn with_capacity(other1: &Self::Trie, other2: &Self::Trie) -> Self {
HashedBuilder {
temp: Vec::new(),
keys: Vec::with_capacity(other1.keys() + other2.keys()),
vals: L::with_capacity(&other1.vals, &other2.vals),
}
}
/// Copies fully formed ranges (note plural) of keys from another trie.
///
/// While the ranges are fully formed, the offsets in them are relative to the other trie, and
/// must be corrected. These keys must be moved immediately to self.keys, as there is no info
/// about boundaries between them, and we are unable to lay out the info any differently.
fn copy_range(&mut self, other: &Self::Trie, lower: usize, upper: usize) {
if lower < upper {
let other_basis = other.lower(lower); // from where in `other` the offsets do start.
let self_basis = self.vals.boundary(); // from where in `self` the offsets must start.
for index in lower .. upper {
let other_entry = &other.keys[index];
let new_entry = if other_entry.is_some() {
Entry::new(
other_entry.key.clone(),
(other_entry.get_lower() + self_basis) - other_basis,
(other_entry.get_upper() + self_basis) - other_basis,
)
}
else { Entry::empty() };
self.keys.push(new_entry);
}
self.vals.copy_range(&other.vals, other.lower(lower), other.upper(upper-1));
self.boundary(); // <-- perhaps unnecessary, but...
}
}
fn push_merge(&mut self, other1: (&Self::Trie, usize, usize), other2: (&Self::Trie, usize, usize)) -> usize {
// just rebinding names to clarify code.
let (trie1, mut lower1, upper1) = other1;
let (trie2, mut lower2, upper2) = other2;
debug_assert!(upper1 <= trie1.keys.len());
debug_assert!(upper2 <= trie2.keys.len());
self.temp.reserve((upper1 - lower1) + (upper2 - lower2));
while lower1 < trie1.keys.len() && !trie1.keys[lower1].is_some() { lower1 += 1; }
while lower2 < trie2.keys.len() && !trie2.keys[lower2].is_some() { lower2 += 1; }
// while both mergees are still active
while lower1 < upper1 && lower2 < upper2 {
debug_assert!(trie1.keys[lower1].is_some());
debug_assert!(trie2.keys[lower2].is_some());
match trie1.keys[lower1].key.cmp(&trie2.keys[lower2].key) {
::std::cmp::Ordering::Less => {
lower1 += self.push_while_less(trie1, lower1, upper1, &trie2.keys[lower2].key);
}
::std::cmp::Ordering::Equal => {
let lower = self.vals.boundary();
let upper = self.vals.push_merge(
(&trie1.vals, trie1.lower(lower1), trie1.upper(lower1)),
(&trie2.vals, trie2.lower(lower2), trie2.upper(lower2))
);
if upper > lower {
self.temp.push(Entry::new(trie1.keys[lower1].key.clone(), lower, upper));
}
lower1 += 1;
lower2 += 1;
while lower1 < trie1.keys.len() && !trie1.keys[lower1].is_some() { lower1 += 1; }
while lower2 < trie2.keys.len() && !trie2.keys[lower2].is_some() { lower2 += 1; }
}
::std::cmp::Ordering::Greater => {
lower2 += self.push_while_less(trie2, lower2, upper2, &trie1.keys[lower1].key);
}
}
}
if lower1 < upper1 { self.push_all(trie1, lower1, upper1); }
if lower2 < upper2 { self.push_all(trie2, lower2, upper2); }
self.boundary()
}
}
impl<K: HashOrdered+Clone+Default, L: TupleBuilder> TupleBuilder for HashedBuilder<K, L> {
type Item = (K, L::Item);
fn new() -> Self { HashedBuilder { temp: Vec::new(), keys: Vec::new(), vals: L::new() } }
fn with_capacity(cap: usize) -> Self {
HashedBuilder {
temp: Vec::with_capacity(cap),
keys: Vec::with_capacity(cap),
vals: L::with_capacity(cap),
}
}
#[inline]
fn push_tuple(&mut self, (key, val): (K, L::Item)) {
// we build up self.temp, and rely on self.boundary() to drain self.temp.
let temp_len = self.temp.len();
if temp_len == 0 || self.temp[temp_len-1].key != key {
if temp_len > 0 { debug_assert!(self.temp[temp_len-1].key < key); }
let boundary = self.vals.boundary();
if temp_len > 0 {
self.temp[temp_len-1].set_upper(boundary);
}
self.temp.push(Entry::new(key, boundary, 0)); // this should be fixed by boundary?
}
self.vals.push_tuple(val);
}
}
impl<K: HashOrdered+Clone+Default, L: MergeBuilder> HashedBuilder<K, L> {
/// Moves other stuff into self.temp. Returns number of element consumed.
fn push_while_less(&mut self, other: &HashedLayer<K, L::Trie>, lower: usize, upper: usize, vs: &K) -> usize {
let other_basis = other.lower(lower); // from where in `other` the offsets do start.
let self_basis = self.vals.boundary(); // from where in `self` the offsets must start.
let mut bound = 0; // tracks largest value of upper
let mut index = lower;
// let vs_hashed = vs.hashed();
// stop if overrun, or if we find a valid element >= our target.
while index < upper && !(other.keys[index].is_some() && &other.keys[index].key >= vs) {
if other.upper(index) != 0 {
if bound < other.upper(index) { bound = other.upper(index); }
debug_assert!(other.lower(index) < other.upper(index));
let lower = (other.lower(index) + self_basis) - other_basis;
let upper = (other.upper(index) + self_basis) - other_basis;
self.temp.push(Entry::new(other.keys[index].key.clone(), lower, upper));
}
index += 1;
}
debug_assert!(bound > 0);
self.vals.copy_range(&other.vals, other.lower(lower), bound);
index - lower
}
fn push_all(&mut self, other: &HashedLayer<K, L::Trie>, lower: usize, upper: usize) {
debug_assert!(lower < upper);
debug_assert!(upper <= other.keys.len());
let other_basis = other.lower(lower); // from where in `other` the offsets do start.
let self_basis = self.vals.boundary(); // from where in `self` the offsets must start.
let mut bound = 0; // tracks largest value of upper
for index in lower .. upper {
if other.upper(index) != 0 {
if bound < other.upper(index) { bound = other.upper(index); }
let lower = (other.lower(index) + self_basis) - other_basis;
let upper = (other.upper(index) + self_basis) - other_basis;
self.temp.push(Entry::new(other.keys[index].key.clone(), lower, upper));
}
}
debug_assert!(bound > 0);
self.vals.copy_range(&other.vals, other.lower(lower), bound);
}
}
/// A cursor with a child cursor that is updated as we move.
#[derive(Debug)]
pub struct HashedCursor<L: Trie> {
shift: usize, // amount by which to shift hashes.
bounds: (usize, usize), // bounds of slice of self.keys.
pos: usize, // <-- current cursor position.
/// A cursor for the layer below this one.
pub child: L::Cursor,
}
impl<K: HashOrdered, L: Trie> Cursor<HashedLayer<K, L>> for HashedCursor<L> {
type Key = K;
fn key<'a>(&self, storage: &'a HashedLayer<K, L>) -> &'a Self::Key { &storage.keys[self.pos].key }
fn step(&mut self, storage: &HashedLayer<K, L>) {
// look for next valid entry
self.pos += 1;
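// skip over empty padding slots left by the robin hood layout.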
while self.pos < self.bounds.1 && !storage.keys[self.pos].is_some() {
self.pos += 1;
}
if self.valid(storage) {
let child_lower = storage.keys[self.pos].get_lower();
let child_upper = storage.keys[self.pos].get_upper();
self.child.reposition(&storage.vals, child_lower, child_upper); | else {
self.pos = self.bounds.1;
}
}
#[inline(never)]
fn seek(&mut self, storage: &HashedLayer<K, L>, key: &Self::Key) {
// leap to where the key *should* be, or at least be soon after.
// let key_hash = key.hashed();
// only update position if shift is large. otherwise leave it alone.
if self.shift >= MINIMUM_SHIFT {
let target = (key.hashed().as_u64() >> ((K::Output::bytes() * 8) - self.shift)) as usize;
self.pos = target;
}
// scan forward until we find a valid entry >= (key_hash, key)
while self.pos < self.bounds.1 && (!storage.keys[self.pos].is_some() || &storage.keys[self.pos].key < key) {
self.pos += 1;
}
// self.pos should now either
// (i) have self.pos == self.bounds.1 (and be invalid) or
// (ii) point at a valid entry with (entry_hash, entry) >= (key_hash, key).
if self.valid(storage) {
self.child.reposition(&storage.vals, storage.keys[self.pos].get_lower(), storage.keys[self.pos].get_upper());
}
}
fn valid(&self, _storage: &HashedLayer<K, L>) -> bool { self.pos < self.bounds.1 }
fn rewind(&mut self, storage: &HashedLayer<K, L>) {
self.pos = self.bounds.0;
if self.valid(storage) {
self.child.reposition(&storage.vals, storage.keys[self.pos].get_lower(), storage.keys[self.pos].get_upper());
}
}
fn reposition(&mut self, storage: &HashedLayer<K, L>, lower: usize, upper: usize) {
// sort out what the shift is.
// should be just before the first power of two strictly containing (lower, upper].
self.shift = 0;
while upper - lower >= (1 << self.shift) {
self.shift += 1;
}
self.shift -= 1;
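// self.shift is now floor(log2(upper - lower)), matching the shift used when this region was built.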
self.bounds = (lower, upper);
self.pos = lower; // set self.pos to something valid.
while self.pos < self.bounds.1 && !storage.keys[self.pos].is_some() {
self.pos += 1;
}
if self.valid(storage) {
self.child.reposition(&storage.vals, storage.keys[self.pos].get_lower(), storage.keys[self.pos].get_upper());
}
}
} | } | random_line_split |
mapper.rs | use std::{
borrow::Cow,
cmp::{Ordering, Reverse},
collections::HashMap,
sync::Arc,
};
use bathbot_macros::{command, HasName, SlashCommand};
use bathbot_model::ScoreSlim;
use bathbot_psql::model::configs::{ListSize, MinimizedPp};
use bathbot_util::{
constants::{GENERAL_ISSUE, OSU_API_ISSUE},
matcher, CowUtils,
};
use eyre::{Report, Result};
use rosu_v2::{
prelude::{GameMode, Grade, OsuError, Score},
request::UserId,
};
use twilight_interactions::command::{CommandModel, CreateCommand};
use twilight_model::id::{marker::UserMarker, Id};
use super::{require_link, user_not_found, ScoreOrder, TopEntry};
use crate::{
active::{impls::TopPagination, ActiveMessages},
commands::GameModeOption,
core::commands::{prefix::Args, CommandOrigin},
manager::redis::{osu::UserArgs, RedisData},
util::{interaction::InteractionCommand, ChannelExt, InteractionCommandExt},
Context,
};
#[derive(CommandModel, CreateCommand, HasName, SlashCommand)]
#[command(
name = "mapper",
desc = "How often does the given mapper appear in top a user's top plays",
help = "Count the top plays on maps of the given mapper.\n\
It will try to consider guest difficulties so that if a map was created by someone else \
but the given mapper made the guest diff, it will count.\n\
Similarly, if the given mapper created the mapset but someone else guest diff'd, \
it will not count.\n\
This does not always work perfectly, especially for older maps but it's what the api provides."
)]
pub struct Mapper<'a> {
#[command(desc = "Specify a mapper username")]
mapper: Cow<'a, str>,
#[command(desc = "Specify a gamemode")]
mode: Option<GameModeOption>,
#[command(desc = "Specify a username")]
name: Option<Cow<'a, str>>,
#[command(desc = "Choose how the scores should be ordered")]
sort: Option<ScoreOrder>,
#[command(
desc = "Specify a linked discord user",
help = "Instead of specifying an osu! username with the `name` option, \
you can use this option to choose a discord user.\n\
Only works on users who have used the `/link` command."
)]
discord: Option<Id<UserMarker>>,
#[command(
desc = "Size of the embed",
help = "Size of the embed.\n\
`Condensed` shows 10 scores, `Detailed` shows 5, and `Single` shows 1.\n\
The default can be set with the `/config` command."
)]
size: Option<ListSize>,
}
impl<'m> Mapper<'m> {
fn args(
mode: Option<GameModeOption>,
mut args: Args<'m>,
mapper: Option<&'static str>,
) -> Result<Self, &'static str> {
let mapper = match mapper.or_else(|| args.next()) {
Some(arg) => arg.into(),
None => {
let content = "You need to specify at least one osu! username for the mapper. \
If you're not linked, you must specify at least two names.";
return Err(content);
}
};
let mut name = None;
let mut discord = None;
if let Some(arg) = args.next() {
match matcher::get_mention_user(arg) {
Some(id) => discord = Some(id),
None => name = Some(arg.into()),
}
}
Ok(Self {
mapper,
mode,
name,
sort: None,
discord,
size: None,
})
}
}
#[command]
#[desc("How many maps of a user's top100 are made by the given mapper?")]
#[help(
"Display the top plays of a user which were mapped by the given mapper.\n\
Specify the __mapper first__ and the __user second__."
)]
#[usage("[mapper] [user]")]
#[example("\"Hishiro Chizuru\" badewanne3", "monstrata monstrata")]
#[group(Osu)]
async fn prefix_mapper(ctx: Arc<Context>, msg: &Message, args: Args<'_>) -> Result<()> {
match Mapper::args(None, args, None) {
Ok(args) => mapper(ctx, msg.into(), args).await,
Err(content) => {
msg.error(&ctx, content).await?;
Ok(())
}
}
}
#[command]
#[desc("How many maps of a mania user's top100 are made by the given mapper?")]
#[help(
"Display the top plays of a mania user which were mapped by the given mapper.\n\
Specify the __mapper first__ and the __user second__."
)]
#[usage("[mapper] [user]")]
#[example("\"Hishiro Chizuru\" badewanne3", "monstrata monstrata")]
#[alias("mapperm")]
#[group(Mania)]
pub async fn prefix_mappermania(ctx: Arc<Context>, msg: &Message, args: Args<'_>) -> Result<()> {
match Mapper::args(Some(GameModeOption::Mania), args, None) {
Ok(args) => mapper(ctx, msg.into(), args).await,
Err(content) => {
msg.error(&ctx, content).await?;
Ok(())
}
}
}
#[command]
#[desc("How many maps of a taiko user's top100 are made by the given mapper?")]
#[help(
"Display the top plays of a taiko user which were mapped by the given mapper.\n\
Specify the __mapper first__ and the __user second__."
)]
#[usage("[mapper] [user]")]
#[example("\"Hishiro Chizuru\" badewanne3", "monstrata monstrata")]
#[alias("mappert")]
#[group(Taiko)]
pub async fn prefix_mappertaiko(ctx: Arc<Context>, msg: &Message, args: Args<'_>) -> Result<()> {
match Mapper::args(Some(GameModeOption::Taiko), args, None) {
Ok(args) => mapper(ctx, msg.into(), args).await,
Err(content) => {
msg.error(&ctx, content).await?;
Ok(())
}
}
}
#[command]
#[desc("How many maps of a ctb user's top100 are made by the given mapper?")]
#[help(
"Display the top plays of a ctb user which were mapped by the given mapper.\n\
Specify the __mapper first__ and the __user second__."
)]
#[usage("[mapper] [user]")]
#[example("\"Hishiro Chizuru\" badewanne3", "monstrata monstrata")]
#[aliases("mapperc", "mappercatch")]
#[group(Catch)]
async fn prefix_mapperctb(ctx: Arc<Context>, msg: &Message, args: Args<'_>) -> Result<()> {
match Mapper::args(Some(GameModeOption::Catch), args, None) {
Ok(args) => mapper(ctx, msg.into(), args).await,
Err(content) => {
msg.error(&ctx, content).await?;
Ok(())
}
}
}
#[command]
#[desc("How many maps of a user's top100 are made by Sotarks?")]
#[usage("[username]")]
#[example("badewanne3")]
#[group(Osu)]
pub async fn prefix_sotarks(ctx: Arc<Context>, msg: &Message, args: Args<'_>) -> Result<()> {
match Mapper::args(Some(GameModeOption::Osu), args, Some("sotarks")) {
Ok(args) => mapper(ctx, msg.into(), args).await,
Err(content) => {
msg.error(&ctx, content).await?;
Ok(())
}
}
}
async fn slash_mapper(ctx: Arc<Context>, mut command: InteractionCommand) -> Result<()> {
let args = Mapper::from_interaction(command.input_data())?;
mapper(ctx, (&mut command).into(), args).await
}
async fn mapper(ctx: Arc<Context>, orig: CommandOrigin<'_>, args: Mapper<'_>) -> Result<()> {
let msg_owner = orig.user_id()?;
let mut config = match ctx.user_config().with_osu_id(msg_owner).await {
Ok(config) => config,
Err(err) => {
let _ = orig.error(&ctx, GENERAL_ISSUE).await;
return Err(err);
}
};
let mode = args
.mode
.map(GameMode::from)
.or(config.mode)
.unwrap_or(GameMode::Osu);
let user_id = match user_id!(ctx, orig, args) {
Some(user_id) => user_id,
None => match config.osu.take() {
Some(user_id) => UserId::Id(user_id),
None => return require_link(&ctx, &orig).await,
},
};
let mapper = args.mapper.cow_to_ascii_lowercase();
let mapper_args = UserArgs::username(&ctx, mapper.as_ref()).await.mode(mode);
let mapper_fut = ctx.redis().osu_user(mapper_args);
// Retrieve the user and their top scores
let user_args = UserArgs::rosu_id(&ctx, &user_id).await.mode(mode);
let scores_fut = ctx.osu_scores().top().limit(100).exec_with_user(user_args);
let (mapper, user, scores) = match tokio::join!(mapper_fut, scores_fut) {
(Ok(mapper), Ok((user, scores))) => (mapper, user, scores),
(Err(OsuError::NotFound), _) => {
let content = format!("Mapper with username `{mapper}` was not found");
return orig.error(&ctx, content).await;
}
(_, Err(OsuError::NotFound)) => {
let content = user_not_found(&ctx, user_id).await;
return orig.error(&ctx, content).await;
}
(Err(err), _) | (_, Err(err)) => {
let _ = orig.error(&ctx, OSU_API_ISSUE).await;
let err = Report::new(err).wrap_err("failed to get mapper, user, or scores");
return Err(err);
}
};
let (mapper_name, mapper_id) = match &mapper {
RedisData::Original(mapper) => (mapper.username.as_str(), mapper.user_id),
RedisData::Archive(mapper) => (mapper.username.as_str(), mapper.user_id),
};
let username = user.username();
let entries = match process_scores(&ctx, scores, mapper_id, args.sort).await {
Ok(entries) => entries,
Err(err) => {
let _ = orig.error(&ctx, GENERAL_ISSUE).await;
return Err(err.wrap_err("failed to process scores"));
}
};
// Accumulate all necessary data
let content = match mapper_name {
"Sotarks" => {
let amount = entries.len();
let mut content = format!(
"I found {amount} Sotarks map{plural} in `{username}`'s top100, ",
amount = amount,
plural = if amount!= 1 { "s" } else { "" },
);
let to_push = match amount {
0 => "I'm proud \\:)",
1..=4 => "that's already too many...",
5..=8 => "kinda sad \\:/",
9..=15 => "pretty sad \\:(",
16..=25 => "this is so sad \\:((",
26..=35 => "this needs to stop",
36..=49 => "that's a serious problem...",
50 => "that's half. HALF.",
51..=79 => "how do you sleep at night...",
80..=99 => "i'm not even mad, that's just impressive",
100 => "you did it. \"Congrats\".",
_ => "wait how did you do that",
};
content.push_str(to_push);
content
}
_ => format!(
"{count} of `{username}`'{genitive} top score maps were mapped by `{mapper_name}`",
count = entries.len(),
genitive = if username.ends_with('s') { "" } else { "s" },
),
};
let sort_by = args.sort.unwrap_or(ScoreOrder::Pp).into();
let farm = HashMap::default();
let list_size = match args.size.or(config.list_size) {
Some(size) => size,
None => match orig.guild_id() {
Some(guild_id) => ctx
.guild_config()
.peek(guild_id, |config| config.list_size)
.await
.unwrap_or_default(),
None => ListSize::default(),
},
};
let minimized_pp = match config.minimized_pp {
Some(minimized_pp) => minimized_pp,
None => match list_size {
ListSize::Condensed | ListSize::Detailed => MinimizedPp::default(),
ListSize::Single => match orig.guild_id() {
Some(guild_id) => ctx
.guild_config()
.peek(guild_id, |config| config.minimized_pp)
.await
.unwrap_or_default(),
None => MinimizedPp::default(),
},
},
};
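// Summary comment (added for clarity): `list_size` resolves as command option ->
// user config -> guild config -> default, while `minimized_pp` falls back from the
// user config to the guild config (only for single-score embeds) and then the default.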
let pagination = TopPagination::builder()
.user(user)
.mode(mode)
.entries(entries.into_boxed_slice())
.sort_by(sort_by)
.farm(farm)
.list_size(list_size)
.minimized_pp(minimized_pp)
.content(content.into_boxed_str())
.msg_owner(msg_owner)
.build();
ActiveMessages::builder(pagination)
.start_by_update(true)
.begin(ctx, orig)
.await
}
async fn process_scores(
ctx: &Context,
scores: Vec<Score>,
mapper_id: u32,
sort: Option<ScoreOrder>,
) -> Result<Vec<TopEntry>> {
let mut entries = Vec::new();
let maps_id_checksum = scores
.iter()
.filter_map(|score| score.map.as_ref())
.filter(|map| map.creator_id == mapper_id)
.map(|map| (map.map_id as i32, map.checksum.as_deref()))
.collect();
let mut maps = ctx.osu_map().maps(&maps_id_checksum).await?;
for (i, score) in scores.into_iter().enumerate() {
let Some(mut map) = maps.remove(&score.map_id) else { continue };
map.convert_mut(score.mode);
let mut calc = ctx.pp(&map).mode(score.mode).mods(score.mods.bits());
let attrs = calc.difficulty().await;
let stars = attrs.stars() as f32;
let max_combo = attrs.max_combo() as u32;
let pp = score.pp.expect("missing pp");
let max_pp = match score
.pp
.filter(|_| score.grade.eq_letter(Grade::X) && score.mode!= GameMode::Mania)
{
Some(pp) => pp,
None => calc.performance().await.pp() as f32,
};
let entry = TopEntry {
original_idx: i,
replay: score.replay,
score: ScoreSlim::new(score, pp),
map,
max_pp,
stars,
max_combo,
};
entries.push(entry);
}
match sort {
None => {}
Some(ScoreOrder::Acc) => entries.sort_by(|a, b| {
b.score
.accuracy
.partial_cmp(&a.score.accuracy)
.unwrap_or(Ordering::Equal)
}),
Some(ScoreOrder::Bpm) => entries.sort_by(|a, b| {
b.map
.bpm()
.partial_cmp(&a.map.bpm())
.unwrap_or(Ordering::Equal)
}),
Some(ScoreOrder::Combo) => entries.sort_by_key(|entry| Reverse(entry.score.max_combo)),
Some(ScoreOrder::Date) => entries.sort_by_key(|entry| Reverse(entry.score.ended_at)),
Some(ScoreOrder::Length) => {
entries.sort_by(|a, b| {
let a_len = a.map.seconds_drain() as f32 / a.score.mods.clock_rate().unwrap_or(1.0);
let b_len = b.map.seconds_drain() as f32 / b.score.mods.clock_rate().unwrap_or(1.0);
b_len.partial_cmp(&a_len).unwrap_or(Ordering::Equal)
});
}
Some(ScoreOrder::Misses) => entries.sort_by(|a, b| {
b.score
.statistics
.count_miss
.cmp(&a.score.statistics.count_miss)
.then_with(|| {
let hits_a = a.score.total_hits();
let hits_b = b.score.total_hits();
let ratio_a = a.score.statistics.count_miss as f32 / hits_a as f32;
let ratio_b = b.score.statistics.count_miss as f32 / hits_b as f32;
ratio_b
.partial_cmp(&ratio_a)
.unwrap_or(Ordering::Equal)
.then_with(|| hits_b.cmp(&hits_a))
})
}),
Some(ScoreOrder::Pp) => entries.sort_by(|a, b| {
b.score
.pp
.partial_cmp(&a.score.pp)
.unwrap_or(Ordering::Equal)
}),
Some(ScoreOrder::RankedDate) => {
entries.sort_by_key(|entry| Reverse(entry.map.ranked_date()))
}
Some(ScoreOrder::Score) => entries.sort_by_key(|entry| Reverse(entry.score.score)),
Some(ScoreOrder::Stars) => {
entries.sort_by(|a, b| b.stars.partial_cmp(&a.stars).unwrap_or(Ordering::Equal))
}
}
Ok(entries)
}
mapper.rs | use std::{
borrow::Cow,
cmp::{Ordering, Reverse},
collections::HashMap,
sync::Arc,
};
use bathbot_macros::{command, HasName, SlashCommand};
use bathbot_model::ScoreSlim;
use bathbot_psql::model::configs::{ListSize, MinimizedPp};
use bathbot_util::{
constants::{GENERAL_ISSUE, OSU_API_ISSUE},
matcher, CowUtils,
};
use eyre::{Report, Result};
use rosu_v2::{
prelude::{GameMode, Grade, OsuError, Score},
request::UserId,
};
use twilight_interactions::command::{CommandModel, CreateCommand};
use twilight_model::id::{marker::UserMarker, Id};
use super::{require_link, user_not_found, ScoreOrder, TopEntry};
use crate::{
active::{impls::TopPagination, ActiveMessages},
commands::GameModeOption,
core::commands::{prefix::Args, CommandOrigin},
manager::redis::{osu::UserArgs, RedisData},
util::{interaction::InteractionCommand, ChannelExt, InteractionCommandExt},
Context,
};
#[derive(CommandModel, CreateCommand, HasName, SlashCommand)]
#[command(
name = "mapper",
desc = "How often does the given mapper appear in top a user's top plays",
help = "Count the top plays on maps of the given mapper.\n\
It will try to consider guest difficulties so that if a map was created by someone else \
but the given mapper made the guest diff, it will count.\n\
Similarly, if the given mapper created the mapset but someone else guest diff'd, \
it will not count.\n\
This does not always work perfectly, especially for older maps but it's what the api provides."
)]
pub struct Mapper<'a> {
#[command(desc = "Specify a mapper username")]
mapper: Cow<'a, str>,
#[command(desc = "Specify a gamemode")]
mode: Option<GameModeOption>,
#[command(desc = "Specify a username")]
name: Option<Cow<'a, str>>,
#[command(desc = "Choose how the scores should be ordered")]
sort: Option<ScoreOrder>,
#[command(
desc = "Specify a linked discord user",
help = "Instead of specifying an osu! username with the `name` option, \
you can use this option to choose a discord user.\n\
Only works on users who have used the `/link` command."
)]
discord: Option<Id<UserMarker>>,
#[command(
desc = "Size of the embed",
help = "Size of the embed.\n\
`Condensed` shows 10 scores, `Detailed` shows 5, and `Single` shows 1.\n\
The default can be set with the `/config` command."
)]
size: Option<ListSize>,
}
impl<'m> Mapper<'m> {
fn args(
mode: Option<GameModeOption>,
mut args: Args<'m>,
mapper: Option<&'static str>,
) -> Result<Self, &'static str> {
let mapper = match mapper.or_else(|| args.next()) {
Some(arg) => arg.into(),
None => {
let content = "You need to specify at least one osu! username for the mapper. \
If you're not linked, you must specify at least two names.";
return Err(content);
}
};
let mut name = None;
let mut discord = None;
if let Some(arg) = args.next() {
match matcher::get_mention_user(arg) {
Some(id) => discord = Some(id),
None => name = Some(arg.into()),
}
}
Ok(Self {
mapper,
mode,
name,
sort: None,
discord,
size: None,
})
}
}
#[command]
#[desc("How many maps of a user's top100 are made by the given mapper?")]
#[help(
"Display the top plays of a user which were mapped by the given mapper.\n\
Specify the __mapper first__ and the __user second__."
)]
#[usage("[mapper] [user]")]
#[example("\"Hishiro Chizuru\" badewanne3", "monstrata monstrata")]
#[group(Osu)]
async fn prefix_mapper(ctx: Arc<Context>, msg: &Message, args: Args<'_>) -> Result<()> {
match Mapper::args(None, args, None) {
Ok(args) => mapper(ctx, msg.into(), args).await,
Err(content) => {
msg.error(&ctx, content).await?;
Ok(())
}
}
}
#[command]
#[desc("How many maps of a mania user's top100 are made by the given mapper?")]
#[help(
"Display the top plays of a mania user which were mapped by the given mapper.\n\
Specify the __mapper first__ and the __user second__."
)]
#[usage("[mapper] [user]")]
#[example("\"Hishiro Chizuru\" badewanne3", "monstrata monstrata")]
#[alias("mapperm")]
#[group(Mania)]
pub async fn prefix_mappermania(ctx: Arc<Context>, msg: &Message, args: Args<'_>) -> Result<()> {
match Mapper::args(Some(GameModeOption::Mania), args, None) {
Ok(args) => mapper(ctx, msg.into(), args).await,
Err(content) => {
msg.error(&ctx, content).await?;
Ok(())
}
}
}
#[command]
#[desc("How many maps of a taiko user's top100 are made by the given mapper?")]
#[help(
"Display the top plays of a taiko user which were mapped by the given mapper.\n\
Specify the __mapper first__ and the __user second__."
)]
#[usage("[mapper] [user]")]
#[example("\"Hishiro Chizuru\" badewanne3", "monstrata monstrata")]
#[alias("mappert")]
#[group(Taiko)]
pub async fn prefix_mappertaiko(ctx: Arc<Context>, msg: &Message, args: Args<'_>) -> Result<()> {
match Mapper::args(Some(GameModeOption::Taiko), args, None) {
Ok(args) => mapper(ctx, msg.into(), args).await,
Err(content) => {
msg.error(&ctx, content).await?;
Ok(())
}
}
}
#[command]
#[desc("How many maps of a ctb user's top100 are made by the given mapper?")]
#[help(
"Display the top plays of a ctb user which were mapped by the given mapper.\n\
Specify the __mapper first__ and the __user second__."
)]
#[usage("[mapper] [user]")]
#[example("\"Hishiro Chizuru\" badewanne3", "monstrata monstrata")]
#[aliases("mapperc", "mappercatch")]
#[group(Catch)]
async fn prefix_mapperctb(ctx: Arc<Context>, msg: &Message, args: Args<'_>) -> Result<()> {
match Mapper::args(Some(GameModeOption::Catch), args, None) {
Ok(args) => mapper(ctx, msg.into(), args).await,
Err(content) => {
msg.error(&ctx, content).await?;
Ok(())
}
}
}
#[command]
#[desc("How many maps of a user's top100 are made by Sotarks?")]
#[usage("[username]")]
#[example("badewanne3")]
#[group(Osu)]
pub async fn prefix_sotarks(ctx: Arc<Context>, msg: &Message, args: Args<'_>) -> Result<()> {
match Mapper::args(Some(GameModeOption::Osu), args, Some("sotarks")) {
Ok(args) => mapper(ctx, msg.into(), args).await,
Err(content) => {
msg.error(&ctx, content).await?;
Ok(())
}
}
}
async fn slash_mapper(ctx: Arc<Context>, mut command: InteractionCommand) -> Result<()> {
let args = Mapper::from_interaction(command.input_data())?;
mapper(ctx, (&mut command).into(), args).await
}
async fn mapper(ctx: Arc<Context>, orig: CommandOrigin<'_>, args: Mapper<'_>) -> Result<()> {
let msg_owner = orig.user_id()?;
let mut config = match ctx.user_config().with_osu_id(msg_owner).await {
Ok(config) => config,
Err(err) => {
let _ = orig.error(&ctx, GENERAL_ISSUE).await;
return Err(err);
}
};
let mode = args
.mode
.map(GameMode::from)
.or(config.mode)
.unwrap_or(GameMode::Osu);
let user_id = match user_id!(ctx, orig, args) {
Some(user_id) => user_id,
None => match config.osu.take() {
Some(user_id) => UserId::Id(user_id),
None => return require_link(&ctx, &orig).await,
},
};
let mapper = args.mapper.cow_to_ascii_lowercase();
let mapper_args = UserArgs::username(&ctx, mapper.as_ref()).await.mode(mode);
let mapper_fut = ctx.redis().osu_user(mapper_args);
// Retrieve the user and their top scores
let user_args = UserArgs::rosu_id(&ctx, &user_id).await.mode(mode);
let scores_fut = ctx.osu_scores().top().limit(100).exec_with_user(user_args);
let (mapper, user, scores) = match tokio::join!(mapper_fut, scores_fut) {
(Ok(mapper), Ok((user, scores))) => (mapper, user, scores),
(Err(OsuError::NotFound), _) => {
let content = format!("Mapper with username `{mapper}` was not found");
return orig.error(&ctx, content).await;
}
(_, Err(OsuError::NotFound)) => {
let content = user_not_found(&ctx, user_id).await;
return orig.error(&ctx, content).await;
}
(Err(err), _) | (_, Err(err)) => {
let _ = orig.error(&ctx, OSU_API_ISSUE).await;
let err = Report::new(err).wrap_err("failed to get mapper, user, or scores");
return Err(err);
}
};
let (mapper_name, mapper_id) = match &mapper {
RedisData::Original(mapper) => (mapper.username.as_str(), mapper.user_id),
RedisData::Archive(mapper) => (mapper.username.as_str(), mapper.user_id),
};
let username = user.username();
let entries = match process_scores(&ctx, scores, mapper_id, args.sort).await {
Ok(entries) => entries,
Err(err) => {
let _ = orig.error(&ctx, GENERAL_ISSUE).await;
return Err(err.wrap_err("failed to process scores"));
}
};
// Accumulate all necessary data
let content = match mapper_name {
"Sotarks" => {
let amount = entries.len();
let mut content = format!(
"I found {amount} Sotarks map{plural} in `{username}`'s top100, ",
amount = amount,
plural = if amount!= 1 { "s" } else { "" },
);
let to_push = match amount {
0 => "I'm proud \\:)",
1..=4 => "that's already too many...",
5..=8 => "kinda sad \\:/",
9..=15 => "pretty sad \\:(",
16..=25 => "this is so sad \\:((",
26..=35 => "this needs to stop",
36..=49 => "that's a serious problem...",
50 => "that's half. HALF.",
51..=79 => "how do you sleep at night...",
80..=99 => "i'm not even mad, that's just impressive",
100 => "you did it. \"Congrats\".",
_ => "wait how did you do that",
};
content.push_str(to_push);
content
}
_ => format!(
"{count} of `{username}`'{genitive} top score maps were mapped by `{mapper_name}`",
count = entries.len(),
genitive = if username.ends_with('s') { "" } else { "s" },
),
};
let sort_by = args.sort.unwrap_or(ScoreOrder::Pp).into();
let farm = HashMap::default();
let list_size = match args.size.or(config.list_size) {
Some(size) => size,
None => match orig.guild_id() {
Some(guild_id) => ctx
.guild_config()
.peek(guild_id, |config| config.list_size)
.await
.unwrap_or_default(),
None => ListSize::default(),
},
};
let minimized_pp = match config.minimized_pp {
Some(minimized_pp) => minimized_pp,
None => match list_size {
ListSize::Condensed | ListSize::Detailed => MinimizedPp::default(),
ListSize::Single => match orig.guild_id() {
Some(guild_id) => ctx
.guild_config()
.peek(guild_id, |config| config.minimized_pp)
.await
.unwrap_or_default(),
None => MinimizedPp::default(),
},
},
};
let pagination = TopPagination::builder()
.user(user)
.mode(mode)
.entries(entries.into_boxed_slice())
.sort_by(sort_by)
.farm(farm)
.list_size(list_size)
.minimized_pp(minimized_pp)
.content(content.into_boxed_str())
.msg_owner(msg_owner)
.build();
ActiveMessages::builder(pagination)
.start_by_update(true)
.begin(ctx, orig)
.await
}
async fn process_scores(
ctx: &Context,
scores: Vec<Score>,
mapper_id: u32,
sort: Option<ScoreOrder>,
) -> Result<Vec<TopEntry>> {
let mut entries = Vec::new();
let maps_id_checksum = scores
.iter()
.filter_map(|score| score.map.as_ref())
.filter(|map| map.creator_id == mapper_id)
.map(|map| (map.map_id as i32, map.checksum.as_deref()))
.collect();
let mut maps = ctx.osu_map().maps(&maps_id_checksum).await?;
for (i, score) in scores.into_iter().enumerate() {
let Some(mut map) = maps.remove(&score.map_id) else { continue };
map.convert_mut(score.mode);
let mut calc = ctx.pp(&map).mode(score.mode).mods(score.mods.bits());
let attrs = calc.difficulty().await;
let stars = attrs.stars() as f32;
let max_combo = attrs.max_combo() as u32;
let pp = score.pp.expect("missing pp");
let max_pp = match score
.pp
.filter(|_| score.grade.eq_letter(Grade::X) && score.mode!= GameMode::Mania)
{
Some(pp) => pp,
None => calc.performance().await.pp() as f32,
};
let entry = TopEntry {
original_idx: i,
replay: score.replay,
score: ScoreSlim::new(score, pp),
map,
max_pp,
stars,
max_combo,
};
entries.push(entry);
}
match sort {
None => {}
Some(ScoreOrder::Acc) => entries.sort_by(|a, b| {
b.score
.accuracy
.partial_cmp(&a.score.accuracy)
.unwrap_or(Ordering::Equal)
}),
Some(ScoreOrder::Bpm) => entries.sort_by(|a, b| {
b.map
.bpm()
.partial_cmp(&a.map.bpm())
.unwrap_or(Ordering::Equal)
}),
Some(ScoreOrder::Combo) => entries.sort_by_key(|entry| Reverse(entry.score.max_combo)),
Some(ScoreOrder::Date) => entries.sort_by_key(|entry| Reverse(entry.score.ended_at)),
Some(ScoreOrder::Length) => {
entries.sort_by(|a, b| {
let a_len = a.map.seconds_drain() as f32 / a.score.mods.clock_rate().unwrap_or(1.0);
let b_len = b.map.seconds_drain() as f32 / b.score.mods.clock_rate().unwrap_or(1.0);
b_len.partial_cmp(&a_len).unwrap_or(Ordering::Equal)
});
}
Some(ScoreOrder::Misses) => entries.sort_by(|a, b| {
b.score
.statistics
.count_miss
.cmp(&a.score.statistics.count_miss)
.then_with(|| {
let hits_a = a.score.total_hits();
let hits_b = b.score.total_hits();
let ratio_a = a.score.statistics.count_miss as f32 / hits_a as f32;
let ratio_b = b.score.statistics.count_miss as f32 / hits_b as f32;
ratio_b
.partial_cmp(&ratio_a)
.unwrap_or(Ordering::Equal)
.then_with(|| hits_b.cmp(&hits_a))
})
}),
Some(ScoreOrder::Pp) => entries.sort_by(|a, b| {
b.score
.pp
.partial_cmp(&a.score.pp)
.unwrap_or(Ordering::Equal)
}),
Some(ScoreOrder::RankedDate) => {
entries.sort_by_key(|entry| Reverse(entry.map.ranked_date()))
}
Some(ScoreOrder::Score) => entries.sort_by_key(|entry| Reverse(entry.score.score)),
Some(ScoreOrder::Stars) => {
entries.sort_by(|a, b| b.stars.partial_cmp(&a.stars).unwrap_or(Ordering::Equal))
}
}
Ok(entries)
}
mapper.rs | use std::{
borrow::Cow,
cmp::{Ordering, Reverse},
collections::HashMap,
sync::Arc,
};
use bathbot_macros::{command, HasName, SlashCommand};
use bathbot_model::ScoreSlim;
use bathbot_psql::model::configs::{ListSize, MinimizedPp};
use bathbot_util::{
constants::{GENERAL_ISSUE, OSU_API_ISSUE},
matcher, CowUtils,
};
use eyre::{Report, Result};
use rosu_v2::{
prelude::{GameMode, Grade, OsuError, Score},
request::UserId,
};
use twilight_interactions::command::{CommandModel, CreateCommand};
use twilight_model::id::{marker::UserMarker, Id};
use super::{require_link, user_not_found, ScoreOrder, TopEntry};
use crate::{
active::{impls::TopPagination, ActiveMessages},
commands::GameModeOption,
core::commands::{prefix::Args, CommandOrigin},
manager::redis::{osu::UserArgs, RedisData},
util::{interaction::InteractionCommand, ChannelExt, InteractionCommandExt},
Context,
};
#[derive(CommandModel, CreateCommand, HasName, SlashCommand)]
#[command(
name = "mapper",
desc = "How often does the given mapper appear in top a user's top plays",
help = "Count the top plays on maps of the given mapper.\n\
It will try to consider guest difficulties so that if a map was created by someone else \
but the given mapper made the guest diff, it will count.\n\
Similarly, if the given mapper created the mapset but someone else guest diff'd, \
it will not count.\n\
This does not always work perfectly, especially for older maps but it's what the api provides."
)]
pub struct Mapper<'a> {
#[command(desc = "Specify a mapper username")]
mapper: Cow<'a, str>,
#[command(desc = "Specify a gamemode")]
mode: Option<GameModeOption>,
#[command(desc = "Specify a username")]
name: Option<Cow<'a, str>>,
#[command(desc = "Choose how the scores should be ordered")]
sort: Option<ScoreOrder>,
#[command(
desc = "Specify a linked discord user",
help = "Instead of specifying an osu! username with the `name` option, \
you can use this option to choose a discord user.\n\
Only works on users who have used the `/link` command."
)]
discord: Option<Id<UserMarker>>,
#[command(
desc = "Size of the embed",
help = "Size of the embed.\n\
`Condensed` shows 10 scores, `Detailed` shows 5, and `Single` shows 1.\n\
The default can be set with the `/config` command."
)]
size: Option<ListSize>,
}
impl<'m> Mapper<'m> {
fn args(
mode: Option<GameModeOption>,
mut args: Args<'m>,
mapper: Option<&'static str>,
) -> Result<Self, &'static str> {
let mapper = match mapper.or_else(|| args.next()) {
Some(arg) => arg.into(),
None => {
let content = "You need to specify at least one osu! username for the mapper. \
If you're not linked, you must specify at least two names.";
return Err(content);
}
};
let mut name = None;
let mut discord = None;
if let Some(arg) = args.next() {
match matcher::get_mention_user(arg) {
Some(id) => discord = Some(id),
None => name = Some(arg.into()),
}
}
Ok(Self {
mapper,
mode,
name,
sort: None,
discord,
size: None,
})
}
}
#[command]
#[desc("How many maps of a user's top100 are made by the given mapper?")]
#[help(
"Display the top plays of a user which were mapped by the given mapper.\n\
Specify the __mapper first__ and the __user second__."
)]
#[usage("[mapper] [user]")]
#[example("\"Hishiro Chizuru\" badewanne3", "monstrata monstrata")]
#[group(Osu)]
async fn prefix_mapper(ctx: Arc<Context>, msg: &Message, args: Args<'_>) -> Result<()> {
match Mapper::args(None, args, None) {
Ok(args) => mapper(ctx, msg.into(), args).await,
Err(content) => {
msg.error(&ctx, content).await?;
Ok(())
}
}
}
#[command]
#[desc("How many maps of a mania user's top100 are made by the given mapper?")]
#[help(
"Display the top plays of a mania user which were mapped by the given mapper.\n\
Specify the __mapper first__ and the __user second__."
)]
#[usage("[mapper] [user]")]
#[example("\"Hishiro Chizuru\" badewanne3", "monstrata monstrata")]
#[alias("mapperm")]
#[group(Mania)]
pub async fn prefix_mappermania(ctx: Arc<Context>, msg: &Message, args: Args<'_>) -> Result<()> {
match Mapper::args(Some(GameModeOption::Mania), args, None) {
Ok(args) => mapper(ctx, msg.into(), args).await,
Err(content) => {
msg.error(&ctx, content).await?;
Ok(())
}
}
}
#[command]
#[desc("How many maps of a taiko user's top100 are made by the given mapper?")]
#[help(
"Display the top plays of a taiko user which were mapped by the given mapper.\n\
Specify the __mapper first__ and the __user second__."
)]
#[usage("[mapper] [user]")]
#[example("\"Hishiro Chizuru\" badewanne3", "monstrata monstrata")]
#[alias("mappert")]
#[group(Taiko)]
pub async fn prefix_mappertaiko(ctx: Arc<Context>, msg: &Message, args: Args<'_>) -> Result<()> {
match Mapper::args(Some(GameModeOption::Taiko), args, None) {
Ok(args) => mapper(ctx, msg.into(), args).await,
Err(content) => {
msg.error(&ctx, content).await?;
Ok(())
}
}
}
#[command]
#[desc("How many maps of a ctb user's top100 are made by the given mapper?")]
#[help(
"Display the top plays of a ctb user which were mapped by the given mapper.\n\
Specify the __mapper first__ and the __user second__."
)]
#[usage("[mapper] [user]")]
#[example("\"Hishiro Chizuru\" badewanne3", "monstrata monstrata")]
#[aliases("mapperc", "mappercatch")]
#[group(Catch)]
async fn prefix_mapperctb(ctx: Arc<Context>, msg: &Message, args: Args<'_>) -> Result<()> {
match Mapper::args(Some(GameModeOption::Catch), args, None) {
Ok(args) => mapper(ctx, msg.into(), args).await,
Err(content) => {
msg.error(&ctx, content).await?;
Ok(())
}
}
}
#[command]
#[desc("How many maps of a user's top100 are made by Sotarks?")]
#[usage("[username]")]
#[example("badewanne3")]
#[group(Osu)]
pub async fn prefix_sotarks(ctx: Arc<Context>, msg: &Message, args: Args<'_>) -> Result<()> {
match Mapper::args(Some(GameModeOption::Osu), args, Some("sotarks")) {
Ok(args) => mapper(ctx, msg.into(), args).await,
Err(content) => {
msg.error(&ctx, content).await?;
Ok(())
}
}
}
async fn slash_mapper(ctx: Arc<Context>, mut command: InteractionCommand) -> Result<()> {
let args = Mapper::from_interaction(command.input_data())?;
mapper(ctx, (&mut command).into(), args).await
}
async fn mapper(ctx: Arc<Context>, orig: CommandOrigin<'_>, args: Mapper<'_>) -> Result<()> {
let msg_owner = orig.user_id()?;
let mut config = match ctx.user_config().with_osu_id(msg_owner).await {
Ok(config) => config,
Err(err) => {
let _ = orig.error(&ctx, GENERAL_ISSUE).await;
return Err(err);
}
};
let mode = args
.mode
.map(GameMode::from)
.or(config.mode)
.unwrap_or(GameMode::Osu);
let user_id = match user_id!(ctx, orig, args) {
Some(user_id) => user_id,
None => match config.osu.take() {
Some(user_id) => UserId::Id(user_id),
None => return require_link(&ctx, &orig).await,
},
};
let mapper = args.mapper.cow_to_ascii_lowercase();
let mapper_args = UserArgs::username(&ctx, mapper.as_ref()).await.mode(mode);
let mapper_fut = ctx.redis().osu_user(mapper_args);
// Retrieve the user and their top scores
let user_args = UserArgs::rosu_id(&ctx, &user_id).await.mode(mode);
let scores_fut = ctx.osu_scores().top().limit(100).exec_with_user(user_args);
let (mapper, user, scores) = match tokio::join!(mapper_fut, scores_fut) {
(Ok(mapper), Ok((user, scores))) => (mapper, user, scores),
(Err(OsuError::NotFound), _) => {
let content = format!("Mapper with username `{mapper}` was not found");
return orig.error(&ctx, content).await;
}
(_, Err(OsuError::NotFound)) => {
let content = user_not_found(&ctx, user_id).await;
return orig.error(&ctx, content).await;
}
(Err(err), _) | (_, Err(err)) => {
let _ = orig.error(&ctx, OSU_API_ISSUE).await;
let err = Report::new(err).wrap_err("failed to get mapper, user, or scores");
return Err(err);
}
};
let (mapper_name, mapper_id) = match &mapper {
RedisData::Original(mapper) => (mapper.username.as_str(), mapper.user_id),
RedisData::Archive(mapper) => (mapper.username.as_str(), mapper.user_id),
};
let username = user.username();
let entries = match process_scores(&ctx, scores, mapper_id, args.sort).await {
Ok(entries) => entries,
Err(err) => {
let _ = orig.error(&ctx, GENERAL_ISSUE).await;
return Err(err.wrap_err("failed to process scores"));
}
};
// Accumulate all necessary data
let content = match mapper_name {
"Sotarks" => {
let amount = entries.len();
let mut content = format!(
"I found {amount} Sotarks map{plural} in `{username}`'s top100, ",
amount = amount,
plural = if amount!= 1 { "s" } else { "" },
);
let to_push = match amount {
0 => "I'm proud \\:)",
1..=4 => "that's already too many...",
5..=8 => "kinda sad \\:/",
9..=15 => "pretty sad \\:(",
16..=25 => "this is so sad \\:((",
26..=35 => "this needs to stop",
36..=49 => "that's a serious problem...",
50 => "that's half. HALF.",
51..=79 => "how do you sleep at night...",
80..=99 => "i'm not even mad, that's just impressive",
100 => "you did it. \"Congrats\".",
_ => "wait how did you do that",
};
content.push_str(to_push);
content
}
_ => format!(
"{count} of `{username}`'{genitive} top score maps were mapped by `{mapper_name}`",
count = entries.len(),
genitive = if username.ends_with('s') { "" } else { "s" },
),
};
let sort_by = args.sort.unwrap_or(ScoreOrder::Pp).into();
let farm = HashMap::default();
let list_size = match args.size.or(config.list_size) {
Some(size) => size,
None => match orig.guild_id() {
Some(guild_id) => ctx
.guild_config()
.peek(guild_id, |config| config.list_size)
.await
.unwrap_or_default(),
None => ListSize::default(),
},
};
let minimized_pp = match config.minimized_pp {
Some(minimized_pp) => minimized_pp,
None => match list_size {
ListSize::Condensed | ListSize::Detailed => MinimizedPp::default(),
ListSize::Single => match orig.guild_id() {
Some(guild_id) => ctx
.guild_config()
.peek(guild_id, |config| config.minimized_pp)
.await
.unwrap_or_default(),
None => MinimizedPp::default(),
},
},
};
let pagination = TopPagination::builder()
.user(user)
.mode(mode)
.entries(entries.into_boxed_slice())
.sort_by(sort_by)
.farm(farm)
.list_size(list_size)
.minimized_pp(minimized_pp)
.content(content.into_boxed_str())
.msg_owner(msg_owner)
.build();
ActiveMessages::builder(pagination)
.start_by_update(true)
.begin(ctx, orig)
.await
}
async fn process_scores(
ctx: &Context,
scores: Vec<Score>,
mapper_id: u32,
sort: Option<ScoreOrder>,
) -> Result<Vec<TopEntry>> {
let mut entries = Vec::new();
let maps_id_checksum = scores
.iter()
.filter_map(|score| score.map.as_ref())
.filter(|map| map.creator_id == mapper_id)
.map(|map| (map.map_id as i32, map.checksum.as_deref()))
.collect();
let mut maps = ctx.osu_map().maps(&maps_id_checksum).await?;
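// Note (added for clarity): `maps_id_checksum` above only contains maps created by
// the requested mapper, so any score whose map is missing from `maps` is skipped
// by the `let ... else { continue }` below.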
for (i, score) in scores.into_iter().enumerate() {
let Some(mut map) = maps.remove(&score.map_id) else { continue };
map.convert_mut(score.mode);
let mut calc = ctx.pp(&map).mode(score.mode).mods(score.mods.bits());
let attrs = calc.difficulty().await;
let stars = attrs.stars() as f32;
let max_combo = attrs.max_combo() as u32;
let pp = score.pp.expect("missing pp");
let max_pp = match score
.pp
.filter(|_| score.grade.eq_letter(Grade::X) && score.mode!= GameMode::Mania)
{
Some(pp) => pp,
None => calc.performance().await.pp() as f32,
};
let entry = TopEntry {
original_idx: i,
replay: score.replay,
score: ScoreSlim::new(score, pp),
map,
max_pp,
stars,
max_combo,
};
entries.push(entry);
}
match sort {
None => {}
Some(ScoreOrder::Acc) => entries.sort_by(|a, b| {
b.score
.accuracy
.partial_cmp(&a.score.accuracy)
.unwrap_or(Ordering::Equal)
}),
Some(ScoreOrder::Bpm) => entries.sort_by(|a, b| {
b.map
.bpm()
.partial_cmp(&a.map.bpm())
.unwrap_or(Ordering::Equal)
}),
Some(ScoreOrder::Combo) => entries.sort_by_key(|entry| Reverse(entry.score.max_combo)),
Some(ScoreOrder::Date) => entries.sort_by_key(|entry| Reverse(entry.score.ended_at)),
Some(ScoreOrder::Length) => {
entries.sort_by(|a, b| {
let a_len = a.map.seconds_drain() as f32 / a.score.mods.clock_rate().unwrap_or(1.0);
let b_len = b.map.seconds_drain() as f32 / b.score.mods.clock_rate().unwrap_or(1.0);
b_len.partial_cmp(&a_len).unwrap_or(Ordering::Equal)
});
}
Some(ScoreOrder::Misses) => entries.sort_by(|a, b| {
b.score
.statistics
.count_miss
.cmp(&a.score.statistics.count_miss)
.then_with(|| {
let hits_a = a.score.total_hits();
let hits_b = b.score.total_hits();
let ratio_a = a.score.statistics.count_miss as f32 / hits_a as f32;
let ratio_b = b.score.statistics.count_miss as f32 / hits_b as f32;
ratio_b
.partial_cmp(&ratio_a)
.unwrap_or(Ordering::Equal)
.then_with(|| hits_b.cmp(&hits_a))
})
}),
Some(ScoreOrder::Pp) => entries.sort_by(|a, b| {
b.score
.pp
.partial_cmp(&a.score.pp)
.unwrap_or(Ordering::Equal)
}),
Some(ScoreOrder::RankedDate) => {
entries.sort_by_key(|entry| Reverse(entry.map.ranked_date()))
}
Some(ScoreOrder::Score) => entries.sort_by_key(|entry| Reverse(entry.score.score)),
Some(ScoreOrder::Stars) => {
entries.sort_by(|a, b| b.stars.partial_cmp(&a.stars).unwrap_or(Ordering::Equal))
}
}
Ok(entries)
}
dp.rs | // Let's start learning dynamic programming
use std::cmp::min;
// https://leetcode-cn.com/problems/maximum-subarray
// Maximum subarray sum. It doesn't look much like dynamic programming; it feels more like a sliding window.
pub fn max_sub_array(nums: Vec<i32>) -> i32 {
let mut sum = nums[0];
let mut ans = nums[0];
for i in 1..nums.len() {
if sum > 0 {
// add positive sum means larger
sum += nums[i];
} else {
// start from new one means larger
sum = nums[i];
}
// ans always store the largest sum
ans = std::cmp::max(sum, ans);
}
ans
}
// https://leetcode-cn.com/problems/climbing-stairs/solution/
// basic dynamic programming
pub fn climb_stairs(n: i32) -> i32 {
if n == 0 || n == 1 {
return 1;
}
// f(n) = f(n-1) + f(n-2)
// iterative is harder than recursive
let mut n_1 = 1; // f(n-1)
let mut n_2 = 1; // f(n-2)
let mut ans = 0;
for _ in 1..n {
ans = n_1 + n_2;
n_1 = n_2;
n_2 = ans;
}
ans
}
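// A minimal sanity check of the recurrence above (added for illustration; it is
// not part of the original file): f(2) = 2, f(3) = 3, f(4) = 5.
#[cfg(test)]
mod climb_stairs_tests {
    use super::*;

    #[test]
    fn small_cases() {
        assert_eq!(climb_stairs(2), 2);
        assert_eq!(climb_stairs(3), 3);
        assert_eq!(climb_stairs(4), 5);
    }
}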
// https://leetcode-cn.com/problems/best-time-to-buy-and-sell-stock/solution/yi-ge-fang-fa-tuan-mie-6-dao-gu-piao-wen-ti-by-l-3/
// sell stock using state machine
// this is the solution for infinite k
pub fn max_profit_infinite(prices: Vec<i32>) -> i32 {
let mut s_keep = std::i32::MIN; // you could not keep any stock on the very first day
let mut s_empty = 0;
for price in prices {
s_keep = std::cmp::max(s_keep, s_empty - price);
s_empty = std::cmp::max(s_empty, s_keep + price);
}
return s_empty;
}
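// Worked trace of the two-state machine above (illustrative, assuming prices = [1, 3, 5]):
//   price 1: keep = max(MIN, 0 - 1) = -1, empty = max(0, -1 + 1) = 0
//   price 3: keep = max(-1, 0 - 3) = -1, empty = max(0, -1 + 3) = 2
//   price 5: keep = max(-1, 2 - 5) = -1, empty = max(2, -1 + 5) = 4
// so the function returns 4, i.e. buy at 1, sell at 3, buy at 3 again, sell at 5.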
// https://leetcode-cn.com/problems/best-time-to-buy-and-sell-stock-with-cooldown/solution/zhuang-tai-ji-mo-xing-dp-by-acw_wangdh15/
// Solve it by modeling the problem as a finite state machine.
use std::i32;
pub fn max_profit_cool(prices: Vec<i32>) -> i32 {
let n = prices.len();
let mut dp = vec![vec![i32::MIN; 3]; n+1];
// State 0: allowed to buy; buying moves to state 1. We can stay in place, or arrive here from the cooldown state.
// State 1: holding, allowed to sell; selling moves to state 2. We can stay in place, or arrive here from state 0.
// State 2: cooldown; after one day we move to state 0. We can only arrive here from state 1.
// dp[i+1][0]: can buy tomorrow - either we do nothing today, or today is the cooldown day
// dp[i+1][1]: can sell tomorrow - either we buy today, or we keep holding
// dp[i+1][2]: tomorrow is the cooldown, so we must sell today
dp[0][0] = 0;
for i in 0..n {
dp[i+1][0] = dp[i][0].max(dp[i][2]); // transition from state 0 or state 2
dp[i+1][1] = dp[i][1].max(dp[i][0] - prices[i]);
dp[i+1][2] = dp[i][1] + prices[i];
// println!("dp[i][0]: {}", dp[i][0]);
// println!("dp[i][1]: {}", dp[i][1]);
// println!("dp[i][2]: {}", dp[i][2]);
}
return dp[n][0].max(dp[n][2]);
}
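// Transition picture for the three states above (summary added for clarity):
//   0 (may buy) --buy--> 1 (holding) --sell--> 2 (cooldown) --one day--> 0
// states 0 and 1 may also stay where they are by doing nothing; the cooldown
// state always lasts exactly one day before buying is allowed again.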
pub fn max_profit_once(prices: Vec<i32>) -> i32 {
// suffix 0 means no trade (buy or sell) happen
// 1 means it happend
// let mut s_keep_0 = std::i32::MIN; // you could not keep any stock on the very first day
let mut s_empty_0 = 0;
let mut s_keep_1 = std::i32::MIN;
let mut s_empty_1 = std::i32::MIN;
for price in prices {
s_keep_1 = std::cmp::max(s_keep_1, s_empty_0 - price);
s_empty_1 = std::cmp::max(s_empty_1, s_keep_1 + price);
}
return std::cmp::max(s_empty_1, 0);
}
pub fn max_profit_twice(prices: Vec<i32>) -> i32 {
// suffix 0 means no trade (buy or sell) happen
// 1 means it happend
// let mut s_keep_0 = std::i32::MIN; // you could not keep any stock on the very first day
let mut s_empty_0 = 0;
let mut s_keep_1 = std::i32::MIN;
let mut s_empty_1 = std::i32::MIN;
let mut s_keep_2 = std::i32::MIN;
let mut s_empty_2 = std::i32::MIN;
for price in prices {
s_keep_1 = std::cmp::max(s_keep_1, s_empty_0 - price);
s_empty_1 = std::cmp::max(s_empty_1, s_keep_1 + price);
s_keep_2 = std::cmp::max(s_keep_2, s_empty_1 - price);
s_empty_2 = std::cmp::max(s_empty_2, s_keep_2 + price);
}
return std::cmp::max(s_empty_2, 0);
}
// this one works but consumes too much memory
pub fn max_profit_k_memory_consume(k: i32, prices: Vec<i32>) -> i32 {
// from example above, we know the initial value is 0
// here, k become a variable, some we need a matrix to
// store different status
// how many status we have?
// empty or keep => 2
// trade times => k
// so we have 2k status
let mut s_trade: [i32; 2] = [std::i32::MIN, std::i32::MIN]; // trade state: empty or keep
let mut s_times: Vec<[i32;2]> = Vec::new();
let k: usize = k as usize;
for i in 0..k+1 {
s_times.push(s_trade.clone());
}
s_times[0][0] = 0;
for price in prices {
for j in 0..k {
s_times[j+1][1] = std::cmp::max(s_times[j+1][1], s_times[j][0] - price);
s_times[j+1][0] = std::cmp::max(s_times[j+1][0], s_times[j+1][1] + price);
}
}
return std::cmp::max(0, s_times[k][0]);
}
// memory efficient version
pub fn max_profit_k(k: i32, prices: Vec<i32>) -> i32 {
// here if k in unreasonable large, switch to infinite version
let k: usize = k as usize;
if k > prices.len()/2 {
return max_profit_infinite(prices);
}
let mut s_trade: [i32; 2] = [std::i32::MIN, std::i32::MIN]; // trade state: empty or keep
let mut s_times: Vec<[i32;2]> = Vec::new();
for i in 0..k+1 {
s_times.push(s_trade.clone());
}
s_times[0][0] = 0;
for price in prices {
for j in 0..k {
s_times[j+1][1] = std::cmp::max(s_times[j+1][1], s_times[j][0] - price);
s_times[j+1][0] = std::cmp::max(s_times[j+1][0], s_times[j+1][1] + price);
}
}
return std::cmp::max(0, s_times[k][0]);
}
// shortest path
// https://leetcode-cn.com/problems/minimum-path-sum/
// way: set grid value as the cost to get there
// matrix:
// 1 0 1 1 1 2
// 2 3 5 => 3 4 7
// 5 3 2 8 7 9
pub fn min_path_sum(grid: Vec<Vec<i32>>) -> i32 {
let row = grid.len();
let col = grid[0].len();
let mut cost = grid.clone();
for r in 0..row {
for c in 0..col {
if r == 0 && c == 0 {
cost[r][c] = grid[r][c];
} else if r == 0 {
cost[r][c] = grid[r][c] + cost[r][c-1];
} else if c == 0 {
cost[r][c] = grid[r][c] + cost[r-1][c];
} else {
cost[r][c] = grid[r][c] + min(cost[r-1][c], cost[r][c-1]);
}
}
}
return cost[row-1][col-1];
}
// https://leetcode-cn.com/problems/generate-parentheses/solution/
pub fn generate_parenthesis(n: i32) -> Vec<String> {
if n == 0 {
return Vec::new();
}
let mut dp = vec![Vec::<String>::new(); (n+1) as usize];
dp[0] = vec![String::from("")];
for i in 1..=n {
println!("Round {}", i);
let mut cur = vec![];
for j in 0..i {
let left = &dp[j as usize];
let right = &dp[(i-j-1) as usize];
for l in left {
for r in right {
let tmp = format!("({}){}", l, r);
println!("new string {}", tmp);
cur.push(tmp);
}
}
}
dp[i as usize] = cur;
}
let res = dp.pop().unwrap();
return res
}
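// Example of the construction order above (illustrative): for n = 2 the dp table
// ends up as ["()()", "(())"], i.e. dp[2] is built from dp[0]xdp[1] and dp[1]xdp[0].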
// https://leetcode-cn.com/problems/unique-paths/
// number of paths to P[i][j] = P[i-1][j] + P[i][j-1]
pub fn unique_paths(m: i32, n: i32) -> i32 {
if m == 1 || n == 1 {
return 1;
} else {
return unique_paths(m - 1, n) + unique_paths(m, n - 1);
}
}
pub fn unique_paths_iter(m: i32, n: i32) -> i32 {
let m: usize = m as usize;
let n: usize = n as usize;
let mut cache = vec![vec![0; n]; m];
for i in 0..m {
for j in 0..n {
if i == 0 || j == 0 {
cache[i][j] = 1;
} else {
cache[i][j] = cache[i-1][j] + cache[i][j-1];
}
}
}
return cache[m-1][n-1] as i32;
}
// https://leetcode-cn.com/problems/unique-paths-ii/solution/
pub fn unique_paths_with_obstacles2(obstacle_grid: Vec<Vec<i32>>) -> i32 {
let m = obstacle_grid.len();
let n = obstacle_grid[0].len();
let mut cache = vec![vec![0; n]; m];
for i in 0..m {
for j in 0..n {
if obstacle_grid[i][j] == 1 {
cache[i][j] = 0;
} else if i == 0 && j == 0 {
cache[i][j] = 1;
} else if i == 0 {
cache[i][j] = cache[i][j-1];
} else if j == 0 {
cache[i][j] = cache[i-1][j];
} else {
cache[i][j] = cache[i-1][j] + cache[i][j-1];
}
}
}
return cache[m-1][n-1];
}
// https://leetcode-cn.com/problems/house-robber/submissions/
pub fn rob(nums: Vec<i32>) -> i32 {
let len = nums.len();
if len == 0 {
return 0;
} else if len == 1 {
return nums[0];
} else if len == 2 {
return nums[0].max(nums[1]);
} // else len > 2
let mut m1 = nums[0];
let mut m2 = nums[1].max(m1);
for i in 2..nums.len() {
println!("m1 {} m2 {}", m1, m2);
m1 = (m1 + nums[i]).max(m2);
let temp = m2;
m2 = m1;
m1 = temp;
}
println!("m1 {} m2 {}", m1, m2);
return m2;
}
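// Worked example (matches the commented-out call in main below): for
// [1, 3, 1, 3, 100] the loop settles on 103, i.e. robbing the houses worth 3 and 100.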
// https://leetcode-cn.com/problems/maximum-product-subarray/submissions/
pub fn max_product(nums: Vec<i32>) -> i32 {
if nums.len() == 0 { return 0; }
let (mut max, mut min) = (1, 1);
let mut res = std::i32::MIN;
let len = nums.len();
// the if inside the loop makes this slower!
for n in nums {
let t_max = max;
let t_min = min;
max = (t_max * n).max(n).max(t_min * n);
min = (t_min * n).min(n).min(t_max * n);
res = res.max(max);
}
println!("{}", res);
return res;
}
// https://leetcode-cn.com/problems/gu-piao-de-zui-da-li-run-lcof/
// since we buy and sell only once, it is enough to track the lowest price so far
pub fn max_profit(mut prices: Vec<i32>) -> i32 {
let mut profit = 0;
let mut cost = 1<<30;
for i in 0..prices.len() {
cost = cost.min(prices[i]);
profit = (prices[i] - cost).max(profit);
}
return profit;
}
// https://leetcode-cn.com/problems/word-break/
pub fn word_break(s: String, word_dict: Vec<String>) -> bool {
if word_dict.is_empty() { return false; }
let len = s.len();
let mut dp: Vec<bool> = vec![false; len+1];
dp[0] = true;
for i in 0..len {
if!dp[i] { continue; }
for w in &word_dict {
let end = i + w.len();
if end <= len &&!dp[end] && &s[i..end] == w.as_str() {
dp[end] = true;
}
}
}
dp[len]
}
// https://leetcode-cn.com/problems/maximum-length-of-repeated-subarray/solution/
// essentially filling in a table
pub fn find_length(a: Vec<i32>, b: Vec<i32>) -> i32 {
let row = a.len();
let col = b.len();
let mut dp = vec![vec![0; col]; row];
let mut res = 0;
for i in 0..row {
for j in 0..col {
if a[i] == b[j] {
let last = if ( i == 0 || j == 0 ) { 0 } else { dp[i-1][j-1] };
dp[i][j] = last + 1;
res = res.max(dp[i][j]);
} else {
dp[i][j] = 0;
}
}
}
return res as i32;
}
// https://leetcode-cn.com/problems/unique-paths-ii/
pub fn unique_paths_with_obstacles(obstacle_grid: Vec<Vec<i32>>) -> i32 {
let row = obstacle_grid.len();
let col = obstacle_grid[0].len();
let mut dp = vec![vec![0; col]; row];
// init first row and col
for i in 0..row {
for j in 0..col {
if obstacle_grid[i][j] == 0 {
if i == 0 && j == 0 {
dp[i][j] = 1;
} else if i == 0 {
dp[i][j] = dp[i][j-1];
} else if j == 0 {
dp[i][j] = dp[i-1][j];
} else {
dp[i][j] = dp[i-1][j] + dp[i][j-1];
}
} else {
// hit an obstacle; dp was initialized to 0, so this assignment is technically redundant
dp[i][j] = 0;
}
}
}
return dp[row-1][col-1];
}
// https://leetcode-cn.com/problems/re-space-lcci/
pub fn respace(dictionary: Vec<String>, sentence: String) -> i32 {
42
}
// https://leetcode-cn.com/problems/li-wu-de-zui-da-jie-zhi-lcof/
pub fn max_value(mut grid: Vec<Vec<i32>>) -> i32 {
let row = grid.len();
let col = grid[0].len();
for i in 0..row {
for j in 0..col {
if i == 0 && j == 0 {
// pass
} else if i == 0 {
grid[i][j] += grid[i][j-1];
} else if j == 0 {
grid[i][j] += grid[i-1][j];
} else {
grid[i][j] += grid[i-1][j].max(grid[i][j-1]);
}
}
}
return grid[row-1][col-1];
}
// https://leetcode-cn.com/problems/triangle/solution/di-gui-ji-yi-hua-dp-bi-xu-miao-dong-by-sweetiee/
pub fn minimum_total(triangle: Vec<Vec<i32>>) -> i32 {
let n = triangle.len();
let mut dp = vec![0; n+1];
for i in (0..n).rev() {
for j in 0..=i {
println!("i, j = {}, {}", i, j);
dp[j] = dp[j].min(dp[j+1]) + triangle[i][j];
}
}
return dp[0];
}
// https://leetcode-cn.com/problems/nge-tou-zi-de-dian-shu-lcof/solution/
pub fn two_sum(n: i32) -> Vec<f64> {
let mut res = vec![1./6.;6];
for i in 1..n as usize {
let mut temp = vec![0.0; 5 * i + 6];
for j in 0..res.len() {
for k in 0..6 {
temp[j+k] += res[j] * 1.0/6.0;
}
}
res = temp;
}
return res;
}
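// Note (added for clarity): the result has 5n + 1 entries covering dice sums n..=6n;
// e.g. two_sum(2) returns 11 probabilities and the middle one (sum 7) is 6/36.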
// https://leetcode-cn.com/problems/minimum-path-sum/submissions/
pub fn min_path_sum2(mut grid: Vec<Vec<i32>>) -> i32 {
let row = grid.len();
let col = grid[0].len();
for i in 1..row {
grid[i][0] += grid[i-1][0];
}
for j in 1..col {
grid[0][j] += grid[0][j-1];
}
for i in 1..row {
for j in 1..col {
grid[i][j] = grid[i][j-1].min(grid[i-1][j]) + grid[i][j];
}
}
return grid[row-1][col-1];
}
fn main()
{
// generate_parenthesis(4);
// println!("(1,1) {}", unique_paths_iter(1, 1));
// println!("(2,2) {}", unique_paths_iter(2, 2));
// println!("(3,2) {}", unique_paths_iter(3, 2));
// println!("(2,3) {}", unique_paths_iter(2, 3));
// rob([1, 3, 1, 3, 100].to_vec());
// max_product([-2,0,-1].to_vec());
// max_product([-1,-2,-9,-6].to_vec());
// max_profit([1,2,3].to_vec());
// word_break("leetcode".to_string(), ["leet".to_string(), "code".to_string()].to_vec());
// dbg!(find_length([1,2,3,2,1].to_vec(), [3,2,1,4,7].to_vec()));
// dbg!(max_profit_cool([1,2,3,0,2].to_vec()));
// let tri = [
// [2].to_vec(),
// [3,4].to_vec(),
// [6,5,7].to_vec(),
// [4,1,8,3].to_vec()
// ].to_vec();
// dbg!(minimum_total(tri));
// dbg!(two_sum(5));
min_path_sum2([
[1,3,1].to_vec(),
[1,5,1].to_vec(),
[4,2,1].to_vec(),
].to_vec());
}
dp.rs | // Let's start learning dynamic programming
use std::cmp::min;
// https://leetcode-cn.com/problems/maximum-subarray
// Maximum subarray sum. It doesn't look much like dynamic programming; it feels more like a sliding window.
pub fn max_sub_array(nums: Vec<i32>) -> i32 {
let mut sum = nums[0];
let mut ans = nums[0];
for i in 1..nums.len() {
if sum > 0 {
// add positive sum means larger
sum += nums[i];
} else {
// start from new one means larger
sum = nums[i];
}
// ans always store the largest sum
ans = std::cmp::max(sum, ans);
}
ans
}
// https://leetcode-cn.com/problems/climbing-stairs/solution/
// basic dynamic programming
pub fn climb_stairs(n: i32) -> i32 {
if n == 0 || n == 1 {
return 1;
}
// f(n) = f(n-1) + f(n-2)
// iterative is harder than recursive
let mut n_1 = 1; // f(n-1)
let mut n_2 = 1; // f(n-2)
let mut ans = 0;
for _ in 1..n {
ans = n_1 + n_2;
n_1 = n_2;
n_2 = ans;
}
ans
}
// https://leetcode-cn.com/problems/best-time-to-buy-and-sell-stock/solution/yi-ge-fang-fa-tuan-mie-6-dao-gu-piao-wen-ti-by-l-3/
// sell stock using state machine
// this is the solution for infinite k
pub fn max_profit_infinite(prices: Vec<i32>) -> i32 {
let mut s_keep = std::i32::MIN; // you could not keep any stock on the very first day
let mut s_empty = 0;
for price in prices {
s_keep = std::cmp::max(s_keep, s_empty - price);
s_empty = std::cmp::max(s_empty, s_keep + price);
}
return s_empty;
}
// https://leetcode-cn.com/problems/best-time-to-buy-and-sell-stock-with-cooldown/solution/zhuang-tai-ji-mo-xing-dp-by-acw_wangdh15/
// Solve it by modeling the problem as a finite state machine.
use std::i32;
pub fn max_profit_cool(prices: Vec<i32>) -> i32 {
let n = prices.len();
let mut dp = vec![vec![i32::MIN; 3]; n+1];
// State 0: allowed to buy; buying moves to state 1. We can stay in place, or arrive here from the cooldown state.
// State 1: holding, allowed to sell; selling moves to state 2. We can stay in place, or arrive here from state 0.
// State 2: cooldown; after one day we move to state 0. We can only arrive here from state 1.
// dp[i+1][0]: can buy tomorrow - either we do nothing today, or today is the cooldown day
// dp[i+1][1]: can sell tomorrow - either we buy today, or we keep holding
// dp[i+1][2]: tomorrow is the cooldown, so we must sell today
dp[0][0] = 0;
for i in 0..n {
dp[i+1][0] = dp[i][0].max(dp[i][2]); // transition from state 0 or state 2
dp[i+1][1] = dp[i][1].max(dp[i][0] - prices[i]);
dp[i+1][2] = dp[i][1] + prices[i];
// println!("dp[i][0]: {}", dp[i][0]);
// println!("dp[i][1]: {}", dp[i][1]);
// println!("dp[i][2]: {}", dp[i][2]);
}
return dp[n][0].max(dp[n][2]);
}
pub fn max_profit_once(prices: Vec<i32>) -> i32 {
// suffix 0 means no trade (buy or sell) happen
// 1 means it happend
// let mut s_keep_0 = std::i32::MIN; // you could not keep any stock on the very first day
let mut s_empty_0 = 0;
let mut s_keep_1 = std::i32::MIN;
let mut s_empty_1 = std::i32::MIN;
for price in prices {
s_keep_1 = std::cmp::max(s_keep_1, s_empty_0 - price);
s_empty_1 = std::cmp::max(s_empty_1, s_keep_1 + price);
}
return std::cmp::max(s_empty_1, 0);
}
pub fn max_profit_twice(prices: Vec<i32>) -> i32 {
// suffix 0 means no trade (buy or sell) happen
// 1 means it happend
// let mut s_keep_0 = std::i32::MIN; // you could not keep any stock on the very first day
let mut s_empty_0 = 0;
let mut s_keep_1 = std::i32::MIN;
let mut s_empty_1 = std::i32::MIN;
let mut s_keep_2 = std::i32::MIN;
let mut s_empty_2 = std::i32::MIN;
for price in prices {
s_keep_1 = std::cmp::max(s_keep_1, s_empty_0 - price);
s_empty_1 = std::cmp::max(s_empty_1, s_keep_1 + price);
s_keep_2 = std::cmp::max(s_keep_2, s_empty_1 - price);
s_empty_2 = std::cmp::max(s_empty_2, s_keep_2 + price);
}
return std::cmp::max(s_empty_2, 0);
}
// this one works but consumes too much memory
pub fn max_profit_k_memory_consume(k: i32, prices: Vec<i32>) -> i32 {
// from example above, we know the initial value is 0
// here, k become a variable, some we need a matrix to
// store different status
// how many status we have?
// empty or keep => 2
// trade times => k
// so we have 2k status
let mut s_trade: [i32; 2] = [std::i32::MIN, std::i32::MIN]; // trade state: empty or keep
let mut s_times: Vec<[i32;2]> = Vec::new();
let k: usize = k as usize;
for i in 0..k+1 {
s_times.push(s_trade.clone());
}
s_times[0][0] = 0;
for price in prices {
for j in 0..k {
s_times[j+1][1] = std::cmp::max(s_times[j+1][1], s_times[j][0] - price);
s_times[j+1][0] = std::cmp::max(s_times[j+1][0], s_times[j+1][1] + price);
}
}
return std::cmp::max(0, s_times[k][0]);
}
// memory efficient version
pub fn max_profit_k(k: i32, prices: Vec<i32>) -> i32 {
// here if k in unreasonable large, switch to infinite version
let k: usize = k as usize;
if k > prices.len()/2 {
return max_profit_infinite(prices);
}
let mut s_trade: [i32; 2] = [std::i32::MIN, std::i32::MIN]; // trade state: empty or keep
let mut s_times: Vec<[i32;2]> = Vec::new();
for i in 0..k+1 {
s_times.push(s_trade.clone());
}
s_times[0][0] = 0;
for price in prices {
for j in 0..k {
s_times[j+1][1] = std::cmp::max(s_times[j+1][1], s_times[j][0] - price);
s_times[j+1][0] = std::cmp::max(s_times[j+1][0], s_times[j+1][1] + price);
}
}
return std::cmp::max(0, s_times[k][0]);
}
// shortest path
// https://leetcode-cn.com/problems/minimum-path-sum/
// way: set grid value as the cost to get there
// matrix:
// 1 0 1 1 1 2
// 2 3 5 => 3 4 7
// 5 3 2 8 7 9
pub fn min_path_sum(grid: Vec<Vec<i32>>) -> i32 {
let row = grid.len();
let col = grid[0].len();
let mut cost = grid.clone();
for r in 0..row {
for c in 0..col {
if r == 0 && c == 0 {
cost[r][c] = grid[r][c];
} else if r == 0 {
cost[r][c] = grid[r][c] + cost[r][c-1];
} else if c == 0 {
cost[r][c] = grid[r][c] + cost[r-1][c];
} else {
cost[r][c] = grid[r][c] + min(cost[r-1][c], cost[r][c-1]);
}
}
}
return cost[row-1][col-1];
}
// https://leetcode-cn.com/problems/generate-parentheses/solution/
pub fn generate_parenthesis(n: i32) -> Vec<String> {
if n == 0 {
return Vec::new();
}
let mut dp = vec![Vec::<String>::new(); (n+1) as usize];
dp[0] = vec![String::from("")];
for i in 1..=n {
println!("Round {}", i);
let mut cur = vec![];
for j in 0..i {
let left = &dp[j as usize];
let right = &dp[(i-j-1) as usize];
for l in left {
for r in right {
let tmp = format!("({}){}", l, r);
println!("new string {}", tmp);
cur.push(tmp);
}
}
}
dp[i as usize] = cur;
}
let res = dp.pop().unwrap();
return res
}
// https://leetcode-cn.com/problems/unique-paths/
// number of paths to P[i][j] = P[i-1][j] + P[i][j-1]
pub fn unique_paths(m: i32, n: i32) -> i32 {
if m == 1 || n == 1 {
return 1;
} else {
return unique_paths(m - 1, n) + unique_paths(m, n - 1);
}
}
pub fn unique_paths_iter(m: i32, n: i32) -> i32 {
let m: usize = m as usize;
let n: usize = n as usize;
let mut cache = vec![vec![0; n]; m];
for i in 0..m {
for j in 0..n {
if i == 0 || j == 0 {
cache[i][j] = 1;
} else {
cache[i][j] = cache[i-1][j] + cache[i][j-1];
}
}
}
return cache[m-1][n-1] as i32;
}
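// e.g. unique_paths_iter(3, 2) == 3 and unique_paths_iter(3, 7) == 28 (illustrative values).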
// https://leetcode-cn.com/problems/unique-paths-ii/solution/
pub fn unique_paths_with_obstacles2(obstacle_grid: Vec<Vec<i32>>) -> i32 {
let m = obstacle_grid.len();
let n = obstacle_grid[0].len();
let mut cache = vec![vec![0; n]; m];
for i in 0..m {
for j in 0..n {
if obstacle_grid[i][j] == 1 {
cache[i][j] = 0;
} else if i == 0 && j == 0 {
cache[i][j] = 1;
} else if i == 0 {
cache[i][j] = cache[i][j-1];
} else if j == 0 {
cache[i][j] = cache[i-1][j];
} else {
cache[i][j] = cache[i-1][j] + cache[i][j-1];
}
}
}
return cache[m-1][n-1];
}
// https://leetcode-cn.com/problems/house-robber/submissions/
pub fn rob(nums: Vec<i32>) -> i32 {
let len = nums.len();
if len == 0 {
return 0;
} else if len == 1 {
return nums[0];
} else if len == 2 {
return nums[0].max(nums[1]);
} // else len > 2
let mut m1 = nums[0];
let mut m2 = nums[1].max(m1);
for i in 2..nums.len() {
println!("m1 {} m2 {}", m1, m2);
m1 = (m1 + nums[i]).max(m2);
let temp = m2;
m2 = m1;
m1 = temp;
}
println!("m1 {} m2 {}", m1, m2);
return m2;
}
// https://leetcode-cn.com/problems/maximum-product-subarray/submissions/
pub fn max_product(nums: Vec<i32>) -> i32 {
if nums.len() == 0 { return 0; }
let (mut max, mut min) = (1, 1);
let mut res = std::i32::MIN;
let len = nums.len();
// the if inside the loop makes this slower!
for n in nums {
let t_max = max;
let t_min = min;
max = (t_max * n).max(n).max(t_min * n);
min = (t_min * n).min(n).min(t_max * n);
res = res.max(max);
}
println!("{}", res);
return res;
}
// https://leetcode-cn.com/problems/gu-piao-de-zui-da-li-run-lcof/
// since we buy and sell only once, it is enough to track the lowest price so far
pub fn max_profit(mut prices: Vec<i32>) -> i32 {
let mut profit = 0;
let mut cost = 1<<30;
for i in 0..prices.len() {
cost = cost.min(prices[i]);
profit = (prices[i] - cost).max(profit);
}
return profit;
}
// https://leetcode-cn.com/problems/word-break/
pub fn word_break(s: String, word_dict: Vec<String>) -> bool {
if word_dict.is_empty() { return false; }
let len = s.len();
let mut dp: Vec<bool> = vec![false; len+1];
dp[0] = true;
for i in 0..len {
if!dp[i] { continue; }
for w in &word_dict {
let end = i + w.len();
if end <= len &&!dp[end] && &s[i..end] == w.as_str() {
dp[end] = true;
}
}
}
dp[len]
}
// https://leetcode-cn.com/problems/maximum-length-of-repeated-subarray/solution/
// essentially filling in a table
pub fn find_length(a: Vec<i32>, b: Vec<i32>) -> i32 {
let row = a.len();
let col = b.len();
let mut dp = vec![vec![0; col]; row];
let mut res = 0;
for i in 0..row {
for j in 0..col {
if a[i] == b[j] {
let last = if ( i == 0 || j == 0 ) { 0 } else { dp[i-1][j-1] };
dp[i][j] = last + 1;
res = res.max(dp[i][j]);
} else {
dp[i][j] = 0;
}
}
}
return res as i32;
}
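// Illustrative expectation (mirrors the commented-out call in main below):
// find_length([1, 2, 3, 2, 1], [3, 2, 1, 4, 7]) == 3, for the shared subarray [3, 2, 1].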
// https://leetcode-cn.com/problems/unique-paths-ii/
pub fn unique_paths_with_obstacles(obstacle_grid: Vec<Vec<i32>>) -> i32 {
let row = obstacle_grid.len();
let col = obstacle_grid[0].len();
let mut dp = vec![vec![0; col]; row];
// init first row and col
for i in 0..row {
for j in 0..col {
if obstacle_grid[i][j] == 0 {
if i == 0 && j == 0 {
dp[i][j] = 1;
} else if i == 0 {
dp[i][j] = dp[i][j-1];
} else if j == 0 {
dp[i][j] = dp[i-1][j];
} else {
dp[i][j] = dp[i-1][j] + dp[i][j-1];
}
} else {
// hit an obstacle; dp was initialized to 0, so this assignment is technically redundant
dp[i][j] = 0;
}
}
}
return dp[row-1][col-1];
}
// https://leetcode-cn.com/problems/re-space-lcci/
pub fn respace(dictionary: Vec<String>, sentence: String) -> i32 {
42
}
// https://leetcode-cn.com/problems/li-wu-de-zui-da-jie-zhi-lcof/
pub fn max_value(mut grid: Vec<Vec<i32>>) -> i32 {
let row = grid.len();
let col = grid[0].len();
for i in 0..row {
for j in 0..col {
if i == 0 && j == 0 {
// pass
} else if i == 0 {
grid[i][j] += grid[i][j-1];
} else if j == 0 {
grid[i][j] += grid[i-1][j];
} else {
grid[i][j] += grid[i-1][j].max(grid[i][j-1]);
}
}
}
return grid[row-1][col-1];
}
// https://leetcode-cn.com/problems/triangle/solution/di-gui-ji-yi-hua-dp-bi-xu-miao-dong-by-sweetiee/
pub fn minimum_total(triangle: Vec<Vec<i32>>) -> i32 {
let n = triangle.len();
let mut dp = vec![0; n+1];
for i in (0..n).rev() {
for j in 0..=i {
println!("i, j = {}, {}", i, j);
dp[j] = dp[j].min(dp[j+1]) + triangle[i][j];
}
}
return dp[0];
}
// https://leetcode-cn.com/problems/nge-tou-zi-de-dian-shu-lcof/solution/
pub fn two_sum(n: i32) -> Vec<f64> {
let mut res = vec![1./6.;6];
for i in 1..n as usize {
let mut temp = vec![0.0; 5 * i + 6];
for j in 0..res.len() {
for k in 0..6 {
temp[j+k] += res[j] * 1.0/6.0;
}
}
res = temp;
}
return res;
}
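// Added illustrative check of the dice-probability DP: two dice give 11 possible
// sums (2..=12) and the probabilities must add up to 1 (within float error).
#[test]
fn two_sum_dice_distribution() {
    let probs = two_sum(2);
    assert_eq!(probs.len(), 11);
    let total: f64 = probs.iter().sum();
    assert!((total - 1.0).abs() < 1e-9);
}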
// https://leetcode-cn.com/problems/minimum-path-sum/submissions/
pub fn min_path_sum2(mut grid: Vec<Vec<i32>>) -> i32 {
let row = grid.len();
let col = grid[0].len();
for i in 1..row {
grid[i][0] += grid[i-1][0];
}
for j in 1..col {
grid[0][j] += grid[0][j-1];
}
for i in 1..row {
for j in 1..col {
grid[i][j] = grid[i][j-1].min(grid[i-1][j]) + grid[i][j];
}
}
return grid[row-1][col-1];
}
fn main()
{
// generate_parenthesis(4);
// println!("(1,1) {}", unique_paths_iter(1, 1));
// println!("(2,2) {}", unique_paths_iter(2, 2));
// println!("(3,2) {}", unique_paths_iter(3, 2));
// println!("(2,3) {}", unique_paths_iter(2, 3));
// rob([1, 3, 1, 3, 100].to_vec());
// max_product([-2,0,-1].to_vec());
// max_product([-1,-2,-9,-6].to_vec());
// max_profit([1,2,3].to_vec());
// word_break("leetcode".to_string(), ["leet".to_string(), "code".to_string()].to_vec());
// dbg!(find_length([1,2,3,2,1].to_vec(), [3,2,1,4,7].to_vec()));
// dbg!(max_profit_cool([1,2,3,0,2].to_vec()));
// let tri = [
// [2].to_vec(),
// [3,4].to_vec(),
// [6,5,7].to_vec(),
// [4,1,8,3].to_vec()
// ].to_vec();
// dbg!(minimum_total(tri));
// dbg!(two_sum(5));
min_path_sum2([
[1,3,1].to_vec(),
[1,5,1].to_vec(),
[4,2,1].to_vec(),
].to_vec());
} | d[r][c] + co | identifier_name |
dp.rs | // Let's start learning dynamic programming
use std::cmp::min;
// https://leetcode-cn.com/problems/maximum-subarray
// Maximum subarray sum; it reads less like dynamic programming and more like a sliding window
pub fn max_sub_array(nums: Vec<i32>) -> i32 {
let mut sum = nums[0];
let mut ans = nums[0];
for i in 1..nums.len() {
if sum > 0 {
// add positive sum means larger
sum += nums[i];
} else {
// start from new one means larger
sum = nums[i];
}
// ans always store the largest sum
ans = std::cmp::max(sum, ans);
}
ans
}
// https://leetcode-cn.com/problems/climbing-stairs/solution/
// basic dynamic programming
pub fn climb_stairs(n: i32) -> i32 {
if n == 0 || n == 1 {
return 1;
}
// f(n) = f(n-1) + f(n-2)
// iterative is harder than recursive
let mut n_1 = 1; // f(n-1)
let mut n_2 = 1; // f(n-2)
let mut ans = 0;
for _ in 1..n {
ans = n_1 + n_2;
n_1 = n_2;
n_2 = ans;
}
ans
}
// https://leetcode-cn.com/problems/best-time-to-buy-and-sell-stock/solution/yi-ge-fang-fa-tuan-mie-6-dao-gu-piao-wen-ti-by-l-3/
// sell stock using state machine
// this is the solution for infinite k
pub fn max_profit_infinite(prices: Vec<i32>) -> i32 {
let mut s_keep = std::i32::MIN; // you could not keep any stock on the very first day
let mut s_empty = 0;
for price in prices {
s_keep = std::cmp::max(s_keep, s_empty - price);
s_empty = std::cmp::max(s_empty, s_keep + price);
}
return s_empty;
}
// https://leetcode-cn.com/problems/best-time-to-buy-and-sell-stock-with-cooldown/solution/zhuang-tai-ji-mo-xing-dp-by-acw_wangdh15/
// Solve it with a finite state machine
use std::i32;
pub fn max_profit_cool(prices: Vec<i32>) -> i32 {
let n = prices.len();
let mut dp = vec![vec![i32::MIN; 3]; n+1];
// State 0: free to buy; buying moves to state 1. Stay put, or arrive here from the cooldown state
// State 1: holding, free to sell; selling moves to state 2. Stay put, or arrive here from state 0
// State 2: cooldown; after one day it falls back to state 0. Reached from state 1
// dp[i+1][0]: can buy tomorrow, so today we either do nothing or sit in the cooldown day
// dp[i+1][1]: can sell tomorrow, so today we either buy or keep holding
// dp[i+1][2]: tomorrow is the cooldown day, so we sell today
dp[0][0] = 0;
for i in 0..n {
dp[i+1][0] = dp[i][0].max(dp[i][2]); // transition from states 0 and 2
dp[i+1][1] = dp[i][1].max(dp[i][0] - prices[i]);
dp[i+1][2] = dp[i][1] + prices[i];
// println!("dp[i][0]: {}", dp[i][0]);
// println!("dp[i][1]: {}", dp[i][1]);
// println!("dp[i][2]: {}", dp[i][2]);
}
return dp[n][0].max(dp[n][2]);
}
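// Added illustrative check; same input as the `dbg!(max_profit_cool(...))` call
// in `main()`: buy 1, sell 3, cool down, buy 0, sell 2, for a profit of 3.
#[test]
fn max_profit_cool_example() {
    assert_eq!(max_profit_cool(vec![1, 2, 3, 0, 2]), 3);
}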
pub fn max_profit_once(prices: Vec<i32>) -> i32 {
// suffix 0 means no trade (buy or sell) has happened
// 1 means it has happened
// let mut s_keep_0 = std::i32::MIN; // you could not keep any stock on the very first day
let mut s_empty_0 = 0;
let mut s_keep_1 = std::i32::MIN;
let mut s_empty_1 = std::i32::MIN;
for price in prices {
s_keep_1 = std::cmp::max(s_keep_1, s_empty_0 - price);
s_empty_1 = std::cmp::max(s_empty_1, s_keep_1 + price);
}
return std::cmp::max(s_empty_1, 0);
}
pub fn max_profit_twice(prices: Vec<i32>) -> i32 {
// suffix 0 means no trade (buy or sell) has happened
// 1 means it has happened
// let mut s_keep_0 = std::i32::MIN; // you could not keep any stock on the very first day
let mut s_empty_0 = 0;
let mut s_keep_1 = std::i32::MIN;
let mut s_empty_1 = std::i32::MIN;
let mut s_keep_2 = std::i32::MIN;
let mut s_empty_2 = std::i32::MIN;
for price in prices {
s_keep_1 = std::cmp::max(s_keep_1, s_empty_0 - price);
s_empty_1 = std::cmp::max(s_empty_1, s_keep_1 + price);
s_keep_2 = std::cmp::max(s_keep_2, s_empty_1 - price);
s_empty_2 = std::cmp::max(s_empty_2, s_keep_2 + price);
}
return std::cmp::max(s_empty_2, 0);
}
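// Added illustrative check (standard two-transaction example from the stock
// problems): buy 0 sell 3, then buy 1 sell 4, for a total profit of 6.
#[test]
fn max_profit_twice_example() {
    assert_eq!(max_profit_twice(vec![3, 3, 5, 0, 0, 3, 1, 4]), 6);
}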
// this one works but consumes too much memory
pub fn max_profit_k_memory_consume(k: i32, prices: Vec<i32>) -> i32 {
// from the example above, we know the initial value is 0
// here, k becomes a variable, so we need a matrix to
// store the different states
// how many states do we have?
// empty or keep => 2
// trade times => k
// so we have 2k states
let mut s_trade: [i32; 2] = [std::i32::MIN, std::i32::MIN]; // trade state: empty or keep
let mut s_times: Vec<[i32;2]> = Vec::new();
let k: usize = k as usize;
for i in 0..k+1 {
s_times.push(s_trade.clone());
}
s_times[0][0] = 0;
for price in prices {
for j in 0..k {
s_times[j+1][1] = std::cmp::max(s_times[j+1][1], s_times[j][0] - price);
s_times[j+1][0] = std::cmp::max(s_times[j+1][0], s_times[j+1][1] + price);
}
}
return std::cmp::max(0, s_times[k][0]);
}
// memory efficient version
pub fn max_profit_k(k: i32, prices: Vec<i32>) -> i32 {
// if k is unreasonably large, switch to the infinite-transaction version
let k: usize = k as usize;
if k > prices.len()/2 {
return max_profit_infinite(prices);
}
let mut s_trade: [i32; 2] = [std::i32::MIN, std::i32::MIN]; // trade state: empty or keep
let mut s_times: Vec<[i32;2]> = Vec::new();
for i in 0..k+1 {
s_times.push(s_trade.clone());
}
s_times[0][0] = 0;
for price in prices {
for j in 0..k {
s_times[j+1][1] = std::cmp::max(s_times[j+1][1], s_times[j][0] - price);
s_times[j+1][0] = std::cmp::max(s_times[j+1][0], s_times[j+1][1] + price);
}
}
return std::cmp::max(0, s_times[k][0]);
}
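// Added illustrative check: the k = 2 example (profit 4 + 3 = 7), plus the
// fallback to the unlimited-transaction version when k is oversized.
#[test]
fn max_profit_k_examples() {
    assert_eq!(max_profit_k(2, vec![3, 2, 6, 5, 0, 3]), 7);
    assert_eq!(max_profit_k(100, vec![1, 2, 3]), 2);
}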
// shortest path
// https://leetcode-cn.com/problems/minimum-path-sum/
// way: set grid value as the cost to get there
// matrix:
// 1 0 1 1 1 2
// 2 3 5 => 3 4 7
// 5 3 2 8 7 9
pub fn min_path_sum(grid: Vec<Vec<i32>>) -> i32 {
let row = grid.len();
let col = grid[0].len();
let mut cost = grid.clone();
for r in 0..row {
for c in 0..col {
if r == 0 && c == 0 {
cost[r][c] = grid[r][c];
} else if r == 0 {
cost[r][c] = grid[r][c] + cost[r][c-1];
} else if c == 0 {
cost[r][c] = grid[r][c] + cost[r-1][c];
} else {
cost[r][c] = grid[r][c] + min(cost[r-1][c], cost[r][c-1]);
}
}
}
return cost[row-1][col-1];
}
// https://leetcode-cn.com/problems/generate-parentheses/solution/
pub fn generate_parenthesis(n: i32) -> Vec<String> {
if n == 0 {
return Vec::new();
}
let mut dp = vec![Vec::<String>::new(); (n+1) as usize];
dp[0] = vec![String::from("")];
for i in 1..=n {
println!("Round {}", i);
let mut cur = vec![];
for j in 0..i {
let left = &dp[j as usize];
let right = &dp[(i-j-1) as usize];
for l in left {
for r in right {
let tmp = format!("({}){}", l, r);
println!("new string {}", tmp);
cur.push(tmp);
}
}
}
dp[i as usize] = cur;
}
let res = dp.pop().unwrap();
return res
}
// https://leetcode-cn.com/problems/unique-paths/
// Number of paths reaching P[i][j] = P[i-1][j] + P[i][j-1]
// (this plain recursion recomputes subproblems; the iterative version below caches them)
pub fn unique_paths(m: i32, n: i32) -> i32 {
if m == 1 || n == 1 {
return 1;
} else {
return unique_paths(m - 1, n) + unique_paths(m, n - 1);
}
}
pub fn unique_paths_iter(m: i32, n: i32) -> i32 {
let m: usize = m as usize;
let n: usize = n as usize;
let mut cache = vec![vec![0; n]; m];
for i in 0..m {
for j in 0..n {
if i == 0 || j == 0 {
cache[i][j] = 1;
} else {
cache[i][j] = cache[i-1][j] + cache[i][j-1];
}
}
}
return cache[m-1][n-1] as i32;
}
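// Added illustrative check; (3, 2) matches the commented-out call in `main()`,
// and a 3x7 grid has the well-known 28 distinct paths.
#[test]
fn unique_paths_iter_examples() {
    assert_eq!(unique_paths_iter(3, 2), 3);
    assert_eq!(unique_paths_iter(3, 7), 28);
}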
// https://leetcode-cn.com/problems/unique-paths-ii/solution/
pub fn unique_paths_with_obstacles2(obstacle_grid: Vec<Vec<i32>>) -> i32 {
let m = obstacle_grid.len();
let n = obstacle_grid[0].len();
let mut cache = vec![vec![0; n]; m];
for i in 0..m {
for j in 0..n {
if obstacle_grid[i][j] == 1 {
cache[i][j] = 0;
} else if i == 0 && j == 0 {
cache[i][j] = 1;
} else if i == 0 {
cache[i][j] = cache[i][j-1];
} else if j == 0 {
cache[i][j] = cache[i-1][j];
} else {
cache[i][j] = cache[i-1][j] + cache[i][j-1];
}
}
}
return cache[m-1][n-1];
}
// https://leetcode-cn.com/problems/house-robber/submissions/
pub fn rob(nums: Vec<i32>) -> i32 {
let len = nums.len();
if len == 0 {
return 0;
} else if len == 1 {
return nums[0];
} else if len == 2 {
return nums[0].max(nums[1]);
} // else len > 2
let mut m1 = nums[0];
let mut m2 = nums[1].max(m1);
for i in 2..nums.len() {
println!("m1 {} m2 {}", m1, m2);
m1 = (m1 + nums[i]).max(m2);
let temp = m2;
m2 = m1;
m1 = temp;
}
println!("m1 {} m2 {}", m1, m2);
return m2;
}
// https://leetcode-cn.com/problems/maximum-product-subarray/submissions/
pub fn max_product(nums: Vec<i32>) -> i32 {
if nums.len() == 0 { return 0; }
let (mut max, mut min) = (1, 1);
let mut res = std::i32::MIN;
let len = nums.len();
// Having an `if` inside the loop makes this slow!
for n in nums {
let t_max = max;
let t_min = min;
max = (t_max * n).max(n).max(t_min * n);
min = (t_min * n).min(n).min(t_max * n);
res = res.max(max);
}
println!("{}", res);
return res;
}
// https://leetcode-cn.com/problems/gu-piao-de-zui-da-li-run-lcof/
// Only one buy/sell is allowed, so it is enough to track the lowest price seen so far
pub fn max_profit(prices: Vec<i32>) -> i32 {
let mut profit = 0;
let mut cost = 1<<30;
for i in 0..prices.len() {
cost = cost.min(prices[i]);
profit = (prices[i] - cost).max(profit);
}
return profit;
}
// https://leetcode-cn.com/problems/word-break/
pub fn word_break(s: String, word_dict: Vec<String>) -> bool {
if word_dict.is_empty() { return false; }
let len = s.len();
let mut dp: Vec<bool> = vec![false; len+1];
dp[0] = true;
for i in 0..len {
if !dp[i] { continue; }
for w in &word_dict {
let end = i + w.len();
if end <= len && !dp[end] && &s[i..end] == w.as_str() {
dp[end] = true;
}
}
}
dp[len]
}
// https://leetcode-cn.com/problems/maximum-length-of-repeated-subarray/solution/
// Essentially filling in a 2-D DP table
pub fn find_length(a: Vec<i32>, b: Vec<i32>) -> i32 {
let row = a.len();
let col = b.len();
let mut dp = vec![vec![0; col]; row];
let mut res = 0;
for i in 0..row {
for j in 0..col {
if a[i] == b[j] {
let last = if i == 0 || j == 0 { 0 } else { dp[i-1][j-1] };
dp[i][j] = last + 1;
res = res.max(dp[i][j]);
} else {
dp[i][j] = 0;
}
}
}
return res as i32;
}
// https://leetcode-cn.com/problems/unique-paths-ii/
pub fn unique_paths_with_obstacles(obstacle_grid: Vec<Vec<i32>>) -> i32 {
let row = obstacle_grid.len();
let col = obstacle_grid[0].len();
let mut dp = vec![vec![0; col]; row];
// init first row and col
for i in 0..row {
for j in 0..col {
if obstacle_grid[i][j] == 0 {
if i == 0 && j == 0 {
dp[i][j] = 1;
} else if i == 0 {
dp[i][j] = dp[i][j-1];
} else if j == 0 {
dp[i][j] = dp[i-1][j];
} else {
dp[i][j] = dp[i-1][j] + dp[i][j-1];
}
} else {
// Hit an obstacle; the cell was already initialized to 0, so this assignment is redundant
dp[i][j] = 0;
}
}
}
return dp[row-1][col-1];
}
// https://leetcode-cn.com/problems/re-space-lcci/
pub fn respace(dictionary: Vec<String>, sentence: String) -> i32 {
42
}
// https://leetcode-cn.com/problems/li-wu-de-zui-da-jie-zhi-lcof/
pub fn max_value(mut grid: Vec<Vec<i32>>) -> i32 {
let row = grid.len();
let col = grid[0].len();
for i in 0..row {
for j in 0..col {
if i == 0 && j == 0 {
// pass
} else if i == 0 {
grid[i][j] += grid[i][j-1];
} else if j == 0 {
grid[i][j] += grid[i-1][j];
} else {
grid[i][j] += grid[i-1][j].max(grid[i][j-1]);
}
}
}
return grid[row-1][col-1];
}
// https://leetcode-cn.com/problems/triangle/solution/di-gui-ji-yi-hua-dp-bi-xu-miao-dong-by-sweetiee/
pub fn minimum_total(triangle: Vec<Vec<i32>>) -> i32 {
let n = triangle.len();
let mut dp = vec![0; n+1];
for i in (0..n).rev() {
for j in 0..=i {
println! | +1]) + triangle[i][j];
}
}
return dp[0];
}
// https://leetcode-cn.com/problems/nge-tou-zi-de-dian-shu-lcof/solution/
pub fn two_sum(n: i32) -> Vec<f64> {
let mut res = vec![1./6.;6];
for i in 1..n as usize {
let mut temp = vec![0.0; 5 * i + 6];
for j in 0..res.len() {
for k in 0..6 {
temp[j+k] += res[j] * 1.0/6.0;
}
}
res = temp;
}
return res;
}
// https://leetcode-cn.com/problems/minimum-path-sum/submissions/
pub fn min_path_sum2(mut grid: Vec<Vec<i32>>) -> i32 {
let row = grid.len();
let col = grid[0].len();
for i in 1..row {
grid[i][0] += grid[i-1][0];
}
for j in 1..col {
grid[0][j] += grid[0][j-1];
}
for i in 1..row {
for j in 1..col {
grid[i][j] = grid[i][j-1].min(grid[i-1][j]) + grid[i][j];
}
}
return grid[row-1][col-1];
}
fn main()
{
// generate_parenthesis(4);
// println!("(1,1) {}", unique_paths_iter(1, 1));
// println!("(2,2) {}", unique_paths_iter(2, 2));
// println!("(3,2) {}", unique_paths_iter(3, 2));
// println!("(2,3) {}", unique_paths_iter(2, 3));
// rob([1, 3, 1, 3, 100].to_vec());
// max_product([-2,0,-1].to_vec());
// max_product([-1,-2,-9,-6].to_vec());
// max_profit([1,2,3].to_vec());
// word_break("leetcode".to_string(), ["leet".to_string(), "code".to_string()].to_vec());
// dbg!(find_length([1,2,3,2,1].to_vec(), [3,2,1,4,7].to_vec()));
// dbg!(max_profit_cool([1,2,3,0,2].to_vec()));
// let tri = [
// [2].to_vec(),
// [3,4].to_vec(),
// [6,5,7].to_vec(),
// [4,1,8,3].to_vec()
// ].to_vec();
// dbg!(minimum_total(tri));
// dbg!(two_sum(5));
min_path_sum2([
[1,3,1].to_vec(),
[1,5,1].to_vec(),
[4,2,1].to_vec(),
].to_vec());
} | ("i, j = {}, {}", i, j);
dp[j] = dp[j].min(dp[j | conditional_block |
dp.rs | // Let's start learning dynamic programming
use std::cmp::min;
// https://leetcode-cn.com/problems/maximum-subarray | let mut ans = nums[0];
for i in 1..nums.len() {
if sum > 0 {
// add positive sum means larger
sum += nums[i];
} else {
// start from new one means larger
sum = nums[i];
}
// ans always store the largest sum
ans = std::cmp::max(sum, ans);
}
ans
}
// https://leetcode-cn.com/problems/climbing-stairs/solution/
// basic dynamic programming
pub fn climb_stairs(n: i32) -> i32 {
if n == 0 || n == 1 {
return 1;
}
// f(n) = f(n-1) + f(n-2)
// iterative is harder than recursive
let mut n_1 = 1; // f(n-1)
let mut n_2 = 1; // f(n-2)
let mut ans = 0;
for _ in 1..n {
ans = n_1 + n_2;
n_1 = n_2;
n_2 = ans;
}
ans
}
// https://leetcode-cn.com/problems/best-time-to-buy-and-sell-stock/solution/yi-ge-fang-fa-tuan-mie-6-dao-gu-piao-wen-ti-by-l-3/
// sell stock using state machine
// this is the solution for infinite k
pub fn max_profit_infinite(prices: Vec<i32>) -> i32 {
let mut s_keep = std::i32::MIN; // you could not keep any stock on the very first day
let mut s_empty = 0;
for price in prices {
s_keep = std::cmp::max(s_keep, s_empty - price);
s_empty = std::cmp::max(s_empty, s_keep + price);
}
return s_empty;
}
// https://leetcode-cn.com/problems/best-time-to-buy-and-sell-stock-with-cooldown/solution/zhuang-tai-ji-mo-xing-dp-by-acw_wangdh15/
// Solve it with a finite state machine
use std::i32;
pub fn max_profit_cool(prices: Vec<i32>) -> i32 {
let n = prices.len();
let mut dp = vec![vec![i32::MIN; 3]; n+1];
// State 0: free to buy; buying moves to state 1. Stay put, or arrive here from the cooldown state
// State 1: holding, free to sell; selling moves to state 2. Stay put, or arrive here from state 0
// State 2: cooldown; after one day it falls back to state 0. Reached from state 1
// dp[i+1][0]: can buy tomorrow, so today we either do nothing or sit in the cooldown day
// dp[i+1][1]: can sell tomorrow, so today we either buy or keep holding
// dp[i+1][2]: tomorrow is the cooldown day, so we sell today
dp[0][0] = 0;
for i in 0..n {
dp[i+1][0] = dp[i][0].max(dp[i][2]); // transition from states 0 and 2
dp[i+1][1] = dp[i][1].max(dp[i][0] - prices[i]);
dp[i+1][2] = dp[i][1] + prices[i];
// println!("dp[i][0]: {}", dp[i][0]);
// println!("dp[i][1]: {}", dp[i][1]);
// println!("dp[i][2]: {}", dp[i][2]);
}
return dp[n][0].max(dp[n][2]);
}
pub fn max_profit_once(prices: Vec<i32>) -> i32 {
// suffix 0 means no trade (buy or sell) happen
// 1 means it happend
// let mut s_keep_0 = std::i32::MIN; // you could not keep any stock on the very first day
let mut s_empty_0 = 0;
let mut s_keep_1 = std::i32::MIN;
let mut s_empty_1 = std::i32::MIN;
for price in prices {
s_keep_1 = std::cmp::max(s_keep_1, s_empty_0 - price);
s_empty_1 = std::cmp::max(s_empty_1, s_keep_1 + price);
}
return std::cmp::max(s_empty_1, 0);
}
pub fn max_profit_twice(prices: Vec<i32>) -> i32 {
// suffix 0 means no trade (buy or sell) happen
// 1 means it happend
// let mut s_keep_0 = std::i32::MIN; // you could not keep any stock on the very first day
let mut s_empty_0 = 0;
let mut s_keep_1 = std::i32::MIN;
let mut s_empty_1 = std::i32::MIN;
let mut s_keep_2 = std::i32::MIN;
let mut s_empty_2 = std::i32::MIN;
for price in prices {
s_keep_1 = std::cmp::max(s_keep_1, s_empty_0 - price);
s_empty_1 = std::cmp::max(s_empty_1, s_keep_1 + price);
s_keep_2 = std::cmp::max(s_keep_2, s_empty_1 - price);
s_empty_2 = std::cmp::max(s_empty_2, s_keep_2 + price);
}
return std::cmp::max(s_empty_2, 0);
}
// this one works but consume too much memory
pub fn max_profit_k_memory_consume(k: i32, prices: Vec<i32>) -> i32 {
// from example above, we know the initial value is 0
// here, k become a variable, some we need a matrix to
// store different status
// how many status we have?
// empty or keep => 2
// trade times => k
// so we have 2k status
let mut s_trade: [i32; 2] = [std::i32::MIN, std::i32::MIN]; // trade state: empty or keep
let mut s_times: Vec<[i32;2]> = Vec::new();
let k: usize = k as usize;
for i in 0..k+1 {
s_times.push(s_trade.clone());
}
s_times[0][0] = 0;
for price in prices {
for j in 0..k {
s_times[j+1][1] = std::cmp::max(s_times[j+1][1], s_times[j][0] - price);
s_times[j+1][0] = std::cmp::max(s_times[j+1][0], s_times[j+1][1] + price);
}
}
return std::cmp::max(0, s_times[k][0]);
}
// memory efficient version
pub fn max_profit_k(k: i32, prices: Vec<i32>) -> i32 {
// here if k in unreasonable large, switch to infinite version
let k: usize = k as usize;
if k > prices.len()/2 {
return max_profit_infinite(prices);
}
let mut s_trade: [i32; 2] = [std::i32::MIN, std::i32::MIN]; // trade state: empty or keep
let mut s_times: Vec<[i32;2]> = Vec::new();
for i in 0..k+1 {
s_times.push(s_trade.clone());
}
s_times[0][0] = 0;
for price in prices {
for j in 0..k {
s_times[j+1][1] = std::cmp::max(s_times[j+1][1], s_times[j][0] - price);
s_times[j+1][0] = std::cmp::max(s_times[j+1][0], s_times[j+1][1] + price);
}
}
return std::cmp::max(0, s_times[k][0]);
}
// shortest path
// https://leetcode-cn.com/problems/minimum-path-sum/
// way: set grid value as the cost to get there
// matrix:
// 1 0 1 1 1 2
// 2 3 5 => 3 4 7
// 5 3 2 8 7 9
pub fn min_path_sum(grid: Vec<Vec<i32>>) -> i32 {
let row = grid.len();
let col = grid[0].len();
let mut cost = grid.clone();
for r in 0..row {
for c in 0..col {
if r == 0 && c == 0 {
cost[r][c] = grid[r][c];
} else if r == 0 {
cost[r][c] = grid[r][c] + cost[r][c-1];
} else if c == 0 {
cost[r][c] = grid[r][c] + cost[r-1][c];
} else {
cost[r][c] = grid[r][c] + min(cost[r-1][c], cost[r][c-1]);
}
}
}
return cost[row-1][col-1];
}
// https://leetcode-cn.com/problems/generate-parentheses/solution/
pub fn generate_parenthesis(n: i32) -> Vec<String> {
if n == 0 {
return Vec::new();
}
let mut dp = vec![Vec::<String>::new(); (n+1) as usize];
dp[0] = vec![String::from("")];
for i in 1..=n {
println!("Round {}", i);
let mut cur = vec![];
for j in 0..i {
let left = &dp[j as usize];
let right = &dp[(i-j-1) as usize];
for l in left {
for r in right {
let tmp = format!("({}){}", l, r);
println!("new string {}", tmp);
cur.push(tmp);
}
}
}
dp[i as usize] = cur;
}
let res = dp.pop().unwrap();
return res
}
// https://leetcode-cn.com/problems/unique-paths/
// 到达P[i][j]的路径数 = P[i-1][j] + P[i][j-1]
pub fn unique_paths(m: i32, n: i32) -> i32 {
if m == 1 || n == 1 {
return 1;
} else {
return unique_paths(m - 1, n) + unique_paths(m, n - 1);
}
}
pub fn unique_paths_iter(m: i32, n: i32) -> i32 {
let m: usize = m as usize;
let n: usize = n as usize;
let mut cache = vec![vec![0; n]; m];
for i in 0..m {
for j in 0..n {
if i == 0 || j == 0 {
cache[i][j] = 1;
} else {
cache[i][j] = cache[i-1][j] + cache[i][j-1];
}
}
}
return cache[m-1][n-1] as i32;
}
// https://leetcode-cn.com/problems/unique-paths-ii/solution/
pub fn unique_paths_with_obstacles2(obstacle_grid: Vec<Vec<i32>>) -> i32 {
let m = obstacle_grid.len();
let n = obstacle_grid[0].len();
let mut cache = vec![vec![0; n]; m];
for i in 0..m {
for j in 0..n {
if obstacle_grid[i][j] == 1 {
cache[i][j] = 0;
} else if i == 0 && j == 0 {
cache[i][j] = 1;
} else if i == 0 {
cache[i][j] = cache[i][j-1];
} else if j == 0 {
cache[i][j] = cache[i-1][j];
} else {
cache[i][j] = cache[i-1][j] + cache[i][j-1];
}
}
}
return cache[m-1][n-1];
}
// https://leetcode-cn.com/problems/house-robber/submissions/
pub fn rob(nums: Vec<i32>) -> i32 {
let len = nums.len();
if len == 0 {
return 0;
} else if len == 1 {
return nums[0];
} else if len == 2 {
return nums[0].max(nums[1]);
} // else len > 2
let mut m1 = nums[0];
let mut m2 = nums[1].max(m1);
for i in 2..nums.len() {
println!("m1 {} m2 {}", m1, m2);
m1 = (m1 + nums[i]).max(m2);
let temp = m2;
m2 = m1;
m1 = temp;
}
println!("m1 {} m2 {}", m1, m2);
return m2;
}
// https://leetcode-cn.com/problems/maximum-product-subarray/submissions/
pub fn max_product(nums: Vec<i32>) -> i32 {
if nums.len() == 0 { return 0; }
let (mut max, mut min) = (1, 1);
let mut res = std::i32::MIN;
let len = nums.len();
// Having an `if` inside the loop makes this slow!
for n in nums {
let t_max = max;
let t_min = min;
max = (t_max * n).max(n).max(t_min * n);
min = (t_min * n).min(n).min(t_max * n);
res = res.max(max);
}
println!("{}", res);
return res;
}
// https://leetcode-cn.com/problems/gu-piao-de-zui-da-li-run-lcof/
// Only one buy/sell is allowed, so it is enough to track the lowest price seen so far
pub fn max_profit(prices: Vec<i32>) -> i32 {
let mut profit = 0;
let mut cost = 1<<30;
for i in 0..prices.len() {
cost = cost.min(prices[i]);
profit = (prices[i] - cost).max(profit);
}
return profit;
}
// https://leetcode-cn.com/problems/word-break/
pub fn word_break(s: String, word_dict: Vec<String>) -> bool {
if word_dict.is_empty() { return false; }
let len = s.len();
let mut dp: Vec<bool> = vec![false; len+1];
dp[0] = true;
for i in 0..len {
if!dp[i] { continue; }
for w in &word_dict {
let end = i + w.len();
if end <= len &&!dp[end] && &s[i..end] == w.as_str() {
dp[end] = true;
}
}
}
dp[len]
}
// https://leetcode-cn.com/problems/maximum-length-of-repeated-subarray/solution/
// 相当于填表
pub fn find_length(a: Vec<i32>, b: Vec<i32>) -> i32 {
let row = a.len();
let col = b.len();
let mut dp = vec![vec![0; col]; row];
let mut res = 0;
for i in 0..row {
for j in 0..col {
if a[i] == b[j] {
let last = if ( i == 0 || j == 0 ) { 0 } else { dp[i-1][j-1] };
dp[i][j] = last + 1;
res = res.max(dp[i][j]);
} else {
dp[i][j] = 0;
}
}
}
return res as i32;
}
// https://leetcode-cn.com/problems/unique-paths-ii/
pub fn unique_paths_with_obstacles(obstacle_grid: Vec<Vec<i32>>) -> i32 {
let row = obstacle_grid.len();
let col = obstacle_grid[0].len();
let mut dp = vec![vec![0; col]; row];
// init first row and col
for i in 0..row {
for j in 0..col {
if obstacle_grid[i][j] == 0 {
if i == 0 && j == 0 {
dp[i][j] = 1;
} else if i == 0 {
dp[i][j] = dp[i][j-1];
} else if j == 0 {
dp[i][j] = dp[i-1][j];
} else {
dp[i][j] = dp[i-1][j] + dp[i][j-1];
}
} else {
// 遇到障碍了,但一开始我们就是初始化为0的,所以这里其实可以不写
dp[i][j] = 0;
}
}
}
return dp[row-1][col-1];
}
// https://leetcode-cn.com/problems/re-space-lcci/
pub fn respace(dictionary: Vec<String>, sentence: String) -> i32 {
42
}
// https://leetcode-cn.com/problems/li-wu-de-zui-da-jie-zhi-lcof/
pub fn max_value(mut grid: Vec<Vec<i32>>) -> i32 {
let row = grid.len();
let col = grid[0].len();
for i in 0..row {
for j in 0..col {
if i == 0 && j == 0 {
// pass
} else if i == 0 {
grid[i][j] += grid[i][j-1];
} else if j == 0 {
grid[i][j] += grid[i-1][j];
} else {
grid[i][j] += grid[i-1][j].max(grid[i][j-1]);
}
}
}
return grid[row-1][col-1];
}
// https://leetcode-cn.com/problems/triangle/solution/di-gui-ji-yi-hua-dp-bi-xu-miao-dong-by-sweetiee/
pub fn minimum_total(triangle: Vec<Vec<i32>>) -> i32 {
let n = triangle.len();
let mut dp = vec![0; n+1];
for i in (0..n).rev() {
for j in 0..=i {
println!("i, j = {}, {}", i, j);
dp[j] = dp[j].min(dp[j+1]) + triangle[i][j];
}
}
return dp[0];
}
// https://leetcode-cn.com/problems/nge-tou-zi-de-dian-shu-lcof/solution/
pub fn two_sum(n: i32) -> Vec<f64> {
let mut res = vec![1./6.;6];
for i in 1..n as usize {
let mut temp = vec![0.0; 5 * i + 6];
for j in 0..res.len() {
for k in 0..6 {
temp[j+k] += res[j] * 1.0/6.0;
}
}
res = temp;
}
return res;
}
// https://leetcode-cn.com/problems/minimum-path-sum/submissions/
pub fn min_path_sum2(mut grid: Vec<Vec<i32>>) -> i32 {
let row = grid.len();
let col = grid[0].len();
for i in 1..row {
grid[i][0] += grid[i-1][0];
}
for j in 1..col {
grid[0][j] += grid[0][j-1];
}
for i in 1..row {
for j in 1..col {
grid[i][j] = grid[i][j-1].min(grid[i-1][j]) + grid[i][j];
}
}
return grid[row-1][col-1];
}
fn main()
{
// generate_parenthesis(4);
// println!("(1,1) {}", unique_paths_iter(1, 1));
// println!("(2,2) {}", unique_paths_iter(2, 2));
// println!("(3,2) {}", unique_paths_iter(3, 2));
// println!("(2,3) {}", unique_paths_iter(2, 3));
// rob([1, 3, 1, 3, 100].to_vec());
// max_product([-2,0,-1].to_vec());
// max_product([-1,-2,-9,-6].to_vec());
// max_profit([1,2,3].to_vec());
// word_break("leetcode".to_string(), ["leet".to_string(), "code".to_string()].to_vec());
// dbg!(find_length([1,2,3,2,1].to_vec(), [3,2,1,4,7].to_vec()));
// dbg!(max_profit_cool([1,2,3,0,2].to_vec()));
// let tri = [
// [2].to_vec(),
// [3,4].to_vec(),
// [6,5,7].to_vec(),
// [4,1,8,3].to_vec()
// ].to_vec();
// dbg!(minimum_total(tri));
// dbg!(two_sum(5));
min_path_sum2([
[1,3,1].to_vec(),
[1,5,1].to_vec(),
[4,2,1].to_vec(),
].to_vec());
} | // 最大子序各,好像看不出什么动态规则的意味,反而像滑动窗口
pub fn max_sub_array(nums: Vec<i32>) -> i32 {
let mut sum = nums[0]; | random_line_split |
lib.rs | pub struct Keys<'a, K, V> {
inner: Iter<'a, K, V>,
}
impl<'a, K, V> Iterator for Keys<'a, K, V> {
type Item = &'a K;
#[inline]
fn next(&mut self) -> Option<&'a K> {
self.inner.next().map(|(k, _)| k)
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
}
//#[derive(Clone)]
/// Iterator over the values
pub struct Values<'a, K, V> {
inner: Iter<'a, K, V>,
}
impl<'a, K, V> Iterator for Values<'a, K, V> {
type Item = &'a V;
#[inline]
fn next(&mut self) -> Option<&'a V> {
self.inner.next().map(|(_, v)| v)
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
}
#[derive(Clone)]
struct Node<K, V> {
// Key
pub key: K,
// Hash of the key
pub hash: u64,
// Value stored. We'll use `None` as a sentinel value for removed
// entries.
pub value: Option<V>,
// Store vector index pointing to the `Node` for which `hash` is smaller
// than that of this `Node`.
pub left: Cell<Option<NonZeroU32>>,
// Same as above but for `Node`s with hash larger than this one. If the
// hash is the same, but keys are different, the lookup will default
// to the right branch as well.
pub right: Cell<Option<NonZeroU32>>,
}
impl<K, V> fmt::Debug for Node<K, V>
where
K: fmt::Debug,
V: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(
&(&self.key, &self.value, self.left.get(), self.right.get()),
f,
)
}
}
impl<K, V> PartialEq for Node<K, V>
where
K: PartialEq,
V: PartialEq,
{
fn eq(&self, other: &Self) -> bool {
self.hash == other.hash && self.key == other.key && self.value == other.value
}
}
impl<K, V> Node<K, V> {
#[inline]
const fn new(key: K, value: V, hash: u64) -> Self {
Node {
key,
hash,
value: Some(value),
left: Cell::new(None),
right: Cell::new(None),
}
}
}
// `Cell` isn't `Sync`, but all of our writes are contained and require
// `&mut` access, ergo this is safe.
unsafe impl<K: Sync, V: Sync> Sync for Node<K, V> {}
/// A binary tree implementation of a string -> `JsonValue` map. You normally don't
/// have to interact with instances of `Object`, much more likely you will be
/// using the `JsonValue::Object` variant, which wraps around this struct.
#[derive(Debug, Clone)]
pub struct Map<K, V, H = AHasher> {
store: Vec<Node<K, V>>,
hasher: PhantomData<H>,
}
enum FindResult<'find> {
Hit(usize),
Miss(Option<&'find Cell<Option<NonZeroU32>>>),
}
use FindResult::*;
impl<K, V> Map<K, V, AHasher> {
/// Create a new `Map`.
#[inline]
pub fn new() -> Self {
Map::<K, V, AHasher>::default()
}
/// Create a `Map` with a given capacity
#[inline]
pub fn with_capacity(capacity: usize) -> Self {
Map {
store: Vec::with_capacity(capacity),
hasher: PhantomData,
}
}
}
impl<K, V, H> Default for Map<K, V, H> {
/// Create a new `Map` with a custom hasher.
#[inline]
fn default() -> Self {
Map {
store: Vec::new(),
hasher: PhantomData,
}
}
}
impl<K, V, H> Map<K, V, H>
where
K: Hash + Eq,
H: Hasher + Default,
{
/// An iterator visiting all keys in arbitrary order.
/// The iterator element type is `&'a K`.
pub fn keys(&self) -> Keys<'_, K, V> {
Keys { inner: self.iter() }
}
/// An iterator visiting all values in arbitrary order.
/// The iterator element type is `&'a V`.
pub fn values(&self) -> Values<'_, K, V> {
Values { inner: self.iter() }
}
/// Inserts a key-value pair into the map.
///
/// If the map did not have this key present, `None` is returned.
///
/// If the map did have this key present, the value is updated, and the old
/// value is returned. The key is not updated, though; this matters for
/// types that can be `==` without being identical.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// assert_eq!(map.insert(37, "a"), None);
/// assert_eq!(map.is_empty(), false);
///
/// map.insert(37, "b");
/// assert_eq!(map.insert(37, "c"), Some("b"));
/// assert_eq!(map[&37], "c");
/// ```
pub fn insert(&mut self, key: K, value: V) -> Option<V> {
let hash = Self::hash_key(&key);
match self.find(hash) {
Hit(idx) => unsafe { self.store.get_unchecked_mut(idx).value.replace(value) },
Miss(parent) => {
if let Some(parent) = parent {
parent.set(NonZeroU32::new(self.store.len() as u32));
}
self.store.push(Node::new(key, value, hash));
None
}
}
}
/// Returns a reference to the value corresponding to the key.
///
/// The key may be any borrowed form of the map's key type, but `Hash` and
/// `Eq` on the borrowed form must match those for the key type.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert(1, "a");
/// assert_eq!(map.get(&1), Some(&"a"));
/// assert_eq!(map.get(&2), None);
/// ```
pub fn get<Q>(&self, key: &Q) -> Option<&V>
where
K: Borrow<Q>,
Q: Hash + Eq +?Sized,
{
let hash = Self::hash_key(key);
match self.find(hash) {
Hit(idx) => {
let node = unsafe { self.store.get_unchecked(idx) };
node.value.as_ref()
}
Miss(_) => None,
}
}
/// Returns a mutable reference to the value corresponding to the key.
///
/// The key may be any borrowed form of the map's key type, but Hash and Eq
/// on the borrowed form must match those for the key type.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert(1, "a");
/// if let Some(x) = map.get_mut(&1) {
/// *x = "b";
/// }
/// assert_eq!(map[&1], "b");
/// ```
pub fn get_mut<Q>(&mut self, key: &Q) -> Option<&mut V>
where
K: Borrow<Q>,
Q: Hash + Eq +?Sized,
{
let hash = Self::hash_key(key);
match self.find(hash) {
Hit(idx) => unsafe { self.store.get_unchecked_mut(idx).value.as_mut() },
Miss(_) => None,
}
}
/// Returns `true` if the map contains a value for the specified key.
///
/// The key may be any borrowed form of the map's key type, but `Hash` and
/// `Eq` on the borrowed form must match those for the key type.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert(1, "a");
/// assert_eq!(map.contains_key(&1), true);
/// assert_eq!(map.contains_key(&2), false);
/// ```
pub fn contains_key<Q>(&self, key: &Q) -> bool
where
K: Borrow<Q>,
Q: Hash + Eq +?Sized,
{
let hash = Self::hash_key(key);
match self.find(hash) {
Hit(idx) => unsafe { self.store.get_unchecked(idx).value.is_some() },
Miss(_) => false,
}
}
/// Get a mutable reference to entry at key. Inserts a new entry by
/// calling `F` if absent.
// TODO: Replace with entry API
pub fn get_or_insert<F>(&mut self, key: K, fill: F) -> &mut V
where
F: FnOnce() -> V,
{
let hash = Self::hash_key(&key);
match self.find(hash) {
Hit(idx) => {
let node = unsafe { self.store.get_unchecked_mut(idx) };
if node.value.is_none() {
node.value = Some(fill());
}
node.value.as_mut().unwrap()
}
Miss(parent) => {
let idx = self.store.len();
if let Some(parent) = parent {
parent.set(NonZeroU32::new(self.store.len() as u32));
}
self.store.push(Node::new(key, fill(), hash));
self.store[idx].value.as_mut().unwrap()
}
}
}
/// Removes a key from the map, returning the value at the key if the key
/// was previously in the map.
///
/// The key may be any borrowed form of the map's key type, but `Hash` and
/// `Eq` on the borrowed form must match those for the key type.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert(1, "a");
/// assert_eq!(map.remove(&1), Some("a"));
/// assert_eq!(map.remove(&1), None);
/// ```
pub fn remove<Q>(&mut self, key: &Q) -> Option<V>
where
K: Borrow<Q>,
Q: Hash + Eq +?Sized,
{
let hash = Self::hash_key(key);
match self.find(hash) {
Hit(idx) => unsafe { self.store.get_unchecked_mut(idx).value.take() },
Miss(_) => return None,
}
}
/// Returns the number of elements in the map.
#[inline]
pub fn len(&self) -> usize {
self.store.len()
}
/// Returns `true` if the map contains no elements.
#[inline]
pub fn is_empty(&self) -> bool {
self.store.is_empty()
}
/// Clears the map, removing all key-value pairs. Keeps the allocated memory for reuse.
#[inline]
pub fn clear(&mut self) {
self.store.clear();
}
#[inline]
fn find(&self, hash: u64) -> FindResult {
if self.len() == 0 {
return Miss(None);
}
let mut idx = 0;
loop {
let node = unsafe { self.store.get_unchecked(idx) };
if hash < node.hash {
match node.left.get() {
Some(i) => idx = i.get() as usize,
None => return Miss(Some(&node.left)),
}
} else if hash > node.hash {
match node.right.get() {
Some(i) => idx = i.get() as usize,
None => return Miss(Some(&node.right)),
}
} else {
return Hit(idx);
}
}
}
#[inline]
fn hash_key<Q: Hash>(key: Q) -> u64 {
// let mut hasher = fnv::FnvHasher::default();
// let mut hasher = rustc_hash::FxHasher::default();
let mut hasher = H::default();
key.hash(&mut hasher);
hasher.finish()
}
/// An iterator visiting all key-value pairs in insertion order.
/// The iterator element type is `(&K, &V)`.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert("a", 1);
/// map.insert("b", 2);
/// map.insert("c", 3);
///
/// let entries: Vec<_> = map.iter().collect();
///
/// assert_eq!(
/// entries,
/// &[
/// (&"a", &1),
/// (&"b", &2),
/// (&"c", &3),
/// ],
/// );
/// ```
#[inline]
pub fn iter(&self) -> Iter<K, V> {
Iter {
inner: self.store.iter(),
}
}
/// An iterator visiting all key-value pairs in insertion order, with
/// mutable references to the values. The iterator element type is
/// (&K, &mut V).
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert("a", 1);
/// map.insert("b", 2);
/// map.insert("c", 3);
///
/// // Update all values
/// for (_, val) in map.iter_mut() {
/// *val *= 2;
/// }
///
/// // Check if values are doubled
/// let entries: Vec<_> = map.iter().collect();
///
/// assert_eq!(
/// entries,
/// &[
/// (&"a", &2),
/// (&"b", &4),
/// (&"c", &6),
/// ],
/// );
/// ```
#[inline]
pub fn iter_mut(&mut self) -> IterMut<K, V> {
IterMut {
inner: self.store.iter_mut(),
}
}
/// Creates a raw entry builder for the HashMap.
///
/// Raw entries provide the lowest level of control for searching and
/// manipulating a map. They must be manually initialized with a hash and
/// then manually searched. After this, insertions into a vacant entry
/// still require an owned key to be provided.
///
/// Raw entries are useful for such exotic situations as:
///
/// * Hash memoization
/// * Deferring the creation of an owned key until it is known to be required
/// * Using a search key that doesn't work with the Borrow trait
/// * Using custom comparison logic without newtype wrappers
///
/// Because raw entries provide much more low-level control, it's much easier
/// to put the HashMap into an inconsistent state which, while memory-safe,
/// will cause the map to produce seemingly random results. Higher-level and
/// more foolproof APIs like `entry` should be preferred when possible.
///
/// In particular, the hash used to initialized the raw entry must still be
/// consistent with the hash of the key that is ultimately stored in the entry.
/// This is because implementations of HashMap may need to recompute hashes
/// when resizing, at which point only the keys are available.
///
/// Raw entries give mutable access to the keys. This must not be used
/// to modify how the key would compare or hash, as the map will not re-evaluate
/// where the key should go, meaning the keys may become "lost" if their
/// location does not reflect their state. For instance, if you change a key
/// so that the map now contains keys which compare equal, search may start
/// acting erratically, with two keys randomly masking each other. Implementations
/// are free to assume this doesn't happen (within the limits of memory-safety).
#[inline]
pub fn raw_entry_mut(&mut self) -> RawEntryBuilderMut<'_, K, V, H> {
RawEntryBuilderMut { map: self }
}
/// Creates a raw immutable entry builder for the HashMap.
///
/// Raw entries provide the lowest level of control for searching and
/// manipulating a map. They must be manually initialized with a hash and
/// then manually searched.
///
/// This is useful for
/// * Hash memoization
/// * Using a search key that doesn't work with the Borrow trait
/// * Using custom comparison logic without newtype wrappers
///
/// Unless you are in such a situation, higher-level and more foolproof APIs like
/// `get` should be preferred.
///
/// Immutable raw entries have very limited use; you might instead want `raw_entry_mut`.
#[inline]
pub fn raw_entry(&self) -> RawEntryBuilder<'_, K, V, H> {
RawEntryBuilder { map: self }
}
/// Gets the given key's corresponding entry in the map for in-place manipulation.
///
/// # Examples
///
/// ```
/// use ordnung::Map;
///
/// let mut letters = Map::new();
///
/// for ch in "a short treatise on fungi".chars() {
/// let counter = letters.entry(ch).or_insert(0);
/// *counter += 1;
/// }
///
/// assert_eq!(letters[&'s'], 2);
/// assert_eq!(letters[&'t'], 3);
/// assert_eq!(letters[&'u'], 1);
/// assert_eq!(letters.get(&'y'), None);
/// ```
pub fn entry(&mut self, key: K) -> Entry<K, V, H>
where
K: Eq + Clone,
{
for (idx, n) in self.store.iter().enumerate() {
if &key == &n.key {
return Entry::Occupied(OccupiedEntry::new(idx, key, self));
}
}
Entry::Vacant(VacantEntry::new(key, self))
}
}
impl<K, V> IntoIterator for Map<K, V> {
type Item = (K, V);
type IntoIter = IntoIter<K, V>;
#[inline]
fn into_iter(self) -> IntoIter<K, V> {
IntoIter(self)
}
}
/// Consuming iterator
pub struct IntoIter<K, V>(Map<K, V>);
impl<K, V> IntoIter<K, V> {
/// The length of this iterator
pub fn len(&self) -> usize {
self.0.store.len()
}
/// Returns `true` if this iterator is empty
pub fn is_empty(&self) -> bool {
self.len() == 0
}
}
impl<K, V> Iterator for IntoIter<K, V> {
type Item = (K, V);
#[inline]
fn next(&mut self) -> Option<Self::Item> {
loop {
if let Some(n) = self.0.store.pop() {
if let Some(v) = n.value {
return Some((n.key, v));
}
} else {
return None;
}
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
let l = self.0.store.len();
(l, Some(l))
}
}
impl<K, Q:?Sized, V> Index<&Q> for Map<K, V>
where
K: Eq + Hash + Borrow<Q>,
Q: Eq + Hash,
{
type Output = V;
/// Returns a reference to the value corresponding to the supplied key.
///
/// # Panics
///
/// Panics if the key is not present in the HashMap.
fn index(&self, key: &Q) -> &V {
self.get(key).expect("Key not found in Map")
}
}
impl<'json, IK, IV, K, V> FromIterator<(IK, IV)> for Map<K, V>
where
IK: Into<K>,
IV: Into<V>,
K: Hash + Eq,
{
fn from_iter<I>(iter: I) -> Self
where
I: IntoIterator<Item = (IK, IV)>,
{
let iter = iter.into_iter();
let mut map = Map::with_capacity(iter.size_hint().0);
for (key, value) in iter {
map.insert(key.into(), value.into());
}
map
}
}
// Because keys can be inserted in a different order, the safe way to
// compare `Map`s is to iterate over one and check that the other
// has all the same keys.
impl<K, V> PartialEq for Map<K, V>
where
K: Hash + Eq,
V: PartialEq,
{
fn eq(&self, other: &Self) -> bool {
if self.len() != other.len() {
return false;
}
// Faster than .get() since we can avoid hashing
for &Node {
ref value, hash,..
} in self.store.iter()
{
if let Hit(idx) = other.find(hash) {
if &other.store[idx].value == value {
continue;
}
}
return false;
}
true
}
}
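// Added illustrative test (assumed usage, not part of the original crate):
// equality compares entries by key hash, so insertion order does not matter.
#[cfg(test)]
mod map_eq_tests {
    use super::Map;

    #[test]
    fn eq_ignores_insertion_order() {
        let mut a = Map::new();
        a.insert("x", 1);
        a.insert("y", 2);

        let mut b = Map::new();
        b.insert("y", 2);
        b.insert("x", 1);

        assert!(a == b);
    }
}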
/// An iterator over the entries of a `Map`.
///
/// This struct is created by the [`iter`](./struct.Map.html#method.iter)
/// method on [`Map`](./struct.Map.html). See its documentation for more.
pub struct Iter<'a, K, V> {
inner: slice::Iter<'a, Node<K, V>>,
}
/// A mutable iterator over the entries | // use alloc::vec::Vec;
/// Iterator over the keys | random_line_split |
|
lib.rs | k)
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
}
//#[derive(Clone)]
/// Iterator over the values
pub struct Values<'a, K, V> {
inner: Iter<'a, K, V>,
}
impl<'a, K, V> Iterator for Values<'a, K, V> {
type Item = &'a V;
#[inline]
fn next(&mut self) -> Option<&'a V> {
self.inner.next().map(|(_, v)| v)
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
}
#[derive(Clone)]
struct Node<K, V> {
// Key
pub key: K,
// Hash of the key
pub hash: u64,
// Value stored. We'll use `None` as a sentinel value for removed
// entries.
pub value: Option<V>,
// Store vector index pointing to the `Node` for which `hash` is smaller
// than that of this `Node`.
pub left: Cell<Option<NonZeroU32>>,
// Same as above but for `Node`s with hash larger than this one. If the
// hash is the same, but keys are different, the lookup will default
// to the right branch as well.
pub right: Cell<Option<NonZeroU32>>,
}
impl<K, V> fmt::Debug for Node<K, V>
where
K: fmt::Debug,
V: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(
&(&self.key, &self.value, self.left.get(), self.right.get()),
f,
)
}
}
impl<K, V> PartialEq for Node<K, V>
where
K: PartialEq,
V: PartialEq,
{
fn eq(&self, other: &Self) -> bool {
self.hash == other.hash && self.key == other.key && self.value == other.value
}
}
impl<K, V> Node<K, V> {
#[inline]
const fn new(key: K, value: V, hash: u64) -> Self {
Node {
key,
hash,
value: Some(value),
left: Cell::new(None),
right: Cell::new(None),
}
}
}
// `Cell` isn't `Sync`, but all of our writes are contained and require
// `&mut` access, ergo this is safe.
unsafe impl<K: Sync, V: Sync> Sync for Node<K, V> {}
/// A binary tree implementation of a string -> `JsonValue` map. You normally don't
/// have to interact with instances of `Object`, much more likely you will be
/// using the `JsonValue::Object` variant, which wraps around this struct.
#[derive(Debug, Clone)]
pub struct Map<K, V, H = AHasher> {
store: Vec<Node<K, V>>,
hasher: PhantomData<H>,
}
enum FindResult<'find> {
Hit(usize),
Miss(Option<&'find Cell<Option<NonZeroU32>>>),
}
use FindResult::*;
impl<K, V> Map<K, V, AHasher> {
/// Create a new `Map`.
#[inline]
pub fn new() -> Self {
Map::<K, V, AHasher>::default()
}
/// Create a `Map` with a given capacity
#[inline]
pub fn with_capacity(capacity: usize) -> Self {
Map {
store: Vec::with_capacity(capacity),
hasher: PhantomData,
}
}
}
impl<K, V, H> Default for Map<K, V, H> {
/// Create a new `Map` with a custom hasher.
#[inline]
fn default() -> Self {
Map {
store: Vec::new(),
hasher: PhantomData,
}
}
}
impl<K, V, H> Map<K, V, H>
where
K: Hash + Eq,
H: Hasher + Default,
{
/// An iterator visiting all keys in arbitrary order.
/// The iterator element type is `&'a K`.
pub fn | (&self) -> Keys<'_, K, V> {
Keys { inner: self.iter() }
}
/// An iterator visiting all values in arbitrary order.
/// The iterator element type is `&'a V`.
pub fn values(&self) -> Values<'_, K, V> {
Values { inner: self.iter() }
}
/// Inserts a key-value pair into the map.
///
/// If the map did not have this key present, `None` is returned.
///
/// If the map did have this key present, the value is updated, and the old
/// value is returned. The key is not updated, though; this matters for
/// types that can be `==` without being identical.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// assert_eq!(map.insert(37, "a"), None);
/// assert_eq!(map.is_empty(), false);
///
/// map.insert(37, "b");
/// assert_eq!(map.insert(37, "c"), Some("b"));
/// assert_eq!(map[&37], "c");
/// ```
pub fn insert(&mut self, key: K, value: V) -> Option<V> {
let hash = Self::hash_key(&key);
match self.find(hash) {
Hit(idx) => unsafe { self.store.get_unchecked_mut(idx).value.replace(value) },
Miss(parent) => {
if let Some(parent) = parent {
parent.set(NonZeroU32::new(self.store.len() as u32));
}
self.store.push(Node::new(key, value, hash));
None
}
}
}
/// Returns a reference to the value corresponding to the key.
///
/// The key may be any borrowed form of the map's key type, but `Hash` and
/// `Eq` on the borrowed form must match those for the key type.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert(1, "a");
/// assert_eq!(map.get(&1), Some(&"a"));
/// assert_eq!(map.get(&2), None);
/// ```
pub fn get<Q>(&self, key: &Q) -> Option<&V>
where
K: Borrow<Q>,
Q: Hash + Eq +?Sized,
{
let hash = Self::hash_key(key);
match self.find(hash) {
Hit(idx) => {
let node = unsafe { self.store.get_unchecked(idx) };
node.value.as_ref()
}
Miss(_) => None,
}
}
/// Returns a mutable reference to the value corresponding to the key.
///
/// The key may be any borrowed form of the map's key type, but Hash and Eq
/// on the borrowed form must match those for the key type.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert(1, "a");
/// if let Some(x) = map.get_mut(&1) {
/// *x = "b";
/// }
/// assert_eq!(map[&1], "b");
/// ```
pub fn get_mut<Q>(&mut self, key: &Q) -> Option<&mut V>
where
K: Borrow<Q>,
Q: Hash + Eq +?Sized,
{
let hash = Self::hash_key(key);
match self.find(hash) {
Hit(idx) => unsafe { self.store.get_unchecked_mut(idx).value.as_mut() },
Miss(_) => None,
}
}
/// Returns `true` if the map contains a value for the specified key.
///
/// The key may be any borrowed form of the map's key type, but `Hash` and
/// `Eq` on the borrowed form must match those for the key type.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert(1, "a");
/// assert_eq!(map.contains_key(&1), true);
/// assert_eq!(map.contains_key(&2), false);
/// ```
pub fn contains_key<Q>(&self, key: &Q) -> bool
where
K: Borrow<Q>,
Q: Hash + Eq +?Sized,
{
let hash = Self::hash_key(key);
match self.find(hash) {
Hit(idx) => unsafe { self.store.get_unchecked(idx).value.is_some() },
Miss(_) => false,
}
}
/// Get a mutable reference to entry at key. Inserts a new entry by
/// calling `F` if absent.
// TODO: Replace with entry API
pub fn get_or_insert<F>(&mut self, key: K, fill: F) -> &mut V
where
F: FnOnce() -> V,
{
let hash = Self::hash_key(&key);
match self.find(hash) {
Hit(idx) => {
let node = unsafe { self.store.get_unchecked_mut(idx) };
if node.value.is_none() {
node.value = Some(fill());
}
node.value.as_mut().unwrap()
}
Miss(parent) => {
let idx = self.store.len();
if let Some(parent) = parent {
parent.set(NonZeroU32::new(self.store.len() as u32));
}
self.store.push(Node::new(key, fill(), hash));
self.store[idx].value.as_mut().unwrap()
}
}
}
/// Removes a key from the map, returning the value at the key if the key
/// was previously in the map.
///
/// The key may be any borrowed form of the map's key type, but `Hash` and
/// `Eq` on the borrowed form must match those for the key type.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert(1, "a");
/// assert_eq!(map.remove(&1), Some("a"));
/// assert_eq!(map.remove(&1), None);
/// ```
pub fn remove<Q>(&mut self, key: &Q) -> Option<V>
where
K: Borrow<Q>,
Q: Hash + Eq +?Sized,
{
let hash = Self::hash_key(key);
match self.find(hash) {
Hit(idx) => unsafe { self.store.get_unchecked_mut(idx).value.take() },
Miss(_) => return None,
}
}
/// Returns the number of elements in the map.
#[inline]
pub fn len(&self) -> usize {
self.store.len()
}
/// Returns `true` if the map contains no elements.
#[inline]
pub fn is_empty(&self) -> bool {
self.store.is_empty()
}
/// Clears the map, removing all key-value pairs. Keeps the allocated memory for reuse.
#[inline]
pub fn clear(&mut self) {
self.store.clear();
}
#[inline]
fn find(&self, hash: u64) -> FindResult {
if self.len() == 0 {
return Miss(None);
}
let mut idx = 0;
loop {
let node = unsafe { self.store.get_unchecked(idx) };
if hash < node.hash {
match node.left.get() {
Some(i) => idx = i.get() as usize,
None => return Miss(Some(&node.left)),
}
} else if hash > node.hash {
match node.right.get() {
Some(i) => idx = i.get() as usize,
None => return Miss(Some(&node.right)),
}
} else {
return Hit(idx);
}
}
}
#[inline]
fn hash_key<Q: Hash>(key: Q) -> u64 {
// let mut hasher = fnv::FnvHasher::default();
// let mut hasher = rustc_hash::FxHasher::default();
let mut hasher = H::default();
key.hash(&mut hasher);
hasher.finish()
}
/// An iterator visiting all key-value pairs in insertion order.
/// The iterator element type is `(&K, &V)`.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert("a", 1);
/// map.insert("b", 2);
/// map.insert("c", 3);
///
/// let entries: Vec<_> = map.iter().collect();
///
/// assert_eq!(
/// entries,
/// &[
/// (&"a", &1),
/// (&"b", &2),
/// (&"c", &3),
/// ],
/// );
/// ```
#[inline]
pub fn iter(&self) -> Iter<K, V> {
Iter {
inner: self.store.iter(),
}
}
/// An iterator visiting all key-value pairs in insertion order, with
/// mutable references to the values. The iterator element type is
/// (&K, &mut V).
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert("a", 1);
/// map.insert("b", 2);
/// map.insert("c", 3);
///
/// // Update all values
/// for (_, val) in map.iter_mut() {
/// *val *= 2;
/// }
///
/// // Check if values are doubled
/// let entries: Vec<_> = map.iter().collect();
///
/// assert_eq!(
/// entries,
/// &[
/// (&"a", &2),
/// (&"b", &4),
/// (&"c", &6),
/// ],
/// );
/// ```
#[inline]
pub fn iter_mut(&mut self) -> IterMut<K, V> {
IterMut {
inner: self.store.iter_mut(),
}
}
/// Creates a raw entry builder for the HashMap.
///
/// Raw entries provide the lowest level of control for searching and
/// manipulating a map. They must be manually initialized with a hash and
/// then manually searched. After this, insertions into a vacant entry
/// still require an owned key to be provided.
///
/// Raw entries are useful for such exotic situations as:
///
/// * Hash memoization
/// * Deferring the creation of an owned key until it is known to be required
/// * Using a search key that doesn't work with the Borrow trait
/// * Using custom comparison logic without newtype wrappers
///
/// Because raw entries provide much more low-level control, it's much easier
/// to put the HashMap into an inconsistent state which, while memory-safe,
/// will cause the map to produce seemingly random results. Higher-level and
/// more foolproof APIs like `entry` should be preferred when possible.
///
/// In particular, the hash used to initialized the raw entry must still be
/// consistent with the hash of the key that is ultimately stored in the entry.
/// This is because implementations of HashMap may need to recompute hashes
/// when resizing, at which point only the keys are available.
///
/// Raw entries give mutable access to the keys. This must not be used
/// to modify how the key would compare or hash, as the map will not re-evaluate
/// where the key should go, meaning the keys may become "lost" if their
/// location does not reflect their state. For instance, if you change a key
/// so that the map now contains keys which compare equal, search may start
/// acting erratically, with two keys randomly masking each other. Implementations
/// are free to assume this doesn't happen (within the limits of memory-safety).
#[inline]
pub fn raw_entry_mut(&mut self) -> RawEntryBuilderMut<'_, K, V, H> {
RawEntryBuilderMut { map: self }
}
/// Creates a raw immutable entry builder for the HashMap.
///
/// Raw entries provide the lowest level of control for searching and
/// manipulating a map. They must be manually initialized with a hash and
/// then manually searched.
///
/// This is useful for
/// * Hash memoization
/// * Using a search key that doesn't work with the Borrow trait
/// * Using custom comparison logic without newtype wrappers
///
/// Unless you are in such a situation, higher-level and more foolproof APIs like
/// `get` should be preferred.
///
/// Immutable raw entries have very limited use; you might instead want `raw_entry_mut`.
#[inline]
pub fn raw_entry(&self) -> RawEntryBuilder<'_, K, V, H> {
RawEntryBuilder { map: self }
}
/// Gets the given key's corresponding entry in the map for in-place manipulation.
///
/// # Examples
///
/// ```
/// use ordnung::Map;
///
/// let mut letters = Map::new();
///
/// for ch in "a short treatise on fungi".chars() {
/// let counter = letters.entry(ch).or_insert(0);
/// *counter += 1;
/// }
///
/// assert_eq!(letters[&'s'], 2);
/// assert_eq!(letters[&'t'], 3);
/// assert_eq!(letters[&'u'], 1);
/// assert_eq!(letters.get(&'y'), None);
/// ```
pub fn entry(&mut self, key: K) -> Entry<K, V, H>
where
K: Eq + Clone,
{
for (idx, n) in self.store.iter().enumerate() {
if &key == &n.key {
return Entry::Occupied(OccupiedEntry::new(idx, key, self));
}
}
Entry::Vacant(VacantEntry::new(key, self))
}
}
impl<K, V> IntoIterator for Map<K, V> {
type Item = (K, V);
type IntoIter = IntoIter<K, V>;
#[inline]
fn into_iter(self) -> IntoIter<K, V> {
IntoIter(self)
}
}
/// Consuming iterator
pub struct IntoIter<K, V>(Map<K, V>);
impl<K, V> IntoIter<K, V> {
/// The length of this iterator
pub fn len(&self) -> usize {
self.0.store.len()
}
/// Returns `true` if this iterator is empty
pub fn is_empty(&self) -> bool {
self.len() == 0
}
}
impl<K, V> Iterator for IntoIter<K, V> {
type Item = (K, V);
#[inline]
fn next(&mut self) -> Option<Self::Item> {
loop {
if let Some(n) = self.0.store.pop() {
if let Some(v) = n.value {
return Some((n.key, v));
}
} else {
return None;
}
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
let l = self.0.store.len();
(l, Some(l))
}
}
impl<K, Q:?Sized, V> Index<&Q> for Map<K, V>
where
K: Eq + Hash + Borrow<Q>,
Q: Eq + Hash,
{
type Output = V;
/// Returns a reference to the value corresponding to the supplied key.
///
/// # Panics
///
/// Panics if the key is not present in the HashMap.
fn index(&self, key: &Q) -> &V {
self.get(key).expect("Key not found in Map")
}
}
impl<'json, IK, IV, K, V> FromIterator<(IK, IV)> for Map<K, V>
where
IK: Into<K>,
IV: Into<V>,
K: Hash + Eq,
{
fn from_iter<I>(iter: I) -> Self
where
I: IntoIterator<Item = (IK, IV)>,
{
let iter = iter.into_iter();
let mut map = Map::with_capacity(iter.size_hint().0);
for (key, value) in iter {
map.insert(key.into(), value.into());
}
map
}
}
// Because keys can be inserted in a different order, the safe way to
// compare `Map`s is to iterate over one and check that the other
// has all the same keys.
impl<K, V> PartialEq for Map<K, V>
where
K: Hash + Eq,
V: PartialEq,
{
fn eq(&self, other: &Self) -> bool {
if self.len() != other.len() {
return false;
}
// Faster than .get() since we can avoid hashing
for &Node {
ref value, hash,..
} in self.store.iter()
{
if let Hit(idx) = other.find(hash) {
if &other.store[idx].value == value {
continue;
}
}
return false;
}
true
}
}
/// An iterator over the entries of a `Map`.
///
/// This struct is created by the [`iter`](./struct.Map.html#method.iter)
/// method on [`Map`](./struct.Map.html). See its documentation for more.
pub struct Iter<'a, K, V> {
inner: slice::Iter<'a, Node<K, V>>,
}
/// A mutable iterator over the entries of a `Map`.
///
/// This struct is created by the [`iter_mut`](./struct.Map.html#method.iter_mut)
/// method on [`Map`](./struct.Map.html). See its documentation for more.
pub struct IterMut<'a, K, V> {
inner: slice::IterMut<'a, Node<K, V>>,
}
impl<K, V> Iter<'_, K, V> {
/// Create an empty iterator that always returns `None` | keys | identifier_name |
lib.rs | }
}
impl<K, V> PartialEq for Node<K, V>
where
K: PartialEq,
V: PartialEq,
{
fn eq(&self, other: &Self) -> bool {
self.hash == other.hash && self.key == other.key && self.value == other.value
}
}
impl<K, V> Node<K, V> {
#[inline]
const fn new(key: K, value: V, hash: u64) -> Self {
Node {
key,
hash,
value: Some(value),
left: Cell::new(None),
right: Cell::new(None),
}
}
}
// `Cell` isn't `Sync`, but all of our writes are contained and require
// `&mut` access, ergo this is safe.
unsafe impl<K: Sync, V: Sync> Sync for Node<K, V> {}
/// A binary tree implementation of a string -> `JsonValue` map. You normally don't
/// have to interact with instances of `Object`, much more likely you will be
/// using the `JsonValue::Object` variant, which wraps around this struct.
#[derive(Debug, Clone)]
pub struct Map<K, V, H = AHasher> {
store: Vec<Node<K, V>>,
hasher: PhantomData<H>,
}
enum FindResult<'find> {
Hit(usize),
Miss(Option<&'find Cell<Option<NonZeroU32>>>),
}
use FindResult::*;
impl<K, V> Map<K, V, AHasher> {
/// Create a new `Map`.
#[inline]
pub fn new() -> Self {
Map::<K, V, AHasher>::default()
}
/// Create a `Map` with a given capacity
#[inline]
pub fn with_capacity(capacity: usize) -> Self {
Map {
store: Vec::with_capacity(capacity),
hasher: PhantomData,
}
}
}
impl<K, V, H> Default for Map<K, V, H> {
/// Create a new `Map` with a custom hasher.
#[inline]
fn default() -> Self {
Map {
store: Vec::new(),
hasher: PhantomData,
}
}
}
impl<K, V, H> Map<K, V, H>
where
K: Hash + Eq,
H: Hasher + Default,
{
/// An iterator visiting all keys in arbitrary order.
/// The iterator element type is `&'a K`.
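///
/// # Examples
///
/// A small sketch of typical use (not part of the original docs):
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert("a", 1);
/// map.insert("b", 2);
///
/// let keys: Vec<_> = map.keys().collect();
/// assert_eq!(keys.len(), 2);
/// ```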
pub fn keys(&self) -> Keys<'_, K, V> {
Keys { inner: self.iter() }
}
/// An iterator visiting all values in arbitrary order.
/// The iterator element type is `&'a V`.
pub fn values(&self) -> Values<'_, K, V> {
Values { inner: self.iter() }
}
/// Inserts a key-value pair into the map.
///
/// If the map did not have this key present, `None` is returned.
///
/// If the map did have this key present, the value is updated, and the old
/// value is returned. The key is not updated, though; this matters for
/// types that can be `==` without being identical.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// assert_eq!(map.insert(37, "a"), None);
/// assert_eq!(map.is_empty(), false);
///
/// map.insert(37, "b");
/// assert_eq!(map.insert(37, "c"), Some("b"));
/// assert_eq!(map[&37], "c");
/// ```
pub fn insert(&mut self, key: K, value: V) -> Option<V> {
let hash = Self::hash_key(&key);
match self.find(hash) {
Hit(idx) => unsafe { self.store.get_unchecked_mut(idx).value.replace(value) },
Miss(parent) => {
if let Some(parent) = parent {
parent.set(NonZeroU32::new(self.store.len() as u32));
}
self.store.push(Node::new(key, value, hash));
None
}
}
}
/// Returns a reference to the value corresponding to the key.
///
/// The key may be any borrowed form of the map's key type, but `Hash` and
/// `Eq` on the borrowed form must match those for the key type.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert(1, "a");
/// assert_eq!(map.get(&1), Some(&"a"));
/// assert_eq!(map.get(&2), None);
/// ```
pub fn get<Q>(&self, key: &Q) -> Option<&V>
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
let hash = Self::hash_key(key);
match self.find(hash) {
Hit(idx) => {
let node = unsafe { self.store.get_unchecked(idx) };
node.value.as_ref()
}
Miss(_) => None,
}
}
/// Returns a mutable reference to the value corresponding to the key.
///
/// The key may be any borrowed form of the map's key type, but Hash and Eq
/// on the borrowed form must match those for the key type.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert(1, "a");
/// if let Some(x) = map.get_mut(&1) {
/// *x = "b";
/// }
/// assert_eq!(map[&1], "b");
/// ```
pub fn get_mut<Q>(&mut self, key: &Q) -> Option<&mut V>
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
let hash = Self::hash_key(key);
match self.find(hash) {
Hit(idx) => unsafe { self.store.get_unchecked_mut(idx).value.as_mut() },
Miss(_) => None,
}
}
/// Returns `true` if the map contains a value for the specified key.
///
/// The key may be any borrowed form of the map's key type, but `Hash` and
/// `Eq` on the borrowed form must match those for the key type.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert(1, "a");
/// assert_eq!(map.contains_key(&1), true);
/// assert_eq!(map.contains_key(&2), false);
/// ```
pub fn contains_key<Q>(&self, key: &Q) -> bool
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
let hash = Self::hash_key(key);
match self.find(hash) {
Hit(idx) => unsafe { self.store.get_unchecked(idx).value.is_some() },
Miss(_) => false,
}
}
/// Get a mutable reference to entry at key. Inserts a new entry by
/// calling `F` if absent.
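///
/// # Examples
///
/// A small sketch of typical use (not part of the original docs):
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// *map.get_or_insert("hits", || 0) += 1;
/// *map.get_or_insert("hits", || 0) += 1;
/// assert_eq!(map.get(&"hits"), Some(&2));
/// ```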
// TODO: Replace with entry API
pub fn get_or_insert<F>(&mut self, key: K, fill: F) -> &mut V
where
F: FnOnce() -> V,
{
let hash = Self::hash_key(&key);
match self.find(hash) {
Hit(idx) => {
let node = unsafe { self.store.get_unchecked_mut(idx) };
if node.value.is_none() {
node.value = Some(fill());
}
node.value.as_mut().unwrap()
}
Miss(parent) => {
let idx = self.store.len();
if let Some(parent) = parent {
parent.set(NonZeroU32::new(self.store.len() as u32));
}
self.store.push(Node::new(key, fill(), hash));
self.store[idx].value.as_mut().unwrap()
}
}
}
/// Removes a key from the map, returning the value at the key if the key
/// was previously in the map.
///
/// The key may be any borrowed form of the map's key type, but `Hash` and
/// `Eq` on the borrowed form must match those for the key type.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert(1, "a");
/// assert_eq!(map.remove(&1), Some("a"));
/// assert_eq!(map.remove(&1), None);
/// ```
pub fn remove<Q>(&mut self, key: &Q) -> Option<V>
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
let hash = Self::hash_key(key);
match self.find(hash) {
Hit(idx) => unsafe { self.store.get_unchecked_mut(idx).value.take() },
Miss(_) => return None,
}
}
/// Returns the number of elements in the map.
#[inline]
pub fn len(&self) -> usize {
self.store.len()
}
/// Returns `true` if the map contains no elements.
#[inline]
pub fn is_empty(&self) -> bool {
self.store.is_empty()
}
/// Clears the map, removing all key-value pairs. Keeps the allocated memory for reuse.
#[inline]
pub fn clear(&mut self) {
self.store.clear();
}
#[inline]
fn find(&self, hash: u64) -> FindResult {
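// Walk the implicit binary tree that lives inside `self.store`, starting at
// the root node (index 0): smaller hashes descend left, larger ones descend
// right. A missing branch is reported as `Miss`, carrying the `Cell` that a
// subsequent insertion can point at the newly pushed node.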
if self.len() == 0 {
return Miss(None);
}
let mut idx = 0;
loop {
let node = unsafe { self.store.get_unchecked(idx) };
if hash < node.hash {
match node.left.get() {
Some(i) => idx = i.get() as usize,
None => return Miss(Some(&node.left)),
}
} else if hash > node.hash {
match node.right.get() {
Some(i) => idx = i.get() as usize,
None => return Miss(Some(&node.right)),
}
} else {
return Hit(idx);
}
}
}
#[inline]
fn hash_key<Q: Hash>(key: Q) -> u64 {
// let mut hasher = fnv::FnvHasher::default();
// let mut hasher = rustc_hash::FxHasher::default();
let mut hasher = H::default();
key.hash(&mut hasher);
hasher.finish()
}
/// An iterator visiting all key-value pairs in insertion order.
/// The iterator element type is `(&K, &V)`.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert("a", 1);
/// map.insert("b", 2);
/// map.insert("c", 3);
///
/// let entries: Vec<_> = map.iter().collect();
///
/// assert_eq!(
/// entries,
/// &[
/// (&"a", &1),
/// (&"b", &2),
/// (&"c", &3),
/// ],
/// );
/// ```
#[inline]
pub fn iter(&self) -> Iter<K, V> {
Iter {
inner: self.store.iter(),
}
}
/// An iterator visiting all key-value pairs in insertion order, with
/// mutable references to the values. The iterator element type is
/// (&K, &mut V).
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert("a", 1);
/// map.insert("b", 2);
/// map.insert("c", 3);
///
/// // Update all values
/// for (_, val) in map.iter_mut() {
/// *val *= 2;
/// }
///
/// // Check if values are doubled
/// let entries: Vec<_> = map.iter().collect();
///
/// assert_eq!(
/// entries,
/// &[
/// (&"a", &2),
/// (&"b", &4),
/// (&"c", &6),
/// ],
/// );
/// ```
#[inline]
pub fn iter_mut(&mut self) -> IterMut<K, V> {
IterMut {
inner: self.store.iter_mut(),
}
}
/// Creates a raw entry builder for the HashMap.
///
/// Raw entries provide the lowest level of control for searching and
/// manipulating a map. They must be manually initialized with a hash and
/// then manually searched. After this, insertions into a vacant entry
/// still require an owned key to be provided.
///
/// Raw entries are useful for such exotic situations as:
///
/// * Hash memoization
/// * Deferring the creation of an owned key until it is known to be required
/// * Using a search key that doesn't work with the Borrow trait
/// * Using custom comparison logic without newtype wrappers
///
/// Because raw entries provide much more low-level control, it's much easier
/// to put the HashMap into an inconsistent state which, while memory-safe,
/// will cause the map to produce seemingly random results. Higher-level and
/// more foolproof APIs like `entry` should be preferred when possible.
///
/// In particular, the hash used to initialize the raw entry must still be
/// consistent with the hash of the key that is ultimately stored in the entry.
/// This is because implementations of HashMap may need to recompute hashes
/// when resizing, at which point only the keys are available.
///
/// Raw entries give mutable access to the keys. This must not be used
/// to modify how the key would compare or hash, as the map will not re-evaluate
/// where the key should go, meaning the keys may become "lost" if their
/// location does not reflect their state. For instance, if you change a key
/// so that the map now contains keys which compare equal, search may start
/// acting erratically, with two keys randomly masking each other. Implementations
/// are free to assume this doesn't happen (within the limits of memory-safety).
#[inline]
pub fn raw_entry_mut(&mut self) -> RawEntryBuilderMut<'_, K, V, H> {
RawEntryBuilderMut { map: self }
}
/// Creates a raw immutable entry builder for the HashMap.
///
/// Raw entries provide the lowest level of control for searching and
/// manipulating a map. They must be manually initialized with a hash and
/// then manually searched.
///
/// This is useful for:
/// * Hash memoization
/// * Using a search key that doesn't work with the Borrow trait
/// * Using custom comparison logic without newtype wrappers
///
/// Unless you are in such a situation, higher-level and more foolproof APIs like
/// `get` should be preferred.
///
/// Immutable raw entries have very limited use; you might instead want `raw_entry_mut`.
#[inline]
pub fn raw_entry(&self) -> RawEntryBuilder<'_, K, V, H> {
RawEntryBuilder { map: self }
}
/// Gets the given key's corresponding entry in the map for in-place manipulation.
///
/// # Examples
///
/// ```
/// use ordnung::Map;
///
/// let mut letters = Map::new();
///
/// for ch in "a short treatise on fungi".chars() {
/// let counter = letters.entry(ch).or_insert(0);
/// *counter += 1;
/// }
///
/// assert_eq!(letters[&'s'], 2);
/// assert_eq!(letters[&'t'], 3);
/// assert_eq!(letters[&'u'], 1);
/// assert_eq!(letters.get(&'y'), None);
/// ```
pub fn entry(&mut self, key: K) -> Entry<K, V, H>
where
K: Eq + Clone,
{
for (idx, n) in self.store.iter().enumerate() {
if &key == &n.key {
return Entry::Occupied(OccupiedEntry::new(idx, key, self));
}
}
Entry::Vacant(VacantEntry::new(key, self))
}
}
impl<K, V> IntoIterator for Map<K, V> {
type Item = (K, V);
type IntoIter = IntoIter<K, V>;
#[inline]
fn into_iter(self) -> IntoIter<K, V> {
IntoIter(self)
}
}
/// Consuming iterator
pub struct IntoIter<K, V>(Map<K, V>);
impl<K, V> IntoIter<K, V> {
/// The length of this iterator
pub fn len(&self) -> usize {
self.0.store.len()
}
/// If this iterator is empty
pub fn is_empty(&self) -> bool {
self.len() == 0
}
}
impl<K, V> Iterator for IntoIter<K, V> {
type Item = (K, V);
#[inline]
fn next(&mut self) -> Option<Self::Item> {
loop {
if let Some(n) = self.0.store.pop() {
if let Some(v) = n.value {
return Some((n.key, v));
}
} else {
return None;
}
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
let l = self.0.store.len();
(l, Some(l))
}
}
impl<K, Q: ?Sized, V> Index<&Q> for Map<K, V>
where
K: Eq + Hash + Borrow<Q>,
Q: Eq + Hash,
{
type Output = V;
/// Returns a reference to the value corresponding to the supplied key.
///
/// # Panics
///
/// Panics if the key is not present in the HashMap.
fn index(&self, key: &Q) -> &V {
self.get(key).expect("Key not found in Map")
}
}
impl<'json, IK, IV, K, V> FromIterator<(IK, IV)> for Map<K, V>
where
IK: Into<K>,
IV: Into<V>,
K: Hash + Eq,
{
fn from_iter<I>(iter: I) -> Self
where
I: IntoIterator<Item = (IK, IV)>,
{
let iter = iter.into_iter();
let mut map = Map::with_capacity(iter.size_hint().0);
for (key, value) in iter {
map.insert(key.into(), value.into());
}
map
}
}
// Because keys can be inserted in a different order, the safe way to
// compare `Map`s is to iterate over one and check if the other
// has all the same keys.
impl<K, V> PartialEq for Map<K, V>
where
K: Hash + Eq,
V: PartialEq,
{
fn eq(&self, other: &Self) -> bool {
if self.len() != other.len() {
return false;
}
// Faster than `.get()` since we can avoid hashing
for &Node {
ref value, hash,..
} in self.store.iter()
{
if let Hit(idx) = other.find(hash) {
if &other.store[idx].value == value {
continue;
}
}
return false;
}
true
}
}
/// An iterator over the entries of a `Map`.
///
/// This struct is created by the [`iter`](./struct.Map.html#method.iter)
/// method on [`Map`](./struct.Map.html). See its documentation for more.
pub struct Iter<'a, K, V> {
inner: slice::Iter<'a, Node<K, V>>,
}
/// A mutable iterator over the entries of a `Map`.
///
/// This struct is created by the [`iter_mut`](./struct.Map.html#method.iter_mut)
/// method on [`Map`](./struct.Map.html). See its documentation for more.
pub struct IterMut<'a, K, V> {
inner: slice::IterMut<'a, Node<K, V>>,
}
impl<K, V> Iter<'_, K, V> {
/// Create an empty iterator that always returns `None`
pub fn empty() -> Self {
Iter { inner: [].iter() }
}
}
impl<'i, K, V> Iterator for Iter<'i, K, V> {
type Item = (&'i K, &'i V);
#[inline]
fn next(&mut self) -> Option<Self::Item> {
while let Some(node) = self.inner.next() {
let value = match node.value {
Some(ref value) => value,
None => continue,
};
return Some((&node.key, value));
}
None
}
}
impl<K, V> DoubleEndedIterator for Iter<'_, K, V> {
#[inline]
fn next_back(&mut self) -> Option<Self::Item> {
while let Some(node) = self.inner.next_back() {
let value = match node.value {
Some(ref value) => value,
None => continue,
};
return Some((&node.key, value));
}
None
}
}
impl<K, V> ExactSizeIterator for Iter<'_, K, V> {
fn len(&self) -> usize {
self.inner.len()
}
}
impl<K, V> IterMut<'_, K, V> {
/// Create an empty iterator that always returns `None`
pub fn empty() -> Self | {
IterMut {
inner: [].iter_mut(),
}
} | identifier_body |
|
lib.rs | k)
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
}
//#[derive(Clone)]
/// Iterator over the values
pub struct Values<'a, K, V> {
inner: Iter<'a, K, V>,
}
impl<'a, K, V> Iterator for Values<'a, K, V> {
type Item = &'a V;
#[inline]
fn next(&mut self) -> Option<&'a V> {
self.inner.next().map(|(_, v)| v)
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
}
#[derive(Clone)]
struct Node<K, V> {
// Key
pub key: K,
// Hash of the key
pub hash: u64,
// Value stored. We'll use `None` as a sentinel value for removed
// entries.
pub value: Option<V>,
// Store vector index pointing to the `Node` for which `hash` is smaller
// than that of this `Node`.
pub left: Cell<Option<NonZeroU32>>,
// Same as above but for `Node`s with hash larger than this one. If the
// hash is the same, but keys are different, the lookup will default
// to the right branch as well.
pub right: Cell<Option<NonZeroU32>>,
}
impl<K, V> fmt::Debug for Node<K, V>
where
K: fmt::Debug,
V: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(
&(&self.key, &self.value, self.left.get(), self.right.get()),
f,
)
}
}
impl<K, V> PartialEq for Node<K, V>
where
K: PartialEq,
V: PartialEq,
{
fn eq(&self, other: &Self) -> bool {
self.hash == other.hash && self.key == other.key && self.value == other.value
}
}
impl<K, V> Node<K, V> {
#[inline]
const fn new(key: K, value: V, hash: u64) -> Self {
Node {
key,
hash,
value: Some(value),
left: Cell::new(None),
right: Cell::new(None),
}
}
}
// `Cell` isn't `Sync`, but all of our writes are contained and require
// `&mut` access, ergo this is safe.
unsafe impl<K: Sync, V: Sync> Sync for Node<K, V> {}
/// A binary tree implementation of a string -> `JsonValue` map. You normally don't
/// have to interact with instances of `Object`, much more likely you will be
/// using the `JsonValue::Object` variant, which wraps around this struct.
#[derive(Debug, Clone)]
pub struct Map<K, V, H = AHasher> {
store: Vec<Node<K, V>>,
hasher: PhantomData<H>,
}
enum FindResult<'find> {
Hit(usize),
Miss(Option<&'find Cell<Option<NonZeroU32>>>),
}
use FindResult::*;
impl<K, V> Map<K, V, AHasher> {
/// Create a new `Map`.
#[inline]
pub fn new() -> Self {
Map::<K, V, AHasher>::default()
}
/// Create a `Map` with a given capacity
#[inline]
pub fn with_capacity(capacity: usize) -> Self {
Map {
store: Vec::with_capacity(capacity),
hasher: PhantomData,
}
}
}
impl<K, V, H> Default for Map<K, V, H> {
/// Create a new `Map` with a custom hasher.
#[inline]
fn default() -> Self {
Map {
store: Vec::new(),
hasher: PhantomData,
}
}
}
impl<K, V, H> Map<K, V, H>
where
K: Hash + Eq,
H: Hasher + Default,
{
/// An iterator visiting all keys in arbitrary order.
/// The iterator element type is `&'a K`.
pub fn keys(&self) -> Keys<'_, K, V> {
Keys { inner: self.iter() }
}
/// An iterator visiting all values in arbitrary order.
/// The iterator element type is `&'a V`.
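///
/// # Examples
///
/// A small sketch of typical use (not part of the original docs):
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert("a", 1);
/// map.insert("b", 2);
///
/// let sum: i32 = map.values().sum();
/// assert_eq!(sum, 3);
/// ```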
pub fn values(&self) -> Values<'_, K, V> {
Values { inner: self.iter() }
}
/// Inserts a key-value pair into the map.
///
/// If the map did not have this key present, `None` is returned.
///
/// If the map did have this key present, the value is updated, and the old
/// value is returned. The key is not updated, though; this matters for
/// types that can be `==` without being identical.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// assert_eq!(map.insert(37, "a"), None);
/// assert_eq!(map.is_empty(), false);
///
/// map.insert(37, "b");
/// assert_eq!(map.insert(37, "c"), Some("b"));
/// assert_eq!(map[&37], "c");
/// ```
pub fn insert(&mut self, key: K, value: V) -> Option<V> {
let hash = Self::hash_key(&key);
match self.find(hash) {
Hit(idx) => unsafe { self.store.get_unchecked_mut(idx).value.replace(value) },
Miss(parent) => {
if let Some(parent) = parent {
parent.set(NonZeroU32::new(self.store.len() as u32));
}
self.store.push(Node::new(key, value, hash));
None
}
}
}
/// Returns a reference to the value corresponding to the key.
///
/// The key may be any borrowed form of the map's key type, but `Hash` and
/// `Eq` on the borrowed form must match those for the key type.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert(1, "a");
/// assert_eq!(map.get(&1), Some(&"a"));
/// assert_eq!(map.get(&2), None);
/// ```
pub fn get<Q>(&self, key: &Q) -> Option<&V>
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
let hash = Self::hash_key(key);
match self.find(hash) {
Hit(idx) => {
let node = unsafe { self.store.get_unchecked(idx) };
node.value.as_ref()
}
Miss(_) => None,
}
}
/// Returns a mutable reference to the value corresponding to the key.
///
/// The key may be any borrowed form of the map's key type, but Hash and Eq
/// on the borrowed form must match those for the key type.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert(1, "a");
/// if let Some(x) = map.get_mut(&1) {
/// *x = "b";
/// }
/// assert_eq!(map[&1], "b");
/// ```
pub fn get_mut<Q>(&mut self, key: &Q) -> Option<&mut V>
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
let hash = Self::hash_key(key);
match self.find(hash) {
Hit(idx) => unsafe { self.store.get_unchecked_mut(idx).value.as_mut() },
Miss(_) => None,
}
}
/// Returns `true` if the map contains a value for the specified key.
///
/// The key may be any borrowed form of the map's key type, but `Hash` and
/// `Eq` on the borrowed form must match those for the key type.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert(1, "a");
/// assert_eq!(map.contains_key(&1), true);
/// assert_eq!(map.contains_key(&2), false);
/// ```
pub fn contains_key<Q>(&self, key: &Q) -> bool
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
let hash = Self::hash_key(key);
match self.find(hash) {
Hit(idx) => unsafe { self.store.get_unchecked(idx).value.is_some() },
Miss(_) => false,
}
}
/// Get a mutable reference to entry at key. Inserts a new entry by
/// calling `F` if absent.
// TODO: Replace with entry API
pub fn get_or_insert<F>(&mut self, key: K, fill: F) -> &mut V
where
F: FnOnce() -> V,
{
let hash = Self::hash_key(&key);
match self.find(hash) {
Hit(idx) => {
let node = unsafe { self.store.get_unchecked_mut(idx) };
if node.value.is_none() {
node.value = Some(fill());
}
node.value.as_mut().unwrap()
}
Miss(parent) => {
let idx = self.store.len();
if let Some(parent) = parent {
parent.set(NonZeroU32::new(self.store.len() as u32));
}
self.store.push(Node::new(key, fill(), hash));
self.store[idx].value.as_mut().unwrap()
}
}
}
/// Removes a key from the map, returning the value at the key if the key
/// was previously in the map.
///
/// The key may be any borrowed form of the map's key type, but `Hash` and
/// `Eq` on the borrowed form must match those for the key type.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert(1, "a");
/// assert_eq!(map.remove(&1), Some("a"));
/// assert_eq!(map.remove(&1), None);
/// ```
pub fn remove<Q>(&mut self, key: &Q) -> Option<V>
where
K: Borrow<Q>,
Q: Hash + Eq + ?Sized,
{
let hash = Self::hash_key(key);
match self.find(hash) {
Hit(idx) => unsafe { self.store.get_unchecked_mut(idx).value.take() },
Miss(_) => return None,
}
}
/// Returns the number of elements in the map.
#[inline]
pub fn len(&self) -> usize {
self.store.len()
}
/// Returns `true` if the map contains no elements.
#[inline]
pub fn is_empty(&self) -> bool {
self.store.is_empty()
}
/// Clears the map, removing all key-value pairs. Keeps the allocated memory for reuse.
#[inline]
pub fn clear(&mut self) {
self.store.clear();
}
#[inline]
fn find(&self, hash: u64) -> FindResult {
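// Descend the implicit binary tree stored in `self.store`: smaller hashes go
// left, larger ones go right. An empty branch is returned as `Miss` together
// with the `Cell` an insertion can later point at the new node.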
if self.len() == 0 {
return Miss(None);
}
let mut idx = 0;
loop {
let node = unsafe { self.store.get_unchecked(idx) };
if hash < node.hash {
match node.left.get() {
Some(i) => idx = i.get() as usize,
None => return Miss(Some(&node.left)),
}
} else if hash > node.hash {
match node.right.get() {
Some(i) => idx = i.get() as usize,
None => return Miss(Some(&node.right)),
}
} else {
return Hit(idx);
}
}
}
#[inline]
fn hash_key<Q: Hash>(key: Q) -> u64 {
// let mut hasher = fnv::FnvHasher::default();
// let mut hasher = rustc_hash::FxHasher::default();
let mut hasher = H::default();
key.hash(&mut hasher);
hasher.finish()
}
/// An iterator visiting all key-value pairs in insertion order.
/// The iterator element type is `(&K, &V)`.
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert("a", 1);
/// map.insert("b", 2);
/// map.insert("c", 3);
///
/// let entries: Vec<_> = map.iter().collect();
///
/// assert_eq!(
/// entries,
/// &[
/// (&"a", &1),
/// (&"b", &2),
/// (&"c", &3),
/// ],
/// );
/// ```
#[inline]
pub fn iter(&self) -> Iter<K, V> {
Iter {
inner: self.store.iter(),
}
}
/// An iterator visiting all key-value pairs in insertion order, with
/// mutable references to the values. The iterator element type is
/// (&K, &mut V).
///
/// # Examples
///
/// ```rust
/// use ordnung::Map;
///
/// let mut map = Map::new();
/// map.insert("a", 1);
/// map.insert("b", 2);
/// map.insert("c", 3);
///
/// // Update all values
/// for (_, val) in map.iter_mut() {
/// *val *= 2;
/// }
///
/// // Check if values are doubled
/// let entries: Vec<_> = map.iter().collect();
///
/// assert_eq!(
/// entries,
/// &[
/// (&"a", &2),
/// (&"b", &4),
/// (&"c", &6),
/// ],
/// );
/// ```
#[inline]
pub fn iter_mut(&mut self) -> IterMut<K, V> {
IterMut {
inner: self.store.iter_mut(),
}
}
/// Creates a raw entry builder for the HashMap.
///
/// Raw entries provide the lowest level of control for searching and
/// manipulating a map. They must be manually initialized with a hash and
/// then manually searched. After this, insertions into a vacant entry
/// still require an owned key to be provided.
///
/// Raw entries are useful for such exotic situations as:
///
/// * Hash memoization
/// * Deferring the creation of an owned key until it is known to be required
/// * Using a search key that doesn't work with the Borrow trait
/// * Using custom comparison logic without newtype wrappers
///
/// Because raw entries provide much more low-level control, it's much easier
/// to put the HashMap into an inconsistent state which, while memory-safe,
/// will cause the map to produce seemingly random results. Higher-level and
/// more foolproof APIs like `entry` should be preferred when possible.
///
/// In particular, the hash used to initialize the raw entry must still be
/// consistent with the hash of the key that is ultimately stored in the entry.
/// This is because implementations of HashMap may need to recompute hashes
/// when resizing, at which point only the keys are available.
///
/// Raw entries give mutable access to the keys. This must not be used
/// to modify how the key would compare or hash, as the map will not re-evaluate
/// where the key should go, meaning the keys may become "lost" if their
/// location does not reflect their state. For instance, if you change a key
/// so that the map now contains keys which compare equal, search may start
/// acting erratically, with two keys randomly masking each other. Implementations
/// are free to assume this doesn't happen (within the limits of memory-safety).
#[inline]
pub fn raw_entry_mut(&mut self) -> RawEntryBuilderMut<'_, K, V, H> {
RawEntryBuilderMut { map: self }
}
/// Creates a raw immutable entry builder for the HashMap.
///
/// Raw entries provide the lowest level of control for searching and
/// manipulating a map. They must be manually initialized with a hash and
/// then manually searched.
///
/// This is useful for:
/// * Hash memoization
/// * Using a search key that doesn't work with the Borrow trait
/// * Using custom comparison logic without newtype wrappers
///
/// Unless you are in such a situation, higher-level and more foolproof APIs like
/// `get` should be preferred.
///
/// Immutable raw entries have very limited use; you might instead want `raw_entry_mut`.
#[inline]
pub fn raw_entry(&self) -> RawEntryBuilder<'_, K, V, H> {
RawEntryBuilder { map: self }
}
/// Gets the given key's corresponding entry in the map for in-place manipulation.
///
/// # Examples
///
/// ```
/// use ordnung::Map;
///
/// let mut letters = Map::new();
///
/// for ch in "a short treatise on fungi".chars() {
/// let counter = letters.entry(ch).or_insert(0);
/// *counter += 1;
/// }
///
/// assert_eq!(letters[&'s'], 2);
/// assert_eq!(letters[&'t'], 3);
/// assert_eq!(letters[&'u'], 1);
/// assert_eq!(letters.get(&'y'), None);
/// ```
pub fn entry(&mut self, key: K) -> Entry<K, V, H>
where
K: Eq + Clone,
{
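// Unlike `get` and `insert`, this performs a linear scan over the backing
// store and compares keys directly instead of descending the hash tree
// built by `find`.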
for (idx, n) in self.store.iter().enumerate() {
if &key == &n.key {
return Entry::Occupied(OccupiedEntry::new(idx, key, self));
}
}
Entry::Vacant(VacantEntry::new(key, self))
}
}
impl<K, V> IntoIterator for Map<K, V> {
type Item = (K, V);
type IntoIter = IntoIter<K, V>;
#[inline]
fn into_iter(self) -> IntoIter<K, V> {
IntoIter(self)
}
}
/// Consuming iterator
pub struct IntoIter<K, V>(Map<K, V>);
impl<K, V> IntoIter<K, V> {
/// The length of this iterator
pub fn len(&self) -> usize {
self.0.store.len()
}
/// If this iterator is empty
pub fn is_empty(&self) -> bool {
self.len() == 0
}
}
impl<K, V> Iterator for IntoIter<K, V> {
type Item = (K, V);
#[inline]
fn next(&mut self) -> Option<Self::Item> {
loop {
if let Some(n) = self.0.store.pop() {
if let Some(v) = n.value {
return Some((n.key, v));
}
} else {
return None;
}
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
let l = self.0.store.len();
(l, Some(l))
}
}
impl<K, Q: ?Sized, V> Index<&Q> for Map<K, V>
where
K: Eq + Hash + Borrow<Q>,
Q: Eq + Hash,
{
type Output = V;
/// Returns a reference to the value corresponding to the supplied key.
///
/// # Panics
///
/// Panics if the key is not present in the HashMap.
fn index(&self, key: &Q) -> &V {
self.get(key).expect("Key not found in Map")
}
}
impl<'json, IK, IV, K, V> FromIterator<(IK, IV)> for Map<K, V>
where
IK: Into<K>,
IV: Into<V>,
K: Hash + Eq,
{
fn from_iter<I>(iter: I) -> Self
where
I: IntoIterator<Item = (IK, IV)>,
{
let iter = iter.into_iter();
let mut map = Map::with_capacity(iter.size_hint().0);
for (key, value) in iter {
map.insert(key.into(), value.into());
}
map
}
}
// Because keys can be inserted in a different order, the safe way to
// compare `Map`s is to iterate over one and check if the other
// has all the same keys.
impl<K, V> PartialEq for Map<K, V>
where
K: Hash + Eq,
V: PartialEq,
{
fn eq(&self, other: &Self) -> bool {
if self.len() != other.len() {
return false;
}
// Faster than `.get()` since we can avoid hashing
for &Node {
ref value, hash,..
} in self.store.iter()
{
if let Hit(idx) = other.find(hash) {
if &other.store[idx].value == value |
}
return false;
}
true
}
}
/// An iterator over the entries of a `Map`.
///
/// This struct is created by the [`iter`](./struct.Map.html#method.iter)
/// method on [`Map`](./struct.Map.html). See its documentation for more.
pub struct Iter<'a, K, V> {
inner: slice::Iter<'a, Node<K, V>>,
}
/// A mutable iterator over the entries of a `Map`.
///
/// This struct is created by the [`iter_mut`](./struct.Map.html#method.iter_mut)
/// method on [`Map`](./struct.Map.html). See its documentation for more.
pub struct IterMut<'a, K, V> {
inner: slice::IterMut<'a, Node<K, V>>,
}
impl<K, V> Iter<'_, K, V> {
/// Create an empty iterator that always returns `None` | {
continue;
} | conditional_block |
lib.rs | //! A lock-free, eventually consistent, concurrent multi-value map.
//!
//! This map implementation allows reads and writes to execute entirely in parallel, with no
//! implicit synchronization overhead. Reads never take locks on their critical path, and neither
//! do writes assuming there is a single writer (multi-writer is possible using a `Mutex`), which
//! significantly improves performance under contention. See the [`left-right` crate](left_right)
//! for details on the underlying concurrency primitive.
//!
//! The trade-off exposed by this type is one of eventual consistency: writes are not visible to
//! readers except following explicit synchronization. Specifically, readers only see the
//! operations that preceded the last call to `WriteHandle::refresh` by a writer. This lets
//! writers decide how stale they are willing to let reads get. They can refresh the map after
//! every write to emulate a regular concurrent `HashMap`, or they can refresh only occasionally to
//! reduce the synchronization overhead at the cost of stale reads.
//!
//! For read-heavy workloads, the scheme used by this module is particularly useful. Writers can
//! afford to refresh after every write, which provides up-to-date reads, and readers remain fast
//! as they do not need to ever take locks.
//!
//! The map is multi-value, meaning that every key maps to a *collection* of values. This
//! introduces some memory cost by adding a layer of indirection through a `Vec` for each value,
//! but enables more advanced use. This choice was made as it would not be possible to emulate such
//! functionality on top of the semantics of this map (think about it -- what would the operational
//! log contain?).
//!
//! To facilitate more advanced use-cases, each of the two maps also carries some customizable
//! meta-information. The writers may update this at will, and when a refresh happens, the current
//! meta will also be made visible to readers. This could be useful, for example, to indicate what
//! time the refresh happened.
//!
//! # Features
//!
//! - `eviction`: Gives you access to [`WriteHandle::empty_random`] to empty out randomly chosen
//! keys from the map.
//! - `amortize`: Amortizes the cost of resizes in the underlying data structures. See
//! [`griddle`](https://github.com/jonhoo/griddle/) and
//! [`atone`](https://github.com/jonhoo/atone/) for details. This requires a nightly compiler
//! [for the time being](https://docs.rs/indexmap-amortized/1.0/indexmap_amortized/#rust-version).
//!
//!
//! # Examples
//!
//! Single-reader, single-writer
//!
//! ```
//! // new will use the default HashMap hasher, and a meta of ()
//! // note that we get separate read and write handles
//! // the read handle can be cloned to have more readers
//! let (mut book_reviews_w, book_reviews_r) = evmap::new();
//!
//! // review some books.
//! book_reviews_w.insert("Adventures of Huckleberry Finn", "My favorite book.");
//! book_reviews_w.insert("Grimms' Fairy Tales", "Masterpiece.");
//! book_reviews_w.insert("Pride and Prejudice", "Very enjoyable.");
//! book_reviews_w.insert("The Adventures of Sherlock Holmes", "Eye lyked it alot.");
//!
//! // at this point, reads from book_reviews_r will not see any of the reviews!
//! assert_eq!(book_reviews_r.len(), 0);
//! // we need to refresh first to make the writes visible
//! book_reviews_w.publish();
//! assert_eq!(book_reviews_r.len(), 4);
//! // reads will now return Some() because the map has been initialized
//! assert_eq!(book_reviews_r.get("Grimms' Fairy Tales").map(|rs| rs.len()), Some(1));
//!
//! // remember, this is a multi-value map, so we can have many reviews
//! book_reviews_w.insert("Grimms' Fairy Tales", "Eh, the title seemed weird.");
//! book_reviews_w.insert("Pride and Prejudice", "Too many words.");
//!
//! // but again, new writes are not yet visible
//! assert_eq!(book_reviews_r.get("Grimms' Fairy Tales").map(|rs| rs.len()), Some(1));
//!
//! // we need to refresh first
//! book_reviews_w.publish();
//! assert_eq!(book_reviews_r.get("Grimms' Fairy Tales").map(|rs| rs.len()), Some(2));
//!
//! // oops, this review has a lot of spelling mistakes, let's delete it.
//! // remove_entry deletes *all* reviews (though in this case, just one)
//! book_reviews_w.remove_entry("The Adventures of Sherlock Holmes");
//! // but again, it's not visible to readers until we refresh
//! assert_eq!(book_reviews_r.get("The Adventures of Sherlock Holmes").map(|rs| rs.len()), Some(1));
//! book_reviews_w.publish();
//! assert_eq!(book_reviews_r.get("The Adventures of Sherlock Holmes").map(|rs| rs.len()), None);
//!
//! // look up the values associated with some keys.
//! let to_find = ["Pride and Prejudice", "Alice's Adventure in Wonderland"];
//! for book in &to_find {
//! if let Some(reviews) = book_reviews_r.get(book) {
//! for review in &*reviews {
//! println!("{}: {}", book, review);
//! }
//! } else {
//! println!("{} is unreviewed.", book);
//! }
//! }
//!
//! // iterate over everything.
//! for (book, reviews) in &book_reviews_r.enter().unwrap() {
//! for review in reviews {
//! println!("{}: \"{}\"", book, review);
//! }
//! }
//! ```
//!
//! Reads from multiple threads are possible by cloning the `ReadHandle`.
//!
//! ```
//! use std::thread;
//! let (mut book_reviews_w, book_reviews_r) = evmap::new();
//!
//! // start some readers
//! let readers: Vec<_> = (0..4).map(|_| {
//! let r = book_reviews_r.clone();
//! thread::spawn(move || {
//! loop {
//! let l = r.len();
//! if l == 0 {
//! thread::yield_now();
//! } else {
//! // the reader will either see all the reviews,
//! // or none of them, since refresh() is atomic.
//! assert_eq!(l, 4);
//! break;
//! }
//! }
//! })
//! }).collect();
//!
//! // do some writes
//! book_reviews_w.insert("Adventures of Huckleberry Finn", "My favorite book.");
//! book_reviews_w.insert("Grimms' Fairy Tales", "Masterpiece.");
//! book_reviews_w.insert("Pride and Prejudice", "Very enjoyable.");
//! book_reviews_w.insert("The Adventures of Sherlock Holmes", "Eye lyked it alot.");
//! // expose the writes
//! book_reviews_w.publish();
//!
//! // you can read through the write handle
//! assert_eq!(book_reviews_w.len(), 4);
//!
//! // the original read handle still works too
//! assert_eq!(book_reviews_r.len(), 4);
//!
//! // all the threads should eventually see .len() == 4
//! for r in readers.into_iter() {
//! assert!(r.join().is_ok());
//! }
//! ```
//!
//! If multiple writers are needed, the `WriteHandle` must be protected by a `Mutex`.
//!
//! ```
//! use std::thread;
//! use std::sync::{Arc, Mutex};
//! let (mut book_reviews_w, book_reviews_r) = evmap::new();
//!
//! // start some writers.
//! // since evmap does not support concurrent writes, we need
//! // to protect the write handle by a mutex.
//! let w = Arc::new(Mutex::new(book_reviews_w));
//! let writers: Vec<_> = (0..4).map(|i| {
//! let w = w.clone();
//! thread::spawn(move || {
//! let mut w = w.lock().unwrap();
//! w.insert(i, true);
//! w.publish();
//! })
//! }).collect();
//!
//! // eventually we should see all the writes
//! while book_reviews_r.len() < 4 { thread::yield_now(); };
//!
//! // all the threads should eventually finish writing
//! for w in writers.into_iter() {
//! assert!(w.join().is_ok());
//! }
//! ```
//!
//! [`ReadHandle`] is not `Sync` as sharing a single instance amongst threads would introduce a
//! significant performance bottleneck. A fresh `ReadHandle` needs to be created for each thread
//! either by cloning a [`ReadHandle`] or from a [`handles::ReadHandleFactory`]. For further
//! information, see [`left_right::ReadHandle`].
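//!
//! A rough sketch of that factory pattern; the `factory()` and `handle()` method
//! names here are assumed from the `left-right` documentation rather than taken
//! from this file, so treat the snippet as illustrative only:
//!
//! ```ignore
//! let (w, r) = evmap::new::<&str, u32>();
//! let factory = r.factory();
//! let reader = std::thread::spawn(move || {
//!     // each thread builds its own ReadHandle from the factory
//!     let r = factory.handle();
//!     r.len()
//! });
//! ```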
//!
//! # Implementation
//!
//! Under the hood, the map is implemented using two regular `HashMap`s and some magic. Take a look
//! at [`left-right`](left_right) for a much more in-depth discussion. Since the implementation
//! uses regular `HashMap`s under the hood, table resizing is fully supported. It does, however,
//! also mean that the memory usage of this implementation is approximately twice that of a
//! regular `HashMap`, and more if writers rarely refresh after writing.
//!
//! # Value storage
//!
//! The values for each key in the map are stored in [`refs::Values`]. Conceptually, each `Values`
//! is a _bag_ or _multiset_; it can store multiple copies of the same value. `evmap` applies some
//! cleverness in an attempt to reduce unnecessary allocations and keep the cost of operations on
//! even large value-bags small. For small bags, `Values` uses the `smallvec` crate. This avoids
//! allocation entirely for single-element bags, and uses a `Vec` if the bag is relatively small.
//! For large bags, `Values` uses the `hashbag` crate, which enables `evmap` to efficiently look up
//! and remove specific elements in the value bag. For bags larger than one element, but smaller
//! than the threshold for moving to `hashbag`, we use `smallvec` to avoid unnecessary hashing.
//! Operations such as `Fit` and `Replace` will automatically switch back to the inline storage if
//! possible. This is ideal for maps that mostly use one element per key, as it can improve
//! memory locality with less indirection.
#![warn(
missing_docs,
rust_2018_idioms,
missing_debug_implementations,
broken_intra_doc_links
)]
#![allow(clippy::type_complexity)]
// This _should_ detect if we ever accidentally leak aliasing::NoDrop.
// But, currently, it does not..
#![deny(unreachable_pub)]
#![cfg_attr(docsrs, feature(doc_cfg))]
use crate::inner::Inner;
use crate::read::ReadHandle;
use crate::write::WriteHandle;
use left_right::aliasing::Aliased;
use std::collections::hash_map::RandomState;
use std::fmt;
use std::hash::{BuildHasher, Hash};
mod inner;
mod read;
mod stable_hash_eq;
mod values;
mod write;
pub use stable_hash_eq::StableHashEq;
/// Handles to the read and write halves of an `evmap`.
pub mod handles {
pub use crate::write::WriteHandle;
// These cannot use ::{..} syntax because of
// https://github.com/rust-lang/rust/issues/57411
pub use crate::read::ReadHandle;
pub use crate::read::ReadHandleFactory;
}
/// Helper types that give access to values inside the read half of an `evmap`.
pub mod refs {
// Same here, ::{..} won't work.
pub use super::values::Values;
pub use crate::read::MapReadRef;
pub use crate::read::ReadGuardIter;
// Expose `ReadGuard` since it has useful methods the user will likely care about.
#[doc(inline)]
pub use left_right::ReadGuard;
}
// NOTE: It is _critical_ that this module is not public.
mod aliasing;
/// Options for how to initialize the map.
///
/// In particular, the options dictate the hashing function, meta type, and initial capacity of the
/// map.
pub struct Options<M, S>
where
S: BuildHasher,
{
meta: M,
hasher: S,
capacity: Option<usize>,
}
impl<M, S> fmt::Debug for Options<M, S>
where
S: BuildHasher,
M: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Options")
.field("meta", &self.meta)
.field("capacity", &self.capacity)
.finish()
}
}
impl Default for Options<(), RandomState> {
fn default() -> Self {
Options {
meta: (),
hasher: RandomState::default(),
capacity: None,
}
}
}
impl<M, S> Options<M, S>
where
S: BuildHasher,
{
/// Set the initial meta value for the map.
pub fn with_meta<M2>(self, meta: M2) -> Options<M2, S> {
Options {
meta,
hasher: self.hasher,
capacity: self.capacity,
}
}
/// Set the hasher used for the map.
///
/// # Safety
///
/// This method is safe to call as long as the given hasher is deterministic. That is, it must
/// yield the same hash if given the same sequence of inputs.
pub unsafe fn with_hasher<S2>(self, hash_builder: S2) -> Options<M, S2>
where
S2: BuildHasher + Clone,
{
Options {
meta: self.meta,
hasher: hash_builder,
capacity: self.capacity,
}
}
/// Set the initial capacity for the map.
pub fn with_capacity(self, capacity: usize) -> Options<M, S> {
Options {
meta: self.meta,
hasher: self.hasher,
capacity: Some(capacity),
}
}
/// Create the map, and construct the read and write handles used to access it.
///
/// If you want to use arbitrary types for the keys and values, use [`assert_stable`][Options::assert_stable].
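///
/// # Examples
///
/// A short sketch of the builder in use (the types and capacity are arbitrary):
///
/// ```
/// let (mut w, r) = evmap::Options::default()
///     .with_capacity(128)
///     .construct::<&str, u32>();
///
/// w.insert("x", 1);
/// w.publish();
/// assert_eq!(r.get("x").map(|values| values.len()), Some(1));
/// ```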
#[allow(clippy::type_complexity)]
pub fn construct<K, V>(self) -> (WriteHandle<K, V, M, S>, ReadHandle<K, V, M, S>)
where
K: StableHashEq + Clone,
S: BuildHasher + Clone,
V: StableHashEq,
M: 'static + Clone,
{
unsafe { self.assert_stable() }
}
/// Create the map, and construct the read and write handles used to access it.
///
/// # Safety
///
/// This method is safe to call as long as the implementation of `Hash` and `Eq` for both `K`
/// and `V` are deterministic. That is, they must always yield the same result if given the
/// same inputs. For keys of type `K`, the result must also be consistent between different clones
/// of the same key.
#[allow(clippy::type_complexity)]
pub unsafe fn | <K, V>(self) -> (WriteHandle<K, V, M, S>, ReadHandle<K, V, M, S>)
where
K: Eq + Hash + Clone,
S: BuildHasher + Clone,
V: Eq + Hash,
M: 'static + Clone,
{
let inner = if let Some(cap) = self.capacity {
Inner::with_capacity_and_hasher(self.meta, cap, self.hasher)
} else {
Inner::with_hasher(self.meta, self.hasher)
};
let (mut w, r) = left_right::new_from_empty(inner);
w.append(write::Operation::MarkReady);
(WriteHandle::new(w), ReadHandle::new(r))
}
}
/// Create an empty eventually consistent map.
///
/// Use the [`Options`](./struct.Options.html) builder for more control over initialization.
///
/// If you want to use arbitrary types for the keys and values, use [`new_assert_stable`].
#[allow(clippy::type_complexity)]
pub fn new<K, V>() -> (
WriteHandle<K, V, (), RandomState>,
ReadHandle<K, V, (), RandomState>,
)
where
K: StableHashEq + Clone,
V: StableHashEq,
{
Options::default().construct()
}
/// Create an empty eventually consistent map.
///
/// Use the [`Options`](./struct.Options.html) builder for more control over initialization.
///
/// # Safety
///
/// This method is safe to call as long as the implementation of `Hash` and `Eq` for both `K` and
/// `V` are deterministic. That is, they must always yield the same result if given the same
/// inputs. For keys of type `K`, the result must also be consistent between different clones
/// of the same key.
#[allow(clippy::type_complexity)]
pub unsafe fn new_assert_stable<K, V>() -> (
WriteHandle<K, V, (), RandomState>,
ReadHandle<K, V, (), RandomState>,
)
where
K: Eq + Hash + Clone,
V: Eq + Hash,
{
Options::default().assert_stable()
}
/// Create an empty eventually consistent map with meta information and custom hasher.
///
/// Use the [`Options`](./struct.Options.html) builder for more control over initialization.
///
/// # Safety
///
/// This method is safe to call as long as the implementation of `Hash` and `Eq` for both `K` and
/// `V`, and the implementation of `BuildHasher` for `S` and [`Hasher`][std::hash::Hasher]
/// for <code>S::[Hasher][BuildHasher::Hasher]</code> are deterministic. That is, they must always
/// yield the same result if given the same inputs. For keys of type `K` and hashers of type `S`,
/// their behavior must also be consistent between different clones of the same value.
#[allow(clippy::type_complexity)]
pub unsafe fn with_hasher<K, V, M, S>(
meta: M,
hasher: S,
) -> (WriteHandle<K, V, M, S>, ReadHandle<K, V, M, S>)
where
K: Eq + Hash + Clone,
V: Eq + Hash,
M: 'static + Clone,
S: BuildHasher + Clone,
{
Options::default()
.with_hasher(hasher)
.with_meta(meta)
.assert_stable()
}
| assert_stable | identifier_name |
lib.rs | //! A lock-free, eventually consistent, concurrent multi-value map.
//!
//! This map implementation allows reads and writes to execute entirely in parallel, with no
//! implicit synchronization overhead. Reads never take locks on their critical path, and neither
//! do writes assuming there is a single writer (multi-writer is possible using a `Mutex`), which
//! significantly improves performance under contention. See the [`left-right` crate](left_right)
//! for details on the underlying concurrency primitive.
//!
//! The trade-off exposed by this type is one of eventual consistency: writes are not visible to
//! readers except following explicit synchronization. Specifically, readers only see the
//! operations that preceded the last call to `WriteHandle::refresh` by a writer. This lets
//! writers decide how stale they are willing to let reads get. They can refresh the map after
//! every write to emulate a regular concurrent `HashMap`, or they can refresh only occasionally to
//! reduce the synchronization overhead at the cost of stale reads.
//!
//! For read-heavy workloads, the scheme used by this module is particularly useful. Writers can
//! afford to refresh after every write, which provides up-to-date reads, and readers remain fast
//! as they do not need to ever take locks.
//!
//! The map is multi-value, meaning that every key maps to a *collection* of values. This
//! introduces some memory cost by adding a layer of indirection through a `Vec` for each value,
//! but enables more advanced use. This choice was made as it would not be possible to emulate such
//! functionality on top of the semantics of this map (think about it -- what would the operational
//! log contain?).
//!
//! To facilitate more advanced use-cases, each of the two maps also carries some customizable
//! meta-information. The writers may update this at will, and when a refresh happens, the current
//! meta will also be made visible to readers. This could be useful, for example, to indicate what
//! time the refresh happened.
//!
//! # Features
//!
//! - `eviction`: Gives you access to [`WriteHandle::empty_random`] to empty out randomly chosen
//! keys from the map.
//! - `amortize`: Amortizes the cost of resizes in the underlying data structures. See
//! [`griddle`](https://github.com/jonhoo/griddle/) and
//! [`atone`](https://github.com/jonhoo/atone/) for details. This requires a nightly compiler
//! [for the time being](https://docs.rs/indexmap-amortized/1.0/indexmap_amortized/#rust-version).
//!
//!
//! # Examples
//!
//! Single-reader, single-writer
//!
//! ```
//! // new will use the default HashMap hasher, and a meta of ()
//! // note that we get separate read and write handles
//! // the read handle can be cloned to have more readers
//! let (mut book_reviews_w, book_reviews_r) = evmap::new();
//!
//! // review some books.
//! book_reviews_w.insert("Adventures of Huckleberry Finn", "My favorite book.");
//! book_reviews_w.insert("Grimms' Fairy Tales", "Masterpiece.");
//! book_reviews_w.insert("Pride and Prejudice", "Very enjoyable.");
//! book_reviews_w.insert("The Adventures of Sherlock Holmes", "Eye lyked it alot.");
//!
//! // at this point, reads from book_reviews_r will not see any of the reviews!
//! assert_eq!(book_reviews_r.len(), 0);
//! // we need to refresh first to make the writes visible
//! book_reviews_w.publish();
//! assert_eq!(book_reviews_r.len(), 4);
//! // reads will now return Some() because the map has been initialized
//! assert_eq!(book_reviews_r.get("Grimms' Fairy Tales").map(|rs| rs.len()), Some(1));
//!
//! // remember, this is a multi-value map, so we can have many reviews
//! book_reviews_w.insert("Grimms' Fairy Tales", "Eh, the title seemed weird.");
//! book_reviews_w.insert("Pride and Prejudice", "Too many words.");
//!
//! // but again, new writes are not yet visible
//! assert_eq!(book_reviews_r.get("Grimms' Fairy Tales").map(|rs| rs.len()), Some(1));
//!
//! // we need to refresh first
//! book_reviews_w.publish();
//! assert_eq!(book_reviews_r.get("Grimms' Fairy Tales").map(|rs| rs.len()), Some(2));
//!
//! // oops, this review has a lot of spelling mistakes, let's delete it.
//! // remove_entry deletes *all* reviews (though in this case, just one)
//! book_reviews_w.remove_entry("The Adventures of Sherlock Holmes");
//! // but again, it's not visible to readers until we refresh
//! assert_eq!(book_reviews_r.get("The Adventures of Sherlock Holmes").map(|rs| rs.len()), Some(1));
//! book_reviews_w.publish();
//! assert_eq!(book_reviews_r.get("The Adventures of Sherlock Holmes").map(|rs| rs.len()), None);
//!
//! // look up the values associated with some keys.
//! let to_find = ["Pride and Prejudice", "Alice's Adventure in Wonderland"];
//! for book in &to_find {
//! if let Some(reviews) = book_reviews_r.get(book) {
//! for review in &*reviews {
//! println!("{}: {}", book, review);
//! }
//! } else {
//! println!("{} is unreviewed.", book);
//! }
//! }
//!
//! // iterate over everything.
//! for (book, reviews) in &book_reviews_r.enter().unwrap() {
//! for review in reviews {
//! println!("{}: \"{}\"", book, review);
//! }
//! }
//! ```
//!
//! Reads from multiple threads are possible by cloning the `ReadHandle`.
//!
//! ```
//! use std::thread;
//! let (mut book_reviews_w, book_reviews_r) = evmap::new();
//!
//! // start some readers
//! let readers: Vec<_> = (0..4).map(|_| {
//! let r = book_reviews_r.clone();
//! thread::spawn(move || {
//! loop {
//! let l = r.len();
//! if l == 0 {
//! thread::yield_now();
//! } else {
//! // the reader will either see all the reviews,
//! // or none of them, since refresh() is atomic.
//! assert_eq!(l, 4);
//! break;
//! }
//! }
//! })
//! }).collect();
//!
//! // do some writes
//! book_reviews_w.insert("Adventures of Huckleberry Finn", "My favorite book.");
//! book_reviews_w.insert("Grimms' Fairy Tales", "Masterpiece.");
//! book_reviews_w.insert("Pride and Prejudice", "Very enjoyable.");
//! book_reviews_w.insert("The Adventures of Sherlock Holmes", "Eye lyked it alot.");
//! // expose the writes
//! book_reviews_w.publish();
//!
//! // you can read through the write handle
//! assert_eq!(book_reviews_w.len(), 4);
//!
//! // the original read handle still works too
//! assert_eq!(book_reviews_r.len(), 4);
//!
//! // all the threads should eventually see .len() == 4
//! for r in readers.into_iter() {
//! assert!(r.join().is_ok());
//! }
//! ```
//!
//! If multiple writers are needed, the `WriteHandle` must be protected by a `Mutex`.
//!
//! ```
//! use std::thread;
//! use std::sync::{Arc, Mutex};
//! let (mut book_reviews_w, book_reviews_r) = evmap::new();
//!
//! // start some writers.
//! // since evmap does not support concurrent writes, we need
//! // to protect the write handle by a mutex.
//! let w = Arc::new(Mutex::new(book_reviews_w));
//! let writers: Vec<_> = (0..4).map(|i| {
//! let w = w.clone();
//! thread::spawn(move || {
//! let mut w = w.lock().unwrap();
//! w.insert(i, true);
//! w.publish();
//! })
//! }).collect();
//!
//! // eventually we should see all the writes
//! while book_reviews_r.len() < 4 { thread::yield_now(); };
//!
//! // all the threads should eventually finish writing
//! for w in writers.into_iter() {
//! assert!(w.join().is_ok());
//! }
//! ```
//!
//! [`ReadHandle`] is not `Sync` as sharing a single instance amongst threads would introduce a
//! significant performance bottleneck. A fresh `ReadHandle` needs to be created for each thread
//! either by cloning a [`ReadHandle`] or from a [`handles::ReadHandleFactory`]. For further
//! information, see [`left_right::ReadHandle`].
//!
//! # Implementation
//!
//! Under the hood, the map is implemented using two regular `HashMap`s and some magic. Take a look
//! at [`left-right`](left_right) for a much more in-depth discussion. Since the implementation
//! uses regular `HashMap`s under the hood, table resizing is fully supported. It does, however,
//! also mean that the memory usage of this implementation is approximately twice that of a
//! regular `HashMap`, and more if writers rarely refresh after writing.
//!
//! # Value storage
//!
//! The values for each key in the map are stored in [`refs::Values`]. Conceptually, each `Values`
//! is a _bag_ or _multiset_; it can store multiple copies of the same value. `evmap` applies some
//! cleverness in an attempt to reduce unnecessary allocations and keep the cost of operations on
//! even large value-bags small. For small bags, `Values` uses the `smallvec` crate. This avoids
//! allocation entirely for single-element bags, and uses a `Vec` if the bag is relatively small.
//! For large bags, `Values` uses the `hashbag` crate, which enables `evmap` to efficiently look up
//! and remove specific elements in the value bag. For bags larger than one element, but smaller
//! than the threshold for moving to `hashbag`, we use `smallvec` to avoid unnecessary hashing.
//! Operations such as `Fit` and `Replace` will automatically switch back to the inline storage if
//! possible. This is ideal for maps that mostly use one element per key, as it can improve
//! memory locality with less indirection.
#![warn(
missing_docs,
rust_2018_idioms,
missing_debug_implementations,
broken_intra_doc_links
)]
#![allow(clippy::type_complexity)]
// This _should_ detect if we ever accidentally leak aliasing::NoDrop.
// But, currently, it does not..
#![deny(unreachable_pub)]
#![cfg_attr(docsrs, feature(doc_cfg))]
use crate::inner::Inner;
use crate::read::ReadHandle;
use crate::write::WriteHandle;
use left_right::aliasing::Aliased;
use std::collections::hash_map::RandomState;
use std::fmt;
use std::hash::{BuildHasher, Hash};
mod inner;
mod read;
mod stable_hash_eq;
mod values;
mod write;
pub use stable_hash_eq::StableHashEq;
/// Handles to the read and write halves of an `evmap`.
pub mod handles {
pub use crate::write::WriteHandle;
// These cannot use ::{..} syntax because of
// https://github.com/rust-lang/rust/issues/57411
pub use crate::read::ReadHandle;
pub use crate::read::ReadHandleFactory;
}
/// Helper types that give access to values inside the read half of an `evmap`.
pub mod refs {
// Same here, ::{..} won't work.
pub use super::values::Values;
pub use crate::read::MapReadRef;
pub use crate::read::ReadGuardIter;
// Expose `ReadGuard` since it has useful methods the user will likely care about.
#[doc(inline)]
pub use left_right::ReadGuard;
}
// NOTE: It is _critical_ that this module is not public.
mod aliasing;
/// Options for how to initialize the map.
///
/// In particular, the options dictate the hashing function, meta type, and initial capacity of the
/// map.
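///
/// A minimal sketch of the builder in use (mirroring the crate-level examples; the key and
/// value types here are chosen purely for illustration):
///
/// ```
/// let (mut w, r) = evmap::Options::default()
///     .with_capacity(16)
///     .construct::<&str, &str>();
/// w.insert("key", "value");
/// w.publish();
/// assert_eq!(r.get("key").map(|vs| vs.len()), Some(1));
/// ```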
pub struct Options<M, S>
where
S: BuildHasher,
{
meta: M,
hasher: S,
capacity: Option<usize>,
}
impl<M, S> fmt::Debug for Options<M, S>
where
S: BuildHasher,
M: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Options")
.field("meta", &self.meta)
.field("capacity", &self.capacity)
.finish()
}
}
impl Default for Options<(), RandomState> {
fn default() -> Self {
Options {
meta: (),
hasher: RandomState::default(),
capacity: None,
}
}
}
impl<M, S> Options<M, S>
where
S: BuildHasher,
{
/// Set the initial meta value for the map.
pub fn with_meta<M2>(self, meta: M2) -> Options<M2, S> {
Options {
meta,
hasher: self.hasher,
capacity: self.capacity,
}
}
/// Set the hasher used for the map.
///
/// # Safety
///
/// This method is safe to call as long as the given hasher is deterministic. That is, it must
/// yield the same hash if given the same sequence of inputs.
pub unsafe fn with_hasher<S2>(self, hash_builder: S2) -> Options<M, S2>
where
S2: BuildHasher + Clone,
{
Options {
meta: self.meta,
hasher: hash_builder,
capacity: self.capacity,
}
}
/// Set the initial capacity for the map.
pub fn with_capacity(self, capacity: usize) -> Options<M, S> {
Options {
meta: self.meta,
hasher: self.hasher,
capacity: Some(capacity),
}
}
/// Create the map, and construct the read and write handles used to access it.
///
/// If you want to use arbitrary types for the keys and values, use [`assert_stable`][Options::assert_stable].
#[allow(clippy::type_complexity)]
pub fn construct<K, V>(self) -> (WriteHandle<K, V, M, S>, ReadHandle<K, V, M, S>)
where
K: StableHashEq + Clone,
S: BuildHasher + Clone,
V: StableHashEq,
M: 'static + Clone,
{
unsafe { self.assert_stable() }
}
/// Create the map, and construct the read and write handles used to access it.
///
/// # Safety
///
/// This method is safe to call as long as the implementation of `Hash` and `Eq` for both `K`
/// and `V` are deterministic. That is, they must always yield the same result if given the
/// same inputs. For keys of type `K`, the result must also be consistent between different clones
/// of the same key.
#[allow(clippy::type_complexity)]
pub unsafe fn assert_stable<K, V>(self) -> (WriteHandle<K, V, M, S>, ReadHandle<K, V, M, S>)
where
K: Eq + Hash + Clone,
S: BuildHasher + Clone,
V: Eq + Hash,
M: 'static + Clone,
{
let inner = if let Some(cap) = self.capacity | else {
Inner::with_hasher(self.meta, self.hasher)
};
let (mut w, r) = left_right::new_from_empty(inner);
w.append(write::Operation::MarkReady);
(WriteHandle::new(w), ReadHandle::new(r))
}
}
/// Create an empty eventually consistent map.
///
/// Use the [`Options`](./struct.Options.html) builder for more control over initialization.
///
/// If you want to use arbitrary types for the keys and values, use [`new_assert_stable`].
#[allow(clippy::type_complexity)]
pub fn new<K, V>() -> (
WriteHandle<K, V, (), RandomState>,
ReadHandle<K, V, (), RandomState>,
)
where
K: StableHashEq + Clone,
V: StableHashEq,
{
Options::default().construct()
}
/// Create an empty eventually consistent map.
///
/// Use the [`Options`](./struct.Options.html) builder for more control over initialization.
///
/// # Safety
///
/// This method is safe to call as long as the implementation of `Hash` and `Eq` for both `K` and
/// `V` are deterministic. That is, they must always yield the same result if given the same
/// inputs. For keys of type `K`, the result must also be consistent between different clones
/// of the same key.
#[allow(clippy::type_complexity)]
pub unsafe fn new_assert_stable<K, V>() -> (
WriteHandle<K, V, (), RandomState>,
ReadHandle<K, V, (), RandomState>,
)
where
K: Eq + Hash + Clone,
V: Eq + Hash,
{
Options::default().assert_stable()
}
/// Create an empty eventually consistent map with meta information and custom hasher.
///
/// Use the [`Options`](./struct.Options.html) builder for more control over initialization.
///
/// # Safety
///
/// This method is safe to call as long as the implementation of `Hash` and `Eq` for both `K` and
/// `V`, and the implementation of `BuildHasher` for `S` and [`Hasher`][std::hash::Hasher]
/// for <code>S::[Hasher][BuildHasher::Hasher]</code> are deterministic. That is, they must always
/// yield the same result if given the same inputs. For keys of type `K` and hashers of type `S`,
/// their behavior must also be consistent between different clones of the same value.
#[allow(clippy::type_complexity)]
pub unsafe fn with_hasher<K, V, M, S>(
meta: M,
hasher: S,
) -> (WriteHandle<K, V, M, S>, ReadHandle<K, V, M, S>)
where
K: Eq + Hash + Clone,
V: Eq + Hash,
M: 'static + Clone,
S: BuildHasher + Clone,
{
Options::default()
.with_hasher(hasher)
.with_meta(meta)
.assert_stable()
}
| {
Inner::with_capacity_and_hasher(self.meta, cap, self.hasher)
} | conditional_block |
lib.rs | //! A lock-free, eventually consistent, concurrent multi-value map.
//!
//! This map implementation allows reads and writes to execute entirely in parallel, with no
//! implicit synchronization overhead. Reads never take locks on their critical path, and neither
//! do writes assuming there is a single writer (multi-writer is possible using a `Mutex`), which
//! significantly improves performance under contention. See the [`left-right` crate](left_right)
//! for details on the underlying concurrency primitive.
//!
//! The trade-off exposed by this type is one of eventual consistency: writes are not visible to
//! readers except following explicit synchronization. Specifically, readers only see the
//! operations that preceded the last call to `WriteHandle::refresh` by a writer. This lets
//! writers decide how stale they are willing to let reads get. They can refresh the map after
//! every write to emulate a regular concurrent `HashMap`, or they can refresh only occasionally to
//! reduce the synchronization overhead at the cost of stale reads.
//!
//! For read-heavy workloads, the scheme used by this module is particularly useful. Writers can
//! afford to refresh after every write, which provides up-to-date reads, and readers remain fast
//! as they do not need to ever take locks.
//!
//! The map is multi-value, meaning that every key maps to a *collection* of values. This
//! introduces some memory cost by adding a layer of indirection through a `Vec` for each value,
//! but enables more advanced use. This choice was made as it would not be possible to emulate such
//! functionality on top of the semantics of this map (think about it -- what would the operational
//! log contain?).
//!
//! To facilitate more advanced use cases, each of the two maps also carries some customizable
//! meta-information. The writers may update this at will, and when a refresh happens, the current
//! meta will also be made visible to readers. This could be useful, for example, to indicate what
//! time the refresh happened.
//!
//! # Features
//!
//! - `eviction`: Gives you access to [`WriteHandle::empty_random`] to empty out randomly chosen
//! keys from the map.
//! - `amortize`: Amortizes the cost of resizes in the underlying data structures. See
//! [`griddle`](https://github.com/jonhoo/griddle/) and
//! [`atone`](https://github.com/jonhoo/atone/) for details. This requires a nightly compiler
//! [for the time being](https://docs.rs/indexmap-amortized/1.0/indexmap_amortized/#rust-version).
//!
//!
//! # Examples
//!
//! Single-reader, single-writer
//!
//! ```
//! // new will use the default HashMap hasher, and a meta of ()
//! // note that we get separate read and write handles
//! // the read handle can be cloned to have more readers
//! let (mut book_reviews_w, book_reviews_r) = evmap::new();
//!
//! // review some books.
//! book_reviews_w.insert("Adventures of Huckleberry Finn", "My favorite book.");
//! book_reviews_w.insert("Grimms' Fairy Tales", "Masterpiece.");
//! book_reviews_w.insert("Pride and Prejudice", "Very enjoyable.");
//! book_reviews_w.insert("The Adventures of Sherlock Holmes", "Eye lyked it alot.");
//!
//! // at this point, reads from book_reviews_r will not see any of the reviews!
//! assert_eq!(book_reviews_r.len(), 0);
//! // we need to refresh first to make the writes visible
//! book_reviews_w.publish();
//! assert_eq!(book_reviews_r.len(), 4);
//! // reads will now return Some() because the map has been initialized
//! assert_eq!(book_reviews_r.get("Grimms' Fairy Tales").map(|rs| rs.len()), Some(1));
//!
//! // remember, this is a multi-value map, so we can have many reviews
//! book_reviews_w.insert("Grimms' Fairy Tales", "Eh, the title seemed weird.");
//! book_reviews_w.insert("Pride and Prejudice", "Too many words.");
//!
//! // but again, new writes are not yet visible
//! assert_eq!(book_reviews_r.get("Grimms' Fairy Tales").map(|rs| rs.len()), Some(1));
//!
//! // we need to refresh first
//! book_reviews_w.publish();
//! assert_eq!(book_reviews_r.get("Grimms' Fairy Tales").map(|rs| rs.len()), Some(2));
//!
//! // oops, this review has a lot of spelling mistakes, let's delete it.
//! // remove_entry deletes *all* reviews (though in this case, just one)
//! book_reviews_w.remove_entry("The Adventures of Sherlock Holmes");
//! // but again, it's not visible to readers until we refresh
//! assert_eq!(book_reviews_r.get("The Adventures of Sherlock Holmes").map(|rs| rs.len()), Some(1));
//! book_reviews_w.publish();
//! assert_eq!(book_reviews_r.get("The Adventures of Sherlock Holmes").map(|rs| rs.len()), None);
//!
//! // look up the values associated with some keys.
//! let to_find = ["Pride and Prejudice", "Alice's Adventure in Wonderland"];
//! for book in &to_find {
//! if let Some(reviews) = book_reviews_r.get(book) {
//! for review in &*reviews {
//! println!("{}: {}", book, review);
//! }
//! } else {
//! println!("{} is unreviewed.", book);
//! }
//! }
//!
//! // iterate over everything.
//! for (book, reviews) in &book_reviews_r.enter().unwrap() {
//! for review in reviews {
//! println!("{}: \"{}\"", book, review);
//! }
//! }
//! ```
//!
//! Reads from multiple threads are possible by cloning the `ReadHandle`.
//!
//! ```
//! use std::thread;
//! let (mut book_reviews_w, book_reviews_r) = evmap::new();
//!
//! // start some readers
//! let readers: Vec<_> = (0..4).map(|_| {
//! let r = book_reviews_r.clone();
//! thread::spawn(move || {
//! loop {
//! let l = r.len();
//! if l == 0 {
//! thread::yield_now();
//! } else {
//! // the reader will either see all the reviews,
//! // or none of them, since refresh() is atomic.
//! assert_eq!(l, 4);
//! break;
//! }
//! }
//! })
//! }).collect();
//!
//! // do some writes
//! book_reviews_w.insert("Adventures of Huckleberry Finn", "My favorite book.");
//! book_reviews_w.insert("Grimms' Fairy Tales", "Masterpiece.");
//! book_reviews_w.insert("Pride and Prejudice", "Very enjoyable.");
//! book_reviews_w.insert("The Adventures of Sherlock Holmes", "Eye lyked it alot.");
//! // expose the writes
//! book_reviews_w.publish();
//!
//! // you can read through the write handle
//! assert_eq!(book_reviews_w.len(), 4);
//!
//! // the original read handle still works too
//! assert_eq!(book_reviews_r.len(), 4);
//!
//! // all the threads should eventually see .len() == 4
//! for r in readers.into_iter() {
//! assert!(r.join().is_ok());
//! }
//! ```
//!
//! If multiple writers are needed, the `WriteHandle` must be protected by a `Mutex`.
//!
//! ```
//! use std::thread;
//! use std::sync::{Arc, Mutex};
//! let (mut book_reviews_w, book_reviews_r) = evmap::new();
//!
//! // start some writers.
//! // since evmap does not support concurrent writes, we need
//! // to protect the write handle by a mutex.
//! let w = Arc::new(Mutex::new(book_reviews_w));
//! let writers: Vec<_> = (0..4).map(|i| {
//! let w = w.clone();
//! thread::spawn(move || {
//! let mut w = w.lock().unwrap();
//! w.insert(i, true);
//! w.publish();
//! })
//! }).collect();
//!
//! // eventually we should see all the writes
//! while book_reviews_r.len() < 4 { thread::yield_now(); };
//!
//! // all the threads should eventually finish writing
//! for w in writers.into_iter() {
//! assert!(w.join().is_ok());
//! }
//! ```
//!
//! [`ReadHandle`] is not `Sync` as sharing a single instance amongst threads would introduce a
//! significant performance bottleneck. A fresh `ReadHandle` needs to be created for each thread
//! either by cloning a [`ReadHandle`] or from a [`handles::ReadHandleFactory`]. For further
//! information, see [`left_right::ReadHandle`].
//!
//! # Implementation
//!
//! Under the hood, the map is implemented using two regular `HashMap`s and some magic. Take a look
//! at [`left-right`](left_right) for a much more in-depth discussion. Since the implementation
//! uses regular `HashMap`s under the hood, table resizing is fully supported. It does, however,
//! also mean that the memory usage of this implementation is approximately twice that of a
//! regular `HashMap`, and more if writers rarely refresh after writing.
//!
//! # Value storage
//!
//! The values for each key in the map are stored in [`refs::Values`]. Conceptually, each `Values`
//! is a _bag_ or _multiset_; it can store multiple copies of the same value. `evmap` applies some
//! cleverness in an attempt to reduce unnecessary allocations and keep the cost of operations on
//! even large value-bags small. For small bags, `Values` uses the `smallvec` crate. This avoids
//! allocation entirely for single-element bags, and uses a `Vec` if the bag is relatively small.
//! For large bags, `Values` uses the `hashbag` crate, which enables `evmap` to efficiently look up
//! and remove specific elements in the value bag. For bags larger than one element, but smaller
//! than the threshold for moving to `hashbag`, we use `smallvec` to avoid unnecessary hashing.
//! Operations such as `Fit` and `Replace` will automatically switch back to the inline storage if
//! possible. This is ideal for maps that mostly use one element per key, as it can improve
//! memory locality with less indirection.
#![warn(
missing_docs,
rust_2018_idioms,
missing_debug_implementations,
broken_intra_doc_links
)]
#![allow(clippy::type_complexity)]
// This _should_ detect if we ever accidentally leak aliasing::NoDrop.
// But, currently, it does not..
#![deny(unreachable_pub)]
#![cfg_attr(docsrs, feature(doc_cfg))]
use crate::inner::Inner;
use crate::read::ReadHandle;
use crate::write::WriteHandle;
use left_right::aliasing::Aliased;
use std::collections::hash_map::RandomState;
use std::fmt;
use std::hash::{BuildHasher, Hash};
mod inner;
mod read;
mod stable_hash_eq;
mod values;
mod write;
pub use stable_hash_eq::StableHashEq;
/// Handles to the read and write halves of an `evmap`.
pub mod handles {
pub use crate::write::WriteHandle;
// These cannot use ::{..} syntax because of
// https://github.com/rust-lang/rust/issues/57411
pub use crate::read::ReadHandle;
pub use crate::read::ReadHandleFactory;
}
/// Helper types that give access to values inside the read half of an `evmap`.
pub mod refs {
// Same here, ::{..} won't work.
pub use super::values::Values;
pub use crate::read::MapReadRef;
pub use crate::read::ReadGuardIter;
// Expose `ReadGuard` since it has useful methods the user will likely care about.
#[doc(inline)]
pub use left_right::ReadGuard;
}
// NOTE: It is _critical_ that this module is not public.
mod aliasing;
/// Options for how to initialize the map.
///
/// In particular, the options dictate the hashing function, meta type, and initial capacity of the
/// map.
pub struct Options<M, S>
where
S: BuildHasher,
{
meta: M,
hasher: S,
capacity: Option<usize>,
}
impl<M, S> fmt::Debug for Options<M, S>
where
S: BuildHasher,
M: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Options")
.field("meta", &self.meta)
.field("capacity", &self.capacity)
.finish()
}
}
impl Default for Options<(), RandomState> {
fn default() -> Self {
Options {
meta: (),
hasher: RandomState::default(),
capacity: None,
}
}
}
impl<M, S> Options<M, S>
where
S: BuildHasher,
{
/// Set the initial meta value for the map.
pub fn with_meta<M2>(self, meta: M2) -> Options<M2, S> {
Options {
meta,
hasher: self.hasher,
capacity: self.capacity,
}
}
/// Set the hasher used for the map.
///
/// # Safety
///
/// This method is safe to call as long as the given hasher is deterministic. That is, it must
/// yield the same hash if given the same sequence of inputs.
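///
/// A sketch of one way to satisfy this (assuming the standard library's
/// `BuildHasherDefault<DefaultHasher>`, whose instances always build hashers from the same
/// fixed state and are therefore deterministic):
///
/// ```
/// use std::collections::hash_map::DefaultHasher;
/// use std::hash::BuildHasherDefault;
///
/// let opts = unsafe {
///     evmap::Options::default().with_hasher(BuildHasherDefault::<DefaultHasher>::default())
/// };
/// let (mut w, r) = opts.construct::<&str, &str>();
/// w.insert("key", "value");
/// w.publish();
/// assert_eq!(r.len(), 1);
/// ```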
pub unsafe fn with_hasher<S2>(self, hash_builder: S2) -> Options<M, S2>
where
S2: BuildHasher + Clone,
{
Options {
meta: self.meta,
hasher: hash_builder,
capacity: self.capacity,
}
}
/// Set the initial capacity for the map.
pub fn with_capacity(self, capacity: usize) -> Options<M, S> {
Options {
meta: self.meta,
hasher: self.hasher,
capacity: Some(capacity),
}
}
/// Create the map, and construct the read and write handles used to access it.
///
/// If you want to use arbitrary types for the keys and values, use [`assert_stable`][Options::assert_stable].
#[allow(clippy::type_complexity)]
pub fn construct<K, V>(self) -> (WriteHandle<K, V, M, S>, ReadHandle<K, V, M, S>)
where
K: StableHashEq + Clone,
S: BuildHasher + Clone,
V: StableHashEq,
M: 'static + Clone,
{
unsafe { self.assert_stable() }
}
/// Create the map, and construct the read and write handles used to access it.
///
/// # Safety
///
/// This method is safe to call as long as the implementation of `Hash` and `Eq` for both `K`
/// and `V` are deterministic. That is, they must always yield the same result if given the | /// of the same key.
#[allow(clippy::type_complexity)]
pub unsafe fn assert_stable<K, V>(self) -> (WriteHandle<K, V, M, S>, ReadHandle<K, V, M, S>)
where
K: Eq + Hash + Clone,
S: BuildHasher + Clone,
V: Eq + Hash,
M: 'static + Clone,
{
let inner = if let Some(cap) = self.capacity {
Inner::with_capacity_and_hasher(self.meta, cap, self.hasher)
} else {
Inner::with_hasher(self.meta, self.hasher)
};
let (mut w, r) = left_right::new_from_empty(inner);
w.append(write::Operation::MarkReady);
(WriteHandle::new(w), ReadHandle::new(r))
}
}
/// Create an empty eventually consistent map.
///
/// Use the [`Options`](./struct.Options.html) builder for more control over initialization.
///
/// If you want to use arbitrary types for the keys and values, use [`new_assert_stable`].
#[allow(clippy::type_complexity)]
pub fn new<K, V>() -> (
WriteHandle<K, V, (), RandomState>,
ReadHandle<K, V, (), RandomState>,
)
where
K: StableHashEq + Clone,
V: StableHashEq,
{
Options::default().construct()
}
/// Create an empty eventually consistent map.
///
/// Use the [`Options`](./struct.Options.html) builder for more control over initialization.
///
/// # Safety
///
/// This method is safe to call as long as the implementation of `Hash` and `Eq` for both `K` and
/// `V` are deterministic. That is, they must always yield the same result if given the same
/// inputs. For keys of type `K`, the result must also be consistent between different clones
/// of the same key.
#[allow(clippy::type_complexity)]
pub unsafe fn new_assert_stable<K, V>() -> (
WriteHandle<K, V, (), RandomState>,
ReadHandle<K, V, (), RandomState>,
)
where
K: Eq + Hash + Clone,
V: Eq + Hash,
{
Options::default().assert_stable()
}
/// Create an empty eventually consistent map with meta information and custom hasher.
///
/// Use the [`Options`](./struct.Options.html) builder for more control over initialization.
///
/// # Safety
///
/// This method is safe to call as long as the implementation of `Hash` and `Eq` for both `K` and
/// `V`, and the implementation of `BuildHasher` for `S` and [`Hasher`][std::hash::Hasher]
/// for <code>S::[Hasher][BuildHasher::Hasher]</code> are deterministic. That is, they must always
/// yield the same result if given the same inputs. For keys of type `K` and hashers of type `S`,
/// their behavior must also be consistent between different clones of the same value.
#[allow(clippy::type_complexity)]
pub unsafe fn with_hasher<K, V, M, S>(
meta: M,
hasher: S,
) -> (WriteHandle<K, V, M, S>, ReadHandle<K, V, M, S>)
where
K: Eq + Hash + Clone,
V: Eq + Hash,
M: 'static + Clone,
S: BuildHasher + Clone,
{
Options::default()
.with_hasher(hasher)
.with_meta(meta)
.assert_stable()
} | /// same inputs. For keys of type `K`, the result must also be consistent between different clones | random_line_split |
lib.rs | //! A lock-free, eventually consistent, concurrent multi-value map.
//!
//! This map implementation allows reads and writes to execute entirely in parallel, with no
//! implicit synchronization overhead. Reads never take locks on their critical path, and neither
//! do writes assuming there is a single writer (multi-writer is possible using a `Mutex`), which
//! significantly improves performance under contention. See the [`left-right` crate](left_right)
//! for details on the underlying concurrency primitive.
//!
//! The trade-off exposed by this type is one of eventual consistency: writes are not visible to
//! readers except following explicit synchronization. Specifically, readers only see the
//! operations that preceded the last call to `WriteHandle::refresh` by a writer. This lets
//! writers decide how stale they are willing to let reads get. They can refresh the map after
//! every write to emulate a regular concurrent `HashMap`, or they can refresh only occasionally to
//! reduce the synchronization overhead at the cost of stale reads.
//!
//! For read-heavy workloads, the scheme used by this module is particularly useful. Writers can
//! afford to refresh after every write, which provides up-to-date reads, and readers remain fast
//! as they do not need to ever take locks.
//!
//! The map is multi-value, meaning that every key maps to a *collection* of values. This
//! introduces some memory cost by adding a layer of indirection through a `Vec` for each value,
//! but enables more advanced use. This choice was made as it would not be possible to emulate such
//! functionality on top of the semantics of this map (think about it -- what would the operational
//! log contain?).
//!
//! To facilitate more advanced use cases, each of the two maps also carries some customizable
//! meta-information. The writers may update this at will, and when a refresh happens, the current
//! meta will also be made visible to readers. This could be useful, for example, to indicate what
//! time the refresh happened.
//!
//! # Features
//!
//! - `eviction`: Gives you access to [`WriteHandle::empty_random`] to empty out randomly chosen
//! keys from the map.
//! - `amortize`: Amortizes the cost of resizes in the underlying data structures. See
//! [`griddle`](https://github.com/jonhoo/griddle/) and
//! [`atone`](https://github.com/jonhoo/atone/) for details. This requires a nightly compiler
//! [for the time being](https://docs.rs/indexmap-amortized/1.0/indexmap_amortized/#rust-version).
//!
//!
//! # Examples
//!
//! Single-reader, single-writer
//!
//! ```
//! // new will use the default HashMap hasher, and a meta of ()
//! // note that we get separate read and write handles
//! // the read handle can be cloned to have more readers
//! let (mut book_reviews_w, book_reviews_r) = evmap::new();
//!
//! // review some books.
//! book_reviews_w.insert("Adventures of Huckleberry Finn", "My favorite book.");
//! book_reviews_w.insert("Grimms' Fairy Tales", "Masterpiece.");
//! book_reviews_w.insert("Pride and Prejudice", "Very enjoyable.");
//! book_reviews_w.insert("The Adventures of Sherlock Holmes", "Eye lyked it alot.");
//!
//! // at this point, reads from book_reviews_r will not see any of the reviews!
//! assert_eq!(book_reviews_r.len(), 0);
//! // we need to refresh first to make the writes visible
//! book_reviews_w.publish();
//! assert_eq!(book_reviews_r.len(), 4);
//! // reads will now return Some() because the map has been initialized
//! assert_eq!(book_reviews_r.get("Grimms' Fairy Tales").map(|rs| rs.len()), Some(1));
//!
//! // remember, this is a multi-value map, so we can have many reviews
//! book_reviews_w.insert("Grimms' Fairy Tales", "Eh, the title seemed weird.");
//! book_reviews_w.insert("Pride and Prejudice", "Too many words.");
//!
//! // but again, new writes are not yet visible
//! assert_eq!(book_reviews_r.get("Grimms' Fairy Tales").map(|rs| rs.len()), Some(1));
//!
//! // we need to refresh first
//! book_reviews_w.publish();
//! assert_eq!(book_reviews_r.get("Grimms' Fairy Tales").map(|rs| rs.len()), Some(2));
//!
//! // oops, this review has a lot of spelling mistakes, let's delete it.
//! // remove_entry deletes *all* reviews (though in this case, just one)
//! book_reviews_w.remove_entry("The Adventures of Sherlock Holmes");
//! // but again, it's not visible to readers until we refresh
//! assert_eq!(book_reviews_r.get("The Adventures of Sherlock Holmes").map(|rs| rs.len()), Some(1));
//! book_reviews_w.publish();
//! assert_eq!(book_reviews_r.get("The Adventures of Sherlock Holmes").map(|rs| rs.len()), None);
//!
//! // look up the values associated with some keys.
//! let to_find = ["Pride and Prejudice", "Alice's Adventure in Wonderland"];
//! for book in &to_find {
//! if let Some(reviews) = book_reviews_r.get(book) {
//! for review in &*reviews {
//! println!("{}: {}", book, review);
//! }
//! } else {
//! println!("{} is unreviewed.", book);
//! }
//! }
//!
//! // iterate over everything.
//! for (book, reviews) in &book_reviews_r.enter().unwrap() {
//! for review in reviews {
//! println!("{}: \"{}\"", book, review);
//! }
//! }
//! ```
//!
//! Reads from multiple threads are possible by cloning the `ReadHandle`.
//!
//! ```
//! use std::thread;
//! let (mut book_reviews_w, book_reviews_r) = evmap::new();
//!
//! // start some readers
//! let readers: Vec<_> = (0..4).map(|_| {
//! let r = book_reviews_r.clone();
//! thread::spawn(move || {
//! loop {
//! let l = r.len();
//! if l == 0 {
//! thread::yield_now();
//! } else {
//! // the reader will either see all the reviews,
//! // or none of them, since refresh() is atomic.
//! assert_eq!(l, 4);
//! break;
//! }
//! }
//! })
//! }).collect();
//!
//! // do some writes
//! book_reviews_w.insert("Adventures of Huckleberry Finn", "My favorite book.");
//! book_reviews_w.insert("Grimms' Fairy Tales", "Masterpiece.");
//! book_reviews_w.insert("Pride and Prejudice", "Very enjoyable.");
//! book_reviews_w.insert("The Adventures of Sherlock Holmes", "Eye lyked it alot.");
//! // expose the writes
//! book_reviews_w.publish();
//!
//! // you can read through the write handle
//! assert_eq!(book_reviews_w.len(), 4);
//!
//! // the original read handle still works too
//! assert_eq!(book_reviews_r.len(), 4);
//!
//! // all the threads should eventually see .len() == 4
//! for r in readers.into_iter() {
//! assert!(r.join().is_ok());
//! }
//! ```
//!
//! If multiple writers are needed, the `WriteHandle` must be protected by a `Mutex`.
//!
//! ```
//! use std::thread;
//! use std::sync::{Arc, Mutex};
//! let (mut book_reviews_w, book_reviews_r) = evmap::new();
//!
//! // start some writers.
//! // since evmap does not support concurrent writes, we need
//! // to protect the write handle by a mutex.
//! let w = Arc::new(Mutex::new(book_reviews_w));
//! let writers: Vec<_> = (0..4).map(|i| {
//! let w = w.clone();
//! thread::spawn(move || {
//! let mut w = w.lock().unwrap();
//! w.insert(i, true);
//! w.publish();
//! })
//! }).collect();
//!
//! // eventually we should see all the writes
//! while book_reviews_r.len() < 4 { thread::yield_now(); };
//!
//! // all the threads should eventually finish writing
//! for w in writers.into_iter() {
//! assert!(w.join().is_ok());
//! }
//! ```
//!
//! [`ReadHandle`] is not `Sync` as sharing a single instance amongst threads would introduce a
//! significant performance bottleneck. A fresh `ReadHandle` needs to be created for each thread
//! either by cloning a [`ReadHandle`] or from a [`handles::ReadHandleFactory`]. For further
//! information, see [`left_right::ReadHandle`].
//!
//! # Implementation
//!
//! Under the hood, the map is implemented using two regular `HashMap`s and some magic. Take a look
//! at [`left-right`](left_right) for a much more in-depth discussion. Since the implementation
//! uses regular `HashMap`s under the hood, table resizing is fully supported. It does, however,
//! also mean that the memory usage of this implementation is approximately twice that of a
//! regular `HashMap`, and more if writers rarely refresh after writing.
//!
//! # Value storage
//!
//! The values for each key in the map are stored in [`refs::Values`]. Conceptually, each `Values`
//! is a _bag_ or _multiset_; it can store multiple copies of the same value. `evmap` applies some
//! cleverness in an attempt to reduce unnecessary allocations and keep the cost of operations on
//! even large value-bags small. For small bags, `Values` uses the `smallvec` crate. This avoids
//! allocation entirely for single-element bags, and uses a `Vec` if the bag is relatively small.
//! For large bags, `Values` uses the `hashbag` crate, which enables `evmap` to efficiently look up
//! and remove specific elements in the value bag. For bags larger than one element, but smaller
//! than the threshold for moving to `hashbag`, we use `smallvec` to avoid unnecessary hashing.
//! Operations such as `Fit` and `Replace` will automatically switch back to the inline storage if
//! possible. This is ideal for maps that mostly use one element per key, as it can improve
//! memory locality with less indirection.
#![warn(
missing_docs,
rust_2018_idioms,
missing_debug_implementations,
broken_intra_doc_links
)]
#![allow(clippy::type_complexity)]
// This _should_ detect if we ever accidentally leak aliasing::NoDrop.
// But, currently, it does not..
#![deny(unreachable_pub)]
#![cfg_attr(docsrs, feature(doc_cfg))]
use crate::inner::Inner;
use crate::read::ReadHandle;
use crate::write::WriteHandle;
use left_right::aliasing::Aliased;
use std::collections::hash_map::RandomState;
use std::fmt;
use std::hash::{BuildHasher, Hash};
mod inner;
mod read;
mod stable_hash_eq;
mod values;
mod write;
pub use stable_hash_eq::StableHashEq;
/// Handles to the read and write halves of an `evmap`.
pub mod handles {
pub use crate::write::WriteHandle;
// These cannot use ::{..} syntax because of
// https://github.com/rust-lang/rust/issues/57411
pub use crate::read::ReadHandle;
pub use crate::read::ReadHandleFactory;
}
/// Helper types that give access to values inside the read half of an `evmap`.
pub mod refs {
// Same here, ::{..} won't work.
pub use super::values::Values;
pub use crate::read::MapReadRef;
pub use crate::read::ReadGuardIter;
// Expose `ReadGuard` since it has useful methods the user will likely care about.
#[doc(inline)]
pub use left_right::ReadGuard;
}
// NOTE: It is _critical_ that this module is not public.
mod aliasing;
/// Options for how to initialize the map.
///
/// In particular, the options dictate the hashing function, meta type, and initial capacity of the
/// map.
pub struct Options<M, S>
where
S: BuildHasher,
{
meta: M,
hasher: S,
capacity: Option<usize>,
}
impl<M, S> fmt::Debug for Options<M, S>
where
S: BuildHasher,
M: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Options")
.field("meta", &self.meta)
.field("capacity", &self.capacity)
.finish()
}
}
impl Default for Options<(), RandomState> {
fn default() -> Self {
Options {
meta: (),
hasher: RandomState::default(),
capacity: None,
}
}
}
impl<M, S> Options<M, S>
where
S: BuildHasher,
{
/// Set the initial meta value for the map.
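///
/// A small sketch (the `u64` meta here is just an example; any `'static + Clone` type works,
/// and readers observe an updated meta only after the next publish):
///
/// ```
/// let (_w, _r) = evmap::Options::default()
///     .with_meta(0u64)
///     .construct::<&str, &str>();
/// ```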
pub fn with_meta<M2>(self, meta: M2) -> Options<M2, S> {
Options {
meta,
hasher: self.hasher,
capacity: self.capacity,
}
}
/// Set the hasher used for the map.
///
/// # Safety
///
/// This method is safe to call as long as the given hasher is deterministic. That is, it must
/// yield the same hash if given the same sequence of inputs.
pub unsafe fn with_hasher<S2>(self, hash_builder: S2) -> Options<M, S2>
where
S2: BuildHasher + Clone,
{
Options {
meta: self.meta,
hasher: hash_builder,
capacity: self.capacity,
}
}
/// Set the initial capacity for the map.
pub fn with_capacity(self, capacity: usize) -> Options<M, S> {
Options {
meta: self.meta,
hasher: self.hasher,
capacity: Some(capacity),
}
}
/// Create the map, and construct the read and write handles used to access it.
///
/// If you want to use arbitrary types for the keys and values, use [`assert_stable`][Options::assert_stable].
#[allow(clippy::type_complexity)]
pub fn construct<K, V>(self) -> (WriteHandle<K, V, M, S>, ReadHandle<K, V, M, S>)
where
K: StableHashEq + Clone,
S: BuildHasher + Clone,
V: StableHashEq,
M: 'static + Clone,
{
unsafe { self.assert_stable() }
}
/// Create the map, and construct the read and write handles used to access it.
///
/// # Safety
///
/// This method is safe to call as long as the implementation of `Hash` and `Eq` for both `K`
/// and `V` are deterministic. That is, they must always yield the same result if given the
/// same inputs. For keys of type `K`, the result must also be consistent between different clones
/// of the same key.
#[allow(clippy::type_complexity)]
pub unsafe fn assert_stable<K, V>(self) -> (WriteHandle<K, V, M, S>, ReadHandle<K, V, M, S>)
where
K: Eq + Hash + Clone,
S: BuildHasher + Clone,
V: Eq + Hash,
M: 'static + Clone,
|
}
/// Create an empty eventually consistent map.
///
/// Use the [`Options`](./struct.Options.html) builder for more control over initialization.
///
/// If you want to use arbitrary types for the keys and values, use [`new_assert_stable`].
#[allow(clippy::type_complexity)]
pub fn new<K, V>() -> (
WriteHandle<K, V, (), RandomState>,
ReadHandle<K, V, (), RandomState>,
)
where
K: StableHashEq + Clone,
V: StableHashEq,
{
Options::default().construct()
}
/// Create an empty eventually consistent map.
///
/// Use the [`Options`](./struct.Options.html) builder for more control over initialization.
///
/// # Safety
///
/// This method is safe to call as long as the implementation of `Hash` and `Eq` for both `K` and
/// `V` are deterministic. That is, they must always yield the same result if given the same
/// inputs. For keys of type `K`, the result must also be consistent between different clones
/// of the same key.
#[allow(clippy::type_complexity)]
pub unsafe fn new_assert_stable<K, V>() -> (
WriteHandle<K, V, (), RandomState>,
ReadHandle<K, V, (), RandomState>,
)
where
K: Eq + Hash + Clone,
V: Eq + Hash,
{
Options::default().assert_stable()
}
/// Create an empty eventually consistent map with meta information and custom hasher.
///
/// Use the [`Options`](./struct.Options.html) builder for more control over initialization.
///
/// # Safety
///
/// This method is safe to call as long as the implementation of `Hash` and `Eq` for both `K` and
/// `V`, and the implementation of `BuildHasher` for `S` and [`Hasher`][std::hash::Hasher]
/// for <code>S::[Hasher][BuildHasher::Hasher]</code> are deterministic. That is, they must always
/// yield the same result if given the same inputs. For keys of type `K` and hashers of type `S`,
/// their behavior must also be consistent between different clones of the same value.
#[allow(clippy::type_complexity)]
pub unsafe fn with_hasher<K, V, M, S>(
meta: M,
hasher: S,
) -> (WriteHandle<K, V, M, S>, ReadHandle<K, V, M, S>)
where
K: Eq + Hash + Clone,
V: Eq + Hash,
M: 'static + Clone,
S: BuildHasher + Clone,
{
Options::default()
.with_hasher(hasher)
.with_meta(meta)
.assert_stable()
}
| {
let inner = if let Some(cap) = self.capacity {
Inner::with_capacity_and_hasher(self.meta, cap, self.hasher)
} else {
Inner::with_hasher(self.meta, self.hasher)
};
let (mut w, r) = left_right::new_from_empty(inner);
w.append(write::Operation::MarkReady);
(WriteHandle::new(w), ReadHandle::new(r))
} | identifier_body |
physical_plan.rs | with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Ballista Physical Plan (Experimental).
//!
//! The physical plan is a serializable data structure describing how the plan will be executed.
//!
//! It differs from the logical plan in that it deals with specific implementations of operators
//! (e.g. SortMergeJoin versus BroadcastHashJoin) whereas the logical plan just deals with an
//! abstract concept of a join.
//!
//! The physical plan also accounts for partitioning and ordering of data between operators.
use std::collections::HashMap;
use std::fmt::{self, Debug};
use std::sync::Arc;
use crate::arrow::array::{
ArrayRef, Float32Builder, Float64Builder, Int16Builder, Int32Builder, Int64Builder,
Int8Builder, StringBuilder, UInt16Builder, UInt32Builder, UInt64Builder, UInt8Builder,
};
use crate::arrow::datatypes::{DataType, Schema};
use crate::arrow::record_batch::RecordBatch;
use crate::datafusion::logicalplan::Expr;
use crate::datafusion::logicalplan::LogicalPlan;
use crate::datafusion::logicalplan::Operator;
use crate::datafusion::logicalplan::ScalarValue;
use crate::distributed::scheduler::ExecutionTask;
use crate::error::{ballista_error, Result};
use crate::execution::expressions::{
add, alias, aliased_aggr, avg, col, compare, count, div, lit, max, min, mult, subtract, sum,
};
use crate::execution::operators::{
CsvScanExec, FilterExec, HashAggregateExec, InMemoryTableScanExec, ParquetScanExec,
ProjectionExec, ShuffleExchangeExec, ShuffleReaderExec,
};
use crate::distributed::executor::ExecutorConfig;
use async_trait::async_trait;
use uuid::Uuid;
/// Stream of columnar batches using futures
pub type ColumnarBatchStream = Arc<dyn ColumnarBatchIter>;
#[derive(Debug, Clone)]
pub struct ExecutorMeta {
pub id: String,
pub host: String,
pub port: usize,
}
/// Async iterator over a stream of columnar batches
pub trait ColumnarBatchIter {
/// The schema of the iterator's batches
// In principle, this should not be needed as `ColumnarBatch` has a schema.
// However, the stream may be empty
fn schema(&self) -> Arc<Schema>;
/// Get the next batch from the stream, or None if the stream has ended
fn next(&self) -> Result<Option<ColumnarBatch>>;
/// Notify the iterator that no more results will be fetched, so that resources
/// can be freed immediately.
fn close(&self) {}
}
#[async_trait]
pub trait ExecutionContext: Send + Sync {
async fn get_executor_ids(&self) -> Result<Vec<ExecutorMeta>>;
async fn execute_task(
&self,
executor_id: ExecutorMeta,
task: ExecutionTask,
) -> Result<ShuffleId>;
async fn read_shuffle(&self, shuffle_id: &ShuffleId) -> Result<Vec<ColumnarBatch>>;
fn config(&self) -> ExecutorConfig;
}
/// Base trait for all operators
#[async_trait]
pub trait ExecutionPlan: Send + Sync {
/// Specifies the output schema of this operator.
fn schema(&self) -> Arc<Schema>;
/// Specifies how data is partitioned across different nodes in the cluster
fn output_partitioning(&self) -> Partitioning {
Partitioning::UnknownPartitioning(0)
}
/// Specifies the data distribution requirements of all the children for this operator
fn required_child_distribution(&self) -> Distribution {
Distribution::UnspecifiedDistribution
}
/// Specifies how data is ordered in each partition
fn output_ordering(&self) -> Option<Vec<SortOrder>> {
None
}
/// Specifies the sort-order requirements of all the children for this operator
fn required_child_ordering(&self) -> Option<Vec<Vec<SortOrder>>> {
None
}
/// Get the children of this plan. Leaf nodes have no children. Unary nodes have a single
/// child. Binary nodes have two children.
fn children(&self) -> Vec<Arc<PhysicalPlan>> {
vec![]
}
/// Runs this query against one partition returning a stream of columnar batches
async fn execute(
&self,
ctx: Arc<dyn ExecutionContext>,
partition_index: usize,
) -> Result<ColumnarBatchStream>;
}
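// Illustrative sketch (not part of the trait): a driver typically executes every output
// partition of an operator and drains the resulting stream. `plan` is assumed to be an
// `Arc<dyn ExecutionPlan>` and `ctx` an `Arc<dyn ExecutionContext>`:
//
//     for partition in 0..plan.output_partitioning().partition_count() {
//         let stream = plan.execute(ctx.clone(), partition).await?;
//         while let Some(batch) = stream.next()? {
//             // consume `batch`
//         }
//         stream.close();
//     }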
pub trait Expression: Send + Sync + Debug {
/// Get the data type of this expression, given the schema of the input
fn data_type(&self, input_schema: &Schema) -> Result<DataType>;
/// Decide whether this expression is nullable, given the schema of the input
fn nullable(&self, input_schema: &Schema) -> Result<bool>;
/// Evaluate an expression against a ColumnarBatch to produce a scalar or columnar result.
fn evaluate(&self, input: &ColumnarBatch) -> Result<ColumnarValue>;
}
/// Aggregate expression that can be evaluated against a RecordBatch
pub trait AggregateExpr: Send + Sync + Debug {
/// Get the data type of this expression, given the schema of the input
fn data_type(&self, input_schema: &Schema) -> Result<DataType>;
/// Decide whether this expression is nullable, given the schema of the input
fn nullable(&self, input_schema: &Schema) -> Result<bool>;
/// Evaluate the expression being aggregated
fn evaluate_input(&self, batch: &ColumnarBatch) -> Result<ColumnarValue>;
/// Create an accumulator for this aggregate expression
fn create_accumulator(&self, mode: &AggregateMode) -> Box<dyn Accumulator>;
}
/// Aggregate accumulator
pub trait Accumulator: Send + Sync {
/// Update the accumulator based on a columnar value
fn accumulate(&mut self, value: &ColumnarValue) -> Result<()>;
/// Get the final value for the accumulator
fn get_value(&self) -> Result<Option<ScalarValue>>;
}
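// Illustrative sketch of how an operator drives these traits (hypothetical `aggr_expr` and
// `batch`; the concrete accumulators live in crate::execution::expressions):
//
//     let mut acc = aggr_expr.create_accumulator(&AggregateMode::Partial);
//     let value = aggr_expr.evaluate_input(&batch)?;
//     acc.accumulate(&value)?;
//     let partial: Option<ScalarValue> = acc.get_value()?;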
/// Action that can be sent to an executor
#[derive(Debug, Clone)]
pub enum Action {
/// Execute the query with DataFusion and return the results
InteractiveQuery {
plan: LogicalPlan,
settings: HashMap<String, String>,
},
/// Execute a query and store the results in memory
Execute(ExecutionTask),
/// Collect a shuffle
FetchShuffle(ShuffleId),
}
pub type MaybeColumnarBatch = Result<Option<ColumnarBatch>>;
/// Batch of columnar data.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct ColumnarBatch {
schema: Arc<Schema>,
columns: HashMap<String, ColumnarValue>,
}
impl ColumnarBatch {
pub fn from_arrow(batch: &RecordBatch) -> Self {
let columns = batch
.columns()
.iter()
.enumerate()
.map(|(i, array)| {
(
batch.schema().field(i).name().clone(),
ColumnarValue::Columnar(array.clone()),
)
})
.collect();
Self {
schema: batch.schema(),
columns,
}
}
pub fn from_values(values: &[ColumnarValue], schema: &Schema) -> Self {
let columns = schema
.fields()
.iter()
.enumerate()
.map(|(i, f)| (f.name().clone(), values[i].clone()))
.collect();
Self {
schema: Arc::new(schema.clone()),
columns,
}
}
pub fn to_arrow(&self) -> Result<RecordBatch> {
let arrays = self
.schema
.fields()
.iter()
.map(|c| {
match self.column(c.name())? {
ColumnarValue::Columnar(array) => Ok(array.clone()),
ColumnarValue::Scalar(_, _) => {
// note that this can be implemented easily if needed
Err(ballista_error("Cannot convert scalar value to Arrow array"))
}
}
})
.collect::<Result<Vec<_>>>()?;
Ok(RecordBatch::try_new(self.schema.clone(), arrays)?)
}
pub fn schema(&self) -> Arc<Schema> {
self.schema.clone()
}
pub fn num_columns(&self) -> usize {
self.columns.len()
}
pub fn num_rows(&self) -> usize {
self.columns[self.schema.field(0).name()].len()
}
pub fn column(&self, name: &str) -> Result<&ColumnarValue> {
Ok(&self.columns[name]) |
pub fn memory_size(&self) -> usize {
self.columns.values().map(|c| c.memory_size()).sum()
}
}
macro_rules! build_literal_array {
($LEN:expr, $BUILDER:ident, $VALUE:expr) => {{
let mut builder = $BUILDER::new($LEN);
for _ in 0..$LEN {
builder.append_value($VALUE)?;
}
Ok(Arc::new(builder.finish()))
}};
}
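// For example, `build_literal_array!(3, Int32Builder, 7)` expands to code that builds an
// `ArrayRef` holding the Int32 values [7, 7, 7]; it is used below to widen scalar literals to
// the length of a column.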
/// A columnar value can either be a scalar value or an Arrow array.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub enum ColumnarValue {
Scalar(ScalarValue, usize),
Columnar(ArrayRef),
}
impl ColumnarValue {
pub fn len(&self) -> usize {
match self {
ColumnarValue::Scalar(_, n) => *n,
ColumnarValue::Columnar(array) => array.len(),
}
}
pub fn is_empty(&self) -> bool {
self.len() == 0
}
pub fn data_type(&self) -> &DataType {
match self {
ColumnarValue::Columnar(array) => array.data_type(),
ColumnarValue::Scalar(value, _) => match value {
ScalarValue::UInt8(_) => &DataType::UInt8,
ScalarValue::UInt16(_) => &DataType::UInt16,
ScalarValue::UInt32(_) => &DataType::UInt32,
ScalarValue::UInt64(_) => &DataType::UInt64,
ScalarValue::Int8(_) => &DataType::Int8,
ScalarValue::Int16(_) => &DataType::Int16,
ScalarValue::Int32(_) => &DataType::Int32,
ScalarValue::Int64(_) => &DataType::Int64,
ScalarValue::Float32(_) => &DataType::Float32,
ScalarValue::Float64(_) => &DataType::Float64,
_ => unimplemented!(),
},
}
}
pub fn to_arrow(&self) -> Result<ArrayRef> {
match self {
ColumnarValue::Columnar(array) => Ok(array.clone()),
ColumnarValue::Scalar(value, n) => match value {
ScalarValue::Int8(value) => build_literal_array!(*n, Int8Builder, *value),
ScalarValue::Int16(value) => build_literal_array!(*n, Int16Builder, *value),
ScalarValue::Int32(value) => build_literal_array!(*n, Int32Builder, *value),
ScalarValue::Int64(value) => build_literal_array!(*n, Int64Builder, *value),
ScalarValue::UInt8(value) => build_literal_array!(*n, UInt8Builder, *value),
ScalarValue::UInt16(value) => build_literal_array!(*n, UInt16Builder, *value),
ScalarValue::UInt32(value) => build_literal_array!(*n, UInt32Builder, *value),
ScalarValue::UInt64(value) => build_literal_array!(*n, UInt64Builder, *value),
ScalarValue::Float32(value) => build_literal_array!(*n, Float32Builder, *value),
ScalarValue::Float64(value) => build_literal_array!(*n, Float64Builder, *value),
ScalarValue::Utf8(value) => build_literal_array!(*n, StringBuilder, value),
other => Err(ballista_error(&format!(
"Unsupported literal type {:?}",
other
))),
},
}
}
pub fn memory_size(&self) -> usize {
//TODO delegate to Arrow once https://issues.apache.org/jira/browse/ARROW-9582 is
// implemented
match self {
ColumnarValue::Columnar(array) => {
let mut size = 0;
for buffer in array.data().buffers() {
size += buffer.capacity();
}
size
}
_ => 0,
}
}
}
/// Enumeration wrapping physical plan structs so that they can be represented in a tree easily
/// and processed using pattern matching
#[derive(Clone)]
pub enum PhysicalPlan {
/// Projection.
Projection(Arc<ProjectionExec>),
/// Filter a.k.a predicate.
Filter(Arc<FilterExec>),
/// Hash aggregate
HashAggregate(Arc<HashAggregateExec>),
/// Performs a shuffle that will result in the desired partitioning.
ShuffleExchange(Arc<ShuffleExchangeExec>),
/// Reads results from a ShuffleExchange
ShuffleReader(Arc<ShuffleReaderExec>),
/// Scans a partitioned Parquet data source
ParquetScan(Arc<ParquetScanExec>),
/// Scans a partitioned CSV data source
CsvScan(Arc<CsvScanExec>),
/// Scans an in-memory table
InMemoryTableScan(Arc<InMemoryTableScanExec>),
}
impl PhysicalPlan {
pub fn as_execution_plan(&self) -> Arc<dyn ExecutionPlan> {
match self {
Self::Projection(exec) => exec.clone(),
Self::Filter(exec) => exec.clone(),
Self::HashAggregate(exec) => exec.clone(),
Self::ParquetScan(exec) => exec.clone(),
Self::CsvScan(exec) => exec.clone(),
Self::ShuffleExchange(exec) => exec.clone(),
Self::ShuffleReader(exec) => exec.clone(),
Self::InMemoryTableScan(exec) => exec.clone(),
}
}
pub fn with_new_children(&self, new_children: Vec<Arc<PhysicalPlan>>) -> PhysicalPlan {
match self {
Self::HashAggregate(exec) => {
Self::HashAggregate(Arc::new(exec.with_new_children(new_children)))
}
_ => unimplemented!(),
}
}
fn fmt_with_indent(&self, f: &mut fmt::Formatter, indent: usize) -> fmt::Result {
if indent > 0 {
writeln!(f)?;
for _ in 0..indent {
write!(f, " ")?;
}
}
match self {
PhysicalPlan::CsvScan(exec) => write!(
f,
"CsvScan: {:?}, partitions={}; projection={:?}",
exec.path,
exec.filenames.len(),
exec.projection
),
PhysicalPlan::ParquetScan(exec) => write!(
f,
"ParquetScan: {:?}, partitions={}; projection={:?}",
exec.path,
exec.filenames.len(),
exec.projection
),
PhysicalPlan::HashAggregate(exec) => {
write!(
f,
"HashAggregate: mode={:?}, groupExpr={:?}, aggrExpr={:?}",
exec.mode, exec.group_expr, exec.aggr_expr
)?;
exec.child.fmt_with_indent(f, indent + 1)
}
PhysicalPlan::ShuffleExchange(exec) => {
write!(f, "Shuffle: {:?}", exec.as_ref().output_partitioning())?;
exec.as_ref().child.fmt_with_indent(f, indent + 1)
}
PhysicalPlan::ShuffleReader(exec) => {
write!(f, "ShuffleReader: shuffle_id={:?}", exec.shuffle_id)
}
PhysicalPlan::Projection(_exec) => write!(f, "Projection:"),
PhysicalPlan::Filter(_exec) => write!(f, "Filter:"),
_ => write!(f, "???"),
}
}
}
impl fmt::Debug for PhysicalPlan {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.fmt_with_indent(f, 0)
}
}
#[derive(Debug, Clone)]
pub enum Distribution {
UnspecifiedDistribution,
SinglePartition,
BroadcastDistribution,
ClusteredDistribution {
required_num_partitions: usize,
clustering: Vec<Expr>,
},
HashClusteredDistribution {
required_num_partitions: usize,
clustering: Vec<Expr>,
},
OrderedDistribution(Vec<SortOrder>),
}
#[derive(Debug, Clone)]
pub enum JoinType {
Inner,
}
#[derive(Debug, Clone)]
pub enum BuildSide {
BuildLeft,
BuildRight,
}
#[derive(Debug, Clone)]
pub enum SortDirection {
Ascending,
Descending,
}
/// Aggregate operator modes.
#[derive(Debug, Clone)]
pub enum AggregateMode {
/// Partial aggregation that can run in parallel per partition
Partial,
/// Perform final aggregation on results of partial aggregation. For example, this would
/// produce the SUM of SUMs, or the SUMs of COUNTs.
Final,
/// Perform complete aggregation in one pass. This is used when there is only a single
/// partition to operate on.
Complete,
}
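// Example: a distributed SUM runs `Partial` on every partition to produce per-partition sums,
// then a single `Final` stage combines those partial sums (the SUM of SUMs); a plan with only
// one partition can instead run `Complete` and aggregate in a single pass.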
#[derive(Debug, Clone)]
pub struct SortOrder {
child: Arc<Expr>,
direction: SortDirection,
null_ordering: NullOrdering,
}
#[derive(Debug, Clone)]
pub enum NullOrdering {
NullsFirst,
NullsLast,
}
/// Partitioning schemes supported by operators.
#[derive(Debug, Clone)]
pub enum Partitioning {
UnknownPartitioning(usize),
HashPartitioning(usize, Vec<Arc<Expr>>),
}
impl Partitioning {
pub fn partition_count(&self) -> usize {
use Partitioning::*;
match self {
UnknownPartitioning(n) => *n,
HashPartitioning(n, _) => *n,
}
}
}
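// For example (illustrative), `Partitioning::HashPartitioning(8, exprs)` describes data
// hash-partitioned on `exprs` into 8 partitions, so `partition_count()` returns 8.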
/// Unique identifier for the output shuffle partition of an operator.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct ShuffleId {
pub(crate) job_uuid: Uuid,
pub(crate) stage_id: usize,
pub(crate) partition_id: usize,
}
impl ShuffleId {
pub fn new(job_uuid: Uuid, stage_id: usize, partition_id: usize) -> Self {
Self {
job_uuid,
stage_id,
partition_id,
}
}
}
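// For example (illustrative), `ShuffleId::new(job_uuid, 2, 0)` names partition 0 of stage 2 of
// a job; executors use it to locate shuffle output, e.g. via `Action::FetchShuffle`.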
pub struct ShuffleLocation {}
/// Translate a logical expression into a physical expression that can be evaluated against
/// input data.
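///
/// For example (an illustrative sketch; `schema` and `batch` are assumed to describe a table
/// with a numeric column `"a"`):
///
/// ```ignore
/// let expr = Expr::BinaryExpr {
///     left: Box::new(Expr::Column("a".to_owned())),
///     op: Operator::Plus,
///     right: Box::new(Expr::Literal(ScalarValue::Int32(1))),
/// };
/// let physical = compile_expression(&expr, &schema)?;
/// let value: ColumnarValue = physical.evaluate(&batch)?;
/// ```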
pub fn compile_expression(expr: &Expr, input: &Schema) -> Result<Arc<dyn Expression>> {
match expr {
Expr::Alias(expr, name) => Ok(alias(compile_expression(expr, input)?, name)),
Expr::Column(name) => Ok(col(name)),
Expr::Literal(value) => Ok(lit(value.to_owned())),
Expr::BinaryExpr { left, op, right } => {
let l = compile_expression(left, input)?;
let r = compile_expression(right, input)?;
match op {
Operator::Plus => Ok(add(l, r)),
Operator::Minus => Ok(subtract(l, r)),
Operator::Multiply => Ok(mult(l, r)),
Operator::Divide => Ok(div(l, r)),
Operator::Lt
| Operator::LtEq
| Operator::Gt
| Operator::GtEq
| Operator::Eq
| Operator::NotEq => Ok(compare(l, op, r)),
other => Err(ballista_error(&format!(
"Unsupported binary operator in compile_expression {:?}",
other
))),
}
}
other => Err(ballista_error(&format!(
"Unsupported expression in compile_expression {:?}",
other
))),
}
}
/// Translate one or more logical expressions into physical expressions that can be evaluated
/// against input data.
pub fn compile_expressions(expr: &[Expr], input: &Schema) -> Result<Vec<Arc<dyn Expression>>> {
expr.iter().map(|e| compile_expression(e, input)).collect()
}
/// Translate a logical aggregate expression into a physical expression that can be evaluated
/// against input data.
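///
/// For example (illustrative), a logical `SUM(price)` arrives as an `Expr::AggregateFunction`
/// with `name = "SUM"` and `args = [Expr::Column("price")]`, and is translated here into the
/// physical `sum(col("price"))` expression.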
pub fn compile_aggregate_expression(
expr: &Expr,
input_schema: &Schema,
) -> Result<Arc<dyn AggregateExpr>> {
match expr {
Expr::Alias(expr, alias) => Ok(aliased_aggr(
compile_aggregate_expression(expr, input_schema)?,
alias,
)),
Expr::AggregateFunction { name, args,.. } => match name.to_lowercase().as_ref() {
"avg" => Ok(avg(compile_expression(&args[0], input_schema)?)),
"count" => Ok(count(compile_expression(&args[0], input_schema)?)),
"max" => Ok(max(compile_expression(&args[0], input_schema)?)),
"min" => Ok(min(compile_expression(&args[0], input_schema)?)),
"sum" => Ok(sum(compile_expression(&args[0], input_schema)?)),
other => Err(ballista_error(&format!(
"Unsupported aggregate function in compile_aggregate_expression '{}'",
other
))),
},
other => Err(ballista_error(&format!(
"Unsupported aggregate expression in compile_aggregate_expression {:?}",
other
))),
}
}
/// Translate one or more logical aggregate expressions into physical expressions that can be evaluated
/// against input data.
pub | } | random_line_split |
physical_plan.rs | a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Ballista Physical Plan (Experimental).
//!
//! The physical plan is a serializable data structure describing how the plan will be executed.
//!
//! It differs from the logical plan in that it deals with specific implementations of operators
//! (e.g. SortMergeJoin versus BroadcastHashJoin) whereas the logical plan just deals with an
//! abstract concept of a join.
//!
//! The physical plan also accounts for partitioning and ordering of data between operators.
use std::collections::HashMap;
use std::fmt::{self, Debug};
use std::sync::Arc;
use crate::arrow::array::{
ArrayRef, Float32Builder, Float64Builder, Int16Builder, Int32Builder, Int64Builder,
Int8Builder, StringBuilder, UInt16Builder, UInt32Builder, UInt64Builder, UInt8Builder,
};
use crate::arrow::datatypes::{DataType, Schema};
use crate::arrow::record_batch::RecordBatch;
use crate::datafusion::logicalplan::Expr;
use crate::datafusion::logicalplan::LogicalPlan;
use crate::datafusion::logicalplan::Operator;
use crate::datafusion::logicalplan::ScalarValue;
use crate::distributed::scheduler::ExecutionTask;
use crate::error::{ballista_error, Result};
use crate::execution::expressions::{
add, alias, aliased_aggr, avg, col, compare, count, div, lit, max, min, mult, subtract, sum,
};
use crate::execution::operators::{
CsvScanExec, FilterExec, HashAggregateExec, InMemoryTableScanExec, ParquetScanExec,
ProjectionExec, ShuffleExchangeExec, ShuffleReaderExec,
};
use crate::distributed::executor::ExecutorConfig;
use async_trait::async_trait;
use uuid::Uuid;
/// Stream of columnar batches using futures
pub type ColumnarBatchStream = Arc<dyn ColumnarBatchIter>;
#[derive(Debug, Clone)]
pub struct ExecutorMeta {
pub id: String,
pub host: String,
pub port: usize,
}
/// Async iterator over a stream of columnar batches
pub trait ColumnarBatchIter {
/// The schema of the iterator's batches
// In principle, this should not be needed as `ColumnarBatch` has a schema.
// However, the stream may be empty
fn schema(&self) -> Arc<Schema>;
/// Get the next batch from the stream, or None if the stream has ended
fn next(&self) -> Result<Option<ColumnarBatch>>;
/// Notify the iterator that no more results will be fetched, so that resources
/// can be freed immediately.
fn close(&self) {}
}
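// Illustrative sketch (hypothetical `stream: ColumnarBatchStream`): consumers drain the
// iterator until it yields `None`, then release its resources:
//
//     while let Some(batch) = stream.next()? {
//         // process `batch`
//     }
//     stream.close();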
#[async_trait]
pub trait ExecutionContext: Send + Sync {
async fn get_executor_ids(&self) -> Result<Vec<ExecutorMeta>>;
async fn execute_task(
&self,
executor_id: ExecutorMeta,
task: ExecutionTask,
) -> Result<ShuffleId>;
async fn read_shuffle(&self, shuffle_id: &ShuffleId) -> Result<Vec<ColumnarBatch>>;
fn config(&self) -> ExecutorConfig;
}
/// Base trait for all operators
#[async_trait]
pub trait ExecutionPlan: Send + Sync {
/// Specifies the output schema of this operator.
fn schema(&self) -> Arc<Schema>;
/// Specifies how data is partitioned across different nodes in the cluster
fn output_partitioning(&self) -> Partitioning {
Partitioning::UnknownPartitioning(0)
}
/// Specifies the data distribution requirements of all the children for this operator
fn required_child_distribution(&self) -> Distribution {
Distribution::UnspecifiedDistribution
}
/// Specifies how data is ordered in each partition
fn output_ordering(&self) -> Option<Vec<SortOrder>> {
None
}
/// Specifies the sort-order requirements of all the children for this operator
fn required_child_ordering(&self) -> Option<Vec<Vec<SortOrder>>> {
None
}
/// Get the children of this plan. Leaf nodes have no children. Unary nodes have a single
/// child. Binary nodes have two children.
fn children(&self) -> Vec<Arc<PhysicalPlan>> {
vec![]
}
/// Runs this query against one partition returning a stream of columnar batches
async fn execute(
&self,
ctx: Arc<dyn ExecutionContext>,
partition_index: usize,
) -> Result<ColumnarBatchStream>;
}
pub trait Expression: Send + Sync + Debug {
/// Get the data type of this expression, given the schema of the input
fn data_type(&self, input_schema: &Schema) -> Result<DataType>;
/// Decide whether this expression is nullable, given the schema of the input
fn nullable(&self, input_schema: &Schema) -> Result<bool>;
/// Evaluate an expression against a ColumnarBatch to produce a scalar or columnar result.
fn evaluate(&self, input: &ColumnarBatch) -> Result<ColumnarValue>;
}
/// Aggregate expression that can be evaluated against a RecordBatch
pub trait AggregateExpr: Send + Sync + Debug {
/// Get the data type of this expression, given the schema of the input
fn data_type(&self, input_schema: &Schema) -> Result<DataType>;
/// Decide whether this expression is nullable, given the schema of the input
fn nullable(&self, input_schema: &Schema) -> Result<bool>;
/// Evaluate the expression being aggregated
fn evaluate_input(&self, batch: &ColumnarBatch) -> Result<ColumnarValue>;
/// Create an accumulator for this aggregate expression
fn create_accumulator(&self, mode: &AggregateMode) -> Box<dyn Accumulator>;
}
/// Aggregate accumulator
pub trait Accumulator: Send + Sync {
/// Update the accumulator based on a columnar value
fn accumulate(&mut self, value: &ColumnarValue) -> Result<()>;
/// Get the final value for the accumulator
fn get_value(&self) -> Result<Option<ScalarValue>>;
}
/// Action that can be sent to an executor
#[derive(Debug, Clone)]
pub enum Action {
/// Execute the query with DataFusion and return the results
InteractiveQuery {
plan: LogicalPlan,
settings: HashMap<String, String>,
},
/// Execute a query and store the results in memory
Execute(ExecutionTask),
/// Collect a shuffle
FetchShuffle(ShuffleId),
}
pub type MaybeColumnarBatch = Result<Option<ColumnarBatch>>;
/// Batch of columnar data.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct ColumnarBatch {
schema: Arc<Schema>,
columns: HashMap<String, ColumnarValue>,
}
impl ColumnarBatch {
pub fn from_arrow(batch: &RecordBatch) -> Self {
let columns = batch
.columns()
.iter()
.enumerate()
.map(|(i, array)| {
(
batch.schema().field(i).name().clone(),
ColumnarValue::Columnar(array.clone()),
)
})
.collect();
Self {
schema: batch.schema(),
columns,
}
}
pub fn from_values(values: &[ColumnarValue], schema: &Schema) -> Self {
let columns = schema
.fields()
.iter()
.enumerate()
.map(|(i, f)| (f.name().clone(), values[i].clone()))
.collect();
Self {
schema: Arc::new(schema.clone()),
columns,
}
}
pub fn to_arrow(&self) -> Result<RecordBatch> {
let arrays = self
.schema
.fields()
.iter()
.map(|c| {
match self.column(c.name())? {
ColumnarValue::Columnar(array) => Ok(array.clone()),
ColumnarValue::Scalar(_, _) => {
// note that this can be implemented easily if needed
Err(ballista_error("Cannot convert scalar value to Arrow array"))
}
}
})
.collect::<Result<Vec<_>>>()?;
Ok(RecordBatch::try_new(self.schema.clone(), arrays)?)
}
pub fn schema(&self) -> Arc<Schema> {
self.schema.clone()
}
pub fn num_columns(&self) -> usize {
self.columns.len()
}
pub fn num_rows(&self) -> usize {
self.columns[self.schema.field(0).name()].len()
}
pub fn column(&self, name: &str) -> Result<&ColumnarValue> {
Ok(&self.columns[name])
}
pub fn memory_size(&self) -> usize {
self.columns.values().map(|c| c.memory_size()).sum()
}
}
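// Hedged round-trip sketch (not part of the original file): an Arrow `RecordBatch` converts
// into a `ColumnarBatch` and back without losing the schema or the row count. The column
// name "c0" is purely illustrative.
#[cfg(test)]
mod columnar_batch_round_trip {
    use super::*;
    use crate::arrow::array::Int32Array;
    use crate::arrow::datatypes::Field;

    #[test]
    fn record_batch_round_trip() {
        let schema = Arc::new(Schema::new(vec![Field::new("c0", DataType::Int32, false)]));
        let array: ArrayRef = Arc::new(Int32Array::from(vec![1, 2, 3]));
        let input = RecordBatch::try_new(schema.clone(), vec![array]).unwrap();
        let batch = ColumnarBatch::from_arrow(&input);
        assert_eq!(batch.num_rows(), 3);
        assert_eq!(batch.num_columns(), 1);
        let output = batch.to_arrow().unwrap();
        assert_eq!(output.num_rows(), 3);
    }
}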
macro_rules! build_literal_array {
($LEN:expr, $BUILDER:ident, $VALUE:expr) => {{
let mut builder = $BUILDER::new($LEN);
for _ in 0..$LEN {
builder.append_value($VALUE)?;
}
Ok(Arc::new(builder.finish()))
}};
}
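// Hedged sketch (not in the original source): `build_literal_array!` is what expands a scalar
// literal into a full Arrow array; written out by hand for an `Int32` value it is equivalent
// to the helper below.
#[allow(dead_code)]
fn int32_literal_array_sketch(len: usize, value: i32) -> Result<ArrayRef> {
    // Build an array that repeats `value` once per row so it can be combined with real columns.
    let mut builder = Int32Builder::new(len);
    for _ in 0..len {
        builder.append_value(value)?;
    }
    Ok(Arc::new(builder.finish()))
}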
/// A columnar value can either be a scalar value or an Arrow array.
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub enum ColumnarValue {
Scalar(ScalarValue, usize),
Columnar(ArrayRef),
}
impl ColumnarValue {
pub fn len(&self) -> usize {
match self {
ColumnarValue::Scalar(_, n) => *n,
ColumnarValue::Columnar(array) => array.len(),
}
}
pub fn is_empty(&self) -> bool {
self.len() == 0
}
pub fn data_type(&self) -> &DataType {
match self {
ColumnarValue::Columnar(array) => array.data_type(),
ColumnarValue::Scalar(value, _) => match value {
ScalarValue::UInt8(_) => &DataType::UInt8,
ScalarValue::UInt16(_) => &DataType::UInt16,
ScalarValue::UInt32(_) => &DataType::UInt32,
ScalarValue::UInt64(_) => &DataType::UInt64,
ScalarValue::Int8(_) => &DataType::Int8,
ScalarValue::Int16(_) => &DataType::Int16,
ScalarValue::Int32(_) => &DataType::Int32,
ScalarValue::Int64(_) => &DataType::Int64,
ScalarValue::Float32(_) => &DataType::Float32,
ScalarValue::Float64(_) => &DataType::Float64,
_ => unimplemented!(),
},
}
}
pub fn to_arrow(&self) -> Result<ArrayRef> {
match self {
ColumnarValue::Columnar(array) => Ok(array.clone()),
ColumnarValue::Scalar(value, n) => match value {
ScalarValue::Int8(value) => build_literal_array!(*n, Int8Builder, *value),
ScalarValue::Int16(value) => build_literal_array!(*n, Int16Builder, *value),
ScalarValue::Int32(value) => build_literal_array!(*n, Int32Builder, *value),
ScalarValue::Int64(value) => build_literal_array!(*n, Int64Builder, *value),
ScalarValue::UInt8(value) => build_literal_array!(*n, UInt8Builder, *value),
ScalarValue::UInt16(value) => build_literal_array!(*n, UInt16Builder, *value),
ScalarValue::UInt32(value) => build_literal_array!(*n, UInt32Builder, *value),
ScalarValue::UInt64(value) => build_literal_array!(*n, UInt64Builder, *value),
ScalarValue::Float32(value) => build_literal_array!(*n, Float32Builder, *value),
ScalarValue::Float64(value) => build_literal_array!(*n, Float64Builder, *value),
ScalarValue::Utf8(value) => build_literal_array!(*n, StringBuilder, value),
other => Err(ballista_error(&format!(
"Unsupported literal type {:?}",
other
))),
},
}
}
pub fn memory_size(&self) -> usize {
//TODO delegate to Arrow once https://issues.apache.org/jira/browse/ARROW-9582 is
// implemented
match self {
ColumnarValue::Columnar(array) => {
let mut size = 0;
for buffer in array.data().buffers() {
size += buffer.capacity();
}
size
}
_ => 0,
}
}
}
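// Hedged usage sketch (not part of the original file): a scalar columnar value stands for a
// column of `n` repeated values, so `len` and `to_arrow` agree on the row count.
#[cfg(test)]
mod columnar_value_scalar_example {
    use super::*;

    #[test]
    fn scalar_expands_to_array() {
        let value = ColumnarValue::Scalar(ScalarValue::Int32(7), 3);
        assert_eq!(value.len(), 3);
        let array = value.to_arrow().unwrap();
        assert_eq!(array.len(), 3);
    }
}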
/// Enumeration wrapping physical plan structs so that they can be represented in a tree easily
/// and processed using pattern matching
#[derive(Clone)]
pub enum PhysicalPlan {
/// Projection.
Projection(Arc<ProjectionExec>),
/// Filter a.k.a predicate.
Filter(Arc<FilterExec>),
/// Hash aggregate
HashAggregate(Arc<HashAggregateExec>),
/// Performs a shuffle that will result in the desired partitioning.
ShuffleExchange(Arc<ShuffleExchangeExec>),
/// Reads results from a ShuffleExchange
ShuffleReader(Arc<ShuffleReaderExec>),
/// Scans a partitioned Parquet data source
ParquetScan(Arc<ParquetScanExec>),
/// Scans a partitioned CSV data source
CsvScan(Arc<CsvScanExec>),
/// Scans an in-memory table
InMemoryTableScan(Arc<InMemoryTableScanExec>),
}
impl PhysicalPlan {
pub fn as_execution_plan(&self) -> Arc<dyn ExecutionPlan> {
match self {
Self::Projection(exec) => exec.clone(),
Self::Filter(exec) => exec.clone(),
Self::HashAggregate(exec) => exec.clone(),
Self::ParquetScan(exec) => exec.clone(),
Self::CsvScan(exec) => exec.clone(),
Self::ShuffleExchange(exec) => exec.clone(),
Self::ShuffleReader(exec) => exec.clone(),
Self::InMemoryTableScan(exec) => exec.clone(),
}
}
pub fn with_new_children(&self, new_children: Vec<Arc<PhysicalPlan>>) -> PhysicalPlan {
match self {
Self::HashAggregate(exec) => {
Self::HashAggregate(Arc::new(exec.with_new_children(new_children)))
}
_ => unimplemented!(),
}
}
fn fmt_with_indent(&self, f: &mut fmt::Formatter, indent: usize) -> fmt::Result {
if indent > 0 {
writeln!(f)?;
for _ in 0..indent {
write!(f, " ")?;
}
}
match self {
PhysicalPlan::CsvScan(exec) => write!(
f,
"CsvScan: {:?}, partitions={}; projection={:?}",
exec.path,
exec.filenames.len(),
exec.projection
),
PhysicalPlan::ParquetScan(exec) => write!(
f,
"ParquetScan: {:?}, partitions={}; projection={:?}",
exec.path,
exec.filenames.len(),
exec.projection
),
PhysicalPlan::HashAggregate(exec) => {
write!(
f,
"HashAggregate: mode={:?}, groupExpr={:?}, aggrExpr={:?}",
exec.mode, exec.group_expr, exec.aggr_expr
)?;
exec.child.fmt_with_indent(f, indent + 1)
}
PhysicalPlan::ShuffleExchange(exec) => {
write!(f, "Shuffle: {:?}", exec.as_ref().output_partitioning())?;
exec.as_ref().child.fmt_with_indent(f, indent + 1)
}
PhysicalPlan::ShuffleReader(exec) => {
write!(f, "ShuffleReader: shuffle_id={:?}", exec.shuffle_id)
}
PhysicalPlan::Projection(_exec) => write!(f, "Projection:"),
PhysicalPlan::Filter(_exec) => write!(f, "Filter:"),
_ => write!(f, "???"),
}
}
}
impl fmt::Debug for PhysicalPlan {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.fmt_with_indent(f, 0)
}
}
#[derive(Debug, Clone)]
pub enum Distribution {
UnspecifiedDistribution,
SinglePartition,
BroadcastDistribution,
ClusteredDistribution {
required_num_partitions: usize,
clustering: Vec<Expr>,
},
HashClusteredDistribution {
required_num_partitions: usize,
clustering: Vec<Expr>,
},
OrderedDistribution(Vec<SortOrder>),
}
#[derive(Debug, Clone)]
pub enum JoinType {
Inner,
}
#[derive(Debug, Clone)]
pub enum BuildSide {
BuildLeft,
BuildRight,
}
#[derive(Debug, Clone)]
pub enum SortDirection {
Ascending,
Descending,
}
/// Aggregate operator modes.
#[derive(Debug, Clone)]
pub enum AggregateMode {
/// Partial aggregation that can run in parallel per partition
Partial,
/// Perform final aggregation on results of partial aggregation. For example, this would
/// produce the SUM of SUMs, or the SUMs of COUNTs.
Final,
/// Perform complete aggregation in one pass. This is used when there is only a single
/// partition to operate on.
Complete,
}
#[derive(Debug, Clone)]
pub struct SortOrder {
child: Arc<Expr>,
direction: SortDirection,
null_ordering: NullOrdering,
}
#[derive(Debug, Clone)]
pub enum NullOrdering {
NullsFirst,
NullsLast,
}
/// Partitioning schemes supported by operators.
#[derive(Debug, Clone)]
pub enum Partitioning {
UnknownPartitioning(usize),
HashPartitioning(usize, Vec<Arc<Expr>>),
}
impl Partitioning {
pub fn partition_count(&self) -> usize {
use Partitioning::*;
match self {
UnknownPartitioning(n) => *n,
HashPartitioning(n, _) => *n,
}
}
}
/// Unique identifier for the output shuffle partition of an operator.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct ShuffleId {
pub(crate) job_uuid: Uuid,
pub(crate) stage_id: usize,
pub(crate) partition_id: usize,
}
impl ShuffleId {
pub fn new(job_uuid: Uuid, stage_id: usize, partition_id: usize) -> Self {
Self {
job_uuid,
stage_id,
partition_id,
}
}
}
pub struct ShuffleLocation {}
/// Translate a logical expression into a physical expression that can be evaluated against
/// input data.
pub fn compile_expression(expr: &Expr, input: &Schema) -> Result<Arc<dyn Expression>> {
match expr {
Expr::Alias(expr, name) => Ok(alias(compile_expression(expr, input)?, name)),
Expr::Column(name) => Ok(col(name)),
Expr::Literal(value) => Ok(lit(value.to_owned())),
Expr::BinaryExpr { left, op, right } => {
let l = compile_expression(left, input)?;
let r = compile_expression(right, input)?;
match op {
Operator::Plus => Ok(add(l, r)),
Operator::Minus => Ok(subtract(l, r)),
Operator::Multiply => Ok(mult(l, r)),
Operator::Divide => Ok(div(l, r)),
Operator::Lt
| Operator::LtEq
| Operator::Gt
| Operator::GtEq
| Operator::Eq
| Operator::NotEq => Ok(compare(l, op, r)),
other => Err(ballista_error(&format!(
"Unsupported binary operator in compile_expression {:?}",
other
))),
}
}
other => Err(ballista_error(&format!(
"Unsupported expression in compile_expression {:?}",
other
))),
}
}
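// Hedged sketch (not in the original source): the helpers that `compile_expression` delegates
// to can also be combined directly; the column name "c0" is purely illustrative.
#[allow(dead_code)]
fn add_one_to_column_sketch() -> Arc<dyn Expression> {
    // Equivalent to compiling the logical expression `c0 + 1`.
    add(col("c0"), lit(ScalarValue::Int32(1)))
}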
/// Translate one or more logical expressions into physical expressions that can be evaluated
/// against input data.
pub fn compile_expressions(expr: &[Expr], input: &Schema) -> Result<Vec<Arc<dyn Expression>>> {
expr.iter().map(|e| compile_expression(e, input)).collect()
}
/// Translate a logical aggregate expression into a physical expression that can be evaluated
/// against input data.
pub fn compile_aggregate_expression(
expr: &Expr,
input_schema: &Schema,
) -> Result<Arc<dyn AggregateExpr>> {
match expr {
Expr::Alias(expr, alias) => Ok(aliased_aggr(
compile_aggregate_expression(expr, input_schema)?,
alias,
)),
Expr::AggregateFunction { name, args,.. } => match name.to_lowercase().as_ref() {
"avg" => Ok(avg(compile_expression(&args[0], input_schema)?)),
"count" => Ok(count(compile_expression(&args[0], input_schema)?)),
"max" => Ok(max(compile_expression(&args[0], input_schema)?)),
"min" => Ok(min(compile_expression(&args[0], input_schema)?)),
"sum" => Ok(sum(compile_expression(&args[0], input_schema)?)),
other => Err(ballista_error(&format!(
"Unsupported aggregate function in compile_aggregate_expression '{}'",
other
))),
},
other => Err(ballista_error(&format!(
"Unsupported aggregate expression in compile_aggregate_expression {:?}",
other
))),
}
}
/// Translate one or more logical aggregate expressions into physical expressions that can be evaluated
/// against input data.
pub fn compile_aggregate_expressions(
    expr: &[Expr],
    input_schema: &Schema,
) -> Result<Vec<Arc<dyn AggregateExpr>>> {
    expr.iter()
        .map(|e| compile_aggregate_expression(e, input_schema))
        .collect()
}
|
// time_zones.rs
use core::fmt;
use super::{NaiveDateTime, DateTime, UnixTimestamp, Month, DayOfTheWeek};
use num::{div_floor, positive_rem};
pub trait TimeZone {
fn from_timestamp(&self, t: UnixTimestamp) -> NaiveDateTime;
fn to_timestamp(&self, d: &NaiveDateTime) -> Result<UnixTimestamp, LocalTimeConversionError>;
}
/// When a time zone makes clocks jump forward or back at some instant in time
/// (for example twice a year under daylight-saving time, a.k.a. the summer-time period),
/// some local times become ambiguous or impossible. This error is returned when either:
///
/// * Clocks went back and this local time occurred at multiple instants in time,
/// making its interpretation or conversion ambiguous.
///
/// * Clocks jumped forward and this local time did not occur.
/// It does not represent any real instant in time.
/// It could be argued that a range of local times all represent the same instant,
/// but this library does not implement the conversion that way.
#[derive(Eq, PartialEq)]
pub struct LocalTimeConversionError {
/// Make the type opaque to allow for future extensions
_private: (),
}
impl fmt::Debug for LocalTimeConversionError {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
write!(formatter, "LocalTimeConversionError")
}
}
/// Implemented for time zones where `LocalTimeConversionError` never occurs,
/// namely for `Utc` and `FixedOffsetFromUtc`.
///
/// Any UTC-offset change in a time zone creates local times that either don’t occur or occur twice.
/// `TimeZone::to_timestamp` returns `Err(LocalTimeConversionError)` for such local times.
pub trait UnambiguousTimeZone: TimeZone {
fn to_unambiguous_timestamp(&self, d: &NaiveDateTime) -> UnixTimestamp {
self.to_timestamp(d).unwrap()
}
}
/// The *Coordinated Universal Time* (UTC) time zone.
#[derive(Debug, Eq, PartialEq, Copy, Clone, Default)]
pub struct Utc;
impl UnambiguousTimeZone for Utc {}
impl TimeZone for Utc {
fn from_timestamp(&self, u: UnixTimestamp) -> NaiveDateTime {
let days_since_unix = div_floor(u.0, SECONDS_PER_DAY) as i32;
let days = days_since_unix + days_since_d0(1970);
let year = div_floor(days * 400, DAYS_PER_400YEARS) as i32;
let day_of_the_year = days - days_since_d0(year);
let (month, day) = Month::from_day_of_the_year(day_of_the_year, year.into());
let hour = positive_rem(div_floor(u.0, SECONDS_PER_HOUR), 24) as u8;
let minute = positive_rem(div_floor(u.0, SECONDS_PER_MINUTE), 60) as u8;
let second = positive_rem(u.0, 60) as u8;
NaiveDateTime::new(year, month, day, hour, minute, second)
}
fn to_timestamp(&self, d: &NaiveDateTime) -> Result<UnixTimestamp, LocalTimeConversionError> {
Ok(UnixTimestamp(
i64::from(days_since_unix(d)) * SECONDS_PER_DAY
+ i64::from(d.hour) * SECONDS_PER_HOUR
+ i64::from(d.minute) * SECONDS_PER_MINUTE
+ i64::from(d.second)
))
}
}
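// Hedged sketch (not in the original file): the Unix epoch converts to midnight of
// 1970-01-01 in UTC, and converting back is lossless.
#[cfg(test)]
mod utc_epoch_example {
    use super::*;

    #[test]
    fn epoch_round_trip() {
        let epoch = Utc.from_timestamp(UnixTimestamp(0));
        assert_eq!((epoch.year, epoch.day, epoch.hour), (1970, 1, 0));
        assert_eq!(Utc.to_timestamp(&epoch).unwrap().0, 0);
    }
}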
/// The offset is typically positive east of Greenwich (longitude 0°), negative west.
///
/// For example, Japan Standard Time is UTC+09:00:
///
/// ```rust
/// use gregor::FixedOffsetFromUtc;
/// let jst = FixedOffsetFromUtc::from_hours_and_minutes(9, 0);
/// ```
#[derive(Debug, Eq, PartialEq, Copy, Clone, Default)]
pub struct FixedOffsetFromUtc {
seconds_ahead_of_utc: i32,
}
impl FixedOffsetFromUtc {
pub fn from_hours_and_minutes(hours: i32, minutes: i32) -> Self {
FixedOffsetFromUtc {
seconds_ahead_of_utc: (hours * 60 + minutes) * 60,
}
}
}
impl UnambiguousTimeZone for FixedOffsetFromUtc {}
impl TimeZone for FixedOffsetFromUtc {
fn from_timestamp(&self, u: UnixTimestamp) -> NaiveDateTime {
// When local time is ahead of UTC (positive offset)
// that instant happened before midnight UTC
// so there are more seconds since then.
// (Add the offset rather than subtract it.)
// Seconds since *this time zone*’s midnight of 1970-01-01.
let seconds = u.0 + i64::from(self.seconds_ahead_of_utc);
// This is not really a Unix timestamp or a UTC date-time,
// but the two errors compensate to give a date-time in this time zone.
Utc.from_timestamp(UnixTimestamp(seconds))
}
fn to_timestamp(&self, d: &NaiveDateTime) -> Result<UnixTimestamp, LocalTimeConversionError> {
// Pretend this is UTC to obtain seconds since *this time zone*’s midnight of 1970-01-01.
let seconds = Utc.to_unambiguous_timestamp(d).0;
        // For positive offsets (ahead of UTC) this is earlier in time than UTC midnight
// (with more seconds), so *subtract* the offset to make a Unix timestamp.
Ok(UnixTimestamp(seconds - i64::from(self.seconds_ahead_of_utc)))
}
}
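// Hedged sketch (not in the original file): a fixed +09:00 offset shows the same instant
// nine hours ahead of UTC, and converting the local date-time back recovers the timestamp.
#[cfg(test)]
mod fixed_offset_example {
    use super::*;

    #[test]
    fn nine_hours_ahead_of_utc() {
        let jst = FixedOffsetFromUtc::from_hours_and_minutes(9, 0);
        let epoch = jst.from_timestamp(UnixTimestamp(0));
        assert_eq!((epoch.day, epoch.hour), (1, 9));
        assert_eq!(jst.to_timestamp(&epoch).unwrap().0, 0);
    }
}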
pub trait DaylightSaving {
fn offset_outside_dst(&self) -> FixedOffsetFromUtc;
fn offset_during_dst(&self) -> FixedOffsetFromUtc;
fn is_in_dst(&self, t: UnixTimestamp) -> bool;
}
impl<Tz: DaylightSaving> TimeZone for Tz {
fn from_timestamp(&self, u: UnixTimestamp) -> NaiveDateTime {
let offset = if self.is_in_dst(u) {
self.offset_during_dst()
} else {
self.offset_outside_dst()
};
offset.from_timestamp(u)
}
fn to_timestamp(&self, d: &NaiveDateTime) -> Result<UnixTimestamp, LocalTimeConversionError> {
// The actual timestamp/instant is one of these two:
let assuming_outside = self.offset_outside_dst().to_unambiguous_timestamp(d);
let assuming_during = self.offset_during_dst().to_unambiguous_timestamp(d);
// Let’s take Central Europe for example.
// When converted to UTC, `assuming_outside` and `assuming_during` respectively
// represent date-times one hour and two hours before `d`.
// They are one hour apart.
//
// If both timestamps are in the same DST period (during DST or outside)
// then we know for sure which of `assuming_outside` or `assuming_during` is correct.
//
// If they disagree, that means their one hour span contains a DST change:
//
// * 1 am UTC is between `d - 2 hours` and `d - 1 hour`
// * `d - 2 hours` < 1am UTC, and 1am UTC <= `d - 1 hour`
// * `d` < 3 am local time, and 2 am local time <= `d`
// * `d` is between 2 am and 3 am local time.
//
// * In October when clocks go "back", this kind of local time happens twice the same day:
// it’s ambiguous.
// * In March when clocks go "forward", that hour is skipped entirely.
// This kind of local time does not exist. This `d` value might come from buggy code.
match (self.is_in_dst(assuming_outside), self.is_in_dst(assuming_during)) {
(true, true) => Ok(assuming_during),
(false, false) => Ok(assuming_outside),
_ => Err(LocalTimeConversionError { _private: () }),
}
}
}
/// CET (Central European Time) / CEST (Central European Summer Time)
#[derive(Debug, Eq, PartialEq, Copy, Clone, Default)]
pub struct CentralEurope;
impl DaylightSaving for CentralEurope {
    fn offset_outside_dst(&self) -> FixedOffsetFromUtc {
FixedOffsetFromUtc::from_hours_and_minutes(1, 0)
}
fn offset_during_dst(&self) -> FixedOffsetFromUtc {
FixedOffsetFromUtc::from_hours_and_minutes(2, 0)
}
fn is_in_dst(&self, t: UnixTimestamp) -> bool {
use Month::*;
let d = DateTime::from_timestamp(t, Utc);
// Directive 2000/84/EC of the European Parliament and of the Council
// of 19 January 2001 on summer-time arrangements
// http://eur-lex.europa.eu/legal-content/EN/ALL/?uri=CELEX:32000L0084
//
// > Article 1
//
// > For the purposes of this Directive "summer-time period"
// > shall mean the period of the year
// > during which clocks are put forward by 60 minutes compared with the rest of the year.
// >
// > Article 2
// >
// > From 2002 onwards, the summer-time period shall begin, in every Member State,
// > at 1.00 a.m., Greenwich Mean Time, on the last Sunday in March.
// >
// > Article 3
// >
// > From 2002 onwards, the summer-time period shall end, in every Member State,
// > at 1.00 a.m., Greenwich Mean Time, on the last Sunday in October.
if d.month() < March || d.month() > October {
false
} else if d.month() > March && d.month() < October {
true
} else if d.month() == March {
!before_last_sunday_1_am(&d)
} else if d.month() == October {
before_last_sunday_1_am(&d)
} else {
unreachable!()
}
}
}
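// Hedged sketch (not in the original file): mid-July falls inside the EU summer-time period,
// so Central Europe reports UTC+2 for that instant, while mid-January reports UTC+1. The
// timestamps below are 2016-07-15T12:00:00Z and 2016-01-15T12:00:00Z.
#[cfg(test)]
mod central_europe_example {
    use super::*;

    #[test]
    fn summer_and_winter_offsets() {
        let summer = UnixTimestamp(1_468_584_000);
        let winter = UnixTimestamp(1_452_859_200);
        assert!(CentralEurope.is_in_dst(summer));
        assert!(!CentralEurope.is_in_dst(winter));
        assert_eq!(CentralEurope.from_timestamp(summer).hour, 14);
        assert_eq!(CentralEurope.from_timestamp(winter).hour, 13);
    }
}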
fn before_last_sunday_1_am(d: &DateTime<Utc>) -> bool {
let last_sunday = last_of_the_month(d, DayOfTheWeek::Sunday);
d.day() < last_sunday || (
d.day() == last_sunday &&
(d.hour(), d.minute(), d.second()) < (1, 0, 0)
)
}
fn last_of_the_month(d: &DateTime<Utc>, requested_dow: DayOfTheWeek) -> u8 {
let last_day = d.month().length(d.year().into());
let last_dow = NaiveDateTime::new(d.year(), d.month(), last_day, 0, 0, 0).day_of_the_week();
let difference = i32::from(last_dow.to_iso_number()) - i32::from(requested_dow.to_iso_number());
last_day - (positive_rem(difference, 7) as u8)
}
pub fn days_since_unix(d: &NaiveDateTime) -> i32 {
(d.year - 1970) * DAYS_PER_COMMON_YEAR
+ leap_days_since_y0(d.year) - leap_days_since_y0(1970)
+ d.month.days_since_january_1st(d.year.into())
+ i32::from(d.day - 1)
}
/// How many leap days occurred between January of year 0 and January of the given year
/// (in Gregorian calendar).
pub fn leap_days_since_y0(year: i32) -> i32 {
if year > 0 {
let year = year - 1; // Don’t include Feb 29 of the given year, if any.
// +1 because year 0 is a leap year.
((year / 4) - (year / 100) + (year / 400)) + 1
} else {
let year = -year;
-((year / 4) - (year / 100) + (year / 400))
}
}
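// Hedged sketch (not in the original file): year 2000 is a leap year (divisible by 400)
// while 1900 is not, which shows up in how many leap days are gained across each year.
#[cfg(test)]
mod leap_day_example {
    use super::*;

    #[test]
    fn century_rules() {
        assert_eq!(leap_days_since_y0(2001) - leap_days_since_y0(2000), 1);
        assert_eq!(leap_days_since_y0(1901) - leap_days_since_y0(1900), 0);
    }
}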
/// Days between January 1st of year 0 and January 1st of the given year.
fn days_since_d0(year: i32) -> i32 {
year * DAYS_PER_COMMON_YEAR + leap_days_since_y0(year)
}
const SECONDS_PER_MINUTE: i64 = 60;
const SECONDS_PER_HOUR: i64 = SECONDS_PER_MINUTE * 60;
const SECONDS_PER_DAY: i64 = SECONDS_PER_HOUR * 24;
/// The leap year schedule of the Gregorian calendar cycles every 400 years.
/// In one cycle, there are:
///
/// * 100 years multiple of 4
/// * 4 years multiple of 100
/// * 1 year multiple of 400
const LEAP_DAYS_PER_400YEARS: i32 = 100 - 4 + 1;
const DAYS_PER_COMMON_YEAR: i32 = 365;
const DAYS_PER_400YEARS: i32 = DAYS_PER_COMMON_YEAR * 400 + LEAP_DAYS_PER_400YEARS;
// time_zones.rs
use core::fmt;
use super::{NaiveDateTime, DateTime, UnixTimestamp, Month, DayOfTheWeek};
use num::{div_floor, positive_rem};
pub trait TimeZone {
    fn from_timestamp(&self, t: UnixTimestamp) -> NaiveDateTime;
    fn to_timestamp(&self, d: &NaiveDateTime) -> Result<UnixTimestamp, LocalTimeConversionError>;
}
/// When a time zone makes clocks jump forward or back at some instant in time
/// (for example twice a year under daylight-saving time, a.k.a. the summer-time period),
/// some local times become ambiguous or impossible. This error is returned when either:
///
/// * Clocks went back and this local time occurred at multiple instants in time,
/// making its interpretation or conversion ambiguous.
///
/// * Clocks jumped forward and this local time did not occur.
/// It does not represent any real instant in time.
/// It could be argued that a range of local times all represent the same instant,
/// but this library does not implement the conversion that way.
#[derive(Eq, PartialEq)]
pub struct LocalTimeConversionError {
/// Make the type opaque to allow for future extensions
_private: (),
}
impl fmt::Debug for LocalTimeConversionError {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
write!(formatter, "LocalTimeConversionError")
}
}
/// Implemented for time zones where `LocalTimeConversionError` never occurs,
/// namely for `Utc` and `FixedOffsetFromUtc`.
///
/// Any UTC-offset change in a time zone creates local times that either don’t occur or occur twice.
/// `TimeZone::to_timestamp` returns `Err(LocalTimeConversionError)` for such local times.
pub trait UnambiguousTimeZone: TimeZone {
fn to_unambiguous_timestamp(&self, d: &NaiveDateTime) -> UnixTimestamp {
self.to_timestamp(d).unwrap()
}
}
/// The *Coordinated Universal Time* (UTC) time zone.
#[derive(Debug, Eq, PartialEq, Copy, Clone, Default)]
pub struct Utc;
impl UnambiguousTimeZone for Utc {}
impl TimeZone for Utc {
fn from_timestamp(&self, u: UnixTimestamp) -> NaiveDateTime {
let days_since_unix = div_floor(u.0, SECONDS_PER_DAY) as i32;
let days = days_since_unix + days_since_d0(1970);
let year = div_floor(days * 400, DAYS_PER_400YEARS) as i32;
let day_of_the_year = days - days_since_d0(year);
let (month, day) = Month::from_day_of_the_year(day_of_the_year, year.into());
let hour = positive_rem(div_floor(u.0, SECONDS_PER_HOUR), 24) as u8;
let minute = positive_rem(div_floor(u.0, SECONDS_PER_MINUTE), 60) as u8;
let second = positive_rem(u.0, 60) as u8;
NaiveDateTime::new(year, month, day, hour, minute, second)
}
fn to_timestamp(&self, d: &NaiveDateTime) -> Result<UnixTimestamp, LocalTimeConversionError> {
Ok(UnixTimestamp(
i64::from(days_since_unix(d)) * SECONDS_PER_DAY
+ i64::from(d.hour) * SECONDS_PER_HOUR
+ i64::from(d.minute) * SECONDS_PER_MINUTE
+ i64::from(d.second)
))
}
}
/// The offset is typically positive east of Greenwich (longitude 0°), negative west.
///
/// For example, Japan Standard Time is UTC+09:00:
///
/// ```rust
/// use gregor::FixedOffsetFromUtc;
/// let jst = FixedOffsetFromUtc::from_hours_and_minutes(9, 0);
/// ```
#[derive(Debug, Eq, PartialEq, Copy, Clone, Default)]
pub struct FixedOffsetFromUtc {
seconds_ahead_of_utc: i32,
}
impl FixedOffsetFromUtc {
pub fn from_hours_and_minutes(hours: i32, minutes: i32) -> Self {
FixedOffsetFromUtc {
seconds_ahead_of_utc: (hours * 60 + minutes) * 60,
}
}
}
impl UnambiguousTimeZone for FixedOffsetFromUtc {}
impl TimeZone for FixedOffsetFromUtc {
fn from_timestamp(&self, u: UnixTimestamp) -> NaiveDateTime {
// When local time is ahead of UTC (positive offset)
// that instant happened before midnight UTC
// so there are more seconds since then.
// (Add the offset rather than subtract it.)
// Seconds since *this time zone*’s midnight of 1970-01-01.
let seconds = u.0 + i64::from(self.seconds_ahead_of_utc);
// This is not really a Unix timestamp or a UTC date-time,
// but the two errors compensate to give a date-time in this time zone.
Utc.from_timestamp(UnixTimestamp(seconds))
}
fn to_timestamp(&self, d: &NaiveDateTime) -> Result<UnixTimestamp, LocalTimeConversionError> {
// Pretend this is UTC to obtain seconds since *this time zone*’s midnight of 1970-01-01.
let seconds = Utc.to_unambiguous_timestamp(d).0;
        // For positive offsets (ahead of UTC) this is earlier in time than UTC midnight
// (with more seconds), so *subtract* the offset to make a Unix timestamp.
Ok(UnixTimestamp(seconds - i64::from(self.seconds_ahead_of_utc)))
}
}
pub trait DaylightSaving {
fn offset_outside_dst(&self) -> FixedOffsetFromUtc;
fn offset_during_dst(&self) -> FixedOffsetFromUtc;
fn is_in_dst(&self, t: UnixTimestamp) -> bool;
}
impl<Tz: DaylightSaving> TimeZone for Tz {
fn from_timestamp(&self, u: UnixTimestamp) -> NaiveDateTime {
let offset = if self.is_in_dst(u) {
self.offset_during_dst()
} else {
self.offset_outside_dst()
};
offset.from_timestamp(u)
}
fn to_timestamp(&self, d: &NaiveDateTime) -> Result<UnixTimestamp, LocalTimeConversionError> {
// The actual timestamp/instant is one of these two:
let assuming_outside = self.offset_outside_dst().to_unambiguous_timestamp(d);
let assuming_during = self.offset_during_dst().to_unambiguous_timestamp(d);
// Let’s take Central Europe for example.
// When converted to UTC, `assuming_outside` and `assuming_during` respectively
// represent date-times one hour and two hours before `d`.
// They are one hour apart.
//
// If both timestamps are in the same DST period (during DST or outside)
// then we know for sure which of `assuming_outside` or `assuming_during` is correct.
//
// If they disagree, that means their one hour span contains a DST change:
//
// * 1 am UTC is between `d - 2 hours` and `d - 1 hour`
// * `d - 2 hours` < 1am UTC, and 1am UTC <= `d - 1 hour`
// * `d` < 3 am local time, and 2 am local time <= `d`
// * `d` is between 2 am and 3 am local time.
//
// * In October when clocks go "back", this kind of local time happens twice the same day:
// it’s ambiguous.
// * In March when clocks go "forward", that hour is skipped entirely.
// This kind of local time does not exist. This `d` value might come from buggy code.
match (self.is_in_dst(assuming_outside), self.is_in_dst(assuming_during)) {
(true, true) => Ok(assuming_during),
(false, false) => Ok(assuming_outside),
_ => Err(LocalTimeConversionError { _private: () }),
}
}
}
/// CET (Central European Time) / CEST (Central European Summer Time)
#[derive(Debug, Eq, PartialEq, Copy, Clone, Default)]
pub struct CentralEurope;
impl DaylightSaving for CentralEurope {
fn offset_outside_dst(&self) -> FixedOffsetFromUtc {
FixedOffsetFromUtc::from_hours_and_minutes(1, 0)
}
fn offset_during_dst(&self) -> FixedOffsetFromUtc {
FixedOffsetFromUtc::from_hours_and_minutes(2, 0)
}
fn is_in_dst(&self, t: UnixTimestamp) -> bool {
use Month::*;
let d = DateTime::from_timestamp(t, Utc);
// Directive 2000/84/EC of the European Parliament and of the Council
// of 19 January 2001 on summer-time arrangements
// http://eur-lex.europa.eu/legal-content/EN/ALL/?uri=CELEX:32000L0084
//
// > Article 1
//
// > For the purposes of this Directive "summer-time period"
// > shall mean the period of the year
// > during which clocks are put forward by 60 minutes compared with the rest of the year.
// >
// > Article 2
// >
// > From 2002 onwards, the summer-time period shall begin, in every Member State,
// > at 1.00 a.m., Greenwich Mean Time, on the last Sunday in March.
// >
// > Article 3
// >
// > From 2002 onwards, the summer-time period shall end, in every Member State,
// > at 1.00 a.m., Greenwich Mean Time, on the last Sunday in October.
if d.month() < March || d.month() > October {
false
} else if d.month() > March && d.month() < October {
true
} else if d.month() == March {
!before_last_sunday_1_am(&d)
} else if d.month() == October {
before_last_sunday_1_am(&d)
} else {
unreachable!()
}
}
}
fn before_last_sunday_1_am(d: &DateTime<Utc>) -> bool {
let last_sunday = last_of_the_month(d, DayOfTheWeek::Sunday);
d.day() < last_sunday || (
d.day() == last_sunday &&
(d.hour(), d.minute(), d.second()) < (1, 0, 0)
)
}
fn last_of_the_month(d: &DateTime<Utc>, requested_dow: DayOfTheWeek) -> u8 {
let last_day = d.month().length(d.year().into());
let last_dow = NaiveDateTime::new(d.year(), d.month(), last_day, 0, 0, 0).day_of_the_week();
let difference = i32::from(last_dow.to_iso_number()) - i32::from(requested_dow.to_iso_number());
last_day - (positive_rem(difference, 7) as u8)
}
pub fn days_since_unix(d: &NaiveDateTime) -> i32 {
(d.year - 1970) * DAYS_PER_COMMON_YEAR
+ leap_days_since_y0(d.year) - leap_days_since_y0(1970)
+ d.month.days_since_january_1st(d.year.into())
+ i32::from(d.day - 1)
}
/// How many leap days occurred between January of year 0 and January of the given year
/// (in Gregorian calendar).
pub fn leap_days_since_y0(year: i32) -> i32 {
if year > 0 {
let year = year - 1; // Don’t include Feb 29 of the given year, if any.
// +1 because year 0 is a leap year.
((year / 4) - (year / 100) + (year / 400)) + 1
} else {
let year = -year;
-((year / 4) - (year / 100) + (year / 400))
}
}
/// Days between January 1st of year 0 and January 1st of the given year.
fn days_since_d0(year: i32) -> i32 {
year * DAYS_PER_COMMON_YEAR + leap_days_since_y0(year)
}
const SECONDS_PER_MINUTE: i64 = 60;
const SECONDS_PER_HOUR: i64 = SECONDS_PER_MINUTE * 60;
const SECONDS_PER_DAY: i64 = SECONDS_PER_HOUR * 24;
/// The leap year schedule of the Gregorian calendar cycles every 400 years.
/// In one cycle, there are:
///
/// * 100 years multiple of 4
/// * 4 years multiple of 100
/// * 1 year multiple of 400
const LEAP_DAYS_PER_400YEARS: i32 = 100 - 4 + 1;
const DAYS_PER_COMMON_YEAR: i32 = 365;
const DAYS_PER_400YEARS: i32 = DAYS_PER_COMMON_YEAR * 400 + LEAP_DAYS_PER_400YEARS;
// selector.rs
use std::collections::hash_map;
use std::fmt;
use std::mem;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
use std::sync::{Arc, Mutex, Weak};
use std::time::Duration;
use sys;
use sys::fuchsia::{
assert_fuchsia_ready_repr, epoll_event_to_ready, poll_opts_to_wait_async, EventedFd,
EventedFdInner, FuchsiaReady,
};
use zircon;
use zircon::AsHandleRef;
use zircon_sys::zx_handle_t;
use {io, Event, PollOpt, Ready, Token};
/// The kind of registration-- file descriptor or handle.
///
/// The last bit of a token is set to indicate the type of the registration.
#[derive(Copy, Clone, Eq, PartialEq)]
enum RegType {
Fd,
Handle,
}
fn key_from_token_and_type(token: Token, reg_type: RegType) -> io::Result<u64> {
let key = token.0 as u64;
let msb = 1u64 << 63;
if (key & msb)!= 0 {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Most-significant bit of token must remain unset.",
));
}
Ok(match reg_type {
RegType::Fd => key,
RegType::Handle => key | msb,
})
}
fn token_and_type_from_key(key: u64) -> (Token, RegType) {
let msb = 1u64 << 63;
(
Token((key &!msb) as usize),
if (key & msb) == 0 {
RegType::Fd
} else {
RegType::Handle
},
)
}
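// Hedged sketch (not in the original file): the key encoding is reversible, with the
// most-significant bit distinguishing handle registrations from file-descriptor ones.
#[cfg(test)]
mod key_encoding_example {
    use super::*;

    #[test]
    fn round_trip() {
        let key = key_from_token_and_type(Token(42), RegType::Handle).unwrap();
        let (token, reg_type) = token_and_type_from_key(key);
        assert_eq!(token, Token(42));
        assert!(reg_type == RegType::Handle);
    }
}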
/// Each Selector has a globally unique(ish) ID associated with it. This ID
/// gets tracked by `TcpStream`, `TcpListener`, etc... when they are first
/// registered with the `Selector`. If a type that is previously associated with
/// a `Selector` attempts to register itself with a different `Selector`, the
/// operation will return with an error. This matches windows behavior.
static NEXT_ID: AtomicUsize = ATOMIC_USIZE_INIT;
pub struct Selector {
id: usize,
/// Zircon object on which the handles have been registered, and on which events occur
port: Arc<zircon::Port>,
/// Whether or not `tokens_to_rereg` contains any elements. This is a best-effort attempt
/// used to prevent having to lock `tokens_to_rereg` when it is empty.
has_tokens_to_rereg: AtomicBool,
/// List of `Token`s corresponding to registrations that need to be reregistered before the
/// next `port::wait`. This is necessary to provide level-triggered behavior for
/// `Async::repeating` registrations.
///
/// When a level-triggered `Async::repeating` event is seen, its token is added to this list so
/// that it will be reregistered before the next `port::wait` call, making `port::wait` return
/// immediately if the signal was high during the reregistration.
///
/// Note: when used at the same time, the `tokens_to_rereg` lock should be taken out _before_
/// `token_to_fd`.
tokens_to_rereg: Mutex<Vec<Token>>,
/// Map from tokens to weak references to `EventedFdInner`-- a structure describing a
/// file handle, its associated `fdio` object, and its current registration.
token_to_fd: Mutex<hash_map::HashMap<Token, Weak<EventedFdInner>>>,
}
impl Selector {
pub fn new() -> io::Result<Selector> {
// Assertion from fuchsia/ready.rs to make sure that FuchsiaReady's representation is
// compatible with Ready.
assert_fuchsia_ready_repr();
let port = Arc::new(zircon::Port::create(zircon::PortOpts::Default)?);
// offset by 1 to avoid choosing 0 as the id of a selector
let id = NEXT_ID.fetch_add(1, Ordering::Relaxed) + 1;
let has_tokens_to_rereg = AtomicBool::new(false);
let tokens_to_rereg = Mutex::new(Vec::new());
let token_to_fd = Mutex::new(hash_map::HashMap::new());
Ok(Selector {
id: id,
port: port,
has_tokens_to_rereg: has_tokens_to_rereg,
tokens_to_rereg: tokens_to_rereg,
token_to_fd: token_to_fd,
})
}
pub fn id(&self) -> usize {
self.id
}
/// Returns a reference to the underlying port `Arc`.
pub fn port(&self) -> &Arc<zircon::Port> {
&self.port
}
/// Reregisters all registrations pointed to by the `tokens_to_rereg` list
/// if `has_tokens_to_rereg`.
fn reregister_handles(&self) -> io::Result<()> {
// We use `Ordering::Acquire` to make sure that we see all `tokens_to_rereg`
// written before the store using `Ordering::Release`.
if self.has_tokens_to_rereg.load(Ordering::Acquire) {
let mut tokens = self.tokens_to_rereg.lock().unwrap();
let token_to_fd = self.token_to_fd.lock().unwrap();
for token in tokens.drain(0..) {
if let Some(eventedfd) = token_to_fd.get(&token).and_then(|h| h.upgrade())
{
eventedfd.rereg_for_level(&self.port);
}
}
self.has_tokens_to_rereg.store(false, Ordering::Release);
}
Ok(())
}
pub fn select(
&self,
evts: &mut Events,
_awakener: Token,
timeout: Option<Duration>,
) -> io::Result<bool> {
evts.clear();
self.reregister_handles()?;
let deadline = match timeout {
Some(duration) => {
let nanos = duration
.as_secs()
.saturating_mul(1_000_000_000)
.saturating_add(duration.subsec_nanos() as u64);
zircon::deadline_after(nanos)
}
None => zircon::ZX_TIME_INFINITE,
};
let packet = match self.port.wait(deadline) {
Ok(packet) => packet,
Err(zircon::Status::ErrTimedOut) => return Ok(false),
Err(e) => Err(e)?,
};
let observed_signals = match packet.contents() {
zircon::PacketContents::SignalOne(signal_packet) => signal_packet.observed(),
zircon::PacketContents::SignalRep(signal_packet) => signal_packet.observed(),
zircon::PacketContents::User(_user_packet) => {
// User packets are only ever sent by an Awakener
return Ok(true);
}
};
let key = packet.key();
let (token, reg_type) = token_and_type_from_key(key);
match reg_type {
RegType::Handle => {
// We can return immediately-- no lookup or registration necessary.
evts.events
.push(Event::new(Ready::from(observed_signals), token));
Ok(false)
}
RegType::Fd => {
// Convert the signals to epoll events using __fdio_wait_end,
// and add to reregistration list if necessary.
let events: u32;
{
let handle = if let Some(handle) = self
.token_to_fd
.lock()
.unwrap()
.get(&token)
.and_then(|h| h.upgrade())
{
handle
} else {
// This handle is apparently in the process of removal.
// It has been removed from the list, but port_cancel has not been called.
return Ok(false);
};
events = unsafe {
let mut events: u32 = mem::uninitialized();
sys::fuchsia::sys::__fdio_wait_end(
handle.fdio(),
observed_signals,
&mut events,
);
events
};
// If necessary, queue to be reregistered before next port_await
let needs_to_rereg = {
let registration_lock = handle.registration().lock().unwrap();
registration_lock
.as_ref()
.and_then(|r| r.rereg_signals())
.is_some()
};
if needs_to_rereg {
let mut tokens_to_rereg_lock =
self.tokens_to_rereg.lock().unwrap();
tokens_to_rereg_lock.push(token);
// We use `Ordering::Release` to make sure that we see all `tokens_to_rereg`
// written before the store.
self.has_tokens_to_rereg.store(true, Ordering::Release);
}
}
                evts.events
                    .push(Event::new(epoll_event_to_ready(events), token));
Ok(false)
}
}
}
/// Register event interests for the given IO handle with the OS
pub fn register_fd(
&self,
handle: &zircon::Handle,
fd: &EventedFd,
token: Token,
signals: zircon::Signals,
poll_opts: PollOpt,
) -> io::Result<()> {
{
let mut token_to_fd = self.token_to_fd.lock().unwrap();
match token_to_fd.entry(token) {
hash_map::Entry::Occupied(_) => {
return Err(io::Error::new(
io::ErrorKind::AlreadyExists,
"Attempted to register a filedescriptor on an existing token.",
))
}
hash_map::Entry::Vacant(slot) => slot.insert(Arc::downgrade(&fd.inner)),
};
}
let wait_async_opts = poll_opts_to_wait_async(poll_opts);
let wait_res = handle.wait_async_handle(
&self.port,
token.0 as u64,
signals,
wait_async_opts,
);
if wait_res.is_err() {
self.token_to_fd.lock().unwrap().remove(&token);
}
Ok(wait_res?)
}
/// Deregister event interests for the given IO handle with the OS
pub fn deregister_fd(&self, handle: &zircon::Handle, token: Token) -> io::Result<()> {
self.token_to_fd.lock().unwrap().remove(&token);
// We ignore NotFound errors since oneshots are automatically deregistered,
// but mio will attempt to deregister them manually.
self.port
.cancel(&*handle, token.0 as u64)
.map_err(io::Error::from)
.or_else(|e| {
if e.kind() == io::ErrorKind::NotFound {
Ok(())
} else {
Err(e)
}
})
}
pub fn register_handle(
&self,
handle: zx_handle_t,
token: Token,
interests: Ready,
poll_opts: PollOpt,
) -> io::Result<()> {
if poll_opts.is_level() &&!poll_opts.is_oneshot() {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Repeated level-triggered events are not supported on Fuchsia handles.",
));
}
let temp_handle = unsafe { zircon::Handle::from_raw(handle) };
let res = temp_handle.wait_async_handle(
&self.port,
key_from_token_and_type(token, RegType::Handle)?,
FuchsiaReady::from(interests).into_zx_signals(),
poll_opts_to_wait_async(poll_opts),
);
mem::forget(temp_handle);
Ok(res?)
}
pub fn deregister_handle(&self, handle: zx_handle_t, token: Token) -> io::Result<()> {
let temp_handle = unsafe { zircon::Handle::from_raw(handle) };
let res = self.port.cancel(
&temp_handle,
key_from_token_and_type(token, RegType::Handle)?,
);
mem::forget(temp_handle);
Ok(res?)
}
}
pub struct Events {
events: Vec<Event>,
}
impl Events {
pub fn with_capacity(_u: usize) -> Events {
// The Fuchsia selector only handles one event at a time,
// so we ignore the default capacity and set it to one.
Events {
events: Vec::with_capacity(1),
}
}
pub fn len(&self) -> usize {
self.events.len()
}
pub fn capacity(&self) -> usize {
self.events.capacity()
}
pub fn is_empty(&self) -> bool {
self.events.is_empty()
}
pub fn get(&self, idx: usize) -> Option<Event> {
self.events.get(idx).map(|e| *e)
}
pub fn push_event(&mut self, event: Event) {
self.events.push(event)
}
pub fn clear(&mut self) {
        self.events.drain(0..);
}
}
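// Hedged sketch (not in the original file): `Events` is a thin wrapper over a `Vec` that the
// selector fills with at most one event per call to `select`.
#[cfg(test)]
mod events_example {
    use super::*;

    #[test]
    fn push_and_get() {
        let mut events = Events::with_capacity(16);
        assert!(events.is_empty());
        events.push_event(Event::new(Ready::readable(), Token(1)));
        assert_eq!(events.len(), 1);
        assert_eq!(events.get(0).map(|e| e.token()), Some(Token(1)));
    }
}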
impl fmt::Debug for Events {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_struct("Events")
.field("len", &self.len())
.finish()
}
}
|
// selector.rs
use std::collections::hash_map;
use std::fmt;
use std::mem;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
use std::sync::{Arc, Mutex, Weak};
use std::time::Duration;
use sys;
use sys::fuchsia::{
assert_fuchsia_ready_repr, epoll_event_to_ready, poll_opts_to_wait_async, EventedFd,
EventedFdInner, FuchsiaReady,
};
use zircon;
use zircon::AsHandleRef;
use zircon_sys::zx_handle_t;
use {io, Event, PollOpt, Ready, Token};
/// The kind of registration-- file descriptor or handle.
///
/// The last bit of a token is set to indicate the type of the registration.
#[derive(Copy, Clone, Eq, PartialEq)]
enum RegType {
Fd,
Handle,
}
fn key_from_token_and_type(token: Token, reg_type: RegType) -> io::Result<u64> {
let key = token.0 as u64;
let msb = 1u64 << 63;
if (key & msb)!= 0 {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Most-significant bit of token must remain unset.",
));
}
Ok(match reg_type {
RegType::Fd => key,
RegType::Handle => key | msb,
})
}
fn token_and_type_from_key(key: u64) -> (Token, RegType) {
let msb = 1u64 << 63;
(
Token((key &!msb) as usize),
if (key & msb) == 0 {
RegType::Fd
} else {
RegType::Handle
},
)
}
/// Each Selector has a globally unique(ish) ID associated with it. This ID
/// gets tracked by `TcpStream`, `TcpListener`, etc... when they are first
/// registered with the `Selector`. If a type that is previously associated with
/// a `Selector` attempts to register itself with a different `Selector`, the
/// operation will return with an error. This matches windows behavior.
static NEXT_ID: AtomicUsize = ATOMIC_USIZE_INIT;
pub struct Selector {
id: usize,
/// Zircon object on which the handles have been registered, and on which events occur
port: Arc<zircon::Port>,
/// Whether or not `tokens_to_rereg` contains any elements. This is a best-effort attempt
/// used to prevent having to lock `tokens_to_rereg` when it is empty.
has_tokens_to_rereg: AtomicBool,
/// List of `Token`s corresponding to registrations that need to be reregistered before the
/// next `port::wait`. This is necessary to provide level-triggered behavior for
/// `Async::repeating` registrations.
///
/// When a level-triggered `Async::repeating` event is seen, its token is added to this list so
/// that it will be reregistered before the next `port::wait` call, making `port::wait` return
/// immediately if the signal was high during the reregistration.
///
/// Note: when used at the same time, the `tokens_to_rereg` lock should be taken out _before_
/// `token_to_fd`.
tokens_to_rereg: Mutex<Vec<Token>>,
/// Map from tokens to weak references to `EventedFdInner`-- a structure describing a
/// file handle, its associated `fdio` object, and its current registration.
token_to_fd: Mutex<hash_map::HashMap<Token, Weak<EventedFdInner>>>,
}
impl Selector {
pub fn new() -> io::Result<Selector> {
// Assertion from fuchsia/ready.rs to make sure that FuchsiaReady's representation is
// compatible with Ready.
assert_fuchsia_ready_repr();
let port = Arc::new(zircon::Port::create(zircon::PortOpts::Default)?);
// offset by 1 to avoid choosing 0 as the id of a selector
let id = NEXT_ID.fetch_add(1, Ordering::Relaxed) + 1;
let has_tokens_to_rereg = AtomicBool::new(false);
let tokens_to_rereg = Mutex::new(Vec::new());
let token_to_fd = Mutex::new(hash_map::HashMap::new());
Ok(Selector {
id: id,
port: port,
has_tokens_to_rereg: has_tokens_to_rereg,
tokens_to_rereg: tokens_to_rereg,
token_to_fd: token_to_fd,
})
}
pub fn id(&self) -> usize {
self.id
}
/// Returns a reference to the underlying port `Arc`.
    pub fn port(&self) -> &Arc<zircon::Port> {
&self.port
}
/// Reregisters all registrations pointed to by the `tokens_to_rereg` list
/// if `has_tokens_to_rereg`.
fn reregister_handles(&self) -> io::Result<()> {
// We use `Ordering::Acquire` to make sure that we see all `tokens_to_rereg`
// written before the store using `Ordering::Release`.
if self.has_tokens_to_rereg.load(Ordering::Acquire) {
let mut tokens = self.tokens_to_rereg.lock().unwrap();
let token_to_fd = self.token_to_fd.lock().unwrap();
for token in tokens.drain(0..) {
if let Some(eventedfd) = token_to_fd.get(&token).and_then(|h| h.upgrade())
{
eventedfd.rereg_for_level(&self.port);
}
}
self.has_tokens_to_rereg.store(false, Ordering::Release);
}
Ok(())
}
pub fn select(
&self,
evts: &mut Events,
_awakener: Token,
timeout: Option<Duration>,
) -> io::Result<bool> {
evts.clear();
self.reregister_handles()?;
let deadline = match timeout {
Some(duration) => {
let nanos = duration
.as_secs()
.saturating_mul(1_000_000_000)
.saturating_add(duration.subsec_nanos() as u64);
zircon::deadline_after(nanos)
}
None => zircon::ZX_TIME_INFINITE,
};
let packet = match self.port.wait(deadline) {
Ok(packet) => packet,
Err(zircon::Status::ErrTimedOut) => return Ok(false),
Err(e) => Err(e)?,
};
let observed_signals = match packet.contents() {
zircon::PacketContents::SignalOne(signal_packet) => signal_packet.observed(),
zircon::PacketContents::SignalRep(signal_packet) => signal_packet.observed(),
zircon::PacketContents::User(_user_packet) => {
// User packets are only ever sent by an Awakener
return Ok(true);
}
};
let key = packet.key();
let (token, reg_type) = token_and_type_from_key(key);
match reg_type {
RegType::Handle => {
// We can return immediately-- no lookup or registration necessary.
evts.events
.push(Event::new(Ready::from(observed_signals), token));
Ok(false)
}
RegType::Fd => {
// Convert the signals to epoll events using __fdio_wait_end,
// and add to reregistration list if necessary.
let events: u32;
{
let handle = if let Some(handle) = self
.token_to_fd
.lock()
.unwrap()
.get(&token)
.and_then(|h| h.upgrade())
{
handle
} else {
// This handle is apparently in the process of removal.
// It has been removed from the list, but port_cancel has not been called.
return Ok(false);
};
events = unsafe {
let mut events: u32 = mem::uninitialized();
sys::fuchsia::sys::__fdio_wait_end(
handle.fdio(),
observed_signals,
&mut events,
);
events
};
// If necessary, queue to be reregistered before next port_await
let needs_to_rereg = {
let registration_lock = handle.registration().lock().unwrap();
registration_lock
.as_ref()
.and_then(|r| r.rereg_signals())
.is_some()
};
if needs_to_rereg {
let mut tokens_to_rereg_lock =
self.tokens_to_rereg.lock().unwrap();
tokens_to_rereg_lock.push(token);
// We use `Ordering::Release` to make sure that we see all `tokens_to_rereg`
// written before the store.
self.has_tokens_to_rereg.store(true, Ordering::Release);
}
}
evts.events
.push(Event::new(epoll_event_to_ready(events), token));
Ok(false)
}
}
}
/// Register event interests for the given IO handle with the OS
pub fn register_fd(
&self,
handle: &zircon::Handle,
fd: &EventedFd,
token: Token,
signals: zircon::Signals,
poll_opts: PollOpt,
) -> io::Result<()> {
{
let mut token_to_fd = self.token_to_fd.lock().unwrap();
match token_to_fd.entry(token) {
hash_map::Entry::Occupied(_) => {
return Err(io::Error::new(
io::ErrorKind::AlreadyExists,
"Attempted to register a filedescriptor on an existing token.",
))
}
hash_map::Entry::Vacant(slot) => slot.insert(Arc::downgrade(&fd.inner)),
};
}
let wait_async_opts = poll_opts_to_wait_async(poll_opts);
let wait_res = handle.wait_async_handle(
&self.port,
token.0 as u64,
signals,
wait_async_opts,
);
if wait_res.is_err() {
self.token_to_fd.lock().unwrap().remove(&token);
}
Ok(wait_res?)
}
/// Deregister event interests for the given IO handle with the OS
pub fn deregister_fd(&self, handle: &zircon::Handle, token: Token) -> io::Result<()> {
self.token_to_fd.lock().unwrap().remove(&token);
// We ignore NotFound errors since oneshots are automatically deregistered,
// but mio will attempt to deregister them manually.
self.port
.cancel(&*handle, token.0 as u64)
.map_err(io::Error::from)
.or_else(|e| {
if e.kind() == io::ErrorKind::NotFound {
Ok(())
} else {
Err(e)
}
})
}
pub fn register_handle(
&self,
handle: zx_handle_t,
token: Token,
interests: Ready,
poll_opts: PollOpt,
) -> io::Result<()> {
if poll_opts.is_level() &&!poll_opts.is_oneshot() {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Repeated level-triggered events are not supported on Fuchsia handles.",
));
}
let temp_handle = unsafe { zircon::Handle::from_raw(handle) };
let res = temp_handle.wait_async_handle(
&self.port,
key_from_token_and_type(token, RegType::Handle)?,
FuchsiaReady::from(interests).into_zx_signals(),
poll_opts_to_wait_async(poll_opts),
);
mem::forget(temp_handle);
Ok(res?)
}
pub fn deregister_handle(&self, handle: zx_handle_t, token: Token) -> io::Result<()> {
let temp_handle = unsafe { zircon::Handle::from_raw(handle) };
let res = self.port.cancel(
&temp_handle,
key_from_token_and_type(token, RegType::Handle)?,
);
mem::forget(temp_handle);
Ok(res?)
}
}
pub struct Events {
events: Vec<Event>,
}
impl Events {
pub fn with_capacity(_u: usize) -> Events {
// The Fuchsia selector only handles one event at a time,
// so we ignore the default capacity and set it to one.
Events {
events: Vec::with_capacity(1),
}
}
pub fn len(&self) -> usize {
self.events.len()
}
pub fn capacity(&self) -> usize {
self.events.capacity()
}
pub fn is_empty(&self) -> bool {
self.events.is_empty()
}
pub fn get(&self, idx: usize) -> Option<Event> {
self.events.get(idx).map(|e| *e)
}
pub fn push_event(&mut self, event: Event) {
self.events.push(event)
}
pub fn clear(&mut self) {
        self.events.drain(0..);
}
}
impl fmt::Debug for Events {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_struct("Events")
.field("len", &self.len())
.finish()
}
}
// selector.rs
use std::collections::hash_map;
use std::fmt;
use std::mem;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
use std::sync::{Arc, Mutex, Weak};
use std::time::Duration;
use sys;
use sys::fuchsia::{
assert_fuchsia_ready_repr, epoll_event_to_ready, poll_opts_to_wait_async, EventedFd,
EventedFdInner, FuchsiaReady,
};
use zircon;
use zircon::AsHandleRef;
use zircon_sys::zx_handle_t;
use {io, Event, PollOpt, Ready, Token};
/// The kind of registration-- file descriptor or handle.
///
/// The last bit of a token is set to indicate the type of the registration.
#[derive(Copy, Clone, Eq, PartialEq)]
enum RegType {
Fd,
Handle,
}
fn key_from_token_and_type(token: Token, reg_type: RegType) -> io::Result<u64> {
let key = token.0 as u64;
let msb = 1u64 << 63;
if (key & msb)!= 0 {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Most-significant bit of token must remain unset.",
));
}
Ok(match reg_type {
RegType::Fd => key,
RegType::Handle => key | msb,
})
}
fn token_and_type_from_key(key: u64) -> (Token, RegType) {
let msb = 1u64 << 63;
(
Token((key &!msb) as usize),
if (key & msb) == 0 {
RegType::Fd
} else {
RegType::Handle
},
)
}
/// Each Selector has a globally unique(ish) ID associated with it. This ID
/// gets tracked by `TcpStream`, `TcpListener`, etc... when they are first
/// registered with the `Selector`. If a type that is previously associated with
/// a `Selector` attempts to register itself with a different `Selector`, the
/// operation will return with an error. This matches windows behavior.
static NEXT_ID: AtomicUsize = ATOMIC_USIZE_INIT;
pub struct Selector {
id: usize,
/// Zircon object on which the handles have been registered, and on which events occur
port: Arc<zircon::Port>,
/// Whether or not `tokens_to_rereg` contains any elements. This is a best-effort attempt
/// used to prevent having to lock `tokens_to_rereg` when it is empty.
has_tokens_to_rereg: AtomicBool,
/// List of `Token`s corresponding to registrations that need to be reregistered before the
/// next `port::wait`. This is necessary to provide level-triggered behavior for
/// `Async::repeating` registrations.
///
/// When a level-triggered `Async::repeating` event is seen, its token is added to this list so
/// that it will be reregistered before the next `port::wait` call, making `port::wait` return
/// immediately if the signal was high during the reregistration.
///
/// Note: when used at the same time, the `tokens_to_rereg` lock should be taken out _before_
/// `token_to_fd`.
tokens_to_rereg: Mutex<Vec<Token>>,
/// Map from tokens to weak references to `EventedFdInner`-- a structure describing a
/// file handle, its associated `fdio` object, and its current registration.
token_to_fd: Mutex<hash_map::HashMap<Token, Weak<EventedFdInner>>>,
}
impl Selector {
pub fn new() -> io::Result<Selector> {
// Assertion from fuchsia/ready.rs to make sure that FuchsiaReady's representation is
// compatible with Ready.
assert_fuchsia_ready_repr();
let port = Arc::new(zircon::Port::create(zircon::PortOpts::Default)?);
// offset by 1 to avoid choosing 0 as the id of a selector
let id = NEXT_ID.fetch_add(1, Ordering::Relaxed) + 1;
let has_tokens_to_rereg = AtomicBool::new(false);
let tokens_to_rereg = Mutex::new(Vec::new());
let token_to_fd = Mutex::new(hash_map::HashMap::new());
Ok(Selector {
id: id,
port: port,
has_tokens_to_rereg: has_tokens_to_rereg,
tokens_to_rereg: tokens_to_rereg,
token_to_fd: token_to_fd,
})
}
pub fn id(&self) -> usize {
self.id
}
/// Returns a reference to the underlying port `Arc`.
pub fn port(&self) -> &Arc<zircon::Port> {
&self.port
}
/// Reregisters all registrations pointed to by the `tokens_to_rereg` list
/// if `has_tokens_to_rereg`.
fn reregister_handles(&self) -> io::Result<()> {
// We use `Ordering::Acquire` to make sure that we see all `tokens_to_rereg`
// written before the store using `Ordering::Release`.
if self.has_tokens_to_rereg.load(Ordering::Acquire) {
let mut tokens = self.tokens_to_rereg.lock().unwrap();
let token_to_fd = self.token_to_fd.lock().unwrap();
for token in tokens.drain(0..) {
if let Some(eventedfd) = token_to_fd.get(&token).and_then(|h| h.upgrade())
{
eventedfd.rereg_for_level(&self.port);
}
}
self.has_tokens_to_rereg.store(false, Ordering::Release);
}
Ok(())
}
pub fn select(
&self,
evts: &mut Events,
_awakener: Token,
timeout: Option<Duration>,
) -> io::Result<bool> {
evts.clear();
self.reregister_handles()?;
let deadline = match timeout {
            Some(duration) => {
                let nanos = duration
                    .as_secs()
                    .saturating_mul(1_000_000_000)
                    .saturating_add(duration.subsec_nanos() as u64);
                zircon::deadline_after(nanos)
            }
None => zircon::ZX_TIME_INFINITE,
};
let packet = match self.port.wait(deadline) {
Ok(packet) => packet,
Err(zircon::Status::ErrTimedOut) => return Ok(false),
Err(e) => Err(e)?,
};
let observed_signals = match packet.contents() {
zircon::PacketContents::SignalOne(signal_packet) => signal_packet.observed(),
zircon::PacketContents::SignalRep(signal_packet) => signal_packet.observed(),
zircon::PacketContents::User(_user_packet) => {
// User packets are only ever sent by an Awakener
return Ok(true);
}
};
let key = packet.key();
let (token, reg_type) = token_and_type_from_key(key);
match reg_type {
RegType::Handle => {
// We can return immediately-- no lookup or registration necessary.
evts.events
.push(Event::new(Ready::from(observed_signals), token));
Ok(false)
}
RegType::Fd => {
// Convert the signals to epoll events using __fdio_wait_end,
// and add to reregistration list if necessary.
let events: u32;
{
let handle = if let Some(handle) = self
.token_to_fd
.lock()
.unwrap()
.get(&token)
.and_then(|h| h.upgrade())
{
handle
} else {
// This handle is apparently in the process of removal.
// It has been removed from the list, but port_cancel has not been called.
return Ok(false);
};
events = unsafe {
                        // Zero-initialise instead of mem::uninitialized(): the value is
                        // always overwritten by __fdio_wait_end below.
                        let mut events: u32 = 0;
sys::fuchsia::sys::__fdio_wait_end(
handle.fdio(),
observed_signals,
&mut events,
);
events
};
// If necessary, queue to be reregistered before next port_await
let needs_to_rereg = {
let registration_lock = handle.registration().lock().unwrap();
registration_lock
.as_ref()
.and_then(|r| r.rereg_signals())
.is_some()
};
if needs_to_rereg {
let mut tokens_to_rereg_lock =
self.tokens_to_rereg.lock().unwrap();
tokens_to_rereg_lock.push(token);
// We use `Ordering::Release` to make sure that we see all `tokens_to_rereg`
// written before the store.
self.has_tokens_to_rereg.store(true, Ordering::Release);
}
}
evts.events
.push(Event::new(epoll_event_to_ready(events), token));
Ok(false)
}
}
}
/// Register event interests for the given IO handle with the OS
pub fn register_fd(
&self,
handle: &zircon::Handle,
fd: &EventedFd,
token: Token,
signals: zircon::Signals,
poll_opts: PollOpt,
) -> io::Result<()> {
{
let mut token_to_fd = self.token_to_fd.lock().unwrap();
match token_to_fd.entry(token) {
hash_map::Entry::Occupied(_) => {
return Err(io::Error::new(
io::ErrorKind::AlreadyExists,
"Attempted to register a filedescriptor on an existing token.",
))
}
hash_map::Entry::Vacant(slot) => slot.insert(Arc::downgrade(&fd.inner)),
};
}
let wait_async_opts = poll_opts_to_wait_async(poll_opts);
let wait_res = handle.wait_async_handle(
&self.port,
token.0 as u64,
signals,
wait_async_opts,
);
if wait_res.is_err() {
self.token_to_fd.lock().unwrap().remove(&token);
}
Ok(wait_res?)
}
/// Deregister event interests for the given IO handle with the OS
pub fn deregister_fd(&self, handle: &zircon::Handle, token: Token) -> io::Result<()> {
self.token_to_fd.lock().unwrap().remove(&token);
// We ignore NotFound errors since oneshots are automatically deregistered,
// but mio will attempt to deregister them manually.
self.port
.cancel(&*handle, token.0 as u64)
.map_err(io::Error::from)
.or_else(|e| {
if e.kind() == io::ErrorKind::NotFound {
Ok(())
} else {
Err(e)
}
})
}
pub fn register_handle(
&self,
handle: zx_handle_t,
token: Token,
interests: Ready,
poll_opts: PollOpt,
) -> io::Result<()> {
        if poll_opts.is_level() && !poll_opts.is_oneshot() {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Repeated level-triggered events are not supported on Fuchsia handles.",
));
}
let temp_handle = unsafe { zircon::Handle::from_raw(handle) };
let res = temp_handle.wait_async_handle(
&self.port,
key_from_token_and_type(token, RegType::Handle)?,
FuchsiaReady::from(interests).into_zx_signals(),
poll_opts_to_wait_async(poll_opts),
);
mem::forget(temp_handle);
Ok(res?)
}
pub fn deregister_handle(&self, handle: zx_handle_t, token: Token) -> io::Result<()> {
let temp_handle = unsafe { zircon::Handle::from_raw(handle) };
let res = self.port.cancel(
&temp_handle,
key_from_token_and_type(token, RegType::Handle)?,
);
mem::forget(temp_handle);
Ok(res?)
}
}
pub struct Events {
events: Vec<Event>,
}
impl Events {
pub fn with_capacity(_u: usize) -> Events {
// The Fuchsia selector only handles one event at a time,
// so we ignore the default capacity and set it to one.
Events {
events: Vec::with_capacity(1),
}
}
pub fn len(&self) -> usize {
self.events.len()
}
pub fn capacity(&self) -> usize {
self.events.capacity()
}
pub fn is_empty(&self) -> bool {
self.events.is_empty()
}
pub fn get(&self, idx: usize) -> Option<Event> {
self.events.get(idx).map(|e| *e)
}
pub fn push_event(&mut self, event: Event) {
self.events.push(event)
}
pub fn clear(&mut self) {
        self.events.drain(0..);
}
}
impl fmt::Debug for Events {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_struct("Events")
.field("len", &self.len())
.finish()
}
}
| {
let nanos = duration
.as_secs()
.saturating_mul(1_000_000_000)
.saturating_add(duration.subsec_nanos() as u64);
zircon::deadline_after(nanos)
} | conditional_block |
selector.rs | use std::collections::hash_map;
use std::fmt;
use std::mem;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
use std::sync::{Arc, Mutex, Weak};
use std::time::Duration;
use sys;
use sys::fuchsia::{
assert_fuchsia_ready_repr, epoll_event_to_ready, poll_opts_to_wait_async, EventedFd,
EventedFdInner, FuchsiaReady,
};
use zircon;
use zircon::AsHandleRef;
use zircon_sys::zx_handle_t;
use {io, Event, PollOpt, Ready, Token};
/// The kind of registration-- file descriptor or handle.
///
/// The last bit of a token is set to indicate the type of the registration.
#[derive(Copy, Clone, Eq, PartialEq)]
enum RegType {
Fd,
Handle,
}
fn key_from_token_and_type(token: Token, reg_type: RegType) -> io::Result<u64> {
let key = token.0 as u64;
let msb = 1u64 << 63;
    if (key & msb) != 0 {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Most-significant bit of token must remain unset.",
));
}
Ok(match reg_type {
RegType::Fd => key,
RegType::Handle => key | msb,
})
}
fn token_and_type_from_key(key: u64) -> (Token, RegType) {
let msb = 1u64 << 63;
(
        Token((key & !msb) as usize),
if (key & msb) == 0 {
RegType::Fd
} else {
RegType::Handle
},
)
}
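// Editor's sketch (not part of the original mio source): a quick check of how the
// most-significant-bit key encoding above round-trips. It only uses the `Token` and
// `RegType` items already in scope here; the `1usize << 63` literal assumes a 64-bit
// target.
#[cfg(test)]
mod key_encoding_sketch {
    use super::*;

    #[test]
    fn key_round_trips_and_rejects_msb_tokens() {
        let key = key_from_token_and_type(Token(42), RegType::Handle).unwrap();
        let (token, reg_type) = token_and_type_from_key(key);
        assert_eq!(token, Token(42));
        assert!(reg_type == RegType::Handle);
        // A token that already uses the most-significant bit cannot be encoded.
        assert!(key_from_token_and_type(Token(1usize << 63), RegType::Fd).is_err());
    }
}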
/// Each Selector has a globally unique(ish) ID associated with it. This ID
/// gets tracked by `TcpStream`, `TcpListener`, etc... when they are first
/// registered with the `Selector`. If a type that is previously associated with
/// a `Selector` attempts to register itself with a different `Selector`, the
/// operation will return with an error. This matches windows behavior.
static NEXT_ID: AtomicUsize = ATOMIC_USIZE_INIT;
pub struct Selector {
id: usize,
/// Zircon object on which the handles have been registered, and on which events occur
port: Arc<zircon::Port>,
/// Whether or not `tokens_to_rereg` contains any elements. This is a best-effort attempt
/// used to prevent having to lock `tokens_to_rereg` when it is empty.
has_tokens_to_rereg: AtomicBool,
/// List of `Token`s corresponding to registrations that need to be reregistered before the
/// next `port::wait`. This is necessary to provide level-triggered behavior for
/// `Async::repeating` registrations.
///
/// When a level-triggered `Async::repeating` event is seen, its token is added to this list so
/// that it will be reregistered before the next `port::wait` call, making `port::wait` return
/// immediately if the signal was high during the reregistration.
///
/// Note: when used at the same time, the `tokens_to_rereg` lock should be taken out _before_
/// `token_to_fd`.
tokens_to_rereg: Mutex<Vec<Token>>,
/// Map from tokens to weak references to `EventedFdInner`-- a structure describing a
/// file handle, its associated `fdio` object, and its current registration.
token_to_fd: Mutex<hash_map::HashMap<Token, Weak<EventedFdInner>>>,
}
impl Selector {
pub fn new() -> io::Result<Selector> {
// Assertion from fuchsia/ready.rs to make sure that FuchsiaReady's representation is
// compatible with Ready.
assert_fuchsia_ready_repr();
let port = Arc::new(zircon::Port::create(zircon::PortOpts::Default)?);
// offset by 1 to avoid choosing 0 as the id of a selector
let id = NEXT_ID.fetch_add(1, Ordering::Relaxed) + 1;
let has_tokens_to_rereg = AtomicBool::new(false);
let tokens_to_rereg = Mutex::new(Vec::new());
let token_to_fd = Mutex::new(hash_map::HashMap::new());
Ok(Selector {
id: id,
port: port,
has_tokens_to_rereg: has_tokens_to_rereg,
tokens_to_rereg: tokens_to_rereg,
token_to_fd: token_to_fd,
})
}
pub fn id(&self) -> usize {
self.id
}
/// Returns a reference to the underlying port `Arc`.
pub fn port(&self) -> &Arc<zircon::Port> {
&self.port
}
/// Reregisters all registrations pointed to by the `tokens_to_rereg` list
/// if `has_tokens_to_rereg`.
fn reregister_handles(&self) -> io::Result<()> {
// We use `Ordering::Acquire` to make sure that we see all `tokens_to_rereg`
// written before the store using `Ordering::Release`.
if self.has_tokens_to_rereg.load(Ordering::Acquire) {
let mut tokens = self.tokens_to_rereg.lock().unwrap();
let token_to_fd = self.token_to_fd.lock().unwrap();
for token in tokens.drain(0..) {
if let Some(eventedfd) = token_to_fd.get(&token).and_then(|h| h.upgrade())
{
eventedfd.rereg_for_level(&self.port);
}
}
self.has_tokens_to_rereg.store(false, Ordering::Release);
}
Ok(())
}
pub fn select(
&self,
evts: &mut Events,
_awakener: Token,
timeout: Option<Duration>,
) -> io::Result<bool> {
evts.clear();
self.reregister_handles()?;
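        // Editor's note (not in the original source): a 1.5 s timeout becomes
        // 1_500_000_000 ns and is turned into an absolute deadline relative to now by
        // zircon::deadline_after; a timeout of None waits forever (ZX_TIME_INFINITE).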
let deadline = match timeout {
Some(duration) => {
let nanos = duration
.as_secs()
.saturating_mul(1_000_000_000)
.saturating_add(duration.subsec_nanos() as u64);
zircon::deadline_after(nanos)
}
None => zircon::ZX_TIME_INFINITE,
};
let packet = match self.port.wait(deadline) {
Ok(packet) => packet,
Err(zircon::Status::ErrTimedOut) => return Ok(false),
Err(e) => Err(e)?,
};
let observed_signals = match packet.contents() {
zircon::PacketContents::SignalOne(signal_packet) => signal_packet.observed(),
zircon::PacketContents::SignalRep(signal_packet) => signal_packet.observed(),
zircon::PacketContents::User(_user_packet) => {
// User packets are only ever sent by an Awakener
return Ok(true);
}
};
let key = packet.key();
let (token, reg_type) = token_and_type_from_key(key);
match reg_type {
RegType::Handle => {
// We can return immediately-- no lookup or registration necessary.
evts.events
.push(Event::new(Ready::from(observed_signals), token));
Ok(false)
}
RegType::Fd => {
// Convert the signals to epoll events using __fdio_wait_end,
// and add to reregistration list if necessary.
let events: u32;
{
let handle = if let Some(handle) = self
.token_to_fd
.lock()
.unwrap()
.get(&token)
.and_then(|h| h.upgrade())
{
handle
} else {
// This handle is apparently in the process of removal.
// It has been removed from the list, but port_cancel has not been called.
return Ok(false);
};
events = unsafe {
                        // Zero-initialise instead of mem::uninitialized(): the value is
                        // always overwritten by __fdio_wait_end below.
                        let mut events: u32 = 0;
sys::fuchsia::sys::__fdio_wait_end(
handle.fdio(),
observed_signals,
&mut events,
);
events
};
// If necessary, queue to be reregistered before next port_await
let needs_to_rereg = {
let registration_lock = handle.registration().lock().unwrap();
registration_lock
.as_ref()
.and_then(|r| r.rereg_signals())
.is_some()
};
if needs_to_rereg {
let mut tokens_to_rereg_lock =
self.tokens_to_rereg.lock().unwrap();
tokens_to_rereg_lock.push(token);
// We use `Ordering::Release` to make sure that we see all `tokens_to_rereg`
// written before the store.
self.has_tokens_to_rereg.store(true, Ordering::Release);
}
}
evts.events
.push(Event::new(epoll_event_to_ready(events), token));
Ok(false)
}
}
}
/// Register event interests for the given IO handle with the OS
pub fn register_fd(
&self,
handle: &zircon::Handle,
fd: &EventedFd,
token: Token,
signals: zircon::Signals,
poll_opts: PollOpt,
) -> io::Result<()> {
{
let mut token_to_fd = self.token_to_fd.lock().unwrap();
match token_to_fd.entry(token) {
hash_map::Entry::Occupied(_) => {
return Err(io::Error::new(
io::ErrorKind::AlreadyExists,
"Attempted to register a filedescriptor on an existing token.",
))
}
hash_map::Entry::Vacant(slot) => slot.insert(Arc::downgrade(&fd.inner)),
};
}
let wait_async_opts = poll_opts_to_wait_async(poll_opts);
let wait_res = handle.wait_async_handle(
&self.port,
token.0 as u64,
signals,
wait_async_opts,
);
if wait_res.is_err() {
self.token_to_fd.lock().unwrap().remove(&token);
}
Ok(wait_res?)
}
/// Deregister event interests for the given IO handle with the OS
pub fn deregister_fd(&self, handle: &zircon::Handle, token: Token) -> io::Result<()> |
pub fn register_handle(
&self,
handle: zx_handle_t,
token: Token,
interests: Ready,
poll_opts: PollOpt,
) -> io::Result<()> {
        if poll_opts.is_level() && !poll_opts.is_oneshot() {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Repeated level-triggered events are not supported on Fuchsia handles.",
));
}
let temp_handle = unsafe { zircon::Handle::from_raw(handle) };
let res = temp_handle.wait_async_handle(
&self.port,
key_from_token_and_type(token, RegType::Handle)?,
FuchsiaReady::from(interests).into_zx_signals(),
poll_opts_to_wait_async(poll_opts),
);
mem::forget(temp_handle);
Ok(res?)
}
pub fn deregister_handle(&self, handle: zx_handle_t, token: Token) -> io::Result<()> {
let temp_handle = unsafe { zircon::Handle::from_raw(handle) };
let res = self.port.cancel(
&temp_handle,
key_from_token_and_type(token, RegType::Handle)?,
);
mem::forget(temp_handle);
Ok(res?)
}
}
pub struct Events {
events: Vec<Event>,
}
impl Events {
pub fn with_capacity(_u: usize) -> Events {
// The Fuchsia selector only handles one event at a time,
// so we ignore the default capacity and set it to one.
Events {
events: Vec::with_capacity(1),
}
}
pub fn len(&self) -> usize {
self.events.len()
}
pub fn capacity(&self) -> usize {
self.events.capacity()
}
pub fn is_empty(&self) -> bool {
self.events.is_empty()
}
pub fn get(&self, idx: usize) -> Option<Event> {
self.events.get(idx).map(|e| *e)
}
pub fn push_event(&mut self, event: Event) {
self.events.push(event)
}
pub fn clear(&mut self) {
        self.events.drain(0..);
}
}
impl fmt::Debug for Events {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_struct("Events")
.field("len", &self.len())
.finish()
}
}
| {
self.token_to_fd.lock().unwrap().remove(&token);
// We ignore NotFound errors since oneshots are automatically deregistered,
// but mio will attempt to deregister them manually.
self.port
.cancel(&*handle, token.0 as u64)
.map_err(io::Error::from)
.or_else(|e| {
if e.kind() == io::ErrorKind::NotFound {
Ok(())
} else {
Err(e)
}
})
} | identifier_body |
window.rs | use std::{collections::HashMap, sync::Arc};
use log::warn;
use unicode_segmentation::UnicodeSegmentation;
use crate::{
bridge::GridLineCell,
editor::{grid::CharacterGrid, style::Style, AnchorInfo, DrawCommand, DrawCommandBatcher},
renderer::{LineFragment, WindowDrawCommand},
};
pub enum WindowType {
Editor,
Message,
}
pub struct Window {
grid_id: u64,
grid: CharacterGrid,
pub window_type: WindowType,
pub anchor_info: Option<AnchorInfo>,
grid_position: (f64, f64),
draw_command_batcher: Arc<DrawCommandBatcher>,
}
impl Window {
pub fn new(
grid_id: u64,
window_type: WindowType,
anchor_info: Option<AnchorInfo>,
grid_position: (f64, f64),
grid_size: (u64, u64),
draw_command_batcher: Arc<DrawCommandBatcher>,
) -> Window {
let window = Window {
grid_id,
grid: CharacterGrid::new(grid_size),
window_type,
anchor_info,
grid_position,
draw_command_batcher,
};
window.send_updated_position();
window
}
fn send_command(&self, command: WindowDrawCommand) {
self.draw_command_batcher
.queue(DrawCommand::Window {
grid_id: self.grid_id,
command,
})
.ok();
}
fn send_updated_position(&self) {
self.send_command(WindowDrawCommand::Position {
grid_position: self.grid_position,
grid_size: (self.grid.width, self.grid.height),
floating_order: self.anchor_info.clone().map(|anchor| anchor.sort_order),
});
}
pub fn get_cursor_grid_cell(
&self,
window_left: u64,
window_top: u64,
) -> (String, Option<Arc<Style>>, bool) {
let grid_cell = match self.grid.get_cell(window_left, window_top) {
Some((character, style)) => (character.clone(), style.clone()),
_ => (' '.to_string(), None),
};
let double_width = match self.grid.get_cell(window_left + 1, window_top) {
Some((character, _)) => character.is_empty(),
_ => false,
};
(grid_cell.0, grid_cell.1, double_width)
}
pub fn get_width(&self) -> u64 {
self.grid.width
}
pub fn get_height(&self) -> u64 {
self.grid.height
}
pub fn get_grid_position(&self) -> (f64, f64) {
self.grid_position
}
pub fn position(
&mut self,
anchor_info: Option<AnchorInfo>,
grid_size: (u64, u64),
grid_position: (f64, f64),
) {
self.grid.resize(grid_size);
self.anchor_info = anchor_info;
self.grid_position = grid_position;
self.send_updated_position();
self.redraw();
}
pub fn resize(&mut self, new_size: (u64, u64)) {
self.grid.resize(new_size);
self.send_updated_position();
self.redraw();
}
fn modify_grid(
&mut self,
row_index: u64,
column_pos: &mut u64,
cell: GridLineCell,
defined_styles: &HashMap<u64, Arc<Style>>,
previous_style: &mut Option<Arc<Style>>,
) {
// Get the defined style from the style list.
let style = match cell.highlight_id {
Some(0) => None,
Some(style_id) => defined_styles.get(&style_id).cloned(),
None => previous_style.clone(),
};
// Compute text.
let mut text = cell.text;
if let Some(times) = cell.repeat {
text = text.repeat(times as usize);
}
// Insert the contents of the cell into the grid.
if text.is_empty() {
if let Some(cell) = self.grid.get_cell_mut(*column_pos, row_index) {
*cell = (text, style.clone());
}
*column_pos += 1;
} else {
for character in text.graphemes(true) {
if let Some(cell) = self.grid.get_cell_mut(*column_pos, row_index) {
*cell = (character.to_string(), style.clone());
}
*column_pos += 1;
}
}
*previous_style = style;
}
// Build a line fragment for the given row starting from current_start up until the next style
// change or double width character.
fn build_line_fragment(&self, row_index: u64, start: u64) -> (u64, LineFragment) {
let row = self.grid.row(row_index).unwrap();
let (_, style) = &row[start as usize];
let mut text = String::new();
let mut width = 0;
for possible_end_index in start..self.grid.width {
let (character, possible_end_style) = &row[possible_end_index as usize];
// Style doesn't match. Draw what we've got.
            if style != possible_end_style {
break;
}
width += 1;
// The previous character is double width, so send this as its own draw command.
if character.is_empty() {
break;
}
// Add the grid cell to the cells to render.
text.push_str(character);
}
let line_fragment = LineFragment {
text,
window_left: start,
window_top: row_index,
width,
style: style.clone(),
};
(start + width, line_fragment)
}
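    // Worked example for build_line_fragment (editor's note, not from the original
    // source): a row holding "ab" in style A followed by "cd" in style B yields two
    // fragments, ("ab", window_left 0, width 2) and ("cd", window_left 2, width 2).
    // The empty cell that follows a double-width glyph is absorbed into the current
    // fragment's width and ends it, so the next fragment starts on the following column.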
// Redraw line by calling build_line_fragment starting at 0
// until current_start is greater than the grid width and sending the resulting
// fragments as a batch.
fn redraw_line(&self, row: u64) {
let mut current_start = 0;
let mut line_fragments = Vec::new();
while current_start < self.grid.width {
let (next_start, line_fragment) = self.build_line_fragment(row, current_start);
current_start = next_start;
line_fragments.push(line_fragment);
}
self.send_command(WindowDrawCommand::DrawLine(line_fragments));
}
pub fn draw_grid_line(
&mut self,
row: u64,
column_start: u64,
cells: Vec<GridLineCell>,
defined_styles: &HashMap<u64, Arc<Style>>,
) {
let mut previous_style = None;
if row < self.grid.height {
let mut column_pos = column_start;
for cell in cells {
self.modify_grid(
row,
&mut column_pos,
cell,
defined_styles,
&mut previous_style,
);
}
// Due to the limitations of the current rendering strategy, some underlines get
// clipped by the line below. To mitigate that, we redraw the adjacent lines whenever
// an individual line is redrawn. Unfortunately, some clipping still happens.
// TODO: figure out how to solve this
if row < self.grid.height - 1 {
self.redraw_line(row + 1);
}
self.redraw_line(row);
if row > 0 {
self.redraw_line(row - 1);
}
} else {
warn!("Draw command out of bounds");
}
}
pub fn scroll_region(
&mut self,
top: u64,
bottom: u64,
left: u64,
right: u64,
rows: i64,
cols: i64,
) {
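        // Editor's note: the iteration direction below is chosen so that a cell is
        // always read before anything is written over it. When the region shifts up
        // (rows > 0) the rows are walked top to bottom; when it shifts down they are
        // walked bottom to top, and the same idea applies per column for horizontal
        // scrolls.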
let mut top_to_bottom;
let mut bottom_to_top;
let y_iter: &mut dyn Iterator<Item = i64> = if rows > 0 {
top_to_bottom = (top as i64 + rows)..bottom as i64;
&mut top_to_bottom
} else {
bottom_to_top = (top as i64..(bottom as i64 + rows)).rev();
&mut bottom_to_top
};
self.send_command(WindowDrawCommand::Scroll {
top,
bottom,
left,
right,
rows,
cols,
});
// Scrolls must not only translate the rendered texture, but also must move the grid data
// accordingly so that future renders work correctly.
for y in y_iter {
let dest_y = y - rows;
let mut cols_left;
let mut cols_right;
if dest_y >= 0 && dest_y < self.grid.height as i64 {
let x_iter: &mut dyn Iterator<Item = i64> = if cols > 0 {
cols_left = (left as i64 + cols)..right as i64;
&mut cols_left
} else {
cols_right = (left as i64..(right as i64 + cols)).rev();
&mut cols_right
};
for x in x_iter {
let dest_x = x - cols;
let cell_data = self.grid.get_cell(x as u64, y as u64).cloned();
if let Some(cell_data) = cell_data {
if let Some(dest_cell) =
self.grid.get_cell_mut(dest_x as u64, dest_y as u64)
{
*dest_cell = cell_data;
}
}
}
}
}
}
pub fn clear(&mut self) |
pub fn redraw(&self) {
self.send_command(WindowDrawCommand::Clear);
// Draw the lines from the bottom up so that underlines don't get overwritten by the line
// below.
for row in (0..self.grid.height).rev() {
self.redraw_line(row);
}
}
pub fn hide(&self) {
self.send_command(WindowDrawCommand::Hide);
}
pub fn show(&self) {
self.send_command(WindowDrawCommand::Show);
}
pub fn close(&self) {
self.send_command(WindowDrawCommand::Close);
}
pub fn update_viewport(&self, scroll_delta: f64) {
self.send_command(WindowDrawCommand::Viewport { scroll_delta });
}
}
#[cfg(test)]
mod tests {
use std::collections::HashMap;
use super::*;
use crate::event_aggregator::EVENT_AGGREGATOR;
#[test]
fn window_separator_modifies_grid_and_sends_draw_command() {
let mut draw_command_receiver = EVENT_AGGREGATOR.register_event::<Vec<DrawCommand>>();
let draw_command_batcher = Arc::new(DrawCommandBatcher::new());
let mut window = Window::new(
1,
WindowType::Editor,
None,
(0.0, 0.0),
(114, 64),
draw_command_batcher.clone(),
);
draw_command_batcher.send_batch();
draw_command_receiver
.try_recv()
.expect("Could not receive commands");
window.draw_grid_line(
1,
70,
vec![GridLineCell {
text: "|".to_owned(),
highlight_id: None,
repeat: None,
}],
&HashMap::new(),
);
assert_eq!(window.grid.get_cell(70, 1), Some(&("|".to_owned(), None)));
draw_command_batcher.send_batch();
let sent_commands = draw_command_receiver
.try_recv()
.expect("Could not receive commands");
assert!(!sent_commands.is_empty());
}
}
| {
self.grid.clear();
self.send_command(WindowDrawCommand::Clear);
} | identifier_body |
window.rs | use std::{collections::HashMap, sync::Arc};
use log::warn;
use unicode_segmentation::UnicodeSegmentation;
use crate::{
bridge::GridLineCell,
editor::{grid::CharacterGrid, style::Style, AnchorInfo, DrawCommand, DrawCommandBatcher},
renderer::{LineFragment, WindowDrawCommand},
};
pub enum WindowType {
Editor,
Message,
}
pub struct Window {
grid_id: u64,
grid: CharacterGrid,
pub window_type: WindowType,
pub anchor_info: Option<AnchorInfo>,
grid_position: (f64, f64),
draw_command_batcher: Arc<DrawCommandBatcher>,
}
impl Window {
pub fn new(
grid_id: u64,
window_type: WindowType,
anchor_info: Option<AnchorInfo>,
grid_position: (f64, f64),
grid_size: (u64, u64),
draw_command_batcher: Arc<DrawCommandBatcher>,
) -> Window {
let window = Window {
grid_id,
grid: CharacterGrid::new(grid_size),
window_type,
anchor_info,
grid_position,
draw_command_batcher,
};
window.send_updated_position();
window
}
fn | (&self, command: WindowDrawCommand) {
self.draw_command_batcher
.queue(DrawCommand::Window {
grid_id: self.grid_id,
command,
})
.ok();
}
fn send_updated_position(&self) {
self.send_command(WindowDrawCommand::Position {
grid_position: self.grid_position,
grid_size: (self.grid.width, self.grid.height),
floating_order: self.anchor_info.clone().map(|anchor| anchor.sort_order),
});
}
pub fn get_cursor_grid_cell(
&self,
window_left: u64,
window_top: u64,
) -> (String, Option<Arc<Style>>, bool) {
let grid_cell = match self.grid.get_cell(window_left, window_top) {
Some((character, style)) => (character.clone(), style.clone()),
_ => (' '.to_string(), None),
};
let double_width = match self.grid.get_cell(window_left + 1, window_top) {
Some((character, _)) => character.is_empty(),
_ => false,
};
(grid_cell.0, grid_cell.1, double_width)
}
pub fn get_width(&self) -> u64 {
self.grid.width
}
pub fn get_height(&self) -> u64 {
self.grid.height
}
pub fn get_grid_position(&self) -> (f64, f64) {
self.grid_position
}
pub fn position(
&mut self,
anchor_info: Option<AnchorInfo>,
grid_size: (u64, u64),
grid_position: (f64, f64),
) {
self.grid.resize(grid_size);
self.anchor_info = anchor_info;
self.grid_position = grid_position;
self.send_updated_position();
self.redraw();
}
pub fn resize(&mut self, new_size: (u64, u64)) {
self.grid.resize(new_size);
self.send_updated_position();
self.redraw();
}
fn modify_grid(
&mut self,
row_index: u64,
column_pos: &mut u64,
cell: GridLineCell,
defined_styles: &HashMap<u64, Arc<Style>>,
previous_style: &mut Option<Arc<Style>>,
) {
// Get the defined style from the style list.
let style = match cell.highlight_id {
Some(0) => None,
Some(style_id) => defined_styles.get(&style_id).cloned(),
None => previous_style.clone(),
};
// Compute text.
let mut text = cell.text;
if let Some(times) = cell.repeat {
text = text.repeat(times as usize);
}
// Insert the contents of the cell into the grid.
if text.is_empty() {
if let Some(cell) = self.grid.get_cell_mut(*column_pos, row_index) {
*cell = (text, style.clone());
}
*column_pos += 1;
} else {
for character in text.graphemes(true) {
if let Some(cell) = self.grid.get_cell_mut(*column_pos, row_index) {
*cell = (character.to_string(), style.clone());
}
*column_pos += 1;
}
}
*previous_style = style;
}
// Build a line fragment for the given row starting from current_start up until the next style
// change or double width character.
fn build_line_fragment(&self, row_index: u64, start: u64) -> (u64, LineFragment) {
let row = self.grid.row(row_index).unwrap();
let (_, style) = &row[start as usize];
let mut text = String::new();
let mut width = 0;
for possible_end_index in start..self.grid.width {
let (character, possible_end_style) = &row[possible_end_index as usize];
// Style doesn't match. Draw what we've got.
            if style != possible_end_style {
break;
}
width += 1;
// The previous character is double width, so send this as its own draw command.
if character.is_empty() {
break;
}
// Add the grid cell to the cells to render.
text.push_str(character);
}
let line_fragment = LineFragment {
text,
window_left: start,
window_top: row_index,
width,
style: style.clone(),
};
(start + width, line_fragment)
}
// Redraw line by calling build_line_fragment starting at 0
// until current_start is greater than the grid width and sending the resulting
// fragments as a batch.
fn redraw_line(&self, row: u64) {
let mut current_start = 0;
let mut line_fragments = Vec::new();
while current_start < self.grid.width {
let (next_start, line_fragment) = self.build_line_fragment(row, current_start);
current_start = next_start;
line_fragments.push(line_fragment);
}
self.send_command(WindowDrawCommand::DrawLine(line_fragments));
}
pub fn draw_grid_line(
&mut self,
row: u64,
column_start: u64,
cells: Vec<GridLineCell>,
defined_styles: &HashMap<u64, Arc<Style>>,
) {
let mut previous_style = None;
if row < self.grid.height {
let mut column_pos = column_start;
for cell in cells {
self.modify_grid(
row,
&mut column_pos,
cell,
defined_styles,
&mut previous_style,
);
}
// Due to the limitations of the current rendering strategy, some underlines get
// clipped by the line below. To mitigate that, we redraw the adjacent lines whenever
// an individual line is redrawn. Unfortunately, some clipping still happens.
// TODO: figure out how to solve this
if row < self.grid.height - 1 {
self.redraw_line(row + 1);
}
self.redraw_line(row);
if row > 0 {
self.redraw_line(row - 1);
}
} else {
warn!("Draw command out of bounds");
}
}
pub fn scroll_region(
&mut self,
top: u64,
bottom: u64,
left: u64,
right: u64,
rows: i64,
cols: i64,
) {
let mut top_to_bottom;
let mut bottom_to_top;
let y_iter: &mut dyn Iterator<Item = i64> = if rows > 0 {
top_to_bottom = (top as i64 + rows)..bottom as i64;
&mut top_to_bottom
} else {
bottom_to_top = (top as i64..(bottom as i64 + rows)).rev();
&mut bottom_to_top
};
self.send_command(WindowDrawCommand::Scroll {
top,
bottom,
left,
right,
rows,
cols,
});
// Scrolls must not only translate the rendered texture, but also must move the grid data
// accordingly so that future renders work correctly.
for y in y_iter {
let dest_y = y - rows;
let mut cols_left;
let mut cols_right;
if dest_y >= 0 && dest_y < self.grid.height as i64 {
let x_iter: &mut dyn Iterator<Item = i64> = if cols > 0 {
cols_left = (left as i64 + cols)..right as i64;
&mut cols_left
} else {
cols_right = (left as i64..(right as i64 + cols)).rev();
&mut cols_right
};
for x in x_iter {
let dest_x = x - cols;
let cell_data = self.grid.get_cell(x as u64, y as u64).cloned();
if let Some(cell_data) = cell_data {
if let Some(dest_cell) =
self.grid.get_cell_mut(dest_x as u64, dest_y as u64)
{
*dest_cell = cell_data;
}
}
}
}
}
}
pub fn clear(&mut self) {
self.grid.clear();
self.send_command(WindowDrawCommand::Clear);
}
pub fn redraw(&self) {
self.send_command(WindowDrawCommand::Clear);
// Draw the lines from the bottom up so that underlines don't get overwritten by the line
// below.
for row in (0..self.grid.height).rev() {
self.redraw_line(row);
}
}
pub fn hide(&self) {
self.send_command(WindowDrawCommand::Hide);
}
pub fn show(&self) {
self.send_command(WindowDrawCommand::Show);
}
pub fn close(&self) {
self.send_command(WindowDrawCommand::Close);
}
pub fn update_viewport(&self, scroll_delta: f64) {
self.send_command(WindowDrawCommand::Viewport { scroll_delta });
}
}
#[cfg(test)]
mod tests {
use std::collections::HashMap;
use super::*;
use crate::event_aggregator::EVENT_AGGREGATOR;
#[test]
fn window_separator_modifies_grid_and_sends_draw_command() {
let mut draw_command_receiver = EVENT_AGGREGATOR.register_event::<Vec<DrawCommand>>();
let draw_command_batcher = Arc::new(DrawCommandBatcher::new());
let mut window = Window::new(
1,
WindowType::Editor,
None,
(0.0, 0.0),
(114, 64),
draw_command_batcher.clone(),
);
draw_command_batcher.send_batch();
draw_command_receiver
.try_recv()
.expect("Could not receive commands");
window.draw_grid_line(
1,
70,
vec![GridLineCell {
text: "|".to_owned(),
highlight_id: None,
repeat: None,
}],
&HashMap::new(),
);
assert_eq!(window.grid.get_cell(70, 1), Some(&("|".to_owned(), None)));
draw_command_batcher.send_batch();
let sent_commands = draw_command_receiver
.try_recv()
.expect("Could not receive commands");
assert!(!sent_commands.is_empty());
}
}
| send_command | identifier_name |
window.rs | use std::{collections::HashMap, sync::Arc};
use log::warn;
use unicode_segmentation::UnicodeSegmentation;
use crate::{
bridge::GridLineCell,
editor::{grid::CharacterGrid, style::Style, AnchorInfo, DrawCommand, DrawCommandBatcher},
renderer::{LineFragment, WindowDrawCommand},
};
pub enum WindowType {
Editor,
Message,
}
pub struct Window {
grid_id: u64,
grid: CharacterGrid,
pub window_type: WindowType,
pub anchor_info: Option<AnchorInfo>,
grid_position: (f64, f64),
draw_command_batcher: Arc<DrawCommandBatcher>,
}
impl Window {
pub fn new(
grid_id: u64,
window_type: WindowType,
anchor_info: Option<AnchorInfo>,
grid_position: (f64, f64),
grid_size: (u64, u64),
draw_command_batcher: Arc<DrawCommandBatcher>,
) -> Window {
let window = Window {
grid_id,
grid: CharacterGrid::new(grid_size),
window_type,
anchor_info,
grid_position,
draw_command_batcher,
};
window.send_updated_position();
window
}
fn send_command(&self, command: WindowDrawCommand) {
self.draw_command_batcher
.queue(DrawCommand::Window {
grid_id: self.grid_id,
command,
})
.ok();
}
fn send_updated_position(&self) {
self.send_command(WindowDrawCommand::Position {
grid_position: self.grid_position,
grid_size: (self.grid.width, self.grid.height),
floating_order: self.anchor_info.clone().map(|anchor| anchor.sort_order),
});
}
pub fn get_cursor_grid_cell(
&self,
window_left: u64,
window_top: u64,
) -> (String, Option<Arc<Style>>, bool) {
let grid_cell = match self.grid.get_cell(window_left, window_top) {
Some((character, style)) => (character.clone(), style.clone()),
_ => (' '.to_string(), None),
};
let double_width = match self.grid.get_cell(window_left + 1, window_top) {
Some((character, _)) => character.is_empty(),
_ => false,
};
(grid_cell.0, grid_cell.1, double_width)
}
pub fn get_width(&self) -> u64 {
self.grid.width
}
pub fn get_height(&self) -> u64 {
self.grid.height
}
pub fn get_grid_position(&self) -> (f64, f64) {
self.grid_position
}
pub fn position(
&mut self,
anchor_info: Option<AnchorInfo>,
grid_size: (u64, u64),
grid_position: (f64, f64),
) {
self.grid.resize(grid_size);
self.anchor_info = anchor_info;
self.grid_position = grid_position;
self.send_updated_position();
self.redraw();
}
pub fn resize(&mut self, new_size: (u64, u64)) {
self.grid.resize(new_size);
self.send_updated_position();
self.redraw();
}
fn modify_grid(
&mut self,
row_index: u64,
column_pos: &mut u64,
cell: GridLineCell,
defined_styles: &HashMap<u64, Arc<Style>>,
previous_style: &mut Option<Arc<Style>>,
) {
// Get the defined style from the style list.
let style = match cell.highlight_id {
Some(0) => None,
Some(style_id) => defined_styles.get(&style_id).cloned(),
None => previous_style.clone(),
};
// Compute text.
let mut text = cell.text;
if let Some(times) = cell.repeat {
text = text.repeat(times as usize);
}
// Insert the contents of the cell into the grid.
if text.is_empty() {
if let Some(cell) = self.grid.get_cell_mut(*column_pos, row_index) {
*cell = (text, style.clone());
}
*column_pos += 1;
} else {
for character in text.graphemes(true) {
if let Some(cell) = self.grid.get_cell_mut(*column_pos, row_index) {
*cell = (character.to_string(), style.clone());
}
*column_pos += 1;
}
}
*previous_style = style;
}
// Build a line fragment for the given row starting from current_start up until the next style
// change or double width character.
fn build_line_fragment(&self, row_index: u64, start: u64) -> (u64, LineFragment) {
let row = self.grid.row(row_index).unwrap();
let (_, style) = &row[start as usize];
let mut text = String::new();
let mut width = 0;
for possible_end_index in start..self.grid.width {
let (character, possible_end_style) = &row[possible_end_index as usize];
// Style doesn't match. Draw what we've got.
            if style != possible_end_style {
break;
}
width += 1;
// The previous character is double width, so send this as its own draw command.
if character.is_empty() {
break;
}
// Add the grid cell to the cells to render.
text.push_str(character);
}
let line_fragment = LineFragment {
text,
window_left: start,
window_top: row_index,
width,
style: style.clone(),
};
(start + width, line_fragment)
}
// Redraw line by calling build_line_fragment starting at 0
// until current_start is greater than the grid width and sending the resulting
// fragments as a batch.
fn redraw_line(&self, row: u64) {
let mut current_start = 0;
let mut line_fragments = Vec::new();
while current_start < self.grid.width {
let (next_start, line_fragment) = self.build_line_fragment(row, current_start);
current_start = next_start;
line_fragments.push(line_fragment);
}
self.send_command(WindowDrawCommand::DrawLine(line_fragments));
}
pub fn draw_grid_line(
&mut self,
row: u64,
column_start: u64,
cells: Vec<GridLineCell>,
defined_styles: &HashMap<u64, Arc<Style>>,
) {
let mut previous_style = None;
if row < self.grid.height {
let mut column_pos = column_start;
for cell in cells {
self.modify_grid(
row,
&mut column_pos,
cell,
defined_styles,
&mut previous_style,
);
}
// Due to the limitations of the current rendering strategy, some underlines get
// clipped by the line below. To mitigate that, we redraw the adjacent lines whenever
// an individual line is redrawn. Unfortunately, some clipping still happens.
// TODO: figure out how to solve this
if row < self.grid.height - 1 |
self.redraw_line(row);
if row > 0 {
self.redraw_line(row - 1);
}
} else {
warn!("Draw command out of bounds");
}
}
pub fn scroll_region(
&mut self,
top: u64,
bottom: u64,
left: u64,
right: u64,
rows: i64,
cols: i64,
) {
let mut top_to_bottom;
let mut bottom_to_top;
let y_iter: &mut dyn Iterator<Item = i64> = if rows > 0 {
top_to_bottom = (top as i64 + rows)..bottom as i64;
&mut top_to_bottom
} else {
bottom_to_top = (top as i64..(bottom as i64 + rows)).rev();
&mut bottom_to_top
};
self.send_command(WindowDrawCommand::Scroll {
top,
bottom,
left,
right,
rows,
cols,
});
// Scrolls must not only translate the rendered texture, but also must move the grid data
// accordingly so that future renders work correctly.
for y in y_iter {
let dest_y = y - rows;
let mut cols_left;
let mut cols_right;
if dest_y >= 0 && dest_y < self.grid.height as i64 {
let x_iter: &mut dyn Iterator<Item = i64> = if cols > 0 {
cols_left = (left as i64 + cols)..right as i64;
&mut cols_left
} else {
cols_right = (left as i64..(right as i64 + cols)).rev();
&mut cols_right
};
for x in x_iter {
let dest_x = x - cols;
let cell_data = self.grid.get_cell(x as u64, y as u64).cloned();
if let Some(cell_data) = cell_data {
if let Some(dest_cell) =
self.grid.get_cell_mut(dest_x as u64, dest_y as u64)
{
*dest_cell = cell_data;
}
}
}
}
}
}
pub fn clear(&mut self) {
self.grid.clear();
self.send_command(WindowDrawCommand::Clear);
}
pub fn redraw(&self) {
self.send_command(WindowDrawCommand::Clear);
// Draw the lines from the bottom up so that underlines don't get overwritten by the line
// below.
for row in (0..self.grid.height).rev() {
self.redraw_line(row);
}
}
pub fn hide(&self) {
self.send_command(WindowDrawCommand::Hide);
}
pub fn show(&self) {
self.send_command(WindowDrawCommand::Show);
}
pub fn close(&self) {
self.send_command(WindowDrawCommand::Close);
}
pub fn update_viewport(&self, scroll_delta: f64) {
self.send_command(WindowDrawCommand::Viewport { scroll_delta });
}
}
#[cfg(test)]
mod tests {
use std::collections::HashMap;
use super::*;
use crate::event_aggregator::EVENT_AGGREGATOR;
#[test]
fn window_separator_modifies_grid_and_sends_draw_command() {
let mut draw_command_receiver = EVENT_AGGREGATOR.register_event::<Vec<DrawCommand>>();
let draw_command_batcher = Arc::new(DrawCommandBatcher::new());
let mut window = Window::new(
1,
WindowType::Editor,
None,
(0.0, 0.0),
(114, 64),
draw_command_batcher.clone(),
);
draw_command_batcher.send_batch();
draw_command_receiver
.try_recv()
.expect("Could not receive commands");
window.draw_grid_line(
1,
70,
vec![GridLineCell {
text: "|".to_owned(),
highlight_id: None,
repeat: None,
}],
&HashMap::new(),
);
assert_eq!(window.grid.get_cell(70, 1), Some(&("|".to_owned(), None)));
draw_command_batcher.send_batch();
let sent_commands = draw_command_receiver
.try_recv()
.expect("Could not receive commands");
assert!(!sent_commands.is_empty());
}
}
| {
self.redraw_line(row + 1);
} | conditional_block |
window.rs | use std::{collections::HashMap, sync::Arc};
use log::warn;
use unicode_segmentation::UnicodeSegmentation;
use crate::{
bridge::GridLineCell,
editor::{grid::CharacterGrid, style::Style, AnchorInfo, DrawCommand, DrawCommandBatcher},
renderer::{LineFragment, WindowDrawCommand},
};
pub enum WindowType {
Editor,
Message,
}
pub struct Window {
grid_id: u64,
grid: CharacterGrid,
pub window_type: WindowType,
pub anchor_info: Option<AnchorInfo>,
grid_position: (f64, f64),
draw_command_batcher: Arc<DrawCommandBatcher>,
}
impl Window {
pub fn new(
grid_id: u64,
window_type: WindowType,
anchor_info: Option<AnchorInfo>,
grid_position: (f64, f64),
grid_size: (u64, u64),
draw_command_batcher: Arc<DrawCommandBatcher>,
) -> Window {
let window = Window {
grid_id,
grid: CharacterGrid::new(grid_size),
window_type,
anchor_info,
grid_position,
draw_command_batcher,
};
window.send_updated_position();
window
}
fn send_command(&self, command: WindowDrawCommand) {
self.draw_command_batcher
.queue(DrawCommand::Window {
grid_id: self.grid_id,
command,
})
.ok();
}
fn send_updated_position(&self) {
self.send_command(WindowDrawCommand::Position {
grid_position: self.grid_position,
grid_size: (self.grid.width, self.grid.height),
floating_order: self.anchor_info.clone().map(|anchor| anchor.sort_order),
});
}
pub fn get_cursor_grid_cell(
&self,
window_left: u64,
window_top: u64,
) -> (String, Option<Arc<Style>>, bool) {
let grid_cell = match self.grid.get_cell(window_left, window_top) {
Some((character, style)) => (character.clone(), style.clone()),
_ => (' '.to_string(), None),
};
let double_width = match self.grid.get_cell(window_left + 1, window_top) {
Some((character, _)) => character.is_empty(),
_ => false,
};
(grid_cell.0, grid_cell.1, double_width)
}
pub fn get_width(&self) -> u64 {
self.grid.width
}
pub fn get_height(&self) -> u64 {
self.grid.height
}
pub fn get_grid_position(&self) -> (f64, f64) {
self.grid_position
}
pub fn position(
&mut self,
anchor_info: Option<AnchorInfo>,
grid_size: (u64, u64),
grid_position: (f64, f64),
) {
self.grid.resize(grid_size);
self.anchor_info = anchor_info;
self.grid_position = grid_position;
self.send_updated_position();
self.redraw();
}
pub fn resize(&mut self, new_size: (u64, u64)) {
self.grid.resize(new_size);
self.send_updated_position();
self.redraw();
}
fn modify_grid(
&mut self,
row_index: u64,
column_pos: &mut u64,
cell: GridLineCell,
defined_styles: &HashMap<u64, Arc<Style>>,
previous_style: &mut Option<Arc<Style>>,
) {
// Get the defined style from the style list.
let style = match cell.highlight_id {
Some(0) => None,
Some(style_id) => defined_styles.get(&style_id).cloned(),
None => previous_style.clone(),
};
// Compute text.
let mut text = cell.text;
if let Some(times) = cell.repeat {
text = text.repeat(times as usize);
}
// Insert the contents of the cell into the grid.
if text.is_empty() {
if let Some(cell) = self.grid.get_cell_mut(*column_pos, row_index) {
*cell = (text, style.clone());
}
*column_pos += 1;
} else {
for character in text.graphemes(true) {
if let Some(cell) = self.grid.get_cell_mut(*column_pos, row_index) {
*cell = (character.to_string(), style.clone());
}
*column_pos += 1;
}
}
*previous_style = style;
}
// Build a line fragment for the given row starting from current_start up until the next style
// change or double width character.
fn build_line_fragment(&self, row_index: u64, start: u64) -> (u64, LineFragment) {
let row = self.grid.row(row_index).unwrap();
let (_, style) = &row[start as usize];
let mut text = String::new();
let mut width = 0;
for possible_end_index in start..self.grid.width {
let (character, possible_end_style) = &row[possible_end_index as usize];
// Style doesn't match. Draw what we've got.
            if style != possible_end_style {
break;
}
width += 1;
// The previous character is double width, so send this as its own draw command.
if character.is_empty() {
break; | }
let line_fragment = LineFragment {
text,
window_left: start,
window_top: row_index,
width,
style: style.clone(),
};
(start + width, line_fragment)
}
// Redraw line by calling build_line_fragment starting at 0
// until current_start is greater than the grid width and sending the resulting
// fragments as a batch.
fn redraw_line(&self, row: u64) {
let mut current_start = 0;
let mut line_fragments = Vec::new();
while current_start < self.grid.width {
let (next_start, line_fragment) = self.build_line_fragment(row, current_start);
current_start = next_start;
line_fragments.push(line_fragment);
}
self.send_command(WindowDrawCommand::DrawLine(line_fragments));
}
pub fn draw_grid_line(
&mut self,
row: u64,
column_start: u64,
cells: Vec<GridLineCell>,
defined_styles: &HashMap<u64, Arc<Style>>,
) {
let mut previous_style = None;
if row < self.grid.height {
let mut column_pos = column_start;
for cell in cells {
self.modify_grid(
row,
&mut column_pos,
cell,
defined_styles,
&mut previous_style,
);
}
// Due to the limitations of the current rendering strategy, some underlines get
// clipped by the line below. To mitigate that, we redraw the adjacent lines whenever
// an individual line is redrawn. Unfortunately, some clipping still happens.
// TODO: figure out how to solve this
if row < self.grid.height - 1 {
self.redraw_line(row + 1);
}
self.redraw_line(row);
if row > 0 {
self.redraw_line(row - 1);
}
} else {
warn!("Draw command out of bounds");
}
}
pub fn scroll_region(
&mut self,
top: u64,
bottom: u64,
left: u64,
right: u64,
rows: i64,
cols: i64,
) {
let mut top_to_bottom;
let mut bottom_to_top;
let y_iter: &mut dyn Iterator<Item = i64> = if rows > 0 {
top_to_bottom = (top as i64 + rows)..bottom as i64;
&mut top_to_bottom
} else {
bottom_to_top = (top as i64..(bottom as i64 + rows)).rev();
&mut bottom_to_top
};
self.send_command(WindowDrawCommand::Scroll {
top,
bottom,
left,
right,
rows,
cols,
});
// Scrolls must not only translate the rendered texture, but also must move the grid data
// accordingly so that future renders work correctly.
for y in y_iter {
let dest_y = y - rows;
let mut cols_left;
let mut cols_right;
if dest_y >= 0 && dest_y < self.grid.height as i64 {
let x_iter: &mut dyn Iterator<Item = i64> = if cols > 0 {
cols_left = (left as i64 + cols)..right as i64;
&mut cols_left
} else {
cols_right = (left as i64..(right as i64 + cols)).rev();
&mut cols_right
};
for x in x_iter {
let dest_x = x - cols;
let cell_data = self.grid.get_cell(x as u64, y as u64).cloned();
if let Some(cell_data) = cell_data {
if let Some(dest_cell) =
self.grid.get_cell_mut(dest_x as u64, dest_y as u64)
{
*dest_cell = cell_data;
}
}
}
}
}
}
pub fn clear(&mut self) {
self.grid.clear();
self.send_command(WindowDrawCommand::Clear);
}
pub fn redraw(&self) {
self.send_command(WindowDrawCommand::Clear);
// Draw the lines from the bottom up so that underlines don't get overwritten by the line
// below.
for row in (0..self.grid.height).rev() {
self.redraw_line(row);
}
}
pub fn hide(&self) {
self.send_command(WindowDrawCommand::Hide);
}
pub fn show(&self) {
self.send_command(WindowDrawCommand::Show);
}
pub fn close(&self) {
self.send_command(WindowDrawCommand::Close);
}
pub fn update_viewport(&self, scroll_delta: f64) {
self.send_command(WindowDrawCommand::Viewport { scroll_delta });
}
}
#[cfg(test)]
mod tests {
use std::collections::HashMap;
use super::*;
use crate::event_aggregator::EVENT_AGGREGATOR;
#[test]
fn window_separator_modifies_grid_and_sends_draw_command() {
let mut draw_command_receiver = EVENT_AGGREGATOR.register_event::<Vec<DrawCommand>>();
let draw_command_batcher = Arc::new(DrawCommandBatcher::new());
let mut window = Window::new(
1,
WindowType::Editor,
None,
(0.0, 0.0),
(114, 64),
draw_command_batcher.clone(),
);
draw_command_batcher.send_batch();
draw_command_receiver
.try_recv()
.expect("Could not receive commands");
window.draw_grid_line(
1,
70,
vec![GridLineCell {
text: "|".to_owned(),
highlight_id: None,
repeat: None,
}],
&HashMap::new(),
);
assert_eq!(window.grid.get_cell(70, 1), Some(&("|".to_owned(), None)));
draw_command_batcher.send_batch();
let sent_commands = draw_command_receiver
.try_recv()
.expect("Could not receive commands");
assert!(!sent_commands.is_empty());
}
} | }
// Add the grid cell to the cells to render.
text.push_str(character); | random_line_split |
font_atlas.rs | #[macro_use]
extern crate glium;
use euclid::Rect;
use font_kit::font::Font;
use glium::backend::Facade;
use glium::texture::Texture2d;
use glium::{glutin, Surface};
use lyon_path::math::{Angle, Point, Vector};
use lyon_path::Segment;
use msdfgen::{compute_msdf, recolor_contours, Contour, PathCollector};
use std::collections::HashMap;
const SDF_DIMENSION: u32 = 32;
fn get_font() -> Font {
use font_kit::family_name::FamilyName;
use font_kit::properties::{Properties, Style};
use font_kit::source::SystemSource;
let source = SystemSource::new();
source
.select_best_match(
&[FamilyName::Serif],
Properties::new().style(Style::Normal),
)
.expect("Failed to select a good font")
.load()
.unwrap()
}
/// Get a glyph ID for a character, its contours, and the typographic bounds for that glyph
/// TODO: this should also return font.origin() so we can offset the EM-space
/// computations by it. However, on freetype that always returns 0 so for the
/// moment we'll get away without it
fn get_glyph(font: &Font, chr: char) -> (u32, Vec<Contour>, Rect<f32>) {
use font_kit::hinting::HintingOptions;
use lyon_path::builder::FlatPathBuilder;
let glyph_id = font.glyph_for_char(chr).unwrap();
let mut builder = PathCollector::new();
font.outline(glyph_id, HintingOptions::None, &mut builder)
.unwrap();
(
glyph_id,
builder.build(),
font.typographic_bounds(glyph_id).unwrap(),
)
}
/// Rescale contours so they fit in the provided rectangle.
/// Returns the scaled contours along with the transformation used to rescale the contours
fn rescale_contours(
mut contours: Vec<Contour>,
initial_bounds: Rect<f32>,
bounds: lyon_path::math::Rect,
) -> (Vec<Contour>, euclid::Transform2D<f32>) {
let initial_scale = initial_bounds.size.width.max(initial_bounds.size.height);
let bounds_scale = bounds.size.width.max(bounds.size.height);
let transformation =
euclid::Transform2D::create_translation(-initial_bounds.origin.x, -initial_bounds.origin.y)
.post_scale(bounds_scale / initial_scale, bounds_scale / initial_scale)
.post_translate(bounds.origin.to_vector());
for contour in &mut contours {
for mut elem in &mut contour.elements {
elem.segment = match elem.segment {
Segment::Line(s) => Segment::Line(s.transform(&transformation)),
Segment::Quadratic(s) => Segment::Quadratic(s.transform(&transformation)),
Segment::Cubic(s) => Segment::Cubic(s.transform(&transformation)),
Segment::Arc(s) => Segment::Arc(lyon_geom::Arc {
center: transformation.transform_point(&s.center),
..s
}),
}
}
}
(contours, transformation)
}
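// Editor's sketch (not from the original source): pins down the transform returned by
// rescale_contours, using only items already imported in this file and numbers chosen so
// every intermediate value is exactly representable in f32.
#[cfg(test)]
mod rescale_contours_sketch {
    use super::*;

    #[test]
    fn maps_glyph_bounds_onto_target_rect() {
        let glyph_bounds = Rect::new(Point::new(64.0, 32.0), euclid::TypedSize2D::new(128.0, 64.0));
        let target = Rect::new(Point::new(0.25, 0.25), euclid::TypedSize2D::new(0.5, 0.5));
        let (_, transform) = rescale_contours(Vec::new(), glyph_bounds, target);
        // The glyph origin lands on the target origin.
        assert_eq!(
            transform.transform_point(&Point::new(64.0, 32.0)),
            Point::new(0.25, 0.25)
        );
        // The longer axis (the 128-unit width) spans the full 0.5-wide target.
        assert_eq!(
            transform.transform_point(&Point::new(192.0, 32.0)),
            Point::new(0.75, 0.25)
        );
    }
}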
#[derive(Copy, Clone)]
struct Vertex2D {
position: [f32; 2],
uv: [f32; 2],
color: [f32; 3],
}
glium::implement_vertex!(Vertex2D, position, uv, color);
/// All the information required to render a character from a string
#[derive(Clone, Copy, Debug)]
struct RenderChar {
/// The position of the vertices
verts: Rect<f32>,
/// The UV coordinates of the vertices
uv: Rect<f32>,
}
impl RenderChar {
fn verts(&self) -> [Vertex2D; 4] {
macro_rules! vertex {
($p: expr, $t: expr) => {{
let color = [rand::random(), rand::random(), rand::random()];
let p = $p;
let t = $t;
Vertex2D {
position: [p.x, p.y],
uv: [t.x, t.y],
color: color.clone(),
}
}};
}
[
vertex!(self.verts.bottom_left(), self.uv.bottom_left()),
vertex!(self.verts.origin, self.uv.origin),
vertex!(self.verts.bottom_right(), self.uv.bottom_right()),
vertex!(self.verts.top_right(), self.uv.top_right()),
]
}
}
/// The information about a glyph that gets cached in the font atlas.
/// Since every letter has a different scaling factor to make maximum use of the MSDF pixels,
/// we need to keep track of the offset and scale from font unit space. This
/// information is required when positioning characters to get the right scale
/// and positioning for the geometry.
#[derive(Clone, Copy, Debug)]
struct GlyphInformation {
id: u32,
/// Where it actually is in the atlas texture
uv: Rect<f32>,
/// The font-space rectangle covered by the uv rectangle
font_units: Rect<f32>,
}
struct FontAtlas<'font, 'facade, T: Facade> {
/// Used when a string requires new glyphs
font: &'font Font,
    /// Reference to the facade, used when we need to grow the atlas texture
facade: &'facade T,
    /// The pixel dimensions (width and height) of each character's MSDF cell in the atlas
char_dim: u32,
/// The current dimensions of the texture
alloced_size: u32,
/// The x coordinate at which to place the next character,
next_x: u32,
/// The y coordinate at which to place the next character,
next_y: u32,
/// The actual backing texture that includes all of the distances.
/// All the distance values should be roughly in [-1, 1]
tex: Texture2d,
/// Texture coordinates of every character we know about
/// Technically, this should probably use glyph ids as keys
locations: HashMap<char, GlyphInformation>,
}
impl<'font, 'facade, T: Facade> FontAtlas<'font, 'facade, T> {
/// Create a new atlas.
fn build(
char_dim: u32,
font: &'font Font,
facade: &'facade T,
) -> Result<Self, glium::texture::TextureCreationError> {
use glium::texture::{MipmapsOption, UncompressedFloatFormat};
let alloced_size = char_dim * 16;
let tex = Texture2d::empty_with_format(
facade,
UncompressedFloatFormat::F16F16F16,
MipmapsOption::NoMipmap,
alloced_size,
alloced_size,
)?;
println!("Allocated {0:?}x{0:?} texture", alloced_size);
Ok(Self {
locations: Default::default(),
next_x: 0,
next_y: 0,
font,
facade,
char_dim,
tex,
alloced_size,
})
}
/// Get the glyph information for a character, either pulling them from the cache
/// or generating the MSDF
fn character_information(&mut self, c: char) -> GlyphInformation {
        if !self.locations.contains_key(&c) {
const INIT_UV_BORDER: f32 = 0.2;
const UV_BORDER: f32 = 0.1;
let (glyph_id, contours, font_unit_rect) = get_glyph(self.font, c);
let uv_rect = Rect::new(
Point::new(INIT_UV_BORDER, INIT_UV_BORDER),
euclid::TypedSize2D::new(1.0 - 2.0 * INIT_UV_BORDER, 1.0 - 2.0 * INIT_UV_BORDER),
);
let (contours, transform) = rescale_contours(contours, font_unit_rect, uv_rect);
            // Build the contours and upload them to the texture
let contours = recolor_contours(contours, Angle::degrees(3.0), 1);
let msdf = compute_msdf(&contours, self.char_dim as usize);
self.tex.write(
glium::Rect {
left: self.next_x,
bottom: self.next_y,
width: self.char_dim,
height: self.char_dim,
},
msdf,
);
// Compute the final positions of the font_unit and uv rectangles
            // transform should just be a scale and translation, easily invertible
let inv_transform = transform.inverse().unwrap();
let uv_rect = Rect::new(
Point::new(UV_BORDER, UV_BORDER),
euclid::TypedSize2D::new(1.0 - 2.0 * UV_BORDER, 1.0 - 2.0 * UV_BORDER),
);
let font_unit_rect = inv_transform.transform_rect(&uv_rect);
let alloc_scale = 1.0 / self.alloced_size as f32;
let uv_rect = uv_rect.scale(
self.char_dim as f32 * alloc_scale,
self.char_dim as f32 * alloc_scale,
);
let uv_rect = uv_rect
.translate(&(Vector::new(self.next_x as f32, self.next_y as f32) * alloc_scale));
// Make sure to advance to the next character slot
self.next_x += self.char_dim;
if self.next_x == self.alloced_size {
self.next_x = 0;
self.next_y += self.char_dim;
}
let tr = GlyphInformation {
id: glyph_id,
uv: uv_rect,
font_units: font_unit_rect,
};
self.locations.insert(c, tr);
}
self.locations[&c]
}
/// Layout a string.
/// TODO: hide things with interior mutability so that this doesn't take &mut
fn layout_string(&mut self, start: Point, size_in_points: f32, s: &str) -> Vec<RenderChar> {
let metrics = self.font.metrics();
eprintln!("{:?}", metrics);
let mut tr = Vec::new();
let scale = size_in_points / metrics.units_per_em as f32;
let mut transform = euclid::Transform2D::create_scale(scale, scale)
.post_translate(start.to_vector() + Vector::new(0.0, metrics.descent * -scale));
for c in s.chars() {
let information = self.character_information(c);
tr.push(RenderChar {
verts: transform.transform_rect(&information.font_units),
uv: information.uv,
});
transform = transform.post_translate(
self.font
.advance(information.id)
.unwrap_or(Vector::new(0.0, 0.0)) * scale,
);
}
tr
}
}
fn main() {
let mut events_loop = glutin::EventsLoop::new();
let mut window_size: glutin::dpi::LogicalSize = (512u32, 512).into();
let window = glutin::WindowBuilder::new().with_dimensions(window_size);
let context = glutin::ContextBuilder::new();
let context = context.with_gl_profile(glutin::GlProfile::Core);
let context = context.with_gl_debug_flag(true);
let display =
glium::Display::new(window, context, &events_loop).expect("Error creating GL display");
let hidpi_factor = display.gl_window().window().get_hidpi_factor() as f32;
println!("{:?}", hidpi_factor);
let font = get_font();
let bg_shader = program!(&display,
410 => {
vertex: r#"
#version 410
in vec2 position;
in vec2 uv;
in vec3 color;
out vec3 cross_color;
out vec2 cross_uv;
uniform mat4 transform;
void main() {
gl_Position = vec4(position, 0.0, 1.0) * transform;
cross_color = color;
cross_uv = uv;
}"#,
fragment: r#"
#version 410
uniform sampler2D tex;
in vec2 cross_uv;
in vec3 cross_color;
out vec4 color;
#define RADIUS 0.05
float band_around(float center, float r, float f) {
return smoothstep(center - r, center, f) -
smoothstep(center, center + r, f);
}
float remap(float f) {
return smoothstep(-RADIUS, RADIUS, f);
}
void main() {
vec3 x = texture(tex, cross_uv).rgb;
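                // Median of the three channel distances: the standard MSDF
                // reconstruction step (editor's note added for clarity).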
float v = max(min(x.r, x.g), min(max(x.r, x.g), x.b));
float c = remap(v);
color = vec4(cross_color.rgb, c);
}"#,
},
)
.unwrap();
let mut font_atlas =
FontAtlas::build(SDF_DIMENSION, &font, &display).expect("Failed to build font atlas");
let layout = font_atlas.layout_string(
Point::new(72.0, 72.0),
16.0,
// ":{<~The lazy cat jumps over the xenophobic dog, yodeling~>}",
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*()`~'\";/.,<>?",
);
let mut vertices = Vec::with_capacity(layout.len() * 4);
let mut indices = Vec::with_capacity(layout.len() * 5);
for c in &layout {
let base = vertices.len() as u16;
vertices.extend_from_slice(&c.verts());
indices.push(base);
indices.push(base + 1);
indices.push(base + 2);
indices.push(base + 3);
indices.push(std::u16::MAX);
}
let tex_vbo = glium::VertexBuffer::immutable(&display, &vertices).unwrap();
let index_buffer = glium::index::IndexBuffer::new(
&display,
glium::index::PrimitiveType::TriangleStrip,
&indices,
)
.unwrap();
let mut closed = false;
while !closed {
let params = glium::DrawParameters {
blend: glium::Blend::alpha_blending(),
primitive_restart_index: true,
..Default::default()
};
// This transform converts from point-space, with (0, 0) in the bottom left corner
// to NDC
// The final 96.0 / 72.0 scaling is because virtual DPI is based on 96
// DPI while points are 1/72 of an inch
let transform = euclid::Transform3D::create_translation(-1.0, -1.0, 0.0)
.pre_scale(2.0 / (window_size.width as f32), 2.0 / (window_size.height as f32), 1.0)
.pre_scale(96.0 / 72.0, 96.0 / 72.0, 1.0);
let mut target = display.draw();
target.clear_color(0.0, 0.0, 0.0, 1.0);
let uniforms = uniform!(
tex: font_atlas.tex.sampled(),
transform: transform.to_column_arrays(),
);
target
.draw(&tex_vbo, &index_buffer, &bg_shader, &uniforms, &params)
.unwrap();
target.finish().unwrap();
events_loop.poll_events(|ev| match ev {
glutin::Event::WindowEvent { event, .. } => match event {
glutin::WindowEvent::CloseRequested => closed = true,
glutin::WindowEvent::KeyboardInput { input, .. } => match input.virtual_keycode {
_ => {}
},
glutin::WindowEvent::Resized(new_size) => {
window_size = new_size;
}
_ => {}
},
_ => |
})
}
}
| {} | conditional_block |
font_atlas.rs | #[macro_use]
extern crate glium;
use euclid::Rect;
use font_kit::font::Font;
use glium::backend::Facade;
use glium::texture::Texture2d;
use glium::{glutin, Surface};
use lyon_path::math::{Angle, Point, Vector};
use lyon_path::Segment;
use msdfgen::{compute_msdf, recolor_contours, Contour, PathCollector};
use std::collections::HashMap;
const SDF_DIMENSION: u32 = 32;
fn get_font() -> Font {
use font_kit::family_name::FamilyName;
use font_kit::properties::{Properties, Style};
use font_kit::source::SystemSource;
let source = SystemSource::new();
source
.select_best_match(
&[FamilyName::Serif],
Properties::new().style(Style::Normal),
)
.expect("Failed to select a good font")
.load()
.unwrap()
}
/// Get a glyph ID for a character, its contours, and the typographic bounds for that glyph
/// TODO: this should also return font.origin() so we can offset the EM-space
/// computations by it. However, on freetype that always returns 0 so for the
/// moment we'll get away without it
fn get_glyph(font: &Font, chr: char) -> (u32, Vec<Contour>, Rect<f32>) {
use font_kit::hinting::HintingOptions;
use lyon_path::builder::FlatPathBuilder;
let glyph_id = font.glyph_for_char(chr).unwrap();
let mut builder = PathCollector::new();
font.outline(glyph_id, HintingOptions::None, &mut builder)
.unwrap();
(
glyph_id,
builder.build(),
font.typographic_bounds(glyph_id).unwrap(),
)
}
/// Rescale contours so they fit in the provided rectangle.
/// Returns the scaled contours along with the transformation used to rescale the contours
fn rescale_contours(
mut contours: Vec<Contour>,
initial_bounds: Rect<f32>,
bounds: lyon_path::math::Rect,
) -> (Vec<Contour>, euclid::Transform2D<f32>) {
let initial_scale = initial_bounds.size.width.max(initial_bounds.size.height);
let bounds_scale = bounds.size.width.max(bounds.size.height);
let transformation =
euclid::Transform2D::create_translation(-initial_bounds.origin.x, -initial_bounds.origin.y)
.post_scale(bounds_scale / initial_scale, bounds_scale / initial_scale)
.post_translate(bounds.origin.to_vector());
for contour in &mut contours {
for mut elem in &mut contour.elements {
elem.segment = match elem.segment {
Segment::Line(s) => Segment::Line(s.transform(&transformation)),
Segment::Quadratic(s) => Segment::Quadratic(s.transform(&transformation)),
Segment::Cubic(s) => Segment::Cubic(s.transform(&transformation)),
Segment::Arc(s) => Segment::Arc(lyon_geom::Arc {
center: transformation.transform_point(&s.center),
..s
}), | }
(contours, transformation)
}
#[derive(Copy, Clone)]
struct Vertex2D {
position: [f32; 2],
uv: [f32; 2],
color: [f32; 3],
}
glium::implement_vertex!(Vertex2D, position, uv, color);
/// All the information required to render a character from a string
#[derive(Clone, Copy, Debug)]
struct RenderChar {
/// The position of the vertices
verts: Rect<f32>,
/// The UV coordinates of the vertices
uv: Rect<f32>,
}
impl RenderChar {
fn verts(&self) -> [Vertex2D; 4] {
macro_rules! vertex {
($p: expr, $t: expr) => {{
let color = [rand::random(), rand::random(), rand::random()];
let p = $p;
let t = $t;
Vertex2D {
position: [p.x, p.y],
uv: [t.x, t.y],
color: color.clone(),
}
}};
}
[
vertex!(self.verts.bottom_left(), self.uv.bottom_left()),
vertex!(self.verts.origin, self.uv.origin),
vertex!(self.verts.bottom_right(), self.uv.bottom_right()),
vertex!(self.verts.top_right(), self.uv.top_right()),
]
}
}
/// The information about a glyph that gets cached in the font atlas.
/// Since every letter has a different scaling factor to make maximum use of the MSDF pixels,
/// we need to keep track of the offset and scale from font unit space. This
/// information is required when positioning characters to get the right scale
/// and positioning for the geometry.
#[derive(Clone, Copy, Debug)]
struct GlyphInformation {
id: u32,
/// Where it actually is in the atlas texture
uv: Rect<f32>,
/// The font-space rectangle covered by the uv rectangle
font_units: Rect<f32>,
}
struct FontAtlas<'font, 'facade, T: Facade> {
/// Used when a string requires new glyphs
font: &'font Font,
/// Reference to the facade, used when we need to grow the atlas texture
facade: &'facade T,
/// The scale of each character
char_dim: u32,
/// The current dimensions of the texture
alloced_size: u32,
/// The x coordinate at which to place the next character,
next_x: u32,
/// The y coordinate at which to place the next character,
next_y: u32,
/// The actual backing texture that includes all of the distances.
/// All the distance values should be roughly in [-1, 1]
tex: Texture2d,
/// Texture coordinates of every character we know about
/// Technically, this should probably use glyph ids as keys
locations: HashMap<char, GlyphInformation>,
}
impl<'font, 'facade, T: Facade> FontAtlas<'font, 'facade, T> {
/// Create a new atlas.
fn build(
char_dim: u32,
font: &'font Font,
facade: &'facade T,
) -> Result<Self, glium::texture::TextureCreationError> {
use glium::texture::{MipmapsOption, UncompressedFloatFormat};
let alloced_size = char_dim * 16;
let tex = Texture2d::empty_with_format(
facade,
UncompressedFloatFormat::F16F16F16,
MipmapsOption::NoMipmap,
alloced_size,
alloced_size,
)?;
println!("Allocated {0:?}x{0:?} texture", alloced_size);
Ok(Self {
locations: Default::default(),
next_x: 0,
next_y: 0,
font,
facade,
char_dim,
tex,
alloced_size,
})
}
/// Get the glyph information for a character, either pulling them from the cache
/// or generating the MSDF
fn character_information(&mut self, c: char) -> GlyphInformation {
if !self.locations.contains_key(&c) {
const INIT_UV_BORDER: f32 = 0.2;
const UV_BORDER: f32 = 0.1;
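// The borders leave empty space around each glyph in its cell so the distance field can fall off without bleeding into neighbouring cells when sampled.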
let (glyph_id, contours, font_unit_rect) = get_glyph(self.font, c);
let uv_rect = Rect::new(
Point::new(INIT_UV_BORDER, INIT_UV_BORDER),
euclid::TypedSize2D::new(1.0 - 2.0 * INIT_UV_BORDER, 1.0 - 2.0 * INIT_UV_BORDER),
);
let (contours, transform) = rescale_contours(contours, font_unit_rect, uv_rect);
// Recolor the contours, compute the MSDF, and upload it to the texture
let contours = recolor_contours(contours, Angle::degrees(3.0), 1);
let msdf = compute_msdf(&contours, self.char_dim as usize);
self.tex.write(
glium::Rect {
left: self.next_x,
bottom: self.next_y,
width: self.char_dim,
height: self.char_dim,
},
msdf,
);
// Compute the final positions of the font_unit and uv rectangles
// transform should just be a scale and translation, easily invertible
let inv_transform = transform.inverse().unwrap();
let uv_rect = Rect::new(
Point::new(UV_BORDER, UV_BORDER),
euclid::TypedSize2D::new(1.0 - 2.0 * UV_BORDER, 1.0 - 2.0 * UV_BORDER),
);
let font_unit_rect = inv_transform.transform_rect(&uv_rect);
let alloc_scale = 1.0 / self.alloced_size as f32;
let uv_rect = uv_rect.scale(
self.char_dim as f32 * alloc_scale,
self.char_dim as f32 * alloc_scale,
);
let uv_rect = uv_rect
.translate(&(Vector::new(self.next_x as f32, self.next_y as f32) * alloc_scale));
// Make sure to advance to the next character slot
self.next_x += self.char_dim;
if self.next_x == self.alloced_size {
self.next_x = 0;
self.next_y += self.char_dim;
}
let tr = GlyphInformation {
id: glyph_id,
uv: uv_rect,
font_units: font_unit_rect,
};
self.locations.insert(c, tr);
}
self.locations[&c]
}
/// Layout a string.
/// TODO: hide things with interior mutability so that this doesn't take &mut
fn layout_string(&mut self, start: Point, size_in_points: f32, s: &str) -> Vec<RenderChar> {
let metrics = self.font.metrics();
eprintln!("{:?}", metrics);
let mut tr = Vec::new();
let scale = size_in_points / metrics.units_per_em as f32;
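// Scale from font units to points, then move the pen to `start`; the descent term offsets the glyphs so they sit on a consistent baseline.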
let mut transform = euclid::Transform2D::create_scale(scale, scale)
.post_translate(start.to_vector() + Vector::new(0.0, metrics.descent * -scale));
for c in s.chars() {
let information = self.character_information(c);
tr.push(RenderChar {
verts: transform.transform_rect(&information.font_units),
uv: information.uv,
});
transform = transform.post_translate(
self.font
.advance(information.id)
.unwrap_or(Vector::new(0.0, 0.0)) * scale,
);
}
tr
}
}
fn main() {
let mut events_loop = glutin::EventsLoop::new();
let mut window_size: glutin::dpi::LogicalSize = (512u32, 512).into();
let window = glutin::WindowBuilder::new().with_dimensions(window_size);
let context = glutin::ContextBuilder::new();
let context = context.with_gl_profile(glutin::GlProfile::Core);
let context = context.with_gl_debug_flag(true);
let display =
glium::Display::new(window, context, &events_loop).expect("Error creating GL display");
let hidpi_factor = display.gl_window().window().get_hidpi_factor() as f32;
println!("{:?}", hidpi_factor);
let font = get_font();
let bg_shader = program!(&display,
410 => {
vertex: r#"
#version 410
in vec2 position;
in vec2 uv;
in vec3 color;
out vec3 cross_color;
out vec2 cross_uv;
uniform mat4 transform;
void main() {
gl_Position = vec4(position, 0.0, 1.0) * transform;
cross_color = color;
cross_uv = uv;
}"#,
fragment: r#"
#version 410
uniform sampler2D tex;
in vec2 cross_uv;
in vec3 cross_color;
out vec4 color;
#define RADIUS 0.05
float band_around(float center, float r, float f) {
return smoothstep(center - r, center, f) -
smoothstep(center, center + r, f);
}
float remap(float f) {
return smoothstep(-RADIUS, RADIUS, f);
}
void main() {
vec3 x = texture(tex, cross_uv).rgb;
float v = max(min(x.r, x.g), min(max(x.r, x.g), x.b));
float c = remap(v);
color = vec4(cross_color.rgb, c);
}"#,
},
)
.unwrap();
let mut font_atlas =
FontAtlas::build(SDF_DIMENSION, &font, &display).expect("Failed to build font atlas");
let layout = font_atlas.layout_string(
Point::new(72.0, 72.0),
16.0,
// ":{<~The lazy cat jumps over the xenophobic dog, yodeling~>}",
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*()`~'\";/.,<>?",
);
let mut vertices = Vec::with_capacity(layout.len() * 4);
let mut indices = Vec::with_capacity(layout.len() * 5);
for c in &layout {
let base = vertices.len() as u16;
vertices.extend_from_slice(&c.verts());
indices.push(base);
indices.push(base + 1);
indices.push(base + 2);
indices.push(base + 3);
indices.push(std::u16::MAX);
}
let tex_vbo = glium::VertexBuffer::immutable(&display, &vertices).unwrap();
let index_buffer = glium::index::IndexBuffer::new(
&display,
glium::index::PrimitiveType::TriangleStrip,
&indices,
)
.unwrap();
let mut closed = false;
while !closed {
let params = glium::DrawParameters {
blend: glium::Blend::alpha_blending(),
primitive_restart_index: true,
..Default::default()
};
// This transform converts from point-space, with (0, 0) in the bottom left corner
// to NDC
// The final 96.0 / 72.0 scaling is because virtual DPI is based on 96
// DPI while points are 1/72 of an inch
let transform = euclid::Transform3D::create_translation(-1.0, -1.0, 0.0)
.pre_scale(2.0 / (window_size.width as f32), 2.0 / (window_size.height as f32), 1.0)
.pre_scale(96.0 / 72.0, 96.0 / 72.0, 1.0);
let mut target = display.draw();
target.clear_color(0.0, 0.0, 0.0, 1.0);
let uniforms = uniform!(
tex: font_atlas.tex.sampled(),
transform: transform.to_column_arrays(),
);
target
.draw(&tex_vbo, &index_buffer, &bg_shader, &uniforms, &params)
.unwrap();
target.finish().unwrap();
events_loop.poll_events(|ev| match ev {
glutin::Event::WindowEvent { event, .. } => match event {
glutin::WindowEvent::CloseRequested => closed = true,
glutin::WindowEvent::KeyboardInput { input, .. } => match input.virtual_keycode {
_ => {}
},
glutin::WindowEvent::Resized(new_size) => {
window_size = new_size;
}
_ => {}
},
_ => {}
})
}
} | }
} | random_line_split |
font_atlas.rs | #[macro_use]
extern crate glium;
use euclid::Rect;
use font_kit::font::Font;
use glium::backend::Facade;
use glium::texture::Texture2d;
use glium::{glutin, Surface};
use lyon_path::math::{Angle, Point, Vector};
use lyon_path::Segment;
use msdfgen::{compute_msdf, recolor_contours, Contour, PathCollector};
use std::collections::HashMap;
const SDF_DIMENSION: u32 = 32;
fn get_font() -> Font |
/// Get a glyph ID for a character, its contours, and the typographic bounds for that glyph
/// TODO: this should also return font.origin() so we can offset the EM-space
/// computations by it. However, on freetype that always returns 0 so for the
/// moment we'll get away without it
fn get_glyph(font: &Font, chr: char) -> (u32, Vec<Contour>, Rect<f32>) {
use font_kit::hinting::HintingOptions;
use lyon_path::builder::FlatPathBuilder;
let glyph_id = font.glyph_for_char(chr).unwrap();
let mut builder = PathCollector::new();
font.outline(glyph_id, HintingOptions::None, &mut builder)
.unwrap();
(
glyph_id,
builder.build(),
font.typographic_bounds(glyph_id).unwrap(),
)
}
/// Rescale contours so they fit in the provided rectangle.
/// Returns the scaled contours along with the transformation used to rescale the contours
fn rescale_contours(
mut contours: Vec<Contour>,
initial_bounds: Rect<f32>,
bounds: lyon_path::math::Rect,
) -> (Vec<Contour>, euclid::Transform2D<f32>) {
let initial_scale = initial_bounds.size.width.max(initial_bounds.size.height);
let bounds_scale = bounds.size.width.max(bounds.size.height);
let transformation =
euclid::Transform2D::create_translation(-initial_bounds.origin.x, -initial_bounds.origin.y)
.post_scale(bounds_scale / initial_scale, bounds_scale / initial_scale)
.post_translate(bounds.origin.to_vector());
for contour in &mut contours {
for mut elem in &mut contour.elements {
elem.segment = match elem.segment {
Segment::Line(s) => Segment::Line(s.transform(&transformation)),
Segment::Quadratic(s) => Segment::Quadratic(s.transform(&transformation)),
Segment::Cubic(s) => Segment::Cubic(s.transform(&transformation)),
Segment::Arc(s) => Segment::Arc(lyon_geom::Arc {
center: transformation.transform_point(&s.center),
..s
}),
}
}
}
(contours, transformation)
}
#[derive(Copy, Clone)]
struct Vertex2D {
position: [f32; 2],
uv: [f32; 2],
color: [f32; 3],
}
glium::implement_vertex!(Vertex2D, position, uv, color);
/// All the information required to render a character from a string
#[derive(Clone, Copy, Debug)]
struct RenderChar {
/// The position of the vertices
verts: Rect<f32>,
/// The UV coordinates of the vertices
uv: Rect<f32>,
}
impl RenderChar {
fn verts(&self) -> [Vertex2D; 4] {
macro_rules! vertex {
($p: expr, $t: expr) => {{
let color = [rand::random(), rand::random(), rand::random()];
let p = $p;
let t = $t;
Vertex2D {
position: [p.x, p.y],
uv: [t.x, t.y],
color: color.clone(),
}
}};
}
[
vertex!(self.verts.bottom_left(), self.uv.bottom_left()),
vertex!(self.verts.origin, self.uv.origin),
vertex!(self.verts.bottom_right(), self.uv.bottom_right()),
vertex!(self.verts.top_right(), self.uv.top_right()),
]
}
}
/// The information about a glyph that gets cached in the font atlas.
/// Since every letter has a different scaling factor to make maximum use of the MSDF pixels,
/// we need to keep track of the offset and scale from font unit space. This
/// information is required when positioning characters to get the right scale
/// and positioning for the geometry.
#[derive(Clone, Copy, Debug)]
struct GlyphInformation {
id: u32,
/// Where it actually is in the atlas texture
uv: Rect<f32>,
/// The font-space rectangle covered by the uv rectangle
font_units: Rect<f32>,
}
struct FontAtlas<'font, 'facade, T: Facade> {
/// Used when a string requires new glyphs
font: &'font Font,
/// Reference to the facade, used when we need to grow the atlas texture
facade: &'facade T,
/// The scale of each character
char_dim: u32,
/// The current dimensions of the texture
alloced_size: u32,
/// The x coordinate at which to place the next character,
next_x: u32,
/// The y coordinate at which to place the next character,
next_y: u32,
/// The actual backing texture that includes all of the distances.
/// All the distance values should be roughly in [-1, 1]
tex: Texture2d,
/// Texture coordinates of every character we know about
/// Technically, this should probably use glyph ids as keys
locations: HashMap<char, GlyphInformation>,
}
impl<'font, 'facade, T: Facade> FontAtlas<'font, 'facade, T> {
/// Create a new atlas.
fn build(
char_dim: u32,
font: &'font Font,
facade: &'facade T,
) -> Result<Self, glium::texture::TextureCreationError> {
use glium::texture::{MipmapsOption, UncompressedFloatFormat};
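// Start with room for a 16x16 grid of fixed-size glyph cells.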
let alloced_size = char_dim * 16;
let tex = Texture2d::empty_with_format(
facade,
UncompressedFloatFormat::F16F16F16,
MipmapsOption::NoMipmap,
alloced_size,
alloced_size,
)?;
println!("Allocated {0:?}x{0:?} texture", alloced_size);
Ok(Self {
locations: Default::default(),
next_x: 0,
next_y: 0,
font,
facade,
char_dim,
tex,
alloced_size,
})
}
/// Get the glyph information for a character, either pulling them from the cache
/// or generating the MSDF
fn character_information(&mut self, c: char) -> GlyphInformation {
if !self.locations.contains_key(&c) {
const INIT_UV_BORDER: f32 = 0.2;
const UV_BORDER: f32 = 0.1;
let (glyph_id, contours, font_unit_rect) = get_glyph(self.font, c);
let uv_rect = Rect::new(
Point::new(INIT_UV_BORDER, INIT_UV_BORDER),
euclid::TypedSize2D::new(1.0 - 2.0 * INIT_UV_BORDER, 1.0 - 2.0 * INIT_UV_BORDER),
);
let (contours, transform) = rescale_contours(contours, font_unit_rect, uv_rect);
// Recolor the contours, compute the MSDF, and upload it to the texture
let contours = recolor_contours(contours, Angle::degrees(3.0), 1);
let msdf = compute_msdf(&contours, self.char_dim as usize);
self.tex.write(
glium::Rect {
left: self.next_x,
bottom: self.next_y,
width: self.char_dim,
height: self.char_dim,
},
msdf,
);
// Compute the final positions of the font_unit and uv rectangles
// transform should just be a scale and translation, easily invertible
let inv_transform = transform.inverse().unwrap();
let uv_rect = Rect::new(
Point::new(UV_BORDER, UV_BORDER),
euclid::TypedSize2D::new(1.0 - 2.0 * UV_BORDER, 1.0 - 2.0 * UV_BORDER),
);
let font_unit_rect = inv_transform.transform_rect(&uv_rect);
let alloc_scale = 1.0 / self.alloced_size as f32;
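// Map the per-cell UV rectangle into atlas coordinates: scale by cell size over atlas size, then translate to this glyph's cell.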
let uv_rect = uv_rect.scale(
self.char_dim as f32 * alloc_scale,
self.char_dim as f32 * alloc_scale,
);
let uv_rect = uv_rect
.translate(&(Vector::new(self.next_x as f32, self.next_y as f32) * alloc_scale));
// Make sure to advance to the next character slot
self.next_x += self.char_dim;
if self.next_x == self.alloced_size {
self.next_x = 0;
self.next_y += self.char_dim;
}
let tr = GlyphInformation {
id: glyph_id,
uv: uv_rect,
font_units: font_unit_rect,
};
self.locations.insert(c, tr);
}
self.locations[&c]
}
/// Layout a string.
/// TODO: hide things with interior mutability so that this doesn't take &mut
fn layout_string(&mut self, start: Point, size_in_points: f32, s: &str) -> Vec<RenderChar> {
let metrics = self.font.metrics();
eprintln!("{:?}", metrics);
let mut tr = Vec::new();
let scale = size_in_points / metrics.units_per_em as f32;
let mut transform = euclid::Transform2D::create_scale(scale, scale)
.post_translate(start.to_vector() + Vector::new(0.0, metrics.descent * -scale));
for c in s.chars() {
let information = self.character_information(c);
tr.push(RenderChar {
verts: transform.transform_rect(&information.font_units),
uv: information.uv,
});
transform = transform.post_translate(
self.font
.advance(information.id)
.unwrap_or(Vector::new(0.0, 0.0)) * scale,
);
}
tr
}
}
fn main() {
let mut events_loop = glutin::EventsLoop::new();
let mut window_size: glutin::dpi::LogicalSize = (512u32, 512).into();
let window = glutin::WindowBuilder::new().with_dimensions(window_size);
let context = glutin::ContextBuilder::new();
let context = context.with_gl_profile(glutin::GlProfile::Core);
let context = context.with_gl_debug_flag(true);
let display =
glium::Display::new(window, context, &events_loop).expect("Error creating GL display");
let hidpi_factor = display.gl_window().window().get_hidpi_factor() as f32;
println!("{:?}", hidpi_factor);
let font = get_font();
let bg_shader = program!(&display,
410 => {
vertex: r#"
#version 410
in vec2 position;
in vec2 uv;
in vec3 color;
out vec3 cross_color;
out vec2 cross_uv;
uniform mat4 transform;
void main() {
gl_Position = vec4(position, 0.0, 1.0) * transform;
cross_color = color;
cross_uv = uv;
}"#,
fragment: r#"
#version 410
uniform sampler2D tex;
in vec2 cross_uv;
in vec3 cross_color;
out vec4 color;
#define RADIUS 0.05
float band_around(float center, float r, float f) {
return smoothstep(center - r, center, f) -
smoothstep(center, center + r, f);
}
float remap(float f) {
return smoothstep(-RADIUS, RADIUS, f);
}
void main() {
vec3 x = texture(tex, cross_uv).rgb;
float v = max(min(x.r, x.g), min(max(x.r, x.g), x.b));
float c = remap(v);
color = vec4(cross_color.rgb, c);
}"#,
},
)
.unwrap();
let mut font_atlas =
FontAtlas::build(SDF_DIMENSION, &font, &display).expect("Failed to build font atlas");
let layout = font_atlas.layout_string(
Point::new(72.0, 72.0),
16.0,
// ":{<~The lazy cat jumps over the xenophobic dog, yodeling~>}",
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*()`~'\";/.,<>?",
);
let mut vertices = Vec::with_capacity(layout.len() * 4);
let mut indices = Vec::with_capacity(layout.len() * 5);
for c in &layout {
let base = vertices.len() as u16;
vertices.extend_from_slice(&c.verts());
indices.push(base);
indices.push(base + 1);
indices.push(base + 2);
indices.push(base + 3);
indices.push(std::u16::MAX);
}
let tex_vbo = glium::VertexBuffer::immutable(&display, &vertices).unwrap();
let index_buffer = glium::index::IndexBuffer::new(
&display,
glium::index::PrimitiveType::TriangleStrip,
&indices,
)
.unwrap();
let mut closed = false;
while !closed {
let params = glium::DrawParameters {
blend: glium::Blend::alpha_blending(),
primitive_restart_index: true,
..Default::default()
};
// This transform converts from point-space, with (0, 0) in the bottom left corner
// to NDC
// The final 96.0 / 72.0 scaling is because virtual DPI is based on 96
// DPI while points are 1/72 of an inch
let transform = euclid::Transform3D::create_translation(-1.0, -1.0, 0.0)
.pre_scale(2.0 / (window_size.width as f32), 2.0 / (window_size.height as f32), 1.0)
.pre_scale(96.0 / 72.0, 96.0 / 72.0, 1.0);
let mut target = display.draw();
target.clear_color(0.0, 0.0, 0.0, 1.0);
let uniforms = uniform!(
tex: font_atlas.tex.sampled(),
transform: transform.to_column_arrays(),
);
target
.draw(&tex_vbo, &index_buffer, &bg_shader, &uniforms, &params)
.unwrap();
target.finish().unwrap();
events_loop.poll_events(|ev| match ev {
glutin::Event::WindowEvent { event, .. } => match event {
glutin::WindowEvent::CloseRequested => closed = true,
glutin::WindowEvent::KeyboardInput { input, .. } => match input.virtual_keycode {
_ => {}
},
glutin::WindowEvent::Resized(new_size) => {
window_size = new_size;
}
_ => {}
},
_ => {}
})
}
}
| {
use font_kit::family_name::FamilyName;
use font_kit::properties::{Properties, Style};
use font_kit::source::SystemSource;
let source = SystemSource::new();
source
.select_best_match(
&[FamilyName::Serif],
Properties::new().style(Style::Normal),
)
.expect("Failed to select a good font")
.load()
.unwrap()
} | identifier_body |
font_atlas.rs | #[macro_use]
extern crate glium;
use euclid::Rect;
use font_kit::font::Font;
use glium::backend::Facade;
use glium::texture::Texture2d;
use glium::{glutin, Surface};
use lyon_path::math::{Angle, Point, Vector};
use lyon_path::Segment;
use msdfgen::{compute_msdf, recolor_contours, Contour, PathCollector};
use std::collections::HashMap;
const SDF_DIMENSION: u32 = 32;
fn get_font() -> Font {
use font_kit::family_name::FamilyName;
use font_kit::properties::{Properties, Style};
use font_kit::source::SystemSource;
let source = SystemSource::new();
source
.select_best_match(
&[FamilyName::Serif],
Properties::new().style(Style::Normal),
)
.expect("Failed to select a good font")
.load()
.unwrap()
}
/// Get a glyph ID for a character, its contours, and the typographic bounds for that glyph
/// TODO: this should also return font.origin() so we can offset the EM-space
/// computations by it. However, on freetype that always returns 0 so for the
/// moment we'll get away without it
fn get_glyph(font: &Font, chr: char) -> (u32, Vec<Contour>, Rect<f32>) {
use font_kit::hinting::HintingOptions;
use lyon_path::builder::FlatPathBuilder;
let glyph_id = font.glyph_for_char(chr).unwrap();
let mut builder = PathCollector::new();
font.outline(glyph_id, HintingOptions::None, &mut builder)
.unwrap();
(
glyph_id,
builder.build(),
font.typographic_bounds(glyph_id).unwrap(),
)
}
/// Rescale contours so they fit in the provided rectangle.
/// Returns the scaled contours along with the transformation used to rescale the contours
fn rescale_contours(
mut contours: Vec<Contour>,
initial_bounds: Rect<f32>,
bounds: lyon_path::math::Rect,
) -> (Vec<Contour>, euclid::Transform2D<f32>) {
let initial_scale = initial_bounds.size.width.max(initial_bounds.size.height);
let bounds_scale = bounds.size.width.max(bounds.size.height);
let transformation =
euclid::Transform2D::create_translation(-initial_bounds.origin.x, -initial_bounds.origin.y)
.post_scale(bounds_scale / initial_scale, bounds_scale / initial_scale)
.post_translate(bounds.origin.to_vector());
for contour in &mut contours {
for mut elem in &mut contour.elements {
elem.segment = match elem.segment {
Segment::Line(s) => Segment::Line(s.transform(&transformation)),
Segment::Quadratic(s) => Segment::Quadratic(s.transform(&transformation)),
Segment::Cubic(s) => Segment::Cubic(s.transform(&transformation)),
Segment::Arc(s) => Segment::Arc(lyon_geom::Arc {
center: transformation.transform_point(&s.center),
..s
}),
}
}
}
(contours, transformation)
}
#[derive(Copy, Clone)]
struct | {
position: [f32; 2],
uv: [f32; 2],
color: [f32; 3],
}
glium::implement_vertex!(Vertex2D, position, uv, color);
/// All the information required to render a character from a string
#[derive(Clone, Copy, Debug)]
struct RenderChar {
/// The position of the vertices
verts: Rect<f32>,
/// The UV coordinates of the vertices
uv: Rect<f32>,
}
impl RenderChar {
fn verts(&self) -> [Vertex2D; 4] {
macro_rules! vertex {
($p: expr, $t: expr) => {{
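// Random per-vertex colour: just a demo aid so individual glyph quads are easy to tell apart.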
let color = [rand::random(), rand::random(), rand::random()];
let p = $p;
let t = $t;
Vertex2D {
position: [p.x, p.y],
uv: [t.x, t.y],
color: color.clone(),
}
}};
}
[
vertex!(self.verts.bottom_left(), self.uv.bottom_left()),
vertex!(self.verts.origin, self.uv.origin),
vertex!(self.verts.bottom_right(), self.uv.bottom_right()),
vertex!(self.verts.top_right(), self.uv.top_right()),
]
}
}
/// The information about a glyph that gets cached in the font atlas.
/// Since every letter has a different scaling factor to make maximum use of the MSDF pixels,
/// we need to keep track of the offset and scale from font unit space. This
/// information is required when positioning characters to get the right scale
/// and positioning for the geometry.
#[derive(Clone, Copy, Debug)]
struct GlyphInformation {
id: u32,
/// Where it actually is in the atlas texture
uv: Rect<f32>,
/// The font-space rectangle covered by the uv rectangle
font_units: Rect<f32>,
}
struct FontAtlas<'font, 'facade, T: Facade> {
/// Used when a string requires new glyphs
font: &'font Font,
/// Reference to the facade, used when we need to grow the atlas texture
facade: &'facade T,
/// The scale of each character
char_dim: u32,
/// The current dimensions of the texture
alloced_size: u32,
/// The x coordinate at which to place the next character,
next_x: u32,
/// The y coordinate at which to place the next character,
next_y: u32,
/// The actual backing texture that includes all of the distances.
/// All the distance values should be roughly in [-1, 1]
tex: Texture2d,
/// Texture coordinates of every character we know about
/// Technically, this should probably use glyph ids as keys
locations: HashMap<char, GlyphInformation>,
}
impl<'font, 'facade, T: Facade> FontAtlas<'font, 'facade, T> {
/// Create a new atlas.
fn build(
char_dim: u32,
font: &'font Font,
facade: &'facade T,
) -> Result<Self, glium::texture::TextureCreationError> {
use glium::texture::{MipmapsOption, UncompressedFloatFormat};
let alloced_size = char_dim * 16;
let tex = Texture2d::empty_with_format(
facade,
UncompressedFloatFormat::F16F16F16,
MipmapsOption::NoMipmap,
alloced_size,
alloced_size,
)?;
println!("Allocated {0:?}x{0:?} texture", alloced_size);
Ok(Self {
locations: Default::default(),
next_x: 0,
next_y: 0,
font,
facade,
char_dim,
tex,
alloced_size,
})
}
/// Get the glyph information for a character, either pulling them from the cache
/// or generating the MSDF
fn character_information(&mut self, c: char) -> GlyphInformation {
if !self.locations.contains_key(&c) {
const INIT_UV_BORDER: f32 = 0.2;
const UV_BORDER: f32 = 0.1;
let (glyph_id, contours, font_unit_rect) = get_glyph(self.font, c);
let uv_rect = Rect::new(
Point::new(INIT_UV_BORDER, INIT_UV_BORDER),
euclid::TypedSize2D::new(1.0 - 2.0 * INIT_UV_BORDER, 1.0 - 2.0 * INIT_UV_BORDER),
);
let (contours, transform) = rescale_contours(contours, font_unit_rect, uv_rect);
// Recolor the contours, compute the MSDF, and upload it to the texture
let contours = recolor_contours(contours, Angle::degrees(3.0), 1);
let msdf = compute_msdf(&contours, self.char_dim as usize);
self.tex.write(
glium::Rect {
left: self.next_x,
bottom: self.next_y,
width: self.char_dim,
height: self.char_dim,
},
msdf,
);
// Compute the final positions of the font_unit and uv rectangles
// transform should just be a scale and translation, easily invertible
let inv_transform = transform.inverse().unwrap();
let uv_rect = Rect::new(
Point::new(UV_BORDER, UV_BORDER),
euclid::TypedSize2D::new(1.0 - 2.0 * UV_BORDER, 1.0 - 2.0 * UV_BORDER),
);
let font_unit_rect = inv_transform.transform_rect(&uv_rect);
let alloc_scale = 1.0 / self.alloced_size as f32;
let uv_rect = uv_rect.scale(
self.char_dim as f32 * alloc_scale,
self.char_dim as f32 * alloc_scale,
);
let uv_rect = uv_rect
.translate(&(Vector::new(self.next_x as f32, self.next_y as f32) * alloc_scale));
// Make sure to advance to the next character slot
self.next_x += self.char_dim;
if self.next_x == self.alloced_size {
self.next_x = 0;
self.next_y += self.char_dim;
}
let tr = GlyphInformation {
id: glyph_id,
uv: uv_rect,
font_units: font_unit_rect,
};
self.locations.insert(c, tr);
}
self.locations[&c]
}
/// Layout a string.
/// TODO: hide things with interior mutability so that this doesn't take &mut
fn layout_string(&mut self, start: Point, size_in_points: f32, s: &str) -> Vec<RenderChar> {
let metrics = self.font.metrics();
eprintln!("{:?}", metrics);
let mut tr = Vec::new();
let scale = size_in_points / metrics.units_per_em as f32;
let mut transform = euclid::Transform2D::create_scale(scale, scale)
.post_translate(start.to_vector() + Vector::new(0.0, metrics.descent * -scale));
for c in s.chars() {
let information = self.character_information(c);
tr.push(RenderChar {
verts: transform.transform_rect(&information.font_units),
uv: information.uv,
});
transform = transform.post_translate(
self.font
.advance(information.id)
.unwrap_or(Vector::new(0.0, 0.0)) * scale,
);
}
tr
}
}
fn main() {
let mut events_loop = glutin::EventsLoop::new();
let mut window_size: glutin::dpi::LogicalSize = (512u32, 512).into();
let window = glutin::WindowBuilder::new().with_dimensions(window_size);
let context = glutin::ContextBuilder::new();
let context = context.with_gl_profile(glutin::GlProfile::Core);
let context = context.with_gl_debug_flag(true);
let display =
glium::Display::new(window, context, &events_loop).expect("Error creating GL display");
let hidpi_factor = display.gl_window().window().get_hidpi_factor() as f32;
println!("{:?}", hidpi_factor);
let font = get_font();
let bg_shader = program!(&display,
410 => {
vertex: r#"
#version 410
in vec2 position;
in vec2 uv;
in vec3 color;
out vec3 cross_color;
out vec2 cross_uv;
uniform mat4 transform;
void main() {
gl_Position = vec4(position, 0.0, 1.0) * transform;
cross_color = color;
cross_uv = uv;
}"#,
fragment: r#"
#version 410
uniform sampler2D tex;
in vec2 cross_uv;
in vec3 cross_color;
out vec4 color;
#define RADIUS 0.05
float band_around(float center, float r, float f) {
return smoothstep(center - r, center, f) -
smoothstep(center, center + r, f);
}
float remap(float f) {
return smoothstep(-RADIUS, RADIUS, f);
}
void main() {
vec3 x = texture(tex, cross_uv).rgb;
float v = max(min(x.r, x.g), min(max(x.r, x.g), x.b));
float c = remap(v);
color = vec4(cross_color.rgb, c);
}"#,
},
)
.unwrap();
let mut font_atlas =
FontAtlas::build(SDF_DIMENSION, &font, &display).expect("Failed to build font atlas");
let layout = font_atlas.layout_string(
Point::new(72.0, 72.0),
16.0,
// ":{<~The lazy cat jumps over the xenophobic dog, yodeling~>}",
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*()`~'\";/.,<>?",
);
let mut vertices = Vec::with_capacity(layout.len() * 4);
let mut indices = Vec::with_capacity(layout.len() * 5);
for c in &layout {
let base = vertices.len() as u16;
vertices.extend_from_slice(&c.verts());
indices.push(base);
indices.push(base + 1);
indices.push(base + 2);
indices.push(base + 3);
indices.push(std::u16::MAX);
}
let tex_vbo = glium::VertexBuffer::immutable(&display, &vertices).unwrap();
let index_buffer = glium::index::IndexBuffer::new(
&display,
glium::index::PrimitiveType::TriangleStrip,
&indices,
)
.unwrap();
let mut closed = false;
while !closed {
let params = glium::DrawParameters {
blend: glium::Blend::alpha_blending(),
primitive_restart_index: true,
..Default::default()
};
// This transform converts from point-space, with (0, 0) in the bottom left corner
// to NDC
// The final 96.0 / 72.0 scaling is because virtual DPI is based on 96
// DPI while points are 1/72 of an inch
let transform = euclid::Transform3D::create_translation(-1.0, -1.0, 0.0)
.pre_scale(2.0 / (window_size.width as f32), 2.0 / (window_size.height as f32), 1.0)
.pre_scale(96.0 / 72.0, 96.0 / 72.0, 1.0);
let mut target = display.draw();
target.clear_color(0.0, 0.0, 0.0, 1.0);
let uniforms = uniform!(
tex: font_atlas.tex.sampled(),
transform: transform.to_column_arrays(),
);
target
.draw(&tex_vbo, &index_buffer, &bg_shader, &uniforms, &params)
.unwrap();
target.finish().unwrap();
events_loop.poll_events(|ev| match ev {
glutin::Event::WindowEvent { event, .. } => match event {
glutin::WindowEvent::CloseRequested => closed = true,
glutin::WindowEvent::KeyboardInput { input, .. } => match input.virtual_keycode {
_ => {}
},
glutin::WindowEvent::Resized(new_size) => {
window_size = new_size;
}
_ => {}
},
_ => {}
})
}
}
| Vertex2D | identifier_name |
conpty.rs | ::{ReadFile, WriteFile};
use crate::pty::conpty::winapi::um::handleapi::*;
use crate::pty::conpty::winapi::um::minwinbase::STILL_ACTIVE;
use crate::pty::conpty::winapi::um::namedpipeapi::CreatePipe;
use crate::pty::conpty::winapi::um::processthreadsapi::*;
use crate::pty::conpty::winapi::um::winbase::EXTENDED_STARTUPINFO_PRESENT;
use crate::pty::conpty::winapi::um::winbase::STARTUPINFOEXW;
use crate::pty::conpty::winapi::um::wincon::COORD;
use std::env;
use std::ffi::{OsStr, OsString};
use std::mem;
use std::os::windows::ffi::OsStrExt;
use std::os::windows::ffi::OsStringExt;
use std::os::windows::raw::HANDLE;
use std::path::Path;
use std::ptr;
use std::sync::{Arc, Mutex};
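// Attribute key used with UpdateProcThreadAttribute to hand the pseudo console to the spawned child (see ProcThreadAttributeList::set_pty below).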
const PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE: usize = 0x00020016;
#[derive(Debug)]
pub struct Command {
args: Vec<OsString>,
input: Option<OwnedHandle>,
output: Option<OwnedHandle>,
hpc: Option<HPCON>,
}
impl Command {
pub fn new<S: AsRef<OsStr>>(program: S) -> Self {
Self {
args: vec![program.as_ref().to_owned()],
input: None,
output: None,
hpc: None,
}
}
fn search_path(exe: &OsStr) -> OsString {
if let Some(path) = env::var_os("PATH") {
let extensions = env::var_os("PATHEXT").unwrap_or(".EXE".into());
for path in env::split_paths(&path) {
// Check for exactly the user's string in this path dir
let candidate = path.join(&exe);
if candidate.exists() {
return candidate.into_os_string();
}
// otherwise try tacking on some extensions.
// Note that this really replaces the extension in the
// user specified path, so this is potentially wrong.
for ext in env::split_paths(&extensions) {
// PATHEXT includes the leading `.`, but `with_extension`
// doesn't want that
let ext = ext.to_str().expect("PATHEXT entries must be utf8");
let path = path.join(&exe).with_extension(&ext[1..]);
if path.exists() {
return path.into_os_string();
}
}
}
}
exe.to_owned()
}
pub fn arg<S: AsRef<OsStr>>(&mut self, arg: S) -> &mut Command {
// FIXME: quoting!
self.args.push(arg.as_ref().to_owned());
self
}
pub fn args<I, S>(&mut self, args: I) -> &mut Command
where
I: IntoIterator<Item = S>,
S: AsRef<OsStr>,
{
for arg in args {
self.arg(arg);
}
self
}
pub fn env<K, V>(&mut self, key: K, val: V) -> &mut Command
where
K: AsRef<OsStr>,
V: AsRef<OsStr>,
{
eprintln!(
"ignoring env {:?}={:?} for child; FIXME: implement this!",
key.as_ref(),
val.as_ref()
);
self
}
fn set_pty(&mut self, input: OwnedHandle, output: OwnedHandle, con: HPCON) -> &mut Command |
fn cmdline(&self) -> Result<(Vec<u16>, Vec<u16>), Error> {
let mut cmdline = Vec::<u16>::new();
let exe = Self::search_path(&self.args[0]);
Self::append_quoted(&exe, &mut cmdline);
// Ensure that we nul terminate the module name, otherwise we'll
// ask CreateProcessW to start something random!
let mut exe: Vec<u16> = exe.encode_wide().collect();
exe.push(0);
for arg in self.args.iter().skip(1) {
cmdline.push(' ' as u16);
ensure!(
!arg.encode_wide().any(|c| c == 0),
"invalid encoding for command line argument {:?}",
arg
);
Self::append_quoted(arg, &mut cmdline);
}
// Ensure that the command line is nul terminated too!
cmdline.push(0);
Ok((exe, cmdline))
}
// Borrowed from https://github.com/hniksic/rust-subprocess/blob/873dfed165173e52907beb87118b2c0c05d8b8a1/src/popen.rs#L1117
// which in turn was translated from ArgvQuote at http://tinyurl.com/zmgtnls
fn append_quoted(arg: &OsStr, cmdline: &mut Vec<u16>) {
if !arg.is_empty()
&& !arg.encode_wide().any(|c| {
c == ' ' as u16
|| c == '\t' as u16
|| c == '\n' as u16
|| c == '\x0b' as u16
|| c == '\"' as u16
})
{
cmdline.extend(arg.encode_wide());
return;
}
cmdline.push('"' as u16);
let arg: Vec<_> = arg.encode_wide().collect();
let mut i = 0;
while i < arg.len() {
let mut num_backslashes = 0;
while i < arg.len() && arg[i] == '\\' as u16 {
i += 1;
num_backslashes += 1;
}
if i == arg.len() {
for _ in 0..num_backslashes * 2 {
cmdline.push('\\' as u16);
}
break;
} else if arg[i] == b'"' as u16 {
for _ in 0..num_backslashes * 2 + 1 {
cmdline.push('\\' as u16);
}
cmdline.push(arg[i]);
} else {
for _ in 0..num_backslashes {
cmdline.push('\\' as u16);
}
cmdline.push(arg[i]);
}
i += 1;
}
cmdline.push('"' as u16);
}
pub fn spawn(&mut self) -> Result<Child, Error> {
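// Build extended startup info whose attribute list carries the ConPTY handle; EXTENDED_STARTUPINFO_PRESENT below tells CreateProcessW to honour it.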
let mut si: STARTUPINFOEXW = unsafe { mem::zeroed() };
si.StartupInfo.cb = mem::size_of::<STARTUPINFOEXW>() as u32;
let mut attrs = ProcThreadAttributeList::with_capacity(1)?;
attrs.set_pty(*self.hpc.as_ref().unwrap())?;
si.lpAttributeList = attrs.as_mut_ptr();
let mut pi: PROCESS_INFORMATION = unsafe { mem::zeroed() };
let (mut exe, mut cmdline) = self.cmdline()?;
let cmd_os = OsString::from_wide(&cmdline);
eprintln!(
"Running: module: {} {:?}",
Path::new(&OsString::from_wide(&exe)).display(),
cmd_os
);
let res = unsafe {
CreateProcessW(
exe.as_mut_slice().as_mut_ptr(),
cmdline.as_mut_slice().as_mut_ptr(),
ptr::null_mut(),
ptr::null_mut(),
0,
EXTENDED_STARTUPINFO_PRESENT,
ptr::null_mut(), // FIXME: env
ptr::null_mut(),
&mut si.StartupInfo,
&mut pi,
)
};
if res == 0 {
let err = IoError::last_os_error();
bail!("CreateProcessW `{:?}` failed: {}", cmd_os, err);
}
// Make sure we close out the thread handle so we don't leak it;
// we do this simply by making it owned
let _main_thread = OwnedHandle { handle: pi.hThread };
let proc = OwnedHandle {
handle: pi.hProcess,
};
Ok(Child { proc })
}
}
struct ProcThreadAttributeList {
data: Vec<u8>,
}
impl ProcThreadAttributeList {
pub fn with_capacity(num_attributes: DWORD) -> Result<Self, Error> {
let mut bytes_required: usize = 0;
unsafe {
InitializeProcThreadAttributeList(
ptr::null_mut(),
num_attributes,
0,
&mut bytes_required,
)
};
let mut data = Vec::with_capacity(bytes_required);
// We have the right capacity, so force the vec to consider itself
// that length. The contents of those bytes will be maintained
// by the win32 apis used in this impl.
unsafe { data.set_len(bytes_required) };
let attr_ptr = data.as_mut_slice().as_mut_ptr() as *mut _;
let res = unsafe {
InitializeProcThreadAttributeList(attr_ptr, num_attributes, 0, &mut bytes_required)
};
ensure!(
res != 0,
"InitializeProcThreadAttributeList failed: {}",
IoError::last_os_error()
);
Ok(Self { data })
}
pub fn as_mut_ptr(&mut self) -> LPPROC_THREAD_ATTRIBUTE_LIST {
self.data.as_mut_slice().as_mut_ptr() as *mut _
}
pub fn set_pty(&mut self, con: HPCON) -> Result<(), Error> {
let res = unsafe {
UpdateProcThreadAttribute(
self.as_mut_ptr(),
0,
PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE,
con,
mem::size_of::<HPCON>(),
ptr::null_mut(),
ptr::null_mut(),
)
};
ensure!(
res != 0,
"UpdateProcThreadAttribute failed: {}",
IoError::last_os_error()
);
Ok(())
}
}
impl Drop for ProcThreadAttributeList {
fn drop(&mut self) {
unsafe { DeleteProcThreadAttributeList(self.as_mut_ptr()) };
}
}
#[derive(Debug)]
pub struct Child {
proc: OwnedHandle,
}
impl Child {
pub fn try_wait(&mut self) -> IoResult<Option<ExitStatus>> {
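// GetExitCodeProcess reports STILL_ACTIVE while the child is running, which maps to Ok(None) here.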
let mut status: DWORD = 0;
let res = unsafe { GetExitCodeProcess(self.proc.handle, &mut status) };
if res != 0 {
if status == STILL_ACTIVE {
Ok(None)
} else {
Ok(Some(ExitStatus { status }))
}
} else {
Ok(None)
}
}
}
#[derive(Debug)]
pub struct ExitStatus {
status: DWORD,
}
type HPCON = HANDLE;
extern "system" {
fn CreatePseudoConsole(
size: COORD,
hInput: HANDLE,
hOutput: HANDLE,
flags: DWORD,
hpc: *mut HPCON,
) -> HRESULT;
fn ResizePseudoConsole(hpc: HPCON, size: COORD) -> HRESULT;
fn ClosePseudoConsole(hpc: HPCON);
}
struct PsuedoCon {
con: HPCON,
}
unsafe impl Send for PsuedoCon {}
unsafe impl Sync for PsuedoCon {}
impl Drop for PsuedoCon {
fn drop(&mut self) {
unsafe { ClosePseudoConsole(self.con) };
}
}
impl PsuedoCon {
fn new(size: COORD, input: &OwnedHandle, output: &OwnedHandle) -> Result<Self, Error> {
let mut con: HPCON = INVALID_HANDLE_VALUE;
let result = unsafe { CreatePseudoConsole(size, input.handle, output.handle, 0, &mut con) };
ensure!(
result == S_OK,
"failed to create psuedo console: HRESULT {}",
result
);
Ok(Self { con })
}
fn resize(&self, size: COORD) -> Result<(), Error> {
let result = unsafe { ResizePseudoConsole(self.con, size) };
ensure!(
result == S_OK,
"failed to resize console to {}x{}: HRESULT: {}",
size.X,
size.Y,
result
);
Ok(())
}
}
#[derive(Debug)]
struct OwnedHandle {
handle: HANDLE,
}
unsafe impl Send for OwnedHandle {}
impl Drop for OwnedHandle {
fn drop(&mut self) {
if self.handle != INVALID_HANDLE_VALUE && !self.handle.is_null() {
unsafe { CloseHandle(self.handle) };
}
}
}
impl OwnedHandle {
fn try_clone(&self) -> Result<Self, IoError> {
if self.handle == INVALID_HANDLE_VALUE || self.handle.is_null() {
return Ok(OwnedHandle {
handle: self.handle,
});
}
let proc = unsafe { GetCurrentProcess() };
let mut duped = INVALID_HANDLE_VALUE;
let ok = unsafe {
DuplicateHandle(
proc,
self.handle as *mut _,
proc,
&mut duped,
0,
0,
winapi::um::winnt::DUPLICATE_SAME_ACCESS,
)
};
if ok == 0 {
Err(IoError::last_os_error())
} else {
Ok(OwnedHandle {
handle: duped as *mut _,
})
}
}
}
struct Inner {
con: PsuedoCon,
readable: OwnedHandle,
writable: OwnedHandle,
size: winsize,
}
impl Inner {
pub fn resize(
&mut self,
num_rows: u16,
num_cols: u16,
pixel_width: u16,
pixel_height: u16,
) -> Result<(), Error> {
self.con.resize(COORD {
X: num_cols as i16,
Y: num_rows as i16,
})?;
self.size = winsize {
ws_row: num_rows,
ws_col: num_cols,
ws_xpixel: pixel_width,
ws_ypixel: pixel_height,
};
Ok(())
}
}
#[derive(Clone)]
pub struct MasterPty {
inner: Arc<Mutex<Inner>>,
}
pub struct SlavePty {
inner: Arc<Mutex<Inner>>,
}
#[derive(Debug, Clone, Copy)]
#[allow(non_camel_case_types)]
pub struct winsize {
pub ws_row: u16,
pub ws_col: u16,
pub ws_xpixel: u16,
pub ws_ypixel: u16,
}
impl MasterPty {
pub fn resize(
&self,
num_rows: u16,
num_cols: u16,
pixel_width: u16,
pixel_height: u16,
) -> Result<(), Error> {
let mut inner = self.inner.lock().unwrap();
inner.resize(num_rows, num_cols, pixel_width, pixel_height)
}
pub fn get_size(&self) -> Result<winsize, Error> {
let inner = self.inner.lock().unwrap();
Ok(inner.size.clone())
}
pub fn try_clone(&self) -> Result<Self, Error> {
// FIXME: this isn't great. Replace this with a way to
// clone the output handle and read it.
let inner = self.inner.lock().unwrap();
Ok(Self {
inner: Arc::new(Mutex::new(Inner {
con: PsuedoCon {
con: INVALID_HANDLE_VALUE,
},
readable: inner.readable.try_clone()?,
writable: inner.writable.try_clone()?,
size: inner.size,
})),
})
}
pub fn clear_nonblocking(&self) -> Result<(), Error> {
Ok(())
}
}
impl io::Write for MasterPty {
fn write(&mut self, buf: &[u8]) -> Result<usize, io::Error> {
let mut num_wrote = 0;
let ok = unsafe {
WriteFile(
self.inner.lock().unwrap().writable.handle as *mut _,
buf.as_ptr() as *const _,
buf.len() as u32,
&mut num_wrote,
ptr::null_mut(),
)
};
if ok == 0 {
Err(IoError::last_os_error())
} else {
Ok(num_wrote as usize)
}
}
fn flush(&mut self) -> Result<(), io::Error> {
Ok(())
}
}
impl io::Read for MasterPty {
fn read(&mut self, buf: &mut [u8]) -> Result<usize, io::Error> {
let mut num_read = 0;
let ok = unsafe {
ReadFile(
self.inner.lock().unwrap().readable.handle as *mut _,
buf.as_mut_ptr() as *mut _,
buf.len() as u32,
&mut num_read,
ptr::null_mut(),
)
};
if ok == 0 {
Err(IoError::last_os_error())
} else {
Ok(num_read as usize)
}
}
}
impl SlavePty {
pub fn spawn_command(self, mut cmd: Command) -> Result<Child, Error> {
let inner = self.inner.lock().unwrap();
cmd.set_pty(
inner.writable.try_clone()?,
inner.readable.try_clone()?,
inner.con.con,
);
cmd.spawn()
}
}
fn pipe() -> Result<(OwnedHandle, OwnedHandle), Error> {
let mut read: HANDLE = INVALID_HANDLE_VALUE;
let mut write: HANDLE = INVALID_HANDLE_VALUE;
if unsafe { CreatePipe(&mut read, &mut write, ptr::null_mut(), 0) } == 0 {
bail!("CreatePipe failed: {}", IoError::last_os_error());
}
Ok((OwnedHandle { handle: read }, OwnedHandle { handle: write }))
}
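/// Minimal usage sketch (hypothetical; assumes some program such as "cmd.exe" is on PATH):
/// let (master, slave) = openpty(24, 80, 0, 0)?;
/// let child = slave.spawn_command(Command::new("cmd.exe"))?;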
pub fn openpty(
num_rows: u16,
num_cols: u16,
pixel_width: u16,
pixel_height: u16,
) -> Result<(MasterPty, SlavePty), Error> {
let (stdin_read, stdin_write) = pipe()?;
let (stdout_read, stdout_write) = pipe()?;
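// The pseudo console reads its input from stdin_read and writes its output to stdout_write; the opposite ends (stdin_write / stdout_read) stay with the master side.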
let con = PsuedoCon::new(
COORD {
X: num_cols as i16,
Y: num_rows as i16,
},
&stdin_read,
&stdout_write,
)?;
let size = winsize {
ws_row: num_rows,
| {
self.input.replace(input);
self.output.replace(output);
self.hpc.replace(con);
self
} | identifier_body |
conpty.rs | ::{ReadFile, WriteFile};
use crate::pty::conpty::winapi::um::handleapi::*;
use crate::pty::conpty::winapi::um::minwinbase::STILL_ACTIVE;
use crate::pty::conpty::winapi::um::namedpipeapi::CreatePipe;
use crate::pty::conpty::winapi::um::processthreadsapi::*;
use crate::pty::conpty::winapi::um::winbase::EXTENDED_STARTUPINFO_PRESENT;
use crate::pty::conpty::winapi::um::winbase::STARTUPINFOEXW;
use crate::pty::conpty::winapi::um::wincon::COORD;
use std::env;
use std::ffi::{OsStr, OsString};
use std::mem;
use std::os::windows::ffi::OsStrExt;
use std::os::windows::ffi::OsStringExt;
use std::os::windows::raw::HANDLE;
use std::path::Path;
use std::ptr;
use std::sync::{Arc, Mutex};
const PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE: usize = 0x00020016;
#[derive(Debug)]
pub struct Command {
args: Vec<OsString>,
input: Option<OwnedHandle>,
output: Option<OwnedHandle>,
hpc: Option<HPCON>,
}
impl Command {
pub fn new<S: AsRef<OsStr>>(program: S) -> Self {
Self {
args: vec![program.as_ref().to_owned()],
input: None,
output: None,
hpc: None,
}
}
fn | (exe: &OsStr) -> OsString {
if let Some(path) = env::var_os("PATH") {
let extensions = env::var_os("PATHEXT").unwrap_or(".EXE".into());
for path in env::split_paths(&path) {
// Check for exactly the user's string in this path dir
let candidate = path.join(&exe);
if candidate.exists() {
return candidate.into_os_string();
}
// otherwise try tacking on some extensions.
// Note that this really replaces the extension in the
// user specified path, so this is potentially wrong.
for ext in env::split_paths(&extensions) {
// PATHEXT includes the leading `.`, but `with_extension`
// doesn't want that
let ext = ext.to_str().expect("PATHEXT entries must be utf8");
let path = path.join(&exe).with_extension(&ext[1..]);
if path.exists() {
return path.into_os_string();
}
}
}
}
exe.to_owned()
}
pub fn arg<S: AsRef<OsStr>>(&mut self, arg: S) -> &mut Command {
// FIXME: quoting!
self.args.push(arg.as_ref().to_owned());
self
}
pub fn args<I, S>(&mut self, args: I) -> &mut Command
where
I: IntoIterator<Item = S>,
S: AsRef<OsStr>,
{
for arg in args {
self.arg(arg);
}
self
}
pub fn env<K, V>(&mut self, key: K, val: V) -> &mut Command
where
K: AsRef<OsStr>,
V: AsRef<OsStr>,
{
eprintln!(
"ignoring env {:?}={:?} for child; FIXME: implement this!",
key.as_ref(),
val.as_ref()
);
self
}
fn set_pty(&mut self, input: OwnedHandle, output: OwnedHandle, con: HPCON) -> &mut Command {
self.input.replace(input);
self.output.replace(output);
self.hpc.replace(con);
self
}
fn cmdline(&self) -> Result<(Vec<u16>, Vec<u16>), Error> {
let mut cmdline = Vec::<u16>::new();
let exe = Self::search_path(&self.args[0]);
Self::append_quoted(&exe, &mut cmdline);
// Ensure that we nul terminate the module name, otherwise we'll
// ask CreateProcessW to start something random!
let mut exe: Vec<u16> = exe.encode_wide().collect();
exe.push(0);
for arg in self.args.iter().skip(1) {
cmdline.push(' ' as u16);
ensure!(
!arg.encode_wide().any(|c| c == 0),
"invalid encoding for command line argument {:?}",
arg
);
Self::append_quoted(arg, &mut cmdline);
}
// Ensure that the command line is nul terminated too!
cmdline.push(0);
Ok((exe, cmdline))
}
// Borrowed from https://github.com/hniksic/rust-subprocess/blob/873dfed165173e52907beb87118b2c0c05d8b8a1/src/popen.rs#L1117
// which in turn was translated from ArgvQuote at http://tinyurl.com/zmgtnls
fn append_quoted(arg: &OsStr, cmdline: &mut Vec<u16>) {
if !arg.is_empty()
&& !arg.encode_wide().any(|c| {
c == ' ' as u16
|| c == '\t' as u16
|| c == '\n' as u16
|| c == '\x0b' as u16
|| c == '\"' as u16
})
{
cmdline.extend(arg.encode_wide());
return;
}
cmdline.push('"' as u16);
let arg: Vec<_> = arg.encode_wide().collect();
let mut i = 0;
while i < arg.len() {
let mut num_backslashes = 0;
while i < arg.len() && arg[i] == '\\' as u16 {
i += 1;
num_backslashes += 1;
}
if i == arg.len() {
for _ in 0..num_backslashes * 2 {
cmdline.push('\\' as u16);
}
break;
} else if arg[i] == b'"' as u16 {
for _ in 0..num_backslashes * 2 + 1 {
cmdline.push('\\' as u16);
}
cmdline.push(arg[i]);
} else {
for _ in 0..num_backslashes {
cmdline.push('\\' as u16);
}
cmdline.push(arg[i]);
}
i += 1;
}
cmdline.push('"' as u16);
}
pub fn spawn(&mut self) -> Result<Child, Error> {
let mut si: STARTUPINFOEXW = unsafe { mem::zeroed() };
si.StartupInfo.cb = mem::size_of::<STARTUPINFOEXW>() as u32;
let mut attrs = ProcThreadAttributeList::with_capacity(1)?;
attrs.set_pty(*self.hpc.as_ref().unwrap())?;
si.lpAttributeList = attrs.as_mut_ptr();
let mut pi: PROCESS_INFORMATION = unsafe { mem::zeroed() };
let (mut exe, mut cmdline) = self.cmdline()?;
let cmd_os = OsString::from_wide(&cmdline);
eprintln!(
"Running: module: {} {:?}",
Path::new(&OsString::from_wide(&exe)).display(),
cmd_os
);
let res = unsafe {
CreateProcessW(
exe.as_mut_slice().as_mut_ptr(),
cmdline.as_mut_slice().as_mut_ptr(),
ptr::null_mut(),
ptr::null_mut(),
0,
EXTENDED_STARTUPINFO_PRESENT,
ptr::null_mut(), // FIXME: env
ptr::null_mut(),
&mut si.StartupInfo,
&mut pi,
)
};
if res == 0 {
let err = IoError::last_os_error();
bail!("CreateProcessW `{:?}` failed: {}", cmd_os, err);
}
// Make sure we close out the thread handle so we don't leak it;
// we do this simply by making it owned
let _main_thread = OwnedHandle { handle: pi.hThread };
let proc = OwnedHandle {
handle: pi.hProcess,
};
Ok(Child { proc })
}
}
struct ProcThreadAttributeList {
data: Vec<u8>,
}
impl ProcThreadAttributeList {
pub fn with_capacity(num_attributes: DWORD) -> Result<Self, Error> {
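// InitializeProcThreadAttributeList is called twice: first with a null list purely to learn the required buffer size, then again to initialise the list inside our buffer.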
let mut bytes_required: usize = 0;
unsafe {
InitializeProcThreadAttributeList(
ptr::null_mut(),
num_attributes,
0,
&mut bytes_required,
)
};
let mut data = Vec::with_capacity(bytes_required);
// We have the right capacity, so force the vec to consider itself
// that length. The contents of those bytes will be maintained
// by the win32 apis used in this impl.
unsafe { data.set_len(bytes_required) };
let attr_ptr = data.as_mut_slice().as_mut_ptr() as *mut _;
let res = unsafe {
InitializeProcThreadAttributeList(attr_ptr, num_attributes, 0, &mut bytes_required)
};
ensure!(
res != 0,
"InitializeProcThreadAttributeList failed: {}",
IoError::last_os_error()
);
Ok(Self { data })
}
pub fn as_mut_ptr(&mut self) -> LPPROC_THREAD_ATTRIBUTE_LIST {
self.data.as_mut_slice().as_mut_ptr() as *mut _
}
pub fn set_pty(&mut self, con: HPCON) -> Result<(), Error> {
let res = unsafe {
UpdateProcThreadAttribute(
self.as_mut_ptr(),
0,
PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE,
con,
mem::size_of::<HPCON>(),
ptr::null_mut(),
ptr::null_mut(),
)
};
ensure!(
res != 0,
"UpdateProcThreadAttribute failed: {}",
IoError::last_os_error()
);
Ok(())
}
}
impl Drop for ProcThreadAttributeList {
fn drop(&mut self) {
unsafe { DeleteProcThreadAttributeList(self.as_mut_ptr()) };
}
}
#[derive(Debug)]
pub struct Child {
proc: OwnedHandle,
}
impl Child {
pub fn try_wait(&mut self) -> IoResult<Option<ExitStatus>> {
let mut status: DWORD = 0;
let res = unsafe { GetExitCodeProcess(self.proc.handle, &mut status) };
if res != 0 {
if status == STILL_ACTIVE {
Ok(None)
} else {
Ok(Some(ExitStatus { status }))
}
} else {
Ok(None)
}
}
}
#[derive(Debug)]
pub struct ExitStatus {
status: DWORD,
}
type HPCON = HANDLE;
extern "system" {
fn CreatePseudoConsole(
size: COORD,
hInput: HANDLE,
hOutput: HANDLE,
flags: DWORD,
hpc: *mut HPCON,
) -> HRESULT;
fn ResizePseudoConsole(hpc: HPCON, size: COORD) -> HRESULT;
fn ClosePseudoConsole(hpc: HPCON);
}
struct PsuedoCon {
con: HPCON,
}
unsafe impl Send for PsuedoCon {}
unsafe impl Sync for PsuedoCon {}
impl Drop for PsuedoCon {
fn drop(&mut self) {
unsafe { ClosePseudoConsole(self.con) };
}
}
impl PsuedoCon {
fn new(size: COORD, input: &OwnedHandle, output: &OwnedHandle) -> Result<Self, Error> {
let mut con: HPCON = INVALID_HANDLE_VALUE;
let result = unsafe { CreatePseudoConsole(size, input.handle, output.handle, 0, &mut con) };
ensure!(
result == S_OK,
"failed to create psuedo console: HRESULT {}",
result
);
Ok(Self { con })
}
fn resize(&self, size: COORD) -> Result<(), Error> {
let result = unsafe { ResizePseudoConsole(self.con, size) };
ensure!(
result == S_OK,
"failed to resize console to {}x{}: HRESULT: {}",
size.X,
size.Y,
result
);
Ok(())
}
}
#[derive(Debug)]
struct OwnedHandle {
handle: HANDLE,
}
unsafe impl Send for OwnedHandle {}
impl Drop for OwnedHandle {
fn drop(&mut self) {
if self.handle != INVALID_HANDLE_VALUE && !self.handle.is_null() {
unsafe { CloseHandle(self.handle) };
}
}
}
impl OwnedHandle {
fn try_clone(&self) -> Result<Self, IoError> {
if self.handle == INVALID_HANDLE_VALUE || self.handle.is_null() {
return Ok(OwnedHandle {
handle: self.handle,
});
}
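// Duplicate the handle within our own process so the clone owns an independent handle with the same access rights.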
let proc = unsafe { GetCurrentProcess() };
let mut duped = INVALID_HANDLE_VALUE;
let ok = unsafe {
DuplicateHandle(
proc,
self.handle as *mut _,
proc,
&mut duped,
0,
0,
winapi::um::winnt::DUPLICATE_SAME_ACCESS,
)
};
if ok == 0 {
Err(IoError::last_os_error())
} else {
Ok(OwnedHandle {
handle: duped as *mut _,
})
}
}
}
struct Inner {
con: PsuedoCon,
readable: OwnedHandle,
writable: OwnedHandle,
size: winsize,
}
impl Inner {
pub fn resize(
&mut self,
num_rows: u16,
num_cols: u16,
pixel_width: u16,
pixel_height: u16,
) -> Result<(), Error> {
self.con.resize(COORD {
X: num_cols as i16,
Y: num_rows as i16,
})?;
self.size = winsize {
ws_row: num_rows,
ws_col: num_cols,
ws_xpixel: pixel_width,
ws_ypixel: pixel_height,
};
Ok(())
}
}
#[derive(Clone)]
pub struct MasterPty {
inner: Arc<Mutex<Inner>>,
}
pub struct SlavePty {
inner: Arc<Mutex<Inner>>,
}
#[derive(Debug, Clone, Copy)]
#[allow(non_camel_case_types)]
pub struct winsize {
pub ws_row: u16,
pub ws_col: u16,
pub ws_xpixel: u16,
pub ws_ypixel: u16,
}
impl MasterPty {
pub fn resize(
&self,
num_rows: u16,
num_cols: u16,
pixel_width: u16,
pixel_height: u16,
) -> Result<(), Error> {
let mut inner = self.inner.lock().unwrap();
inner.resize(num_rows, num_cols, pixel_width, pixel_height)
}
pub fn get_size(&self) -> Result<winsize, Error> {
let inner = self.inner.lock().unwrap();
Ok(inner.size.clone())
}
pub fn try_clone(&self) -> Result<Self, Error> {
// FIXME: this isn't great. Replace this with a way to
// clone the output handle and read it.
let inner = self.inner.lock().unwrap();
Ok(Self {
inner: Arc::new(Mutex::new(Inner {
con: PsuedoCon {
con: INVALID_HANDLE_VALUE,
},
readable: inner.readable.try_clone()?,
writable: inner.writable.try_clone()?,
size: inner.size,
})),
})
}
pub fn clear_nonblocking(&self) -> Result<(), Error> {
Ok(())
}
}
impl io::Write for MasterPty {
fn write(&mut self, buf: &[u8]) -> Result<usize, io::Error> {
let mut num_wrote = 0;
let ok = unsafe {
WriteFile(
self.inner.lock().unwrap().writable.handle as *mut _,
buf.as_ptr() as *const _,
buf.len() as u32,
&mut num_wrote,
ptr::null_mut(),
)
};
if ok == 0 {
Err(IoError::last_os_error())
} else {
Ok(num_wrote as usize)
}
}
fn flush(&mut self) -> Result<(), io::Error> {
Ok(())
}
}
impl io::Read for MasterPty {
fn read(&mut self, buf: &mut [u8]) -> Result<usize, io::Error> {
let mut num_read = 0;
let ok = unsafe {
ReadFile(
self.inner.lock().unwrap().readable.handle as *mut _,
buf.as_mut_ptr() as *mut _,
buf.len() as u32,
&mut num_read,
ptr::null_mut(),
)
};
if ok == 0 {
Err(IoError::last_os_error())
} else {
Ok(num_read as usize)
}
}
}
impl SlavePty {
pub fn spawn_command(self, mut cmd: Command) -> Result<Child, Error> {
let inner = self.inner.lock().unwrap();
cmd.set_pty(
inner.writable.try_clone()?,
inner.readable.try_clone()?,
inner.con.con,
);
cmd.spawn()
}
}
fn pipe() -> Result<(OwnedHandle, OwnedHandle), Error> {
let mut read: HANDLE = INVALID_HANDLE_VALUE;
let mut write: HANDLE = INVALID_HANDLE_VALUE;
if unsafe { CreatePipe(&mut read, &mut write, ptr::null_mut(), 0) } == 0 {
bail!("CreatePipe failed: {}", IoError::last_os_error());
}
Ok((OwnedHandle { handle: read }, OwnedHandle { handle: write }))
}
pub fn openpty(
num_rows: u16,
num_cols: u16,
pixel_width: u16,
pixel_height: u16,
) -> Result<(MasterPty, SlavePty), Error> {
let (stdin_read, stdin_write) = pipe()?;
let (stdout_read, stdout_write) = pipe()?;
let con = PsuedoCon::new(
COORD {
X: num_cols as i16,
Y: num_rows as i16,
},
&stdin_read,
&stdout_write,
)?;
let size = winsize {
ws_row: num_rows,
| search_path | identifier_name |
conpty.rs | ::{ReadFile, WriteFile};
use crate::pty::conpty::winapi::um::handleapi::*;
use crate::pty::conpty::winapi::um::minwinbase::STILL_ACTIVE;
use crate::pty::conpty::winapi::um::namedpipeapi::CreatePipe;
use crate::pty::conpty::winapi::um::processthreadsapi::*;
use crate::pty::conpty::winapi::um::winbase::EXTENDED_STARTUPINFO_PRESENT;
use crate::pty::conpty::winapi::um::winbase::STARTUPINFOEXW;
use crate::pty::conpty::winapi::um::wincon::COORD;
use std::env;
use std::ffi::{OsStr, OsString};
use std::mem;
use std::os::windows::ffi::OsStrExt;
use std::os::windows::ffi::OsStringExt;
use std::os::windows::raw::HANDLE;
use std::path::Path;
use std::ptr;
use std::sync::{Arc, Mutex};
const PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE: usize = 0x00020016;
#[derive(Debug)]
pub struct Command {
args: Vec<OsString>,
input: Option<OwnedHandle>,
output: Option<OwnedHandle>,
hpc: Option<HPCON>,
}
impl Command {
pub fn new<S: AsRef<OsStr>>(program: S) -> Self {
Self {
args: vec![program.as_ref().to_owned()],
input: None,
output: None,
hpc: None,
}
}
fn search_path(exe: &OsStr) -> OsString {
if let Some(path) = env::var_os("PATH") {
let extensions = env::var_os("PATHEXT").unwrap_or(".EXE".into());
for path in env::split_paths(&path) {
// Check for exactly the user's string in this path dir
let candidate = path.join(&exe);
if candidate.exists() {
return candidate.into_os_string();
}
// otherwise try tacking on some extensions.
// Note that this really replaces the extension in the
// user specified path, so this is potentially wrong.
for ext in env::split_paths(&extensions) {
// PATHEXT includes the leading `.`, but `with_extension`
// doesn't want that
let ext = ext.to_str().expect("PATHEXT entries must be utf8");
let path = path.join(&exe).with_extension(&ext[1..]);
if path.exists() {
return path.into_os_string();
}
}
}
}
exe.to_owned()
}
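// Illustrative sketch added for clarity (not part of the original file):
// how `search_path` resolves a bare program name, assuming a hypothetical
// PATH entry of `C:\tools` and the default PATHEXT of `.EXE`.
//
//   search_path(OsStr::new("cmd"))
//     -> tries `C:\tools\cmd` first (no hit), then `C:\tools\cmd.exe`
//        and returns that path if it exists;
//   search_path(OsStr::new("no-such-tool"))
//     -> finds nothing and returns the input name unchanged.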
pub fn arg<S: AsRef<OsStr>>(&mut self, arg: S) -> &mut Command {
// FIXME: quoting!
self.args.push(arg.as_ref().to_owned());
self
}
pub fn args<I, S>(&mut self, args: I) -> &mut Command
where
I: IntoIterator<Item = S>,
S: AsRef<OsStr>,
{
for arg in args {
self.arg(arg);
}
self
}
pub fn env<K, V>(&mut self, key: K, val: V) -> &mut Command
where
K: AsRef<OsStr>,
V: AsRef<OsStr>,
{
eprintln!(
"ignoring env {:?}={:?} for child; FIXME: implement this!",
key.as_ref(),
val.as_ref()
);
self
}
fn set_pty(&mut self, input: OwnedHandle, output: OwnedHandle, con: HPCON) -> &mut Command {
self.input.replace(input);
self.output.replace(output);
self.hpc.replace(con);
self
}
fn cmdline(&self) -> Result<(Vec<u16>, Vec<u16>), Error> {
let mut cmdline = Vec::<u16>::new();
let exe = Self::search_path(&self.args[0]);
Self::append_quoted(&exe, &mut cmdline);
// Ensure that we nul terminate the module name, otherwise we'll
// ask CreateProcessW to start something random!
let mut exe: Vec<u16> = exe.encode_wide().collect();
exe.push(0);
for arg in self.args.iter().skip(1) {
cmdline.push(' ' as u16);
ensure!(
!arg.encode_wide().any(|c| c == 0),
"invalid encoding for command line argument {:?}",
arg
);
Self::append_quoted(arg, &mut cmdline);
}
// Ensure that the command line is nul terminated too!
cmdline.push(0);
Ok((exe, cmdline))
}
// Borrowed from https://github.com/hniksic/rust-subprocess/blob/873dfed165173e52907beb87118b2c0c05d8b8a1/src/popen.rs#L1117
// which in turn was translated from ArgvQuote at http://tinyurl.com/zmgtnls
fn append_quoted(arg: &OsStr, cmdline: &mut Vec<u16>) {
if !arg.is_empty()
&& !arg.encode_wide().any(|c| {
c == ' ' as u16
|| c == '\t' as u16
|| c == '\n' as u16
|| c == '\x0b' as u16
|| c == '\"' as u16
})
{
cmdline.extend(arg.encode_wide());
return;
}
cmdline.push('"' as u16);
let arg: Vec<_> = arg.encode_wide().collect();
let mut i = 0;
while i < arg.len() {
let mut num_backslashes = 0;
while i < arg.len() && arg[i] == '\\' as u16 {
i += 1;
num_backslashes += 1;
}
if i == arg.len() {
for _ in 0..num_backslashes * 2 {
cmdline.push('\\' as u16);
}
break;
} else if arg[i] == b'"' as u16 {
for _ in 0..num_backslashes * 2 + 1 {
cmdline.push('\\' as u16);
}
cmdline.push(arg[i]);
} else {
for _ in 0..num_backslashes {
cmdline.push('\\' as u16);
}
cmdline.push(arg[i]);
}
i += 1;
}
cmdline.push('"' as u16);
}
pub fn spawn(&mut self) -> Result<Child, Error> {
let mut si: STARTUPINFOEXW = unsafe { mem::zeroed() };
si.StartupInfo.cb = mem::size_of::<STARTUPINFOEXW>() as u32;
let mut attrs = ProcThreadAttributeList::with_capacity(1)?;
attrs.set_pty(*self.hpc.as_ref().unwrap())?;
si.lpAttributeList = attrs.as_mut_ptr();
let mut pi: PROCESS_INFORMATION = unsafe { mem::zeroed() };
let (mut exe, mut cmdline) = self.cmdline()?;
let cmd_os = OsString::from_wide(&cmdline);
eprintln!(
"Running: module: {} {:?}",
Path::new(&OsString::from_wide(&exe)).display(),
cmd_os
);
let res = unsafe {
CreateProcessW(
exe.as_mut_slice().as_mut_ptr(),
cmdline.as_mut_slice().as_mut_ptr(),
ptr::null_mut(),
ptr::null_mut(),
0,
EXTENDED_STARTUPINFO_PRESENT,
ptr::null_mut(), // FIXME: env
ptr::null_mut(),
&mut si.StartupInfo,
&mut pi,
)
};
if res == 0 {
let err = IoError::last_os_error();
bail!("CreateProcessW `{:?}` failed: {}", cmd_os, err);
}
// Make sure we close out the thread handle so we don't leak it;
// we do this simply by making it owned
let _main_thread = OwnedHandle { handle: pi.hThread };
let proc = OwnedHandle {
handle: pi.hProcess,
};
Ok(Child { proc })
}
}
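// Hedged sketch added for illustration only (not in the original source):
// a small test module exercising the ArgvQuote-style rules implemented by
// `Command::append_quoted` above. It assumes `append_quoted` stays a
// private associated fn, which a child test module can still reach.
#[cfg(test)]
mod append_quoted_sketch {
    use super::*;
    use std::ffi::OsStr;

    fn quoted(s: &str) -> String {
        let mut buf = Vec::new();
        Command::append_quoted(OsStr::new(s), &mut buf);
        String::from_utf16(&buf).unwrap()
    }

    #[test]
    fn quoting_rules() {
        // No whitespace or quotes: the argument is passed through untouched.
        assert_eq!(quoted("simple"), "simple");
        // Embedded space: the whole argument is wrapped in double quotes.
        assert_eq!(quoted("hello world"), "\"hello world\"");
        // Embedded quote: it is backslash-escaped inside the wrapping quotes.
        assert_eq!(quoted("say \"hi\""), "\"say \\\"hi\\\"\"");
    }
}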
struct ProcThreadAttributeList {
data: Vec<u8>,
}
impl ProcThreadAttributeList {
pub fn with_capacity(num_attributes: DWORD) -> Result<Self, Error> {
let mut bytes_required: usize = 0;
unsafe {
InitializeProcThreadAttributeList(
ptr::null_mut(),
num_attributes,
0,
&mut bytes_required,
)
};
let mut data = Vec::with_capacity(bytes_required);
// We have the right capacity, so force the vec to consider itself
// that length. The contents of those bytes will be maintained
// by the win32 apis used in this impl.
unsafe { data.set_len(bytes_required) };
let attr_ptr = data.as_mut_slice().as_mut_ptr() as *mut _;
let res = unsafe {
InitializeProcThreadAttributeList(attr_ptr, num_attributes, 0, &mut bytes_required)
};
ensure!(
res != 0,
"InitializeProcThreadAttributeList failed: {}",
IoError::last_os_error()
);
Ok(Self { data })
}
pub fn as_mut_ptr(&mut self) -> LPPROC_THREAD_ATTRIBUTE_LIST {
self.data.as_mut_slice().as_mut_ptr() as *mut _
}
pub fn set_pty(&mut self, con: HPCON) -> Result<(), Error> {
let res = unsafe {
UpdateProcThreadAttribute(
self.as_mut_ptr(),
0,
PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE,
con,
mem::size_of::<HPCON>(),
ptr::null_mut(),
ptr::null_mut(),
)
};
ensure!(
res != 0,
"UpdateProcThreadAttribute failed: {}",
IoError::last_os_error()
);
Ok(())
}
}
impl Drop for ProcThreadAttributeList {
fn drop(&mut self) {
unsafe { DeleteProcThreadAttributeList(self.as_mut_ptr()) };
}
}
#[derive(Debug)]
pub struct Child {
proc: OwnedHandle,
}
impl Child {
pub fn try_wait(&mut self) -> IoResult<Option<ExitStatus>> {
let mut status: DWORD = 0;
let res = unsafe { GetExitCodeProcess(self.proc.handle, &mut status) };
if res != 0 {
if status == STILL_ACTIVE {
Ok(None)
} else {
Ok(Some(ExitStatus { status }))
}
} else {
Ok(None)
}
}
}
#[derive(Debug)]
pub struct ExitStatus {
status: DWORD,
}
type HPCON = HANDLE;
extern "system" {
fn CreatePseudoConsole(
size: COORD,
hInput: HANDLE,
hOutput: HANDLE,
flags: DWORD,
hpc: *mut HPCON,
) -> HRESULT;
fn ResizePseudoConsole(hpc: HPCON, size: COORD) -> HRESULT;
fn ClosePseudoConsole(hpc: HPCON);
}
struct PsuedoCon {
con: HPCON,
}
unsafe impl Send for PsuedoCon {}
unsafe impl Sync for PsuedoCon {}
impl Drop for PsuedoCon {
fn drop(&mut self) {
unsafe { ClosePseudoConsole(self.con) };
}
}
impl PsuedoCon {
fn new(size: COORD, input: &OwnedHandle, output: &OwnedHandle) -> Result<Self, Error> {
let mut con: HPCON = INVALID_HANDLE_VALUE;
let result = unsafe { CreatePseudoConsole(size, input.handle, output.handle, 0, &mut con) };
ensure!(
result == S_OK,
"failed to create psuedo console: HRESULT {}",
result
);
Ok(Self { con })
}
fn resize(&self, size: COORD) -> Result<(), Error> {
let result = unsafe { ResizePseudoConsole(self.con, size) };
ensure!(
result == S_OK,
"failed to resize console to {}x{}: HRESULT: {}",
size.X,
size.Y,
result
);
Ok(())
}
}
#[derive(Debug)]
struct OwnedHandle {
handle: HANDLE,
}
unsafe impl Send for OwnedHandle {}
impl Drop for OwnedHandle {
fn drop(&mut self) {
if self.handle!= INVALID_HANDLE_VALUE &&!self.handle.is_null() |
}
}
impl OwnedHandle {
fn try_clone(&self) -> Result<Self, IoError> {
if self.handle == INVALID_HANDLE_VALUE || self.handle.is_null() {
return Ok(OwnedHandle {
handle: self.handle,
});
}
let proc = unsafe { GetCurrentProcess() };
let mut duped = INVALID_HANDLE_VALUE;
let ok = unsafe {
DuplicateHandle(
proc,
self.handle as *mut _,
proc,
&mut duped,
0,
0,
winapi::um::winnt::DUPLICATE_SAME_ACCESS,
)
};
if ok == 0 {
Err(IoError::last_os_error())
} else {
Ok(OwnedHandle {
handle: duped as *mut _,
})
}
}
}
struct Inner {
con: PsuedoCon,
readable: OwnedHandle,
writable: OwnedHandle,
size: winsize,
}
impl Inner {
pub fn resize(
&mut self,
num_rows: u16,
num_cols: u16,
pixel_width: u16,
pixel_height: u16,
) -> Result<(), Error> {
self.con.resize(COORD {
X: num_cols as i16,
Y: num_rows as i16,
})?;
self.size = winsize {
ws_row: num_rows,
ws_col: num_cols,
ws_xpixel: pixel_width,
ws_ypixel: pixel_height,
};
Ok(())
}
}
#[derive(Clone)]
pub struct MasterPty {
inner: Arc<Mutex<Inner>>,
}
pub struct SlavePty {
inner: Arc<Mutex<Inner>>,
}
#[derive(Debug, Clone, Copy)]
#[allow(non_camel_case_types)]
pub struct winsize {
pub ws_row: u16,
pub ws_col: u16,
pub ws_xpixel: u16,
pub ws_ypixel: u16,
}
impl MasterPty {
pub fn resize(
&self,
num_rows: u16,
num_cols: u16,
pixel_width: u16,
pixel_height: u16,
) -> Result<(), Error> {
let mut inner = self.inner.lock().unwrap();
inner.resize(num_rows, num_cols, pixel_width, pixel_height)
}
pub fn get_size(&self) -> Result<winsize, Error> {
let inner = self.inner.lock().unwrap();
Ok(inner.size.clone())
}
pub fn try_clone(&self) -> Result<Self, Error> {
// FIXME: this isn't great. Replace this with a way to
// clone the output handle and read it.
let inner = self.inner.lock().unwrap();
Ok(Self {
inner: Arc::new(Mutex::new(Inner {
con: PsuedoCon {
con: INVALID_HANDLE_VALUE,
},
readable: inner.readable.try_clone()?,
writable: inner.writable.try_clone()?,
size: inner.size,
})),
})
}
pub fn clear_nonblocking(&self) -> Result<(), Error> {
Ok(())
}
}
impl io::Write for MasterPty {
fn write(&mut self, buf: &[u8]) -> Result<usize, io::Error> {
let mut num_wrote = 0;
let ok = unsafe {
WriteFile(
self.inner.lock().unwrap().writable.handle as *mut _,
buf.as_ptr() as *const _,
buf.len() as u32,
&mut num_wrote,
ptr::null_mut(),
)
};
if ok == 0 {
Err(IoError::last_os_error())
} else {
Ok(num_wrote as usize)
}
}
fn flush(&mut self) -> Result<(), io::Error> {
Ok(())
}
}
impl io::Read for MasterPty {
fn read(&mut self, buf: &mut [u8]) -> Result<usize, io::Error> {
let mut num_read = 0;
let ok = unsafe {
ReadFile(
self.inner.lock().unwrap().readable.handle as *mut _,
buf.as_mut_ptr() as *mut _,
buf.len() as u32,
&mut num_read,
ptr::null_mut(),
)
};
if ok == 0 {
Err(IoError::last_os_error())
} else {
Ok(num_read as usize)
}
}
}
impl SlavePty {
pub fn spawn_command(self, mut cmd: Command) -> Result<Child, Error> {
let inner = self.inner.lock().unwrap();
cmd.set_pty(
inner.writable.try_clone()?,
inner.readable.try_clone()?,
inner.con.con,
);
cmd.spawn()
}
}
fn pipe() -> Result<(OwnedHandle, OwnedHandle), Error> {
let mut read: HANDLE = INVALID_HANDLE_VALUE;
let mut write: HANDLE = INVALID_HANDLE_VALUE;
if unsafe { CreatePipe(&mut read, &mut write, ptr::null_mut(), 0) } == 0 {
bail!("CreatePipe failed: {}", IoError::last_os_error());
}
Ok((OwnedHandle { handle: read }, OwnedHandle { handle: write }))
}
pub fn openpty(
num_rows: u16,
num_cols: u16,
pixel_width: u16,
pixel_height: u16,
) -> Result<(MasterPty, SlavePty), Error> {
let (stdin_read, stdin_write) = pipe()?;
let (stdout_read, stdout_write) = pipe()?;
let con = PsuedoCon::new(
COORD {
X: num_cols as i16,
Y: num_rows as i16,
},
&stdin_read,
&stdout_write,
)?;
let size = winsize {
ws_row: num_rows,
| {
unsafe { CloseHandle(self.handle) };
} | conditional_block |
conpty.rs | eapi::{ReadFile, WriteFile};
use crate::pty::conpty::winapi::um::handleapi::*;
use crate::pty::conpty::winapi::um::minwinbase::STILL_ACTIVE;
use crate::pty::conpty::winapi::um::namedpipeapi::CreatePipe;
use crate::pty::conpty::winapi::um::processthreadsapi::*;
use crate::pty::conpty::winapi::um::winbase::EXTENDED_STARTUPINFO_PRESENT;
use crate::pty::conpty::winapi::um::winbase::STARTUPINFOEXW;
use crate::pty::conpty::winapi::um::wincon::COORD;
use std::env;
use std::ffi::{OsStr, OsString};
use std::mem;
use std::os::windows::ffi::OsStrExt;
use std::os::windows::ffi::OsStringExt;
use std::os::windows::raw::HANDLE;
use std::path::Path;
use std::ptr;
use std::sync::{Arc, Mutex};
const PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE: usize = 0x00020016;
#[derive(Debug)]
pub struct Command {
args: Vec<OsString>,
input: Option<OwnedHandle>,
output: Option<OwnedHandle>,
hpc: Option<HPCON>,
}
impl Command {
pub fn new<S: AsRef<OsStr>>(program: S) -> Self {
Self {
args: vec![program.as_ref().to_owned()],
input: None,
output: None,
hpc: None,
}
}
fn search_path(exe: &OsStr) -> OsString {
if let Some(path) = env::var_os("PATH") {
let extensions = env::var_os("PATHEXT").unwrap_or(".EXE".into());
for path in env::split_paths(&path) {
// Check for exactly the user's string in this path dir
let candidate = path.join(&exe);
if candidate.exists() {
return candidate.into_os_string();
}
// otherwise try tacking on some extensions.
// Note that this really replaces the extension in the
// user specified path, so this is potentially wrong.
for ext in env::split_paths(&extensions) {
// PATHEXT includes the leading `.`, but `with_extension`
// doesn't want that
let ext = ext.to_str().expect("PATHEXT entries must be utf8");
let path = path.join(&exe).with_extension(&ext[1..]);
if path.exists() {
return path.into_os_string();
}
}
}
}
exe.to_owned()
}
pub fn arg<S: AsRef<OsStr>>(&mut self, arg: S) -> &mut Command {
// FIXME: quoting!
self.args.push(arg.as_ref().to_owned());
self
}
pub fn args<I, S>(&mut self, args: I) -> &mut Command
where
I: IntoIterator<Item = S>,
S: AsRef<OsStr>,
{
for arg in args {
self.arg(arg);
}
self
}
pub fn env<K, V>(&mut self, key: K, val: V) -> &mut Command
where
K: AsRef<OsStr>,
V: AsRef<OsStr>,
{
eprintln!(
"ignoring env {:?}={:?} for child; FIXME: implement this!",
key.as_ref(),
val.as_ref()
);
self
}
fn set_pty(&mut self, input: OwnedHandle, output: OwnedHandle, con: HPCON) -> &mut Command {
self.input.replace(input);
self.output.replace(output);
self.hpc.replace(con);
self
}
fn cmdline(&self) -> Result<(Vec<u16>, Vec<u16>), Error> {
let mut cmdline = Vec::<u16>::new();
let exe = Self::search_path(&self.args[0]);
Self::append_quoted(&exe, &mut cmdline);
// Ensure that we nul terminate the module name, otherwise we'll
// ask CreateProcessW to start something random!
let mut exe: Vec<u16> = exe.encode_wide().collect();
exe.push(0);
for arg in self.args.iter().skip(1) {
cmdline.push(' ' as u16);
ensure!(
!arg.encode_wide().any(|c| c == 0),
"invalid encoding for command line argument {:?}",
arg
);
Self::append_quoted(arg, &mut cmdline);
}
// Ensure that the command line is nul terminated too!
cmdline.push(0);
Ok((exe, cmdline))
}
// Borrowed from https://github.com/hniksic/rust-subprocess/blob/873dfed165173e52907beb87118b2c0c05d8b8a1/src/popen.rs#L1117
// which in turn was translated from ArgvQuote at http://tinyurl.com/zmgtnls
fn append_quoted(arg: &OsStr, cmdline: &mut Vec<u16>) {
if !arg.is_empty()
&& !arg.encode_wide().any(|c| {
c == ' ' as u16
|| c == '\t' as u16
|| c == '\n' as u16
|| c == '\x0b' as u16
|| c == '\"' as u16
})
{
cmdline.extend(arg.encode_wide());
return;
}
cmdline.push('"' as u16);
let arg: Vec<_> = arg.encode_wide().collect();
let mut i = 0;
while i < arg.len() {
let mut num_backslashes = 0;
while i < arg.len() && arg[i] == '\\' as u16 {
i += 1;
num_backslashes += 1;
}
if i == arg.len() {
for _ in 0..num_backslashes * 2 {
cmdline.push('\\' as u16);
}
break;
} else if arg[i] == b'"' as u16 {
for _ in 0..num_backslashes * 2 + 1 {
cmdline.push('\\' as u16);
}
cmdline.push(arg[i]);
} else {
for _ in 0..num_backslashes {
cmdline.push('\\' as u16);
}
cmdline.push(arg[i]);
}
i += 1;
}
cmdline.push('"' as u16);
}
pub fn spawn(&mut self) -> Result<Child, Error> {
let mut si: STARTUPINFOEXW = unsafe { mem::zeroed() };
si.StartupInfo.cb = mem::size_of::<STARTUPINFOEXW>() as u32;
let mut attrs = ProcThreadAttributeList::with_capacity(1)?;
attrs.set_pty(*self.hpc.as_ref().unwrap())?;
si.lpAttributeList = attrs.as_mut_ptr();
let mut pi: PROCESS_INFORMATION = unsafe { mem::zeroed() };
let (mut exe, mut cmdline) = self.cmdline()?;
let cmd_os = OsString::from_wide(&cmdline);
eprintln!(
"Running: module: {} {:?}",
Path::new(&OsString::from_wide(&exe)).display(),
cmd_os
);
let res = unsafe {
CreateProcessW(
exe.as_mut_slice().as_mut_ptr(),
cmdline.as_mut_slice().as_mut_ptr(),
ptr::null_mut(),
ptr::null_mut(),
0,
EXTENDED_STARTUPINFO_PRESENT,
ptr::null_mut(), // FIXME: env
ptr::null_mut(),
&mut si.StartupInfo,
&mut pi,
)
};
if res == 0 {
let err = IoError::last_os_error();
bail!("CreateProcessW `{:?}` failed: {}", cmd_os, err);
}
// Make sure we close out the thread handle so we don't leak it;
// we do this simply by making it owned
let _main_thread = OwnedHandle { handle: pi.hThread };
let proc = OwnedHandle {
handle: pi.hProcess,
};
Ok(Child { proc })
}
}
struct ProcThreadAttributeList {
data: Vec<u8>,
}
impl ProcThreadAttributeList {
pub fn with_capacity(num_attributes: DWORD) -> Result<Self, Error> {
let mut bytes_required: usize = 0; | &mut bytes_required,
)
};
let mut data = Vec::with_capacity(bytes_required);
// We have the right capacity, so force the vec to consider itself
// that length. The contents of those bytes will be maintained
// by the win32 apis used in this impl.
unsafe { data.set_len(bytes_required) };
let attr_ptr = data.as_mut_slice().as_mut_ptr() as *mut _;
let res = unsafe {
InitializeProcThreadAttributeList(attr_ptr, num_attributes, 0, &mut bytes_required)
};
ensure!(
res != 0,
"InitializeProcThreadAttributeList failed: {}",
IoError::last_os_error()
);
Ok(Self { data })
}
pub fn as_mut_ptr(&mut self) -> LPPROC_THREAD_ATTRIBUTE_LIST {
self.data.as_mut_slice().as_mut_ptr() as *mut _
}
pub fn set_pty(&mut self, con: HPCON) -> Result<(), Error> {
let res = unsafe {
UpdateProcThreadAttribute(
self.as_mut_ptr(),
0,
PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE,
con,
mem::size_of::<HPCON>(),
ptr::null_mut(),
ptr::null_mut(),
)
};
ensure!(
res != 0,
"UpdateProcThreadAttribute failed: {}",
IoError::last_os_error()
);
Ok(())
}
}
impl Drop for ProcThreadAttributeList {
fn drop(&mut self) {
unsafe { DeleteProcThreadAttributeList(self.as_mut_ptr()) };
}
}
#[derive(Debug)]
pub struct Child {
proc: OwnedHandle,
}
impl Child {
pub fn try_wait(&mut self) -> IoResult<Option<ExitStatus>> {
let mut status: DWORD = 0;
let res = unsafe { GetExitCodeProcess(self.proc.handle, &mut status) };
if res != 0 {
if status == STILL_ACTIVE {
Ok(None)
} else {
Ok(Some(ExitStatus { status }))
}
} else {
Ok(None)
}
}
}
#[derive(Debug)]
pub struct ExitStatus {
status: DWORD,
}
type HPCON = HANDLE;
extern "system" {
fn CreatePseudoConsole(
size: COORD,
hInput: HANDLE,
hOutput: HANDLE,
flags: DWORD,
hpc: *mut HPCON,
) -> HRESULT;
fn ResizePseudoConsole(hpc: HPCON, size: COORD) -> HRESULT;
fn ClosePseudoConsole(hpc: HPCON);
}
struct PsuedoCon {
con: HPCON,
}
unsafe impl Send for PsuedoCon {}
unsafe impl Sync for PsuedoCon {}
impl Drop for PsuedoCon {
fn drop(&mut self) {
unsafe { ClosePseudoConsole(self.con) };
}
}
impl PsuedoCon {
fn new(size: COORD, input: &OwnedHandle, output: &OwnedHandle) -> Result<Self, Error> {
let mut con: HPCON = INVALID_HANDLE_VALUE;
let result = unsafe { CreatePseudoConsole(size, input.handle, output.handle, 0, &mut con) };
ensure!(
result == S_OK,
"failed to create psuedo console: HRESULT {}",
result
);
Ok(Self { con })
}
fn resize(&self, size: COORD) -> Result<(), Error> {
let result = unsafe { ResizePseudoConsole(self.con, size) };
ensure!(
result == S_OK,
"failed to resize console to {}x{}: HRESULT: {}",
size.X,
size.Y,
result
);
Ok(())
}
}
#[derive(Debug)]
struct OwnedHandle {
handle: HANDLE,
}
unsafe impl Send for OwnedHandle {}
impl Drop for OwnedHandle {
fn drop(&mut self) {
if self.handle != INVALID_HANDLE_VALUE && !self.handle.is_null() {
unsafe { CloseHandle(self.handle) };
}
}
}
impl OwnedHandle {
fn try_clone(&self) -> Result<Self, IoError> {
if self.handle == INVALID_HANDLE_VALUE || self.handle.is_null() {
return Ok(OwnedHandle {
handle: self.handle,
});
}
let proc = unsafe { GetCurrentProcess() };
let mut duped = INVALID_HANDLE_VALUE;
let ok = unsafe {
DuplicateHandle(
proc,
self.handle as *mut _,
proc,
&mut duped,
0,
0,
winapi::um::winnt::DUPLICATE_SAME_ACCESS,
)
};
if ok == 0 {
Err(IoError::last_os_error())
} else {
Ok(OwnedHandle {
handle: duped as *mut _,
})
}
}
}
struct Inner {
con: PsuedoCon,
readable: OwnedHandle,
writable: OwnedHandle,
size: winsize,
}
impl Inner {
pub fn resize(
&mut self,
num_rows: u16,
num_cols: u16,
pixel_width: u16,
pixel_height: u16,
) -> Result<(), Error> {
self.con.resize(COORD {
X: num_cols as i16,
Y: num_rows as i16,
})?;
self.size = winsize {
ws_row: num_rows,
ws_col: num_cols,
ws_xpixel: pixel_width,
ws_ypixel: pixel_height,
};
Ok(())
}
}
#[derive(Clone)]
pub struct MasterPty {
inner: Arc<Mutex<Inner>>,
}
pub struct SlavePty {
inner: Arc<Mutex<Inner>>,
}
#[derive(Debug, Clone, Copy)]
#[allow(non_camel_case_types)]
pub struct winsize {
pub ws_row: u16,
pub ws_col: u16,
pub ws_xpixel: u16,
pub ws_ypixel: u16,
}
impl MasterPty {
pub fn resize(
&self,
num_rows: u16,
num_cols: u16,
pixel_width: u16,
pixel_height: u16,
) -> Result<(), Error> {
let mut inner = self.inner.lock().unwrap();
inner.resize(num_rows, num_cols, pixel_width, pixel_height)
}
pub fn get_size(&self) -> Result<winsize, Error> {
let inner = self.inner.lock().unwrap();
Ok(inner.size.clone())
}
pub fn try_clone(&self) -> Result<Self, Error> {
// FIXME: this isn't great. Replace this with a way to
// clone the output handle and read it.
let inner = self.inner.lock().unwrap();
Ok(Self {
inner: Arc::new(Mutex::new(Inner {
con: PsuedoCon {
con: INVALID_HANDLE_VALUE,
},
readable: inner.readable.try_clone()?,
writable: inner.writable.try_clone()?,
size: inner.size,
})),
})
}
pub fn clear_nonblocking(&self) -> Result<(), Error> {
Ok(())
}
}
impl io::Write for MasterPty {
fn write(&mut self, buf: &[u8]) -> Result<usize, io::Error> {
let mut num_wrote = 0;
let ok = unsafe {
WriteFile(
self.inner.lock().unwrap().writable.handle as *mut _,
buf.as_ptr() as *const _,
buf.len() as u32,
&mut num_wrote,
ptr::null_mut(),
)
};
if ok == 0 {
Err(IoError::last_os_error())
} else {
Ok(num_wrote as usize)
}
}
fn flush(&mut self) -> Result<(), io::Error> {
Ok(())
}
}
impl io::Read for MasterPty {
fn read(&mut self, buf: &mut [u8]) -> Result<usize, io::Error> {
let mut num_read = 0;
let ok = unsafe {
ReadFile(
self.inner.lock().unwrap().readable.handle as *mut _,
buf.as_mut_ptr() as *mut _,
buf.len() as u32,
&mut num_read,
ptr::null_mut(),
)
};
if ok == 0 {
Err(IoError::last_os_error())
} else {
Ok(num_read as usize)
}
}
}
impl SlavePty {
pub fn spawn_command(self, mut cmd: Command) -> Result<Child, Error> {
let inner = self.inner.lock().unwrap();
cmd.set_pty(
inner.writable.try_clone()?,
inner.readable.try_clone()?,
inner.con.con,
);
cmd.spawn()
}
}
fn pipe() -> Result<(OwnedHandle, OwnedHandle), Error> {
let mut read: HANDLE = INVALID_HANDLE_VALUE;
let mut write: HANDLE = INVALID_HANDLE_VALUE;
if unsafe { CreatePipe(&mut read, &mut write, ptr::null_mut(), 0) } == 0 {
bail!("CreatePipe failed: {}", IoError::last_os_error());
}
Ok((OwnedHandle { handle: read }, OwnedHandle { handle: write }))
}
pub fn openpty(
num_rows: u16,
num_cols: u16,
pixel_width: u16,
pixel_height: u16,
) -> Result<(MasterPty, SlavePty), Error> {
let (stdin_read, stdin_write) = pipe()?;
let (stdout_read, stdout_write) = pipe()?;
let con = PsuedoCon::new(
COORD {
X: num_cols as i16,
Y: num_rows as i16,
},
&stdin_read,
&stdout_write,
)?;
let size = winsize {
ws_row: num_rows,
| unsafe {
InitializeProcThreadAttributeList(
ptr::null_mut(),
num_attributes,
0, | random_line_split |
helpers.rs | //! A module with ide helpers for high-level ide features.
pub mod import_assets;
pub mod insert_use;
pub mod merge_imports;
pub mod rust_doc;
pub mod generated_lints;
use std::collections::VecDeque;
use base_db::FileId;
use either::Either;
use hir::{Crate, Enum, ItemInNs, MacroDef, Module, ModuleDef, Name, ScopeDef, Semantics, Trait};
use syntax::{
ast::{self, make, LoopBodyOwner},
AstNode, Direction, SyntaxElement, SyntaxKind, SyntaxToken, TokenAtOffset, WalkEvent, T,
};
use crate::RootDatabase;
pub fn item_name(db: &RootDatabase, item: ItemInNs) -> Option<Name> {
match item {
ItemInNs::Types(module_def_id) => ModuleDef::from(module_def_id).name(db),
ItemInNs::Values(module_def_id) => ModuleDef::from(module_def_id).name(db),
ItemInNs::Macros(macro_def_id) => MacroDef::from(macro_def_id).name(db),
}
}
/// Resolves the path at the cursor token as a derive macro if it is inside a token tree of a derive attribute.
pub fn try_resolve_derive_input_at(
sema: &Semantics<RootDatabase>,
derive_attr: &ast::Attr,
cursor: &SyntaxToken,
) -> Option<MacroDef> {
use itertools::Itertools;
if cursor.kind() != T![ident] {
return None;
}
let tt = match derive_attr.as_simple_call() {
Some((name, tt))
if name == "derive" && tt.syntax().text_range().contains_range(cursor.text_range()) =>
{
tt
}
_ => return None,
};
let tokens: Vec<_> = cursor
.siblings_with_tokens(Direction::Prev)
.flat_map(SyntaxElement::into_token)
.take_while(|tok| tok.kind() != T!['('] && tok.kind() != T![,])
.collect();
let path = ast::Path::parse(&tokens.into_iter().rev().join("")).ok()?;
match sema.scope(tt.syntax()).speculative_resolve(&path) {
Some(hir::PathResolution::Macro(makro)) if makro.kind() == hir::MacroKind::Derive => {
Some(makro)
}
_ => None,
}
}
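// Illustrative usage sketch added for clarity (not part of the original
// source); `sema`, `attr` and `token` are assumed to be supplied by the
// calling IDE feature (e.g. hover or goto-definition over `#[derive(Clone)]`):
//
//   if let Some(makro) = try_resolve_derive_input_at(&sema, &attr, &token) {
//       // `makro` is the resolved derive macro definition, e.g. `Clone`.
//   }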
/// Picks the token with the highest rank returned by the passed-in function.
pub fn pick_best_token(
tokens: TokenAtOffset<SyntaxToken>,
f: impl Fn(SyntaxKind) -> usize,
) -> Option<SyntaxToken> {
tokens.max_by_key(move |t| f(t.kind()))
}
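// Illustrative sketch (not part of the original source): a typical ranking
// closure, preferring identifiers over punctuation and trivia when the
// cursor sits exactly between two tokens; `file` and `offset` are assumed
// to come from the caller.
//
//   let best = pick_best_token(file.syntax().token_at_offset(offset), |kind| match kind {
//       SyntaxKind::IDENT => 2,
//       kind if kind.is_trivia() => 0,
//       _ => 1,
//   });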
/// Converts the mod path struct into its ast representation.
pub fn | (path: &hir::ModPath) -> ast::Path {
let _p = profile::span("mod_path_to_ast");
let mut segments = Vec::new();
let mut is_abs = false;
match path.kind {
hir::PathKind::Plain => {}
hir::PathKind::Super(0) => segments.push(make::path_segment_self()),
hir::PathKind::Super(n) => segments.extend((0..n).map(|_| make::path_segment_super())),
hir::PathKind::DollarCrate(_) | hir::PathKind::Crate => {
segments.push(make::path_segment_crate())
}
hir::PathKind::Abs => is_abs = true,
}
segments.extend(
path.segments()
.iter()
.map(|segment| make::path_segment(make::name_ref(&segment.to_string()))),
);
make::path_from_segments(segments, is_abs)
}
/// Iterates all `ModuleDef`s and `Impl` blocks of the given file.
pub fn visit_file_defs(
sema: &Semantics<RootDatabase>,
file_id: FileId,
cb: &mut dyn FnMut(Either<hir::ModuleDef, hir::Impl>),
) {
let db = sema.db;
let module = match sema.to_module_def(file_id) {
Some(it) => it,
None => return,
};
let mut defs: VecDeque<_> = module.declarations(db).into();
while let Some(def) = defs.pop_front() {
if let ModuleDef::Module(submodule) = def {
if let hir::ModuleSource::Module(_) = submodule.definition_source(db).value {
defs.extend(submodule.declarations(db));
submodule.impl_defs(db).into_iter().for_each(|impl_| cb(Either::Right(impl_)));
}
}
cb(Either::Left(def));
}
module.impl_defs(db).into_iter().for_each(|impl_| cb(Either::Right(impl_)));
}
/// Helps with finding well-known things inside the standard library. This is
/// somewhat similar to the known paths infra inside hir, but it is different; we
/// want to make sure that IDE-specific paths don't become interesting inside
/// the compiler itself as well.
///
/// Note that, by default, rust-analyzer tests **do not** include core or std
/// libraries. If you are writing tests for functionality using [`FamousDefs`],
/// you'd want to include minicore (see `test_utils::MiniCore`) declaration at
/// the start of your tests:
///
/// ```
/// //- minicore: iterator, ord, derive
/// ```
pub struct FamousDefs<'a, 'b>(pub &'a Semantics<'b, RootDatabase>, pub Option<Crate>);
#[allow(non_snake_case)]
impl FamousDefs<'_, '_> {
pub fn std(&self) -> Option<Crate> {
self.find_crate("std")
}
pub fn core(&self) -> Option<Crate> {
self.find_crate("core")
}
pub fn core_cmp_Ord(&self) -> Option<Trait> {
self.find_trait("core:cmp:Ord")
}
pub fn core_convert_From(&self) -> Option<Trait> {
self.find_trait("core:convert:From")
}
pub fn core_convert_Into(&self) -> Option<Trait> {
self.find_trait("core:convert:Into")
}
pub fn core_option_Option(&self) -> Option<Enum> {
self.find_enum("core:option:Option")
}
pub fn core_result_Result(&self) -> Option<Enum> {
self.find_enum("core:result:Result")
}
pub fn core_default_Default(&self) -> Option<Trait> {
self.find_trait("core:default:Default")
}
pub fn core_iter_Iterator(&self) -> Option<Trait> {
self.find_trait("core:iter:traits:iterator:Iterator")
}
pub fn core_iter_IntoIterator(&self) -> Option<Trait> {
self.find_trait("core:iter:traits:collect:IntoIterator")
}
pub fn core_iter(&self) -> Option<Module> {
self.find_module("core:iter")
}
pub fn core_ops_Deref(&self) -> Option<Trait> {
self.find_trait("core:ops:Deref")
}
fn find_trait(&self, path: &str) -> Option<Trait> {
match self.find_def(path)? {
hir::ScopeDef::ModuleDef(hir::ModuleDef::Trait(it)) => Some(it),
_ => None,
}
}
fn find_enum(&self, path: &str) -> Option<Enum> {
match self.find_def(path)? {
hir::ScopeDef::ModuleDef(hir::ModuleDef::Adt(hir::Adt::Enum(it))) => Some(it),
_ => None,
}
}
fn find_module(&self, path: &str) -> Option<Module> {
match self.find_def(path)? {
hir::ScopeDef::ModuleDef(hir::ModuleDef::Module(it)) => Some(it),
_ => None,
}
}
fn find_crate(&self, name: &str) -> Option<Crate> {
let krate = self.1?;
let db = self.0.db;
let res =
krate.dependencies(db).into_iter().find(|dep| dep.name.to_string() == name)?.krate;
Some(res)
}
fn find_def(&self, path: &str) -> Option<ScopeDef> {
let db = self.0.db;
let mut path = path.split(':');
let trait_ = path.next_back()?;
let std_crate = path.next()?;
let std_crate = self.find_crate(std_crate)?;
let mut module = std_crate.root_module(db);
for segment in path {
module = module.children(db).find_map(|child| {
let name = child.name(db)?;
if name.to_string() == segment {
Some(child)
} else {
None
}
})?;
}
let def =
module.scope(db, None).into_iter().find(|(name, _def)| name.to_string() == trait_)?.1;
Some(def)
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct SnippetCap {
_private: (),
}
impl SnippetCap {
pub const fn new(allow_snippets: bool) -> Option<SnippetCap> {
if allow_snippets {
Some(SnippetCap { _private: () })
} else {
None
}
}
}
/// Calls `cb` on each expression inside `expr` that is at "tail position".
/// Does not walk into `break` or `return` expressions.
pub fn for_each_tail_expr(expr: &ast::Expr, cb: &mut dyn FnMut(&ast::Expr)) {
match expr {
ast::Expr::BlockExpr(b) => {
if let Some(e) = b.tail_expr() {
for_each_tail_expr(&e, cb);
}
}
ast::Expr::EffectExpr(e) => match e.effect() {
ast::Effect::Label(label) => {
for_each_break_expr(Some(label), e.block_expr(), &mut |b| {
cb(&ast::Expr::BreakExpr(b))
});
if let Some(b) = e.block_expr() {
for_each_tail_expr(&ast::Expr::BlockExpr(b), cb);
}
}
ast::Effect::Unsafe(_) => {
if let Some(e) = e.block_expr().and_then(|b| b.tail_expr()) {
for_each_tail_expr(&e, cb);
}
}
ast::Effect::Async(_) | ast::Effect::Try(_) | ast::Effect::Const(_) => cb(expr),
},
ast::Expr::IfExpr(if_) => {
let mut if_ = if_.clone();
loop {
if let Some(block) = if_.then_branch() {
for_each_tail_expr(&ast::Expr::BlockExpr(block), cb);
}
match if_.else_branch() {
Some(ast::ElseBranch::IfExpr(it)) => if_ = it,
Some(ast::ElseBranch::Block(block)) => {
for_each_tail_expr(&ast::Expr::BlockExpr(block), cb);
break;
}
None => break,
}
}
}
ast::Expr::LoopExpr(l) => {
for_each_break_expr(l.label(), l.loop_body(), &mut |b| cb(&ast::Expr::BreakExpr(b)))
}
ast::Expr::MatchExpr(m) => {
if let Some(arms) = m.match_arm_list() {
arms.arms().filter_map(|arm| arm.expr()).for_each(|e| for_each_tail_expr(&e, cb));
}
}
ast::Expr::ArrayExpr(_)
| ast::Expr::AwaitExpr(_)
| ast::Expr::BinExpr(_)
| ast::Expr::BoxExpr(_)
| ast::Expr::BreakExpr(_)
| ast::Expr::CallExpr(_)
| ast::Expr::CastExpr(_)
| ast::Expr::ClosureExpr(_)
| ast::Expr::ContinueExpr(_)
| ast::Expr::FieldExpr(_)
| ast::Expr::ForExpr(_)
| ast::Expr::IndexExpr(_)
| ast::Expr::Literal(_)
| ast::Expr::MacroCall(_)
| ast::Expr::MacroStmts(_)
| ast::Expr::MethodCallExpr(_)
| ast::Expr::ParenExpr(_)
| ast::Expr::PathExpr(_)
| ast::Expr::PrefixExpr(_)
| ast::Expr::RangeExpr(_)
| ast::Expr::RecordExpr(_)
| ast::Expr::RefExpr(_)
| ast::Expr::ReturnExpr(_)
| ast::Expr::TryExpr(_)
| ast::Expr::TupleExpr(_)
| ast::Expr::WhileExpr(_)
| ast::Expr::YieldExpr(_) => cb(expr),
}
}
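// Illustrative note added for clarity (not part of the original source):
// for a body like
//
//   if cond { 1 } else { loop { break 2; } }
//
// `for_each_tail_expr` invokes `cb` on the literal `1` and on the
// `break 2` expression (found via `for_each_break_expr`), but not on the
// `if`/`loop` nodes themselves, and it does not descend into the value of
// a `break` or `return`.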
/// Calls `cb` on each break expr inside of `body` that is applicable for the given label.
pub fn for_each_break_expr(
label: Option<ast::Label>,
body: Option<ast::BlockExpr>,
cb: &mut dyn FnMut(ast::BreakExpr),
) {
let label = label.and_then(|lbl| lbl.lifetime());
let mut depth = 0;
if let Some(b) = body {
let preorder = &mut b.syntax().preorder();
let ev_as_expr = |ev| match ev {
WalkEvent::Enter(it) => Some(WalkEvent::Enter(ast::Expr::cast(it)?)),
WalkEvent::Leave(it) => Some(WalkEvent::Leave(ast::Expr::cast(it)?)),
};
let eq_label = |lt: Option<ast::Lifetime>| {
lt.zip(label.as_ref()).map_or(false, |(lt, lbl)| lt.text() == lbl.text())
};
while let Some(node) = preorder.find_map(ev_as_expr) {
match node {
WalkEvent::Enter(expr) => match expr {
ast::Expr::LoopExpr(_) | ast::Expr::WhileExpr(_) | ast::Expr::ForExpr(_) => {
depth += 1
}
ast::Expr::EffectExpr(e) if e.label().is_some() => depth += 1,
ast::Expr::BreakExpr(b)
if (depth == 0 && b.lifetime().is_none()) || eq_label(b.lifetime()) =>
{
cb(b);
}
_ => (),
},
WalkEvent::Leave(expr) => match expr {
ast::Expr::LoopExpr(_) | ast::Expr::WhileExpr(_) | ast::Expr::ForExpr(_) => {
depth -= 1
}
ast::Expr::EffectExpr(e) if e.label().is_some() => depth -= 1,
_ => (),
},
}
}
}
}
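// Illustrative note added for clarity (not part of the original source):
// the `depth` counter ensures only breaks that belong to the given label
// (or unlabeled breaks at depth 0) are reported. For
//
//   'outer: loop { loop { break; break 'outer; } }
//
// called with the `'outer` label and the outer loop's body, only
// `break 'outer` reaches `cb`; the plain `break` targets the inner loop.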
| mod_path_to_ast | identifier_name |
helpers.rs | //! A module with ide helpers for high-level ide features.
pub mod import_assets;
pub mod insert_use;
pub mod merge_imports;
pub mod rust_doc;
pub mod generated_lints;
use std::collections::VecDeque;
use base_db::FileId;
use either::Either;
use hir::{Crate, Enum, ItemInNs, MacroDef, Module, ModuleDef, Name, ScopeDef, Semantics, Trait};
use syntax::{
ast::{self, make, LoopBodyOwner},
AstNode, Direction, SyntaxElement, SyntaxKind, SyntaxToken, TokenAtOffset, WalkEvent, T,
};
use crate::RootDatabase;
pub fn item_name(db: &RootDatabase, item: ItemInNs) -> Option<Name> {
match item {
ItemInNs::Types(module_def_id) => ModuleDef::from(module_def_id).name(db),
ItemInNs::Values(module_def_id) => ModuleDef::from(module_def_id).name(db),
ItemInNs::Macros(macro_def_id) => MacroDef::from(macro_def_id).name(db),
}
}
/// Resolves the path at the cursor token as a derive macro if it is inside a token tree of a derive attribute.
pub fn try_resolve_derive_input_at(
sema: &Semantics<RootDatabase>,
derive_attr: &ast::Attr,
cursor: &SyntaxToken,
) -> Option<MacroDef> {
use itertools::Itertools;
if cursor.kind() != T![ident] {
return None;
}
let tt = match derive_attr.as_simple_call() {
Some((name, tt))
if name == "derive" && tt.syntax().text_range().contains_range(cursor.text_range()) =>
{
tt
}
_ => return None,
};
let tokens: Vec<_> = cursor
.siblings_with_tokens(Direction::Prev)
.flat_map(SyntaxElement::into_token)
.take_while(|tok| tok.kind() != T!['('] && tok.kind() != T![,])
.collect();
let path = ast::Path::parse(&tokens.into_iter().rev().join("")).ok()?;
match sema.scope(tt.syntax()).speculative_resolve(&path) {
Some(hir::PathResolution::Macro(makro)) if makro.kind() == hir::MacroKind::Derive => {
Some(makro)
}
_ => None,
}
}
/// Picks the token with the highest rank returned by the passed-in function.
pub fn pick_best_token(
tokens: TokenAtOffset<SyntaxToken>,
f: impl Fn(SyntaxKind) -> usize,
) -> Option<SyntaxToken> {
tokens.max_by_key(move |t| f(t.kind()))
}
/// Converts the mod path struct into its ast representation.
pub fn mod_path_to_ast(path: &hir::ModPath) -> ast::Path {
let _p = profile::span("mod_path_to_ast");
let mut segments = Vec::new();
let mut is_abs = false;
match path.kind {
hir::PathKind::Plain => {}
hir::PathKind::Super(0) => segments.push(make::path_segment_self()),
hir::PathKind::Super(n) => segments.extend((0..n).map(|_| make::path_segment_super())),
hir::PathKind::DollarCrate(_) | hir::PathKind::Crate => {
segments.push(make::path_segment_crate())
}
hir::PathKind::Abs => is_abs = true,
}
segments.extend(
path.segments()
.iter()
.map(|segment| make::path_segment(make::name_ref(&segment.to_string()))),
);
make::path_from_segments(segments, is_abs)
}
/// Iterates all `ModuleDef`s and `Impl` blocks of the given file.
pub fn visit_file_defs(
sema: &Semantics<RootDatabase>,
file_id: FileId,
cb: &mut dyn FnMut(Either<hir::ModuleDef, hir::Impl>),
) {
let db = sema.db;
let module = match sema.to_module_def(file_id) {
Some(it) => it,
None => return,
};
let mut defs: VecDeque<_> = module.declarations(db).into();
while let Some(def) = defs.pop_front() {
if let ModuleDef::Module(submodule) = def {
if let hir::ModuleSource::Module(_) = submodule.definition_source(db).value {
defs.extend(submodule.declarations(db));
submodule.impl_defs(db).into_iter().for_each(|impl_| cb(Either::Right(impl_)));
}
}
cb(Either::Left(def));
}
module.impl_defs(db).into_iter().for_each(|impl_| cb(Either::Right(impl_)));
}
/// Helps with finding well-known things inside the standard library. This is
/// somewhat similar to the known paths infra inside hir, but it is different; we
/// want to make sure that IDE-specific paths don't become interesting inside
/// the compiler itself as well.
///
/// Note that, by default, rust-analyzer tests **do not** include core or std
/// libraries. If you are writing tests for functionality using [`FamousDefs`],
/// you'd want to include minicore (see `test_utils::MiniCore`) declaration at
/// the start of your tests:
///
/// ```
/// //- minicore: iterator, ord, derive
/// ```
pub struct FamousDefs<'a, 'b>(pub &'a Semantics<'b, RootDatabase>, pub Option<Crate>);
#[allow(non_snake_case)]
impl FamousDefs<'_, '_> {
pub fn std(&self) -> Option<Crate> {
self.find_crate("std")
}
pub fn core(&self) -> Option<Crate> {
self.find_crate("core")
}
pub fn core_cmp_Ord(&self) -> Option<Trait> {
self.find_trait("core:cmp:Ord")
}
pub fn core_convert_From(&self) -> Option<Trait> {
self.find_trait("core:convert:From")
}
pub fn core_convert_Into(&self) -> Option<Trait> {
self.find_trait("core:convert:Into")
}
pub fn core_option_Option(&self) -> Option<Enum> {
self.find_enum("core:option:Option")
}
pub fn core_result_Result(&self) -> Option<Enum> {
self.find_enum("core:result:Result")
}
pub fn core_default_Default(&self) -> Option<Trait> {
self.find_trait("core:default:Default")
}
pub fn core_iter_Iterator(&self) -> Option<Trait> {
self.find_trait("core:iter:traits:iterator:Iterator")
}
pub fn core_iter_IntoIterator(&self) -> Option<Trait> {
self.find_trait("core:iter:traits:collect:IntoIterator")
}
pub fn core_iter(&self) -> Option<Module> {
self.find_module("core:iter")
}
pub fn core_ops_Deref(&self) -> Option<Trait> {
self.find_trait("core:ops:Deref")
}
fn find_trait(&self, path: &str) -> Option<Trait> {
match self.find_def(path)? {
hir::ScopeDef::ModuleDef(hir::ModuleDef::Trait(it)) => Some(it),
_ => None,
}
}
fn find_enum(&self, path: &str) -> Option<Enum> {
match self.find_def(path)? {
hir::ScopeDef::ModuleDef(hir::ModuleDef::Adt(hir::Adt::Enum(it))) => Some(it),
_ => None,
}
}
fn find_module(&self, path: &str) -> Option<Module> {
match self.find_def(path)? {
hir::ScopeDef::ModuleDef(hir::ModuleDef::Module(it)) => Some(it),
_ => None,
}
}
fn find_crate(&self, name: &str) -> Option<Crate> {
let krate = self.1?;
let db = self.0.db;
let res =
krate.dependencies(db).into_iter().find(|dep| dep.name.to_string() == name)?.krate; | let mut path = path.split(':');
let trait_ = path.next_back()?;
let std_crate = path.next()?;
let std_crate = self.find_crate(std_crate)?;
let mut module = std_crate.root_module(db);
for segment in path {
module = module.children(db).find_map(|child| {
let name = child.name(db)?;
if name.to_string() == segment {
Some(child)
} else {
None
}
})?;
}
let def =
module.scope(db, None).into_iter().find(|(name, _def)| name.to_string() == trait_)?.1;
Some(def)
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct SnippetCap {
_private: (),
}
impl SnippetCap {
pub const fn new(allow_snippets: bool) -> Option<SnippetCap> {
if allow_snippets {
Some(SnippetCap { _private: () })
} else {
None
}
}
}
/// Calls `cb` on each expression inside `expr` that is at "tail position".
/// Does not walk into `break` or `return` expressions.
pub fn for_each_tail_expr(expr: &ast::Expr, cb: &mut dyn FnMut(&ast::Expr)) {
match expr {
ast::Expr::BlockExpr(b) => {
if let Some(e) = b.tail_expr() {
for_each_tail_expr(&e, cb);
}
}
ast::Expr::EffectExpr(e) => match e.effect() {
ast::Effect::Label(label) => {
for_each_break_expr(Some(label), e.block_expr(), &mut |b| {
cb(&ast::Expr::BreakExpr(b))
});
if let Some(b) = e.block_expr() {
for_each_tail_expr(&ast::Expr::BlockExpr(b), cb);
}
}
ast::Effect::Unsafe(_) => {
if let Some(e) = e.block_expr().and_then(|b| b.tail_expr()) {
for_each_tail_expr(&e, cb);
}
}
ast::Effect::Async(_) | ast::Effect::Try(_) | ast::Effect::Const(_) => cb(expr),
},
ast::Expr::IfExpr(if_) => {
let mut if_ = if_.clone();
loop {
if let Some(block) = if_.then_branch() {
for_each_tail_expr(&ast::Expr::BlockExpr(block), cb);
}
match if_.else_branch() {
Some(ast::ElseBranch::IfExpr(it)) => if_ = it,
Some(ast::ElseBranch::Block(block)) => {
for_each_tail_expr(&ast::Expr::BlockExpr(block), cb);
break;
}
None => break,
}
}
}
ast::Expr::LoopExpr(l) => {
for_each_break_expr(l.label(), l.loop_body(), &mut |b| cb(&ast::Expr::BreakExpr(b)))
}
ast::Expr::MatchExpr(m) => {
if let Some(arms) = m.match_arm_list() {
arms.arms().filter_map(|arm| arm.expr()).for_each(|e| for_each_tail_expr(&e, cb));
}
}
ast::Expr::ArrayExpr(_)
| ast::Expr::AwaitExpr(_)
| ast::Expr::BinExpr(_)
| ast::Expr::BoxExpr(_)
| ast::Expr::BreakExpr(_)
| ast::Expr::CallExpr(_)
| ast::Expr::CastExpr(_)
| ast::Expr::ClosureExpr(_)
| ast::Expr::ContinueExpr(_)
| ast::Expr::FieldExpr(_)
| ast::Expr::ForExpr(_)
| ast::Expr::IndexExpr(_)
| ast::Expr::Literal(_)
| ast::Expr::MacroCall(_)
| ast::Expr::MacroStmts(_)
| ast::Expr::MethodCallExpr(_)
| ast::Expr::ParenExpr(_)
| ast::Expr::PathExpr(_)
| ast::Expr::PrefixExpr(_)
| ast::Expr::RangeExpr(_)
| ast::Expr::RecordExpr(_)
| ast::Expr::RefExpr(_)
| ast::Expr::ReturnExpr(_)
| ast::Expr::TryExpr(_)
| ast::Expr::TupleExpr(_)
| ast::Expr::WhileExpr(_)
| ast::Expr::YieldExpr(_) => cb(expr),
}
}
/// Calls `cb` on each break expr inside of `body` that is applicable for the given label.
pub fn for_each_break_expr(
label: Option<ast::Label>,
body: Option<ast::BlockExpr>,
cb: &mut dyn FnMut(ast::BreakExpr),
) {
let label = label.and_then(|lbl| lbl.lifetime());
let mut depth = 0;
if let Some(b) = body {
let preorder = &mut b.syntax().preorder();
let ev_as_expr = |ev| match ev {
WalkEvent::Enter(it) => Some(WalkEvent::Enter(ast::Expr::cast(it)?)),
WalkEvent::Leave(it) => Some(WalkEvent::Leave(ast::Expr::cast(it)?)),
};
let eq_label = |lt: Option<ast::Lifetime>| {
lt.zip(label.as_ref()).map_or(false, |(lt, lbl)| lt.text() == lbl.text())
};
while let Some(node) = preorder.find_map(ev_as_expr) {
match node {
WalkEvent::Enter(expr) => match expr {
ast::Expr::LoopExpr(_) | ast::Expr::WhileExpr(_) | ast::Expr::ForExpr(_) => {
depth += 1
}
ast::Expr::EffectExpr(e) if e.label().is_some() => depth += 1,
ast::Expr::BreakExpr(b)
if (depth == 0 && b.lifetime().is_none()) || eq_label(b.lifetime()) =>
{
cb(b);
}
_ => (),
},
WalkEvent::Leave(expr) => match expr {
ast::Expr::LoopExpr(_) | ast::Expr::WhileExpr(_) | ast::Expr::ForExpr(_) => {
depth -= 1
}
ast::Expr::EffectExpr(e) if e.label().is_some() => depth -= 1,
_ => (),
},
}
}
}
} | Some(res)
}
fn find_def(&self, path: &str) -> Option<ScopeDef> {
let db = self.0.db; | random_line_split |
helpers.rs | //! A module with ide helpers for high-level ide features.
pub mod import_assets;
pub mod insert_use;
pub mod merge_imports;
pub mod rust_doc;
pub mod generated_lints;
use std::collections::VecDeque;
use base_db::FileId;
use either::Either;
use hir::{Crate, Enum, ItemInNs, MacroDef, Module, ModuleDef, Name, ScopeDef, Semantics, Trait};
use syntax::{
ast::{self, make, LoopBodyOwner},
AstNode, Direction, SyntaxElement, SyntaxKind, SyntaxToken, TokenAtOffset, WalkEvent, T,
};
use crate::RootDatabase;
pub fn item_name(db: &RootDatabase, item: ItemInNs) -> Option<Name> {
match item {
ItemInNs::Types(module_def_id) => ModuleDef::from(module_def_id).name(db),
ItemInNs::Values(module_def_id) => ModuleDef::from(module_def_id).name(db),
ItemInNs::Macros(macro_def_id) => MacroDef::from(macro_def_id).name(db),
}
}
/// Resolves the path at the cursor token as a derive macro if it is inside a token tree of a derive attribute.
pub fn try_resolve_derive_input_at(
sema: &Semantics<RootDatabase>,
derive_attr: &ast::Attr,
cursor: &SyntaxToken,
) -> Option<MacroDef> {
use itertools::Itertools;
if cursor.kind() != T![ident] {
return None;
}
let tt = match derive_attr.as_simple_call() {
Some((name, tt))
if name == "derive" && tt.syntax().text_range().contains_range(cursor.text_range()) =>
{
tt
}
_ => return None,
};
let tokens: Vec<_> = cursor
.siblings_with_tokens(Direction::Prev)
.flat_map(SyntaxElement::into_token)
.take_while(|tok| tok.kind() != T!['('] && tok.kind() != T![,])
.collect();
let path = ast::Path::parse(&tokens.into_iter().rev().join("")).ok()?;
match sema.scope(tt.syntax()).speculative_resolve(&path) {
Some(hir::PathResolution::Macro(makro)) if makro.kind() == hir::MacroKind::Derive => {
Some(makro)
}
_ => None,
}
}
/// Picks the token with the highest rank returned by the passed-in function.
pub fn pick_best_token(
tokens: TokenAtOffset<SyntaxToken>,
f: impl Fn(SyntaxKind) -> usize,
) -> Option<SyntaxToken> {
tokens.max_by_key(move |t| f(t.kind()))
}
/// Converts the mod path struct into its ast representation.
pub fn mod_path_to_ast(path: &hir::ModPath) -> ast::Path {
let _p = profile::span("mod_path_to_ast");
let mut segments = Vec::new();
let mut is_abs = false;
match path.kind {
hir::PathKind::Plain => {}
hir::PathKind::Super(0) => segments.push(make::path_segment_self()),
hir::PathKind::Super(n) => segments.extend((0..n).map(|_| make::path_segment_super())),
hir::PathKind::DollarCrate(_) | hir::PathKind::Crate => {
segments.push(make::path_segment_crate())
}
hir::PathKind::Abs => is_abs = true,
}
segments.extend(
path.segments()
.iter()
.map(|segment| make::path_segment(make::name_ref(&segment.to_string()))),
);
make::path_from_segments(segments, is_abs)
}
/// Iterates all `ModuleDef`s and `Impl` blocks of the given file.
pub fn visit_file_defs(
sema: &Semantics<RootDatabase>,
file_id: FileId,
cb: &mut dyn FnMut(Either<hir::ModuleDef, hir::Impl>),
) {
let db = sema.db;
let module = match sema.to_module_def(file_id) {
Some(it) => it,
None => return,
};
let mut defs: VecDeque<_> = module.declarations(db).into();
while let Some(def) = defs.pop_front() {
if let ModuleDef::Module(submodule) = def {
if let hir::ModuleSource::Module(_) = submodule.definition_source(db).value {
defs.extend(submodule.declarations(db));
submodule.impl_defs(db).into_iter().for_each(|impl_| cb(Either::Right(impl_)));
}
}
cb(Either::Left(def));
}
module.impl_defs(db).into_iter().for_each(|impl_| cb(Either::Right(impl_)));
}
/// Helps with finding well-known things inside the standard library. This is
/// somewhat similar to the known paths infra inside hir, but it is different; we
/// want to make sure that IDE-specific paths don't become interesting inside
/// the compiler itself as well.
///
/// Note that, by default, rust-analyzer tests **do not** include core or std
/// libraries. If you are writing tests for functionality using [`FamousDefs`],
/// you'd want to include minicore (see `test_utils::MiniCore`) declaration at
/// the start of your tests:
///
/// ```
/// //- minicore: iterator, ord, derive
/// ```
pub struct FamousDefs<'a, 'b>(pub &'a Semantics<'b, RootDatabase>, pub Option<Crate>);
#[allow(non_snake_case)]
impl FamousDefs<'_, '_> {
pub fn std(&self) -> Option<Crate> {
self.find_crate("std")
}
pub fn core(&self) -> Option<Crate> {
self.find_crate("core")
}
pub fn core_cmp_Ord(&self) -> Option<Trait> {
self.find_trait("core:cmp:Ord")
}
pub fn core_convert_From(&self) -> Option<Trait> |
pub fn core_convert_Into(&self) -> Option<Trait> {
self.find_trait("core:convert:Into")
}
pub fn core_option_Option(&self) -> Option<Enum> {
self.find_enum("core:option:Option")
}
pub fn core_result_Result(&self) -> Option<Enum> {
self.find_enum("core:result:Result")
}
pub fn core_default_Default(&self) -> Option<Trait> {
self.find_trait("core:default:Default")
}
pub fn core_iter_Iterator(&self) -> Option<Trait> {
self.find_trait("core:iter:traits:iterator:Iterator")
}
pub fn core_iter_IntoIterator(&self) -> Option<Trait> {
self.find_trait("core:iter:traits:collect:IntoIterator")
}
pub fn core_iter(&self) -> Option<Module> {
self.find_module("core:iter")
}
pub fn core_ops_Deref(&self) -> Option<Trait> {
self.find_trait("core:ops:Deref")
}
fn find_trait(&self, path: &str) -> Option<Trait> {
match self.find_def(path)? {
hir::ScopeDef::ModuleDef(hir::ModuleDef::Trait(it)) => Some(it),
_ => None,
}
}
fn find_enum(&self, path: &str) -> Option<Enum> {
match self.find_def(path)? {
hir::ScopeDef::ModuleDef(hir::ModuleDef::Adt(hir::Adt::Enum(it))) => Some(it),
_ => None,
}
}
fn find_module(&self, path: &str) -> Option<Module> {
match self.find_def(path)? {
hir::ScopeDef::ModuleDef(hir::ModuleDef::Module(it)) => Some(it),
_ => None,
}
}
fn find_crate(&self, name: &str) -> Option<Crate> {
let krate = self.1?;
let db = self.0.db;
let res =
krate.dependencies(db).into_iter().find(|dep| dep.name.to_string() == name)?.krate;
Some(res)
}
fn find_def(&self, path: &str) -> Option<ScopeDef> {
let db = self.0.db;
let mut path = path.split(':');
let trait_ = path.next_back()?;
let std_crate = path.next()?;
let std_crate = self.find_crate(std_crate)?;
let mut module = std_crate.root_module(db);
for segment in path {
module = module.children(db).find_map(|child| {
let name = child.name(db)?;
if name.to_string() == segment {
Some(child)
} else {
None
}
})?;
}
let def =
module.scope(db, None).into_iter().find(|(name, _def)| name.to_string() == trait_)?.1;
Some(def)
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct SnippetCap {
_private: (),
}
impl SnippetCap {
pub const fn new(allow_snippets: bool) -> Option<SnippetCap> {
if allow_snippets {
Some(SnippetCap { _private: () })
} else {
None
}
}
}
/// Calls `cb` on each expression inside `expr` that is at "tail position".
/// Does not walk into `break` or `return` expressions.
pub fn for_each_tail_expr(expr: &ast::Expr, cb: &mut dyn FnMut(&ast::Expr)) {
match expr {
ast::Expr::BlockExpr(b) => {
if let Some(e) = b.tail_expr() {
for_each_tail_expr(&e, cb);
}
}
ast::Expr::EffectExpr(e) => match e.effect() {
ast::Effect::Label(label) => {
for_each_break_expr(Some(label), e.block_expr(), &mut |b| {
cb(&ast::Expr::BreakExpr(b))
});
if let Some(b) = e.block_expr() {
for_each_tail_expr(&ast::Expr::BlockExpr(b), cb);
}
}
ast::Effect::Unsafe(_) => {
if let Some(e) = e.block_expr().and_then(|b| b.tail_expr()) {
for_each_tail_expr(&e, cb);
}
}
ast::Effect::Async(_) | ast::Effect::Try(_) | ast::Effect::Const(_) => cb(expr),
},
ast::Expr::IfExpr(if_) => {
let mut if_ = if_.clone();
loop {
if let Some(block) = if_.then_branch() {
for_each_tail_expr(&ast::Expr::BlockExpr(block), cb);
}
match if_.else_branch() {
Some(ast::ElseBranch::IfExpr(it)) => if_ = it,
Some(ast::ElseBranch::Block(block)) => {
for_each_tail_expr(&ast::Expr::BlockExpr(block), cb);
break;
}
None => break,
}
}
}
ast::Expr::LoopExpr(l) => {
for_each_break_expr(l.label(), l.loop_body(), &mut |b| cb(&ast::Expr::BreakExpr(b)))
}
ast::Expr::MatchExpr(m) => {
if let Some(arms) = m.match_arm_list() {
arms.arms().filter_map(|arm| arm.expr()).for_each(|e| for_each_tail_expr(&e, cb));
}
}
ast::Expr::ArrayExpr(_)
| ast::Expr::AwaitExpr(_)
| ast::Expr::BinExpr(_)
| ast::Expr::BoxExpr(_)
| ast::Expr::BreakExpr(_)
| ast::Expr::CallExpr(_)
| ast::Expr::CastExpr(_)
| ast::Expr::ClosureExpr(_)
| ast::Expr::ContinueExpr(_)
| ast::Expr::FieldExpr(_)
| ast::Expr::ForExpr(_)
| ast::Expr::IndexExpr(_)
| ast::Expr::Literal(_)
| ast::Expr::MacroCall(_)
| ast::Expr::MacroStmts(_)
| ast::Expr::MethodCallExpr(_)
| ast::Expr::ParenExpr(_)
| ast::Expr::PathExpr(_)
| ast::Expr::PrefixExpr(_)
| ast::Expr::RangeExpr(_)
| ast::Expr::RecordExpr(_)
| ast::Expr::RefExpr(_)
| ast::Expr::ReturnExpr(_)
| ast::Expr::TryExpr(_)
| ast::Expr::TupleExpr(_)
| ast::Expr::WhileExpr(_)
| ast::Expr::YieldExpr(_) => cb(expr),
}
}
/// Calls `cb` on each break expr inside of `body` that is applicable for the given label.
pub fn for_each_break_expr(
label: Option<ast::Label>,
body: Option<ast::BlockExpr>,
cb: &mut dyn FnMut(ast::BreakExpr),
) {
let label = label.and_then(|lbl| lbl.lifetime());
let mut depth = 0;
if let Some(b) = body {
let preorder = &mut b.syntax().preorder();
let ev_as_expr = |ev| match ev {
WalkEvent::Enter(it) => Some(WalkEvent::Enter(ast::Expr::cast(it)?)),
WalkEvent::Leave(it) => Some(WalkEvent::Leave(ast::Expr::cast(it)?)),
};
let eq_label = |lt: Option<ast::Lifetime>| {
lt.zip(label.as_ref()).map_or(false, |(lt, lbl)| lt.text() == lbl.text())
};
while let Some(node) = preorder.find_map(ev_as_expr) {
match node {
WalkEvent::Enter(expr) => match expr {
ast::Expr::LoopExpr(_) | ast::Expr::WhileExpr(_) | ast::Expr::ForExpr(_) => {
depth += 1
}
ast::Expr::EffectExpr(e) if e.label().is_some() => depth += 1,
ast::Expr::BreakExpr(b)
if (depth == 0 && b.lifetime().is_none()) || eq_label(b.lifetime()) =>
{
cb(b);
}
_ => (),
},
WalkEvent::Leave(expr) => match expr {
ast::Expr::LoopExpr(_) | ast::Expr::WhileExpr(_) | ast::Expr::ForExpr(_) => {
depth -= 1
}
ast::Expr::EffectExpr(e) if e.label().is_some() => depth -= 1,
_ => (),
},
}
}
}
}
| {
self.find_trait("core:convert:From")
} | identifier_body |
pool.rs | Send + 'a);
struct Work {
func: WorkInner<'static>
}
struct JobStatus {
wait: bool,
job_finished: mpsc::Receiver<Result<(), ()>>,
}
/// A token representing a job submitted to the thread pool.
///
/// This helps ensure that a job is finished before borrowed resources
/// in the job (and the pool itself) are invalidated.
///
/// If the job panics, this handle will ensure the main thread also
/// panics (either via `wait` or in the destructor).
pub struct JobHandle<'pool, 'f> {
pool: &'pool mut Pool,
status: Arc<Mutex<JobStatus>>,
_funcs: marker::PhantomData<&'f ()>,
}
impl JobStatus {
fn wait(&mut self) {
if self.wait {
self.wait = false;
self.job_finished.recv().unwrap().unwrap();
}
}
}
impl<'pool, 'f> JobHandle<'pool, 'f> {
/// Block until the job is finished.
///
/// # Panics
///
/// This will panic if the job panicked.
pub fn wait(&self) {
self.status.lock().unwrap().wait();
}
}
impl<'pool, 'f> Drop for JobHandle<'pool, 'f> {
fn drop(&mut self) {
self.wait();
self.pool.job_status = None;
}
}
impl Drop for Pool {
fn drop(&mut self) {
let (tx, rx) = mpsc::channel();
self.job_queue.send((None, tx)).unwrap();
rx.recv().unwrap().unwrap();
}
}
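/// Dropped at the end of each worker thread; if that thread is unwinding, the shared flag is
/// set so the supervisor can report the panic back to the waiting `JobHandle`.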
struct PanicCanary<'a> {
flag: &'a atomic::AtomicBool
}
impl<'a> Drop for PanicCanary<'a> {
fn drop(&mut self) {
if thread::panicking() {
self.flag.store(true, atomic::Ordering::SeqCst)
}
}
}
impl Pool {
/// Create a new thread pool with `n_threads` worker threads.
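///
/// Internally this spawns a supervisor thread owning one channel per worker: jobs sent on
/// `job_queue` are dispatched from there, and worker panics are detected through a shared
/// flag and surfaced when the job is waited on.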
pub fn new(n_threads: usize) -> Pool {
let (tx, rx) = mpsc::channel::<(Option<Job>, mpsc::Sender<Result<(), ()>>)>();
thread::spawn(move || {
let panicked = Arc::new(atomic::AtomicBool::new(false));
let mut _guards = Vec::with_capacity(n_threads);
let mut txs = Vec::with_capacity(n_threads);
for i in 0..n_threads {
let id = WorkerId { n: i };
let (subtx, subrx) = mpsc::channel::<Work>();
txs.push(subtx);
let panicked = panicked.clone();
_guards.push(thread::spawn(move || {
let _canary = PanicCanary {
flag: &panicked
};
loop {
match subrx.recv() {
Ok(mut work) => {
(work.func)(id)
}
Err(_) => break,
}
}
}))
}
loop {
match rx.recv() {
Ok((Some(job), finished_tx)) => {
(job.func).call_box(&txs);
let job_panicked = panicked.load(atomic::Ordering::SeqCst);
let msg = if job_panicked { Err(()) } else { Ok(()) };
finished_tx.send(msg).unwrap();
if job_panicked { break }
}
Ok((None, finished_tx)) => {
finished_tx.send(Ok(())).unwrap();
break
}
Err(_) => break,
}
}
});
Pool {
job_queue: tx,
job_status: None,
n_threads: n_threads,
}
}
/// Execute `f` on each element of `iter`.
///
/// This panics if `f` panics, although the precise time and
/// number of elements consumed after the element that panics are
/// not specified.
///
/// # Examples
///
/// ```rust
/// use simple_parallel::Pool;
///
/// let mut pool = Pool::new(4);
///
/// let mut v = [0; 8];
///
/// // set each element, in parallel
/// pool.for_(&mut v, |element| *element = 3);
///
/// assert_eq!(v, [3; 8]);
/// ```
pub fn for_<Iter: IntoIterator, F>(&mut self, iter: Iter, ref f: F)
where Iter::Item: Send,
Iter: Send,
F: Fn(Iter::Item) + Sync
{
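// Handshake: each worker repeatedly sends its `WorkerId` on the `needwork` channel and then
// receives the next item (or `None` once the iterator is exhausted) on its own work channel;
// the supervisor-side closure passed to `execute` drives the iterator and answers whichever
// worker asked.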
let (needwork_tx, needwork_rx) = mpsc::channel();
let mut work_txs = Vec::with_capacity(self.n_threads);
let mut work_rxs = Vec::with_capacity(self.n_threads);
for _ in 0..self.n_threads {
let (t, r) = mpsc::channel();
work_txs.push(t);
work_rxs.push(r);
}
let mut work_rxs = work_rxs.into_iter();
crossbeam::scope(|scope| unsafe {
let handle = self.execute(
scope,
needwork_tx,
|needwork_tx| {
let mut needwork_tx = Some(needwork_tx.clone());
let mut work_rx = Some(work_rxs.next().unwrap());
move |id| {
let work_rx = work_rx.take().unwrap();
let needwork = needwork_tx.take().unwrap();
loop {
needwork.send(id).unwrap();
match work_rx.recv() {
Ok(Some(elem)) => {
f(elem);
}
Ok(None) | Err(_) => break
}
}
}
},
move |needwork_tx| {
let mut iter = iter.into_iter().fuse();
drop(needwork_tx);
loop {
match needwork_rx.recv() {
// closed, done!
Err(_) => break,
Ok(id) => {
work_txs[id.n].send(iter.next()).unwrap();
}
}
}
});
handle.wait();
})
}
/// Execute `f` on each element in `iter` in parallel across the
/// pool's threads, with unspecified yield order.
///
/// This behaves like `map`, but does not make efforts to ensure
/// that the elements are returned in the order of `iter`, hence
/// this is cheaper.
///
/// The iterator yields `(usize, T)` tuples, where the `usize` is
/// the index of the element in the original iterator.
///
/// # Examples
///
/// ```rust
/// extern crate crossbeam;
/// extern crate simple_parallel;
/// # fn main() {
/// use simple_parallel::Pool;
///
/// let mut pool = Pool::new(4);
///
/// // adjust each element in parallel, and iterate over them as
/// // they are generated (or as close to that as possible)
/// crossbeam::scope(|scope| {
/// for (index, output) in pool.unordered_map(scope, 0..8, |i| i + 10) {
/// // each element is exactly 10 more than its original index
/// assert_eq!(output, index as i32 + 10);
/// }
/// })
/// # }
/// ```
pub fn unordered_map<'pool, 'a, I: IntoIterator, F, T>(&'pool mut self, scope: &Scope<'a>, iter: I, f: F)
-> UnorderedParMap<'pool, 'a, T>
where I: 'a + Send,
I::Item: Send + 'a,
F: 'a + Sync + Send + Fn(I::Item) -> T,
T: Send + 'a
{
let nthreads = self.n_threads;
let (needwork_tx, needwork_rx) = mpsc::channel();
let (work_tx, work_rx) = mpsc::channel();
struct Shared<Chan, Atom, F> {
work: Chan,
sent: Atom,
finished: Atom,
func: F,
}
let shared = Arc::new(Shared {
work: Mutex::new(work_rx),
sent: atomic::AtomicUsize::new(0),
finished: atomic::AtomicUsize::new(0),
func: f,
});
let (tx, rx) = mpsc::channel();
const INITIAL_FACTOR: usize = 4;
const BUFFER_FACTOR: usize = INITIAL_FACTOR / 2;
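// Backpressure scheme: the queue is seeded with INITIAL_FACTOR * nthreads items; once
// `finished` catches up to within BUFFER_FACTOR * nthreads of `sent`, a worker sends `false`
// on `needwork` and another BUFFER_FACTOR * nthreads items are queued (`true` means the
// consumer hung up and no further work is needed).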
let handle = unsafe {
self.execute(scope, (needwork_tx, shared),
move |&mut (ref needwork_tx, ref shared)| {
let mut needwork_tx = Some(needwork_tx.clone());
let tx = tx.clone();
let shared = shared.clone();
move |_id| {
let needwork = needwork_tx.take().unwrap();
loop {
let data = {
let guard = shared.work.lock().unwrap();
guard.recv()
};
match data {
Ok(Some((idx, elem))) => {
let data = (shared.func)(elem);
let status = tx.send(Packet {
idx: idx, data: data
});
// the user disconnected,
// so there's no point
// computing more.
if status.is_err() {
let _ = needwork.send(true);
break
}
}
Ok(None) | Err(_) => {
break
}
};
let old =
shared.finished.fetch_add(1, atomic::Ordering::SeqCst);
let sent = shared.sent.load(atomic::Ordering::SeqCst);
if old + BUFFER_FACTOR * nthreads == sent {
if needwork.send(false).is_err() {
break
}
}
}
}
},
move |(needwork_tx, shared)| {
let mut iter = iter.into_iter().fuse().enumerate();
drop(needwork_tx);
let mut send_data = |n: usize| {
shared.sent.fetch_add(n, atomic::Ordering::SeqCst);
for _ in 0..n {
// TODO: maybe this could instead send
// several elements at a time, to
// reduce the number of
// allocations/atomic operations
// performed.
//
// Downside: work will be
// distributed chunkier.
let _ = work_tx.send(iter.next());
}
};
send_data(INITIAL_FACTOR * nthreads);
loop {
match needwork_rx.recv() { | Ok(false) => {
// ignore return, because we
// need to wait until the
// workers have exited (i.e.,
// the Err arm above)
let _ = send_data(BUFFER_FACTOR * nthreads);
}
}
}
})
};
UnorderedParMap {
rx: rx,
_guard: handle,
}
}
/// Execute `f` on `iter` in parallel across the pool's threads,
/// returning an iterator that yields the results in the order of
/// the elements of `iter` to which they correspond.
///
/// This is a drop-in replacement for `iter.map(f)`, that runs in
/// parallel, and consumes `iter` as the pool's threads complete
/// their previous tasks.
///
/// See `unordered_map` if the output order is unimportant.
///
/// # Examples
///
/// ```rust
/// extern crate crossbeam;
/// extern crate simple_parallel;
/// use simple_parallel::Pool;
///
/// # fn main() {
/// let mut pool = Pool::new(4);
///
/// // create a vector by adjusting 0..8, in parallel
/// let elements: Vec<_> = crossbeam::scope(|scope| {
/// pool.map(scope, 0..8, |i| i + 10).collect()
/// });
///
/// assert_eq!(elements, &[10, 11, 12, 13, 14, 15, 16, 17]);
/// # }
/// ```
pub fn map<'pool, 'a, I: IntoIterator, F, T>(&'pool mut self, scope: &Scope<'a>, iter: I, f: F)
-> ParMap<'pool, 'a, T>
where I: 'a + Send,
I::Item: Send + 'a,
F: 'a + Send + Sync + Fn(I::Item) -> T,
T: Send + 'a
{
ParMap {
unordered: self.unordered_map(scope, iter, f),
looking_for: 0,
queue: BinaryHeap::new(),
}
}
}
/// Low-level/internal functionality.
impl Pool {
/// Run a job on the thread pool.
///
/// `gen_fn` is called `self.n_threads` times to create the
/// functions to execute on the worker threads. Each of these is
/// immediately called exactly once on a worker thread (that is,
/// they are semantically `FnOnce`), and `main_fn` is also called,
/// on the supervisor thread. It is expected that the workers and
/// | // closed, done!
Ok(true) | Err(_) => break, | random_line_split |
pool.rs | + 'a);
struct | {
func: WorkInner<'static>
}
struct JobStatus {
wait: bool,
job_finished: mpsc::Receiver<Result<(), ()>>,
}
/// A token representing a job submitted to the thread pool.
///
/// This helps ensure that a job is finished before borrowed resources
/// in the job (and the pool itself) are invalidated.
///
/// If the job panics, this handle will ensure the main thread also
/// panics (either via `wait` or in the destructor).
pub struct JobHandle<'pool, 'f> {
pool: &'pool mut Pool,
status: Arc<Mutex<JobStatus>>,
_funcs: marker::PhantomData<&'f ()>,
}
impl JobStatus {
fn wait(&mut self) {
if self.wait {
self.wait = false;
self.job_finished.recv().unwrap().unwrap();
}
}
}
impl<'pool, 'f> JobHandle<'pool, 'f> {
/// Block until the job is finished.
///
/// # Panics
///
/// This will panic if the job panicked.
pub fn wait(&self) {
self.status.lock().unwrap().wait();
}
}
impl<'pool, 'f> Drop for JobHandle<'pool, 'f> {
fn drop(&mut self) {
self.wait();
self.pool.job_status = None;
}
}
impl Drop for Pool {
fn drop(&mut self) {
let (tx, rx) = mpsc::channel();
self.job_queue.send((None, tx)).unwrap();
rx.recv().unwrap().unwrap();
}
}
struct PanicCanary<'a> {
flag: &'a atomic::AtomicBool
}
impl<'a> Drop for PanicCanary<'a> {
fn drop(&mut self) {
if thread::panicking() {
self.flag.store(true, atomic::Ordering::SeqCst)
}
}
}
impl Pool {
/// Create a new thread pool with `n_threads` worker threads.
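///
/// Internally this spawns a supervisor thread owning one channel per worker: jobs sent on
/// `job_queue` are dispatched from there, and worker panics are detected through a shared
/// flag and surfaced when the job is waited on.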
pub fn new(n_threads: usize) -> Pool {
let (tx, rx) = mpsc::channel::<(Option<Job>, mpsc::Sender<Result<(), ()>>)>();
thread::spawn(move || {
let panicked = Arc::new(atomic::AtomicBool::new(false));
let mut _guards = Vec::with_capacity(n_threads);
let mut txs = Vec::with_capacity(n_threads);
for i in 0..n_threads {
let id = WorkerId { n: i };
let (subtx, subrx) = mpsc::channel::<Work>();
txs.push(subtx);
let panicked = panicked.clone();
_guards.push(thread::spawn(move || {
let _canary = PanicCanary {
flag: &panicked
};
loop {
match subrx.recv() {
Ok(mut work) => {
(work.func)(id)
}
Err(_) => break,
}
}
}))
}
loop {
match rx.recv() {
Ok((Some(job), finished_tx)) => {
(job.func).call_box(&txs);
let job_panicked = panicked.load(atomic::Ordering::SeqCst);
let msg = if job_panicked { Err(()) } else { Ok(()) };
finished_tx.send(msg).unwrap();
if job_panicked { break }
}
Ok((None, finished_tx)) => {
finished_tx.send(Ok(())).unwrap();
break
}
Err(_) => break,
}
}
});
Pool {
job_queue: tx,
job_status: None,
n_threads: n_threads,
}
}
/// Execute `f` on each element of `iter`.
///
/// This panics if `f` panics, although the precise time and
/// number of elements consumed after the element that panics are
/// not specified.
///
/// # Examples
///
/// ```rust
/// use simple_parallel::Pool;
///
/// let mut pool = Pool::new(4);
///
/// let mut v = [0; 8];
///
/// // set each element, in parallel
/// pool.for_(&mut v, |element| *element = 3);
///
/// assert_eq!(v, [3; 8]);
/// ```
pub fn for_<Iter: IntoIterator, F>(&mut self, iter: Iter, ref f: F)
where Iter::Item: Send,
Iter: Send,
F: Fn(Iter::Item) + Sync
{
let (needwork_tx, needwork_rx) = mpsc::channel();
let mut work_txs = Vec::with_capacity(self.n_threads);
let mut work_rxs = Vec::with_capacity(self.n_threads);
for _ in 0..self.n_threads {
let (t, r) = mpsc::channel();
work_txs.push(t);
work_rxs.push(r);
}
let mut work_rxs = work_rxs.into_iter();
crossbeam::scope(|scope| unsafe {
let handle = self.execute(
scope,
needwork_tx,
|needwork_tx| {
let mut needwork_tx = Some(needwork_tx.clone());
let mut work_rx = Some(work_rxs.next().unwrap());
move |id| {
let work_rx = work_rx.take().unwrap();
let needwork = needwork_tx.take().unwrap();
loop {
needwork.send(id).unwrap();
match work_rx.recv() {
Ok(Some(elem)) => {
f(elem);
}
Ok(None) | Err(_) => break
}
}
}
},
move |needwork_tx| {
let mut iter = iter.into_iter().fuse();
drop(needwork_tx);
loop {
match needwork_rx.recv() {
// closed, done!
Err(_) => break,
Ok(id) => {
work_txs[id.n].send(iter.next()).unwrap();
}
}
}
});
handle.wait();
})
}
/// Execute `f` on each element in `iter` in parallel across the
/// pool's threads, with unspecified yield order.
///
/// This behaves like `map`, but does not make efforts to ensure
/// that the elements are returned in the order of `iter`, hence
/// this is cheaper.
///
/// The iterator yields `(usize, T)` tuples, where the `usize` is
/// the index of the element in the original iterator.
///
/// # Examples
///
/// ```rust
/// extern crate crossbeam;
/// extern crate simple_parallel;
/// # fn main() {
/// use simple_parallel::Pool;
///
/// let mut pool = Pool::new(4);
///
/// // adjust each element in parallel, and iterate over them as
/// // they are generated (or as close to that as possible)
/// crossbeam::scope(|scope| {
/// for (index, output) in pool.unordered_map(scope, 0..8, |i| i + 10) {
/// // each element is exactly 10 more than its original index
/// assert_eq!(output, index as i32 + 10);
/// }
/// })
/// # }
/// ```
pub fn unordered_map<'pool, 'a, I: IntoIterator, F, T>(&'pool mut self, scope: &Scope<'a>, iter: I, f: F)
-> UnorderedParMap<'pool, 'a, T>
where I: 'a + Send,
I::Item: Send + 'a,
F: 'a + Sync + Send + Fn(I::Item) -> T,
T: Send + 'a
{
let nthreads = self.n_threads;
let (needwork_tx, needwork_rx) = mpsc::channel();
let (work_tx, work_rx) = mpsc::channel();
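// State shared by every worker: the work queue sits behind a Mutex so each item is taken by
// exactly one worker, while the `sent`/`finished` counters decide when to ask the supervisor
// for another batch.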
struct Shared<Chan, Atom, F> {
work: Chan,
sent: Atom,
finished: Atom,
func: F,
}
let shared = Arc::new(Shared {
work: Mutex::new(work_rx),
sent: atomic::AtomicUsize::new(0),
finished: atomic::AtomicUsize::new(0),
func: f,
});
let (tx, rx) = mpsc::channel();
const INITIAL_FACTOR: usize = 4;
const BUFFER_FACTOR: usize = INITIAL_FACTOR / 2;
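// Backpressure scheme: the queue is seeded with INITIAL_FACTOR * nthreads items; once
// `finished` catches up to within BUFFER_FACTOR * nthreads of `sent`, a worker sends `false`
// on `needwork` and another BUFFER_FACTOR * nthreads items are queued (`true` means the
// consumer hung up and no further work is needed).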
let handle = unsafe {
self.execute(scope, (needwork_tx, shared),
move |&mut (ref needwork_tx, ref shared)| {
let mut needwork_tx = Some(needwork_tx.clone());
let tx = tx.clone();
let shared = shared.clone();
move |_id| {
let needwork = needwork_tx.take().unwrap();
loop {
let data = {
let guard = shared.work.lock().unwrap();
guard.recv()
};
match data {
Ok(Some((idx, elem))) => {
let data = (shared.func)(elem);
let status = tx.send(Packet {
idx: idx, data: data
});
// the user disconnected,
// so there's no point
// computing more.
if status.is_err() {
let _ = needwork.send(true);
break
}
}
Ok(None) | Err(_) => {
break
}
};
let old =
shared.finished.fetch_add(1, atomic::Ordering::SeqCst);
let sent = shared.sent.load(atomic::Ordering::SeqCst);
if old + BUFFER_FACTOR * nthreads == sent {
if needwork.send(false).is_err() {
break
}
}
}
}
},
move |(needwork_tx, shared)| {
let mut iter = iter.into_iter().fuse().enumerate();
drop(needwork_tx);
let mut send_data = |n: usize| {
shared.sent.fetch_add(n, atomic::Ordering::SeqCst);
for _ in 0..n {
// TODO: maybe this could instead send
// several elements at a time, to
// reduce the number of
// allocations/atomic operations
// performed.
//
// Downside: work will be
// distributed chunkier.
let _ = work_tx.send(iter.next());
}
};
send_data(INITIAL_FACTOR * nthreads);
loop {
match needwork_rx.recv() {
// closed, done!
Ok(true) | Err(_) => break,
Ok(false) => {
// ignore return, because we
// need to wait until the
// workers have exited (i.e.,
// the Err arm above)
let _ = send_data(BUFFER_FACTOR * nthreads);
}
}
}
})
};
UnorderedParMap {
rx: rx,
_guard: handle,
}
}
/// Execute `f` on `iter` in parallel across the pool's threads,
/// returning an iterator that yields the results in the order of
/// the elements of `iter` to which they correspond.
///
/// This is a drop-in replacement for `iter.map(f)`, that runs in
/// parallel, and consumes `iter` as the pool's threads complete
/// their previous tasks.
///
/// See `unordered_map` if the output order is unimportant.
///
/// # Examples
///
/// ```rust
/// extern crate crossbeam;
/// extern crate simple_parallel;
/// use simple_parallel::Pool;
///
/// # fn main() {
/// let mut pool = Pool::new(4);
///
/// // create a vector by adjusting 0..8, in parallel
/// let elements: Vec<_> = crossbeam::scope(|scope| {
/// pool.map(scope, 0..8, |i| i + 10).collect()
/// });
///
/// assert_eq!(elements, &[10, 11, 12, 13, 14, 15, 16, 17]);
/// # }
/// ```
pub fn map<'pool, 'a, I: IntoIterator, F, T>(&'pool mut self, scope: &Scope<'a>, iter: I, f: F)
-> ParMap<'pool, 'a, T>
where I: 'a + Send,
I::Item: Send + 'a,
F: 'a + Send + Sync + Fn(I::Item) -> T,
T: Send + 'a
{
ParMap {
unordered: self.unordered_map(scope, iter, f),
looking_for: 0,
queue: BinaryHeap::new(),
}
}
}
/// Low-level/internal functionality.
impl Pool {
/// Run a job on the thread pool.
///
/// `gen_fn` is called `self.n_threads` times to create the
/// functions to execute on the worker threads. Each of these is
/// immediately called exactly once on a worker thread (that is,
/// they are semantically `FnOnce`), and `main_fn` is also called,
/// on the supervisor thread. It is expected that the workers and
| Work | identifier_name |
pool.rs | + 'a);
struct Work {
func: WorkInner<'static>
}
struct JobStatus {
wait: bool,
job_finished: mpsc::Receiver<Result<(), ()>>,
}
/// A token representing a job submitted to the thread pool.
///
/// This helps ensure that a job is finished before borrowed resources
/// in the job (and the pool itself) are invalidated.
///
/// If the job panics, this handle will ensure the main thread also
/// panics (either via `wait` or in the destructor).
pub struct JobHandle<'pool, 'f> {
pool: &'pool mut Pool,
status: Arc<Mutex<JobStatus>>,
_funcs: marker::PhantomData<&'f ()>,
}
impl JobStatus {
fn wait(&mut self) {
if self.wait {
self.wait = false;
self.job_finished.recv().unwrap().unwrap();
}
}
}
impl<'pool, 'f> JobHandle<'pool, 'f> {
/// Block until the job is finished.
///
/// # Panics
///
/// This will panic if the job panicked.
pub fn wait(&self) {
self.status.lock().unwrap().wait();
}
}
impl<'pool, 'f> Drop for JobHandle<'pool, 'f> {
fn drop(&mut self) {
self.wait();
self.pool.job_status = None;
}
}
impl Drop for Pool {
fn drop(&mut self) {
let (tx, rx) = mpsc::channel();
self.job_queue.send((None, tx)).unwrap();
rx.recv().unwrap().unwrap();
}
}
struct PanicCanary<'a> {
flag: &'a atomic::AtomicBool
}
impl<'a> Drop for PanicCanary<'a> {
fn drop(&mut self) {
if thread::panicking() {
self.flag.store(true, atomic::Ordering::SeqCst)
}
}
}
impl Pool {
/// Create a new thread pool with `n_threads` worker threads.
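///
/// Internally this spawns a supervisor thread owning one channel per worker: jobs sent on
/// `job_queue` are dispatched from there, and worker panics are detected through a shared
/// flag and surfaced when the job is waited on.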
pub fn new(n_threads: usize) -> Pool {
let (tx, rx) = mpsc::channel::<(Option<Job>, mpsc::Sender<Result<(), ()>>)>();
thread::spawn(move || {
let panicked = Arc::new(atomic::AtomicBool::new(false));
let mut _guards = Vec::with_capacity(n_threads);
let mut txs = Vec::with_capacity(n_threads);
for i in 0..n_threads {
let id = WorkerId { n: i };
let (subtx, subrx) = mpsc::channel::<Work>();
txs.push(subtx);
let panicked = panicked.clone();
_guards.push(thread::spawn(move || {
let _canary = PanicCanary {
flag: &panicked
};
loop {
match subrx.recv() {
Ok(mut work) => {
(work.func)(id)
}
Err(_) => break,
}
}
}))
}
loop {
match rx.recv() {
Ok((Some(job), finished_tx)) => {
(job.func).call_box(&txs);
let job_panicked = panicked.load(atomic::Ordering::SeqCst);
let msg = if job_panicked { Err(()) } else { Ok(()) };
finished_tx.send(msg).unwrap();
if job_panicked { break }
}
Ok((None, finished_tx)) => {
finished_tx.send(Ok(())).unwrap();
break
}
Err(_) => break,
}
}
});
Pool {
job_queue: tx,
job_status: None,
n_threads: n_threads,
}
}
/// Execute `f` on each element of `iter`.
///
/// This panics if `f` panics, although the precise time and
/// number of elements consumed after the element that panics are
/// not specified.
///
/// # Examples
///
/// ```rust
/// use simple_parallel::Pool;
///
/// let mut pool = Pool::new(4);
///
/// let mut v = [0; 8];
///
/// // set each element, in parallel
/// pool.for_(&mut v, |element| *element = 3);
///
/// assert_eq!(v, [3; 8]);
/// ```
pub fn for_<Iter: IntoIterator, F>(&mut self, iter: Iter, ref f: F)
where Iter::Item: Send,
Iter: Send,
F: Fn(Iter::Item) + Sync
{
let (needwork_tx, needwork_rx) = mpsc::channel();
let mut work_txs = Vec::with_capacity(self.n_threads);
let mut work_rxs = Vec::with_capacity(self.n_threads);
for _ in 0..self.n_threads {
let (t, r) = mpsc::channel();
work_txs.push(t);
work_rxs.push(r);
}
let mut work_rxs = work_rxs.into_iter();
crossbeam::scope(|scope| unsafe {
let handle = self.execute(
scope,
needwork_tx,
|needwork_tx| {
let mut needwork_tx = Some(needwork_tx.clone());
let mut work_rx = Some(work_rxs.next().unwrap());
move |id| {
let work_rx = work_rx.take().unwrap();
let needwork = needwork_tx.take().unwrap();
loop {
needwork.send(id).unwrap();
match work_rx.recv() {
Ok(Some(elem)) => {
f(elem);
}
Ok(None) | Err(_) => break
}
}
}
},
move |needwork_tx| {
let mut iter = iter.into_iter().fuse();
drop(needwork_tx);
loop {
match needwork_rx.recv() {
// closed, done!
Err(_) => break,
Ok(id) => {
work_txs[id.n].send(iter.next()).unwrap();
}
}
}
});
handle.wait();
})
}
/// Execute `f` on each element in `iter` in parallel across the
/// pool's threads, with unspecified yield order.
///
/// This behaves like `map`, but does not make efforts to ensure
/// that the elements are returned in the order of `iter`, hence
/// this is cheaper.
///
/// The iterator yields `(usize, T)` tuples, where the `usize` is
/// the index of the element in the original iterator.
///
/// # Examples
///
/// ```rust
/// extern crate crossbeam;
/// extern crate simple_parallel;
/// # fn main() {
/// use simple_parallel::Pool;
///
/// let mut pool = Pool::new(4);
///
/// // adjust each element in parallel, and iterate over them as
/// // they are generated (or as close to that as possible)
/// crossbeam::scope(|scope| {
/// for (index, output) in pool.unordered_map(scope, 0..8, |i| i + 10) {
/// // each element is exactly 10 more than its original index
/// assert_eq!(output, index as i32 + 10);
/// }
/// })
/// # }
/// ```
pub fn unordered_map<'pool, 'a, I: IntoIterator, F, T>(&'pool mut self, scope: &Scope<'a>, iter: I, f: F)
-> UnorderedParMap<'pool, 'a, T>
where I: 'a + Send,
I::Item: Send + 'a,
F: 'a + Sync + Send + Fn(I::Item) -> T,
T: Send + 'a
{
let nthreads = self.n_threads;
let (needwork_tx, needwork_rx) = mpsc::channel();
let (work_tx, work_rx) = mpsc::channel();
struct Shared<Chan, Atom, F> {
work: Chan,
sent: Atom,
finished: Atom,
func: F,
}
let shared = Arc::new(Shared {
work: Mutex::new(work_rx),
sent: atomic::AtomicUsize::new(0),
finished: atomic::AtomicUsize::new(0),
func: f,
});
let (tx, rx) = mpsc::channel();
const INITIAL_FACTOR: usize = 4;
const BUFFER_FACTOR: usize = INITIAL_FACTOR / 2;
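// Backpressure scheme: the queue is seeded with INITIAL_FACTOR * nthreads items; once
// `finished` catches up to within BUFFER_FACTOR * nthreads of `sent`, a worker sends `false`
// on `needwork` and another BUFFER_FACTOR * nthreads items are queued (`true` means the
// consumer hung up and no further work is needed).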
let handle = unsafe {
self.execute(scope, (needwork_tx, shared),
move |&mut (ref needwork_tx, ref shared)| {
let mut needwork_tx = Some(needwork_tx.clone());
let tx = tx.clone();
let shared = shared.clone();
move |_id| {
let needwork = needwork_tx.take().unwrap();
loop {
let data = {
let guard = shared.work.lock().unwrap();
guard.recv()
};
match data {
Ok(Some((idx, elem))) => {
let data = (shared.func)(elem);
let status = tx.send(Packet {
idx: idx, data: data
});
// the user disconnected,
// so there's no point
// computing more.
if status.is_err() {
let _ = needwork.send(true);
break
}
}
Ok(None) | Err(_) => {
break
}
};
let old =
shared.finished.fetch_add(1, atomic::Ordering::SeqCst);
let sent = shared.sent.load(atomic::Ordering::SeqCst);
if old + BUFFER_FACTOR * nthreads == sent {
if needwork.send(false).is_err() |
}
}
}
},
move |(needwork_tx, shared)| {
let mut iter = iter.into_iter().fuse().enumerate();
drop(needwork_tx);
let mut send_data = |n: usize| {
shared.sent.fetch_add(n, atomic::Ordering::SeqCst);
for _ in 0..n {
// TODO: maybe this could instead send
// several elements at a time, to
// reduce the number of
// allocations/atomic operations
// performed.
//
// Downside: work will be
// distributed chunkier.
let _ = work_tx.send(iter.next());
}
};
send_data(INITIAL_FACTOR * nthreads);
loop {
match needwork_rx.recv() {
// closed, done!
Ok(true) | Err(_) => break,
Ok(false) => {
// ignore return, because we
// need to wait until the
// workers have exited (i.e.,
// the Err arm above)
let _ = send_data(BUFFER_FACTOR * nthreads);
}
}
}
})
};
UnorderedParMap {
rx: rx,
_guard: handle,
}
}
/// Execute `f` on `iter` in parallel across the pool's threads,
/// returning an iterator that yields the results in the order of
/// the elements of `iter` to which they correspond.
///
/// This is a drop-in replacement for `iter.map(f)`, that runs in
/// parallel, and consumes `iter` as the pool's threads complete
/// their previous tasks.
///
/// See `unordered_map` if the output order is unimportant.
///
/// # Examples
///
/// ```rust
/// extern crate crossbeam;
/// extern crate simple_parallel;
/// use simple_parallel::Pool;
///
/// # fn main() {
/// let mut pool = Pool::new(4);
///
/// // create a vector by adjusting 0..8, in parallel
/// let elements: Vec<_> = crossbeam::scope(|scope| {
/// pool.map(scope, 0..8, |i| i + 10).collect()
/// });
///
/// assert_eq!(elements, &[10, 11, 12, 13, 14, 15, 16, 17]);
/// # }
/// ```
pub fn map<'pool, 'a, I: IntoIterator, F, T>(&'pool mut self, scope: &Scope<'a>, iter: I, f: F)
-> ParMap<'pool, 'a, T>
where I: 'a + Send,
I::Item: Send + 'a,
F: 'a + Send + Sync + Fn(I::Item) -> T,
T: Send + 'a
{
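// Order restoration happens on the consumer side: results come back from `unordered_map`
// tagged with their index, `looking_for` is the next index to yield and `queue` buffers
// results that arrive ahead of it (the reordering itself lives in ParMap's Iterator impl,
// which is not shown here).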
ParMap {
unordered: self.unordered_map(scope, iter, f),
looking_for: 0,
queue: BinaryHeap::new(),
}
}
}
/// Low-level/internal functionality.
impl Pool {
/// Run a job on the thread pool.
///
/// `gen_fn` is called `self.n_threads` times to create the
/// functions to execute on the worker threads. Each of these is
/// immediately called exactly once on a worker thread (that is,
/// they are semantically `FnOnce`), and `main_fn` is also called,
/// on the supervisor thread. It is expected that the workers and
| {
break
} | conditional_block |
needless_pass_by_ref_mut.rs | use super::needless_pass_by_value::requires_exact_signature;
use clippy_utils::diagnostics::span_lint_hir_and_then;
use clippy_utils::source::snippet;
use clippy_utils::{get_parent_node, is_from_proc_macro, is_self};
use rustc_data_structures::fx::{FxHashSet, FxIndexMap};
use rustc_errors::Applicability;
use rustc_hir::intravisit::{walk_qpath, FnKind, Visitor};
use rustc_hir::{Body, ExprKind, FnDecl, HirId, HirIdMap, HirIdSet, Impl, ItemKind, Mutability, Node, PatKind, QPath};
use rustc_hir_typeck::expr_use_visitor as euv;
use rustc_infer::infer::TyCtxtInferExt;
use rustc_lint::{LateContext, LateLintPass};
use rustc_middle::hir::map::associated_body;
use rustc_middle::hir::nested_filter::OnlyBodies;
use rustc_middle::mir::FakeReadCause;
use rustc_middle::ty::{self, Ty, UpvarId, UpvarPath};
use rustc_session::{declare_tool_lint, impl_lint_pass};
use rustc_span::def_id::LocalDefId;
use rustc_span::symbol::kw;
use rustc_span::Span;
use rustc_target::spec::abi::Abi;
declare_clippy_lint! {
/// ### What it does
/// Check if a `&mut` function argument is actually used mutably.
///
/// Be careful if the function is publicly reexported as it would break compatibility with
/// users of this function.
///
/// ### Why is this bad?
/// Less `mut` means fewer fights with the borrow checker. It can also lead to more
/// opportunities for parallelization.
///
/// ### Example
/// ```rust
/// fn foo(y: &mut i32) -> i32 {
/// 12 + *y
/// }
/// ```
/// Use instead:
/// ```rust
/// fn foo(y: &i32) -> i32 {
/// 12 + *y
/// }
/// ```
#[clippy::version = "1.72.0"]
pub NEEDLESS_PASS_BY_REF_MUT,
suspicious,
"using a `&mut` argument when it's not mutated"
}
#[derive(Clone)]
pub struct NeedlessPassByRefMut<'tcx> {
avoid_breaking_exported_api: bool,
used_fn_def_ids: FxHashSet<LocalDefId>,
fn_def_ids_to_maybe_unused_mut: FxIndexMap<LocalDefId, Vec<rustc_hir::Ty<'tcx>>>,
}
impl NeedlessPassByRefMut<'_> {
pub fn new(avoid_breaking_exported_api: bool) -> Self {
Self {
avoid_breaking_exported_api,
used_fn_def_ids: FxHashSet::default(),
fn_def_ids_to_maybe_unused_mut: FxIndexMap::default(),
}
}
}
impl_lint_pass!(NeedlessPassByRefMut<'_> => [NEEDLESS_PASS_BY_REF_MUT]);
fn should_skip<'tcx>(
cx: &LateContext<'tcx>,
input: rustc_hir::Ty<'tcx>,
ty: Ty<'_>,
arg: &rustc_hir::Param<'_>,
) -> bool {
// We check if this is a `&mut`. `ref_mutability` returns `None` if it's not a reference.
if !matches!(ty.ref_mutability(), Some(Mutability::Mut)) {
return true;
}
if is_self(arg) {
return true;
}
if let PatKind::Binding(.., name, _) = arg.pat.kind {
// If it's a potentially unused variable, we don't check it.
if name.name == kw::Underscore || name.as_str().starts_with('_') {
return true;
}
}
// All spans generated from a proc-macro invocation are the same...
is_from_proc_macro(cx, &input)
}
impl<'tcx> LateLintPass<'tcx> for NeedlessPassByRefMut<'tcx> {
fn check_fn(
&mut self,
cx: &LateContext<'tcx>,
kind: FnKind<'tcx>,
decl: &'tcx FnDecl<'tcx>,
body: &'tcx Body<'_>,
span: Span,
fn_def_id: LocalDefId,
) {
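// Overview: bail out early for macro expansions, non-Rust ABIs, closures and trait impls,
// then walk the body (plus any async closure bodies) with ExprUseVisitor to collect the
// bindings that are actually used mutably; `&mut` parameters missing from that set are
// recorded and reported in `check_crate_post`, unless the function is later found to be
// used as a `fn` value.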
if span.from_expansion() {
return;
}
let hir_id = cx.tcx.hir().local_def_id_to_hir_id(fn_def_id);
let is_async = match kind {
FnKind::ItemFn(.., header) => {
let attrs = cx.tcx.hir().attrs(hir_id);
if header.abi != Abi::Rust || requires_exact_signature(attrs) {
return;
}
header.is_async()
},
FnKind::Method(.., sig) => sig.header.is_async(),
FnKind::Closure => return,
};
// Exclude non-inherent impls
if let Some(Node::Item(item)) = cx.tcx.hir().find_parent(hir_id) {
if matches!(
item.kind,
ItemKind::Impl(Impl { of_trait: Some(_), .. }) | ItemKind::Trait(..)
) {
return;
}
}
let fn_sig = cx.tcx.fn_sig(fn_def_id).subst_identity();
let fn_sig = cx.tcx.liberate_late_bound_regions(fn_def_id.to_def_id(), fn_sig);
// If there are no `&mut` arguments, no need to go any further.
let mut it = decl
.inputs
.iter()
.zip(fn_sig.inputs())
.zip(body.params)
.filter(|((&input, &ty), arg)| !should_skip(cx, input, ty, arg))
.peekable();
if it.peek().is_none() {
return;
}
// Collect variables mutably used and spans which will need dereferencings from the
// function body.
let MutablyUsedVariablesCtxt { mutably_used_vars,.. } = {
let mut ctx = MutablyUsedVariablesCtxt::default();
let infcx = cx.tcx.infer_ctxt().build();
euv::ExprUseVisitor::new(&mut ctx, &infcx, fn_def_id, cx.param_env, cx.typeck_results()).consume_body(body);
if is_async {
let closures = ctx.async_closures.clone();
let hir = cx.tcx.hir();
for closure in closures {
ctx.prev_bind = None;
ctx.prev_move_to_closure.clear();
if let Some(body) = hir
.find_by_def_id(closure)
.and_then(associated_body)
.map(|(_, body_id)| hir.body(body_id))
{
euv::ExprUseVisitor::new(&mut ctx, &infcx, closure, cx.param_env, cx.typeck_results())
.consume_body(body);
}
}
}
ctx
};
for ((&input, &_), arg) in it {
// Only take `&mut` arguments.
if let PatKind::Binding(_, canonical_id, ..) = arg.pat.kind
&& !mutably_used_vars.contains(&canonical_id)
{
self.fn_def_ids_to_maybe_unused_mut.entry(fn_def_id).or_default().push(input);
}
}
}
fn check_crate_post(&mut self, cx: &LateContext<'tcx>) {
cx.tcx.hir().visit_all_item_likes_in_crate(&mut FnNeedsMutVisitor {
cx,
used_fn_def_ids: &mut self.used_fn_def_ids,
});
for (fn_def_id, unused) in self
.fn_def_ids_to_maybe_unused_mut
.iter()
.filter(|(def_id, _)| !self.used_fn_def_ids.contains(def_id))
{
let show_semver_warning =
self.avoid_breaking_exported_api && cx.effective_visibilities.is_exported(*fn_def_id);
for input in unused {
// If the argument is never used mutably, we emit the warning.
let sp = input.span;
if let rustc_hir::TyKind::Ref(_, inner_ty) = input.kind {
span_lint_hir_and_then(
cx,
NEEDLESS_PASS_BY_REF_MUT,
cx.tcx.hir().local_def_id_to_hir_id(*fn_def_id),
sp,
"this argument is a mutable reference, but not used mutably",
|diag| {
diag.span_suggestion(
sp,
"consider changing to".to_string(),
format!("&{}", snippet(cx, cx.tcx.hir().span(inner_ty.ty.hir_id), "_"),),
Applicability::Unspecified,
);
if show_semver_warning {
diag.warn("changing this function will impact semver compatibility");
}
},
);
}
}
}
}
}
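/// Result of walking one body with `ExprUseVisitor`: `mutably_used_vars` holds the bindings
/// that were actually used mutably, `aliases` maps a rebinding back to the binding it came
/// from (so mutating the alias also marks the original), `prev_move_to_closure` tracks
/// captures moved into async blocks, and `async_closures` lists closure bodies that still
/// need a separate pass.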
#[derive(Default)]
struct MutablyUsedVariablesCtxt {
mutably_used_vars: HirIdSet,
prev_bind: Option<HirId>,
prev_move_to_closure: HirIdSet,
aliases: HirIdMap<HirId>,
async_closures: FxHashSet<LocalDefId>,
}
impl MutablyUsedVariablesCtxt {
fn add_mutably_used_var(&mut self, mut used_id: HirId) {
while let Some(id) = self.aliases.get(&used_id) {
self.mutably_used_vars.insert(used_id);
used_id = *id;
}
self.mutably_used_vars.insert(used_id);
}
}
impl<'tcx> euv::Delegate<'tcx> for MutablyUsedVariablesCtxt {
fn consume(&mut self, cmt: &euv::PlaceWithHirId<'tcx>, _id: HirId) {
if let euv::Place {
base:
euv::PlaceBase::Local(vid)
| euv::PlaceBase::Upvar(UpvarId {
var_path: UpvarPath { hir_id: vid },
..
}),
base_ty,
..
} = &cmt.place
{
if let Some(bind_id) = self.prev_bind.take() {
if bind_id != *vid {
self.aliases.insert(bind_id, *vid);
}
} else if !self.prev_move_to_closure.contains(vid)
&& matches!(base_ty.ref_mutability(), Some(Mutability::Mut))
{
self.add_mutably_used_var(*vid);
}
self.prev_bind = None;
self.prev_move_to_closure.remove(vid);
}
}
fn borrow(&mut self, cmt: &euv::PlaceWithHirId<'tcx>, _id: HirId, borrow: ty::BorrowKind) {
self.prev_bind = None;
if let euv::Place {
base: euv::PlaceBase::Local(vid),
base_ty,
..
} = &cmt.place
{
// If this is a mutable borrow, it was obviously used mutably so we add it. However
// for `UniqueImmBorrow`, it's interesting because if you do: `array[0] = value` inside
// a closure, it'll return this variant whereas if you have just an index access, it'll
// return `ImmBorrow`. So if there is "Unique" and it's a mutable reference, we add it
// to the mutably used variables set.
if borrow == ty::BorrowKind::MutBorrow
|| (borrow == ty::BorrowKind::UniqueImmBorrow && base_ty.ref_mutability() == Some(Mutability::Mut))
{
self.add_mutably_used_var(*vid);
}
}
}
fn mutate(&mut self, cmt: &euv::PlaceWithHirId<'tcx>, _id: HirId) |
fn copy(&mut self, _cmt: &euv::PlaceWithHirId<'tcx>, _id: HirId) {
self.prev_bind = None;
}
fn fake_read(
&mut self,
cmt: &rustc_hir_typeck::expr_use_visitor::PlaceWithHirId<'tcx>,
cause: FakeReadCause,
_id: HirId,
) {
if let euv::Place {
base:
euv::PlaceBase::Upvar(UpvarId {
var_path: UpvarPath { hir_id: vid },
..
}),
..
} = &cmt.place
{
if let FakeReadCause::ForLet(Some(inner)) = cause {
// Seems like we are inside an async function. We need to store the closure `DefId`
// to go through it afterwards.
self.async_closures.insert(inner);
self.aliases.insert(cmt.hir_id, *vid);
self.prev_move_to_closure.insert(*vid);
}
}
}
fn bind(&mut self, _cmt: &euv::PlaceWithHirId<'tcx>, id: HirId) {
self.prev_bind = Some(id);
}
}
/// A final pass to check for paths referencing this function that require the argument to be
/// `&mut`, basically if the function is ever used as a `fn`-like argument.
struct FnNeedsMutVisitor<'a, 'tcx> {
cx: &'a LateContext<'tcx>,
used_fn_def_ids: &'a mut FxHashSet<LocalDefId>,
}
impl<'tcx> Visitor<'tcx> for FnNeedsMutVisitor<'_, 'tcx> {
type NestedFilter = OnlyBodies;
fn nested_visit_map(&mut self) -> Self::Map {
self.cx.tcx.hir()
}
fn visit_qpath(&mut self, qpath: &'tcx QPath<'tcx>, hir_id: HirId, _: Span) {
walk_qpath(self, qpath, hir_id);
let Self { cx, used_fn_def_ids } = self;
// #11182; do not lint if mutability is required elsewhere
if let Node::Expr(expr) = cx.tcx.hir().get(hir_id)
&& let Some(parent) = get_parent_node(cx.tcx, expr.hir_id)
&& let ty::FnDef(def_id, _) = cx.tcx.typeck(cx.tcx.hir().enclosing_body_owner(hir_id)).expr_ty(expr).kind()
&& let Some(def_id) = def_id.as_local()
{
if let Node::Expr(e) = parent
&& let ExprKind::Call(call, _) = e.kind
&& call.hir_id == expr.hir_id
{
return;
}
// We don't need to check each argument individually as you cannot coerce a function
// taking `&mut` -> `&`, for some reason, so if we've gotten this far we know it's
// passed as a `fn`-like argument (or is unified) and should ignore every "unused"
// argument entirely
used_fn_def_ids.insert(def_id);
}
}
}
| {
self.prev_bind = None;
if let euv::Place {
projections,
base: euv::PlaceBase::Local(vid),
..
} = &cmt.place
{
if !projections.is_empty() {
self.add_mutably_used_var(*vid);
}
}
} | identifier_body |
needless_pass_by_ref_mut.rs | use super::needless_pass_by_value::requires_exact_signature;
use clippy_utils::diagnostics::span_lint_hir_and_then;
use clippy_utils::source::snippet;
use clippy_utils::{get_parent_node, is_from_proc_macro, is_self};
use rustc_data_structures::fx::{FxHashSet, FxIndexMap};
use rustc_errors::Applicability;
use rustc_hir::intravisit::{walk_qpath, FnKind, Visitor};
use rustc_hir::{Body, ExprKind, FnDecl, HirId, HirIdMap, HirIdSet, Impl, ItemKind, Mutability, Node, PatKind, QPath};
use rustc_hir_typeck::expr_use_visitor as euv;
use rustc_infer::infer::TyCtxtInferExt;
use rustc_lint::{LateContext, LateLintPass};
use rustc_middle::hir::map::associated_body;
use rustc_middle::hir::nested_filter::OnlyBodies;
use rustc_middle::mir::FakeReadCause;
use rustc_middle::ty::{self, Ty, UpvarId, UpvarPath};
use rustc_session::{declare_tool_lint, impl_lint_pass};
use rustc_span::def_id::LocalDefId;
use rustc_span::symbol::kw;
use rustc_span::Span;
use rustc_target::spec::abi::Abi;
declare_clippy_lint! {
/// ### What it does
/// Check if a `&mut` function argument is actually used mutably.
///
/// Be careful if the function is publicly reexported as it would break compatibility with
/// users of this function.
///
/// ### Why is this bad?
/// Less `mut` means fewer fights with the borrow checker. It can also lead to more
/// opportunities for parallelization.
///
/// ### Example
/// ```rust
/// fn foo(y: &mut i32) -> i32 {
/// 12 + *y
/// }
/// ```
/// Use instead:
/// ```rust
/// fn foo(y: &i32) -> i32 {
/// 12 + *y
/// }
/// ```
#[clippy::version = "1.72.0"]
pub NEEDLESS_PASS_BY_REF_MUT,
suspicious,
"using a `&mut` argument when it's not mutated"
}
#[derive(Clone)]
pub struct NeedlessPassByRefMut<'tcx> {
avoid_breaking_exported_api: bool,
used_fn_def_ids: FxHashSet<LocalDefId>,
fn_def_ids_to_maybe_unused_mut: FxIndexMap<LocalDefId, Vec<rustc_hir::Ty<'tcx>>>,
}
impl NeedlessPassByRefMut<'_> {
pub fn new(avoid_breaking_exported_api: bool) -> Self {
Self {
avoid_breaking_exported_api,
used_fn_def_ids: FxHashSet::default(),
fn_def_ids_to_maybe_unused_mut: FxIndexMap::default(),
}
}
}
impl_lint_pass!(NeedlessPassByRefMut<'_> => [NEEDLESS_PASS_BY_REF_MUT]);
fn should_skip<'tcx>(
cx: &LateContext<'tcx>,
input: rustc_hir::Ty<'tcx>,
ty: Ty<'_>,
arg: &rustc_hir::Param<'_>,
) -> bool {
// We check if this is a `&mut`. `ref_mutability` returns `None` if it's not a reference.
if !matches!(ty.ref_mutability(), Some(Mutability::Mut)) {
return true;
}
if is_self(arg) {
return true;
}
if let PatKind::Binding(.., name, _) = arg.pat.kind {
// If it's a potentially unused variable, we don't check it.
if name.name == kw::Underscore || name.as_str().starts_with('_') {
return true;
}
}
// All spans generated from a proc-macro invocation are the same...
is_from_proc_macro(cx, &input)
}
impl<'tcx> LateLintPass<'tcx> for NeedlessPassByRefMut<'tcx> {
fn check_fn(
&mut self,
cx: &LateContext<'tcx>,
kind: FnKind<'tcx>,
decl: &'tcx FnDecl<'tcx>,
body: &'tcx Body<'_>,
span: Span,
fn_def_id: LocalDefId,
) {
if span.from_expansion() {
return;
}
let hir_id = cx.tcx.hir().local_def_id_to_hir_id(fn_def_id);
let is_async = match kind {
FnKind::ItemFn(.., header) => {
let attrs = cx.tcx.hir().attrs(hir_id);
if header.abi != Abi::Rust || requires_exact_signature(attrs) {
return;
}
header.is_async()
},
FnKind::Method(.., sig) => sig.header.is_async(),
FnKind::Closure => return,
};
// Exclude non-inherent impls
if let Some(Node::Item(item)) = cx.tcx.hir().find_parent(hir_id) {
if matches!(
item.kind,
ItemKind::Impl(Impl { of_trait: Some(_), .. }) | ItemKind::Trait(..)
) {
return;
}
}
let fn_sig = cx.tcx.fn_sig(fn_def_id).subst_identity();
let fn_sig = cx.tcx.liberate_late_bound_regions(fn_def_id.to_def_id(), fn_sig);
// If there are no `&mut` arguments, no need to go any further.
let mut it = decl
.inputs
.iter()
.zip(fn_sig.inputs())
.zip(body.params)
.filter(|((&input, &ty), arg)| !should_skip(cx, input, ty, arg))
.peekable();
if it.peek().is_none() {
return;
}
// Collect variables mutably used and spans which will need dereferencings from the
// function body.
let MutablyUsedVariablesCtxt { mutably_used_vars,.. } = {
let mut ctx = MutablyUsedVariablesCtxt::default();
let infcx = cx.tcx.infer_ctxt().build();
euv::ExprUseVisitor::new(&mut ctx, &infcx, fn_def_id, cx.param_env, cx.typeck_results()).consume_body(body);
if is_async {
let closures = ctx.async_closures.clone();
let hir = cx.tcx.hir();
for closure in closures {
ctx.prev_bind = None;
ctx.prev_move_to_closure.clear();
if let Some(body) = hir
.find_by_def_id(closure)
.and_then(associated_body)
.map(|(_, body_id)| hir.body(body_id))
{
euv::ExprUseVisitor::new(&mut ctx, &infcx, closure, cx.param_env, cx.typeck_results())
.consume_body(body);
}
}
}
ctx
};
for ((&input, &_), arg) in it {
// Only take `&mut` arguments.
if let PatKind::Binding(_, canonical_id, ..) = arg.pat.kind
&& !mutably_used_vars.contains(&canonical_id)
{
self.fn_def_ids_to_maybe_unused_mut.entry(fn_def_id).or_default().push(input);
}
}
}
fn check_crate_post(&mut self, cx: &LateContext<'tcx>) {
cx.tcx.hir().visit_all_item_likes_in_crate(&mut FnNeedsMutVisitor {
cx,
used_fn_def_ids: &mut self.used_fn_def_ids,
});
for (fn_def_id, unused) in self
.fn_def_ids_to_maybe_unused_mut
.iter()
.filter(|(def_id, _)| !self.used_fn_def_ids.contains(def_id))
{
let show_semver_warning =
self.avoid_breaking_exported_api && cx.effective_visibilities.is_exported(*fn_def_id);
for input in unused {
// If the argument is never used mutably, we emit the warning.
let sp = input.span;
if let rustc_hir::TyKind::Ref(_, inner_ty) = input.kind {
span_lint_hir_and_then(
cx,
NEEDLESS_PASS_BY_REF_MUT,
cx.tcx.hir().local_def_id_to_hir_id(*fn_def_id),
sp,
"this argument is a mutable reference, but not used mutably",
|diag| {
diag.span_suggestion(
sp,
"consider changing to".to_string(),
format!("&{}", snippet(cx, cx.tcx.hir().span(inner_ty.ty.hir_id), "_"),),
Applicability::Unspecified,
);
if show_semver_warning {
diag.warn("changing this function will impact semver compatibility");
}
},
);
}
}
}
}
}
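/// Result of walking one body with `ExprUseVisitor`: `mutably_used_vars` holds the bindings
/// that were actually used mutably, `aliases` maps a rebinding back to the binding it came
/// from (so mutating the alias also marks the original), `prev_move_to_closure` tracks
/// captures moved into async blocks, and `async_closures` lists closure bodies that still
/// need a separate pass.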
#[derive(Default)]
struct MutablyUsedVariablesCtxt {
mutably_used_vars: HirIdSet,
prev_bind: Option<HirId>,
prev_move_to_closure: HirIdSet,
aliases: HirIdMap<HirId>,
async_closures: FxHashSet<LocalDefId>,
}
impl MutablyUsedVariablesCtxt {
fn add_mutably_used_var(&mut self, mut used_id: HirId) {
while let Some(id) = self.aliases.get(&used_id) {
self.mutably_used_vars.insert(used_id);
used_id = *id;
}
self.mutably_used_vars.insert(used_id);
}
}
impl<'tcx> euv::Delegate<'tcx> for MutablyUsedVariablesCtxt {
fn consume(&mut self, cmt: &euv::PlaceWithHirId<'tcx>, _id: HirId) {
if let euv::Place {
base:
euv::PlaceBase::Local(vid)
| euv::PlaceBase::Upvar(UpvarId {
var_path: UpvarPath { hir_id: vid },
..
}),
base_ty,
..
} = &cmt.place
{
if let Some(bind_id) = self.prev_bind.take() {
if bind_id != *vid {
self.aliases.insert(bind_id, *vid);
}
} else if !self.prev_move_to_closure.contains(vid)
&& matches!(base_ty.ref_mutability(), Some(Mutability::Mut))
{
self.add_mutably_used_var(*vid);
}
self.prev_bind = None;
self.prev_move_to_closure.remove(vid);
}
}
fn borrow(&mut self, cmt: &euv::PlaceWithHirId<'tcx>, _id: HirId, borrow: ty::BorrowKind) {
self.prev_bind = None;
if let euv::Place {
base: euv::PlaceBase::Local(vid),
base_ty,
..
} = &cmt.place
{
// If this is a mutable borrow, it was obviously used mutably so we add it. However
// for `UniqueImmBorrow`, it's interesting because if you do: `array[0] = value` inside
// a closure, it'll return this variant whereas if you have just an index access, it'll
// return `ImmBorrow`. So if there is "Unique" and it's a mutable reference, we add it
// to the mutably used variables set.
if borrow == ty::BorrowKind::MutBorrow
|| (borrow == ty::BorrowKind::UniqueImmBorrow && base_ty.ref_mutability() == Some(Mutability::Mut))
{
self.add_mutably_used_var(*vid);
}
}
}
fn mutate(&mut self, cmt: &euv::PlaceWithHirId<'tcx>, _id: HirId) {
self.prev_bind = None;
if let euv::Place {
projections,
base: euv::PlaceBase::Local(vid),
..
} = &cmt.place
{
if !projections.is_empty() {
self.add_mutably_used_var(*vid);
}
}
}
fn copy(&mut self, _cmt: &euv::PlaceWithHirId<'tcx>, _id: HirId) {
self.prev_bind = None;
}
fn | (
&mut self,
cmt: &rustc_hir_typeck::expr_use_visitor::PlaceWithHirId<'tcx>,
cause: FakeReadCause,
_id: HirId,
) {
if let euv::Place {
base:
euv::PlaceBase::Upvar(UpvarId {
var_path: UpvarPath { hir_id: vid },
..
}),
..
} = &cmt.place
{
if let FakeReadCause::ForLet(Some(inner)) = cause {
// Seems like we are inside an async function. We need to store the closure `DefId`
// to go through it afterwards.
self.async_closures.insert(inner);
self.aliases.insert(cmt.hir_id, *vid);
self.prev_move_to_closure.insert(*vid);
}
}
}
fn bind(&mut self, _cmt: &euv::PlaceWithHirId<'tcx>, id: HirId) {
self.prev_bind = Some(id);
}
}
/// A final pass to check for paths referencing this function that require the argument to be
/// `&mut`, basically if the function is ever used as a `fn`-like argument.
struct FnNeedsMutVisitor<'a, 'tcx> {
cx: &'a LateContext<'tcx>,
used_fn_def_ids: &'a mut FxHashSet<LocalDefId>,
}
impl<'tcx> Visitor<'tcx> for FnNeedsMutVisitor<'_, 'tcx> {
type NestedFilter = OnlyBodies;
fn nested_visit_map(&mut self) -> Self::Map {
self.cx.tcx.hir()
}
fn visit_qpath(&mut self, qpath: &'tcx QPath<'tcx>, hir_id: HirId, _: Span) {
walk_qpath(self, qpath, hir_id);
let Self { cx, used_fn_def_ids } = self;
// #11182; do not lint if mutability is required elsewhere
if let Node::Expr(expr) = cx.tcx.hir().get(hir_id)
&& let Some(parent) = get_parent_node(cx.tcx, expr.hir_id)
&& let ty::FnDef(def_id, _) = cx.tcx.typeck(cx.tcx.hir().enclosing_body_owner(hir_id)).expr_ty(expr).kind()
&& let Some(def_id) = def_id.as_local()
{
if let Node::Expr(e) = parent
&& let ExprKind::Call(call, _) = e.kind
&& call.hir_id == expr.hir_id
{
return;
}
// We don't need to check each argument individually as you cannot coerce a function
// taking `&mut` -> `&`, for some reason, so if we've gotten this far we know it's
// passed as a `fn`-like argument (or is unified) and should ignore every "unused"
// argument entirely
used_fn_def_ids.insert(def_id);
}
}
}
| fake_read | identifier_name |
needless_pass_by_ref_mut.rs | use super::needless_pass_by_value::requires_exact_signature;
use clippy_utils::diagnostics::span_lint_hir_and_then;
use clippy_utils::source::snippet;
use clippy_utils::{get_parent_node, is_from_proc_macro, is_self};
use rustc_data_structures::fx::{FxHashSet, FxIndexMap};
use rustc_errors::Applicability;
use rustc_hir::intravisit::{walk_qpath, FnKind, Visitor};
use rustc_hir::{Body, ExprKind, FnDecl, HirId, HirIdMap, HirIdSet, Impl, ItemKind, Mutability, Node, PatKind, QPath};
use rustc_hir_typeck::expr_use_visitor as euv;
use rustc_infer::infer::TyCtxtInferExt;
use rustc_lint::{LateContext, LateLintPass};
use rustc_middle::hir::map::associated_body;
use rustc_middle::hir::nested_filter::OnlyBodies;
use rustc_middle::mir::FakeReadCause;
use rustc_middle::ty::{self, Ty, UpvarId, UpvarPath};
use rustc_session::{declare_tool_lint, impl_lint_pass};
use rustc_span::def_id::LocalDefId;
use rustc_span::symbol::kw;
use rustc_span::Span;
use rustc_target::spec::abi::Abi;
declare_clippy_lint! {
/// ### What it does
/// Check if a `&mut` function argument is actually used mutably.
///
/// Be careful if the function is publicly reexported as it would break compatibility with
/// users of this function.
///
/// ### Why is this bad?
/// Less `mut` means fewer fights with the borrow checker. It can also lead to more
/// opportunities for parallelization.
///
/// ### Example
/// ```rust
/// fn foo(y: &mut i32) -> i32 {
/// 12 + *y
/// }
/// ```
/// Use instead:
/// ```rust
/// fn foo(y: &i32) -> i32 {
/// 12 + *y
/// }
/// ```
#[clippy::version = "1.72.0"]
pub NEEDLESS_PASS_BY_REF_MUT,
suspicious,
"using a `&mut` argument when it's not mutated"
}
#[derive(Clone)]
pub struct NeedlessPassByRefMut<'tcx> {
avoid_breaking_exported_api: bool,
used_fn_def_ids: FxHashSet<LocalDefId>,
fn_def_ids_to_maybe_unused_mut: FxIndexMap<LocalDefId, Vec<rustc_hir::Ty<'tcx>>>,
}
impl NeedlessPassByRefMut<'_> {
pub fn new(avoid_breaking_exported_api: bool) -> Self {
Self {
avoid_breaking_exported_api,
used_fn_def_ids: FxHashSet::default(),
fn_def_ids_to_maybe_unused_mut: FxIndexMap::default(),
}
}
}
impl_lint_pass!(NeedlessPassByRefMut<'_> => [NEEDLESS_PASS_BY_REF_MUT]);
fn should_skip<'tcx>(
cx: &LateContext<'tcx>,
input: rustc_hir::Ty<'tcx>,
ty: Ty<'_>,
arg: &rustc_hir::Param<'_>,
) -> bool {
// We check if this is a `&mut`. `ref_mutability` returns `None` if it's not a reference.
if !matches!(ty.ref_mutability(), Some(Mutability::Mut)) {
return true;
}
if is_self(arg) {
return true;
}
if let PatKind::Binding(.., name, _) = arg.pat.kind {
// If it's a potentially unused variable, we don't check it.
if name.name == kw::Underscore || name.as_str().starts_with('_') {
return true;
}
}
// All spans generated from a proc-macro invocation are the same...
is_from_proc_macro(cx, &input)
}
impl<'tcx> LateLintPass<'tcx> for NeedlessPassByRefMut<'tcx> {
fn check_fn(
&mut self,
cx: &LateContext<'tcx>,
kind: FnKind<'tcx>,
decl: &'tcx FnDecl<'tcx>,
body: &'tcx Body<'_>,
span: Span,
fn_def_id: LocalDefId,
) {
if span.from_expansion() {
return;
}
let hir_id = cx.tcx.hir().local_def_id_to_hir_id(fn_def_id);
let is_async = match kind {
FnKind::ItemFn(.., header) => {
let attrs = cx.tcx.hir().attrs(hir_id);
if header.abi != Abi::Rust || requires_exact_signature(attrs) {
return;
}
header.is_async()
},
FnKind::Method(.., sig) => sig.header.is_async(),
FnKind::Closure => return,
};
// Exclude non-inherent impls
if let Some(Node::Item(item)) = cx.tcx.hir().find_parent(hir_id) {
if matches!(
item.kind,
ItemKind::Impl(Impl { of_trait: Some(_), .. }) | ItemKind::Trait(..)
) {
return;
}
}
let fn_sig = cx.tcx.fn_sig(fn_def_id).subst_identity();
let fn_sig = cx.tcx.liberate_late_bound_regions(fn_def_id.to_def_id(), fn_sig);
// If there are no `&mut` arguments, no need to go any further.
let mut it = decl
.inputs
.iter()
.zip(fn_sig.inputs())
.zip(body.params)
.filter(|((&input, &ty), arg)| !should_skip(cx, input, ty, arg))
.peekable();
if it.peek().is_none() {
return;
}
// Collect variables mutably used and spans which will need dereferencings from the
// function body.
let MutablyUsedVariablesCtxt { mutably_used_vars,.. } = {
let mut ctx = MutablyUsedVariablesCtxt::default();
let infcx = cx.tcx.infer_ctxt().build();
euv::ExprUseVisitor::new(&mut ctx, &infcx, fn_def_id, cx.param_env, cx.typeck_results()).consume_body(body);
if is_async {
let closures = ctx.async_closures.clone();
let hir = cx.tcx.hir();
for closure in closures {
ctx.prev_bind = None;
ctx.prev_move_to_closure.clear();
if let Some(body) = hir
.find_by_def_id(closure)
.and_then(associated_body)
.map(|(_, body_id)| hir.body(body_id))
{
euv::ExprUseVisitor::new(&mut ctx, &infcx, closure, cx.param_env, cx.typeck_results())
.consume_body(body);
}
}
}
ctx
};
for ((&input, &_), arg) in it {
// Only take `&mut` arguments.
if let PatKind::Binding(_, canonical_id, ..) = arg.pat.kind
&& !mutably_used_vars.contains(&canonical_id)
{
self.fn_def_ids_to_maybe_unused_mut.entry(fn_def_id).or_default().push(input);
}
}
}
fn check_crate_post(&mut self, cx: &LateContext<'tcx>) {
cx.tcx.hir().visit_all_item_likes_in_crate(&mut FnNeedsMutVisitor {
cx,
used_fn_def_ids: &mut self.used_fn_def_ids,
});
for (fn_def_id, unused) in self
.fn_def_ids_to_maybe_unused_mut
.iter()
.filter(|(def_id, _)| !self.used_fn_def_ids.contains(def_id))
{
let show_semver_warning =
self.avoid_breaking_exported_api && cx.effective_visibilities.is_exported(*fn_def_id);
for input in unused {
// If the argument is never used mutably, we emit the warning.
let sp = input.span;
if let rustc_hir::TyKind::Ref(_, inner_ty) = input.kind |
}
}
}
}
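/// Result of walking one body with `ExprUseVisitor`: `mutably_used_vars` holds the bindings
/// that were actually used mutably, `aliases` maps a rebinding back to the binding it came
/// from (so mutating the alias also marks the original), `prev_move_to_closure` tracks
/// captures moved into async blocks, and `async_closures` lists closure bodies that still
/// need a separate pass.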
#[derive(Default)]
struct MutablyUsedVariablesCtxt {
mutably_used_vars: HirIdSet,
prev_bind: Option<HirId>,
prev_move_to_closure: HirIdSet,
aliases: HirIdMap<HirId>,
async_closures: FxHashSet<LocalDefId>,
}
impl MutablyUsedVariablesCtxt {
fn add_mutably_used_var(&mut self, mut used_id: HirId) {
while let Some(id) = self.aliases.get(&used_id) {
self.mutably_used_vars.insert(used_id);
used_id = *id;
}
self.mutably_used_vars.insert(used_id);
}
}
impl<'tcx> euv::Delegate<'tcx> for MutablyUsedVariablesCtxt {
fn consume(&mut self, cmt: &euv::PlaceWithHirId<'tcx>, _id: HirId) {
if let euv::Place {
base:
euv::PlaceBase::Local(vid)
| euv::PlaceBase::Upvar(UpvarId {
var_path: UpvarPath { hir_id: vid },
..
}),
base_ty,
..
} = &cmt.place
{
if let Some(bind_id) = self.prev_bind.take() {
if bind_id != *vid {
self.aliases.insert(bind_id, *vid);
}
} else if !self.prev_move_to_closure.contains(vid)
&& matches!(base_ty.ref_mutability(), Some(Mutability::Mut))
{
self.add_mutably_used_var(*vid);
}
self.prev_bind = None;
self.prev_move_to_closure.remove(vid);
}
}
fn borrow(&mut self, cmt: &euv::PlaceWithHirId<'tcx>, _id: HirId, borrow: ty::BorrowKind) {
self.prev_bind = None;
if let euv::Place {
base: euv::PlaceBase::Local(vid),
base_ty,
..
} = &cmt.place
{
// If this is a mutable borrow, it was obviously used mutably so we add it. However
// for `UniqueImmBorrow`, it's interesting because if you do: `array[0] = value` inside
// a closure, it'll return this variant whereas if you have just an index access, it'll
// return `ImmBorrow`. So if there is "Unique" and it's a mutable reference, we add it
// to the mutably used variables set.
if borrow == ty::BorrowKind::MutBorrow
|| (borrow == ty::BorrowKind::UniqueImmBorrow && base_ty.ref_mutability() == Some(Mutability::Mut))
{
self.add_mutably_used_var(*vid);
}
}
}
fn mutate(&mut self, cmt: &euv::PlaceWithHirId<'tcx>, _id: HirId) {
self.prev_bind = None;
if let euv::Place {
projections,
base: euv::PlaceBase::Local(vid),
..
} = &cmt.place
{
if !projections.is_empty() {
self.add_mutably_used_var(*vid);
}
}
}
fn copy(&mut self, _cmt: &euv::PlaceWithHirId<'tcx>, _id: HirId) {
self.prev_bind = None;
}
fn fake_read(
&mut self,
cmt: &rustc_hir_typeck::expr_use_visitor::PlaceWithHirId<'tcx>,
cause: FakeReadCause,
_id: HirId,
) {
if let euv::Place {
base:
euv::PlaceBase::Upvar(UpvarId {
var_path: UpvarPath { hir_id: vid },
..
}),
..
} = &cmt.place
{
if let FakeReadCause::ForLet(Some(inner)) = cause {
// Seems like we are inside an async function. We need to store the closure `DefId`
// to go through it afterwards.
self.async_closures.insert(inner);
self.aliases.insert(cmt.hir_id, *vid);
self.prev_move_to_closure.insert(*vid);
}
}
}
fn bind(&mut self, _cmt: &euv::PlaceWithHirId<'tcx>, id: HirId) {
self.prev_bind = Some(id);
}
}
/// A final pass to check for paths referencing this function that require the argument to be
/// `&mut`, basically if the function is ever used as a `fn`-like argument.
struct FnNeedsMutVisitor<'a, 'tcx> {
cx: &'a LateContext<'tcx>,
used_fn_def_ids: &'a mut FxHashSet<LocalDefId>,
}
impl<'tcx> Visitor<'tcx> for FnNeedsMutVisitor<'_, 'tcx> {
type NestedFilter = OnlyBodies;
fn nested_visit_map(&mut self) -> Self::Map {
self.cx.tcx.hir()
}
fn visit_qpath(&mut self, qpath: &'tcx QPath<'tcx>, hir_id: HirId, _: Span) {
walk_qpath(self, qpath, hir_id);
let Self { cx, used_fn_def_ids } = self;
// #11182; do not lint if mutability is required elsewhere
if let Node::Expr(expr) = cx.tcx.hir().get(hir_id)
&& let Some(parent) = get_parent_node(cx.tcx, expr.hir_id)
&& let ty::FnDef(def_id, _) = cx.tcx.typeck(cx.tcx.hir().enclosing_body_owner(hir_id)).expr_ty(expr).kind()
&& let Some(def_id) = def_id.as_local()
{
if let Node::Expr(e) = parent
&& let ExprKind::Call(call, _) = e.kind
&& call.hir_id == expr.hir_id
{
return;
}
// We don't need to check each argument individually as you cannot coerce a function
// taking `&mut` -> `&`, for some reason, so if we've gotten this far we know it's
// passed as a `fn`-like argument (or is unified) and should ignore every "unused"
// argument entirely
used_fn_def_ids.insert(def_id);
}
}
}
| {
span_lint_hir_and_then(
cx,
NEEDLESS_PASS_BY_REF_MUT,
cx.tcx.hir().local_def_id_to_hir_id(*fn_def_id),
sp,
"this argument is a mutable reference, but not used mutably",
|diag| {
diag.span_suggestion(
sp,
"consider changing to".to_string(),
format!("&{}", snippet(cx, cx.tcx.hir().span(inner_ty.ty.hir_id), "_"),),
Applicability::Unspecified,
);
if show_semver_warning {
diag.warn("changing this function will impact semver compatibility");
}
},
);
} | conditional_block |
needless_pass_by_ref_mut.rs | use super::needless_pass_by_value::requires_exact_signature;
use clippy_utils::diagnostics::span_lint_hir_and_then;
use clippy_utils::source::snippet;
use clippy_utils::{get_parent_node, is_from_proc_macro, is_self};
use rustc_data_structures::fx::{FxHashSet, FxIndexMap};
use rustc_errors::Applicability;
use rustc_hir::intravisit::{walk_qpath, FnKind, Visitor};
use rustc_hir::{Body, ExprKind, FnDecl, HirId, HirIdMap, HirIdSet, Impl, ItemKind, Mutability, Node, PatKind, QPath};
use rustc_hir_typeck::expr_use_visitor as euv;
use rustc_infer::infer::TyCtxtInferExt;
use rustc_lint::{LateContext, LateLintPass};
use rustc_middle::hir::map::associated_body;
use rustc_middle::hir::nested_filter::OnlyBodies;
use rustc_middle::mir::FakeReadCause;
use rustc_middle::ty::{self, Ty, UpvarId, UpvarPath};
use rustc_session::{declare_tool_lint, impl_lint_pass};
use rustc_span::def_id::LocalDefId;
use rustc_span::symbol::kw;
use rustc_span::Span;
use rustc_target::spec::abi::Abi;
declare_clippy_lint! {
/// ### What it does
/// Check if a `&mut` function argument is actually used mutably.
///
/// Be careful if the function is publicly reexported as it would break compatibility with
/// users of this function.
///
/// ### Why is this bad?
/// Less `mut` means fewer fights with the borrow checker. It can also lead to more
/// opportunities for parallelization.
///
/// ### Example
/// ```rust
/// fn foo(y: &mut i32) -> i32 {
/// 12 + *y
/// }
/// ```
/// Use instead:
/// ```rust
/// fn foo(y: &i32) -> i32 {
/// 12 + *y
/// }
/// ```
#[clippy::version = "1.72.0"]
pub NEEDLESS_PASS_BY_REF_MUT,
suspicious,
"using a `&mut` argument when it's not mutated"
}
#[derive(Clone)]
pub struct NeedlessPassByRefMut<'tcx> {
avoid_breaking_exported_api: bool,
used_fn_def_ids: FxHashSet<LocalDefId>,
fn_def_ids_to_maybe_unused_mut: FxIndexMap<LocalDefId, Vec<rustc_hir::Ty<'tcx>>>,
}
impl NeedlessPassByRefMut<'_> {
pub fn new(avoid_breaking_exported_api: bool) -> Self {
Self {
avoid_breaking_exported_api,
used_fn_def_ids: FxHashSet::default(),
fn_def_ids_to_maybe_unused_mut: FxIndexMap::default(),
}
}
}
impl_lint_pass!(NeedlessPassByRefMut<'_> => [NEEDLESS_PASS_BY_REF_MUT]);
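// `should_skip` filters out arguments the lint must leave alone: parameters that are not `&mut`
// references, `self`, bindings deliberately prefixed with an underscore, and anything whose span
// comes from a proc-macro expansion.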
fn should_skip<'tcx>(
cx: &LateContext<'tcx>,
input: rustc_hir::Ty<'tcx>,
ty: Ty<'_>,
arg: &rustc_hir::Param<'_>,
) -> bool {
// We check if this a `&mut`. `ref_mutability` returns `None` if it's not a reference.
if !matches!(ty.ref_mutability(), Some(Mutability::Mut)) {
return true;
}
if is_self(arg) {
return true;
}
if let PatKind::Binding(.., name, _) = arg.pat.kind {
// If it's a potentially unused variable, we don't check it.
if name.name == kw::Underscore || name.as_str().starts_with('_') {
return true;
}
}
// All spans generated from a proc-macro invocation are the same...
is_from_proc_macro(cx, &input)
} | &mut self,
cx: &LateContext<'tcx>,
kind: FnKind<'tcx>,
decl: &'tcx FnDecl<'tcx>,
body: &'tcx Body<'_>,
span: Span,
fn_def_id: LocalDefId,
) {
if span.from_expansion() {
return;
}
let hir_id = cx.tcx.hir().local_def_id_to_hir_id(fn_def_id);
let is_async = match kind {
FnKind::ItemFn(.., header) => {
let attrs = cx.tcx.hir().attrs(hir_id);
if header.abi != Abi::Rust || requires_exact_signature(attrs) {
return;
}
header.is_async()
},
FnKind::Method(.., sig) => sig.header.is_async(),
FnKind::Closure => return,
};
// Exclude non-inherent impls
if let Some(Node::Item(item)) = cx.tcx.hir().find_parent(hir_id) {
if matches!(
item.kind,
ItemKind::Impl(Impl { of_trait: Some(_), .. }) | ItemKind::Trait(..)
) {
return;
}
}
let fn_sig = cx.tcx.fn_sig(fn_def_id).subst_identity();
let fn_sig = cx.tcx.liberate_late_bound_regions(fn_def_id.to_def_id(), fn_sig);
// If there are no `&mut` argument, no need to go any further.
let mut it = decl
.inputs
.iter()
.zip(fn_sig.inputs())
.zip(body.params)
.filter(|((&input, &ty), arg)| !should_skip(cx, input, ty, arg))
.peekable();
if it.peek().is_none() {
return;
}
// Collect variables mutably used and spans which will need dereferencings from the
// function body.
let MutablyUsedVariablesCtxt { mutably_used_vars, .. } = {
let mut ctx = MutablyUsedVariablesCtxt::default();
let infcx = cx.tcx.infer_ctxt().build();
euv::ExprUseVisitor::new(&mut ctx, &infcx, fn_def_id, cx.param_env, cx.typeck_results()).consume_body(body);
if is_async {
let closures = ctx.async_closures.clone();
let hir = cx.tcx.hir();
for closure in closures {
ctx.prev_bind = None;
ctx.prev_move_to_closure.clear();
if let Some(body) = hir
.find_by_def_id(closure)
.and_then(associated_body)
.map(|(_, body_id)| hir.body(body_id))
{
euv::ExprUseVisitor::new(&mut ctx, &infcx, closure, cx.param_env, cx.typeck_results())
.consume_body(body);
}
}
}
ctx
};
for ((&input, &_), arg) in it {
// Only take `&mut` arguments.
if let PatKind::Binding(_, canonical_id, ..) = arg.pat.kind
&& !mutably_used_vars.contains(&canonical_id)
{
self.fn_def_ids_to_maybe_unused_mut.entry(fn_def_id).or_default().push(input);
}
}
}
fn check_crate_post(&mut self, cx: &LateContext<'tcx>) {
cx.tcx.hir().visit_all_item_likes_in_crate(&mut FnNeedsMutVisitor {
cx,
used_fn_def_ids: &mut self.used_fn_def_ids,
});
for (fn_def_id, unused) in self
.fn_def_ids_to_maybe_unused_mut
.iter()
.filter(|(def_id, _)| !self.used_fn_def_ids.contains(def_id))
{
let show_semver_warning =
self.avoid_breaking_exported_api && cx.effective_visibilities.is_exported(*fn_def_id);
for input in unused {
// If the argument is never used mutably, we emit the warning.
let sp = input.span;
if let rustc_hir::TyKind::Ref(_, inner_ty) = input.kind {
span_lint_hir_and_then(
cx,
NEEDLESS_PASS_BY_REF_MUT,
cx.tcx.hir().local_def_id_to_hir_id(*fn_def_id),
sp,
"this argument is a mutable reference, but not used mutably",
|diag| {
diag.span_suggestion(
sp,
"consider changing to".to_string(),
format!("&{}", snippet(cx, cx.tcx.hir().span(inner_ty.ty.hir_id), "_"),),
Applicability::Unspecified,
);
if show_semver_warning {
diag.warn("changing this function will impact semver compatibility");
}
},
);
}
}
}
}
}
#[derive(Default)]
struct MutablyUsedVariablesCtxt {
mutably_used_vars: HirIdSet,
prev_bind: Option<HirId>,
prev_move_to_closure: HirIdSet,
aliases: HirIdMap<HirId>,
async_closures: FxHashSet<LocalDefId>,
}
impl MutablyUsedVariablesCtxt {
fn add_mutably_used_var(&mut self, mut used_id: HirId) {
while let Some(id) = self.aliases.get(&used_id) {
self.mutably_used_vars.insert(used_id);
used_id = *id;
}
self.mutably_used_vars.insert(used_id);
}
}
impl<'tcx> euv::Delegate<'tcx> for MutablyUsedVariablesCtxt {
fn consume(&mut self, cmt: &euv::PlaceWithHirId<'tcx>, _id: HirId) {
if let euv::Place {
base:
euv::PlaceBase::Local(vid)
| euv::PlaceBase::Upvar(UpvarId {
var_path: UpvarPath { hir_id: vid },
..
}),
base_ty,
..
} = &cmt.place
{
if let Some(bind_id) = self.prev_bind.take() {
if bind_id != *vid {
self.aliases.insert(bind_id, *vid);
}
} else if !self.prev_move_to_closure.contains(vid)
&& matches!(base_ty.ref_mutability(), Some(Mutability::Mut))
{
self.add_mutably_used_var(*vid);
}
self.prev_bind = None;
self.prev_move_to_closure.remove(vid);
}
}
fn borrow(&mut self, cmt: &euv::PlaceWithHirId<'tcx>, _id: HirId, borrow: ty::BorrowKind) {
self.prev_bind = None;
if let euv::Place {
base: euv::PlaceBase::Local(vid),
base_ty,
..
} = &cmt.place
{
// If this is a mutable borrow, it was obviously used mutably so we add it. However
// for `UniqueImmBorrow`, it's interesting because if you do: `array[0] = value` inside
// a closure, it'll return this variant whereas if you have just an index access, it'll
// return `ImmBorrow`. So if there is "Unique" and it's a mutable reference, we add it
// to the mutably used variables set.
if borrow == ty::BorrowKind::MutBorrow
|| (borrow == ty::BorrowKind::UniqueImmBorrow && base_ty.ref_mutability() == Some(Mutability::Mut))
{
self.add_mutably_used_var(*vid);
}
}
}
fn mutate(&mut self, cmt: &euv::PlaceWithHirId<'tcx>, _id: HirId) {
self.prev_bind = None;
if let euv::Place {
projections,
base: euv::PlaceBase::Local(vid),
..
} = &cmt.place
{
if !projections.is_empty() {
self.add_mutably_used_var(*vid);
}
}
}
fn copy(&mut self, _cmt: &euv::PlaceWithHirId<'tcx>, _id: HirId) {
self.prev_bind = None;
}
fn fake_read(
&mut self,
cmt: &rustc_hir_typeck::expr_use_visitor::PlaceWithHirId<'tcx>,
cause: FakeReadCause,
_id: HirId,
) {
if let euv::Place {
base:
euv::PlaceBase::Upvar(UpvarId {
var_path: UpvarPath { hir_id: vid },
..
}),
..
} = &cmt.place
{
if let FakeReadCause::ForLet(Some(inner)) = cause {
// Seems like we are inside an async function. We need to store the closure `DefId`
// to go through it afterwards.
self.async_closures.insert(inner);
self.aliases.insert(cmt.hir_id, *vid);
self.prev_move_to_closure.insert(*vid);
}
}
}
fn bind(&mut self, _cmt: &euv::PlaceWithHirId<'tcx>, id: HirId) {
self.prev_bind = Some(id);
}
}
/// A final pass to check for paths referencing this function that require the argument to be
/// `&mut`, basically if the function is ever used as a `fn`-like argument.
struct FnNeedsMutVisitor<'a, 'tcx> {
cx: &'a LateContext<'tcx>,
used_fn_def_ids: &'a mut FxHashSet<LocalDefId>,
}
impl<'tcx> Visitor<'tcx> for FnNeedsMutVisitor<'_, 'tcx> {
type NestedFilter = OnlyBodies;
fn nested_visit_map(&mut self) -> Self::Map {
self.cx.tcx.hir()
}
fn visit_qpath(&mut self, qpath: &'tcx QPath<'tcx>, hir_id: HirId, _: Span) {
walk_qpath(self, qpath, hir_id);
let Self { cx, used_fn_def_ids } = self;
// #11182; do not lint if mutability is required elsewhere
if let Node::Expr(expr) = cx.tcx.hir().get(hir_id)
&& let Some(parent) = get_parent_node(cx.tcx, expr.hir_id)
&& let ty::FnDef(def_id, _) = cx.tcx.typeck(cx.tcx.hir().enclosing_body_owner(hir_id)).expr_ty(expr).kind()
&& let Some(def_id) = def_id.as_local()
{
if let Node::Expr(e) = parent
&& let ExprKind::Call(call, _) = e.kind
&& call.hir_id == expr.hir_id
{
return;
}
// We don't need to check each argument individually as you cannot coerce a function
// taking `&mut` -> `&`, for some reason, so if we've gotten this far we know it's
// passed as a `fn`-like argument (or is unified) and should ignore every "unused"
// argument entirely
used_fn_def_ids.insert(def_id);
}
}
} |
impl<'tcx> LateLintPass<'tcx> for NeedlessPassByRefMut<'tcx> {
fn check_fn( | random_line_split |
messenger.rs | // This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
//! LDK sends, receives, and forwards onion messages via the [`OnionMessenger`]. See its docs for
//! more information.
use bitcoin::hashes::{Hash, HashEngine};
use bitcoin::hashes::hmac::{Hmac, HmacEngine};
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::secp256k1::{self, PublicKey, Scalar, Secp256k1, SecretKey};
use chain::keysinterface::{InMemorySigner, KeysInterface, KeysManager, Recipient, Sign};
use ln::features::{InitFeatures, NodeFeatures};
use ln::msgs::{self, OnionMessageHandler};
use ln::onion_utils;
use super::blinded_route::{BlindedRoute, ForwardTlvs, ReceiveTlvs};
use super::packet::{BIG_PACKET_HOP_DATA_LEN, ForwardControlTlvs, Packet, Payload, ReceiveControlTlvs, SMALL_PACKET_HOP_DATA_LEN};
use super::utils;
use util::events::OnionMessageProvider;
use util::logger::Logger;
use util::ser::Writeable;
use core::ops::Deref;
use sync::{Arc, Mutex};
use prelude::*;
/// A sender, receiver and forwarder of onion messages. In upcoming releases, this object will be
/// used to retrieve invoices and fulfill invoice requests from [offers]. Currently, only sending
/// and receiving empty onion messages is supported.
///
/// # Example
///
/// ```
/// # extern crate bitcoin;
/// # use bitcoin::hashes::_export::_core::time::Duration;
/// # use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
/// # use lightning::chain::keysinterface::{InMemorySigner, KeysManager, KeysInterface};
/// # use lightning::onion_message::{BlindedRoute, Destination, OnionMessenger};
/// # use lightning::util::logger::{Logger, Record};
/// # use std::sync::Arc;
/// # struct FakeLogger {};
/// # impl Logger for FakeLogger {
/// # fn log(&self, record: &Record) { unimplemented!() }
/// # }
/// # let seed = [42u8; 32];
/// # let time = Duration::from_secs(123456);
/// # let keys_manager = KeysManager::new(&seed, time.as_secs(), time.subsec_nanos());
/// # let logger = Arc::new(FakeLogger {});
/// # let node_secret = SecretKey::from_slice(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()[..]).unwrap();
/// # let secp_ctx = Secp256k1::new();
/// # let hop_node_id1 = PublicKey::from_secret_key(&secp_ctx, &node_secret);
/// # let (hop_node_id2, hop_node_id3, hop_node_id4) = (hop_node_id1, hop_node_id1,
/// hop_node_id1);
/// # let destination_node_id = hop_node_id1;
/// #
/// // Create the onion messenger. This must use the same `keys_manager` as is passed to your
/// // ChannelManager.
/// let onion_messenger = OnionMessenger::new(&keys_manager, logger);
///
/// // Send an empty onion message to a node id.
/// let intermediate_hops = [hop_node_id1, hop_node_id2];
/// let reply_path = None;
/// onion_messenger.send_onion_message(&intermediate_hops, Destination::Node(destination_node_id), reply_path);
///
/// // Create a blinded route to yourself, for someone to send an onion message to.
/// # let your_node_id = hop_node_id1;
/// let hops = [hop_node_id3, hop_node_id4, your_node_id];
/// let blinded_route = BlindedRoute::new(&hops, &keys_manager, &secp_ctx).unwrap();
///
/// // Send an empty onion message to a blinded route.
/// # let intermediate_hops = [hop_node_id1, hop_node_id2];
/// let reply_path = None;
/// onion_messenger.send_onion_message(&intermediate_hops, Destination::BlindedRoute(blinded_route), reply_path);
/// ```
///
/// [offers]: <https://github.com/lightning/bolts/pull/798>
/// [`OnionMessenger`]: crate::onion_message::OnionMessenger
pub struct OnionMessenger<Signer: Sign, K: Deref, L: Deref>
where K::Target: KeysInterface<Signer = Signer>,
L::Target: Logger,
{
keys_manager: K,
logger: L,
pending_messages: Mutex<HashMap<PublicKey, VecDeque<msgs::OnionMessage>>>,
secp_ctx: Secp256k1<secp256k1::All>,
// Coming soon:
// invoice_handler: InvoiceHandler,
// custom_handler: CustomHandler, // handles custom onion messages
}
/// The destination of an onion message.
pub enum Destination {
/// We're sending this onion message to a node.
Node(PublicKey),
/// We're sending this onion message to a blinded route.
BlindedRoute(BlindedRoute),
}
impl Destination {
pub(super) fn num_hops(&self) -> usize {
match self {
Destination::Node(_) => 1,
Destination::BlindedRoute(BlindedRoute { blinded_hops, .. }) => blinded_hops.len(),
}
}
}
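// A small, self-contained check of the hop counting above; the key material is arbitrary and
// only exists to build a `PublicKey` for the example.
#[cfg(test)]
mod destination_hops_example {
use super::Destination;
use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
#[test]
fn node_destination_counts_as_one_hop() {
let secp_ctx = Secp256k1::new();
let sk = SecretKey::from_slice(&[42u8; 32]).unwrap();
let pk = PublicKey::from_secret_key(&secp_ctx, &sk);
assert_eq!(Destination::Node(pk).num_hops(), 1);
}
}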
/// Errors that may occur when [sending an onion message].
///
/// [sending an onion message]: OnionMessenger::send_onion_message
#[derive(Debug, PartialEq)]
pub enum SendError {
/// Errored computing onion message packet keys.
Secp256k1(secp256k1::Error),
/// Because implementations such as Eclair will drop onion messages where the message packet
/// exceeds 32834 bytes, we refuse to send messages where the packet exceeds this size.
TooBigPacket,
/// The provided [`Destination`] was an invalid [`BlindedRoute`], due to having fewer than two
/// blinded hops.
TooFewBlindedHops,
/// Our next-hop peer was offline or does not support onion message forwarding.
InvalidFirstHop,
/// Our next-hop peer's buffer was full or our total outbound buffer was full.
BufferFull,
}
impl<Signer: Sign, K: Deref, L: Deref> OnionMessenger<Signer, K, L>
where K::Target: KeysInterface<Signer = Signer>,
L::Target: Logger,
{
/// Constructs a new `OnionMessenger` to send, forward, and delegate received onion messages to
/// their respective handlers.
pub fn new(keys_manager: K, logger: L) -> Self {
let mut secp_ctx = Secp256k1::new();
secp_ctx.seeded_randomize(&keys_manager.get_secure_random_bytes());
OnionMessenger {
keys_manager,
pending_messages: Mutex::new(HashMap::new()),
secp_ctx,
logger,
}
}
/// Send an empty onion message to `destination`, routing it through `intermediate_nodes`.
/// See [`OnionMessenger`] for example usage.
pub fn send_onion_message(&self, intermediate_nodes: &[PublicKey], destination: Destination, reply_path: Option<BlindedRoute>) -> Result<(), SendError> {
if let Destination::BlindedRoute(BlindedRoute { ref blinded_hops, .. }) = destination {
if blinded_hops.len() < 2 {
return Err(SendError::TooFewBlindedHops);
}
}
let blinding_secret_bytes = self.keys_manager.get_secure_random_bytes();
let blinding_secret = SecretKey::from_slice(&blinding_secret_bytes[..]).expect("RNG is busted");
let (introduction_node_id, blinding_point) = if intermediate_nodes.len() != 0 {
(intermediate_nodes[0], PublicKey::from_secret_key(&self.secp_ctx, &blinding_secret))
} else {
match destination {
Destination::Node(pk) => (pk, PublicKey::from_secret_key(&self.secp_ctx, &blinding_secret)),
Destination::BlindedRoute(BlindedRoute { introduction_node_id, blinding_point, .. }) =>
(introduction_node_id, blinding_point),
}
};
let (packet_payloads, packet_keys) = packet_payloads_and_keys(
&self.secp_ctx, intermediate_nodes, destination, reply_path, &blinding_secret)
.map_err(|e| SendError::Secp256k1(e))?;
let prng_seed = self.keys_manager.get_secure_random_bytes();
let onion_routing_packet = construct_onion_message_packet(
packet_payloads, packet_keys, prng_seed).map_err(|()| SendError::TooBigPacket)?;
let mut pending_per_peer_msgs = self.pending_messages.lock().unwrap();
if outbound_buffer_full(&introduction_node_id, &pending_per_peer_msgs) { return Err(SendError::BufferFull) }
match pending_per_peer_msgs.entry(introduction_node_id) {
hash_map::Entry::Vacant(_) => Err(SendError::InvalidFirstHop),
hash_map::Entry::Occupied(mut e) => {
e.get_mut().push_back(msgs::OnionMessage { blinding_point, onion_routing_packet });
Ok(())
}
}
}
#[cfg(test)]
pub(super) fn release_pending_msgs(&self) -> HashMap<PublicKey, VecDeque<msgs::OnionMessage>> {
let mut pending_msgs = self.pending_messages.lock().unwrap();
let mut msgs = HashMap::new();
// We don't want to disconnect the peers by removing them entirely from the original map, so we
// swap the pending message buffers individually.
for (peer_node_id, pending_messages) in &mut *pending_msgs {
msgs.insert(*peer_node_id, core::mem::take(pending_messages));
}
msgs
}
}
fn outbound_buffer_full(peer_node_id: &PublicKey, buffer: &HashMap<PublicKey, VecDeque<msgs::OnionMessage>>) -> bool {
const MAX_TOTAL_BUFFER_SIZE: usize = (1 << 20) * 128;
const MAX_PER_PEER_BUFFER_SIZE: usize = (1 << 10) * 256;
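// With these constants the outbound queue is capped at 128 MiB across all peers and 256 KiB for
// any single peer; the loop below stops early as soon as either cap is reached.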
let mut total_buffered_bytes = 0;
let mut peer_buffered_bytes = 0;
for (pk, peer_buf) in buffer {
for om in peer_buf {
let om_len = om.serialized_length();
if pk == peer_node_id {
peer_buffered_bytes += om_len;
}
total_buffered_bytes += om_len;
if total_buffered_bytes >= MAX_TOTAL_BUFFER_SIZE ||
peer_buffered_bytes >= MAX_PER_PEER_BUFFER_SIZE
{
return true
}
}
}
false
}
impl<Signer: Sign, K: Deref, L: Deref> OnionMessageHandler for OnionMessenger<Signer, K, L>
where K::Target: KeysInterface<Signer = Signer>,
L::Target: Logger,
{
/// Handle an incoming onion message. Currently, if a message was destined for us we will log, but
/// soon we'll delegate the onion message to a handler that can generate invoices or send
/// payments.
fn | (&self, _peer_node_id: &PublicKey, msg: &msgs::OnionMessage) {
let control_tlvs_ss = match self.keys_manager.ecdh(Recipient::Node, &msg.blinding_point, None) {
Ok(ss) => ss,
Err(e) => {
log_error!(self.logger, "Failed to retrieve node secret: {:?}", e);
return
}
};
let onion_decode_ss = {
let blinding_factor = {
let mut hmac = HmacEngine::<Sha256>::new(b"blinded_node_id");
hmac.input(control_tlvs_ss.as_ref());
Hmac::from_engine(hmac).into_inner()
};
match self.keys_manager.ecdh(Recipient::Node, &msg.onion_routing_packet.public_key,
Some(&Scalar::from_be_bytes(blinding_factor).unwrap()))
{
Ok(ss) => ss.secret_bytes(),
Err(()) => {
log_trace!(self.logger, "Failed to compute onion packet shared secret");
return
}
}
};
match onion_utils::decode_next_hop(onion_decode_ss, &msg.onion_routing_packet.hop_data[..],
msg.onion_routing_packet.hmac, control_tlvs_ss)
{
Ok((Payload::Receive {
control_tlvs: ReceiveControlTlvs::Unblinded(ReceiveTlvs { path_id }), reply_path,
}, None)) => {
log_info!(self.logger,
"Received an onion message with path_id: {:02x?} and {}reply_path",
path_id, if reply_path.is_some() { "" } else { "no " });
},
Ok((Payload::Forward(ForwardControlTlvs::Unblinded(ForwardTlvs {
next_node_id, next_blinding_override
})), Some((next_hop_hmac, new_packet_bytes)))) => {
// TODO: we need to check whether `next_node_id` is our node, in which case this is a dummy
// blinded hop and this onion message is destined for us. In this situation, we should keep
// unwrapping the onion layers to get to the final payload. Since we don't have the option
// of creating blinded routes with dummy hops currently, we should be ok to not handle this
// for now.
let new_pubkey = match onion_utils::next_hop_packet_pubkey(&self.secp_ctx, msg.onion_routing_packet.public_key, &onion_decode_ss) {
Ok(pk) => pk,
Err(e) => {
log_trace!(self.logger, "Failed to compute next hop packet pubkey: {}", e);
return
}
};
let outgoing_packet = Packet {
version: 0,
public_key: new_pubkey,
hop_data: new_packet_bytes,
hmac: next_hop_hmac,
};
let onion_message = msgs::OnionMessage {
blinding_point: match next_blinding_override {
Some(blinding_point) => blinding_point,
None => {
let blinding_factor = {
let mut sha = Sha256::engine();
sha.input(&msg.blinding_point.serialize()[..]);
sha.input(control_tlvs_ss.as_ref());
Sha256::from_engine(sha).into_inner()
};
let next_blinding_point = msg.blinding_point;
match next_blinding_point.mul_tweak(&self.secp_ctx, &Scalar::from_be_bytes(blinding_factor).unwrap()) {
Ok(bp) => bp,
Err(e) => {
log_trace!(self.logger, "Failed to compute next blinding point: {}", e);
return
}
}
},
},
onion_routing_packet: outgoing_packet,
};
let mut pending_per_peer_msgs = self.pending_messages.lock().unwrap();
if outbound_buffer_full(&next_node_id, &pending_per_peer_msgs) {
log_trace!(self.logger, "Dropping forwarded onion message to peer {:?}: outbound buffer full", next_node_id);
return
}
#[cfg(fuzzing)]
pending_per_peer_msgs.entry(next_node_id).or_insert_with(VecDeque::new);
match pending_per_peer_msgs.entry(next_node_id) {
hash_map::Entry::Vacant(_) => {
log_trace!(self.logger, "Dropping forwarded onion message to disconnected peer {:?}", next_node_id);
return
},
hash_map::Entry::Occupied(mut e) => {
e.get_mut().push_back(onion_message);
log_trace!(self.logger, "Forwarding an onion message to peer {}", next_node_id);
}
};
},
Err(e) => {
log_trace!(self.logger, "Errored decoding onion message packet: {:?}", e);
},
_ => {
log_trace!(self.logger, "Received bogus onion message packet, either the sender encoded a final hop as a forwarding hop or vice versa");
},
};
}
fn peer_connected(&self, their_node_id: &PublicKey, init: &msgs::Init) -> Result<(), ()> {
if init.features.supports_onion_messages() {
let mut peers = self.pending_messages.lock().unwrap();
peers.insert(their_node_id.clone(), VecDeque::new());
}
Ok(())
}
fn peer_disconnected(&self, their_node_id: &PublicKey, _no_connection_possible: bool) {
let mut pending_msgs = self.pending_messages.lock().unwrap();
pending_msgs.remove(their_node_id);
}
fn provided_node_features(&self) -> NodeFeatures {
let mut features = NodeFeatures::empty();
features.set_onion_messages_optional();
features
}
fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures {
let mut features = InitFeatures::empty();
features.set_onion_messages_optional();
features
}
}
impl<Signer: Sign, K: Deref, L: Deref> OnionMessageProvider for OnionMessenger<Signer, K, L>
where K::Target: KeysInterface<Signer = Signer>,
L::Target: Logger,
{
fn next_onion_message_for_peer(&self, peer_node_id: PublicKey) -> Option<msgs::OnionMessage> {
let mut pending_msgs = self.pending_messages.lock().unwrap();
if let Some(msgs) = pending_msgs.get_mut(&peer_node_id) {
return msgs.pop_front()
}
None
}
}
// TODO: parameterize the below Simple* types with OnionMessenger and handle the messages it
// produces
/// Useful for simplifying the parameters of [`SimpleArcChannelManager`] and
/// [`SimpleArcPeerManager`]. See their docs for more details.
///
/// (C-not exported) as `Arc`s don't make sense in bindings.
///
/// [`SimpleArcChannelManager`]: crate::ln::channelmanager::SimpleArcChannelManager
/// [`SimpleArcPeerManager`]: crate::ln::peer_handler::SimpleArcPeerManager
pub type SimpleArcOnionMessenger<L> = OnionMessenger<InMemorySigner, Arc<KeysManager>, Arc<L>>;
/// Useful for simplifying the parameters of [`SimpleRefChannelManager`] and
/// [`SimpleRefPeerManager`]. See their docs for more details.
///
/// (C-not exported) as general type aliases don't make sense in bindings.
///
/// [`SimpleRefChannelManager`]: crate::ln::channelmanager::SimpleRefChannelManager
/// [`SimpleRefPeerManager`]: crate::ln::peer_handler::SimpleRefPeerManager
pub type SimpleRefOnionMessenger<'a, 'b, L> = OnionMessenger<InMemorySigner, &'a KeysManager, &'b L>;
/// Construct onion packet payloads and keys for sending an onion message along the given
/// `unblinded_path` to the given `destination`.
fn packet_payloads_and_keys<T: secp256k1::Signing + secp256k1::Verification>(
secp_ctx: &Secp256k1<T>, unblinded_path: &[PublicKey], destination: Destination, mut reply_path:
Option<BlindedRoute>, session_priv: &SecretKey
) -> Result<(Vec<(Payload, [u8; 32])>, Vec<onion_utils::OnionKeys>), secp256k1::Error> {
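// Layout produced below: each unblinded intermediate hop gets an unblinded `ForwardTlvs` payload
// naming the next node; entering a blinded route adds a `ForwardTlvs` carrying the
// `next_blinding_override`, followed by the route's pre-encrypted blinded payloads; the last hop
// always receives a `Payload::Receive` (blinded or unblinded) holding the optional reply path.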
let num_hops = unblinded_path.len() + destination.num_hops();
let mut payloads = Vec::with_capacity(num_hops);
let mut onion_packet_keys = Vec::with_capacity(num_hops);
let (mut intro_node_id_blinding_pt, num_blinded_hops) = if let Destination::BlindedRoute(BlindedRoute {
introduction_node_id, blinding_point, blinded_hops }) = &destination {
(Some((*introduction_node_id, *blinding_point)), blinded_hops.len()) } else { (None, 0) };
let num_unblinded_hops = num_hops - num_blinded_hops;
let mut unblinded_path_idx = 0;
let mut blinded_path_idx = 0;
let mut prev_control_tlvs_ss = None;
utils::construct_keys_callback(secp_ctx, unblinded_path, Some(destination), session_priv, |_, onion_packet_ss, ephemeral_pubkey, control_tlvs_ss, unblinded_pk_opt, enc_payload_opt| {
if num_unblinded_hops != 0 && unblinded_path_idx < num_unblinded_hops {
if let Some(ss) = prev_control_tlvs_ss.take() {
payloads.push((Payload::Forward(ForwardControlTlvs::Unblinded(
ForwardTlvs {
next_node_id: unblinded_pk_opt.unwrap(),
next_blinding_override: None,
}
)), ss));
}
prev_control_tlvs_ss = Some(control_tlvs_ss);
unblinded_path_idx += 1;
} else if let Some((intro_node_id, blinding_pt)) = intro_node_id_blinding_pt.take() {
if let Some(control_tlvs_ss) = prev_control_tlvs_ss.take() {
payloads.push((Payload::Forward(ForwardControlTlvs::Unblinded(ForwardTlvs {
next_node_id: intro_node_id,
next_blinding_override: Some(blinding_pt),
})), control_tlvs_ss));
}
if let Some(encrypted_payload) = enc_payload_opt {
payloads.push((Payload::Forward(ForwardControlTlvs::Blinded(encrypted_payload)),
control_tlvs_ss));
} else { debug_assert!(false); }
blinded_path_idx += 1;
} else if blinded_path_idx < num_blinded_hops - 1 && enc_payload_opt.is_some() {
payloads.push((Payload::Forward(ForwardControlTlvs::Blinded(enc_payload_opt.unwrap())),
control_tlvs_ss));
blinded_path_idx += 1;
} else if let Some(encrypted_payload) = enc_payload_opt {
payloads.push((Payload::Receive {
control_tlvs: ReceiveControlTlvs::Blinded(encrypted_payload),
reply_path: reply_path.take(),
}, control_tlvs_ss));
}
let (rho, mu) = onion_utils::gen_rho_mu_from_shared_secret(onion_packet_ss.as_ref());
onion_packet_keys.push(onion_utils::OnionKeys {
#[cfg(test)]
shared_secret: onion_packet_ss,
#[cfg(test)]
blinding_factor: [0; 32],
ephemeral_pubkey,
rho,
mu,
});
})?;
if let Some(control_tlvs_ss) = prev_control_tlvs_ss {
payloads.push((Payload::Receive {
control_tlvs: ReceiveControlTlvs::Unblinded(ReceiveTlvs { path_id: None, }),
reply_path: reply_path.take(),
}, control_tlvs_ss));
}
Ok((payloads, onion_packet_keys))
}
/// Errors if the serialized payload size exceeds onion_message::BIG_PACKET_HOP_DATA_LEN
fn construct_onion_message_packet(payloads: Vec<(Payload, [u8; 32])>, onion_keys: Vec<onion_utils::OnionKeys>, prng_seed: [u8; 32]) -> Result<Packet, ()> {
// Spec rationale:
// "`len` allows larger messages to be sent than the standard 1300 bytes allowed for an HTLC
// onion, but this should be used sparingly as it reduces the anonymity set, hence the
// recommendation that it either look like an HTLC onion, or if larger, be a fixed size."
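// Concretely: payloads that fit within the small size are padded up to it so the packet looks
// like a standard HTLC onion; larger payloads (up to the big size) use the big fixed size; and
// anything bigger is rejected with `Err(())`, which the caller maps to `SendError::TooBigPacket`.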
let payloads_ser_len = onion_utils::payloads_serialized_length(&payloads);
let hop_data_len = if payloads_ser_len <= SMALL_PACKET_HOP_DATA_LEN {
SMALL_PACKET_HOP_DATA_LEN
} else if payloads_ser_len <= BIG_PACKET_HOP_DATA_LEN {
BIG_PACKET_HOP_DATA_LEN
} else { return Err(()) };
Ok(onion_utils::construct_onion_message_packet::<_, _>(
payloads, onion_keys, prng_seed, hop_data_len))
}
| handle_onion_message | identifier_name |
messenger.rs | // This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
//! LDK sends, receives, and forwards onion messages via the [`OnionMessenger`]. See its docs for
//! more information.
use bitcoin::hashes::{Hash, HashEngine};
use bitcoin::hashes::hmac::{Hmac, HmacEngine};
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::secp256k1::{self, PublicKey, Scalar, Secp256k1, SecretKey};
use chain::keysinterface::{InMemorySigner, KeysInterface, KeysManager, Recipient, Sign};
use ln::features::{InitFeatures, NodeFeatures};
use ln::msgs::{self, OnionMessageHandler};
use ln::onion_utils;
use super::blinded_route::{BlindedRoute, ForwardTlvs, ReceiveTlvs};
use super::packet::{BIG_PACKET_HOP_DATA_LEN, ForwardControlTlvs, Packet, Payload, ReceiveControlTlvs, SMALL_PACKET_HOP_DATA_LEN};
use super::utils;
use util::events::OnionMessageProvider;
use util::logger::Logger;
use util::ser::Writeable;
use core::ops::Deref;
use sync::{Arc, Mutex};
use prelude::*;
/// A sender, receiver and forwarder of onion messages. In upcoming releases, this object will be
/// used to retrieve invoices and fulfill invoice requests from [offers]. Currently, only sending
/// and receiving empty onion messages is supported.
///
/// # Example
///
/// ```
/// # extern crate bitcoin;
/// # use bitcoin::hashes::_export::_core::time::Duration;
/// # use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
/// # use lightning::chain::keysinterface::{InMemorySigner, KeysManager, KeysInterface};
/// # use lightning::onion_message::{BlindedRoute, Destination, OnionMessenger};
/// # use lightning::util::logger::{Logger, Record};
/// # use std::sync::Arc;
/// # struct FakeLogger {};
/// # impl Logger for FakeLogger {
/// # fn log(&self, record: &Record) { unimplemented!() }
/// # }
/// # let seed = [42u8; 32];
/// # let time = Duration::from_secs(123456);
/// # let keys_manager = KeysManager::new(&seed, time.as_secs(), time.subsec_nanos());
/// # let logger = Arc::new(FakeLogger {});
/// # let node_secret = SecretKey::from_slice(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()[..]).unwrap();
/// # let secp_ctx = Secp256k1::new();
/// # let hop_node_id1 = PublicKey::from_secret_key(&secp_ctx, &node_secret);
/// # let (hop_node_id2, hop_node_id3, hop_node_id4) = (hop_node_id1, hop_node_id1,
/// hop_node_id1);
/// # let destination_node_id = hop_node_id1;
/// #
/// // Create the onion messenger. This must use the same `keys_manager` as is passed to your
/// // ChannelManager.
/// let onion_messenger = OnionMessenger::new(&keys_manager, logger);
///
/// // Send an empty onion message to a node id.
/// let intermediate_hops = [hop_node_id1, hop_node_id2];
/// let reply_path = None;
/// onion_messenger.send_onion_message(&intermediate_hops, Destination::Node(destination_node_id), reply_path);
///
/// // Create a blinded route to yourself, for someone to send an onion message to.
/// # let your_node_id = hop_node_id1;
/// let hops = [hop_node_id3, hop_node_id4, your_node_id];
/// let blinded_route = BlindedRoute::new(&hops, &keys_manager, &secp_ctx).unwrap();
///
/// // Send an empty onion message to a blinded route.
/// # let intermediate_hops = [hop_node_id1, hop_node_id2];
/// let reply_path = None;
/// onion_messenger.send_onion_message(&intermediate_hops, Destination::BlindedRoute(blinded_route), reply_path);
/// ```
///
/// [offers]: <https://github.com/lightning/bolts/pull/798>
/// [`OnionMessenger`]: crate::onion_message::OnionMessenger
pub struct OnionMessenger<Signer: Sign, K: Deref, L: Deref>
where K::Target: KeysInterface<Signer = Signer>,
L::Target: Logger,
{
keys_manager: K,
logger: L,
pending_messages: Mutex<HashMap<PublicKey, VecDeque<msgs::OnionMessage>>>,
secp_ctx: Secp256k1<secp256k1::All>,
// Coming soon:
// invoice_handler: InvoiceHandler,
// custom_handler: CustomHandler, // handles custom onion messages
}
/// The destination of an onion message.
pub enum Destination {
/// We're sending this onion message to a node.
Node(PublicKey),
/// We're sending this onion message to a blinded route.
BlindedRoute(BlindedRoute),
}
impl Destination {
pub(super) fn num_hops(&self) -> usize {
match self {
Destination::Node(_) => 1,
Destination::BlindedRoute(BlindedRoute { blinded_hops, .. }) => blinded_hops.len(),
}
}
}
/// Errors that may occur when [sending an onion message].
///
/// [sending an onion message]: OnionMessenger::send_onion_message
#[derive(Debug, PartialEq)]
pub enum SendError {
/// Errored computing onion message packet keys.
Secp256k1(secp256k1::Error),
/// Because implementations such as Eclair will drop onion messages where the message packet
/// exceeds 32834 bytes, we refuse to send messages where the packet exceeds this size.
TooBigPacket,
/// The provided [`Destination`] was an invalid [`BlindedRoute`], due to having fewer than two
/// blinded hops.
TooFewBlindedHops,
/// Our next-hop peer was offline or does not support onion message forwarding.
InvalidFirstHop,
/// Our next-hop peer's buffer was full or our total outbound buffer was full.
BufferFull,
}
impl<Signer: Sign, K: Deref, L: Deref> OnionMessenger<Signer, K, L>
where K::Target: KeysInterface<Signer = Signer>,
L::Target: Logger,
{
/// Constructs a new `OnionMessenger` to send, forward, and delegate received onion messages to
/// their respective handlers.
pub fn new(keys_manager: K, logger: L) -> Self {
let mut secp_ctx = Secp256k1::new();
secp_ctx.seeded_randomize(&keys_manager.get_secure_random_bytes());
OnionMessenger {
keys_manager,
pending_messages: Mutex::new(HashMap::new()),
secp_ctx,
logger,
}
}
/// Send an empty onion message to `destination`, routing it through `intermediate_nodes`.
/// See [`OnionMessenger`] for example usage.
pub fn send_onion_message(&self, intermediate_nodes: &[PublicKey], destination: Destination, reply_path: Option<BlindedRoute>) -> Result<(), SendError> {
if let Destination::BlindedRoute(BlindedRoute { ref blinded_hops, .. }) = destination {
if blinded_hops.len() < 2 {
return Err(SendError::TooFewBlindedHops);
}
}
let blinding_secret_bytes = self.keys_manager.get_secure_random_bytes();
let blinding_secret = SecretKey::from_slice(&blinding_secret_bytes[..]).expect("RNG is busted");
let (introduction_node_id, blinding_point) = if intermediate_nodes.len() != 0 {
(intermediate_nodes[0], PublicKey::from_secret_key(&self.secp_ctx, &blinding_secret))
} else {
match destination {
Destination::Node(pk) => (pk, PublicKey::from_secret_key(&self.secp_ctx, &blinding_secret)),
Destination::BlindedRoute(BlindedRoute { introduction_node_id, blinding_point, .. }) =>
(introduction_node_id, blinding_point),
}
};
let (packet_payloads, packet_keys) = packet_payloads_and_keys(
&self.secp_ctx, intermediate_nodes, destination, reply_path, &blinding_secret)
.map_err(|e| SendError::Secp256k1(e))?;
let prng_seed = self.keys_manager.get_secure_random_bytes();
let onion_routing_packet = construct_onion_message_packet(
packet_payloads, packet_keys, prng_seed).map_err(|()| SendError::TooBigPacket)?;
let mut pending_per_peer_msgs = self.pending_messages.lock().unwrap();
if outbound_buffer_full(&introduction_node_id, &pending_per_peer_msgs) { return Err(SendError::BufferFull) }
match pending_per_peer_msgs.entry(introduction_node_id) {
hash_map::Entry::Vacant(_) => Err(SendError::InvalidFirstHop),
hash_map::Entry::Occupied(mut e) => {
e.get_mut().push_back(msgs::OnionMessage { blinding_point, onion_routing_packet });
Ok(())
}
}
}
#[cfg(test)]
pub(super) fn release_pending_msgs(&self) -> HashMap<PublicKey, VecDeque<msgs::OnionMessage>> {
let mut pending_msgs = self.pending_messages.lock().unwrap();
let mut msgs = HashMap::new();
// We don't want to disconnect the peers by removing them entirely from the original map, so we
// swap the pending message buffers individually.
for (peer_node_id, pending_messages) in &mut *pending_msgs {
msgs.insert(*peer_node_id, core::mem::take(pending_messages));
}
msgs
}
}
fn outbound_buffer_full(peer_node_id: &PublicKey, buffer: &HashMap<PublicKey, VecDeque<msgs::OnionMessage>>) -> bool {
const MAX_TOTAL_BUFFER_SIZE: usize = (1 << 20) * 128;
const MAX_PER_PEER_BUFFER_SIZE: usize = (1 << 10) * 256;
let mut total_buffered_bytes = 0;
let mut peer_buffered_bytes = 0;
for (pk, peer_buf) in buffer {
for om in peer_buf {
let om_len = om.serialized_length();
if pk == peer_node_id {
peer_buffered_bytes += om_len;
}
total_buffered_bytes += om_len;
if total_buffered_bytes >= MAX_TOTAL_BUFFER_SIZE ||
peer_buffered_bytes >= MAX_PER_PEER_BUFFER_SIZE
{
return true
}
}
}
false
}
impl<Signer: Sign, K: Deref, L: Deref> OnionMessageHandler for OnionMessenger<Signer, K, L>
where K::Target: KeysInterface<Signer = Signer>,
L::Target: Logger,
{
/// Handle an incoming onion message. Currently, if a message was destined for us we will log, but
/// soon we'll delegate the onion message to a handler that can generate invoices or send
/// payments.
fn handle_onion_message(&self, _peer_node_id: &PublicKey, msg: &msgs::OnionMessage) {
let control_tlvs_ss = match self.keys_manager.ecdh(Recipient::Node, &msg.blinding_point, None) {
Ok(ss) => ss,
Err(e) => {
log_error!(self.logger, "Failed to retrieve node secret: {:?}", e);
return
}
};
let onion_decode_ss = {
let blinding_factor = {
let mut hmac = HmacEngine::<Sha256>::new(b"blinded_node_id");
hmac.input(control_tlvs_ss.as_ref());
Hmac::from_engine(hmac).into_inner()
};
match self.keys_manager.ecdh(Recipient::Node, &msg.onion_routing_packet.public_key,
Some(&Scalar::from_be_bytes(blinding_factor).unwrap()))
{
Ok(ss) => ss.secret_bytes(),
Err(()) => {
log_trace!(self.logger, "Failed to compute onion packet shared secret");
return
}
}
};
match onion_utils::decode_next_hop(onion_decode_ss, &msg.onion_routing_packet.hop_data[..],
msg.onion_routing_packet.hmac, control_tlvs_ss)
{
Ok((Payload::Receive {
control_tlvs: ReceiveControlTlvs::Unblinded(ReceiveTlvs { path_id }), reply_path,
}, None)) => {
log_info!(self.logger,
"Received an onion message with path_id: {:02x?} and {}reply_path",
path_id, if reply_path.is_some() { "" } else { "no " });
},
Ok((Payload::Forward(ForwardControlTlvs::Unblinded(ForwardTlvs {
next_node_id, next_blinding_override
})), Some((next_hop_hmac, new_packet_bytes)))) => {
// TODO: we need to check whether `next_node_id` is our node, in which case this is a dummy
// blinded hop and this onion message is destined for us. In this situation, we should keep
// unwrapping the onion layers to get to the final payload. Since we don't have the option
// of creating blinded routes with dummy hops currently, we should be ok to not handle this
// for now.
let new_pubkey = match onion_utils::next_hop_packet_pubkey(&self.secp_ctx, msg.onion_routing_packet.public_key, &onion_decode_ss) {
Ok(pk) => pk,
Err(e) => {
log_trace!(self.logger, "Failed to compute next hop packet pubkey: {}", e);
return
}
};
let outgoing_packet = Packet { | public_key: new_pubkey,
hop_data: new_packet_bytes,
hmac: next_hop_hmac,
};
let onion_message = msgs::OnionMessage {
blinding_point: match next_blinding_override {
Some(blinding_point) => blinding_point,
None => {
let blinding_factor = {
let mut sha = Sha256::engine();
sha.input(&msg.blinding_point.serialize()[..]);
sha.input(control_tlvs_ss.as_ref());
Sha256::from_engine(sha).into_inner()
};
let next_blinding_point = msg.blinding_point;
match next_blinding_point.mul_tweak(&self.secp_ctx, &Scalar::from_be_bytes(blinding_factor).unwrap()) {
Ok(bp) => bp,
Err(e) => {
log_trace!(self.logger, "Failed to compute next blinding point: {}", e);
return
}
}
},
},
onion_routing_packet: outgoing_packet,
};
let mut pending_per_peer_msgs = self.pending_messages.lock().unwrap();
if outbound_buffer_full(&next_node_id, &pending_per_peer_msgs) {
log_trace!(self.logger, "Dropping forwarded onion message to peer {:?}: outbound buffer full", next_node_id);
return
}
#[cfg(fuzzing)]
pending_per_peer_msgs.entry(next_node_id).or_insert_with(VecDeque::new);
match pending_per_peer_msgs.entry(next_node_id) {
hash_map::Entry::Vacant(_) => {
log_trace!(self.logger, "Dropping forwarded onion message to disconnected peer {:?}", next_node_id);
return
},
hash_map::Entry::Occupied(mut e) => {
e.get_mut().push_back(onion_message);
log_trace!(self.logger, "Forwarding an onion message to peer {}", next_node_id);
}
};
},
Err(e) => {
log_trace!(self.logger, "Errored decoding onion message packet: {:?}", e);
},
_ => {
log_trace!(self.logger, "Received bogus onion message packet, either the sender encoded a final hop as a forwarding hop or vice versa");
},
};
}
fn peer_connected(&self, their_node_id: &PublicKey, init: &msgs::Init) -> Result<(), ()> {
if init.features.supports_onion_messages() {
let mut peers = self.pending_messages.lock().unwrap();
peers.insert(their_node_id.clone(), VecDeque::new());
}
Ok(())
}
fn peer_disconnected(&self, their_node_id: &PublicKey, _no_connection_possible: bool) {
let mut pending_msgs = self.pending_messages.lock().unwrap();
pending_msgs.remove(their_node_id);
}
fn provided_node_features(&self) -> NodeFeatures {
let mut features = NodeFeatures::empty();
features.set_onion_messages_optional();
features
}
fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures {
let mut features = InitFeatures::empty();
features.set_onion_messages_optional();
features
}
}
impl<Signer: Sign, K: Deref, L: Deref> OnionMessageProvider for OnionMessenger<Signer, K, L>
where K::Target: KeysInterface<Signer = Signer>,
L::Target: Logger,
{
fn next_onion_message_for_peer(&self, peer_node_id: PublicKey) -> Option<msgs::OnionMessage> {
let mut pending_msgs = self.pending_messages.lock().unwrap();
if let Some(msgs) = pending_msgs.get_mut(&peer_node_id) {
return msgs.pop_front()
}
None
}
}
// TODO: parameterize the below Simple* types with OnionMessenger and handle the messages it
// produces
/// Useful for simplifying the parameters of [`SimpleArcChannelManager`] and
/// [`SimpleArcPeerManager`]. See their docs for more details.
///
/// (C-not exported) as `Arc`s don't make sense in bindings.
///
/// [`SimpleArcChannelManager`]: crate::ln::channelmanager::SimpleArcChannelManager
/// [`SimpleArcPeerManager`]: crate::ln::peer_handler::SimpleArcPeerManager
pub type SimpleArcOnionMessenger<L> = OnionMessenger<InMemorySigner, Arc<KeysManager>, Arc<L>>;
/// Useful for simplifying the parameters of [`SimpleRefChannelManager`] and
/// [`SimpleRefPeerManager`]. See their docs for more details.
///
/// (C-not exported) as general type aliases don't make sense in bindings.
///
/// [`SimpleRefChannelManager`]: crate::ln::channelmanager::SimpleRefChannelManager
/// [`SimpleRefPeerManager`]: crate::ln::peer_handler::SimpleRefPeerManager
pub type SimpleRefOnionMessenger<'a, 'b, L> = OnionMessenger<InMemorySigner, &'a KeysManager, &'b L>;
/// Construct onion packet payloads and keys for sending an onion message along the given
/// `unblinded_path` to the given `destination`.
fn packet_payloads_and_keys<T: secp256k1::Signing + secp256k1::Verification>(
secp_ctx: &Secp256k1<T>, unblinded_path: &[PublicKey], destination: Destination, mut reply_path:
Option<BlindedRoute>, session_priv: &SecretKey
) -> Result<(Vec<(Payload, [u8; 32])>, Vec<onion_utils::OnionKeys>), secp256k1::Error> {
let num_hops = unblinded_path.len() + destination.num_hops();
let mut payloads = Vec::with_capacity(num_hops);
let mut onion_packet_keys = Vec::with_capacity(num_hops);
let (mut intro_node_id_blinding_pt, num_blinded_hops) = if let Destination::BlindedRoute(BlindedRoute {
introduction_node_id, blinding_point, blinded_hops }) = &destination {
(Some((*introduction_node_id, *blinding_point)), blinded_hops.len()) } else { (None, 0) };
let num_unblinded_hops = num_hops - num_blinded_hops;
let mut unblinded_path_idx = 0;
let mut blinded_path_idx = 0;
let mut prev_control_tlvs_ss = None;
utils::construct_keys_callback(secp_ctx, unblinded_path, Some(destination), session_priv, |_, onion_packet_ss, ephemeral_pubkey, control_tlvs_ss, unblinded_pk_opt, enc_payload_opt| {
if num_unblinded_hops != 0 && unblinded_path_idx < num_unblinded_hops {
if let Some(ss) = prev_control_tlvs_ss.take() {
payloads.push((Payload::Forward(ForwardControlTlvs::Unblinded(
ForwardTlvs {
next_node_id: unblinded_pk_opt.unwrap(),
next_blinding_override: None,
}
)), ss));
}
prev_control_tlvs_ss = Some(control_tlvs_ss);
unblinded_path_idx += 1;
} else if let Some((intro_node_id, blinding_pt)) = intro_node_id_blinding_pt.take() {
if let Some(control_tlvs_ss) = prev_control_tlvs_ss.take() {
payloads.push((Payload::Forward(ForwardControlTlvs::Unblinded(ForwardTlvs {
next_node_id: intro_node_id,
next_blinding_override: Some(blinding_pt),
})), control_tlvs_ss));
}
if let Some(encrypted_payload) = enc_payload_opt {
payloads.push((Payload::Forward(ForwardControlTlvs::Blinded(encrypted_payload)),
control_tlvs_ss));
} else { debug_assert!(false); }
blinded_path_idx += 1;
} else if blinded_path_idx < num_blinded_hops - 1 && enc_payload_opt.is_some() {
payloads.push((Payload::Forward(ForwardControlTlvs::Blinded(enc_payload_opt.unwrap())),
control_tlvs_ss));
blinded_path_idx += 1;
} else if let Some(encrypted_payload) = enc_payload_opt {
payloads.push((Payload::Receive {
control_tlvs: ReceiveControlTlvs::Blinded(encrypted_payload),
reply_path: reply_path.take(),
}, control_tlvs_ss));
}
let (rho, mu) = onion_utils::gen_rho_mu_from_shared_secret(onion_packet_ss.as_ref());
onion_packet_keys.push(onion_utils::OnionKeys {
#[cfg(test)]
shared_secret: onion_packet_ss,
#[cfg(test)]
blinding_factor: [0; 32],
ephemeral_pubkey,
rho,
mu,
});
})?;
if let Some(control_tlvs_ss) = prev_control_tlvs_ss {
payloads.push((Payload::Receive {
control_tlvs: ReceiveControlTlvs::Unblinded(ReceiveTlvs { path_id: None, }),
reply_path: reply_path.take(),
}, control_tlvs_ss));
}
Ok((payloads, onion_packet_keys))
}
/// Errors if the serialized payload size exceeds onion_message::BIG_PACKET_HOP_DATA_LEN
fn construct_onion_message_packet(payloads: Vec<(Payload, [u8; 32])>, onion_keys: Vec<onion_utils::OnionKeys>, prng_seed: [u8; 32]) -> Result<Packet, ()> {
// Spec rationale:
// "`len` allows larger messages to be sent than the standard 1300 bytes allowed for an HTLC
// onion, but this should be used sparingly as it reduces the anonymity set, hence the
// recommendation that it either look like an HTLC onion, or if larger, be a fixed size."
let payloads_ser_len = onion_utils::payloads_serialized_length(&payloads);
let hop_data_len = if payloads_ser_len <= SMALL_PACKET_HOP_DATA_LEN {
SMALL_PACKET_HOP_DATA_LEN
} else if payloads_ser_len <= BIG_PACKET_HOP_DATA_LEN {
BIG_PACKET_HOP_DATA_LEN
} else { return Err(()) };
Ok(onion_utils::construct_onion_message_packet::<_, _>(
payloads, onion_keys, prng_seed, hop_data_len))
} | version: 0, | random_line_split |
messenger.rs | // This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
//! LDK sends, receives, and forwards onion messages via the [`OnionMessenger`]. See its docs for
//! more information.
use bitcoin::hashes::{Hash, HashEngine};
use bitcoin::hashes::hmac::{Hmac, HmacEngine};
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::secp256k1::{self, PublicKey, Scalar, Secp256k1, SecretKey};
use chain::keysinterface::{InMemorySigner, KeysInterface, KeysManager, Recipient, Sign};
use ln::features::{InitFeatures, NodeFeatures};
use ln::msgs::{self, OnionMessageHandler};
use ln::onion_utils;
use super::blinded_route::{BlindedRoute, ForwardTlvs, ReceiveTlvs};
use super::packet::{BIG_PACKET_HOP_DATA_LEN, ForwardControlTlvs, Packet, Payload, ReceiveControlTlvs, SMALL_PACKET_HOP_DATA_LEN};
use super::utils;
use util::events::OnionMessageProvider;
use util::logger::Logger;
use util::ser::Writeable;
use core::ops::Deref;
use sync::{Arc, Mutex};
use prelude::*;
/// A sender, receiver and forwarder of onion messages. In upcoming releases, this object will be
/// used to retrieve invoices and fulfill invoice requests from [offers]. Currently, only sending
/// and receiving empty onion messages is supported.
///
/// # Example
///
/// ```
/// # extern crate bitcoin;
/// # use bitcoin::hashes::_export::_core::time::Duration;
/// # use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
/// # use lightning::chain::keysinterface::{InMemorySigner, KeysManager, KeysInterface};
/// # use lightning::onion_message::{BlindedRoute, Destination, OnionMessenger};
/// # use lightning::util::logger::{Logger, Record};
/// # use std::sync::Arc;
/// # struct FakeLogger {};
/// # impl Logger for FakeLogger {
/// # fn log(&self, record: &Record) { unimplemented!() }
/// # }
/// # let seed = [42u8; 32];
/// # let time = Duration::from_secs(123456);
/// # let keys_manager = KeysManager::new(&seed, time.as_secs(), time.subsec_nanos());
/// # let logger = Arc::new(FakeLogger {});
/// # let node_secret = SecretKey::from_slice(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()[..]).unwrap();
/// # let secp_ctx = Secp256k1::new();
/// # let hop_node_id1 = PublicKey::from_secret_key(&secp_ctx, &node_secret);
/// # let (hop_node_id2, hop_node_id3, hop_node_id4) = (hop_node_id1, hop_node_id1,
/// hop_node_id1);
/// # let destination_node_id = hop_node_id1;
/// #
/// // Create the onion messenger. This must use the same `keys_manager` as is passed to your
/// // ChannelManager.
/// let onion_messenger = OnionMessenger::new(&keys_manager, logger);
///
/// // Send an empty onion message to a node id.
/// let intermediate_hops = [hop_node_id1, hop_node_id2];
/// let reply_path = None;
/// onion_messenger.send_onion_message(&intermediate_hops, Destination::Node(destination_node_id), reply_path);
///
/// // Create a blinded route to yourself, for someone to send an onion message to.
/// # let your_node_id = hop_node_id1;
/// let hops = [hop_node_id3, hop_node_id4, your_node_id];
/// let blinded_route = BlindedRoute::new(&hops, &keys_manager, &secp_ctx).unwrap();
///
/// // Send an empty onion message to a blinded route.
/// # let intermediate_hops = [hop_node_id1, hop_node_id2];
/// let reply_path = None;
/// onion_messenger.send_onion_message(&intermediate_hops, Destination::BlindedRoute(blinded_route), reply_path);
/// ```
///
/// [offers]: <https://github.com/lightning/bolts/pull/798>
/// [`OnionMessenger`]: crate::onion_message::OnionMessenger
pub struct OnionMessenger<Signer: Sign, K: Deref, L: Deref>
where K::Target: KeysInterface<Signer = Signer>,
L::Target: Logger,
{
keys_manager: K,
logger: L,
pending_messages: Mutex<HashMap<PublicKey, VecDeque<msgs::OnionMessage>>>,
secp_ctx: Secp256k1<secp256k1::All>,
// Coming soon:
// invoice_handler: InvoiceHandler,
// custom_handler: CustomHandler, // handles custom onion messages
}
/// The destination of an onion message.
pub enum Destination {
/// We're sending this onion message to a node.
Node(PublicKey),
/// We're sending this onion message to a blinded route.
BlindedRoute(BlindedRoute),
}
impl Destination {
pub(super) fn num_hops(&self) -> usize |
}
/// Errors that may occur when [sending an onion message].
///
/// [sending an onion message]: OnionMessenger::send_onion_message
#[derive(Debug, PartialEq)]
pub enum SendError {
/// Errored computing onion message packet keys.
Secp256k1(secp256k1::Error),
/// Because implementations such as Eclair will drop onion messages where the message packet
/// exceeds 32834 bytes, we refuse to send messages where the packet exceeds this size.
TooBigPacket,
/// The provided [`Destination`] was an invalid [`BlindedRoute`], due to having fewer than two
/// blinded hops.
TooFewBlindedHops,
/// Our next-hop peer was offline or does not support onion message forwarding.
InvalidFirstHop,
/// Our next-hop peer's buffer was full or our total outbound buffer was full.
BufferFull,
}
impl<Signer: Sign, K: Deref, L: Deref> OnionMessenger<Signer, K, L>
where K::Target: KeysInterface<Signer = Signer>,
L::Target: Logger,
{
/// Constructs a new `OnionMessenger` to send, forward, and delegate received onion messages to
/// their respective handlers.
pub fn new(keys_manager: K, logger: L) -> Self {
let mut secp_ctx = Secp256k1::new();
secp_ctx.seeded_randomize(&keys_manager.get_secure_random_bytes());
OnionMessenger {
keys_manager,
pending_messages: Mutex::new(HashMap::new()),
secp_ctx,
logger,
}
}
/// Send an empty onion message to `destination`, routing it through `intermediate_nodes`.
/// See [`OnionMessenger`] for example usage.
pub fn send_onion_message(&self, intermediate_nodes: &[PublicKey], destination: Destination, reply_path: Option<BlindedRoute>) -> Result<(), SendError> {
if let Destination::BlindedRoute(BlindedRoute { ref blinded_hops, .. }) = destination {
if blinded_hops.len() < 2 {
return Err(SendError::TooFewBlindedHops);
}
}
let blinding_secret_bytes = self.keys_manager.get_secure_random_bytes();
let blinding_secret = SecretKey::from_slice(&blinding_secret_bytes[..]).expect("RNG is busted");
let (introduction_node_id, blinding_point) = if intermediate_nodes.len() != 0 {
(intermediate_nodes[0], PublicKey::from_secret_key(&self.secp_ctx, &blinding_secret))
} else {
match destination {
Destination::Node(pk) => (pk, PublicKey::from_secret_key(&self.secp_ctx, &blinding_secret)),
Destination::BlindedRoute(BlindedRoute { introduction_node_id, blinding_point, .. }) =>
(introduction_node_id, blinding_point),
}
};
let (packet_payloads, packet_keys) = packet_payloads_and_keys(
&self.secp_ctx, intermediate_nodes, destination, reply_path, &blinding_secret)
.map_err(|e| SendError::Secp256k1(e))?;
let prng_seed = self.keys_manager.get_secure_random_bytes();
let onion_routing_packet = construct_onion_message_packet(
packet_payloads, packet_keys, prng_seed).map_err(|()| SendError::TooBigPacket)?;
let mut pending_per_peer_msgs = self.pending_messages.lock().unwrap();
if outbound_buffer_full(&introduction_node_id, &pending_per_peer_msgs) { return Err(SendError::BufferFull) }
match pending_per_peer_msgs.entry(introduction_node_id) {
hash_map::Entry::Vacant(_) => Err(SendError::InvalidFirstHop),
hash_map::Entry::Occupied(mut e) => {
e.get_mut().push_back(msgs::OnionMessage { blinding_point, onion_routing_packet });
Ok(())
}
}
}
#[cfg(test)]
pub(super) fn release_pending_msgs(&self) -> HashMap<PublicKey, VecDeque<msgs::OnionMessage>> {
let mut pending_msgs = self.pending_messages.lock().unwrap();
let mut msgs = HashMap::new();
// We don't want to disconnect the peers by removing them entirely from the original map, so we
// swap the pending message buffers individually.
for (peer_node_id, pending_messages) in &mut *pending_msgs {
msgs.insert(*peer_node_id, core::mem::take(pending_messages));
}
msgs
}
}
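// Illustrative usage sketch (not part of the original file): how a caller might wire up an
// `OnionMessenger` and send a message through one intermediate hop. The identifiers
// `keys_manager`, `logger`, `intermediate_pk` and `destination_pk` are assumed to come from
// the surrounding application; error handling is elided.
//
//     let onion_messenger = OnionMessenger::new(&keys_manager, &logger);
//     let destination = Destination::Node(destination_pk);
//     onion_messenger.send_onion_message(&[intermediate_pk], destination, /* reply_path */ None)?;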
fn outbound_buffer_full(peer_node_id: &PublicKey, buffer: &HashMap<PublicKey, VecDeque<msgs::OnionMessage>>) -> bool {
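// With the values below, the total outbound buffer is capped at 128 MiB ((1 << 20) * 128 bytes)
// and each individual peer's buffer at 256 KiB ((1 << 10) * 256 bytes).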
const MAX_TOTAL_BUFFER_SIZE: usize = (1 << 20) * 128;
const MAX_PER_PEER_BUFFER_SIZE: usize = (1 << 10) * 256;
let mut total_buffered_bytes = 0;
let mut peer_buffered_bytes = 0;
for (pk, peer_buf) in buffer {
for om in peer_buf {
let om_len = om.serialized_length();
if pk == peer_node_id {
peer_buffered_bytes += om_len;
}
total_buffered_bytes += om_len;
if total_buffered_bytes >= MAX_TOTAL_BUFFER_SIZE ||
peer_buffered_bytes >= MAX_PER_PEER_BUFFER_SIZE
{
return true
}
}
}
false
}
impl<Signer: Sign, K: Deref, L: Deref> OnionMessageHandler for OnionMessenger<Signer, K, L>
where K::Target: KeysInterface<Signer = Signer>,
L::Target: Logger,
{
/// Handle an incoming onion message. Currently, if a message was destined for us we will log, but
/// soon we'll delegate the onion message to a handler that can generate invoices or send
/// payments.
fn handle_onion_message(&self, _peer_node_id: &PublicKey, msg: &msgs::OnionMessage) {
let control_tlvs_ss = match self.keys_manager.ecdh(Recipient::Node, &msg.blinding_point, None) {
Ok(ss) => ss,
Err(e) => {
log_error!(self.logger, "Failed to retrieve node secret: {:?}", e);
return
}
};
let onion_decode_ss = {
let blinding_factor = {
let mut hmac = HmacEngine::<Sha256>::new(b"blinded_node_id");
hmac.input(control_tlvs_ss.as_ref());
Hmac::from_engine(hmac).into_inner()
};
match self.keys_manager.ecdh(Recipient::Node, &msg.onion_routing_packet.public_key,
Some(&Scalar::from_be_bytes(blinding_factor).unwrap()))
{
Ok(ss) => ss.secret_bytes(),
Err(()) => {
log_trace!(self.logger, "Failed to compute onion packet shared secret");
return
}
}
};
match onion_utils::decode_next_hop(onion_decode_ss, &msg.onion_routing_packet.hop_data[..],
msg.onion_routing_packet.hmac, control_tlvs_ss)
{
Ok((Payload::Receive {
control_tlvs: ReceiveControlTlvs::Unblinded(ReceiveTlvs { path_id }), reply_path,
}, None)) => {
log_info!(self.logger,
"Received an onion message with path_id: {:02x?} and {}reply_path",
path_id, if reply_path.is_some() { "" } else { "no " });
},
Ok((Payload::Forward(ForwardControlTlvs::Unblinded(ForwardTlvs {
next_node_id, next_blinding_override
})), Some((next_hop_hmac, new_packet_bytes)))) => {
// TODO: we need to check whether `next_node_id` is our node, in which case this is a dummy
// blinded hop and this onion message is destined for us. In this situation, we should keep
// unwrapping the onion layers to get to the final payload. Since we don't have the option
// of creating blinded routes with dummy hops currently, we should be ok to not handle this
// for now.
let new_pubkey = match onion_utils::next_hop_packet_pubkey(&self.secp_ctx, msg.onion_routing_packet.public_key, &onion_decode_ss) {
Ok(pk) => pk,
Err(e) => {
log_trace!(self.logger, "Failed to compute next hop packet pubkey: {}", e);
return
}
};
let outgoing_packet = Packet {
version: 0,
public_key: new_pubkey,
hop_data: new_packet_bytes,
hmac: next_hop_hmac,
};
let onion_message = msgs::OnionMessage {
blinding_point: match next_blinding_override {
Some(blinding_point) => blinding_point,
None => {
let blinding_factor = {
let mut sha = Sha256::engine();
sha.input(&msg.blinding_point.serialize()[..]);
sha.input(control_tlvs_ss.as_ref());
Sha256::from_engine(sha).into_inner()
};
let next_blinding_point = msg.blinding_point;
match next_blinding_point.mul_tweak(&self.secp_ctx, &Scalar::from_be_bytes(blinding_factor).unwrap()) {
Ok(bp) => bp,
Err(e) => {
log_trace!(self.logger, "Failed to compute next blinding point: {}", e);
return
}
}
},
},
onion_routing_packet: outgoing_packet,
};
let mut pending_per_peer_msgs = self.pending_messages.lock().unwrap();
if outbound_buffer_full(&next_node_id, &pending_per_peer_msgs) {
log_trace!(self.logger, "Dropping forwarded onion message to peer {:?}: outbound buffer full", next_node_id);
return
}
#[cfg(fuzzing)]
pending_per_peer_msgs.entry(next_node_id).or_insert_with(VecDeque::new);
match pending_per_peer_msgs.entry(next_node_id) {
hash_map::Entry::Vacant(_) => {
log_trace!(self.logger, "Dropping forwarded onion message to disconnected peer {:?}", next_node_id);
return
},
hash_map::Entry::Occupied(mut e) => {
e.get_mut().push_back(onion_message);
log_trace!(self.logger, "Forwarding an onion message to peer {}", next_node_id);
}
};
},
Err(e) => {
log_trace!(self.logger, "Errored decoding onion message packet: {:?}", e);
},
_ => {
log_trace!(self.logger, "Received bogus onion message packet, either the sender encoded a final hop as a forwarding hop or vice versa");
},
};
}
fn peer_connected(&self, their_node_id: &PublicKey, init: &msgs::Init) -> Result<(), ()> {
if init.features.supports_onion_messages() {
let mut peers = self.pending_messages.lock().unwrap();
peers.insert(their_node_id.clone(), VecDeque::new());
}
Ok(())
}
fn peer_disconnected(&self, their_node_id: &PublicKey, _no_connection_possible: bool) {
let mut pending_msgs = self.pending_messages.lock().unwrap();
pending_msgs.remove(their_node_id);
}
fn provided_node_features(&self) -> NodeFeatures {
let mut features = NodeFeatures::empty();
features.set_onion_messages_optional();
features
}
fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures {
let mut features = InitFeatures::empty();
features.set_onion_messages_optional();
features
}
}
impl<Signer: Sign, K: Deref, L: Deref> OnionMessageProvider for OnionMessenger<Signer, K, L>
where K::Target: KeysInterface<Signer = Signer>,
L::Target: Logger,
{
fn next_onion_message_for_peer(&self, peer_node_id: PublicKey) -> Option<msgs::OnionMessage> {
let mut pending_msgs = self.pending_messages.lock().unwrap();
if let Some(msgs) = pending_msgs.get_mut(&peer_node_id) {
return msgs.pop_front()
}
None
}
}
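// Illustrative sketch (not part of the original file): a peer manager would typically drain
// this queue whenever it can write to the peer, e.g.
//
//     while let Some(onion_message) = onion_messenger.next_onion_message_for_peer(peer_node_id) {
//         // ... enqueue `onion_message` on the wire for `peer_node_id` ...
//     }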
// TODO: parameterize the below Simple* types with OnionMessenger and handle the messages it
// produces
/// Useful for simplifying the parameters of [`SimpleArcChannelManager`] and
/// [`SimpleArcPeerManager`]. See their docs for more details.
///
/// (C-not exported) as `Arc`s don't make sense in bindings.
///
/// [`SimpleArcChannelManager`]: crate::ln::channelmanager::SimpleArcChannelManager
/// [`SimpleArcPeerManager`]: crate::ln::peer_handler::SimpleArcPeerManager
pub type SimpleArcOnionMessenger<L> = OnionMessenger<InMemorySigner, Arc<KeysManager>, Arc<L>>;
/// Useful for simplifying the parameters of [`SimpleRefChannelManager`] and
/// [`SimpleRefPeerManager`]. See their docs for more details.
///
/// (C-not exported) as general type aliases don't make sense in bindings.
///
/// [`SimpleRefChannelManager`]: crate::ln::channelmanager::SimpleRefChannelManager
/// [`SimpleRefPeerManager`]: crate::ln::peer_handler::SimpleRefPeerManager
pub type SimpleRefOnionMessenger<'a, 'b, L> = OnionMessenger<InMemorySigner, &'a KeysManager, &'b L>;
/// Construct onion packet payloads and keys for sending an onion message along the given
/// `unblinded_path` to the given `destination`.
fn packet_payloads_and_keys<T: secp256k1::Signing + secp256k1::Verification>(
secp_ctx: &Secp256k1<T>, unblinded_path: &[PublicKey], destination: Destination, mut reply_path:
Option<BlindedRoute>, session_priv: &SecretKey
) -> Result<(Vec<(Payload, [u8; 32])>, Vec<onion_utils::OnionKeys>), secp256k1::Error> {
let num_hops = unblinded_path.len() + destination.num_hops();
let mut payloads = Vec::with_capacity(num_hops);
let mut onion_packet_keys = Vec::with_capacity(num_hops);
let (mut intro_node_id_blinding_pt, num_blinded_hops) = if let Destination::BlindedRoute(BlindedRoute {
introduction_node_id, blinding_point, blinded_hops }) = &destination {
(Some((*introduction_node_id, *blinding_point)), blinded_hops.len()) } else { (None, 0) };
let num_unblinded_hops = num_hops - num_blinded_hops;
let mut unblinded_path_idx = 0;
let mut blinded_path_idx = 0;
let mut prev_control_tlvs_ss = None;
utils::construct_keys_callback(secp_ctx, unblinded_path, Some(destination), session_priv, |_, onion_packet_ss, ephemeral_pubkey, control_tlvs_ss, unblinded_pk_opt, enc_payload_opt| {
if num_unblinded_hops != 0 && unblinded_path_idx < num_unblinded_hops {
if let Some(ss) = prev_control_tlvs_ss.take() {
payloads.push((Payload::Forward(ForwardControlTlvs::Unblinded(
ForwardTlvs {
next_node_id: unblinded_pk_opt.unwrap(),
next_blinding_override: None,
}
)), ss));
}
prev_control_tlvs_ss = Some(control_tlvs_ss);
unblinded_path_idx += 1;
} else if let Some((intro_node_id, blinding_pt)) = intro_node_id_blinding_pt.take() {
if let Some(control_tlvs_ss) = prev_control_tlvs_ss.take() {
payloads.push((Payload::Forward(ForwardControlTlvs::Unblinded(ForwardTlvs {
next_node_id: intro_node_id,
next_blinding_override: Some(blinding_pt),
})), control_tlvs_ss));
}
if let Some(encrypted_payload) = enc_payload_opt {
payloads.push((Payload::Forward(ForwardControlTlvs::Blinded(encrypted_payload)),
control_tlvs_ss));
} else { debug_assert!(false); }
blinded_path_idx += 1;
} else if blinded_path_idx < num_blinded_hops - 1 && enc_payload_opt.is_some() {
payloads.push((Payload::Forward(ForwardControlTlvs::Blinded(enc_payload_opt.unwrap())),
control_tlvs_ss));
blinded_path_idx += 1;
} else if let Some(encrypted_payload) = enc_payload_opt {
payloads.push((Payload::Receive {
control_tlvs: ReceiveControlTlvs::Blinded(encrypted_payload),
reply_path: reply_path.take(),
}, control_tlvs_ss));
}
let (rho, mu) = onion_utils::gen_rho_mu_from_shared_secret(onion_packet_ss.as_ref());
onion_packet_keys.push(onion_utils::OnionKeys {
#[cfg(test)]
shared_secret: onion_packet_ss,
#[cfg(test)]
blinding_factor: [0; 32],
ephemeral_pubkey,
rho,
mu,
});
})?;
if let Some(control_tlvs_ss) = prev_control_tlvs_ss {
payloads.push((Payload::Receive {
control_tlvs: ReceiveControlTlvs::Unblinded(ReceiveTlvs { path_id: None, }),
reply_path: reply_path.take(),
}, control_tlvs_ss));
}
Ok((payloads, onion_packet_keys))
}
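// Note on the payload layout produced above: payloads are pushed one hop "behind" the key
// derivation. Each unblinded hop gets a Forward TLV naming the next node, the hop just before
// a blinded route gets a Forward TLV carrying the blinding override, the blinded hops get their
// pre-encrypted payloads verbatim, and the final hop gets a Receive payload carrying the reply
// path (if any).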
/// Errors if the serialized payload size exceeds onion_message::BIG_PACKET_HOP_DATA_LEN
fn construct_onion_message_packet(payloads: Vec<(Payload, [u8; 32])>, onion_keys: Vec<onion_utils::OnionKeys>, prng_seed: [u8; 32]) -> Result<Packet, ()> {
// Spec rationale:
// "`len` allows larger messages to be sent than the standard 1300 bytes allowed for an HTLC
// onion, but this should be used sparingly as it reduces the anonymity set, hence the
// recommendation that it either look like an HTLC onion, or if larger, be a fixed size."
let payloads_ser_len = onion_utils::payloads_serialized_length(&payloads);
let hop_data_len = if payloads_ser_len <= SMALL_PACKET_HOP_DATA_LEN {
SMALL_PACKET_HOP_DATA_LEN
} else if payloads_ser_len <= BIG_PACKET_HOP_DATA_LEN {
BIG_PACKET_HOP_DATA_LEN
} else { return Err(()) };
Ok(onion_utils::construct_onion_message_packet::<_, _>(
payloads, onion_keys, prng_seed, hop_data_len))
}
| {
match self {
Destination::Node(_) => 1,
Destination::BlindedRoute(BlindedRoute { blinded_hops, .. }) => blinded_hops.len(),
}
} | identifier_body |
joint_feldman.rs | //! Implements the Distributed Key Generation protocol from
//! [Pedersen](https://link.springer.com/content/pdf/10.1007%2F3-540-48910-X_21.pdf).
//! The protocol runs at minimum in two phases and at most in three phases.
use super::common::*;
use crate::primitives::{
group::Group,
phases::{Phase0, Phase1, Phase2, Phase3},
status::{Status, StatusMatrix},
types::*,
DKGError, DKGResult,
};
use threshold_bls::{
group::{Curve, Element},
poly::{Idx, Poly, PrivatePoly, PublicPoly},
sig::Share,
};
use rand_core::RngCore;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use std::{cell::RefCell, collections::HashMap, fmt::Debug};
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound = "C::Scalar: DeserializeOwned")]
struct DKGInfo<C: Curve> {
private_key: C::Scalar,
public_key: C::Point,
index: Idx,
group: Group<C>,
secret: Poly<C::Scalar>,
public: Poly<C::Point>,
}
impl<C: Curve> DKGInfo<C> {
/// Returns the number of nodes participating in the group for this DKG
fn n(&self) -> usize {
self.group.len()
}
/// Returns the threshold of the group for this DKG
fn thr(&self) -> usize {
self.group.threshold
}
}
/// DKG is the struct containing the logic to run the Distributed Key Generation
/// protocol from [Pedersen](https://link.springer.com/content/pdf/10.1007%2F3-540-48910-X_21.pdf).
///
/// The protocol runs at minimum in two phases and at most in three phases as
/// described in the module documentation.
///
/// Each transition to a new phase is consuming the DKG state (struct) to produce
/// a new state that only accepts to transition to the next phase.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound = "C::Scalar: DeserializeOwned")]
pub struct DKG<C: Curve> {
/// Metadata about the DKG
info: DKGInfo<C>,
}
impl<C: Curve> DKG<C> {
/// Creates a new DKG instance from the provided private key and group.
///
/// The private key must be part of the group, otherwise this will return an error.
pub fn new(private_key: C::Scalar, group: Group<C>) -> Result<DKG<C>, DKGError> {
use rand::prelude::*;
Self::new_rand(private_key, group, &mut thread_rng())
}
/// Creates a new DKG instance from the provided private key, group and RNG.
///
/// The private key must be part of the group, otherwise this will return an error.
pub fn new_rand<R: RngCore>(
private_key: C::Scalar,
group: Group<C>,
rng: &mut R,
) -> Result<DKG<C>, DKGError> {
// get the public key
let mut public_key = C::Point::one();
public_key.mul(&private_key);
// make sure the private key is not identity element nor neutral element
if private_key == C::Scalar::zero() || private_key == C::Scalar::one() {
return Err(DKGError::PrivateKeyInvalid);
}
// check if the public key is part of the group
let index = group
.index(&public_key)
.ok_or(DKGError::PublicKeyNotFound)?;
// Generate a secret polynomial and commit to it
let secret = PrivatePoly::<C>::new_from(group.threshold - 1, rng);
let public = secret.commit::<C::Point>();
let info = DKGInfo {
private_key,
public_key,
index,
group,
secret,
public,
};
Ok(DKG { info })
}
}
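// Illustrative sketch (not part of the original file) of how a participant drives the phases
// end to end; `private_key`, `group`, `rng`, the broadcast of bundles and the collection of the
// other participants' bundles are assumed to happen in the caller's context:
//
//     let dkg = DKG::new(private_key, group)?;
//     let (phase1, my_shares) = dkg.encrypt_shares(&mut rng)?;               // Phase 0
//     let (phase2, my_responses) = phase1.process_shares(&bundles, false)?;  // Phase 1
//     match phase2.process_responses(&responses) {                           // Phase 2
//         Ok(output) => { /* done: output.share, output.public, output.qual */ }
//         Err(Ok((phase3, my_justifications))) => {
//             let output = phase3.process_justifications(&justifications)?; // Phase 3
//         }
//         Err(Err(e)) => { /* DKG error */ }
//     }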
impl<C: Curve> Phase0<C> for DKG<C> {
type Next = DKGWaitingShare<C>;
/// Evaluates the secret polynomial at the index of each DKG participant and encrypts
/// the result with the corresponding public key. Returns the bundled encrypted shares
/// as well as the next phase of the DKG.
fn encrypt_shares<R: RngCore>(
self,
rng: &mut R,
) -> DKGResult<(DKGWaitingShare<C>, Option<BundledShares<C>>)> {
let bundle = create_share_bundle(
self.info.index,
&self.info.secret,
&self.info.public,
&self.info.group,
rng,
)?;
let dw = DKGWaitingShare { info: self.info };
Ok((dw, Some(bundle)))
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(bound = "C::Scalar: DeserializeOwned")]
/// DKG Stage which waits to receive the shares from the previous phase's participants
/// as input. After processing the shares, if there were any complaints it will generate
/// a bundle of responses for the next phase.
pub struct DKGWaitingShare<C: Curve> {
/// Metadata about the DKG
info: DKGInfo<C>,
}
impl<C: Curve> Phase1<C> for DKGWaitingShare<C> {
type Next = DKGWaitingResponse<C>;
#[allow(unused_assignments)]
/// Tries to decrypt the provided shares and calculate the secret key and the
/// threshold public key. If `publish_all` is set to true then the returned
/// responses will include both complaints and successful statuses. Consider setting
/// it to false when communication complexity is high.
///
/// A complaint is returned in the following cases:
/// - invalid dealer index
/// - absentee shares for us
/// - invalid encryption
/// - invalid length of public polynomial
/// - invalid share w.r.t. public polynomial
fn process_shares(
self,
bundles: &[BundledShares<C>],
mut publish_all: bool,
) -> DKGResult<(DKGWaitingResponse<C>, Option<BundledResponses>)> {
publish_all = false;
let thr = self.info.thr();
let my_idx = self.info.index;
let (shares, publics, mut statuses) = process_shares_get_all(
&self.info.group,
&self.info.group,
Some(my_idx),
my_idx,
&self.info.private_key,
bundles,
)?;
// in DKG every dealer is also a share holder, we assume that a dealer
// will issue a valid share for itself
for n in self.info.group.nodes.iter() {
statuses.set(n.id(), n.id(), Status::Success);
}
// we check with `thr - 1` because we already have our shares
if shares.len() < thr - 1 {
// that means the threat model is not respected since there should
// be at least a threshold of honest shares
return Err(DKGError::NotEnoughValidShares(shares.len(), thr));
}
// The user's secret share is the sum of all received shares (remember:
// each share is an evaluation of a participant's private polynomial at
// our index)
let mut fshare = self.info.secret.eval(self.info.index).value;
// The public key polynomial is the sum of all shared polynomials
let mut fpub = self.info.public.clone();
shares.iter().for_each(|(&dealer_idx, share)| {
fpub.add(publics.get(&dealer_idx).unwrap());
fshare.add(share);
});
let bundle = compute_bundle_response(my_idx, &statuses, publish_all);
let new_dkg = DKGWaitingResponse::new(self.info, fshare, fpub, statuses, publics);
Ok((new_dkg, bundle))
}
}
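// Illustrative, self-contained sketch (plain integers rather than curve scalars, not part of
// the original file): the final share computed in `process_shares` is the sum of every dealer's
// polynomial evaluated at our index, which equals the evaluation of the summed polynomial, the
// same identity the code above relies on.
#[allow(dead_code)]
fn _sum_of_shares_sketch() {
    // Toy "private polynomials" f1(x) = 3 + 2x and f2(x) = 5 + 7x, coefficients low-to-high.
    let eval = |coeffs: &[i64], x: i64| coeffs.iter().rev().fold(0, |acc, c| acc * x + c);
    let (f1, f2) = ([3i64, 2], [5i64, 7]);
    let idx = 4;
    let share_sum = eval(&f1, idx) + eval(&f2, idx); // 11 + 33 = 44
    let f_sum = [8i64, 9]; // f1 + f2, coefficient-wise
    assert_eq!(share_sum, eval(&f_sum, idx)); // (f1 + f2)(4) = 44
}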
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(bound = "C::Scalar: DeserializeOwned")]
/// DKG Stage which waits to receive the responses from the previous phase's participants
/// as input. The responses will be processed and justifications may be generated as a byproduct
/// if there are complaints.
pub struct DKGWaitingResponse<C: Curve> {
info: DKGInfo<C>,
dist_share: C::Scalar,
dist_pub: PublicPoly<C>,
statuses: StatusMatrix,
publics: PublicInfo<C>,
}
impl<C: Curve> DKGWaitingResponse<C> {
fn new(
info: DKGInfo<C>,
dist_share: C::Scalar,
dist_pub: PublicPoly<C>,
statuses: StatusMatrix,
publics: PublicInfo<C>,
) -> Self {
Self {
info,
dist_share,
dist_pub,
statuses,
publics,
}
}
}
impl<C: Curve> Phase2<C> for DKGWaitingResponse<C> {
type Next = DKGWaitingJustification<C>;
#[allow(clippy::type_complexity)]
/// Checks if the responses when applied to the status matrix result in a
/// matrix with only `Success` elements. If so, the protocol terminates.
///
/// If there are complaints in the Status matrix, then it will return an
/// error with the justifications required for Phase 3 of the DKG.
fn process_responses(
self,
responses: &[BundledResponses],
) -> Result<DKGOutput<C>, DKGResult<(Self::Next, Option<BundledJustification<C>>)>> {
let info = self.info;
let mut statuses = self.statuses;
set_statuses(
info.index,
&info.group,
&info.group,
&mut statuses,
responses,
);
// find out if justifications are required
// if there is at least one participant that issued a complaint
let justifications_required = info.group.nodes.iter().any(|n| !statuses.all_true(n.id()));
if justifications_required {
let bundled_justifications =
get_justification(info.index, &info.secret, &info.public, &statuses);
let dkg = DKGWaitingJustification {
info,
dist_share: self.dist_share,
dist_pub: self.dist_pub,
statuses: RefCell::new(statuses),
publics: self.publics,
};
return Err(Ok((dkg, bundled_justifications)));
}
// bingo! Returns the final share now and stop the protocol
let share = Share {
index: info.index,
private: self.dist_share,
};
Ok(DKGOutput {
// everybody is qualified in this case since there is no
// complaint at all
qual: info.group,
public: self.dist_pub,
share,
})
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(bound = "C::Scalar: DeserializeOwned")]
/// DKG Stage which waits to receive the justifications from the previous phase's participants
/// as input to produce either the final DKG Output, or an error.
pub struct DKGWaitingJustification<C: Curve> {
// TODO: transform that into one info variable that gets default values for
// missing parts depending on the round of the protocol.
info: DKGInfo<C>,
dist_share: C::Scalar,
dist_pub: PublicPoly<C>,
// guaranteed to be of the right size (n)
statuses: RefCell<StatusMatrix>,
publics: HashMap<Idx, PublicPoly<C>>,
}
impl<C> Phase3<C> for DKGWaitingJustification<C>
where
C: Curve,
{
/// Accept a justification if the following conditions are true:
/// - bundle's dealer index is in range
/// - a justification was required for the given share (no-op)
/// - share corresponds to public polynomial received in the bundled shares during
/// first period.
/// Return an output if `len(qual) >= thr`
fn process_justifications(
self,
justifs: &[BundledJustification<C>],
) -> Result<DKGOutput<C>, DKGError> {
// Calculate the share and public polynomial from the provided justifications
// (they will later be added to our existing share and public polynomial)
let mut add_share = C::Scalar::zero();
let mut add_public = PublicPoly::<C>::zero();
let valid_shares = internal_process_justifications(
self.info.index,
&self.info.group,
&mut self.statuses.borrow_mut(),
&self.publics,
justifs,
);
for (idx, share) in &valid_shares {
add_share.add(share);
// unwrap since internal_process_justifications guarantees each share comes
// from a public polynomial we've seen in the first round.
add_public.add(self.publics.get(idx).unwrap());
}
// QUAL is the set of all entries in the matrix where all bits are set
let statuses = self.statuses.borrow();
let qual_indices = (0..self.info.n())
.filter(|&dealer| statuses.all_true(dealer as Idx))
.collect::<Vec<_>>();
let thr = self.info.group.threshold;
if qual_indices.len() < thr {
// too many unanswered justifications, DKG abort!
return Err(DKGError::NotEnoughJustifications(qual_indices.len(), thr));
}
// create a group out of the qualifying nodes
let qual_nodes = self
.info
.group
.nodes
.into_iter()
.filter(|n| qual_indices.contains(&(n.id() as usize)))
.collect();
let group = Group::<C>::new(qual_nodes, thr)?;
// add all good shares and public poly together
add_share.add(&self.dist_share);
add_public.add(&self.dist_pub);
let ds = Share {
index: self.info.index,
private: add_share,
};
Ok(DKGOutput { | qual: group,
public: add_public,
share: ds,
})
}
}
#[cfg(test)]
pub mod tests {
use super::*;
use crate::primitives::{
common::tests::{check2, full_dkg, id_out, id_resp, invalid2, invalid_shares, setup_group},
default_threshold,
};
use std::fmt::Debug;
use threshold_bls::curve::bls12377::{G1Curve as BCurve, G1};
use serde::{de::DeserializeOwned, Serialize};
use static_assertions::assert_impl_all;
assert_impl_all!(Group<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(DKGInfo<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(DKG<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(EncryptedShare<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(BundledShares<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(DKGOutput<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(BundledJustification<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
fn setup_dkg<C: Curve>(n: usize) -> Vec<DKG<C>> {
let (privs, group) = setup_group::<C>(n, default_threshold(n));
privs
.into_iter()
.map(|p| DKG::new(p, group.clone()).unwrap())
.collect::<Vec<_>>()
}
#[test]
fn group_index() {
let n = 6;
let (privs, group) = setup_group::<BCurve>(n, default_threshold(n));
for (i, private) in privs.iter().enumerate() {
let mut public = G1::one();
public.mul(private);
let idx = group.index(&public).expect("should find public key");
assert_eq!(idx, i as Idx);
}
}
#[test]
fn test_full_dkg() {
let n = 5;
let thr = default_threshold(n);
full_dkg(thr, setup_dkg::<BCurve>(n));
}
#[test]
fn test_invalid_shares_dkg() {
let n = 5;
let thr = default_threshold(n);
invalid_shares(
thr,
setup_dkg::<BCurve>(n),
invalid2,
id_resp,
check2,
id_out,
)
.unwrap();
}
} | random_line_split |
|
joint_feldman.rs | //! Implements the Distributed Key Generation protocol from
//! [Pedersen](https://link.springer.com/content/pdf/10.1007%2F3-540-48910-X_21.pdf).
//! The protocol runs at minimum in two phases and at most in three phases.
use super::common::*;
use crate::primitives::{
group::Group,
phases::{Phase0, Phase1, Phase2, Phase3},
status::{Status, StatusMatrix},
types::*,
DKGError, DKGResult,
};
use threshold_bls::{
group::{Curve, Element},
poly::{Idx, Poly, PrivatePoly, PublicPoly},
sig::Share,
};
use rand_core::RngCore;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use std::{cell::RefCell, collections::HashMap, fmt::Debug};
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound = "C::Scalar: DeserializeOwned")]
struct DKGInfo<C: Curve> {
private_key: C::Scalar,
public_key: C::Point,
index: Idx,
group: Group<C>,
secret: Poly<C::Scalar>,
public: Poly<C::Point>,
}
impl<C: Curve> DKGInfo<C> {
/// Returns the number of nodes participating in the group for this DKG
fn n(&self) -> usize {
self.group.len()
}
/// Returns the threshold of the group for this DKG
fn thr(&self) -> usize {
self.group.threshold
}
}
/// DKG is the struct containing the logic to run the Distributed Key Generation
/// protocol from [Pedersen](https://link.springer.com/content/pdf/10.1007%2F3-540-48910-X_21.pdf).
///
/// The protocol runs at minimum in two phases and at most in three phases as
/// described in the module documentation.
///
/// Each transition to a new phase is consuming the DKG state (struct) to produce
/// a new state that only accepts to transition to the next phase.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound = "C::Scalar: DeserializeOwned")]
pub struct DKG<C: Curve> {
/// Metadata about the DKG
info: DKGInfo<C>,
}
impl<C: Curve> DKG<C> {
/// Creates a new DKG instance from the provided private key and group.
///
/// The private key must be part of the group, otherwise this will return an error.
pub fn new(private_key: C::Scalar, group: Group<C>) -> Result<DKG<C>, DKGError> {
use rand::prelude::*;
Self::new_rand(private_key, group, &mut thread_rng())
}
/// Creates a new DKG instance from the provided private key, group and RNG.
///
/// The private key must be part of the group, otherwise this will return an error.
pub fn new_rand<R: RngCore>(
private_key: C::Scalar,
group: Group<C>,
rng: &mut R,
) -> Result<DKG<C>, DKGError> {
// get the public key
let mut public_key = C::Point::one();
public_key.mul(&private_key);
// make sure the private key is not identity element nor neutral element
if private_key == C::Scalar::zero() || private_key == C::Scalar::one() |
// check if the public key is part of the group
let index = group
.index(&public_key)
.ok_or(DKGError::PublicKeyNotFound)?;
// Generate a secret polynomial and commit to it
let secret = PrivatePoly::<C>::new_from(group.threshold - 1, rng);
let public = secret.commit::<C::Point>();
let info = DKGInfo {
private_key,
public_key,
index,
group,
secret,
public,
};
Ok(DKG { info })
}
}
impl<C: Curve> Phase0<C> for DKG<C> {
type Next = DKGWaitingShare<C>;
/// Evaluates the secret polynomial at the index of each DKG participant and encrypts
/// the result with the corresponding public key. Returns the bundled encrypted shares
/// as well as the next phase of the DKG.
fn encrypt_shares<R: RngCore>(
self,
rng: &mut R,
) -> DKGResult<(DKGWaitingShare<C>, Option<BundledShares<C>>)> {
let bundle = create_share_bundle(
self.info.index,
&self.info.secret,
&self.info.public,
&self.info.group,
rng,
)?;
let dw = DKGWaitingShare { info: self.info };
Ok((dw, Some(bundle)))
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(bound = "C::Scalar: DeserializeOwned")]
/// DKG Stage which waits to receive the shares from the previous phase's participants
/// as input. After processing the shares, if there were any complaints it will generate
/// a bundle of responses for the next phase.
pub struct DKGWaitingShare<C: Curve> {
/// Metadata about the DKG
info: DKGInfo<C>,
}
impl<C: Curve> Phase1<C> for DKGWaitingShare<C> {
type Next = DKGWaitingResponse<C>;
#[allow(unused_assignments)]
/// Tries to decrypt the provided shares and calculate the secret key and the
/// threshold public key. If `publish_all` is set to true then the returned
/// responses will include both complaints and successful statuses. Consider setting
/// it to false when communication complexity is high.
///
/// A complaint is returned in the following cases:
/// - invalid dealer index
/// - absentee shares for us
/// - invalid encryption
/// - invalid length of public polynomial
/// - invalid share w.r.t. public polynomial
fn process_shares(
self,
bundles: &[BundledShares<C>],
mut publish_all: bool,
) -> DKGResult<(DKGWaitingResponse<C>, Option<BundledResponses>)> {
publish_all = false;
let thr = self.info.thr();
let my_idx = self.info.index;
let (shares, publics, mut statuses) = process_shares_get_all(
&self.info.group,
&self.info.group,
Some(my_idx),
my_idx,
&self.info.private_key,
bundles,
)?;
// in DKG every dealer is also a share holder, we assume that a dealer
// will issue a valid share for itself
for n in self.info.group.nodes.iter() {
statuses.set(n.id(), n.id(), Status::Success);
}
// we check with `thr - 1` because we already have our shares
if shares.len() < thr - 1 {
// that means the threat model is not respected since there should
// be at least a threshold of honest shares
return Err(DKGError::NotEnoughValidShares(shares.len(), thr));
}
// The user's secret share is the sum of all received shares (remember:
// each share is an evaluation of a participant's private polynomial at
// our index)
let mut fshare = self.info.secret.eval(self.info.index).value;
// The public key polynomial is the sum of all shared polynomials
let mut fpub = self.info.public.clone();
shares.iter().for_each(|(&dealer_idx, share)| {
fpub.add(publics.get(&dealer_idx).unwrap());
fshare.add(share);
});
let bundle = compute_bundle_response(my_idx, &statuses, publish_all);
let new_dkg = DKGWaitingResponse::new(self.info, fshare, fpub, statuses, publics);
Ok((new_dkg, bundle))
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(bound = "C::Scalar: DeserializeOwned")]
/// DKG Stage which waits to receive the responses from the previous phase's participants
/// as input. The responses will be processed and justifications may be generated as a byproduct
/// if there are complaints.
pub struct DKGWaitingResponse<C: Curve> {
info: DKGInfo<C>,
dist_share: C::Scalar,
dist_pub: PublicPoly<C>,
statuses: StatusMatrix,
publics: PublicInfo<C>,
}
impl<C: Curve> DKGWaitingResponse<C> {
fn new(
info: DKGInfo<C>,
dist_share: C::Scalar,
dist_pub: PublicPoly<C>,
statuses: StatusMatrix,
publics: PublicInfo<C>,
) -> Self {
Self {
info,
dist_share,
dist_pub,
statuses,
publics,
}
}
}
impl<C: Curve> Phase2<C> for DKGWaitingResponse<C> {
type Next = DKGWaitingJustification<C>;
#[allow(clippy::type_complexity)]
/// Checks if the responses when applied to the status matrix result in a
/// matrix with only `Success` elements. If so, the protocol terminates.
///
/// If there are complaints in the Status matrix, then it will return an
/// error with the justifications required for Phase 3 of the DKG.
fn process_responses(
self,
responses: &[BundledResponses],
) -> Result<DKGOutput<C>, DKGResult<(Self::Next, Option<BundledJustification<C>>)>> {
let info = self.info;
let mut statuses = self.statuses;
set_statuses(
info.index,
&info.group,
&info.group,
&mut statuses,
responses,
);
// find out if justifications are required
// if there is at least one participant that issued a complaint
let justifications_required = info.group.nodes.iter().any(|n| !statuses.all_true(n.id()));
if justifications_required {
let bundled_justifications =
get_justification(info.index, &info.secret, &info.public, &statuses);
let dkg = DKGWaitingJustification {
info,
dist_share: self.dist_share,
dist_pub: self.dist_pub,
statuses: RefCell::new(statuses),
publics: self.publics,
};
return Err(Ok((dkg, bundled_justifications)));
}
// bingo! Returns the final share now and stop the protocol
let share = Share {
index: info.index,
private: self.dist_share,
};
Ok(DKGOutput {
// everybody is qualified in this case since there is no
// complaint at all
qual: info.group,
public: self.dist_pub,
share,
})
}
}
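// Illustrative, self-contained sketch (plain bools rather than the StatusMatrix type, not part
// of the original file): justifications are required as soon as any dealer's status row
// contains a complaint, i.e. not every bit in that row is set.
#[allow(dead_code)]
fn _justifications_required_sketch() {
    let statuses = vec![
        vec![true, true, true],  // dealer 0: fully acknowledged
        vec![true, false, true], // dealer 1: participant 1 complained
        vec![true, true, true],  // dealer 2: fully acknowledged
    ];
    let justifications_required = statuses.iter().any(|row| !row.iter().all(|&b| b));
    assert!(justifications_required);
}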
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(bound = "C::Scalar: DeserializeOwned")]
/// DKG Stage which waits to receive the justifications from the previous phase's participants
/// as input to produce either the final DKG Output, or an error.
pub struct DKGWaitingJustification<C: Curve> {
// TODO: transform that into one info variable that gets default values for
// missing parts depending on the round of the protocol.
info: DKGInfo<C>,
dist_share: C::Scalar,
dist_pub: PublicPoly<C>,
// guaranteed to be of the right size (n)
statuses: RefCell<StatusMatrix>,
publics: HashMap<Idx, PublicPoly<C>>,
}
impl<C> Phase3<C> for DKGWaitingJustification<C>
where
C: Curve,
{
/// Accept a justification if the following conditions are true:
/// - bundle's dealer index is in range
/// - a justification was required for the given share (no-op)
/// - share corresponds to public polynomial received in the bundled shares during
/// first period.
/// Return an output if `len(qual) >= thr`
fn process_justifications(
self,
justifs: &[BundledJustification<C>],
) -> Result<DKGOutput<C>, DKGError> {
// Calculate the share and public polynomial from the provided justifications
// (they will later be added to our existing share and public polynomial)
let mut add_share = C::Scalar::zero();
let mut add_public = PublicPoly::<C>::zero();
let valid_shares = internal_process_justifications(
self.info.index,
&self.info.group,
&mut self.statuses.borrow_mut(),
&self.publics,
justifs,
);
for (idx, share) in &valid_shares {
add_share.add(share);
// unwrap since internal_process_justifications guarantees each share comes
// from a public polynomial we've seen in the first round.
add_public.add(self.publics.get(idx).unwrap());
}
// QUAL is the set of all entries in the matrix where all bits are set
let statuses = self.statuses.borrow();
let qual_indices = (0..self.info.n())
.filter(|&dealer| statuses.all_true(dealer as Idx))
.collect::<Vec<_>>();
let thr = self.info.group.threshold;
if qual_indices.len() < thr {
// too many unanswered justifications, DKG abort!
return Err(DKGError::NotEnoughJustifications(qual_indices.len(), thr));
}
// create a group out of the qualifying nodes
let qual_nodes = self
.info
.group
.nodes
.into_iter()
.filter(|n| qual_indices.contains(&(n.id() as usize)))
.collect();
let group = Group::<C>::new(qual_nodes, thr)?;
// add all good shares and public poly together
add_share.add(&self.dist_share);
add_public.add(&self.dist_pub);
let ds = Share {
index: self.info.index,
private: add_share,
};
Ok(DKGOutput {
qual: group,
public: add_public,
share: ds,
})
}
}
#[cfg(test)]
pub mod tests {
use super::*;
use crate::primitives::{
common::tests::{check2, full_dkg, id_out, id_resp, invalid2, invalid_shares, setup_group},
default_threshold,
};
use std::fmt::Debug;
use threshold_bls::curve::bls12377::{G1Curve as BCurve, G1};
use serde::{de::DeserializeOwned, Serialize};
use static_assertions::assert_impl_all;
assert_impl_all!(Group<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(DKGInfo<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(DKG<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(EncryptedShare<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(BundledShares<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(DKGOutput<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(BundledJustification<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
fn setup_dkg<C: Curve>(n: usize) -> Vec<DKG<C>> {
let (privs, group) = setup_group::<C>(n, default_threshold(n));
privs
.into_iter()
.map(|p| DKG::new(p, group.clone()).unwrap())
.collect::<Vec<_>>()
}
#[test]
fn group_index() {
let n = 6;
let (privs, group) = setup_group::<BCurve>(n, default_threshold(n));
for (i, private) in privs.iter().enumerate() {
let mut public = G1::one();
public.mul(private);
let idx = group.index(&public).expect("should find public key");
assert_eq!(idx, i as Idx);
}
}
#[test]
fn test_full_dkg() {
let n = 5;
let thr = default_threshold(n);
full_dkg(thr, setup_dkg::<BCurve>(n));
}
#[test]
fn test_invalid_shares_dkg() {
let n = 5;
let thr = default_threshold(n);
invalid_shares(
thr,
setup_dkg::<BCurve>(n),
invalid2,
id_resp,
check2,
id_out,
)
.unwrap();
}
}
| {
return Err(DKGError::PrivateKeyInvalid);
} | conditional_block |
joint_feldman.rs | //! Implements the Distributed Key Generation protocol from
//! [Pedersen](https://link.springer.com/content/pdf/10.1007%2F3-540-48910-X_21.pdf).
//! The protocol runs at minimum in two phases and at most in three phases.
use super::common::*;
use crate::primitives::{
group::Group,
phases::{Phase0, Phase1, Phase2, Phase3},
status::{Status, StatusMatrix},
types::*,
DKGError, DKGResult,
};
use threshold_bls::{
group::{Curve, Element},
poly::{Idx, Poly, PrivatePoly, PublicPoly},
sig::Share,
};
use rand_core::RngCore;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use std::{cell::RefCell, collections::HashMap, fmt::Debug};
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound = "C::Scalar: DeserializeOwned")]
struct DKGInfo<C: Curve> {
private_key: C::Scalar,
public_key: C::Point,
index: Idx,
group: Group<C>,
secret: Poly<C::Scalar>,
public: Poly<C::Point>,
}
impl<C: Curve> DKGInfo<C> {
/// Returns the number of nodes participating in the group for this DKG
fn n(&self) -> usize {
self.group.len()
}
/// Returns the threshold of the group for this DKG
fn thr(&self) -> usize {
self.group.threshold
}
}
/// DKG is the struct containing the logic to run the Distributed Key Generation
/// protocol from [Pedersen](https://link.springer.com/content/pdf/10.1007%2F3-540-48910-X_21.pdf).
///
/// The protocol runs at minimum in two phases and at most in three phases as
/// described in the module documentation.
///
/// Each transition to a new phase is consuming the DKG state (struct) to produce
/// a new state that only accepts to transition to the next phase.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound = "C::Scalar: DeserializeOwned")]
pub struct DKG<C: Curve> {
/// Metadata about the DKG
info: DKGInfo<C>,
}
impl<C: Curve> DKG<C> {
/// Creates a new DKG instance from the provided private key and group.
///
/// The private key must be part of the group, otherwise this will return an error.
pub fn new(private_key: C::Scalar, group: Group<C>) -> Result<DKG<C>, DKGError> {
use rand::prelude::*;
Self::new_rand(private_key, group, &mut thread_rng())
}
/// Creates a new DKG instance from the provided private key, group and RNG.
///
/// The private key must be part of the group, otherwise this will return an error.
pub fn new_rand<R: RngCore>(
private_key: C::Scalar,
group: Group<C>,
rng: &mut R,
) -> Result<DKG<C>, DKGError> {
// get the public key
let mut public_key = C::Point::one();
public_key.mul(&private_key);
// make sure the private key is not identity element nor neutral element
if private_key == C::Scalar::zero() || private_key == C::Scalar::one() {
return Err(DKGError::PrivateKeyInvalid);
}
// check if the public key is part of the group
let index = group
.index(&public_key)
.ok_or(DKGError::PublicKeyNotFound)?;
// Generate a secret polynomial and commit to it
let secret = PrivatePoly::<C>::new_from(group.threshold - 1, rng);
let public = secret.commit::<C::Point>();
let info = DKGInfo {
private_key,
public_key,
index,
group,
secret,
public,
};
Ok(DKG { info })
}
}
impl<C: Curve> Phase0<C> for DKG<C> {
type Next = DKGWaitingShare<C>;
/// Evaluates the secret polynomial at the index of each DKG participant and encrypts
/// the result with the corresponding public key. Returns the bundled encrypted shares
/// as well as the next phase of the DKG.
fn encrypt_shares<R: RngCore>(
self,
rng: &mut R,
) -> DKGResult<(DKGWaitingShare<C>, Option<BundledShares<C>>)> |
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(bound = "C::Scalar: DeserializeOwned")]
/// DKG Stage which waits to receive the shares from the previous phase's participants
/// as input. After processing the shares, if there were any complaints it will generate
/// a bundle of responses for the next phase.
pub struct DKGWaitingShare<C: Curve> {
/// Metadata about the DKG
info: DKGInfo<C>,
}
impl<C: Curve> Phase1<C> for DKGWaitingShare<C> {
type Next = DKGWaitingResponse<C>;
#[allow(unused_assignments)]
/// Tries to decrypt the provided shares and calculate the secret key and the
/// threshold public key. If `publish_all` is set to true then the returned
/// responses will include both complaints and successful statuses. Consider setting
/// it to false when communication complexity is high.
///
/// A complaint is returned in the following cases:
/// - invalid dealer index
/// - absentee shares for us
/// - invalid encryption
/// - invalid length of public polynomial
/// - invalid share w.r.t. public polynomial
fn process_shares(
self,
bundles: &[BundledShares<C>],
mut publish_all: bool,
) -> DKGResult<(DKGWaitingResponse<C>, Option<BundledResponses>)> {
publish_all = false;
let thr = self.info.thr();
let my_idx = self.info.index;
let (shares, publics, mut statuses) = process_shares_get_all(
&self.info.group,
&self.info.group,
Some(my_idx),
my_idx,
&self.info.private_key,
bundles,
)?;
// in DKG every dealer is also a share holder, we assume that a dealer
// will issue a valid share for itself
for n in self.info.group.nodes.iter() {
statuses.set(n.id(), n.id(), Status::Success);
}
// we check with `thr - 1` because we already have our shares
if shares.len() < thr - 1 {
// that means the threat model is not respected since there should
// be at least a threshold of honest shares
return Err(DKGError::NotEnoughValidShares(shares.len(), thr));
}
// The user's secret share is the sum of all received shares (remember:
// each share is an evaluation of a participant's private polynomial at
// our index)
let mut fshare = self.info.secret.eval(self.info.index).value;
// The public key polynomial is the sum of all shared polynomials
let mut fpub = self.info.public.clone();
shares.iter().for_each(|(&dealer_idx, share)| {
fpub.add(publics.get(&dealer_idx).unwrap());
fshare.add(share);
});
let bundle = compute_bundle_response(my_idx, &statuses, publish_all);
let new_dkg = DKGWaitingResponse::new(self.info, fshare, fpub, statuses, publics);
Ok((new_dkg, bundle))
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(bound = "C::Scalar: DeserializeOwned")]
/// DKG Stage which waits to receive the responses from the previous phase's participants
/// as input. The responses will be processed and justifications may be generated as a byproduct
/// if there are complaints.
pub struct DKGWaitingResponse<C: Curve> {
info: DKGInfo<C>,
dist_share: C::Scalar,
dist_pub: PublicPoly<C>,
statuses: StatusMatrix,
publics: PublicInfo<C>,
}
impl<C: Curve> DKGWaitingResponse<C> {
fn new(
info: DKGInfo<C>,
dist_share: C::Scalar,
dist_pub: PublicPoly<C>,
statuses: StatusMatrix,
publics: PublicInfo<C>,
) -> Self {
Self {
info,
dist_share,
dist_pub,
statuses,
publics,
}
}
}
impl<C: Curve> Phase2<C> for DKGWaitingResponse<C> {
type Next = DKGWaitingJustification<C>;
#[allow(clippy::type_complexity)]
/// Checks if the responses when applied to the status matrix result in a
/// matrix with only `Success` elements. If so, the protocol terminates.
///
/// If there are complaints in the Status matrix, then it will return an
/// error with the justifications required for Phase 3 of the DKG.
fn process_responses(
self,
responses: &[BundledResponses],
) -> Result<DKGOutput<C>, DKGResult<(Self::Next, Option<BundledJustification<C>>)>> {
let info = self.info;
let mut statuses = self.statuses;
set_statuses(
info.index,
&info.group,
&info.group,
&mut statuses,
responses,
);
// find out if justifications are required
// if there is at least one participant that issued a complaint
let justifications_required = info.group.nodes.iter().any(|n| !statuses.all_true(n.id()));
if justifications_required {
let bundled_justifications =
get_justification(info.index, &info.secret, &info.public, &statuses);
let dkg = DKGWaitingJustification {
info,
dist_share: self.dist_share,
dist_pub: self.dist_pub,
statuses: RefCell::new(statuses),
publics: self.publics,
};
return Err(Ok((dkg, bundled_justifications)));
}
// bingo! Returns the final share now and stop the protocol
let share = Share {
index: info.index,
private: self.dist_share,
};
Ok(DKGOutput {
// everybody is qualified in this case since there is no
// complaint at all
qual: info.group,
public: self.dist_pub,
share,
})
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(bound = "C::Scalar: DeserializeOwned")]
/// DKG Stage which waits to receive the justifications from the previous phase's participants
/// as input to produce either the final DKG Output, or an error.
pub struct DKGWaitingJustification<C: Curve> {
// TODO: transform that into one info variable that gets default values for
// missing parts depending on the round of the protocol.
info: DKGInfo<C>,
dist_share: C::Scalar,
dist_pub: PublicPoly<C>,
// guaranteed to be of the right size (n)
statuses: RefCell<StatusMatrix>,
publics: HashMap<Idx, PublicPoly<C>>,
}
impl<C> Phase3<C> for DKGWaitingJustification<C>
where
C: Curve,
{
/// Accept a justification if the following conditions are true:
/// - bundle's dealer index is in range
/// - a justification was required for the given share (no-op)
/// - share corresponds to public polynomial received in the bundled shares during
/// first period.
/// Return an output if `len(qual) >= thr`
fn process_justifications(
self,
justifs: &[BundledJustification<C>],
) -> Result<DKGOutput<C>, DKGError> {
// Calculate the share and public polynomial from the provided justifications
// (they will later be added to our existing share and public polynomial)
let mut add_share = C::Scalar::zero();
let mut add_public = PublicPoly::<C>::zero();
let valid_shares = internal_process_justifications(
self.info.index,
&self.info.group,
&mut self.statuses.borrow_mut(),
&self.publics,
justifs,
);
for (idx, share) in &valid_shares {
add_share.add(share);
// unwrap since internal_process_justifications guarantees each share comes
// from a public polynomial we've seen in the first round.
add_public.add(self.publics.get(idx).unwrap());
}
// QUAL is the set of all entries in the matrix where all bits are set
let statuses = self.statuses.borrow();
let qual_indices = (0..self.info.n())
.filter(|&dealer| statuses.all_true(dealer as Idx))
.collect::<Vec<_>>();
let thr = self.info.group.threshold;
if qual_indices.len() < thr {
// too many unanswered justifications, DKG abort!
return Err(DKGError::NotEnoughJustifications(qual_indices.len(), thr));
}
// create a group out of the qualifying nodes
let qual_nodes = self
.info
.group
.nodes
.into_iter()
.filter(|n| qual_indices.contains(&(n.id() as usize)))
.collect();
let group = Group::<C>::new(qual_nodes, thr)?;
// add all good shares and public poly together
add_share.add(&self.dist_share);
add_public.add(&self.dist_pub);
let ds = Share {
index: self.info.index,
private: add_share,
};
Ok(DKGOutput {
qual: group,
public: add_public,
share: ds,
})
}
}
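// Illustrative, self-contained sketch (indices instead of group nodes, not part of the original
// file): QUAL is the set of dealers whose whole status row ends up `true`, and the DKG only
// terminates successfully if at least `threshold` of them remain.
#[allow(dead_code)]
fn _qual_sketch() {
    let statuses = vec![
        vec![true, true, true],
        vec![true, false, true], // dealer 1 never justified the complaint against it
        vec![true, true, true],
    ];
    let threshold = 2;
    let qual: Vec<usize> = (0..statuses.len())
        .filter(|&dealer| statuses[dealer].iter().all(|&b| b))
        .collect();
    assert_eq!(qual, vec![0, 2]);
    assert!(qual.len() >= threshold);
}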
#[cfg(test)]
pub mod tests {
use super::*;
use crate::primitives::{
common::tests::{check2, full_dkg, id_out, id_resp, invalid2, invalid_shares, setup_group},
default_threshold,
};
use std::fmt::Debug;
use threshold_bls::curve::bls12377::{G1Curve as BCurve, G1};
use serde::{de::DeserializeOwned, Serialize};
use static_assertions::assert_impl_all;
assert_impl_all!(Group<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(DKGInfo<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(DKG<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(EncryptedShare<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(BundledShares<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(DKGOutput<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(BundledJustification<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
fn setup_dkg<C: Curve>(n: usize) -> Vec<DKG<C>> {
let (privs, group) = setup_group::<C>(n, default_threshold(n));
privs
.into_iter()
.map(|p| DKG::new(p, group.clone()).unwrap())
.collect::<Vec<_>>()
}
#[test]
fn group_index() {
let n = 6;
let (privs, group) = setup_group::<BCurve>(n, default_threshold(n));
for (i, private) in privs.iter().enumerate() {
let mut public = G1::one();
public.mul(private);
let idx = group.index(&public).expect("should find public key");
assert_eq!(idx, i as Idx);
}
}
#[test]
fn test_full_dkg() {
let n = 5;
let thr = default_threshold(n);
full_dkg(thr, setup_dkg::<BCurve>(n));
}
#[test]
fn test_invalid_shares_dkg() {
let n = 5;
let thr = default_threshold(n);
invalid_shares(
thr,
setup_dkg::<BCurve>(n),
invalid2,
id_resp,
check2,
id_out,
)
.unwrap();
}
}
| {
let bundle = create_share_bundle(
self.info.index,
&self.info.secret,
&self.info.public,
&self.info.group,
rng,
)?;
let dw = DKGWaitingShare { info: self.info };
Ok((dw, Some(bundle)))
} | identifier_body |
joint_feldman.rs | //! Implements the Distributed Key Generation protocol from
//! [Pedersen](https://link.springer.com/content/pdf/10.1007%2F3-540-48910-X_21.pdf).
//! The protocol runs at minimum in two phases and at most in three phases.
use super::common::*;
use crate::primitives::{
group::Group,
phases::{Phase0, Phase1, Phase2, Phase3},
status::{Status, StatusMatrix},
types::*,
DKGError, DKGResult,
};
use threshold_bls::{
group::{Curve, Element},
poly::{Idx, Poly, PrivatePoly, PublicPoly},
sig::Share,
};
use rand_core::RngCore;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use std::{cell::RefCell, collections::HashMap, fmt::Debug};
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound = "C::Scalar: DeserializeOwned")]
struct DKGInfo<C: Curve> {
private_key: C::Scalar,
public_key: C::Point,
index: Idx,
group: Group<C>,
secret: Poly<C::Scalar>,
public: Poly<C::Point>,
}
impl<C: Curve> DKGInfo<C> {
/// Returns the number of nodes participating in the group for this DKG
fn n(&self) -> usize {
self.group.len()
}
/// Returns the threshold of the group for this DKG
fn thr(&self) -> usize {
self.group.threshold
}
}
/// DKG is the struct containing the logic to run the Distributed Key Generation
/// protocol from [Pedersen](https://link.springer.com/content/pdf/10.1007%2F3-540-48910-X_21.pdf).
///
/// The protocol runs at minimum in two phases and at most in three phases as
/// described in the module documentation.
///
/// Each transition to a new phase is consuming the DKG state (struct) to produce
/// a new state that only accepts to transition to the next phase.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound = "C::Scalar: DeserializeOwned")]
pub struct DKG<C: Curve> {
/// Metadata about the DKG
info: DKGInfo<C>,
}
impl<C: Curve> DKG<C> {
/// Creates a new DKG instance from the provided private key and group.
///
/// The private key must be part of the group, otherwise this will return an error.
pub fn new(private_key: C::Scalar, group: Group<C>) -> Result<DKG<C>, DKGError> {
use rand::prelude::*;
Self::new_rand(private_key, group, &mut thread_rng())
}
/// Creates a new DKG instance from the provided private key, group and RNG.
///
/// The private key must be part of the group, otherwise this will return an error.
pub fn | <R: RngCore>(
private_key: C::Scalar,
group: Group<C>,
rng: &mut R,
) -> Result<DKG<C>, DKGError> {
// get the public key
let mut public_key = C::Point::one();
public_key.mul(&private_key);
// make sure the private key is not identity element nor neutral element
if private_key == C::Scalar::zero() || private_key == C::Scalar::one() {
return Err(DKGError::PrivateKeyInvalid);
}
// check if the public key is part of the group
let index = group
.index(&public_key)
.ok_or(DKGError::PublicKeyNotFound)?;
// Generate a secret polynomial and commit to it
let secret = PrivatePoly::<C>::new_from(group.threshold - 1, rng);
let public = secret.commit::<C::Point>();
let info = DKGInfo {
private_key,
public_key,
index,
group,
secret,
public,
};
Ok(DKG { info })
}
}
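// Illustrative sketch (not part of the original file): constructing a participant with an
// explicit RNG and producing the Phase 0 share bundle. `SomeCurve`, `my_private_key`, `group`
// and `rng` are placeholders assumed to come from the caller's context:
//
//     let dkg = DKG::<SomeCurve>::new_rand(my_private_key, group.clone(), &mut rng)?;
//     let (waiting_share, bundle) = dkg.encrypt_shares(&mut rng)?;
//     // broadcast `bundle` (if any) to the other participants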
impl<C: Curve> Phase0<C> for DKG<C> {
type Next = DKGWaitingShare<C>;
/// Evaluates the secret polynomial at the index of each DKG participant and encrypts
/// the result with the corresponding public key. Returns the bundled encrypted shares
/// as well as the next phase of the DKG.
fn encrypt_shares<R: RngCore>(
self,
rng: &mut R,
) -> DKGResult<(DKGWaitingShare<C>, Option<BundledShares<C>>)> {
let bundle = create_share_bundle(
self.info.index,
&self.info.secret,
&self.info.public,
&self.info.group,
rng,
)?;
let dw = DKGWaitingShare { info: self.info };
Ok((dw, Some(bundle)))
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(bound = "C::Scalar: DeserializeOwned")]
/// DKG Stage which waits to receive the shares from the previous phase's participants
/// as input. After processing the shares, if there were any complaints it will generate
/// a bundle of responses for the next phase.
pub struct DKGWaitingShare<C: Curve> {
/// Metadata about the DKG
info: DKGInfo<C>,
}
impl<C: Curve> Phase1<C> for DKGWaitingShare<C> {
type Next = DKGWaitingResponse<C>;
#[allow(unused_assignments)]
/// Tries to decrypt the provided shares and calculate the secret key and the
/// threshold public key. If `publish_all` is set to true then the returned
/// responses will include both complaints and successful statuses. Consider setting
/// it to false when communication complexity is high.
///
/// A complaint is returned in the following cases:
/// - invalid dealer index
/// - absentee shares for us
/// - invalid encryption
/// - invalid length of public polynomial
/// - invalid share w.r.t. public polynomial
fn process_shares(
self,
bundles: &[BundledShares<C>],
mut publish_all: bool,
) -> DKGResult<(DKGWaitingResponse<C>, Option<BundledResponses>)> {
publish_all = false;
let thr = self.info.thr();
let my_idx = self.info.index;
let (shares, publics, mut statuses) = process_shares_get_all(
&self.info.group,
&self.info.group,
Some(my_idx),
my_idx,
&self.info.private_key,
bundles,
)?;
// in DKG every dealer is also a share holder, we assume that a dealer
// will issue a valid share for itself
for n in self.info.group.nodes.iter() {
statuses.set(n.id(), n.id(), Status::Success);
}
// we check with `thr - 1` because we already have our shares
if shares.len() < thr - 1 {
// that means the threat model is not respected since there should
// be at least a threshold of honest shares
return Err(DKGError::NotEnoughValidShares(shares.len(), thr));
}
// The user's secret share is the sum of all received shares (remember:
// each share is an evaluation of a participant's private polynomial at
// our index)
let mut fshare = self.info.secret.eval(self.info.index).value;
// The public key polynomial is the sum of all shared polynomials
let mut fpub = self.info.public.clone();
shares.iter().for_each(|(&dealer_idx, share)| {
fpub.add(publics.get(&dealer_idx).unwrap());
fshare.add(share);
});
let bundle = compute_bundle_response(my_idx, &statuses, publish_all);
let new_dkg = DKGWaitingResponse::new(self.info, fshare, fpub, statuses, publics);
Ok((new_dkg, bundle))
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(bound = "C::Scalar: DeserializeOwned")]
/// DKG Stage which waits to receive the responses from the previous phase's participants
/// as input. The responses will be processed and justifications may be generated as a byproduct
/// if there are complaints.
pub struct DKGWaitingResponse<C: Curve> {
info: DKGInfo<C>,
dist_share: C::Scalar,
dist_pub: PublicPoly<C>,
statuses: StatusMatrix,
publics: PublicInfo<C>,
}
impl<C: Curve> DKGWaitingResponse<C> {
fn new(
info: DKGInfo<C>,
dist_share: C::Scalar,
dist_pub: PublicPoly<C>,
statuses: StatusMatrix,
publics: PublicInfo<C>,
) -> Self {
Self {
info,
dist_share,
dist_pub,
statuses,
publics,
}
}
}
impl<C: Curve> Phase2<C> for DKGWaitingResponse<C> {
type Next = DKGWaitingJustification<C>;
#[allow(clippy::type_complexity)]
/// Checks if the responses when applied to the status matrix result in a
/// matrix with only `Success` elements. If so, the protocol terminates.
///
/// If there are complaints in the Status matrix, then it will return an
/// error with the justifications required for Phase 3 of the DKG.
fn process_responses(
self,
responses: &[BundledResponses],
) -> Result<DKGOutput<C>, DKGResult<(Self::Next, Option<BundledJustification<C>>)>> {
let info = self.info;
let mut statuses = self.statuses;
set_statuses(
info.index,
&info.group,
&info.group,
&mut statuses,
responses,
);
// find out if justifications are required
// if there is at least one participant that issued a complaint
let justifications_required = info.group.nodes.iter().any(|n| !statuses.all_true(n.id()));
if justifications_required {
let bundled_justifications =
get_justification(info.index, &info.secret, &info.public, &statuses);
let dkg = DKGWaitingJustification {
info,
dist_share: self.dist_share,
dist_pub: self.dist_pub,
statuses: RefCell::new(statuses),
publics: self.publics,
};
return Err(Ok((dkg, bundled_justifications)));
}
// bingo! Return the final share now and stop the protocol
let share = Share {
index: info.index,
private: self.dist_share,
};
Ok(DKGOutput {
// everybody is qualified in this case since there is no
// complaint at all
qual: info.group,
public: self.dist_pub,
share,
})
}
}
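// Illustrative sketch only (not part of the original file): how a caller might
// unpack the nested `Result` returned by `process_responses`. It assumes
// `DKGResult<T>` is `Result<T, DKGError>`, as suggested by the usage above;
// the function name is hypothetical.
#[allow(dead_code)]
fn handle_phase2_example<C: Curve>(
    dkg: DKGWaitingResponse<C>,
    responses: &[BundledResponses],
) -> Result<DKGOutput<C>, DKGError> {
    match dkg.process_responses(responses) {
        // No complaints at all: the protocol terminates with the final output.
        Ok(output) => Ok(output),
        // Complaints were found: broadcast `_justifications` (if any) and later
        // feed everyone's bundles into `_next.process_justifications(...)`.
        Err(Ok((_next, _justifications))) => {
            unimplemented!("continue with Phase 3")
        }
        // Processing the responses itself failed.
        Err(Err(e)) => Err(e),
    }
}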
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(bound = "C::Scalar: DeserializeOwned")]
/// DKG Stage which waits to receive the justifications from the previous phase's participants
/// as input to produce either the final DKG Output, or an error.
pub struct DKGWaitingJustification<C: Curve> {
// TODO: transform that into one info variable that gets a default value for
// missing parts depending on the round of the protocol.
info: DKGInfo<C>,
dist_share: C::Scalar,
dist_pub: PublicPoly<C>,
// guaranteed to be of the right size (n)
statuses: RefCell<StatusMatrix>,
publics: HashMap<Idx, PublicPoly<C>>,
}
impl<C> Phase3<C> for DKGWaitingJustification<C>
where
C: Curve,
{
/// Accept a justification if the following conditions are true:
/// - bundle's dealer index is in range
/// - a justification was required for the given share (no-op)
/// - the share corresponds to the public polynomial received in the bundled shares during
/// the first phase.
/// Returns an output if `len(qual) >= thr`
fn process_justifications(
self,
justifs: &[BundledJustification<C>],
) -> Result<DKGOutput<C>, DKGError> {
// Calculate the share and public polynomial from the provided justifications
// (they will later be added to our existing share and public polynomial)
let mut add_share = C::Scalar::zero();
let mut add_public = PublicPoly::<C>::zero();
let valid_shares = internal_process_justifications(
self.info.index,
&self.info.group,
&mut self.statuses.borrow_mut(),
&self.publics,
justifs,
);
for (idx, share) in &valid_shares {
add_share.add(share);
// unwrap since internal_process_justifications guarantees each share comes
// from a public polynomial we've seen in the first round.
add_public.add(self.publics.get(idx).unwrap());
}
// QUAL is the set of dealers whose bits in the status matrix are all set
let statuses = self.statuses.borrow();
let qual_indices = (0..self.info.n())
.filter(|&dealer| statuses.all_true(dealer as Idx))
.collect::<Vec<_>>();
let thr = self.info.group.threshold;
if qual_indices.len() < thr {
// too many unanswered justifications, DKG abort!
return Err(DKGError::NotEnoughJustifications(qual_indices.len(), thr));
}
// create a group out of the qualifying nodes
let qual_nodes = self
.info
.group
.nodes
.into_iter()
.filter(|n| qual_indices.contains(&(n.id() as usize)))
.collect();
let group = Group::<C>::new(qual_nodes, thr)?;
// add all good shares and public poly together
add_share.add(&self.dist_share);
add_public.add(&self.dist_pub);
let ds = Share {
index: self.info.index,
private: add_share,
};
Ok(DKGOutput {
qual: group,
public: add_public,
share: ds,
})
}
}
#[cfg(test)]
pub mod tests {
use super::*;
use crate::primitives::{
common::tests::{check2, full_dkg, id_out, id_resp, invalid2, invalid_shares, setup_group},
default_threshold,
};
use std::fmt::Debug;
use threshold_bls::curve::bls12377::{G1Curve as BCurve, G1};
use serde::{de::DeserializeOwned, Serialize};
use static_assertions::assert_impl_all;
assert_impl_all!(Group<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(DKGInfo<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(DKG<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(EncryptedShare<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(BundledShares<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(DKGOutput<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
assert_impl_all!(BundledJustification<BCurve>: Serialize, DeserializeOwned, Clone, Debug);
fn setup_dkg<C: Curve>(n: usize) -> Vec<DKG<C>> {
let (privs, group) = setup_group::<C>(n, default_threshold(n));
privs
.into_iter()
.map(|p| DKG::new(p, group.clone()).unwrap())
.collect::<Vec<_>>()
}
#[test]
fn group_index() {
let n = 6;
let (privs, group) = setup_group::<BCurve>(n, default_threshold(n));
for (i, private) in privs.iter().enumerate() {
let mut public = G1::one();
public.mul(private);
let idx = group.index(&public).expect("should find public key");
assert_eq!(idx, i as Idx);
}
}
#[test]
fn test_full_dkg() {
let n = 5;
let thr = default_threshold(n);
full_dkg(thr, setup_dkg::<BCurve>(n));
}
#[test]
fn test_invalid_shares_dkg() {
let n = 5;
let thr = default_threshold(n);
invalid_shares(
thr,
setup_dkg::<BCurve>(n),
invalid2,
id_resp,
check2,
id_out,
)
.unwrap();
}
}
| new_rand | identifier_name |
paging.rs | >, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
#![allow(dead_code)]
use crate::arch::x86_64::kernel::irq;
use crate::arch::x86_64::kernel::processor;
use crate::arch::x86_64::kernel::BOOT_INFO;
use crate::arch::x86_64::mm::{physicalmem, virtualmem};
use crate::consts::*;
use crate::logging::*;
use crate::scheduler;
use core::arch::asm;
use core::convert::TryInto;
use core::marker::PhantomData;
use core::mem::size_of;
use core::ptr::write_bytes;
use num_traits::CheckedShr;
use x86::controlregs;
use x86::irq::*;
/// Pointer to the root page table (PML4)
const PML4_ADDRESS: *mut PageTable<PML4> = 0xFFFF_FFFF_FFFF_F000 as *mut PageTable<PML4>;
/// Number of Offset bits of a virtual address for a 4 KiB page, which are shifted away to get its Page Frame Number (PFN).
const PAGE_BITS: usize = 12;
/// Number of bits of the index in each table (PML4, PDPT, PD, PT).
const PAGE_MAP_BITS: usize = 9;
/// A mask where PAGE_MAP_BITS are set to calculate a table index.
const PAGE_MAP_MASK: usize = 0x1FF;
bitflags! {
/// Possible flags for an entry in either table (PML4, PDPT, PD, PT)
///
/// See Intel Vol. 3A, Tables 4-14 through 4-19
pub struct PageTableEntryFlags: usize {
/// Set if this entry is valid and points to a page or table.
const PRESENT = 1 << 0;
/// Set if memory referenced by this entry shall be writable.
const WRITABLE = 1 << 1;
/// Set if memory referenced by this entry shall be accessible from user-mode (Ring 3).
const USER_ACCESSIBLE = 1 << 2;
/// Set if Write-Through caching shall be enabled for memory referenced by this entry.
/// Otherwise, Write-Back caching is used.
const WRITE_THROUGH = 1 << 3;
/// Set if caching shall be disabled for memory referenced by this entry.
const CACHE_DISABLE = 1 << 4;
/// Set if software has accessed this entry (for memory access or address translation).
const ACCESSED = 1 << 5;
/// Only for page entries: Set if software has written to the memory referenced by this entry.
const DIRTY = 1 << 6;
/// Only for page entries in PDPT or PDT: Set if this entry references a 1 GiB (PDPT) or 2 MiB (PDT) page.
const HUGE_PAGE = 1 << 7;
/// Only for page entries: Set if this address translation is global for all tasks and does not need to
/// be flushed from the TLB when CR3 is reset.
const GLOBAL = 1 << 8;
/// Set if code execution shall be disabled for memory referenced by this entry.
const EXECUTE_DISABLE = 1 << 63;
}
}
impl PageTableEntryFlags {
/// An empty set of flags for unused/zeroed table entries.
/// Needed as long as empty() is no const function.
const BLANK: PageTableEntryFlags = PageTableEntryFlags { bits: 0 };
pub fn device(&mut self) -> &mut Self {
self.insert(PageTableEntryFlags::CACHE_DISABLE);
self
}
pub fn normal(&mut self) -> &mut Self {
self.remove(PageTableEntryFlags::CACHE_DISABLE);
self
}
pub fn read_only(&mut self) -> &mut Self {
self.remove(PageTableEntryFlags::WRITABLE);
self
}
pub fn writable(&mut self) -> &mut Self {
self.insert(PageTableEntryFlags::WRITABLE);
self
}
pub fn execute_disable(&mut self) -> &mut Self {
self.insert(PageTableEntryFlags::EXECUTE_DISABLE);
self
}
}
/// An entry in either table (PML4, PDPT, PD, PT)
#[derive(Clone, Copy)]
pub struct PageTableEntry {
/// Physical memory address this entry refers to, combined with flags from PageTableEntryFlags.
physical_address_and_flags: usize,
}
impl PageTableEntry {
/// Return the stored physical address.
pub fn address(&self) -> usize {
self.physical_address_and_flags
& !(BasePageSize::SIZE - 1)
& !(PageTableEntryFlags::EXECUTE_DISABLE).bits()
}
/// Returns whether this entry is valid (present).
fn is_present(&self) -> bool {
(self.physical_address_and_flags & PageTableEntryFlags::PRESENT.bits()) != 0
}
fn is_huge(&self) -> bool {
(self.physical_address_and_flags & PageTableEntryFlags::HUGE_PAGE.bits()) != 0
}
fn is_user(&self) -> bool {
(self.physical_address_and_flags & PageTableEntryFlags::USER_ACCESSIBLE.bits()) != 0
}
/// Mark this as a valid (present) entry and set address translation and flags.
///
/// # Arguments
///
/// * `physical_address` - The physical memory address this entry shall translate to
/// * `flags` - Flags from PageTableEntryFlags (note that the PRESENT and ACCESSED flags are set automatically)
fn set(&mut self, physical_address: usize, flags: PageTableEntryFlags) {
if flags.contains(PageTableEntryFlags::HUGE_PAGE) {
// HUGE_PAGE may indicate a 2 MiB or 1 GiB page.
// We don't know this here, so we can only verify that at least the offset bits for a 2 MiB page are zero.
assert!(
(physical_address % LargePageSize::SIZE) == 0,
"Physical address is not on a 2 MiB page boundary (physical_address = {:#X})",
physical_address
);
} else {
// Verify that the offset bits for a 4 KiB page are zero.
assert!(
(physical_address % BasePageSize::SIZE) == 0,
"Physical address is not on a 4 KiB page boundary (physical_address = {:#X})",
physical_address
);
}
// Verify that the physical address does not exceed the CPU's physical address width.
assert!(
CheckedShr::checked_shr(
&physical_address,
processor::get_physical_address_bits() as u32
) == Some(0),
"Physical address exceeds CPU's physical address width (physical_address = {:#X})",
physical_address
);
let mut flags_to_set = flags;
flags_to_set.insert(PageTableEntryFlags::PRESENT);
flags_to_set.insert(PageTableEntryFlags::ACCESSED);
self.physical_address_and_flags = physical_address | flags_to_set.bits();
}
}
/// A generic interface to support all possible page sizes.
///
/// This is defined as a subtrait of Copy to enable #[derive(Clone, Copy)] for Page.
/// Currently, deriving implementations for these traits only works if all dependent types implement it as well.
pub trait PageSize: Copy {
/// The page size in bytes.
const SIZE: usize;
/// The page table level at which a page of this size is mapped (from 0 for PT through 3 for PML4).
/// Implemented as a numeric value to enable numeric comparisons.
const MAP_LEVEL: usize;
/// Any extra flag that needs to be set to map a page of this size.
/// For example: PageTableEntryFlags::HUGE_PAGE
const MAP_EXTRA_FLAG: PageTableEntryFlags;
}
/// A 4 KiB page mapped in the PT.
#[derive(Clone, Copy)]
pub enum BasePageSize {}
impl PageSize for BasePageSize {
const SIZE: usize = 0x1000;
const MAP_LEVEL: usize = 0;
const MAP_EXTRA_FLAG: PageTableEntryFlags = PageTableEntryFlags::BLANK;
}
/// A 2 MiB page mapped in the PD.
#[derive(Clone, Copy)]
pub enum LargePageSize {}
impl PageSize for LargePageSize {
const SIZE: usize = 0x200000;
const MAP_LEVEL: usize = 1;
const MAP_EXTRA_FLAG: PageTableEntryFlags = PageTableEntryFlags::HUGE_PAGE;
}
/// A 1 GiB page mapped in the PDPT.
#[derive(Clone, Copy)]
pub enum HugePageSize {}
impl PageSize for HugePageSize {
const SIZE: usize = 0x40000000;
const MAP_LEVEL: usize = 2;
const MAP_EXTRA_FLAG: PageTableEntryFlags = PageTableEntryFlags::HUGE_PAGE;
}
/// A memory page of the size given by S.
#[derive(Clone, Copy)]
struct Page<S: PageSize> {
/// Virtual memory address of this page.
/// This is rounded to a page size boundary on creation.
virtual_address: usize,
/// Required by Rust to support the S parameter.
size: PhantomData<S>,
}
impl<S: PageSize> Page<S> {
/// Return the stored virtual address.
fn address(&self) -> usize {
self.virtual_address
}
/// Flushes this page from the TLB of this CPU.
#[inline(always)]
fn flush_from_tlb(&self) {
unsafe {
asm!("invlpg [{}]", in(reg) self.virtual_address, options(preserves_flags, nostack));
}
}
/// Returns whether the given virtual address is a valid one in the x86-64 memory model.
///
/// Current x86-64 supports only 48-bit for virtual memory addresses.
/// This is enforced by requiring bits 63 through 48 to replicate bit 47 (cf. Intel Vol. 1, 3.3.7.1).
/// As a consequence, the address space is divided into the two valid regions below 0x0000_8000_0000_0000
/// and at or above 0xFFFF_8000_0000_0000.
///
/// Although we could make this check depend on the actual linear address width from the CPU,
/// any extension above 48-bit would require a new page table level, which we don't implement.
fn is_valid_address(virtual_address: usize) -> bool {
virtual_address < 0x8000_0000_0000 || virtual_address >= 0xFFFF_8000_0000_0000
}
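// Examples of the check above:
//   0x0000_7FFF_FFFF_FFFF -> valid   (highest canonical lower-half address)
//   0x0000_8000_0000_0000 -> invalid (non-canonical hole)
//   0xFFFF_8000_0000_0000 -> valid   (lowest canonical higher-half address)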
/// Returns a Page including the given virtual address.
/// That means, the address is rounded down to a page size boundary.
fn including_address(virtual_address: usize) -> Self {
assert!(
Self::is_valid_address(virtual_address),
"Virtual address {:#X} is invalid",
virtual_address
);
if S::SIZE == 1024 * 1024 * 1024 {
assert!(processor::supports_1gib_pages());
}
Self {
virtual_address: align_down!(virtual_address, S::SIZE),
size: PhantomData,
}
}
/// Returns a PageIter to iterate from the given first Page to the given last Page (inclusive).
fn range(first: Self, last: Self) -> PageIter<S> {
assert!(first.virtual_address <= last.virtual_address);
PageIter {
current: first,
last: last,
}
}
/// Returns the index of this page in the table given by L.
fn table_index<L: PageTableLevel>(&self) -> usize {
assert!(L::LEVEL >= S::MAP_LEVEL);
self.virtual_address >> PAGE_BITS >> L::LEVEL * PAGE_MAP_BITS & PAGE_MAP_MASK
}
}
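// Worked example (not from the original source) of the index calculation in
// `table_index` above, for virtual_address = 0x0000_0080_8060_4005:
//   PML4 index  = (va >> 12 >> 27) & 0x1FF = 1
//   PDPT index  = (va >> 12 >> 18) & 0x1FF = 2
//   PD   index  = (va >> 12 >>  9) & 0x1FF = 3
//   PT   index  = (va >> 12 >>  0) & 0x1FF = 4
//   page offset =  va & 0xFFF              = 0x005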
/// An iterator to walk through a range of pages of size S.
struct PageIter<S: PageSize> {
current: Page<S>,
last: Page<S>,
}
impl<S: PageSize> Iterator for PageIter<S> {
type Item = Page<S>;
fn next(&mut self) -> Option<Page<S>> {
if self.current.virtual_address <= self.last.virtual_address {
let p = self.current;
self.current.virtual_address += S::SIZE;
Some(p)
} else {
None
}
}
}
/// An interface to allow for a generic implementation of struct PageTable for all 4 page tables.
/// Must be implemented by all page tables.
trait PageTableLevel {
/// Numeric page table level (from 0 for PT through 3 for PML4) to enable numeric comparisons.
const LEVEL: usize;
}
/// An interface for page tables with sub page tables (all except PT).
/// Having both PageTableLevel and PageTableLevelWithSubtables leverages Rust's typing system to provide
/// a subtable method only for those that have sub page tables.
///
/// Kudos to Philipp Oppermann for the trick!
trait PageTableLevelWithSubtables: PageTableLevel {
type SubtableLevel;
}
/// The Page Map Level 4 (PML4) table, with numeric level 3 and PDPT subtables.
enum PML4 {}
impl PageTableLevel for PML4 {
const LEVEL: usize = 3;
}
impl PageTableLevelWithSubtables for PML4 {
type SubtableLevel = PDPT;
}
/// A Page Directory Pointer Table (PDPT), with numeric level 2 and PDT subtables.
enum PDPT {}
impl PageTableLevel for PDPT {
const LEVEL: usize = 2;
}
impl PageTableLevelWithSubtables for PDPT {
type SubtableLevel = PD;
}
/// A Page Directory (PD), with numeric level 1 and PT subtables.
enum PD {}
impl PageTableLevel for PD {
const LEVEL: usize = 1;
}
impl PageTableLevelWithSubtables for PD {
type SubtableLevel = PT;
}
/// A Page Table (PT), with numeric level 0 and no subtables.
enum PT {}
impl PageTableLevel for PT {
const LEVEL: usize = 0;
}
/// Representation of any page table (PML4, PDPT, PD, PT) in memory.
/// Parameter L supplies information for Rust's typing system to distinguish between the different tables.
struct PageTable<L> {
/// Each page table has 512 entries (can be calculated using PAGE_MAP_BITS).
entries: [PageTableEntry; 1 << PAGE_MAP_BITS],
/// Required by Rust to support the L parameter.
level: PhantomData<L>,
}
/// A trait defining methods every page table has to implement.
/// This additional trait is necessary to make use of Rust's specialization feature and provide a default
/// implementation of some methods.
trait PageTableMethods {
fn get_page_table_entry<S: PageSize>(&self, page: Page<S>) -> Option<PageTableEntry>;
fn map_page_in_this_table<S: PageSize>(
&mut self,
page: Page<S>,
physical_address: usize,
flags: PageTableEntryFlags,
) -> bool;
fn map_page<S: PageSize>(
&mut self,
page: Page<S>,
physical_address: usize,
flags: PageTableEntryFlags,
) -> bool;
fn drop_user_space(&mut self);
}
impl<L: PageTableLevel> PageTableMethods for PageTable<L> {
/// Maps a single page in this table to the given physical address.
/// Returns whether an existing entry was updated. You can use this return value to flush TLBs.
///
/// Must only be called if a page of this size is mapped at this page table level!
fn map_page_in_this_table<S: PageSize>(
&mut self,
page: Page<S>,
physical_address: usize,
flags: PageTableEntryFlags,
) -> bool {
assert!(L::LEVEL == S::MAP_LEVEL);
let index = page.table_index::<L>();
let flush = self.entries[index].is_present();
self.entries[index].set(
physical_address,
PageTableEntryFlags::DIRTY | S::MAP_EXTRA_FLAG | flags,
);
if flush {
page.flush_from_tlb();
}
flush
}
/// Returns the PageTableEntry for the given page if it is present, otherwise returns None.
///
/// This is the default implementation called only for PT.
/// It is overridden by a specialized implementation for all tables with sub tables (all except PT).
default fn get_page_table_entry<S: PageSize>(&self, page: Page<S>) -> Option<PageTableEntry> {
assert!(L::LEVEL == S::MAP_LEVEL);
let index = page.table_index::<L>();
if self.entries[index].is_present() {
Some(self.entries[index])
} else {
None
}
}
default fn drop_user_space(&mut self) {
let last = 1 << PAGE_MAP_BITS;
for index in 0..last {
if self.entries[index].is_present() && self.entries[index].is_user() {
let physical_address = self.entries[index].address();
debug!("Free page frame at 0x{:x}", physical_address);
physicalmem::deallocate(physical_address, BasePageSize::SIZE);
}
}
}
/// Maps a single page to the given physical address.
/// Returns whether an existing entry was updated. You can use this return value to flush TLBs.
///
/// This is the default implementation that just calls the map_page_in_this_table method.
/// It is overridden by a specialized implementation for all tables with sub tables (all except PT).
default fn map_page<S: PageSize>(
&mut self,
page: Page<S>,
physical_address: usize,
flags: PageTableEntryFlags,
) -> bool {
self.map_page_in_this_table::<S>(page, physical_address, flags)
}
}
impl<L: PageTableLevelWithSubtables> PageTableMethods for PageTable<L>
where
L::SubtableLevel: PageTableLevel,
{
/// Returns the PageTableEntry for the given page if it is present, otherwise returns None.
///
/// This is the implementation for all tables with subtables (PML4, PDPT, PDT).
/// It overrides the default implementation above.
fn get_page_table_entry<S: PageSize>(&self, page: Page<S>) -> Option<PageTableEntry> {
assert!(L::LEVEL >= S::MAP_LEVEL);
let index = page.table_index::<L>();
if self.entries[index].is_present() | else {
None
}
}
fn drop_user_space(&mut self) {
let last = 1 << PAGE_MAP_BITS;
let table_address = self as *const PageTable<L> as usize;
for index in 0..last {
if self.entries[index].is_present() && self.entries[index].is_user() {
// currently, the user space uses only 4KB pages
if L::LEVEL > BasePageSize::MAP_LEVEL {
// Calculate the address of the subtable.
let subtable_address = (table_address << PAGE_MAP_BITS) | (index << PAGE_BITS);
let subtable =
unsafe { &mut *(subtable_address as *mut PageTable<L::SubtableLevel>) };
subtable.drop_user_space();
//let physical_address = self.entries[index].address();
//debug!("Free page table at 0x{:x}", physical_address);
//physicalmem::deallocate(physical_address, BasePageSize::SIZE);
}
}
}
}
/// Maps a single page to the given physical address.
/// Returns whether an existing entry was updated. You can use this return value to flush TLBs.
///
/// This is the implementation for all tables with subtables (PML4, PDPT, PDT).
/// It overrides the default implementation above.
fn map_page<S: PageSize>(
&mut self,
page: Page<S>,
physical_address: usize,
flags: PageTableEntryFlags,
) -> bool {
assert!(L::LEVEL >= S::MAP_LEVEL);
if L::LEVEL > S::MAP_LEVEL {
let index = page.table_index::<L>();
// Does the table exist yet?
if !self.entries[index].is_present() {
// Allocate a single 4 KiB page for the new entry and mark it as a valid, writable subtable.
let pt_addr = physicalmem::allocate(BasePageSize::SIZE);
if flags.contains(PageTableEntryFlags::USER_ACCESSIBLE) {
self.entries[index].set(
pt_addr,
PageTableEntryFlags::WRITABLE | PageTableEntryFlags::USER_ACCESSIBLE,
);
} else {
self.entries[index].set(pt_addr, PageTableEntryFlags::WRITABLE);
}
// Mark all entries as unused in the newly created table.
let subtable = self.subtable::<S>(page);
for entry in subtable.entries.iter_mut() {
entry.physical_address_and_flags = 0;
}
subtable.map_page::<S>(page, physical_address, flags)
} else {
let subtable = self.subtable::<S>(page);
subtable.map_page::<S>(page, physical_address, flags)
}
} else {
// Calling the default implementation from a specialized one is not supported (yet),
// so we have to resort to an extra function.
self.map_page_in_this_table::<S>(page, physical_address, flags)
}
}
}
impl<L: PageTableLevelWithSubtables> PageTable<L>
where
L::SubtableLevel: PageTableLevel,
{
/// Returns the next subtable for the given page in the page table hierarchy.
///
/// Must only be called if a page of this size is mapped in a subtable!
fn subtable<S: PageSize>(&self, page: Page<S>) -> &mut PageTable<L::SubtableLevel> {
assert!(L::LEVEL > S::MAP_LEVEL);
// Calculate the address of the subtable.
let index = page.table_index::<L>();
let table_address = self as *const PageTable<L> as usize;
let subtable_address = (table_address << PAGE_MAP_BITS) | (index << PAGE_BITS);
unsafe { &mut *(subtable_address as *mut PageTable<L::SubtableLevel>) }
}
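// Worked example (not from the original source): the PML4 itself is visible at
// PML4_ADDRESS = 0xFFFF_FFFF_FFFF_F000 thanks to its recursive last entry.
// Shifting that address left by PAGE_MAP_BITS (truncated to 64 bits) and or-ing
// in an index drops one level, so the PDPT referenced by PML4 entry i lives at
// (0xFFFF_FFFF_FFFF_F000 << 9) | (i << 12) = 0xFFFF_FFFF_FFE0_0000 + i * 0x1000.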
/// Maps a continuous range of pages.
///
/// # Arguments
///
/// * `range` - The range of pages of size S
/// * `physical_address` - First physical address to map these pages to
/// * `flags` - Flags from PageTableEntryFlags to set for the page table entry (e.g. WRITABLE or EXECUTE_DISABLE).
/// The PRESENT, ACCESSED, and DIRTY flags are already set automatically.
fn map_pages<S: PageSize>(
&mut self,
range: PageIter<S>,
physical_address: usize,
flags: PageTableEntryFlags,
) {
let mut current_physical_address = physical_address;
for page in range {
self.map_page(page, current_physical_address, flags);
current_physical_address += S::SIZE;
}
}
fn drop_user_space(&mut self) {
assert!(L::LEVEL == PML4::LEVEL);
// the last entry is required to get access to the page tables
let last = (1 << PAGE_MAP_BITS) - 1;
let table_address = self as *const PageTable<L> as usize;
for index in 0..last {
if self.entries[index].is_present() && self.entries[index].is_user() {
// Calculate the address of the subtable.
let subtable_address = (table_address << PAGE_MAP_BITS) | (index << PAGE_BITS);
let subtable =
unsafe { &mut *(subtable_address as *mut PageTable<L::SubtableLevel>) };
subtable.drop_user_space();
let physical_address = self.entries[index].address();
debug!("Free page table at 0x{:x}", physical_address);
physicalmem::deallocate(physical_address, BasePageSize::SIZE);
}
}
}
}
pub extern "x86-interrupt" fn page_fault_handler(
stack_frame: irq::ExceptionStackFrame,
error_code: u64,
) {
let mut virtual_address = unsafe { controlregs::cr2() };
// do we have to create the user-space stack?
if virtual_address > USER_SPACE_START {
virtual_address = align_down!(virtual_address, BasePageSize::SIZE);
// OK, user space wants to have memory (for the stack / heap)
let physical_address =
physicalmem::allocate_aligned(BasePageSize::SIZE, BasePageSize::SIZE);
debug!(
"Map 0x{:x} into the user space at 0x{:x}",
physical_address, virtual_address
);
map::<BasePageSize>(
virtual_address,
physical_address,
1,
PageTableEntryFlags::WRITABLE
| PageTableEntryFlags::USER_ACCESSIBLE
| PageTableEntryFlags::EXECUTE_DISABLE,
);
unsafe {
// clear new page
write_bytes(virtual_address as *mut u8, 0x00, BasePageSize::SIZE);
// clear CR2 to signal that the page fault has been handled
controlregs::cr2_write(0);
}
} else {
// Anything else is an error!
let pferror = PageFaultError::from_bits_truncate(error_code as u32);
error!("Page Fault (#PF) Exception: {:#?}", stack_frame);
error!(
"virtual_address = {:#X}, page fault error = {}",
virtual_address, pferror
);
// clear CR2 to signal that the page fault has been handled
unsafe {
controlregs::cr2_write(0);
}
scheduler::abort();
}
}
fn get_page_range<S: PageSize>(virtual_address: usize, count: usize) -> PageIter<S> {
let first_page = Page::<S>::including_address(virtual_address);
let last_page = Page::<S>::including_address(virtual_address + (count - 1) * S::SIZE);
Page::range(first_page, last_page)
}
pub fn get_page_table_entry<S: PageSize>(virtual_address: usize) -> Option<PageTableEntry> {
debug!("Looking up Page Table Entry for {:#X}", virtual_address);
let page = Page::<S>::including_address(virtual_address);
let root_pagetable = unsafe { &mut *PML4_ADDRESS };
root_pagetable.get_page_table_entry(page)
}
pub fn get_physical_address<S: PageSize>(virtual_address: usize) -> usize {
debug!("Getting physical address for {:#X}", virtual_address);
let page = Page::<S>::including_address(virtual_address);
let root_pagetable = unsafe { &mut *PML4_ADDRESS };
let address = root_pagetable
.get_page_table_entry(page)
.expect("Entry not present")
.address();
let offset = virtual_address & (S::SIZE - 1);
address | offset
}
/// Translate a virtual memory address to a physical one.
/// Just like get_physical_address, but automatically uses the correct page size for the respective memory address.
pub fn virtual_to_physical(virtual_address: usize) -> usize {
get_physical_address::<BasePageSize>(virtual_address)
}
pub fn unmap<S: PageSize>(virtual_address: usize, count: usize) {
debug!(
"Unmapping virtual address {:#X} ({} pages)",
virtual_address, count
);
let range = get_page_range::<S>(virtual_address, count);
let root_pagetable = unsafe { &mut *PML4_ADDRESS };
root_pagetable.map_pages(range, 0, PageTableEntryFlags::BLANK);
}
pub fn map<S: PageSize>(
virtual_address: usize,
physical_address: usize,
count: usize,
flags: PageTableEntryFlags,
) {
debug!(
"Mapping virtual address {:#X} to physical address {:#X} ({} pages)",
virtual_address, physical_address, count
);
let range = get_page_range::<S>(virtual_address, count);
let root_pagetable = unsafe { &mut *PML4_ADDRESS };
root_pagetable.map_pages(range, physical_address, flags);
}
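// Illustrative only (not part of the original file): mapping a hypothetical
// MMIO region of two 4 KiB pages as uncached, writable and non-executable.
// Both addresses below are made up for the example.
#[allow(dead_code)]
fn example_map_device_region() {
    let mut flags = PageTableEntryFlags::empty();
    flags.device().writable().execute_disable();
    map::<BasePageSize>(0xFFFF_8100_0000_0000, 0xFE00_0000, 2, flags);
}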
static mut ROOT_PAGE_TABLE: usize = 0;
#[inline(always)]
pub fn get_kernel_root_page_table() -> usize {
unsafe { ROOT_PAGE_TABLE }
}
pub fn drop_user_space() {
let root_pagetable = unsafe { &mut *PML4_ADDRESS };
root_pagetable.drop_user_space();
}
// just a workaround to explain the difference between
// kernel and user space
pub fn create_usr_pgd() -> usize {
debug!("Create 1st level page table for the user-level task");
unsafe {
let physical_address =
physicalmem::allocate_aligned(BasePageSize::SIZE, BasePageSize::SIZE);
let user_page_table: usize =
virtualmem::allocate_aligned(BasePageSize::SIZE, BasePageSize::SIZE);
debug!(
"Map page frame 0x{:x} at virtual address 0x{:x}",
physical_address, user_page_table
);
map::<BasePageSize>(
user_page_table,
physical_address,
1,
PageTableEntryFlags::WRITABLE | PageTableEntryFlags::EXECUTE_DISABLE,
);
write_bytes(user_page_table as *mut u8, 0x00, BasePageSize::SIZE);
let recursive_pgt = BOOT_INFO.unwrap().recursive_page_table_addr as *const u64;
let recursive_pgt_idx = BOOT_INFO.unwrap().recursive_index();
let pml4 = user_page_table as *mut u64;
for i in 0..recursive_pgt_idx + 2 {
*pml4.offset(i.try_into().unwrap()) = *recursive_pgt.offset(i.try_into().unwrap());
}
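// The last slot (index 511) of the new table is pointed at the table's own
// page frame below, so the recursive page-table trick behind PML4_ADDRESS
// keeps working once this PML4 is installed for the user task.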
let pml4 =
(user_page_table + BasePageSize::SIZE - size_of::<usize>()) as *mut PageTableEntry;
(*pml4).set(physical_address, PageTableEntryFlags::WRITABLE);
// unmap page table
unmap::<BasePageSize>(user_page_table, 1);
virtualmem::deallocate(user_page_table, BasePageSize::SIZE);
scheduler::set_root_page_table(physical_address);
physical_address
}
}
pub fn init() {
let recursive_pgt = unsafe { BOOT_INFO.unwrap().recursive_page_table_addr } as *mut u64;
let recursive_pgt_idx = unsafe { BOOT_INFO.unwrap().recursive_index() };
debug!(
"Found recursive_page_table_addr at 0x{:x}",
recursive_pgt as u64
);
debug!("Recursive index | {
if L::LEVEL > S::MAP_LEVEL {
let subtable = self.subtable::<S>(page);
subtable.get_page_table_entry::<S>(page)
} else {
Some(self.entries[index])
}
} | conditional_block |
paging.rs | >, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
#![allow(dead_code)]
use crate::arch::x86_64::kernel::irq;
use crate::arch::x86_64::kernel::processor;
use crate::arch::x86_64::kernel::BOOT_INFO;
use crate::arch::x86_64::mm::{physicalmem, virtualmem};
use crate::consts::*;
use crate::logging::*;
use crate::scheduler;
use core::arch::asm;
use core::convert::TryInto;
use core::marker::PhantomData;
use core::mem::size_of;
use core::ptr::write_bytes;
use num_traits::CheckedShr;
use x86::controlregs;
use x86::irq::*;
/// Pointer to the root page table (PML4)
const PML4_ADDRESS: *mut PageTable<PML4> = 0xFFFF_FFFF_FFFF_F000 as *mut PageTable<PML4>;
/// Number of Offset bits of a virtual address for a 4 KiB page, which are shifted away to get its Page Frame Number (PFN).
const PAGE_BITS: usize = 12;
/// Number of bits of the index in each table (PML4, PDPT, PD, PT).
const PAGE_MAP_BITS: usize = 9;
/// A mask where PAGE_MAP_BITS are set to calculate a table index.
const PAGE_MAP_MASK: usize = 0x1FF;
bitflags! {
/// Possible flags for an entry in either table (PML4, PDPT, PD, PT)
///
/// See Intel Vol. 3A, Tables 4-14 through 4-19
pub struct PageTableEntryFlags: usize {
/// Set if this entry is valid and points to a page or table.
const PRESENT = 1 << 0;
/// Set if memory referenced by this entry shall be writable.
const WRITABLE = 1 << 1;
/// Set if memory referenced by this entry shall be accessible from user-mode (Ring 3).
const USER_ACCESSIBLE = 1 << 2;
/// Set if Write-Through caching shall be enabled for memory referenced by this entry.
/// Otherwise, Write-Back caching is used.
const WRITE_THROUGH = 1 << 3;
/// Set if caching shall be disabled for memory referenced by this entry.
const CACHE_DISABLE = 1 << 4;
/// Set if software has accessed this entry (for memory access or address translation).
const ACCESSED = 1 << 5;
/// Only for page entries: Set if software has written to the memory referenced by this entry.
const DIRTY = 1 << 6;
/// Only for page entries in PDPT or PDT: Set if this entry references a 1 GiB (PDPT) or 2 MiB (PDT) page.
const HUGE_PAGE = 1 << 7;
/// Only for page entries: Set if this address translation is global for all tasks and does not need to
/// be flushed from the TLB when CR3 is reset.
const GLOBAL = 1 << 8;
/// Set if code execution shall be disabled for memory referenced by this entry.
const EXECUTE_DISABLE = 1 << 63;
}
}
impl PageTableEntryFlags {
/// An empty set of flags for unused/zeroed table entries.
/// Needed as long as empty() is no const function.
const BLANK: PageTableEntryFlags = PageTableEntryFlags { bits: 0 };
pub fn device(&mut self) -> &mut Self {
self.insert(PageTableEntryFlags::CACHE_DISABLE);
self
}
pub fn normal(&mut self) -> &mut Self {
self.remove(PageTableEntryFlags::CACHE_DISABLE);
self
}
pub fn read_only(&mut self) -> &mut Self {
self.remove(PageTableEntryFlags::WRITABLE);
self
}
pub fn writable(&mut self) -> &mut Self {
self.insert(PageTableEntryFlags::WRITABLE);
self
}
pub fn execute_disable(&mut self) -> &mut Self {
self.insert(PageTableEntryFlags::EXECUTE_DISABLE);
self
}
}
/// An entry in either table (PML4, PDPT, PD, PT)
#[derive(Clone, Copy)]
pub struct PageTableEntry {
/// Physical memory address this entry refers to, combined with flags from PageTableEntryFlags.
physical_address_and_flags: usize,
}
impl PageTableEntry {
/// Return the stored physical address.
pub fn address(&self) -> usize {
self.physical_address_and_flags
& !(BasePageSize::SIZE - 1)
& !(PageTableEntryFlags::EXECUTE_DISABLE).bits()
}
/// Returns whether this entry is valid (present).
fn is_present(&self) -> bool {
(self.physical_address_and_flags & PageTableEntryFlags::PRESENT.bits()) != 0
}
fn is_huge(&self) -> bool {
(self.physical_address_and_flags & PageTableEntryFlags::HUGE_PAGE.bits()) != 0
}
fn is_user(&self) -> bool {
(self.physical_address_and_flags & PageTableEntryFlags::USER_ACCESSIBLE.bits()) != 0
}
/// Mark this as a valid (present) entry and set address translation and flags.
///
/// # Arguments
///
/// * `physical_address` - The physical memory address this entry shall translate to
/// * `flags` - Flags from PageTableEntryFlags (note that the PRESENT and ACCESSED flags are set automatically)
fn set(&mut self, physical_address: usize, flags: PageTableEntryFlags) {
if flags.contains(PageTableEntryFlags::HUGE_PAGE) {
// HUGE_PAGE may indicate a 2 MiB or 1 GiB page.
// We don't know this here, so we can only verify that at least the offset bits for a 2 MiB page are zero.
assert!(
(physical_address % LargePageSize::SIZE) == 0,
"Physical address is not on a 2 MiB page boundary (physical_address = {:#X})",
physical_address
);
} else {
// Verify that the offset bits for a 4 KiB page are zero.
assert!(
(physical_address % BasePageSize::SIZE) == 0,
"Physical address is not on a 4 KiB page boundary (physical_address = {:#X})",
physical_address
);
}
// Verify that the physical address does not exceed the CPU's physical address width.
assert!(
CheckedShr::checked_shr(
&physical_address,
processor::get_physical_address_bits() as u32
) == Some(0),
"Physical address exceeds CPU's physical address width (physical_address = {:#X})",
physical_address
);
let mut flags_to_set = flags;
flags_to_set.insert(PageTableEntryFlags::PRESENT);
flags_to_set.insert(PageTableEntryFlags::ACCESSED);
self.physical_address_and_flags = physical_address | flags_to_set.bits();
}
}
/// A generic interface to support all possible page sizes.
///
/// This is defined as a subtrait of Copy to enable #[derive(Clone, Copy)] for Page.
/// Currently, deriving implementations for these traits only works if all dependent types implement it as well.
pub trait PageSize: Copy {
/// The page size in bytes.
const SIZE: usize;
/// The page table level at which a page of this size is mapped (from 0 for PT through 3 for PML4).
/// Implemented as a numeric value to enable numeric comparisons.
const MAP_LEVEL: usize;
/// Any extra flag that needs to be set to map a page of this size.
/// For example: PageTableEntryFlags::HUGE_PAGE
const MAP_EXTRA_FLAG: PageTableEntryFlags;
}
/// A 4 KiB page mapped in the PT.
#[derive(Clone, Copy)]
pub enum | {}
impl PageSize for BasePageSize {
const SIZE: usize = 0x1000;
const MAP_LEVEL: usize = 0;
const MAP_EXTRA_FLAG: PageTableEntryFlags = PageTableEntryFlags::BLANK;
}
/// A 2 MiB page mapped in the PD.
#[derive(Clone, Copy)]
pub enum LargePageSize {}
impl PageSize for LargePageSize {
const SIZE: usize = 0x200000;
const MAP_LEVEL: usize = 1;
const MAP_EXTRA_FLAG: PageTableEntryFlags = PageTableEntryFlags::HUGE_PAGE;
}
/// A 1 GiB page mapped in the PDPT.
#[derive(Clone, Copy)]
pub enum HugePageSize {}
impl PageSize for HugePageSize {
const SIZE: usize = 0x40000000;
const MAP_LEVEL: usize = 2;
const MAP_EXTRA_FLAG: PageTableEntryFlags = PageTableEntryFlags::HUGE_PAGE;
}
/// A memory page of the size given by S.
#[derive(Clone, Copy)]
struct Page<S: PageSize> {
/// Virtual memory address of this page.
/// This is rounded to a page size boundary on creation.
virtual_address: usize,
/// Required by Rust to support the S parameter.
size: PhantomData<S>,
}
impl<S: PageSize> Page<S> {
/// Return the stored virtual address.
fn address(&self) -> usize {
self.virtual_address
}
/// Flushes this page from the TLB of this CPU.
#[inline(always)]
fn flush_from_tlb(&self) {
unsafe {
asm!("invlpg [{}]", in(reg) self.virtual_address, options(preserves_flags, nostack));
}
}
/// Returns whether the given virtual address is a valid one in the x86-64 memory model.
///
/// Current x86-64 supports only 48-bit for virtual memory addresses.
/// This is enforced by requiring bits 63 through 48 to replicate bit 47 (cf. Intel Vol. 1, 3.3.7.1).
/// As a consequence, the address space is divided into the two valid regions below 0x0000_8000_0000_0000
/// and at or above 0xFFFF_8000_0000_0000.
///
/// Although we could make this check depend on the actual linear address width from the CPU,
/// any extension above 48-bit would require a new page table level, which we don't implement.
fn is_valid_address(virtual_address: usize) -> bool {
virtual_address < 0x8000_0000_0000 || virtual_address >= 0xFFFF_8000_0000_0000
}
/// Returns a Page including the given virtual address.
/// That means, the address is rounded down to a page size boundary.
fn including_address(virtual_address: usize) -> Self {
assert!(
Self::is_valid_address(virtual_address),
"Virtual address {:#X} is invalid",
virtual_address
);
if S::SIZE == 1024 * 1024 * 1024 {
assert!(processor::supports_1gib_pages());
}
Self {
virtual_address: align_down!(virtual_address, S::SIZE),
size: PhantomData,
}
}
/// Returns a PageIter to iterate from the given first Page to the given last Page (inclusive).
fn range(first: Self, last: Self) -> PageIter<S> {
assert!(first.virtual_address <= last.virtual_address);
PageIter {
current: first,
last: last,
}
}
/// Returns the index of this page in the table given by L.
fn table_index<L: PageTableLevel>(&self) -> usize {
assert!(L::LEVEL >= S::MAP_LEVEL);
self.virtual_address >> PAGE_BITS >> L::LEVEL * PAGE_MAP_BITS & PAGE_MAP_MASK
}
}
/// An iterator to walk through a range of pages of size S.
struct PageIter<S: PageSize> {
current: Page<S>,
last: Page<S>,
}
impl<S: PageSize> Iterator for PageIter<S> {
type Item = Page<S>;
fn next(&mut self) -> Option<Page<S>> {
if self.current.virtual_address <= self.last.virtual_address {
let p = self.current;
self.current.virtual_address += S::SIZE;
Some(p)
} else {
None
}
}
}
/// An interface to allow for a generic implementation of struct PageTable for all 4 page tables.
/// Must be implemented by all page tables.
trait PageTableLevel {
/// Numeric page table level (from 0 for PT through 3 for PML4) to enable numeric comparisons.
const LEVEL: usize;
}
/// An interface for page tables with sub page tables (all except PT).
/// Having both PageTableLevel and PageTableLevelWithSubtables leverages Rust's typing system to provide
/// a subtable method only for those that have sub page tables.
///
/// Kudos to Philipp Oppermann for the trick!
trait PageTableLevelWithSubtables: PageTableLevel {
type SubtableLevel;
}
/// The Page Map Level 4 (PML4) table, with numeric level 3 and PDPT subtables.
enum PML4 {}
impl PageTableLevel for PML4 {
const LEVEL: usize = 3;
}
impl PageTableLevelWithSubtables for PML4 {
type SubtableLevel = PDPT;
}
/// A Page Directory Pointer Table (PDPT), with numeric level 2 and PDT subtables.
enum PDPT {}
impl PageTableLevel for PDPT {
const LEVEL: usize = 2;
}
impl PageTableLevelWithSubtables for PDPT {
type SubtableLevel = PD;
}
/// A Page Directory (PD), with numeric level 1 and PT subtables.
enum PD {}
impl PageTableLevel for PD {
const LEVEL: usize = 1;
}
impl PageTableLevelWithSubtables for PD {
type SubtableLevel = PT;
}
/// A Page Table (PT), with numeric level 0 and no subtables.
enum PT {}
impl PageTableLevel for PT {
const LEVEL: usize = 0;
}
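// Summary of the hierarchy modeled above (one entry per level):
//   level 3: PML4 entry covers 512 GiB (points to a PDPT)
//   level 2: PDPT entry covers 1 GiB   (HugePageSize maps here)
//   level 1: PD   entry covers 2 MiB   (LargePageSize maps here)
//   level 0: PT   entry covers 4 KiB   (BasePageSize maps here)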
/// Representation of any page table (PML4, PDPT, PD, PT) in memory.
/// Parameter L supplies information for Rust's typing system to distinguish between the different tables.
struct PageTable<L> {
/// Each page table has 512 entries (can be calculated using PAGE_MAP_BITS).
entries: [PageTableEntry; 1 << PAGE_MAP_BITS],
/// Required by Rust to support the L parameter.
level: PhantomData<L>,
}
/// A trait defining methods every page table has to implement.
/// This additional trait is necessary to make use of Rust's specialization feature and provide a default
/// implementation of some methods.
trait PageTableMethods {
fn get_page_table_entry<S: PageSize>(&self, page: Page<S>) -> Option<PageTableEntry>;
fn map_page_in_this_table<S: PageSize>(
&mut self,
page: Page<S>,
physical_address: usize,
flags: PageTableEntryFlags,
) -> bool;
fn map_page<S: PageSize>(
&mut self,
page: Page<S>,
physical_address: usize,
flags: PageTableEntryFlags,
) -> bool;
fn drop_user_space(&mut self);
}
impl<L: PageTableLevel> PageTableMethods for PageTable<L> {
/// Maps a single page in this table to the given physical address.
/// Returns whether an existing entry was updated. You can use this return value to flush TLBs.
///
/// Must only be called if a page of this size is mapped at this page table level!
fn map_page_in_this_table<S: PageSize>(
&mut self,
page: Page<S>,
physical_address: usize,
flags: PageTableEntryFlags,
) -> bool {
assert!(L::LEVEL == S::MAP_LEVEL);
let index = page.table_index::<L>();
let flush = self.entries[index].is_present();
self.entries[index].set(
physical_address,
PageTableEntryFlags::DIRTY | S::MAP_EXTRA_FLAG | flags,
);
if flush {
page.flush_from_tlb();
}
flush
}
/// Returns the PageTableEntry for the given page if it is present, otherwise returns None.
///
/// This is the default implementation called only for PT.
/// It is overridden by a specialized implementation for all tables with sub tables (all except PT).
default fn get_page_table_entry<S: PageSize>(&self, page: Page<S>) -> Option<PageTableEntry> {
assert!(L::LEVEL == S::MAP_LEVEL);
let index = page.table_index::<L>();
if self.entries[index].is_present() {
Some(self.entries[index])
} else {
None
}
}
default fn drop_user_space(&mut self) {
let last = 1 << PAGE_MAP_BITS;
for index in 0..last {
if self.entries[index].is_present() && self.entries[index].is_user() {
let physical_address = self.entries[index].address();
debug!("Free page frame at 0x{:x}", physical_address);
physicalmem::deallocate(physical_address, BasePageSize::SIZE);
}
}
}
/// Maps a single page to the given physical address.
/// Returns whether an existing entry was updated. You can use this return value to flush TLBs.
///
/// This is the default implementation that just calls the map_page_in_this_table method.
/// It is overridden by a specialized implementation for all tables with sub tables (all except PT).
default fn map_page<S: PageSize>(
&mut self,
page: Page<S>,
physical_address: usize,
flags: PageTableEntryFlags,
) -> bool {
self.map_page_in_this_table::<S>(page, physical_address, flags)
}
}
impl<L: PageTableLevelWithSubtables> PageTableMethods for PageTable<L>
where
L::SubtableLevel: PageTableLevel,
{
/// Returns the PageTableEntry for the given page if it is present, otherwise returns None.
///
/// This is the implementation for all tables with subtables (PML4, PDPT, PDT).
/// It overrides the default implementation above.
fn get_page_table_entry<S: PageSize>(&self, page: Page<S>) -> Option<PageTableEntry> {
assert!(L::LEVEL >= S::MAP_LEVEL);
let index = page.table_index::<L>();
if self.entries[index].is_present() {
if L::LEVEL > S::MAP_LEVEL {
let subtable = self.subtable::<S>(page);
subtable.get_page_table_entry::<S>(page)
} else {
Some(self.entries[index])
}
} else {
None
}
}
fn drop_user_space(&mut self) {
let last = 1 << PAGE_MAP_BITS;
let table_address = self as *const PageTable<L> as usize;
for index in 0..last {
if self.entries[index].is_present() && self.entries[index].is_user() {
// currently, the user space uses only 4KB pages
if L::LEVEL > BasePageSize::MAP_LEVEL {
// Calculate the address of the subtable.
let subtable_address = (table_address << PAGE_MAP_BITS) | (index << PAGE_BITS);
let subtable =
unsafe { &mut *(subtable_address as *mut PageTable<L::SubtableLevel>) };
subtable.drop_user_space();
//let physical_address = self.entries[index].address();
//debug!("Free page table at 0x{:x}", physical_address);
//physicalmem::deallocate(physical_address, BasePageSize::SIZE);
}
}
}
}
/// Maps a single page to the given physical address.
/// Returns whether an existing entry was updated. You can use this return value to flush TLBs.
///
/// This is the implementation for all tables with subtables (PML4, PDPT, PDT).
/// It overrides the default implementation above.
fn map_page<S: PageSize>(
&mut self,
page: Page<S>,
physical_address: usize,
flags: PageTableEntryFlags,
) -> bool {
assert!(L::LEVEL >= S::MAP_LEVEL);
if L::LEVEL > S::MAP_LEVEL {
let index = page.table_index::<L>();
// Does the table exist yet?
if !self.entries[index].is_present() {
// Allocate a single 4 KiB page for the new entry and mark it as a valid, writable subtable.
let pt_addr = physicalmem::allocate(BasePageSize::SIZE);
if flags.contains(PageTableEntryFlags::USER_ACCESSIBLE) {
self.entries[index].set(
pt_addr,
PageTableEntryFlags::WRITABLE | PageTableEntryFlags::USER_ACCESSIBLE,
);
} else {
self.entries[index].set(pt_addr, PageTableEntryFlags::WRITABLE);
}
// Mark all entries as unused in the newly created table.
let subtable = self.subtable::<S>(page);
for entry in subtable.entries.iter_mut() {
entry.physical_address_and_flags = 0;
}
subtable.map_page::<S>(page, physical_address, flags)
} else {
let subtable = self.subtable::<S>(page);
subtable.map_page::<S>(page, physical_address, flags)
}
} else {
// Calling the default implementation from a specialized one is not supported (yet),
// so we have to resort to an extra function.
self.map_page_in_this_table::<S>(page, physical_address, flags)
}
}
}
impl<L: PageTableLevelWithSubtables> PageTable<L>
where
L::SubtableLevel: PageTableLevel,
{
/// Returns the next subtable for the given page in the page table hierarchy.
///
/// Must only be called if a page of this size is mapped in a subtable!
fn subtable<S: PageSize>(&self, page: Page<S>) -> &mut PageTable<L::SubtableLevel> {
assert!(L::LEVEL > S::MAP_LEVEL);
// Calculate the address of the subtable.
let index = page.table_index::<L>();
let table_address = self as *const PageTable<L> as usize;
let subtable_address = (table_address << PAGE_MAP_BITS) | (index << PAGE_BITS);
unsafe { &mut *(subtable_address as *mut PageTable<L::SubtableLevel>) }
}
/// Maps a continuous range of pages.
///
/// # Arguments
///
/// * `range` - The range of pages of size S
/// * `physical_address` - First physical address to map these pages to
/// * `flags` - Flags from PageTableEntryFlags to set for the page table entry (e.g. WRITABLE or EXECUTE_DISABLE).
/// The PRESENT, ACCESSED, and DIRTY flags are already set automatically.
fn map_pages<S: PageSize>(
&mut self,
range: PageIter<S>,
physical_address: usize,
flags: PageTableEntryFlags,
) {
let mut current_physical_address = physical_address;
for page in range {
self.map_page(page, current_physical_address, flags);
current_physical_address += S::SIZE;
}
}
fn drop_user_space(&mut self) {
assert!(L::LEVEL == PML4::LEVEL);
// the last entry is required to get access to the page tables
let last = (1 << PAGE_MAP_BITS) - 1;
let table_address = self as *const PageTable<L> as usize;
for index in 0..last {
if self.entries[index].is_present() && self.entries[index].is_user() {
// Calculate the address of the subtable.
let subtable_address = (table_address << PAGE_MAP_BITS) | (index << PAGE_BITS);
let subtable =
unsafe { &mut *(subtable_address as *mut PageTable<L::SubtableLevel>) };
subtable.drop_user_space();
let physical_address = self.entries[index].address();
debug!("Free page table at 0x{:x}", physical_address);
physicalmem::deallocate(physical_address, BasePageSize::SIZE);
}
}
}
}
pub extern "x86-interrupt" fn page_fault_handler(
stack_frame: irq::ExceptionStackFrame,
error_code: u64,
) {
let mut virtual_address = unsafe { controlregs::cr2() };
// do we have to create the user-space stack?
if virtual_address > USER_SPACE_START {
virtual_address = align_down!(virtual_address, BasePageSize::SIZE);
// OK, user space wants to have memory (for the stack / heap)
let physical_address =
physicalmem::allocate_aligned(BasePageSize::SIZE, BasePageSize::SIZE);
debug!(
"Map 0x{:x} into the user space at 0x{:x}",
physical_address, virtual_address
);
map::<BasePageSize>(
virtual_address,
physical_address,
1,
PageTableEntryFlags::WRITABLE
| PageTableEntryFlags::USER_ACCESSIBLE
| PageTableEntryFlags::EXECUTE_DISABLE,
);
unsafe {
// clear new page
write_bytes(virtual_address as *mut u8, 0x00, BasePageSize::SIZE);
// clear CR2 to signal that the page fault has been handled
controlregs::cr2_write(0);
}
} else {
// Anything else is an error!
let pferror = PageFaultError::from_bits_truncate(error_code as u32);
error!("Page Fault (#PF) Exception: {:#?}", stack_frame);
error!(
"virtual_address = {:#X}, page fault error = {}",
virtual_address, pferror
);
// clear CR2 to signal that the page fault has been handled
unsafe {
controlregs::cr2_write(0);
}
scheduler::abort();
}
}
fn get_page_range<S: PageSize>(virtual_address: usize, count: usize) -> PageIter<S> {
let first_page = Page::<S>::including_address(virtual_address);
let last_page = Page::<S>::including_address(virtual_address + (count - 1) * S::SIZE);
Page::range(first_page, last_page)
}
pub fn get_page_table_entry<S: PageSize>(virtual_address: usize) -> Option<PageTableEntry> {
debug!("Looking up Page Table Entry for {:#X}", virtual_address);
let page = Page::<S>::including_address(virtual_address);
let root_pagetable = unsafe { &mut *PML4_ADDRESS };
root_pagetable.get_page_table_entry(page)
}
pub fn get_physical_address<S: PageSize>(virtual_address: usize) -> usize {
debug!("Getting physical address for {:#X}", virtual_address);
let page = Page::<S>::including_address(virtual_address);
let root_pagetable = unsafe { &mut *PML4_ADDRESS };
let address = root_pagetable
.get_page_table_entry(page)
.expect("Entry not present")
.address();
let offset = virtual_address & (S::SIZE - 1);
address | offset
}
/// Translate a virtual memory address to a physical one.
/// Just like get_physical_address, but automatically uses the correct page size for the respective memory address.
pub fn virtual_to_physical(virtual_address: usize) -> usize {
get_physical_address::<BasePageSize>(virtual_address)
}
pub fn unmap<S: PageSize>(virtual_address: usize, count: usize) {
debug!(
"Unmapping virtual address {:#X} ({} pages)",
virtual_address, count
);
let range = get_page_range::<S>(virtual_address, count);
let root_pagetable = unsafe { &mut *PML4_ADDRESS };
root_pagetable.map_pages(range, 0, PageTableEntryFlags::BLANK);
}
pub fn map<S: PageSize>(
virtual_address: usize,
physical_address: usize,
count: usize,
flags: PageTableEntryFlags,
) {
debug!(
"Mapping virtual address {:#X} to physical address {:#X} ({} pages)",
virtual_address, physical_address, count
);
let range = get_page_range::<S>(virtual_address, count);
let root_pagetable = unsafe { &mut *PML4_ADDRESS };
root_pagetable.map_pages(range, physical_address, flags);
}
static mut ROOT_PAGE_TABLE: usize = 0;
#[inline(always)]
pub fn get_kernel_root_page_table() -> usize {
unsafe { ROOT_PAGE_TABLE }
}
pub fn drop_user_space() {
let root_pagetable = unsafe { &mut *PML4_ADDRESS };
root_pagetable.drop_user_space();
}
// just a workaround to explain the difference between
// kernel and user space
pub fn create_usr_pgd() -> usize {
debug!("Create 1st level page table for the user-level task");
unsafe {
let physical_address =
physicalmem::allocate_aligned(BasePageSize::SIZE, BasePageSize::SIZE);
let user_page_table: usize =
virtualmem::allocate_aligned(BasePageSize::SIZE, BasePageSize::SIZE);
debug!(
"Map page frame 0x{:x} at virtual address 0x{:x}",
physical_address, user_page_table
);
map::<BasePageSize>(
user_page_table,
physical_address,
1,
PageTableEntryFlags::WRITABLE | PageTableEntryFlags::EXECUTE_DISABLE,
);
write_bytes(user_page_table as *mut u8, 0x00, BasePageSize::SIZE);
let recursive_pgt = BOOT_INFO.unwrap().recursive_page_table_addr as *const u64;
let recursive_pgt_idx = BOOT_INFO.unwrap().recursive_index();
let pml4 = user_page_table as *mut u64;
for i in 0..recursive_pgt_idx + 2 {
*pml4.offset(i.try_into().unwrap()) = *recursive_pgt.offset(i.try_into().unwrap());
}
let pml4 =
(user_page_table + BasePageSize::SIZE - size_of::<usize>()) as *mut PageTableEntry;
(*pml4).set(physical_address, PageTableEntryFlags::WRITABLE);
// unmap page table
unmap::<BasePageSize>(user_page_table, 1);
virtualmem::deallocate(user_page_table, BasePageSize::SIZE);
scheduler::set_root_page_table(physical_address);
physical_address
}
}
pub fn init() {
let recursive_pgt = unsafe { BOOT_INFO.unwrap().recursive_page_table_addr } as *mut u64;
let recursive_pgt_idx = unsafe { BOOT_INFO.unwrap().recursive_index() };
debug!(
"Found recursive_page_table_addr at 0x{:x}",
recursive_pgt as u64
);
debug!("Recursive index | BasePageSize | identifier_name |
paging.rs | >, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
#![allow(dead_code)]
use crate::arch::x86_64::kernel::irq;
use crate::arch::x86_64::kernel::processor;
use crate::arch::x86_64::kernel::BOOT_INFO;
use crate::arch::x86_64::mm::{physicalmem, virtualmem};
use crate::consts::*;
use crate::logging::*;
use crate::scheduler;
use core::arch::asm;
use core::convert::TryInto;
use core::marker::PhantomData;
use core::mem::size_of;
use core::ptr::write_bytes;
use num_traits::CheckedShr;
use x86::controlregs;
use x86::irq::*;
/// Pointer to the root page table (PML4)
const PML4_ADDRESS: *mut PageTable<PML4> = 0xFFFF_FFFF_FFFF_F000 as *mut PageTable<PML4>;
/// Number of Offset bits of a virtual address for a 4 KiB page, which are shifted away to get its Page Frame Number (PFN).
const PAGE_BITS: usize = 12;
/// Number of bits of the index in each table (PML4, PDPT, PD, PT).
const PAGE_MAP_BITS: usize = 9;
/// A mask where PAGE_MAP_BITS are set to calculate a table index.
const PAGE_MAP_MASK: usize = 0x1FF;
bitflags! {
/// Possible flags for an entry in either table (PML4, PDPT, PD, PT)
///
/// See Intel Vol. 3A, Tables 4-14 through 4-19
pub struct PageTableEntryFlags: usize {
/// Set if this entry is valid and points to a page or table.
const PRESENT = 1 << 0;
/// Set if memory referenced by this entry shall be writable.
const WRITABLE = 1 << 1;
/// Set if memory referenced by this entry shall be accessible from user-mode (Ring 3).
const USER_ACCESSIBLE = 1 << 2;
/// Set if Write-Through caching shall be enabled for memory referenced by this entry.
/// Otherwise, Write-Back caching is used.
const WRITE_THROUGH = 1 << 3;
/// Set if caching shall be disabled for memory referenced by this entry.
const CACHE_DISABLE = 1 << 4;
/// Set if software has accessed this entry (for memory access or address translation).
const ACCESSED = 1 << 5;
/// Only for page entries: Set if software has written to the memory referenced by this entry.
const DIRTY = 1 << 6;
/// Only for page entries in PDPT or PDT: Set if this entry references a 1 GiB (PDPT) or 2 MiB (PDT) page.
const HUGE_PAGE = 1 << 7;
/// Only for page entries: Set if this address translation is global for all tasks and does not need to
/// be flushed from the TLB when CR3 is reset.
const GLOBAL = 1 << 8;
/// Set if code execution shall be disabled for memory referenced by this entry.
const EXECUTE_DISABLE = 1 << 63;
}
}
impl PageTableEntryFlags {
/// An empty set of flags for unused/zeroed table entries.
/// Needed as long as empty() is no const function.
const BLANK: PageTableEntryFlags = PageTableEntryFlags { bits: 0 };
pub fn device(&mut self) -> &mut Self {
self.insert(PageTableEntryFlags::CACHE_DISABLE);
self
}
pub fn normal(&mut self) -> &mut Self {
self.remove(PageTableEntryFlags::CACHE_DISABLE);
self
}
pub fn read_only(&mut self) -> &mut Self {
self.remove(PageTableEntryFlags::WRITABLE);
self
}
pub fn writable(&mut self) -> &mut Self {
self.insert(PageTableEntryFlags::WRITABLE);
self
}
pub fn execute_disable(&mut self) -> &mut Self {
self.insert(PageTableEntryFlags::EXECUTE_DISABLE);
self
}
}
/// An entry in either table (PML4, PDPT, PD, PT)
#[derive(Clone, Copy)]
pub struct PageTableEntry {
/// Physical memory address this entry refers to, combined with flags from PageTableEntryFlags.
physical_address_and_flags: usize,
}
impl PageTableEntry {
/// Return the stored physical address.
pub fn address(&self) -> usize {
self.physical_address_and_flags
&!(BasePageSize::SIZE - 1)
&!(PageTableEntryFlags::EXECUTE_DISABLE).bits()
}
/// Returns whether this entry is valid (present).
fn is_present(&self) -> bool {
(self.physical_address_and_flags & PageTableEntryFlags::PRESENT.bits()) != 0
}
fn is_huge(&self) -> bool {
(self.physical_address_and_flags & PageTableEntryFlags::HUGE_PAGE.bits()) != 0
}
fn is_user(&self) -> bool {
(self.physical_address_and_flags & PageTableEntryFlags::USER_ACCESSIBLE.bits()) != 0
}
/// Mark this as a valid (present) entry and set address translation and flags.
///
/// # Arguments
///
/// * `physical_address` - The physical memory address this entry shall translate to
/// * `flags` - Flags from PageTableEntryFlags (note that the PRESENT and ACCESSED flags are set automatically)
fn set(&mut self, physical_address: usize, flags: PageTableEntryFlags) {
if flags.contains(PageTableEntryFlags::HUGE_PAGE) {
// HUGE_PAGE may indicate a 2 MiB or 1 GiB page.
// We don't know this here, so we can only verify that at least the offset bits for a 2 MiB page are zero.
assert!(
(physical_address % LargePageSize::SIZE) == 0,
"Physical address is not on a 2 MiB page boundary (physical_address = {:#X})",
physical_address
);
} else {
// Verify that the offset bits for a 4 KiB page are zero.
assert!(
(physical_address % BasePageSize::SIZE) == 0,
"Physical address is not on a 4 KiB page boundary (physical_address = {:#X})",
physical_address
);
}
// Verify that the physical address does not exceed the CPU's physical address width.
assert!(
CheckedShr::checked_shr(
&physical_address,
processor::get_physical_address_bits() as u32
) == Some(0),
"Physical address exceeds CPU's physical address width (physical_address = {:#X})",
physical_address
);
let mut flags_to_set = flags;
flags_to_set.insert(PageTableEntryFlags::PRESENT);
flags_to_set.insert(PageTableEntryFlags::ACCESSED);
self.physical_address_and_flags = physical_address | flags_to_set.bits();
}
}
/// A generic interface to support all possible page sizes.
///
/// This is defined as a subtrait of Copy to enable #[derive(Clone, Copy)] for Page.
/// Currently, deriving implementations for these traits only works if all dependent types implement it as well.
pub trait PageSize: Copy {
/// The page size in bytes.
const SIZE: usize;
/// The page table level at which a page of this size is mapped (from 0 for PT through 3 for PML4).
/// Implemented as a numeric value to enable numeric comparisons.
const MAP_LEVEL: usize;
/// Any extra flag that needs to be set to map a page of this size.
/// For example: PageTableEntryFlags::HUGE_PAGE
const MAP_EXTRA_FLAG: PageTableEntryFlags;
}
/// A 4 KiB page mapped in the PT.
#[derive(Clone, Copy)]
pub enum BasePageSize {}
impl PageSize for BasePageSize {
const SIZE: usize = 0x1000;
const MAP_LEVEL: usize = 0;
const MAP_EXTRA_FLAG: PageTableEntryFlags = PageTableEntryFlags::BLANK;
}
/// A 2 MiB page mapped in the PD.
#[derive(Clone, Copy)]
pub enum LargePageSize {}
impl PageSize for LargePageSize {
const SIZE: usize = 0x200000;
const MAP_LEVEL: usize = 1;
const MAP_EXTRA_FLAG: PageTableEntryFlags = PageTableEntryFlags::HUGE_PAGE;
}
/// A 1 GiB page mapped in the PDPT.
#[derive(Clone, Copy)]
pub enum HugePageSize {}
impl PageSize for HugePageSize {
const SIZE: usize = 0x40000000;
const MAP_LEVEL: usize = 2;
const MAP_EXTRA_FLAG: PageTableEntryFlags = PageTableEntryFlags::HUGE_PAGE;
}
/// A memory page of the size given by S.
#[derive(Clone, Copy)]
struct Page<S: PageSize> {
/// Virtual memory address of this page.
/// This is rounded to a page size boundary on creation.
virtual_address: usize,
/// Required by Rust to support the S parameter.
size: PhantomData<S>,
}
impl<S: PageSize> Page<S> {
/// Return the stored virtual address.
fn address(&self) -> usize {
self.virtual_address
}
/// Flushes this page from the TLB of this CPU.
#[inline(always)]
fn flush_from_tlb(&self) {
unsafe {
asm!("invlpg [{}]", in(reg) self.virtual_address, options(preserves_flags, nostack));
}
}
/// Returns whether the given virtual address is a valid one in the x86-64 memory model.
///
/// Current x86-64 supports only 48 bits for virtual memory addresses.
/// This is enforced by requiring bits 63 through 48 to replicate bit 47 (cf. Intel Vol. 1, 3.3.7.1).
/// As a consequence, the address space is divided into the two valid regions below 0x8000_0000_0000
/// and at or above 0xFFFF_8000_0000_0000.
///
/// Although we could make this check depend on the actual linear address width from the CPU,
/// any extension above 48-bit would require a new page table level, which we don't implement.
fn is_valid_address(virtual_address: usize) -> bool {
virtual_address < 0x8000_0000_0000 || virtual_address >= 0xFFFF_8000_0000_0000
}
/// Returns a Page including the given virtual address.
/// That means, the address is rounded down to a page size boundary.
fn including_address(virtual_address: usize) -> Self {
assert!(
Self::is_valid_address(virtual_address),
"Virtual address {:#X} is invalid",
virtual_address
);
if S::SIZE == 1024 * 1024 * 1024 {
assert!(processor::supports_1gib_pages());
}
Self {
virtual_address: align_down!(virtual_address, S::SIZE),
size: PhantomData,
}
}
/// Returns a PageIter to iterate from the given first Page to the given last Page (inclusive).
fn range(first: Self, last: Self) -> PageIter<S> {
assert!(first.virtual_address <= last.virtual_address);
PageIter {
current: first,
last: last,
}
}
/// Returns the index of this page in the table given by L.
fn table_index<L: PageTableLevel>(&self) -> usize {
assert!(L::LEVEL >= S::MAP_LEVEL);
self.virtual_address >> PAGE_BITS >> L::LEVEL * PAGE_MAP_BITS & PAGE_MAP_MASK
}
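// Worked example (illustrative, not from the original source): for virtual
// address 0x10_2000 and a 4 KiB page, the PT index (L::LEVEL = 0) is
// (0x10_2000 >> 12) & 0x1FF = 0x102, while the PD, PDPT and PML4 indices
// (levels 1 through 3) are all 0. Note that `*` binds tighter than `>>` and
// `&` binds last in the expression above.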
}
/// An iterator to walk through a range of pages of size S.
struct PageIter<S: PageSize> {
current: Page<S>,
last: Page<S>,
}
impl<S: PageSize> Iterator for PageIter<S> {
type Item = Page<S>;
fn next(&mut self) -> Option<Page<S>> {
if self.current.virtual_address <= self.last.virtual_address {
let p = self.current;
self.current.virtual_address += S::SIZE;
Some(p)
} else {
None
}
}
}
/// An interface to allow for a generic implementation of struct PageTable for all 4 page tables.
/// Must be implemented by all page tables.
trait PageTableLevel {
/// Numeric page table level (from 0 for PT through 3 for PML4) to enable numeric comparisons.
const LEVEL: usize;
}
/// An interface for page tables with sub page tables (all except PT).
/// Having both PageTableLevel and PageTableLevelWithSubtables leverages Rust's typing system to provide
/// a subtable method only for those that have sub page tables.
///
/// Kudos to Philipp Oppermann for the trick!
trait PageTableLevelWithSubtables: PageTableLevel {
type SubtableLevel;
}
/// The Page Map Level 4 (PML4) table, with numeric level 3 and PDPT subtables.
enum PML4 {}
impl PageTableLevel for PML4 {
const LEVEL: usize = 3;
}
impl PageTableLevelWithSubtables for PML4 {
type SubtableLevel = PDPT;
}
/// A Page Directory Pointer Table (PDPT), with numeric level 2 and PDT subtables.
enum PDPT {}
impl PageTableLevel for PDPT {
const LEVEL: usize = 2;
}
impl PageTableLevelWithSubtables for PDPT {
type SubtableLevel = PD;
}
/// A Page Directory (PD), with numeric level 1 and PT subtables.
enum PD {}
impl PageTableLevel for PD {
const LEVEL: usize = 1;
}
impl PageTableLevelWithSubtables for PD {
type SubtableLevel = PT;
}
/// A Page Table (PT), with numeric level 0 and no subtables.
enum PT {}
impl PageTableLevel for PT {
const LEVEL: usize = 0;
}
/// Representation of any page table (PML4, PDPT, PD, PT) in memory.
/// Parameter L supplies information for Rust's typing system to distinguish between the different tables.
struct PageTable<L> {
/// Each page table has 512 entries (can be calculated using PAGE_MAP_BITS).
entries: [PageTableEntry; 1 << PAGE_MAP_BITS],
/// Required by Rust to support the L parameter.
level: PhantomData<L>,
}
/// A trait defining methods every page table has to implement.
/// This additional trait is necessary to make use of Rust's specialization feature and provide a default
/// implementation of some methods.
trait PageTableMethods {
fn get_page_table_entry<S: PageSize>(&self, page: Page<S>) -> Option<PageTableEntry>;
fn map_page_in_this_table<S: PageSize>(
&mut self,
page: Page<S>,
physical_address: usize,
flags: PageTableEntryFlags,
) -> bool;
fn map_page<S: PageSize>(
&mut self,
page: Page<S>,
physical_address: usize,
flags: PageTableEntryFlags,
) -> bool;
fn drop_user_space(&mut self);
}
impl<L: PageTableLevel> PageTableMethods for PageTable<L> {
/// Maps a single page in this table to the given physical address.
/// Returns whether an existing entry was updated. You can use this return value to flush TLBs.
///
/// Must only be called if a page of this size is mapped at this page table level!
fn map_page_in_this_table<S: PageSize>(
&mut self,
page: Page<S>,
physical_address: usize,
flags: PageTableEntryFlags,
) -> bool {
assert!(L::LEVEL == S::MAP_LEVEL);
let index = page.table_index::<L>();
let flush = self.entries[index].is_present();
self.entries[index].set(
physical_address,
PageTableEntryFlags::DIRTY | S::MAP_EXTRA_FLAG | flags,
);
if flush {
page.flush_from_tlb();
}
flush
}
/// Returns the PageTableEntry for the given page if it is present, otherwise returns None.
///
/// This is the default implementation called only for PT.
/// It is overridden by a specialized implementation for all tables with sub tables (all except PT).
default fn get_page_table_entry<S: PageSize>(&self, page: Page<S>) -> Option<PageTableEntry> {
assert!(L::LEVEL == S::MAP_LEVEL);
let index = page.table_index::<L>();
if self.entries[index].is_present() {
Some(self.entries[index])
} else {
None
}
}
default fn drop_user_space(&mut self) {
let last = 1 << PAGE_MAP_BITS;
for index in 0..last {
if self.entries[index].is_present() && self.entries[index].is_user() {
let physical_address = self.entries[index].address();
debug!("Free page frame at 0x{:x}", physical_address);
physicalmem::deallocate(physical_address, BasePageSize::SIZE);
}
}
}
/// Maps a single page to the given physical address.
/// Returns whether an existing entry was updated. You can use this return value to flush TLBs.
///
/// This is the default implementation that just calls the map_page_in_this_table method.
/// It is overridden by a specialized implementation for all tables with sub tables (all except PT).
default fn map_page<S: PageSize>(
&mut self,
page: Page<S>,
physical_address: usize,
flags: PageTableEntryFlags,
) -> bool {
self.map_page_in_this_table::<S>(page, physical_address, flags)
}
}
impl<L: PageTableLevelWithSubtables> PageTableMethods for PageTable<L>
where
L::SubtableLevel: PageTableLevel,
{
/// Returns the PageTableEntry for the given page if it is present, otherwise returns None.
///
/// This is the implementation for all tables with subtables (PML4, PDPT, PDT).
/// It overrides the default implementation above.
fn get_page_table_entry<S: PageSize>(&self, page: Page<S>) -> Option<PageTableEntry> {
assert!(L::LEVEL >= S::MAP_LEVEL);
let index = page.table_index::<L>();
if self.entries[index].is_present() {
if L::LEVEL > S::MAP_LEVEL {
let subtable = self.subtable::<S>(page);
subtable.get_page_table_entry::<S>(page)
} else {
Some(self.entries[index])
}
} else {
None
}
}
fn drop_user_space(&mut self) {
let last = 1 << PAGE_MAP_BITS;
let table_address = self as *const PageTable<L> as usize;
for index in 0..last {
if self.entries[index].is_present() && self.entries[index].is_user() {
// currently, the user space uses only 4KB pages
if L::LEVEL > BasePageSize::MAP_LEVEL {
// Calculate the address of the subtable.
let subtable_address = (table_address << PAGE_MAP_BITS) | (index << PAGE_BITS);
let subtable =
unsafe { &mut *(subtable_address as *mut PageTable<L::SubtableLevel>) };
subtable.drop_user_space();
//let physical_address = self.entries[index].address();
//debug!("Free page table at 0x{:x}", physical_address);
//physicalmem::deallocate(physical_address, BasePageSize::SIZE);
}
}
}
}
/// Maps a single page to the given physical address.
/// Returns whether an existing entry was updated. You can use this return value to flush TLBs.
///
/// This is the implementation for all tables with subtables (PML4, PDPT, PDT).
/// It overrides the default implementation above.
fn map_page<S: PageSize>(
&mut self,
page: Page<S>,
physical_address: usize,
flags: PageTableEntryFlags,
) -> bool {
assert!(L::LEVEL >= S::MAP_LEVEL);
if L::LEVEL > S::MAP_LEVEL {
let index = page.table_index::<L>();
// Does the table exist yet?
if !self.entries[index].is_present() {
// Allocate a single 4 KiB page for the new entry and mark it as a valid, writable subtable.
let pt_addr = physicalmem::allocate(BasePageSize::SIZE);
if flags.contains(PageTableEntryFlags::USER_ACCESSIBLE) {
self.entries[index].set(
pt_addr,
PageTableEntryFlags::WRITABLE | PageTableEntryFlags::USER_ACCESSIBLE,
);
} else {
self.entries[index].set(pt_addr, PageTableEntryFlags::WRITABLE);
}
// Mark all entries as unused in the newly created table.
let subtable = self.subtable::<S>(page);
for entry in subtable.entries.iter_mut() {
entry.physical_address_and_flags = 0;
}
subtable.map_page::<S>(page, physical_address, flags)
} else {
let subtable = self.subtable::<S>(page);
subtable.map_page::<S>(page, physical_address, flags)
}
} else {
// Calling the default implementation from a specialized one is not supported (yet),
// so we have to resort to an extra function.
self.map_page_in_this_table::<S>(page, physical_address, flags)
}
}
}
impl<L: PageTableLevelWithSubtables> PageTable<L>
where
L::SubtableLevel: PageTableLevel,
{
/// Returns the next subtable for the given page in the page table hierarchy.
///
/// Must only be called if a page of this size is mapped in a subtable!
fn subtable<S: PageSize>(&self, page: Page<S>) -> &mut PageTable<L::SubtableLevel> {
assert!(L::LEVEL > S::MAP_LEVEL);
// Calculate the address of the subtable.
let index = page.table_index::<L>();
let table_address = self as *const PageTable<L> as usize;
let subtable_address = (table_address << PAGE_MAP_BITS) | (index << PAGE_BITS);
unsafe { &mut *(subtable_address as *mut PageTable<L::SubtableLevel>) }
}
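// Illustrative note (assumes the recursive entry is the last PML4 slot, index 511,
// as PML4_ADDRESS suggests): shifting the table's own virtual address left by
// PAGE_MAP_BITS and or-ing in `index << PAGE_BITS` walks one level down the
// hierarchy. For example, starting from PML4_ADDRESS = 0xFFFF_FFFF_FFFF_F000,
// the PDPT behind PML4 index 0 is addressed as
// (0xFFFF_FFFF_FFFF_F000 << 9) | (0 << 12) = 0xFFFF_FFFF_FFE0_0000.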
/// Maps a continuous range of pages.
///
/// # Arguments
///
/// * `range` - The range of pages of size S
/// * `physical_address` - First physical address to map these pages to
/// * `flags` - Flags from PageTableEntryFlags to set for the page table entry (e.g. WRITABLE or EXECUTE_DISABLE).
/// The PRESENT, ACCESSED, and DIRTY flags are already set automatically.
fn map_pages<S: PageSize>(
&mut self,
range: PageIter<S>,
physical_address: usize,
flags: PageTableEntryFlags,
) {
let mut current_physical_address = physical_address;
for page in range {
self.map_page(page, current_physical_address, flags);
current_physical_address += S::SIZE;
}
}
fn drop_user_space(&mut self) {
assert!(L::LEVEL == PML4::LEVEL);
// the last entry is required to get access to the page tables
let last = (1 << PAGE_MAP_BITS) - 1;
let table_address = self as *const PageTable<L> as usize;
for index in 0..last {
if self.entries[index].is_present() && self.entries[index].is_user() {
// Calculate the address of the subtable.
let subtable_address = (table_address << PAGE_MAP_BITS) | (index << PAGE_BITS);
let subtable =
unsafe { &mut *(subtable_address as *mut PageTable<L::SubtableLevel>) };
subtable.drop_user_space();
let physical_address = self.entries[index].address();
debug!("Free page table at 0x{:x}", physical_address);
physicalmem::deallocate(physical_address, BasePageSize::SIZE);
}
}
}
}
pub extern "x86-interrupt" fn page_fault_handler(
stack_frame: irq::ExceptionStackFrame,
error_code: u64,
) {
let mut virtual_address = unsafe { controlregs::cr2() };
// do we have to create the user-space stack?
if virtual_address > USER_SPACE_START {
virtual_address = align_down!(virtual_address, BasePageSize::SIZE);
// OK, user space wants memory (for the stack / heap)
let physical_address =
physicalmem::allocate_aligned(BasePageSize::SIZE, BasePageSize::SIZE);
debug!(
"Map 0x{:x} into the user space at 0x{:x}",
physical_address, virtual_address
);
map::<BasePageSize>(
virtual_address,
physical_address,
1,
PageTableEntryFlags::WRITABLE
| PageTableEntryFlags::USER_ACCESSIBLE
| PageTableEntryFlags::EXECUTE_DISABLE,
);
unsafe {
// clear new page
write_bytes(virtual_address as *mut u8, 0x00, BasePageSize::SIZE);
// clear cr2 to signal that the page fault has been handled by the page fault handler
controlregs::cr2_write(0);
}
} else {
// Anything else is an error!
let pferror = PageFaultError::from_bits_truncate(error_code as u32);
error!("Page Fault (#PF) Exception: {:#?}", stack_frame);
error!(
"virtual_address = {:#X}, page fault error = {}",
virtual_address, pferror
);
// clear cr2 to signal that the page fault has been handled by the page fault handler
unsafe {
controlregs::cr2_write(0);
}
scheduler::abort();
}
}
fn get_page_range<S: PageSize>(virtual_address: usize, count: usize) -> PageIter<S> {
let first_page = Page::<S>::including_address(virtual_address);
let last_page = Page::<S>::including_address(virtual_address + (count - 1) * S::SIZE);
Page::range(first_page, last_page)
}
pub fn get_page_table_entry<S: PageSize>(virtual_address: usize) -> Option<PageTableEntry> |
pub fn get_physical_address<S: PageSize>(virtual_address: usize) -> usize {
debug!("Getting physical address for {:#X}", virtual_address);
let page = Page::<S>::including_address(virtual_address);
let root_pagetable = unsafe { &mut *PML4_ADDRESS };
let address = root_pagetable
.get_page_table_entry(page)
.expect("Entry not present")
.address();
let offset = virtual_address & (S::SIZE - 1);
address | offset
}
/// Translate a virtual memory address to a physical one.
/// Just like get_physical_address, but automatically uses the correct page size for the respective memory address.
pub fn virtual_to_physical(virtual_address: usize) -> usize {
get_physical_address::<BasePageSize>(virtual_address)
}
pub fn unmap<S: PageSize>(virtual_address: usize, count: usize) {
debug!(
"Unmapping virtual address {:#X} ({} pages)",
virtual_address, count
);
let range = get_page_range::<S>(virtual_address, count);
let root_pagetable = unsafe { &mut *PML4_ADDRESS };
root_pagetable.map_pages(range, 0, PageTableEntryFlags::BLANK);
}
pub fn map<S: PageSize>(
virtual_address: usize,
physical_address: usize,
count: usize,
flags: PageTableEntryFlags,
) {
debug!(
"Mapping virtual address {:#X} to physical address {:#X} ({} pages)",
virtual_address, physical_address, count
);
let range = get_page_range::<S>(virtual_address, count);
let root_pagetable = unsafe { &mut *PML4_ADDRESS };
root_pagetable.map_pages(range, physical_address, flags);
}
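// Usage sketch (illustrative only; the addresses are made up): mapping a single
// 4 KiB MMIO page as an uncached, writable, non-executable device mapping could
// look like this:
//
//     let mut flags = PageTableEntryFlags::BLANK;
//     flags.device().writable().execute_disable();
//     map::<BasePageSize>(0xFFFF_8000_1000_0000, 0xFEE0_0000, 1, flags);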
static mut ROOT_PAGE_TABLE: usize = 0;
#[inline(always)]
pub fn get_kernel_root_page_table() -> usize {
unsafe { ROOT_PAGE_TABLE }
}
pub fn drop_user_space() {
let root_pagetable = unsafe { &mut *PML4_ADDRESS };
root_pagetable.drop_user_space();
}
// just a workaround to explain the difference between
// kernel and user space
pub fn create_usr_pgd() -> usize {
debug!("Create 1st level page table for the user-level task");
unsafe {
let physical_address =
physicalmem::allocate_aligned(BasePageSize::SIZE, BasePageSize::SIZE);
let user_page_table: usize =
virtualmem::allocate_aligned(BasePageSize::SIZE, BasePageSize::SIZE);
debug!(
"Map page frame 0x{:x} at virtual address 0x{:x}",
physical_address, user_page_table
);
map::<BasePageSize>(
user_page_table,
physical_address,
1,
PageTableEntryFlags::WRITABLE | PageTableEntryFlags::EXECUTE_DISABLE,
);
write_bytes(user_page_table as *mut u8, 0x00, BasePageSize::SIZE);
let recursive_pgt = BOOT_INFO.unwrap().recursive_page_table_addr as *const u64;
let recursive_pgt_idx = BOOT_INFO.unwrap().recursive_index();
let pml4 = user_page_table as *mut u64;
for i in 0..recursive_pgt_idx + 2 {
*pml4.offset(i.try_into().unwrap()) = *recursive_pgt.offset(i.try_into().unwrap());
}
let pml4 =
(user_page_table + BasePageSize::SIZE - size_of::<usize>()) as *mut PageTableEntry;
(*pml4).set(physical_address, PageTableEntryFlags::WRITABLE);
// unmap page table
unmap::<BasePageSize>(user_page_table, 1);
virtualmem::deallocate(user_page_table, BasePageSize::SIZE);
scheduler::set_root_page_table(physical_address);
physical_address
}
}
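// Note (summarizing the function above): the freshly allocated user PML4 reuses
// the kernel's lower entries (indices 0..=recursive_pgt_idx + 1 are copied from
// the boot page table), and its last entry points back to its own page frame so
// that the recursive-mapping trick keeps working once CR3 switches to it.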
pub fn init() {
let recursive_pgt = unsafe { BOOT_INFO.unwrap().recursive_page_table_addr } as *mut u64;
let recursive_pgt_idx = unsafe { BOOT_INFO.unwrap().recursive_index() };
debug!(
"Found recursive_page_table_addr at 0x{:x}",
recursive_pgt as u64
);
debug!("Recursive index | {
debug!("Looking up Page Table Entry for {:#X}", virtual_address);
let page = Page::<S>::including_address(virtual_address);
let root_pagetable = unsafe { &mut *PML4_ADDRESS };
root_pagetable.get_page_table_entry(page)
} | identifier_body |
paging.rs | MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
#![allow(dead_code)]
use crate::arch::x86_64::kernel::irq;
use crate::arch::x86_64::kernel::processor;
use crate::arch::x86_64::kernel::BOOT_INFO;
use crate::arch::x86_64::mm::{physicalmem, virtualmem};
use crate::consts::*;
use crate::logging::*;
use crate::scheduler;
use core::arch::asm;
use core::convert::TryInto;
use core::marker::PhantomData;
use core::mem::size_of;
use core::ptr::write_bytes;
use num_traits::CheckedShr;
use x86::controlregs;
use x86::irq::*;
/// Pointer to the root page table (PML4)
const PML4_ADDRESS: *mut PageTable<PML4> = 0xFFFF_FFFF_FFFF_F000 as *mut PageTable<PML4>;
/// Number of Offset bits of a virtual address for a 4 KiB page, which are shifted away to get its Page Frame Number (PFN).
const PAGE_BITS: usize = 12;
/// Number of bits of the index in each table (PML4, PDPT, PD, PT).
const PAGE_MAP_BITS: usize = 9;
/// A mask where PAGE_MAP_BITS are set to calculate a table index.
const PAGE_MAP_MASK: usize = 0x1FF;
bitflags! {
/// Possible flags for an entry in either table (PML4, PDPT, PD, PT)
///
/// See Intel Vol. 3A, Tables 4-14 through 4-19
pub struct PageTableEntryFlags: usize {
/// Set if this entry is valid and points to a page or table.
const PRESENT = 1 << 0;
/// Set if memory referenced by this entry shall be writable.
const WRITABLE = 1 << 1;
/// Set if memory referenced by this entry shall be accessible from user-mode (Ring 3).
const USER_ACCESSIBLE = 1 << 2;
/// Set if Write-Through caching shall be enabled for memory referenced by this entry.
/// Otherwise, Write-Back caching is used.
const WRITE_THROUGH = 1 << 3;
/// Set if caching shall be disabled for memory referenced by this entry.
const CACHE_DISABLE = 1 << 4;
/// Set if software has accessed this entry (for memory access or address translation).
const ACCESSED = 1 << 5;
/// Only for page entries: Set if software has written to the memory referenced by this entry.
const DIRTY = 1 << 6;
/// Only for page entries in PDPT or PDT: Set if this entry references a 1 GiB (PDPT) or 2 MiB (PDT) page.
const HUGE_PAGE = 1 << 7;
/// Only for page entries: Set if this address translation is global for all tasks and does not need to
/// be flushed from the TLB when CR3 is reset.
const GLOBAL = 1 << 8;
/// Set if code execution shall be disabled for memory referenced by this entry.
const EXECUTE_DISABLE = 1 << 63;
}
}
impl PageTableEntryFlags {
/// An empty set of flags for unused/zeroed table entries.
/// Needed as long as empty() is not a const function.
const BLANK: PageTableEntryFlags = PageTableEntryFlags { bits: 0 };
pub fn device(&mut self) -> &mut Self {
self.insert(PageTableEntryFlags::CACHE_DISABLE);
self
}
pub fn normal(&mut self) -> &mut Self {
self.remove(PageTableEntryFlags::CACHE_DISABLE);
self
}
pub fn read_only(&mut self) -> &mut Self {
self.remove(PageTableEntryFlags::WRITABLE);
self
}
pub fn writable(&mut self) -> &mut Self {
self.insert(PageTableEntryFlags::WRITABLE);
self
}
pub fn execute_disable(&mut self) -> &mut Self {
self.insert(PageTableEntryFlags::EXECUTE_DISABLE);
self
}
}
/// An entry in either table (PML4, PDPT, PD, PT)
#[derive(Clone, Copy)]
pub struct PageTableEntry {
/// Physical memory address this entry refers to, combined with flags from PageTableEntryFlags.
physical_address_and_flags: usize,
}
impl PageTableEntry {
/// Return the stored physical address.
pub fn address(&self) -> usize {
self.physical_address_and_flags
&!(BasePageSize::SIZE - 1)
&!(PageTableEntryFlags::EXECUTE_DISABLE).bits()
}
/// Returns whether this entry is valid (present).
fn is_present(&self) -> bool {
(self.physical_address_and_flags & PageTableEntryFlags::PRESENT.bits()) != 0
}
fn is_huge(&self) -> bool {
(self.physical_address_and_flags & PageTableEntryFlags::HUGE_PAGE.bits()) != 0
}
fn is_user(&self) -> bool {
(self.physical_address_and_flags & PageTableEntryFlags::USER_ACCESSIBLE.bits()) != 0
}
/// Mark this as a valid (present) entry and set address translation and flags.
///
/// # Arguments
///
/// * `physical_address` - The physical memory address this entry shall translate to
/// * `flags` - Flags from PageTableEntryFlags (note that the PRESENT and ACCESSED flags are set automatically)
fn set(&mut self, physical_address: usize, flags: PageTableEntryFlags) {
if flags.contains(PageTableEntryFlags::HUGE_PAGE) {
// HUGE_PAGE may indicate a 2 MiB or 1 GiB page.
// We don't know this here, so we can only verify that at least the offset bits for a 2 MiB page are zero.
assert!(
(physical_address % LargePageSize::SIZE) == 0,
"Physical address is not on a 2 MiB page boundary (physical_address = {:#X})",
physical_address
);
} else {
// Verify that the offset bits for a 4 KiB page are zero.
assert!(
(physical_address % BasePageSize::SIZE) == 0,
"Physical address is not on a 4 KiB page boundary (physical_address = {:#X})",
physical_address
);
}
// Verify that the physical address does not exceed the CPU's physical address width.
assert!(
CheckedShr::checked_shr(
&physical_address,
processor::get_physical_address_bits() as u32
) == Some(0),
"Physical address exceeds CPU's physical address width (physical_address = {:#X})",
physical_address
);
let mut flags_to_set = flags;
flags_to_set.insert(PageTableEntryFlags::PRESENT);
flags_to_set.insert(PageTableEntryFlags::ACCESSED);
self.physical_address_and_flags = physical_address | flags_to_set.bits();
}
}
/// A generic interface to support all possible page sizes.
///
/// This is defined as a subtrait of Copy to enable #[derive(Clone, Copy)] for Page.
/// Currently, deriving implementations for these traits only works if all dependent types implement it as well.
pub trait PageSize: Copy {
/// The page size in bytes.
const SIZE: usize;
/// The page table level at which a page of this size is mapped (from 0 for PT through 3 for PML4).
/// Implemented as a numeric value to enable numeric comparisons.
const MAP_LEVEL: usize;
/// Any extra flag that needs to be set to map a page of this size.
/// For example: PageTableEntryFlags::HUGE_PAGE
const MAP_EXTRA_FLAG: PageTableEntryFlags;
}
/// A 4 KiB page mapped in the PT.
#[derive(Clone, Copy)]
pub enum BasePageSize {}
impl PageSize for BasePageSize {
const SIZE: usize = 0x1000;
const MAP_LEVEL: usize = 0;
const MAP_EXTRA_FLAG: PageTableEntryFlags = PageTableEntryFlags::BLANK;
}
/// A 2 MiB page mapped in the PD.
#[derive(Clone, Copy)]
pub enum LargePageSize {}
impl PageSize for LargePageSize {
const SIZE: usize = 0x200000;
const MAP_LEVEL: usize = 1;
const MAP_EXTRA_FLAG: PageTableEntryFlags = PageTableEntryFlags::HUGE_PAGE;
}
/// A 1 GiB page mapped in the PDPT.
#[derive(Clone, Copy)]
pub enum HugePageSize {}
impl PageSize for HugePageSize {
const SIZE: usize = 0x40000000;
const MAP_LEVEL: usize = 2;
const MAP_EXTRA_FLAG: PageTableEntryFlags = PageTableEntryFlags::HUGE_PAGE;
}
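// Summary of the page sizes defined above: BasePageSize = 4 KiB (PT, level 0),
// LargePageSize = 2 MiB (PD, level 1, HUGE_PAGE) and HugePageSize = 1 GiB
// (PDPT, level 2, HUGE_PAGE).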
/// A memory page of the size given by S.
#[derive(Clone, Copy)]
struct Page<S: PageSize> {
/// Virtual memory address of this page.
/// This is rounded to a page size boundary on creation.
virtual_address: usize,
/// Required by Rust to support the S parameter.
size: PhantomData<S>,
}
impl<S: PageSize> Page<S> {
/// Return the stored virtual address.
fn address(&self) -> usize {
self.virtual_address
}
/// Flushes this page from the TLB of this CPU.
#[inline(always)]
fn flush_from_tlb(&self) {
unsafe {
asm!("invlpg [{}]", in(reg) self.virtual_address, options(preserves_flags, nostack));
}
}
/// Returns whether the given virtual address is a valid one in the x86-64 memory model.
///
/// Current x86-64 supports only 48 bits for virtual memory addresses.
/// This is enforced by requiring bits 63 through 48 to replicate bit 47 (cf. Intel Vol. 1, 3.3.7.1).
/// As a consequence, the address space is divided into the two valid regions below 0x8000_0000_0000
/// and at or above 0xFFFF_8000_0000_0000.
///
/// Although we could make this check depend on the actual linear address width from the CPU,
/// any extension above 48-bit would require a new page table level, which we don't implement.
fn is_valid_address(virtual_address: usize) -> bool {
virtual_address < 0x8000_0000_0000 || virtual_address >= 0xFFFF_8000_0000_0000
}
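// Examples (illustrative): 0x0000_7FFF_FFFF_F000 and 0xFFFF_8000_0000_0000 are
// canonical and accepted, while 0x0000_8000_0000_0000 (bit 47 set, bits 48-63
// clear) is rejected.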
/// Returns a Page including the given virtual address.
/// That means, the address is rounded down to a page size boundary.
fn including_address(virtual_address: usize) -> Self {
assert!(
Self::is_valid_address(virtual_address),
"Virtual address {:#X} is invalid",
virtual_address
);
if S::SIZE == 1024 * 1024 * 1024 {
assert!(processor::supports_1gib_pages());
}
Self {
virtual_address: align_down!(virtual_address, S::SIZE),
size: PhantomData,
}
}
/// Returns a PageIter to iterate from the given first Page to the given last Page (inclusive).
fn range(first: Self, last: Self) -> PageIter<S> {
assert!(first.virtual_address <= last.virtual_address);
PageIter {
current: first,
last: last,
}
}
/// Returns the index of this page in the table given by L.
fn table_index<L: PageTableLevel>(&self) -> usize {
assert!(L::LEVEL >= S::MAP_LEVEL);
self.virtual_address >> PAGE_BITS >> L::LEVEL * PAGE_MAP_BITS & PAGE_MAP_MASK
}
}
/// An iterator to walk through a range of pages of size S.
struct PageIter<S: PageSize> {
current: Page<S>,
last: Page<S>,
}
impl<S: PageSize> Iterator for PageIter<S> {
type Item = Page<S>;
fn next(&mut self) -> Option<Page<S>> {
if self.current.virtual_address <= self.last.virtual_address {
let p = self.current;
self.current.virtual_address += S::SIZE;
Some(p)
} else {
None
}
}
}
/// An interface to allow for a generic implementation of struct PageTable for all 4 page tables.
/// Must be implemented by all page tables.
trait PageTableLevel {
/// Numeric page table level (from 0 for PT through 3 for PML4) to enable numeric comparisons.
const LEVEL: usize;
}
/// An interface for page tables with sub page tables (all except PT).
/// Having both PageTableLevel and PageTableLevelWithSubtables leverages Rust's typing system to provide
/// a subtable method only for those that have sub page tables.
///
/// Kudos to Philipp Oppermann for the trick!
trait PageTableLevelWithSubtables: PageTableLevel {
type SubtableLevel;
}
/// The Page Map Level 4 (PML4) table, with numeric level 3 and PDPT subtables.
enum PML4 {}
impl PageTableLevel for PML4 {
const LEVEL: usize = 3;
}
impl PageTableLevelWithSubtables for PML4 {
type SubtableLevel = PDPT;
}
/// A Page Directory Pointer Table (PDPT), with numeric level 2 and PDT subtables.
enum PDPT {}
impl PageTableLevel for PDPT {
const LEVEL: usize = 2;
}
impl PageTableLevelWithSubtables for PDPT {
type SubtableLevel = PD; |
/// A Page Directory (PD), with numeric level 1 and PT subtables.
enum PD {}
impl PageTableLevel for PD {
const LEVEL: usize = 1;
}
impl PageTableLevelWithSubtables for PD {
type SubtableLevel = PT;
}
/// A Page Table (PT), with numeric level 0 and no subtables.
enum PT {}
impl PageTableLevel for PT {
const LEVEL: usize = 0;
}
/// Representation of any page table (PML4, PDPT, PD, PT) in memory.
/// Parameter L supplies information for Rust's typing system to distinguish between the different tables.
struct PageTable<L> {
/// Each page table has 512 entries (can be calculated using PAGE_MAP_BITS).
entries: [PageTableEntry; 1 << PAGE_MAP_BITS],
/// Required by Rust to support the L parameter.
level: PhantomData<L>,
}
/// A trait defining methods every page table has to implement.
/// This additional trait is necessary to make use of Rust's specialization feature and provide a default
/// implementation of some methods.
trait PageTableMethods {
fn get_page_table_entry<S: PageSize>(&self, page: Page<S>) -> Option<PageTableEntry>;
fn map_page_in_this_table<S: PageSize>(
&mut self,
page: Page<S>,
physical_address: usize,
flags: PageTableEntryFlags,
) -> bool;
fn map_page<S: PageSize>(
&mut self,
page: Page<S>,
physical_address: usize,
flags: PageTableEntryFlags,
) -> bool;
fn drop_user_space(&mut self);
}
impl<L: PageTableLevel> PageTableMethods for PageTable<L> {
/// Maps a single page in this table to the given physical address.
/// Returns whether an existing entry was updated. You can use this return value to flush TLBs.
///
/// Must only be called if a page of this size is mapped at this page table level!
fn map_page_in_this_table<S: PageSize>(
&mut self,
page: Page<S>,
physical_address: usize,
flags: PageTableEntryFlags,
) -> bool {
assert!(L::LEVEL == S::MAP_LEVEL);
let index = page.table_index::<L>();
let flush = self.entries[index].is_present();
self.entries[index].set(
physical_address,
PageTableEntryFlags::DIRTY | S::MAP_EXTRA_FLAG | flags,
);
if flush {
page.flush_from_tlb();
}
flush
}
/// Returns the PageTableEntry for the given page if it is present, otherwise returns None.
///
/// This is the default implementation called only for PT.
/// It is overridden by a specialized implementation for all tables with sub tables (all except PT).
default fn get_page_table_entry<S: PageSize>(&self, page: Page<S>) -> Option<PageTableEntry> {
assert!(L::LEVEL == S::MAP_LEVEL);
let index = page.table_index::<L>();
if self.entries[index].is_present() {
Some(self.entries[index])
} else {
None
}
}
default fn drop_user_space(&mut self) {
let last = 1 << PAGE_MAP_BITS;
for index in 0..last {
if self.entries[index].is_present() && self.entries[index].is_user() {
let physical_address = self.entries[index].address();
debug!("Free page frame at 0x{:x}", physical_address);
physicalmem::deallocate(physical_address, BasePageSize::SIZE);
}
}
}
/// Maps a single page to the given physical address.
/// Returns whether an existing entry was updated. You can use this return value to flush TLBs.
///
/// This is the default implementation that just calls the map_page_in_this_table method.
/// It is overridden by a specialized implementation for all tables with sub tables (all except PT).
default fn map_page<S: PageSize>(
&mut self,
page: Page<S>,
physical_address: usize,
flags: PageTableEntryFlags,
) -> bool {
self.map_page_in_this_table::<S>(page, physical_address, flags)
}
}
impl<L: PageTableLevelWithSubtables> PageTableMethods for PageTable<L>
where
L::SubtableLevel: PageTableLevel,
{
/// Returns the PageTableEntry for the given page if it is present, otherwise returns None.
///
/// This is the implementation for all tables with subtables (PML4, PDPT, PDT).
/// It overrides the default implementation above.
fn get_page_table_entry<S: PageSize>(&self, page: Page<S>) -> Option<PageTableEntry> {
assert!(L::LEVEL >= S::MAP_LEVEL);
let index = page.table_index::<L>();
if self.entries[index].is_present() {
if L::LEVEL > S::MAP_LEVEL {
let subtable = self.subtable::<S>(page);
subtable.get_page_table_entry::<S>(page)
} else {
Some(self.entries[index])
}
} else {
None
}
}
fn drop_user_space(&mut self) {
let last = 1 << PAGE_MAP_BITS;
let table_address = self as *const PageTable<L> as usize;
for index in 0..last {
if self.entries[index].is_present() && self.entries[index].is_user() {
// currently, the user space uses only 4KB pages
if L::LEVEL > BasePageSize::MAP_LEVEL {
// Calculate the address of the subtable.
let subtable_address = (table_address << PAGE_MAP_BITS) | (index << PAGE_BITS);
let subtable =
unsafe { &mut *(subtable_address as *mut PageTable<L::SubtableLevel>) };
subtable.drop_user_space();
//let physical_address = self.entries[index].address();
//debug!("Free page table at 0x{:x}", physical_address);
//physicalmem::deallocate(physical_address, BasePageSize::SIZE);
}
}
}
}
/// Maps a single page to the given physical address.
/// Returns whether an existing entry was updated. You can use this return value to flush TLBs.
///
/// This is the implementation for all tables with subtables (PML4, PDPT, PDT).
/// It overrides the default implementation above.
fn map_page<S: PageSize>(
&mut self,
page: Page<S>,
physical_address: usize,
flags: PageTableEntryFlags,
) -> bool {
assert!(L::LEVEL >= S::MAP_LEVEL);
if L::LEVEL > S::MAP_LEVEL {
let index = page.table_index::<L>();
// Does the table exist yet?
if !self.entries[index].is_present() {
// Allocate a single 4 KiB page for the new entry and mark it as a valid, writable subtable.
let pt_addr = physicalmem::allocate(BasePageSize::SIZE);
if flags.contains(PageTableEntryFlags::USER_ACCESSIBLE) {
self.entries[index].set(
pt_addr,
PageTableEntryFlags::WRITABLE | PageTableEntryFlags::USER_ACCESSIBLE,
);
} else {
self.entries[index].set(pt_addr, PageTableEntryFlags::WRITABLE);
}
// Mark all entries as unused in the newly created table.
let subtable = self.subtable::<S>(page);
for entry in subtable.entries.iter_mut() {
entry.physical_address_and_flags = 0;
}
subtable.map_page::<S>(page, physical_address, flags)
} else {
let subtable = self.subtable::<S>(page);
subtable.map_page::<S>(page, physical_address, flags)
}
} else {
// Calling the default implementation from a specialized one is not supported (yet),
// so we have to resort to an extra function.
self.map_page_in_this_table::<S>(page, physical_address, flags)
}
}
}
impl<L: PageTableLevelWithSubtables> PageTable<L>
where
L::SubtableLevel: PageTableLevel,
{
/// Returns the next subtable for the given page in the page table hierarchy.
///
/// Must only be called if a page of this size is mapped in a subtable!
fn subtable<S: PageSize>(&self, page: Page<S>) -> &mut PageTable<L::SubtableLevel> {
assert!(L::LEVEL > S::MAP_LEVEL);
// Calculate the address of the subtable.
let index = page.table_index::<L>();
let table_address = self as *const PageTable<L> as usize;
let subtable_address = (table_address << PAGE_MAP_BITS) | (index << PAGE_BITS);
unsafe { &mut *(subtable_address as *mut PageTable<L::SubtableLevel>) }
}
/// Maps a continuous range of pages.
///
/// # Arguments
///
/// * `range` - The range of pages of size S
/// * `physical_address` - First physical address to map these pages to
/// * `flags` - Flags from PageTableEntryFlags to set for the page table entry (e.g. WRITABLE or EXECUTE_DISABLE).
/// The PRESENT, ACCESSED, and DIRTY flags are already set automatically.
fn map_pages<S: PageSize>(
&mut self,
range: PageIter<S>,
physical_address: usize,
flags: PageTableEntryFlags,
) {
let mut current_physical_address = physical_address;
for page in range {
self.map_page(page, current_physical_address, flags);
current_physical_address += S::SIZE;
}
}
fn drop_user_space(&mut self) {
assert!(L::LEVEL == PML4::LEVEL);
// the last entry is required to get access to the page tables
let last = (1 << PAGE_MAP_BITS) - 1;
let table_address = self as *const PageTable<L> as usize;
for index in 0..last {
if self.entries[index].is_present() && self.entries[index].is_user() {
// Calculate the address of the subtable.
let subtable_address = (table_address << PAGE_MAP_BITS) | (index << PAGE_BITS);
let subtable =
unsafe { &mut *(subtable_address as *mut PageTable<L::SubtableLevel>) };
subtable.drop_user_space();
let physical_address = self.entries[index].address();
debug!("Free page table at 0x{:x}", physical_address);
physicalmem::deallocate(physical_address, BasePageSize::SIZE);
}
}
}
}
pub extern "x86-interrupt" fn page_fault_handler(
stack_frame: irq::ExceptionStackFrame,
error_code: u64,
) {
let mut virtual_address = unsafe { controlregs::cr2() };
// do we have to create the user-space stack?
if virtual_address > USER_SPACE_START {
virtual_address = align_down!(virtual_address, BasePageSize::SIZE);
// OK, user space wants memory (for the stack / heap)
let physical_address =
physicalmem::allocate_aligned(BasePageSize::SIZE, BasePageSize::SIZE);
debug!(
"Map 0x{:x} into the user space at 0x{:x}",
physical_address, virtual_address
);
map::<BasePageSize>(
virtual_address,
physical_address,
1,
PageTableEntryFlags::WRITABLE
| PageTableEntryFlags::USER_ACCESSIBLE
| PageTableEntryFlags::EXECUTE_DISABLE,
);
unsafe {
// clear new page
write_bytes(virtual_address as *mut u8, 0x00, BasePageSize::SIZE);
// clear cr2 to signal that the page fault has been handled by the page fault handler
controlregs::cr2_write(0);
}
} else {
// Anything else is an error!
let pferror = PageFaultError::from_bits_truncate(error_code as u32);
error!("Page Fault (#PF) Exception: {:#?}", stack_frame);
error!(
"virtual_address = {:#X}, page fault error = {}",
virtual_address, pferror
);
// clear cr2 to signal that the page fault has been handled by the page fault handler
unsafe {
controlregs::cr2_write(0);
}
scheduler::abort();
}
}
fn get_page_range<S: PageSize>(virtual_address: usize, count: usize) -> PageIter<S> {
let first_page = Page::<S>::including_address(virtual_address);
let last_page = Page::<S>::including_address(virtual_address + (count - 1) * S::SIZE);
Page::range(first_page, last_page)
}
pub fn get_page_table_entry<S: PageSize>(virtual_address: usize) -> Option<PageTableEntry> {
debug!("Looking up Page Table Entry for {:#X}", virtual_address);
let page = Page::<S>::including_address(virtual_address);
let root_pagetable = unsafe { &mut *PML4_ADDRESS };
root_pagetable.get_page_table_entry(page)
}
pub fn get_physical_address<S: PageSize>(virtual_address: usize) -> usize {
debug!("Getting physical address for {:#X}", virtual_address);
let page = Page::<S>::including_address(virtual_address);
let root_pagetable = unsafe { &mut *PML4_ADDRESS };
let address = root_pagetable
.get_page_table_entry(page)
.expect("Entry not present")
.address();
let offset = virtual_address & (S::SIZE - 1);
address | offset
}
/// Translate a virtual memory address to a physical one.
/// Just like get_physical_address, but automatically uses the correct page size for the respective memory address.
pub fn virtual_to_physical(virtual_address: usize) -> usize {
get_physical_address::<BasePageSize>(virtual_address)
}
pub fn unmap<S: PageSize>(virtual_address: usize, count: usize) {
debug!(
"Unmapping virtual address {:#X} ({} pages)",
virtual_address, count
);
let range = get_page_range::<S>(virtual_address, count);
let root_pagetable = unsafe { &mut *PML4_ADDRESS };
root_pagetable.map_pages(range, 0, PageTableEntryFlags::BLANK);
}
pub fn map<S: PageSize>(
virtual_address: usize,
physical_address: usize,
count: usize,
flags: PageTableEntryFlags,
) {
debug!(
"Mapping virtual address {:#X} to physical address {:#X} ({} pages)",
virtual_address, physical_address, count
);
let range = get_page_range::<S>(virtual_address, count);
let root_pagetable = unsafe { &mut *PML4_ADDRESS };
root_pagetable.map_pages(range, physical_address, flags);
}
static mut ROOT_PAGE_TABLE: usize = 0;
#[inline(always)]
pub fn get_kernel_root_page_table() -> usize {
unsafe { ROOT_PAGE_TABLE }
}
pub fn drop_user_space() {
let root_pagetable = unsafe { &mut *PML4_ADDRESS };
root_pagetable.drop_user_space();
}
// just a workaround to explain the difference between
// kernel and user space
pub fn create_usr_pgd() -> usize {
debug!("Create 1st level page table for the user-level task");
unsafe {
let physical_address =
physicalmem::allocate_aligned(BasePageSize::SIZE, BasePageSize::SIZE);
let user_page_table: usize =
virtualmem::allocate_aligned(BasePageSize::SIZE, BasePageSize::SIZE);
debug!(
"Map page frame 0x{:x} at virtual address 0x{:x}",
physical_address, user_page_table
);
map::<BasePageSize>(
user_page_table,
physical_address,
1,
PageTableEntryFlags::WRITABLE | PageTableEntryFlags::EXECUTE_DISABLE,
);
write_bytes(user_page_table as *mut u8, 0x00, BasePageSize::SIZE);
let recursive_pgt = BOOT_INFO.unwrap().recursive_page_table_addr as *const u64;
let recursive_pgt_idx = BOOT_INFO.unwrap().recursive_index();
let pml4 = user_page_table as *mut u64;
for i in 0..recursive_pgt_idx + 2 {
*pml4.offset(i.try_into().unwrap()) = *recursive_pgt.offset(i.try_into().unwrap());
}
let pml4 =
(user_page_table + BasePageSize::SIZE - size_of::<usize>()) as *mut PageTableEntry;
(*pml4).set(physical_address, PageTableEntryFlags::WRITABLE);
// unmap page table
unmap::<BasePageSize>(user_page_table, 1);
virtualmem::deallocate(user_page_table, BasePageSize::SIZE);
scheduler::set_root_page_table(physical_address);
physical_address
}
}
pub fn init() {
let recursive_pgt = unsafe { BOOT_INFO.unwrap().recursive_page_table_addr } as *mut u64;
let recursive_pgt_idx = unsafe { BOOT_INFO.unwrap().recursive_index() };
debug!(
"Found recursive_page_table_addr at 0x{:x}",
recursive_pgt as u64
);
debug!("Recursive index: {}", | } | random_line_split |
inherents.rs | use nimiq_account::StakingContract;
use nimiq_block::{ForkProof, MacroBlock, MacroHeader, SkipBlockInfo};
use nimiq_blockchain_interface::AbstractBlockchain;
use nimiq_database as db;
use nimiq_keys::Address;
use nimiq_primitives::{
account::AccountType,
coin::Coin,
policy::Policy,
slots_allocation::{JailedValidator, PenalizedSlot},
};
use nimiq_transaction::{inherent::Inherent, reward::RewardTransaction};
use nimiq_vrf::{AliasMethod, VrfUseCase};
use crate::{blockchain_state::BlockchainState, reward::block_reward_for_batch, Blockchain};
/// Implements methods that create inherents.
impl Blockchain {
pub fn create_macro_block_inherents(&self, macro_block: &MacroBlock) -> Vec<Inherent> {
let mut inherents: Vec<Inherent> = vec![];
// Every macro block is the end of a batch, so we need to finalize the batch.
inherents.append(&mut self.finalize_previous_batch(macro_block));
// If this block is an election block, we also need to finalize the epoch.
if Policy::is_election_block_at(macro_block.block_number()) {
// On election the previous epoch needs to be finalized.
// We can rely on `state` here, since we cannot revert macro blocks.
inherents.push(self.finalize_previous_epoch());
}
inherents
}
/// Given fork proofs and (or) a skip block, it returns the respective punishment inherents. It expects
/// verified fork proofs and (or) skip block.
pub fn create_punishment_inherents(
&self,
block_number: u32,
fork_proofs: &[ForkProof],
skip_block_info: Option<SkipBlockInfo>,
txn_option: Option<&db::TransactionProxy>,
) -> Vec<Inherent> {
let mut inherents = vec![];
for fork_proof in fork_proofs {
trace!("Creating inherent from fork proof: {:?}", fork_proof);
inherents.push(self.inherent_from_fork_proof(block_number, fork_proof, txn_option));
}
if let Some(skip_block_info) = skip_block_info {
trace!("Creating inherent from skip block: {:?}", skip_block_info);
inherents.push(self.inherent_from_skip_block_info(&skip_block_info, txn_option));
}
inherents
}
/// It creates a jail inherent from a fork proof. It expects a *verified* fork proof!
pub fn inherent_from_fork_proof(
&self,
reporting_block: u32, // PITODO: we can get it from the blockchain, should be head block number + 1
fork_proof: &ForkProof,
txn_option: Option<&db::TransactionProxy>,
) -> Inherent {
// Get the slot owner and slot number for this block number.
let proposer_slot = self
.get_proposer_at(
fork_proof.header1.block_number,
fork_proof.header1.block_number,
fork_proof.prev_vrf_seed.entropy(),
txn_option,
)
.expect("Couldn't calculate slot owner!");
// If the reporting block is in a new epoch, we check if the proposer is still a validator in this epoch
// and retrieve its new slots.
let new_epoch_slot_range = if Policy::epoch_at(reporting_block)
> Policy::epoch_at(fork_proof.header1.block_number)
{
self.current_validators()
.expect("We need to have validators")
.get_validator_by_address(&proposer_slot.validator.address)
.map(|validator| validator.slots.clone())
} else {
None
};
// Create the JailedValidator struct.
let jailed_validator = JailedValidator {
slots: proposer_slot.validator.slots,
validator_address: proposer_slot.validator.address,
offense_event_block: fork_proof.header1.block_number,
};
// Create the corresponding jail inherent.
Inherent::Jail {
jailed_validator,
new_epoch_slot_range,
}
}
/// It creates a penalize inherent from a skip block. It expects a *verified* skip block!
pub fn inherent_from_skip_block_info(
&self,
skip_block_info: &SkipBlockInfo,
txn_option: Option<&db::TransactionProxy>,
) -> Inherent {
// Get the slot owner and slot number for this block number.
let proposer_slot = self
.get_proposer_at(
skip_block_info.block_number,
skip_block_info.block_number,
skip_block_info.vrf_entropy.clone(),
txn_option,
)
.expect("Couldn't calculate slot owner!");
debug!(
address = %proposer_slot.validator.address,
"Penalize inherent from skip block"
);
// Create the PenalizedSlot struct.
let slot = PenalizedSlot {
slot: proposer_slot.number,
validator_address: proposer_slot.validator.address,
offense_event_block: skip_block_info.block_number,
};
// Create the corresponding penalize inherent.
Inherent::Penalize { slot }
}
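// Note (summarizing the two constructors above): a fork proof results in an
// Inherent::Jail covering all slots of the offending validator, while a skip
// block only results in an Inherent::Penalize for the single slot that failed
// to produce the block.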
/// Creates the inherents to finalize a batch. The inherents are for reward distribution and
/// updating the StakingContract.
pub fn finalize_previous_batch(&self, macro_block: &MacroBlock) -> Vec<Inherent> {
// Special case for first batch: Batch 0 is finalized by definition.
if Policy::batch_at(macro_block.block_number()) - 1 == 0 {
return vec![];
}
// To get the inherents we either fetch the reward transactions from the macro body;
// or we create the transactions when there is no macro body.
let mut inherents: Vec<Inherent> = if let Some(body) = macro_block.body.as_ref() {
body.transactions.iter().map(Inherent::from).collect()
} else {
self.create_reward_transactions(
self.state(),
¯o_block.header,
&self.get_staking_contract(),
)
.iter()
.map(Inherent::from)
.collect()
};
// Push FinalizeBatch inherent to update StakingContract.
inherents.push(Inherent::FinalizeBatch);
inherents
}
/// Creates the reward transactions for the previous batch. They distribute the block reward and
/// transaction fees and are turned into reward inherents when the batch is finalized.
pub fn create_reward_transactions(
&self,
state: &BlockchainState,
macro_header: &MacroHeader,
staking_contract: &StakingContract,
) -> Vec<RewardTransaction> {
let prev_macro_info = &state.macro_info;
// Special case for first batch: Batch 0 is finalized by definition.
if Policy::batch_at(macro_header.block_number) - 1 == 0 {
return vec![];
}
// Get validator slots
// NOTE: Fields `current_slots` and `previous_slots` are expected to always be set.
let validator_slots = if Policy::first_batch_of_epoch(macro_header.block_number) {
state
.previous_slots
.as_ref()
.expect("Slots for last batch are missing")
} else {
state
.current_slots
.as_ref()
.expect("Slots for current batch are missing")
};
// Calculate the slots that will receive rewards.
// Rewards are for the previous batch (to give validators time to report misbehavior)
let penalized_set = staking_contract
.punished_slots
.previous_batch_punished_slots();
// Total reward for the previous batch
let block_reward = block_reward_for_batch(
macro_header,
&prev_macro_info.head.unwrap_macro_ref().header,
self.genesis_supply,
self.genesis_timestamp,
);
let tx_fees = prev_macro_info.cum_tx_fees;
let reward_pot = block_reward + tx_fees;
// Distribute reward between all slots and calculate the remainder
let slot_reward = reward_pot / Policy::SLOTS as u64;
let remainder = reward_pot % Policy::SLOTS as u64;
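// Worked example (illustrative; assumes Policy::SLOTS were 512): with a reward
// pot of 10_000 Lunas, slot_reward = 10_000 / 512 = 19 and remainder =
// 10_000 % 512 = 272, which is later handed to one randomly chosen accepting
// slot below.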
// The first slot number of the current validator
let mut first_slot_number = 0;
// Peekable iterator to collect penalized slots for validator
let mut penalized_set_iter = penalized_set.iter().peekable();
// All accepted inherents.
let mut transactions = Vec::new();
// Remember the number of eligible slots that a validator had (that was able to accept the inherent)
let mut num_eligible_slots_for_accepted_tx = Vec::new();
// Remember that the total amount of reward must be burned. The reward for a slot is burned
// either because the slot was penalized or because the corresponding validator was unable to
// accept the inherent.
let mut burned_reward = Coin::ZERO;
// Compute inherents
for validator_slot in validator_slots.iter() {
// The interval of slot numbers for the current slot band is
// [first_slot_number, last_slot_number). So it actually doesn't include
// `last_slot_number`.
let last_slot_number = first_slot_number + validator_slot.num_slots();
// Compute the number of punishments for this validator slot band.
let mut num_eligible_slots = validator_slot.num_slots();
let mut num_penalized_slots = 0;
while let Some(next_penalized_slot) = penalized_set_iter.peek() {
let next_penalized_slot = *next_penalized_slot as u16;
assert!(next_penalized_slot >= first_slot_number);
if next_penalized_slot < last_slot_number {
assert!(num_eligible_slots > 0);
penalized_set_iter.next();
num_eligible_slots -= 1;
num_penalized_slots += 1;
} else {
break;
}
}
// Compute reward from slot reward and number of eligible slots. Also update the burned
// reward from the number of penalized slots.
let reward = slot_reward
.checked_mul(num_eligible_slots as u64)
.expect("Overflow in reward");
burned_reward += slot_reward
.checked_mul(num_penalized_slots as u64)
.expect("Overflow in reward");
// Do not create reward transactions for zero rewards
if !reward.is_zero() {
// Create inherent for the reward.
let staking_contract = self.get_staking_contract();
let data_store = self.get_staking_contract_store();
let txn = self.read_transaction();
let validator = staking_contract
.get_validator(&data_store.read(&txn), &validator_slot.address)
.expect("Couldn't find validator in the accounts trie when paying rewards!");
let tx: RewardTransaction = RewardTransaction {
recipient: validator.reward_address.clone(),
value: reward,
};
// Test whether account will accept inherent. If it can't then the reward will be
// burned.
// TODO Improve this check: it assumes that only BasicAccounts can receive transactions.
let account = state.accounts.get_complete(&tx.recipient, Some(&txn));
if account.account_type() == AccountType::Basic {
num_eligible_slots_for_accepted_tx.push(num_eligible_slots);
transactions.push(tx);
} else {
debug!(
target_address = %tx.recipient,
reward = %tx.value,
"Can't accept batch reward"
);
burned_reward += reward;
}
}
// Update first_slot_number for next iteration
first_slot_number = last_slot_number;
}
// Check that number of accepted inherents is equal to length of the map that gives us the
// corresponding number of slots for that staker (which should be equal to the number of
// validators that will receive rewards).
assert_eq!(transactions.len(), num_eligible_slots_for_accepted_tx.len());
// Get RNG from last block's seed and build lookup table based on number of eligible slots.
let mut rng = macro_header.seed.rng(VrfUseCase::RewardDistribution);
let lookup = AliasMethod::new(num_eligible_slots_for_accepted_tx);
// Randomly give remainder to one accepting slot. We don't bother to distribute it over all
// accepting slots because the remainder is always at most SLOTS - 1 Lunas.
let index = lookup.sample(&mut rng);
transactions[index].value += remainder;
// Create the inherent for the burned reward.
if burned_reward > Coin::ZERO {
let tx = RewardTransaction {
recipient: Address::burn_address(),
value: burned_reward,
};
transactions.push(tx);
}
transactions
}
/// Creates the inherent to finalize an epoch. The inherent is for updating the StakingContract.
pub fn | (&self) -> Inherent {
// Create the FinalizeEpoch inherent.
Inherent::FinalizeEpoch
}
}
| finalize_previous_epoch | identifier_name |
inherents.rs | use nimiq_account::StakingContract;
use nimiq_block::{ForkProof, MacroBlock, MacroHeader, SkipBlockInfo};
use nimiq_blockchain_interface::AbstractBlockchain;
use nimiq_database as db;
use nimiq_keys::Address;
use nimiq_primitives::{
account::AccountType,
coin::Coin,
policy::Policy,
slots_allocation::{JailedValidator, PenalizedSlot},
};
use nimiq_transaction::{inherent::Inherent, reward::RewardTransaction};
use nimiq_vrf::{AliasMethod, VrfUseCase};
use crate::{blockchain_state::BlockchainState, reward::block_reward_for_batch, Blockchain};
/// Implements methods that create inherents.
impl Blockchain {
pub fn create_macro_block_inherents(&self, macro_block: &MacroBlock) -> Vec<Inherent> {
let mut inherents: Vec<Inherent> = vec![];
// Every macro block is the end of a batch, so we need to finalize the batch.
inherents.append(&mut self.finalize_previous_batch(macro_block));
// If this block is an election block, we also need to finalize the epoch.
if Policy::is_election_block_at(macro_block.block_number()) {
// On election the previous epoch needs to be finalized.
// We can rely on `state` here, since we cannot revert macro blocks.
inherents.push(self.finalize_previous_epoch());
}
inherents
}
/// Given fork proofs and (or) a skip block, it returns the respective punishment inherents. It expects
/// verified fork proofs and (or) skip block.
pub fn create_punishment_inherents(
&self,
block_number: u32,
fork_proofs: &[ForkProof],
skip_block_info: Option<SkipBlockInfo>,
txn_option: Option<&db::TransactionProxy>,
) -> Vec<Inherent> {
let mut inherents = vec![];
for fork_proof in fork_proofs {
trace!("Creating inherent from fork proof: {:?}", fork_proof);
inherents.push(self.inherent_from_fork_proof(block_number, fork_proof, txn_option));
}
if let Some(skip_block_info) = skip_block_info {
trace!("Creating inherent from skip block: {:?}", skip_block_info);
inherents.push(self.inherent_from_skip_block_info(&skip_block_info, txn_option));
}
inherents
}
/// It creates a jail inherent from a fork proof. It expects a *verified* fork proof!
pub fn inherent_from_fork_proof(
&self,
reporting_block: u32, // PITODO: we can get it from the blockchain, should be head block number + 1
fork_proof: &ForkProof,
txn_option: Option<&db::TransactionProxy>,
) -> Inherent {
// Get the slot owner and slot number for this block number.
let proposer_slot = self
.get_proposer_at(
fork_proof.header1.block_number,
fork_proof.header1.block_number,
fork_proof.prev_vrf_seed.entropy(),
txn_option,
)
.expect("Couldn't calculate slot owner!");
// If the reporting block is in a new epoch, we check if the proposer is still a validator in this epoch
// and retrieve its new slots.
let new_epoch_slot_range = if Policy::epoch_at(reporting_block)
> Policy::epoch_at(fork_proof.header1.block_number)
{
self.current_validators()
.expect("We need to have validators")
.get_validator_by_address(&proposer_slot.validator.address)
.map(|validator| validator.slots.clone()) | None
};
// Create the JailedValidator struct.
let jailed_validator = JailedValidator {
slots: proposer_slot.validator.slots,
validator_address: proposer_slot.validator.address,
offense_event_block: fork_proof.header1.block_number,
};
// Create the corresponding jail inherent.
Inherent::Jail {
jailed_validator,
new_epoch_slot_range,
}
}
/// It creates a penalize inherent from a skip block. It expects a *verified* skip block!
pub fn inherent_from_skip_block_info(
&self,
skip_block_info: &SkipBlockInfo,
txn_option: Option<&db::TransactionProxy>,
) -> Inherent {
// Get the slot owner and slot number for this block number.
let proposer_slot = self
.get_proposer_at(
skip_block_info.block_number,
skip_block_info.block_number,
skip_block_info.vrf_entropy.clone(),
txn_option,
)
.expect("Couldn't calculate slot owner!");
debug!(
address = %proposer_slot.validator.address,
"Penalize inherent from skip block"
);
// Create the PenalizedSlot struct.
let slot = PenalizedSlot {
slot: proposer_slot.number,
validator_address: proposer_slot.validator.address,
offense_event_block: skip_block_info.block_number,
};
// Create the corresponding penalize inherent.
Inherent::Penalize { slot }
}
/// Creates the inherents to finalize a batch. The inherents are for reward distribution and
/// updating the StakingContract.
pub fn finalize_previous_batch(&self, macro_block: &MacroBlock) -> Vec<Inherent> {
// Special case for first batch: Batch 0 is finalized by definition.
if Policy::batch_at(macro_block.block_number()) - 1 == 0 {
return vec![];
}
// To get the inherents we either fetch the reward transactions from the macro body;
// or we create the transactions when there is no macro body.
let mut inherents: Vec<Inherent> = if let Some(body) = macro_block.body.as_ref() {
body.transactions.iter().map(Inherent::from).collect()
} else {
self.create_reward_transactions(
self.state(),
&macro_block.header,
&self.get_staking_contract(),
)
.iter()
.map(Inherent::from)
.collect()
};
// Push FinalizeBatch inherent to update StakingContract.
inherents.push(Inherent::FinalizeBatch);
inherents
}
/// Creates the inherents to finalize a batch. The inherents are for reward distribution and
/// updating the StakingContract.
pub fn create_reward_transactions(
&self,
state: &BlockchainState,
macro_header: &MacroHeader,
staking_contract: &StakingContract,
) -> Vec<RewardTransaction> {
let prev_macro_info = &state.macro_info;
// Special case for first batch: Batch 0 is finalized by definition.
if Policy::batch_at(macro_header.block_number) - 1 == 0 {
return vec![];
}
// Get validator slots
// NOTE: Fields `current_slots` and `previous_slots` are expected to always be set.
let validator_slots = if Policy::first_batch_of_epoch(macro_header.block_number) {
state
.previous_slots
.as_ref()
.expect("Slots for last batch are missing")
} else {
state
.current_slots
.as_ref()
.expect("Slots for current batch are missing")
};
// Calculate the slots that will receive rewards.
// Rewards are for the previous batch (to give validators time to report misbehavior)
let penalized_set = staking_contract
.punished_slots
.previous_batch_punished_slots();
// Total reward for the previous batch
let block_reward = block_reward_for_batch(
macro_header,
&prev_macro_info.head.unwrap_macro_ref().header,
self.genesis_supply,
self.genesis_timestamp,
);
let tx_fees = prev_macro_info.cum_tx_fees;
let reward_pot = block_reward + tx_fees;
// Distribute reward between all slots and calculate the remainder
let slot_reward = reward_pot / Policy::SLOTS as u64;
let remainder = reward_pot % Policy::SLOTS as u64;
// The first slot number of the current validator
let mut first_slot_number = 0;
// Peekable iterator to collect penalized slots for validator
let mut penalized_set_iter = penalized_set.iter().peekable();
// All accepted inherents.
let mut transactions = Vec::new();
// Remember the number of eligible slots that a validator had (that was able to accept the inherent)
let mut num_eligible_slots_for_accepted_tx = Vec::new();
// Remember that the total amount of reward must be burned. The reward for a slot is burned
// either because the slot was penalized or because the corresponding validator was unable to
// accept the inherent.
let mut burned_reward = Coin::ZERO;
// Compute inherents
for validator_slot in validator_slots.iter() {
// The interval of slot numbers for the current slot band is
// [first_slot_number, last_slot_number). So it actually doesn't include
// `last_slot_number`.
let last_slot_number = first_slot_number + validator_slot.num_slots();
// Compute the number of punishments for this validator slot band.
let mut num_eligible_slots = validator_slot.num_slots();
let mut num_penalized_slots = 0;
while let Some(next_penalized_slot) = penalized_set_iter.peek() {
let next_penalized_slot = *next_penalized_slot as u16;
assert!(next_penalized_slot >= first_slot_number);
if next_penalized_slot < last_slot_number {
assert!(num_eligible_slots > 0);
penalized_set_iter.next();
num_eligible_slots -= 1;
num_penalized_slots += 1;
} else {
break;
}
}
// Compute reward from slot reward and number of eligible slots. Also update the burned
// reward from the number of penalized slots.
let reward = slot_reward
.checked_mul(num_eligible_slots as u64)
.expect("Overflow in reward");
burned_reward += slot_reward
.checked_mul(num_penalized_slots as u64)
.expect("Overflow in reward");
// Do not create reward transactions for zero rewards
if !reward.is_zero() {
// Create inherent for the reward.
let staking_contract = self.get_staking_contract();
let data_store = self.get_staking_contract_store();
let txn = self.read_transaction();
let validator = staking_contract
.get_validator(&data_store.read(&txn), &validator_slot.address)
.expect("Couldn't find validator in the accounts trie when paying rewards!");
let tx: RewardTransaction = RewardTransaction {
recipient: validator.reward_address.clone(),
value: reward,
};
// Test whether account will accept inherent. If it can't then the reward will be
// burned.
// TODO Improve this check: it assumes that only BasicAccounts can receive transactions.
let account = state.accounts.get_complete(&tx.recipient, Some(&txn));
if account.account_type() == AccountType::Basic {
num_eligible_slots_for_accepted_tx.push(num_eligible_slots);
transactions.push(tx);
} else {
debug!(
target_address = %tx.recipient,
reward = %tx.value,
"Can't accept batch reward"
);
burned_reward += reward;
}
}
// Update first_slot_number for next iteration
first_slot_number = last_slot_number;
}
// Check that number of accepted inherents is equal to length of the map that gives us the
// corresponding number of slots for that staker (which should be equal to the number of
// validators that will receive rewards).
assert_eq!(transactions.len(), num_eligible_slots_for_accepted_tx.len());
// Get RNG from last block's seed and build lookup table based on number of eligible slots.
let mut rng = macro_header.seed.rng(VrfUseCase::RewardDistribution);
let lookup = AliasMethod::new(num_eligible_slots_for_accepted_tx);
// Randomly give remainder to one accepting slot. We don't bother to distribute it over all
// accepting slots because the remainder is always at most SLOTS - 1 Lunas.
let index = lookup.sample(&mut rng);
transactions[index].value += remainder;
// Create the inherent for the burned reward.
if burned_reward > Coin::ZERO {
let tx = RewardTransaction {
recipient: Address::burn_address(),
value: burned_reward,
};
transactions.push(tx);
}
transactions
}
/// Creates the inherent to finalize an epoch. The inherent is for updating the StakingContract.
pub fn finalize_previous_epoch(&self) -> Inherent {
// Create the FinalizeEpoch inherent.
Inherent::FinalizeEpoch
}
} | } else { | random_line_split |
inherents.rs | use nimiq_account::StakingContract;
use nimiq_block::{ForkProof, MacroBlock, MacroHeader, SkipBlockInfo};
use nimiq_blockchain_interface::AbstractBlockchain;
use nimiq_database as db;
use nimiq_keys::Address;
use nimiq_primitives::{
account::AccountType,
coin::Coin,
policy::Policy,
slots_allocation::{JailedValidator, PenalizedSlot},
};
use nimiq_transaction::{inherent::Inherent, reward::RewardTransaction};
use nimiq_vrf::{AliasMethod, VrfUseCase};
use crate::{blockchain_state::BlockchainState, reward::block_reward_for_batch, Blockchain};
/// Implements methods that create inherents.
impl Blockchain {
pub fn create_macro_block_inherents(&self, macro_block: &MacroBlock) -> Vec<Inherent> {
let mut inherents: Vec<Inherent> = vec![];
// Every macro block is the end of a batch, so we need to finalize the batch.
inherents.append(&mut self.finalize_previous_batch(macro_block));
// If this block is an election block, we also need to finalize the epoch.
if Policy::is_election_block_at(macro_block.block_number()) {
// On election the previous epoch needs to be finalized.
// We can rely on `state` here, since we cannot revert macro blocks.
inherents.push(self.finalize_previous_epoch());
}
inherents
}
/// Given fork proofs and (or) a skip block, it returns the respective punishment inherents. It expects
/// verified fork proofs and (or) skip block.
pub fn create_punishment_inherents(
&self,
block_number: u32,
fork_proofs: &[ForkProof],
skip_block_info: Option<SkipBlockInfo>,
txn_option: Option<&db::TransactionProxy>,
) -> Vec<Inherent> {
let mut inherents = vec![];
for fork_proof in fork_proofs {
trace!("Creating inherent from fork proof: {:?}", fork_proof);
inherents.push(self.inherent_from_fork_proof(block_number, fork_proof, txn_option));
}
if let Some(skip_block_info) = skip_block_info {
trace!("Creating inherent from skip block: {:?}", skip_block_info);
inherents.push(self.inherent_from_skip_block_info(&skip_block_info, txn_option));
}
inherents
}
/// It creates a jail inherent from a fork proof. It expects a *verified* fork proof!
pub fn inherent_from_fork_proof(
&self,
reporting_block: u32, // PITODO: we can get it from the blockchain, should be head block number + 1
fork_proof: &ForkProof,
txn_option: Option<&db::TransactionProxy>,
) -> Inherent {
// Get the slot owner and slot number for this block number.
let proposer_slot = self
.get_proposer_at(
fork_proof.header1.block_number,
fork_proof.header1.block_number,
fork_proof.prev_vrf_seed.entropy(),
txn_option,
)
.expect("Couldn't calculate slot owner!");
// If the reporting block is in a new epoch, we check if the proposer is still a validator in this epoch
// and retrieve its new slots.
let new_epoch_slot_range = if Policy::epoch_at(reporting_block)
> Policy::epoch_at(fork_proof.header1.block_number)
{
self.current_validators()
.expect("We need to have validators")
.get_validator_by_address(&proposer_slot.validator.address)
.map(|validator| validator.slots.clone())
} else {
None
};
// Create the JailedValidator struct.
let jailed_validator = JailedValidator {
slots: proposer_slot.validator.slots,
validator_address: proposer_slot.validator.address,
offense_event_block: fork_proof.header1.block_number,
};
// Create the corresponding jail inherent.
Inherent::Jail {
jailed_validator,
new_epoch_slot_range,
}
}
/// It creates a penalize inherent from a skip block. It expects a *verified* skip block!
pub fn inherent_from_skip_block_info(
&self,
skip_block_info: &SkipBlockInfo,
txn_option: Option<&db::TransactionProxy>,
) -> Inherent {
// Get the slot owner and slot number for this block number.
let proposer_slot = self
.get_proposer_at(
skip_block_info.block_number,
skip_block_info.block_number,
skip_block_info.vrf_entropy.clone(),
txn_option,
)
.expect("Couldn't calculate slot owner!");
debug!(
address = %proposer_slot.validator.address,
"Penalize inherent from skip block"
);
// Create the PenalizedSlot struct.
let slot = PenalizedSlot {
slot: proposer_slot.number,
validator_address: proposer_slot.validator.address,
offense_event_block: skip_block_info.block_number,
};
// Create the corresponding penalize inherent.
Inherent::Penalize { slot }
}
/// Creates the inherents to finalize a batch. The inherents are for reward distribution and
/// updating the StakingContract.
pub fn finalize_previous_batch(&self, macro_block: &MacroBlock) -> Vec<Inherent> {
// Special case for first batch: Batch 0 is finalized by definition.
if Policy::batch_at(macro_block.block_number()) - 1 == 0 {
return vec![];
}
// To get the inherents we either fetch the reward transactions from the macro body;
// or we create the transactions when there is no macro body.
let mut inherents: Vec<Inherent> = if let Some(body) = macro_block.body.as_ref() {
body.transactions.iter().map(Inherent::from).collect()
} else {
self.create_reward_transactions(
self.state(),
&macro_block.header,
&self.get_staking_contract(),
)
.iter()
.map(Inherent::from)
.collect()
};
// Push FinalizeBatch inherent to update StakingContract.
inherents.push(Inherent::FinalizeBatch);
inherents
}
/// Creates the inherents to finalize a batch. The inherents are for reward distribution and
/// updating the StakingContract.
pub fn create_reward_transactions(
&self,
state: &BlockchainState,
macro_header: &MacroHeader,
staking_contract: &StakingContract,
) -> Vec<RewardTransaction> | };
// Calculate the slots that will receive rewards.
// Rewards are for the previous batch (to give validators time to report misbehavior)
let penalized_set = staking_contract
.punished_slots
.previous_batch_punished_slots();
// Total reward for the previous batch
let block_reward = block_reward_for_batch(
macro_header,
&prev_macro_info.head.unwrap_macro_ref().header,
self.genesis_supply,
self.genesis_timestamp,
);
let tx_fees = prev_macro_info.cum_tx_fees;
let reward_pot = block_reward + tx_fees;
// Distribute reward between all slots and calculate the remainder
let slot_reward = reward_pot / Policy::SLOTS as u64;
let remainder = reward_pot % Policy::SLOTS as u64;
// The first slot number of the current validator
let mut first_slot_number = 0;
// Peekable iterator to collect penalized slots for validator
let mut penalized_set_iter = penalized_set.iter().peekable();
// All accepted inherents.
let mut transactions = Vec::new();
// Remember the number of eligible slots that a validator had (that was able to accept the inherent)
let mut num_eligible_slots_for_accepted_tx = Vec::new();
// Remember that the total amount of reward must be burned. The reward for a slot is burned
// either because the slot was penalized or because the corresponding validator was unable to
// accept the inherent.
let mut burned_reward = Coin::ZERO;
// Compute inherents
for validator_slot in validator_slots.iter() {
// The interval of slot numbers for the current slot band is
// [first_slot_number, last_slot_number). So it actually doesn't include
// `last_slot_number`.
let last_slot_number = first_slot_number + validator_slot.num_slots();
// Compute the number of punishments for this validator slot band.
let mut num_eligible_slots = validator_slot.num_slots();
let mut num_penalized_slots = 0;
while let Some(next_penalized_slot) = penalized_set_iter.peek() {
let next_penalized_slot = *next_penalized_slot as u16;
assert!(next_penalized_slot >= first_slot_number);
if next_penalized_slot < last_slot_number {
assert!(num_eligible_slots > 0);
penalized_set_iter.next();
num_eligible_slots -= 1;
num_penalized_slots += 1;
} else {
break;
}
}
// Compute reward from slot reward and number of eligible slots. Also update the burned
// reward from the number of penalized slots.
let reward = slot_reward
.checked_mul(num_eligible_slots as u64)
.expect("Overflow in reward");
burned_reward += slot_reward
.checked_mul(num_penalized_slots as u64)
.expect("Overflow in reward");
// Do not create reward transactions for zero rewards
if !reward.is_zero() {
// Create inherent for the reward.
let staking_contract = self.get_staking_contract();
let data_store = self.get_staking_contract_store();
let txn = self.read_transaction();
let validator = staking_contract
.get_validator(&data_store.read(&txn), &validator_slot.address)
.expect("Couldn't find validator in the accounts trie when paying rewards!");
let tx: RewardTransaction = RewardTransaction {
recipient: validator.reward_address.clone(),
value: reward,
};
// Test whether account will accept inherent. If it can't then the reward will be
// burned.
// TODO Improve this check: it assumes that only BasicAccounts can receive transactions.
let account = state.accounts.get_complete(&tx.recipient, Some(&txn));
if account.account_type() == AccountType::Basic {
num_eligible_slots_for_accepted_tx.push(num_eligible_slots);
transactions.push(tx);
} else {
debug!(
target_address = %tx.recipient,
reward = %tx.value,
"Can't accept batch reward"
);
burned_reward += reward;
}
}
// Update first_slot_number for next iteration
first_slot_number = last_slot_number;
}
// Check that number of accepted inherents is equal to length of the map that gives us the
// corresponding number of slots for that staker (which should be equal to the number of
// validators that will receive rewards).
assert_eq!(transactions.len(), num_eligible_slots_for_accepted_tx.len());
// Get RNG from last block's seed and build lookup table based on number of eligible slots.
let mut rng = macro_header.seed.rng(VrfUseCase::RewardDistribution);
let lookup = AliasMethod::new(num_eligible_slots_for_accepted_tx);
// Randomly give remainder to one accepting slot. We don't bother to distribute it over all
// accepting slots because the remainder is always at most SLOTS - 1 Lunas.
let index = lookup.sample(&mut rng);
transactions[index].value += remainder;
// Create the inherent for the burned reward.
if burned_reward > Coin::ZERO {
let tx = RewardTransaction {
recipient: Address::burn_address(),
value: burned_reward,
};
transactions.push(tx);
}
transactions
}
/// Creates the inherent to finalize an epoch. The inherent is for updating the StakingContract.
pub fn finalize_previous_epoch(&self) -> Inherent {
// Create the FinalizeEpoch inherent.
Inherent::FinalizeEpoch
}
}
| {
let prev_macro_info = &state.macro_info;
// Special case for first batch: Batch 0 is finalized by definition.
if Policy::batch_at(macro_header.block_number) - 1 == 0 {
return vec![];
}
// Get validator slots
// NOTE: Fields `current_slots` and `previous_slots` are expected to always be set.
let validator_slots = if Policy::first_batch_of_epoch(macro_header.block_number) {
state
.previous_slots
.as_ref()
.expect("Slots for last batch are missing")
} else {
state
.current_slots
.as_ref()
.expect("Slots for current batch are missing") | identifier_body |
inherents.rs | use nimiq_account::StakingContract;
use nimiq_block::{ForkProof, MacroBlock, MacroHeader, SkipBlockInfo};
use nimiq_blockchain_interface::AbstractBlockchain;
use nimiq_database as db;
use nimiq_keys::Address;
use nimiq_primitives::{
account::AccountType,
coin::Coin,
policy::Policy,
slots_allocation::{JailedValidator, PenalizedSlot},
};
use nimiq_transaction::{inherent::Inherent, reward::RewardTransaction};
use nimiq_vrf::{AliasMethod, VrfUseCase};
use crate::{blockchain_state::BlockchainState, reward::block_reward_for_batch, Blockchain};
/// Implements methods that create inherents.
impl Blockchain {
pub fn create_macro_block_inherents(&self, macro_block: &MacroBlock) -> Vec<Inherent> {
let mut inherents: Vec<Inherent> = vec![];
// Every macro block is the end of a batch, so we need to finalize the batch.
inherents.append(&mut self.finalize_previous_batch(macro_block));
// If this block is an election block, we also need to finalize the epoch.
if Policy::is_election_block_at(macro_block.block_number()) {
// On election the previous epoch needs to be finalized.
// We can rely on `state` here, since we cannot revert macro blocks.
inherents.push(self.finalize_previous_epoch());
}
inherents
}
/// Given fork proofs and (or) a skip block, it returns the respective punishment inherents. It expects
/// verified fork proofs and (or) skip block.
pub fn create_punishment_inherents(
&self,
block_number: u32,
fork_proofs: &[ForkProof],
skip_block_info: Option<SkipBlockInfo>,
txn_option: Option<&db::TransactionProxy>,
) -> Vec<Inherent> {
let mut inherents = vec![];
for fork_proof in fork_proofs {
trace!("Creating inherent from fork proof: {:?}", fork_proof);
inherents.push(self.inherent_from_fork_proof(block_number, fork_proof, txn_option));
}
if let Some(skip_block_info) = skip_block_info {
trace!("Creating inherent from skip block: {:?}", skip_block_info);
inherents.push(self.inherent_from_skip_block_info(&skip_block_info, txn_option));
}
inherents
}
/// It creates a jail inherent from a fork proof. It expects a *verified* fork proof!
pub fn inherent_from_fork_proof(
&self,
reporting_block: u32, // PITODO: we can get it from the blockchain, should be head block number + 1
fork_proof: &ForkProof,
txn_option: Option<&db::TransactionProxy>,
) -> Inherent {
// Get the slot owner and slot number for this block number.
let proposer_slot = self
.get_proposer_at(
fork_proof.header1.block_number,
fork_proof.header1.block_number,
fork_proof.prev_vrf_seed.entropy(),
txn_option,
)
.expect("Couldn't calculate slot owner!");
// If the reporting block is in a new epoch, we check if the proposer is still a validator in this epoch
// and retrieve its new slots.
let new_epoch_slot_range = if Policy::epoch_at(reporting_block)
> Policy::epoch_at(fork_proof.header1.block_number)
{
self.current_validators()
.expect("We need to have validators")
.get_validator_by_address(&proposer_slot.validator.address)
.map(|validator| validator.slots.clone())
} else {
None
};
// Create the JailedValidator struct.
let jailed_validator = JailedValidator {
slots: proposer_slot.validator.slots,
validator_address: proposer_slot.validator.address,
offense_event_block: fork_proof.header1.block_number,
};
// Create the corresponding jail inherent.
Inherent::Jail {
jailed_validator,
new_epoch_slot_range,
}
}
/// It creates a penalize inherent from a skip block. It expects a *verified* skip block!
pub fn inherent_from_skip_block_info(
&self,
skip_block_info: &SkipBlockInfo,
txn_option: Option<&db::TransactionProxy>,
) -> Inherent {
// Get the slot owner and slot number for this block number.
let proposer_slot = self
.get_proposer_at(
skip_block_info.block_number,
skip_block_info.block_number,
skip_block_info.vrf_entropy.clone(),
txn_option,
)
.expect("Couldn't calculate slot owner!");
debug!(
address = %proposer_slot.validator.address,
"Penalize inherent from skip block"
);
// Create the PenalizedSlot struct.
let slot = PenalizedSlot {
slot: proposer_slot.number,
validator_address: proposer_slot.validator.address,
offense_event_block: skip_block_info.block_number,
};
// Create the corresponding penalize inherent.
Inherent::Penalize { slot }
}
/// Creates the inherents to finalize a batch. The inherents are for reward distribution and
/// updating the StakingContract.
pub fn finalize_previous_batch(&self, macro_block: &MacroBlock) -> Vec<Inherent> {
// Special case for first batch: Batch 0 is finalized by definition.
if Policy::batch_at(macro_block.block_number()) - 1 == 0 {
return vec![];
}
// To get the inherents we either fetch the reward transactions from the macro body;
// or we create the transactions when there is no macro body.
let mut inherents: Vec<Inherent> = if let Some(body) = macro_block.body.as_ref() {
body.transactions.iter().map(Inherent::from).collect()
} else {
self.create_reward_transactions(
self.state(),
&macro_block.header,
&self.get_staking_contract(),
)
.iter()
.map(Inherent::from)
.collect()
};
// Push FinalizeBatch inherent to update StakingContract.
inherents.push(Inherent::FinalizeBatch);
inherents
}
/// Creates the inherents to finalize a batch. The inherents are for reward distribution and
/// updating the StakingContract.
pub fn create_reward_transactions(
&self,
state: &BlockchainState,
macro_header: &MacroHeader,
staking_contract: &StakingContract,
) -> Vec<RewardTransaction> {
let prev_macro_info = &state.macro_info;
// Special case for first batch: Batch 0 is finalized by definition.
if Policy::batch_at(macro_header.block_number) - 1 == 0 {
return vec![];
}
// Get validator slots
// NOTE: Fields `current_slots` and `previous_slots` are expected to always be set.
let validator_slots = if Policy::first_batch_of_epoch(macro_header.block_number) {
state
.previous_slots
.as_ref()
.expect("Slots for last batch are missing")
} else {
state
.current_slots
.as_ref()
.expect("Slots for current batch are missing")
};
// Calculate the slots that will receive rewards.
// Rewards are for the previous batch (to give validators time to report misbehavior)
let penalized_set = staking_contract
.punished_slots
.previous_batch_punished_slots();
// Total reward for the previous batch
let block_reward = block_reward_for_batch(
macro_header,
&prev_macro_info.head.unwrap_macro_ref().header,
self.genesis_supply,
self.genesis_timestamp,
);
let tx_fees = prev_macro_info.cum_tx_fees;
let reward_pot = block_reward + tx_fees;
// Distribute reward between all slots and calculate the remainder
let slot_reward = reward_pot / Policy::SLOTS as u64;
let remainder = reward_pot % Policy::SLOTS as u64;
// The first slot number of the current validator
let mut first_slot_number = 0;
// Peekable iterator to collect penalized slots for validator
let mut penalized_set_iter = penalized_set.iter().peekable();
// All accepted inherents.
let mut transactions = Vec::new();
// Remember the number of eligible slots that a validator had (that was able to accept the inherent)
let mut num_eligible_slots_for_accepted_tx = Vec::new();
// Remember that the total amount of reward must be burned. The reward for a slot is burned
// either because the slot was penalized or because the corresponding validator was unable to
// accept the inherent.
let mut burned_reward = Coin::ZERO;
// Compute inherents
for validator_slot in validator_slots.iter() {
// The interval of slot numbers for the current slot band is
// [first_slot_number, last_slot_number). So it actually doesn't include
// `last_slot_number`.
let last_slot_number = first_slot_number + validator_slot.num_slots();
// Compute the number of punishments for this validator slot band.
let mut num_eligible_slots = validator_slot.num_slots();
let mut num_penalized_slots = 0;
while let Some(next_penalized_slot) = penalized_set_iter.peek() {
let next_penalized_slot = *next_penalized_slot as u16;
assert!(next_penalized_slot >= first_slot_number);
if next_penalized_slot < last_slot_number | else {
break;
}
}
// Compute reward from slot reward and number of eligible slots. Also update the burned
// reward from the number of penalized slots.
let reward = slot_reward
.checked_mul(num_eligible_slots as u64)
.expect("Overflow in reward");
burned_reward += slot_reward
.checked_mul(num_penalized_slots as u64)
.expect("Overflow in reward");
// Do not create reward transactions for zero rewards
if !reward.is_zero() {
// Create inherent for the reward.
let staking_contract = self.get_staking_contract();
let data_store = self.get_staking_contract_store();
let txn = self.read_transaction();
let validator = staking_contract
.get_validator(&data_store.read(&txn), &validator_slot.address)
.expect("Couldn't find validator in the accounts trie when paying rewards!");
let tx: RewardTransaction = RewardTransaction {
recipient: validator.reward_address.clone(),
value: reward,
};
// Test whether account will accept inherent. If it can't then the reward will be
// burned.
// TODO Improve this check: it assumes that only BasicAccounts can receive transactions.
let account = state.accounts.get_complete(&tx.recipient, Some(&txn));
if account.account_type() == AccountType::Basic {
num_eligible_slots_for_accepted_tx.push(num_eligible_slots);
transactions.push(tx);
} else {
debug!(
target_address = %tx.recipient,
reward = %tx.value,
"Can't accept batch reward"
);
burned_reward += reward;
}
}
// Update first_slot_number for next iteration
first_slot_number = last_slot_number;
}
// Check that number of accepted inherents is equal to length of the map that gives us the
// corresponding number of slots for that staker (which should be equal to the number of
// validators that will receive rewards).
assert_eq!(transactions.len(), num_eligible_slots_for_accepted_tx.len());
// Get RNG from last block's seed and build lookup table based on number of eligible slots.
let mut rng = macro_header.seed.rng(VrfUseCase::RewardDistribution);
let lookup = AliasMethod::new(num_eligible_slots_for_accepted_tx);
// Randomly give remainder to one accepting slot. We don't bother to distribute it over all
// accepting slots because the remainder is always at most SLOTS - 1 Lunas.
let index = lookup.sample(&mut rng);
transactions[index].value += remainder;
// Create the inherent for the burned reward.
if burned_reward > Coin::ZERO {
let tx = RewardTransaction {
recipient: Address::burn_address(),
value: burned_reward,
};
transactions.push(tx);
}
transactions
}
/// Creates the inherent to finalize an epoch. The inherent is for updating the StakingContract.
pub fn finalize_previous_epoch(&self) -> Inherent {
// Create the FinalizeEpoch inherent.
Inherent::FinalizeEpoch
}
}
| {
assert!(num_eligible_slots > 0);
penalized_set_iter.next();
num_eligible_slots -= 1;
num_penalized_slots += 1;
} | conditional_block |
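// The reward remainder above goes to one validator chosen with probability proportional to its
// eligible slots, via AliasMethod from nimiq_vrf (which samples in constant time after setup).
// As a rough illustration of the same proportional pick without that crate, here is a
// cumulative-sum walk over the weights; the fixed `roll` stands in for the VRF-derived random
// value and all names are illustrative, not part of the Nimiq codebase.
fn weighted_index(weights: &[u16], roll: u64) -> usize {
    let total: u64 = weights.iter().map(|&w| w as u64).sum();
    let mut target = roll % total; // position inside the cumulative weight range
    for (i, &w) in weights.iter().enumerate() {
        if target < w as u64 {
            return i;
        }
        target -= w as u64;
    }
    weights.len() - 1 // unreachable for non-empty weights, kept as a safe fallback
}

fn main() {
    // Three validators holding 3, 1 and 4 eligible slots; a roll of 5 lands in the third band.
    assert_eq!(weighted_index(&[3, 1, 4], 5), 2);
}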
testclient.rs | extern crate byteorder;
extern crate clap;
extern crate data_encoding;
extern crate env_logger;
#[macro_use] extern crate log;
extern crate qrcodegen;
extern crate saltyrtc_client;
extern crate saltyrtc_task_relayed_data;
extern crate tokio_core;
use std::env;
use std::io::Write;
use std::process;
use std::sync::{Arc, RwLock};
use std::time::Duration;
use byteorder::{BigEndian, WriteBytesExt};
use clap::{Arg, App, SubCommand};
use data_encoding::{HEXLOWER, HEXLOWER_PERMISSIVE, BASE64};
use qrcodegen::{QrCode, QrCodeEcc};
use saltyrtc_client::{SaltyClient, Role, BoxedFuture};
use saltyrtc_client::crypto::{KeyPair, AuthToken, public_key_from_hex_str};
use saltyrtc_client::dep::futures::{future, Future, Stream};
use saltyrtc_client::dep::futures::sync::mpsc;
use saltyrtc_client::dep::native_tls::{TlsConnector, Protocol};
use saltyrtc_client::tasks::Task;
use saltyrtc_task_relayed_data::{RelayedDataTask, RelayedDataError, MessageEvent};
use tokio_core::reactor::Core;
const ARG_PING_INTERVAL: &str = "ping_interval";
const ARG_SRV_HOST: &str = "host";
const ARG_SRV_PORT: &str = "port";
const ARG_SRV_PUBKEY: &str = "pubkey";
const ARG_PATH: &str = "path";
const ARG_AUTHTOKEN: &str = "auth_token";
const VERSION: &str = env!("CARGO_PKG_VERSION");
/// Wrap future in a box with type erasure.
macro_rules! boxed {
($future:expr) => {{
Box::new($future) as BoxedFuture<_, _>
}}
}
/// Create the QR code payload
fn make_qrcode_payload(version: u16, permanent: bool, host: &str, port: u16, pubkey: &[u8], auth_token: &[u8], server_pubkey: &[u8]) -> Vec<u8> {
let mut data: Vec<u8> = Vec::with_capacity(101 + host.as_bytes().len());
data.write_u16::<BigEndian>(version).unwrap();
data.push(if permanent { 0x02 } else { 0x00 });
data.write_all(pubkey).unwrap();
data.write_all(auth_token).unwrap();
data.write_all(server_pubkey).unwrap();
data.write_u16::<BigEndian>(port).unwrap();
data.write_all(host.as_bytes()).unwrap();
data
}
/// Print the QR code payload to the terminal
fn print_qrcode(payload: &[u8]) {
let base64 = BASE64.encode(payload);
let qr = QrCode::encode_text(&base64, QrCodeEcc::Low).unwrap();
let border = 1;
for y in -border..qr.size() + border {
for x in -border..qr.size() + border {
let c: char = if qr.get_module(x, y) { '█' } else { ' ' };
print!("{0}{0}", c);
}
println!();
}
println!();
}
fn main() {
// Set up CLI arguments
let arg_srv_host = Arg::with_name(ARG_SRV_HOST)
.short('h')
.takes_value(true)
.value_name("SRV_HOST")
.required(true)
.default_value("server.saltyrtc.org")
.help("The SaltyRTC server hostname");
let arg_srv_port = Arg::with_name(ARG_SRV_PORT)
.short('p')
.takes_value(true)
.value_name("SRV_PORT")
.required(true)
.default_value("443")
.help("The SaltyRTC server port");
let arg_srv_pubkey = Arg::with_name(ARG_SRV_PUBKEY)
.short('s')
.takes_value(true)
.value_name("SRV_PUBKEY")
.required(true)
.default_value("f77fe623b6977d470ac8c7bf7011c4ad08a1d126896795db9d2b4b7a49ae1045")
.help("The SaltyRTC server public permanent key");
let arg_ping_interval = Arg::with_name(ARG_PING_INTERVAL)
.short('i')
.takes_value(true)
.value_name("SECONDS")
.required(false)
.default_value("60")
.help("The WebSocket ping interval (set to 0 to disable pings)");
let app = App::new("SaltyRTC Relayed Data Test Initiator")
.version(VERSION)
.author("Danilo Bargen <[email protected]>")
.about("Test client for SaltyRTC Relayed Data Task.")
.subcommand(SubCommand::with_name("initiator")
.about("Start client as initiator")
.arg(arg_srv_host.clone())
.arg(arg_srv_port.clone())
.arg(arg_srv_pubkey.clone())
.arg(arg_ping_interval.clone()))
.subcommand(SubCommand::with_name("responder")
.about("Start client as responder")
.arg(Arg::with_name(ARG_PATH)
.short('k')
.takes_value(true)
.value_name("INITIATOR_PUBKEY")
.required(true)
.help("The hex encoded public key of the initiator"))
.arg(Arg::with_name(ARG_AUTHTOKEN)
.short('a')
.alias("token")
.alias("authtoken")
.takes_value(true)
.value_name("AUTHTOKEN")
.required(true)
.help("The auth token (hex encoded)"))
.arg(arg_srv_host)
.arg(arg_srv_port)
.arg(arg_srv_pubkey)
.arg(arg_ping_interval));
// Parse arguments
let matches = app.get_matches();
let (subcommand_name, args) = matches.subcommand().unwrap_or_else(|| {
println!("Missing subcommand.");
println!("Use -h or --help to see usage.");
process::exit(1);
});
// Determine role
let role = match subcommand_name {
"initiator" => Role::Initiator,
"responder" => Role::Responder,
other => {
println!("Invalid subcommand: {}", other);
process::exit(1);
},
};
// Set up logging
env::set_var("RUST_LOG", "saltyrtc_client=debug,saltyrtc_task_relayed_data=debug,testclient=trace");
env_logger::init();
// Tokio reactor core
let mut core = Core::new().unwrap();
// Create TLS connector instance
let tls_connector = TlsConnector::builder()
.min_protocol_version(Some(Protocol::Tlsv11))
.build()
.unwrap_or_else(|e| panic!("Could not initialize TlsConnector: {}", e));
// Create new public permanent keypair
let keypair = KeyPair::new();
let pubkey = keypair.public_key().clone();
// Determine websocket path
let path: String = match role {
Role::Initiator => keypair.public_key_hex(),
Role::Responder => args.value_of(ARG_PATH).expect("Initiator pubkey not supplied").to_lowercase(),
};
// Determine ping interval
let ping_interval = {
let seconds: u64 = args.value_of(ARG_PING_INTERVAL).expect("Ping interval not supplied")
.parse().expect("Could not parse interval seconds to a number");
Duration::from_secs(seconds)
};
// Determine server info
let server_host: &str = args.value_of(ARG_SRV_HOST).expect("Server hostname not supplied");
let server_port: u16 = args.value_of(ARG_SRV_PORT).expect("Server port not supplied").parse().expect("Could not parse port to a number");
let server_pubkey: Vec<u8> = HEXLOWER_PERMISSIVE.decode(
args.value_of(ARG_SRV_PUBKEY).expect("Server pubkey not supplied").as_bytes()
).unwrap();
// Set up task instance
let (incoming_tx, incoming_rx) = mpsc::unbounded();
let task = RelayedDataTask::new(core.remote(), incoming_tx);
// Set up client instance
let client = Arc::new(RwLock::new({
let builder = SaltyClient::build(keypair)
.add_task(Box::new(task))
.with_ping_interval(Some(ping_interval));
match role {
Role::Initiator => builder
.initiator()
.expect("Could not create SaltyClient instance"),
Role::Responder => {
let auth_token_hex = args.value_of(ARG_AUTHTOKEN).expect("Auth token not supplied").to_string();
let auth_token = AuthToken::from_hex_str(&auth_token_hex).expect("Invalid auth token hex string");
let initiator_pubkey = public_key_from_hex_str(&path).unwrap();
builder
.responder(initiator_pubkey, auth_token)
.expect("Could not create SaltyClient instance")
}
}
}));
// Connect future
let (connect_future, event_channel) = saltyrtc_client::connect(
server_host,
server_port,
Some(tls_connector),
client.clone(),
)
.unwrap();
// Handshake future
let event_tx = event_channel.clone_tx();
let handshake_future = connect_future
.and_then(|ws_client| saltyrtc_client::do_handshake(ws_client, client.clone(), event_tx, None));
// Determine QR code payload
let payload = make_qrcode_payload(
1,
false,
server_host,
server_port,
pubkey.as_bytes(),
client.read().unwrap().auth_token().unwrap().secret_key_bytes(),
&server_pubkey,
);
// Print connection info
println!("\n#====================#");
println!("Host: {}:{}", server_host, server_port);
match role {
Role::Initiator => {
println!("Pubkey: {}", HEXLOWER.encode(pubkey.as_bytes()));
println!("Auth token: {}", HEXLOWER.encode(client.read().unwrap().auth_token().unwrap().secret_key_bytes()));
println!();
println!("QR Code:");
print_qrcode(&payload);
println!("{}", BASE64.encode(&payload));
println!("\n#====================#\n");
}
Role::Responder => {
println!("Pubkey: {}", args.value_of(ARG_AUTHTOKEN).expect("Auth token not supplied").to_string());
println!("#====================#\n");
}
}
// Run connect future to completion
let ws_client = core.run(handshake_future).expect("Could not connect");
// Setup task loop
let event_tx = event_channel.clone_tx();
let (task, task_loop) = saltyrtc_client::task_loop(ws_client, client.clone(), event_tx).unwrap();
// Get access to outgoing channel
let _outgoing_tx = {
// Get reference to task and downcast it to `RelayedDataTask`.
// We can be sure that it's a `RelayedDataTask` since that's the only one we proposed.
let mut t = task.lock().expect("Could not lock task mutex");
let rd_task: &mut RelayedDataTask = (&mut **t as &mut dyn Task)
.downcast_mut::<RelayedDataTask>()
.expect("Chosen task is not a RelayedDataTask");
// Get unbounded senders for outgoing messages
rd_task.get_sender().unwrap()
};
// Print all incoming events to stdout
let recv_loop = incoming_rx
.map_err(|_| Err(RelayedDataError::Channel(("Could not read from rx_responder").into())))
.for_each(move |ev: MessageEvent| match ev {
MessageEvent::Data(data) => {
println!("Incoming data message: {}", data);
boxed!(future::ok(()))
},
MessageEvent::Application(data) => {
println!("Incoming application message: {}", data);
boxed!(future::ok(()))
},
MessageEvent::Close(reason) => {
println!("Connection was closed: {}", reason);
boxed!(future::err(Ok(())))
}
})
.or_else(|e| e)
.then(|f| { debug!("† recv_loop done"); f });
match core.run(
task_loop
.map_err(|e| e.to_string())
.then(|f| { debug!("† task_loop done"); f })
.join(recv_loop.map_err(|e| e.to_string()))
) {
Ok(_) => info!("Done."),
Err(e) => panic!("Error: {}", e),
};
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_m | let pubkey = HEXLOWER.decode(b"4242424242424242424242424242424242424242424242424242424242424242").unwrap();
let auth_token = HEXLOWER.decode(b"2323232323232323232323232323232323232323232323232323232323232323").unwrap();
let server_pubkey = HEXLOWER.decode(b"1337133713371337133713371337133713371337133713371337133713371337").unwrap();
let data = make_qrcode_payload(1337, true, "saltyrtc.example.org", 1234, &pubkey, &auth_token, &server_pubkey);
let expected = BASE64.decode(b"BTkCQkJCQkJCQkJCQkJCQkJCQkJCQkJCQkJCQkJCQkJCQkIjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIxM3EzcTNxM3EzcTNxM3EzcTNxM3EzcTNxM3EzcTNxM3BNJzYWx0eXJ0Yy5leGFtcGxlLm9yZw==").unwrap();
assert_eq!(data, expected);
}
}
| ake_qrcode_data() {
| identifier_name |
testclient.rs | extern crate byteorder;
extern crate clap;
extern crate data_encoding;
extern crate env_logger;
#[macro_use] extern crate log;
extern crate qrcodegen;
extern crate saltyrtc_client;
extern crate saltyrtc_task_relayed_data;
extern crate tokio_core;
use std::env;
use std::io::Write;
use std::process;
use std::sync::{Arc, RwLock};
use std::time::Duration;
use byteorder::{BigEndian, WriteBytesExt};
use clap::{Arg, App, SubCommand};
use data_encoding::{HEXLOWER, HEXLOWER_PERMISSIVE, BASE64};
use qrcodegen::{QrCode, QrCodeEcc};
use saltyrtc_client::{SaltyClient, Role, BoxedFuture};
use saltyrtc_client::crypto::{KeyPair, AuthToken, public_key_from_hex_str};
use saltyrtc_client::dep::futures::{future, Future, Stream};
use saltyrtc_client::dep::futures::sync::mpsc;
use saltyrtc_client::dep::native_tls::{TlsConnector, Protocol};
use saltyrtc_client::tasks::Task;
use saltyrtc_task_relayed_data::{RelayedDataTask, RelayedDataError, MessageEvent};
use tokio_core::reactor::Core;
const ARG_PING_INTERVAL: &str = "ping_interval";
const ARG_SRV_HOST: &str = "host";
const ARG_SRV_PORT: &str = "port";
const ARG_SRV_PUBKEY: &str = "pubkey";
const ARG_PATH: &str = "path";
const ARG_AUTHTOKEN: &str = "auth_token";
const VERSION: &str = env!("CARGO_PKG_VERSION");
/// Wrap future in a box with type erasure.
macro_rules! boxed {
($future:expr) => {{
Box::new($future) as BoxedFuture<_, _>
}}
}
/// Create the QR code payload
fn make_qrcode_payload(version: u16, permanent: bool, host: &str, port: u16, pubkey: &[u8], auth_token: &[u8], server_pubkey: &[u8]) -> Vec<u8> {
let mut data: Vec<u8> = Vec::with_capacity(101 + host.as_bytes().len());
data.write_u16::<BigEndian>(version).unwrap();
data.push(if permanent { 0x02 } else { 0x00 });
data.write_all(pubkey).unwrap();
data.write_all(auth_token).unwrap();
data.write_all(server_pubkey).unwrap();
data.write_u16::<BigEndian>(port).unwrap();
data.write_all(host.as_bytes()).unwrap();
data
}
/// Print the QR code payload to the terminal
fn print_qrcode(payload: &[u8]) {
let base64 = BASE64.encode(payload);
let qr = QrCode::encode_text(&base64, QrCodeEcc::Low).unwrap();
let border = 1;
for y in -border..qr.size() + border {
for x in -border..qr.size() + border {
let c: char = if qr.get_module(x, y) { '█' } else { ' ' };
print!("{0}{0}", c);
}
println!();
}
println!();
}
fn main() {
| .required(true)
.default_value("f77fe623b6977d470ac8c7bf7011c4ad08a1d126896795db9d2b4b7a49ae1045")
.help("The SaltyRTC server public permanent key");
let arg_ping_interval = Arg::with_name(ARG_PING_INTERVAL)
.short('i')
.takes_value(true)
.value_name("SECONDS")
.required(false)
.default_value("60")
.help("The WebSocket ping interval (set to 0 to disable pings)");
let app = App::new("SaltyRTC Relayed Data Test Initiator")
.version(VERSION)
.author("Danilo Bargen <[email protected]>")
.about("Test client for SaltyRTC Relayed Data Task.")
.subcommand(SubCommand::with_name("initiator")
.about("Start client as initiator")
.arg(arg_srv_host.clone())
.arg(arg_srv_port.clone())
.arg(arg_srv_pubkey.clone())
.arg(arg_ping_interval.clone()))
.subcommand(SubCommand::with_name("responder")
.about("Start client as responder")
.arg(Arg::with_name(ARG_PATH)
.short('k')
.takes_value(true)
.value_name("INITIATOR_PUBKEY")
.required(true)
.help("The hex encoded public key of the initiator"))
.arg(Arg::with_name(ARG_AUTHTOKEN)
.short('a')
.alias("token")
.alias("authtoken")
.takes_value(true)
.value_name("AUTHTOKEN")
.required(true)
.help("The auth token (hex encoded)"))
.arg(arg_srv_host)
.arg(arg_srv_port)
.arg(arg_srv_pubkey)
.arg(arg_ping_interval));
// Parse arguments
let matches = app.get_matches();
let (subcommand_name, args) = matches.subcommand().unwrap_or_else(|| {
println!("Missing subcommand.");
println!("Use -h or --help to see usage.");
process::exit(1);
});
// Determine role
let role = match subcommand_name {
"initiator" => Role::Initiator,
"responder" => Role::Responder,
other => {
println!("Invalid subcommand: {}", other);
process::exit(1);
},
};
// Set up logging
env::set_var("RUST_LOG", "saltyrtc_client=debug,saltyrtc_task_relayed_data=debug,testclient=trace");
env_logger::init();
// Tokio reactor core
let mut core = Core::new().unwrap();
// Create TLS connector instance
let tls_connector = TlsConnector::builder()
.min_protocol_version(Some(Protocol::Tlsv11))
.build()
.unwrap_or_else(|e| panic!("Could not initialize TlsConnector: {}", e));
// Create new public permanent keypair
let keypair = KeyPair::new();
let pubkey = keypair.public_key().clone();
// Determine websocket path
let path: String = match role {
Role::Initiator => keypair.public_key_hex(),
Role::Responder => args.value_of(ARG_PATH).expect("Initiator pubkey not supplied").to_lowercase(),
};
// Determine ping interval
let ping_interval = {
let seconds: u64 = args.value_of(ARG_PING_INTERVAL).expect("Ping interval not supplied")
.parse().expect("Could not parse interval seconds to a number");
Duration::from_secs(seconds)
};
// Determine server info
let server_host: &str = args.value_of(ARG_SRV_HOST).expect("Server hostname not supplied");
let server_port: u16 = args.value_of(ARG_SRV_PORT).expect("Server port not supplied").parse().expect("Could not parse port to a number");
let server_pubkey: Vec<u8> = HEXLOWER_PERMISSIVE.decode(
args.value_of(ARG_SRV_PUBKEY).expect("Server pubkey not supplied").as_bytes()
).unwrap();
// Set up task instance
let (incoming_tx, incoming_rx) = mpsc::unbounded();
let task = RelayedDataTask::new(core.remote(), incoming_tx);
// Set up client instance
let client = Arc::new(RwLock::new({
let builder = SaltyClient::build(keypair)
.add_task(Box::new(task))
.with_ping_interval(Some(ping_interval));
match role {
Role::Initiator => builder
.initiator()
.expect("Could not create SaltyClient instance"),
Role::Responder => {
let auth_token_hex = args.value_of(ARG_AUTHTOKEN).expect("Auth token not supplied").to_string();
let auth_token = AuthToken::from_hex_str(&auth_token_hex).expect("Invalid auth token hex string");
let initiator_pubkey = public_key_from_hex_str(&path).unwrap();
builder
.responder(initiator_pubkey, auth_token)
.expect("Could not create SaltyClient instance")
}
}
}));
// Connect future
let (connect_future, event_channel) = saltyrtc_client::connect(
server_host,
server_port,
Some(tls_connector),
client.clone(),
)
.unwrap();
// Handshake future
let event_tx = event_channel.clone_tx();
let handshake_future = connect_future
.and_then(|ws_client| saltyrtc_client::do_handshake(ws_client, client.clone(), event_tx, None));
// Determine QR code payload
let payload = make_qrcode_payload(
1,
false,
server_host,
server_port,
pubkey.as_bytes(),
client.read().unwrap().auth_token().unwrap().secret_key_bytes(),
&server_pubkey,
);
// Print connection info
println!("\n#====================#");
println!("Host: {}:{}", server_host, server_port);
match role {
Role::Initiator => {
println!("Pubkey: {}", HEXLOWER.encode(pubkey.as_bytes()));
println!("Auth token: {}", HEXLOWER.encode(client.read().unwrap().auth_token().unwrap().secret_key_bytes()));
println!();
println!("QR Code:");
print_qrcode(&payload);
println!("{}", BASE64.encode(&payload));
println!("\n#====================#\n");
}
Role::Responder => {
println!("Pubkey: {}", args.value_of(ARG_AUTHTOKEN).expect("Auth token not supplied").to_string());
println!("#====================#\n");
}
}
// Run connect future to completion
let ws_client = core.run(handshake_future).expect("Could not connect");
// Setup task loop
let event_tx = event_channel.clone_tx();
let (task, task_loop) = saltyrtc_client::task_loop(ws_client, client.clone(), event_tx).unwrap();
// Get access to outgoing channel
let _outgoing_tx = {
// Get reference to task and downcast it to `RelayedDataTask`.
// We can be sure that it's a `RelayedDataTask` since that's the only one we proposed.
let mut t = task.lock().expect("Could not lock task mutex");
let rd_task: &mut RelayedDataTask = (&mut **t as &mut dyn Task)
.downcast_mut::<RelayedDataTask>()
.expect("Chosen task is not a RelayedDataTask");
// Get unbounded senders for outgoing messages
rd_task.get_sender().unwrap()
};
// Print all incoming events to stdout
let recv_loop = incoming_rx
.map_err(|_| Err(RelayedDataError::Channel(("Could not read from rx_responder").into())))
.for_each(move |ev: MessageEvent| match ev {
MessageEvent::Data(data) => {
println!("Incoming data message: {}", data);
boxed!(future::ok(()))
},
MessageEvent::Application(data) => {
println!("Incoming application message: {}", data);
boxed!(future::ok(()))
},
MessageEvent::Close(reason) => {
println!("Connection was closed: {}", reason);
boxed!(future::err(Ok(())))
}
})
.or_else(|e| e)
.then(|f| { debug!("† recv_loop done"); f });
match core.run(
task_loop
.map_err(|e| e.to_string())
.then(|f| { debug!("† task_loop done"); f })
.join(recv_loop.map_err(|e| e.to_string()))
) {
Ok(_) => info!("Done."),
Err(e) => panic!("Error: {}", e),
};
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_make_qrcode_data() {
let pubkey = HEXLOWER.decode(b"4242424242424242424242424242424242424242424242424242424242424242").unwrap();
let auth_token = HEXLOWER.decode(b"2323232323232323232323232323232323232323232323232323232323232323").unwrap();
let server_pubkey = HEXLOWER.decode(b"1337133713371337133713371337133713371337133713371337133713371337").unwrap();
let data = make_qrcode_payload(1337, true, "saltyrtc.example.org", 1234, &pubkey, &auth_token, &server_pubkey);
let expected = BASE64.decode(b"BTkCQkJCQkJCQkJCQkJCQkJCQkJCQkJCQkJCQkJCQkJCQkIjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIxM3EzcTNxM3EzcTNxM3EzcTNxM3EzcTNxM3EzcTNxM3BNJzYWx0eXJ0Yy5leGFtcGxlLm9yZw==").unwrap();
assert_eq!(data, expected);
}
}
|
// Set up CLI arguments
let arg_srv_host = Arg::with_name(ARG_SRV_HOST)
.short('h')
.takes_value(true)
.value_name("SRV_HOST")
.required(true)
.default_value("server.saltyrtc.org")
.help("The SaltyRTC server hostname");
let arg_srv_port = Arg::with_name(ARG_SRV_PORT)
.short('p')
.takes_value(true)
.value_name("SRV_PORT")
.required(true)
.default_value("443")
.help("The SaltyRTC server port");
let arg_srv_pubkey = Arg::with_name(ARG_SRV_PUBKEY)
.short('s')
.takes_value(true)
.value_name("SRV_PUBKEY") | identifier_body |
testclient.rs | extern crate byteorder;
extern crate clap;
extern crate data_encoding;
extern crate env_logger;
#[macro_use] extern crate log;
extern crate qrcodegen;
extern crate saltyrtc_client;
extern crate saltyrtc_task_relayed_data;
extern crate tokio_core;
use std::env;
use std::io::Write;
use std::process;
use std::sync::{Arc, RwLock};
use std::time::Duration;
use byteorder::{BigEndian, WriteBytesExt};
use clap::{Arg, App, SubCommand};
use data_encoding::{HEXLOWER, HEXLOWER_PERMISSIVE, BASE64};
use qrcodegen::{QrCode, QrCodeEcc};
use saltyrtc_client::{SaltyClient, Role, BoxedFuture};
use saltyrtc_client::crypto::{KeyPair, AuthToken, public_key_from_hex_str};
use saltyrtc_client::dep::futures::{future, Future, Stream};
use saltyrtc_client::dep::futures::sync::mpsc;
use saltyrtc_client::dep::native_tls::{TlsConnector, Protocol};
use saltyrtc_client::tasks::Task;
use saltyrtc_task_relayed_data::{RelayedDataTask, RelayedDataError, MessageEvent};
use tokio_core::reactor::Core;
const ARG_PING_INTERVAL: &str = "ping_interval";
const ARG_SRV_HOST: &str = "host";
const ARG_SRV_PORT: &str = "port";
const ARG_SRV_PUBKEY: &str = "pubkey";
const ARG_PATH: &str = "path";
const ARG_AUTHTOKEN: &str = "auth_token";
const VERSION: &str = env!("CARGO_PKG_VERSION");
/// Wrap future in a box with type erasure.
macro_rules! boxed {
($future:expr) => {{
Box::new($future) as BoxedFuture<_, _>
}}
}
/// Create the QR code payload
fn make_qrcode_payload(version: u16, permanent: bool, host: &str, port: u16, pubkey: &[u8], auth_token: &[u8], server_pubkey: &[u8]) -> Vec<u8> {
let mut data: Vec<u8> = Vec::with_capacity(101 + host.as_bytes().len());
data.write_u16::<BigEndian>(version).unwrap();
data.push(if permanent { 0x02 } else { 0x00 });
data.write_all(pubkey).unwrap();
data.write_all(auth_token).unwrap();
data.write_all(server_pubkey).unwrap();
data.write_u16::<BigEndian>(port).unwrap();
data.write_all(host.as_bytes()).unwrap();
data
}
/// Print the QR code payload to the terminal
fn print_qrcode(payload: &[u8]) {
let base64 = BASE64.encode(payload);
let qr = QrCode::encode_text(&base64, QrCodeEcc::Low).unwrap();
let border = 1;
for y in -border..qr.size() + border {
for x in -border..qr.size() + border {
let c: char = if qr.get_module(x, y) { '█' } else { ' ' };
print!("{0}{0}", c);
}
println!();
}
println!();
}
fn main() {
// Set up CLI arguments
let arg_srv_host = Arg::with_name(ARG_SRV_HOST)
.short('h')
.takes_value(true)
.value_name("SRV_HOST")
.required(true)
.default_value("server.saltyrtc.org")
.help("The SaltyRTC server hostname");
let arg_srv_port = Arg::with_name(ARG_SRV_PORT)
.short('p')
.takes_value(true)
.value_name("SRV_PORT")
.required(true)
.default_value("443")
.help("The SaltyRTC server port");
let arg_srv_pubkey = Arg::with_name(ARG_SRV_PUBKEY)
.short('s')
.takes_value(true)
.value_name("SRV_PUBKEY")
.required(true)
.default_value("f77fe623b6977d470ac8c7bf7011c4ad08a1d126896795db9d2b4b7a49ae1045")
.help("The SaltyRTC server public permanent key");
let arg_ping_interval = Arg::with_name(ARG_PING_INTERVAL)
.short('i')
.takes_value(true)
.value_name("SECONDS")
.required(false)
.default_value("60")
.help("The WebSocket ping interval (set to 0 to disable pings)");
let app = App::new("SaltyRTC Relayed Data Test Initiator")
.version(VERSION)
.author("Danilo Bargen <[email protected]>")
.about("Test client for SaltyRTC Relayed Data Task.")
.subcommand(SubCommand::with_name("initiator")
.about("Start client as initiator")
.arg(arg_srv_host.clone())
.arg(arg_srv_port.clone())
.arg(arg_srv_pubkey.clone())
.arg(arg_ping_interval.clone()))
.subcommand(SubCommand::with_name("responder")
.about("Start client as responder")
.arg(Arg::with_name(ARG_PATH)
.short('k')
.takes_value(true)
.value_name("INITIATOR_PUBKEY")
.required(true)
.help("The hex encoded public key of the initiator"))
.arg(Arg::with_name(ARG_AUTHTOKEN)
.short('a')
.alias("token")
.alias("authtoken")
.takes_value(true)
.value_name("AUTHTOKEN")
.required(true)
.help("The auth token (hex encoded)"))
.arg(arg_srv_host)
.arg(arg_srv_port)
.arg(arg_srv_pubkey)
.arg(arg_ping_interval));
// Parse arguments
let matches = app.get_matches();
let (subcommand_name, args) = matches.subcommand().unwrap_or_else(|| {
println!("Missing subcommand.");
println!("Use -h or --help to see usage.");
process::exit(1);
});
// Determine role
let role = match subcommand_name {
"initiator" => Role::Initiator,
"responder" => Role::Responder,
other => {
println!("Invalid subcommand: {}", other);
process::exit(1);
},
};
// Set up logging
env::set_var("RUST_LOG", "saltyrtc_client=debug,saltyrtc_task_relayed_data=debug,testclient=trace");
env_logger::init();
// Tokio reactor core
let mut core = Core::new().unwrap();
// Create TLS connector instance
let tls_connector = TlsConnector::builder()
.min_protocol_version(Some(Protocol::Tlsv11))
.build()
.unwrap_or_else(|e| panic!("Could not initialize TlsConnector: {}", e));
// Create new public permanent keypair
let keypair = KeyPair::new();
let pubkey = keypair.public_key().clone();
// Determine websocket path
let path: String = match role {
Role::Initiator => keypair.public_key_hex(),
Role::Responder => args.value_of(ARG_PATH).expect("Initiator pubkey not supplied").to_lowercase(),
};
// Determine ping interval
let ping_interval = {
let seconds: u64 = args.value_of(ARG_PING_INTERVAL).expect("Ping interval not supplied")
.parse().expect("Could not parse interval seconds to a number");
Duration::from_secs(seconds)
};
// Determine server info
let server_host: &str = args.value_of(ARG_SRV_HOST).expect("Server hostname not supplied");
let server_port: u16 = args.value_of(ARG_SRV_PORT).expect("Server port not supplied").parse().expect("Could not parse port to a number");
let server_pubkey: Vec<u8> = HEXLOWER_PERMISSIVE.decode(
args.value_of(ARG_SRV_PUBKEY).expect("Server pubkey not supplied").as_bytes()
).unwrap();
// Set up task instance
let (incoming_tx, incoming_rx) = mpsc::unbounded();
let task = RelayedDataTask::new(core.remote(), incoming_tx);
// Set up client instance
let client = Arc::new(RwLock::new({
let builder = SaltyClient::build(keypair)
.add_task(Box::new(task))
.with_ping_interval(Some(ping_interval));
match role {
Role::Initiator => builder
.initiator()
.expect("Could not create SaltyClient instance"),
Role::Responder => {
let auth_token_hex = args.value_of(ARG_AUTHTOKEN).expect("Auth token not supplied").to_string();
let auth_token = AuthToken::from_hex_str(&auth_token_hex).expect("Invalid auth token hex string");
let initiator_pubkey = public_key_from_hex_str(&path).unwrap();
builder
.responder(initiator_pubkey, auth_token)
.expect("Could not create SaltyClient instance")
}
}
}));
// Connect future
let (connect_future, event_channel) = saltyrtc_client::connect(
server_host,
server_port,
Some(tls_connector),
client.clone(),
)
.unwrap();
// Handshake future
let event_tx = event_channel.clone_tx();
let handshake_future = connect_future
.and_then(|ws_client| saltyrtc_client::do_handshake(ws_client, client.clone(), event_tx, None));
// Determine QR code payload
let payload = make_qrcode_payload(
1,
false, | client.read().unwrap().auth_token().unwrap().secret_key_bytes(),
&server_pubkey,
);
// Print connection info
println!("\n#====================#");
println!("Host: {}:{}", server_host, server_port);
match role {
Role::Initiator => {
println!("Pubkey: {}", HEXLOWER.encode(pubkey.as_bytes()));
println!("Auth token: {}", HEXLOWER.encode(client.read().unwrap().auth_token().unwrap().secret_key_bytes()));
println!();
println!("QR Code:");
print_qrcode(&payload);
println!("{}", BASE64.encode(&payload));
println!("\n#====================#\n");
}
Role::Responder => {
println!("Pubkey: {}", args.value_of(ARG_AUTHTOKEN).expect("Auth token not supplied").to_string());
println!("#====================#\n");
}
}
// Run connect future to completion
let ws_client = core.run(handshake_future).expect("Could not connect");
// Setup task loop
let event_tx = event_channel.clone_tx();
let (task, task_loop) = saltyrtc_client::task_loop(ws_client, client.clone(), event_tx).unwrap();
// Get access to outgoing channel
let _outgoing_tx = {
// Get reference to task and downcast it to `RelayedDataTask`.
// We can be sure that it's a `RelayedDataTask` since that's the only one we proposed.
let mut t = task.lock().expect("Could not lock task mutex");
let rd_task: &mut RelayedDataTask = (&mut **t as &mut dyn Task)
.downcast_mut::<RelayedDataTask>()
.expect("Chosen task is not a RelayedDataTask");
// Get unbounded senders for outgoing messages
rd_task.get_sender().unwrap()
};
// Print all incoming events to stdout
let recv_loop = incoming_rx
.map_err(|_| Err(RelayedDataError::Channel(("Could not read from rx_responder").into())))
.for_each(move |ev: MessageEvent| match ev {
MessageEvent::Data(data) => {
println!("Incoming data message: {}", data);
boxed!(future::ok(()))
},
MessageEvent::Application(data) => {
println!("Incoming application message: {}", data);
boxed!(future::ok(()))
},
MessageEvent::Close(reason) => {
println!("Connection was closed: {}", reason);
boxed!(future::err(Ok(())))
}
})
.or_else(|e| e)
.then(|f| { debug!("† recv_loop done"); f });
match core.run(
task_loop
.map_err(|e| e.to_string())
.then(|f| { debug!("† task_loop done"); f })
.join(recv_loop.map_err(|e| e.to_string()))
) {
Ok(_) => info!("Done."),
Err(e) => panic!("Error: {}", e),
};
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_make_qrcode_data() {
let pubkey = HEXLOWER.decode(b"4242424242424242424242424242424242424242424242424242424242424242").unwrap();
let auth_token = HEXLOWER.decode(b"2323232323232323232323232323232323232323232323232323232323232323").unwrap();
let server_pubkey = HEXLOWER.decode(b"1337133713371337133713371337133713371337133713371337133713371337").unwrap();
let data = make_qrcode_payload(1337, true, "saltyrtc.example.org", 1234, &pubkey, &auth_token, &server_pubkey);
let expected = BASE64.decode(b"BTkCQkJCQkJCQkJCQkJCQkJCQkJCQkJCQkJCQkJCQkJCQkIjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIxM3EzcTNxM3EzcTNxM3EzcTNxM3EzcTNxM3EzcTNxM3BNJzYWx0eXJ0Yy5leGFtcGxlLm9yZw==").unwrap();
assert_eq!(data, expected);
}
} | server_host,
server_port,
pubkey.as_bytes(), | random_line_split |
main.rs | mod cmd;
mod config;
mod edit;
mod error;
mod fmt;
mod opts;
use std::env;
use std::ffi::OsString;
use std::fs;
use std::io::{self, BufRead, Write};
use std::path::{Path, PathBuf};
use std::process::{self, Command, Stdio};
use atty::Stream::{Stderr, Stdout};
use prettyprint::{PagingMode, PrettyPrinter};
use quote::quote;
use structopt::StructOpt;
use termcolor::{Color::Green, ColorChoice, ColorSpec, StandardStream, WriteColor};
use crate::cmd::Line;
use crate::error::Result;
use crate::opts::Coloring::*;
use crate::opts::{Args, Coloring, Opts};
fn main() {
let result = cargo_expand_or_run_nightly();
process::exit(match result {
Ok(code) => code,
Err(err) => {
let _ = writeln!(io::stderr(), "{}", err);
1
}
});
}
fn cargo_expand_or_run_nightly() -> Result<i32> {
const NO_RUN_NIGHTLY: &str = "CARGO_EXPAND_NO_RUN_NIGHTLY";
let maybe_nightly = !definitely_not_nightly();
if maybe_nightly || env::var_os(NO_RUN_NIGHTLY).is_some() {
return cargo_expand();
}
let mut nightly = Command::new("cargo");
nightly.arg("+nightly");
nightly.arg("expand");
let mut args = env::args_os().peekable();
args.next().unwrap(); // cargo
if args.peek().map_or(false, |arg| arg == "expand") {
args.next().unwrap(); // expand
}
nightly.args(args);
// Hopefully prevent infinite re-run loop.
nightly.env(NO_RUN_NIGHTLY, "");
let status = nightly.status()?;
Ok(match status.code() {
Some(code) => code,
None => {
if status.success() {
0
} else {
1
}
}
})
}
fn definitely_not_nightly() -> bool {
let mut cmd = Command::new(cargo_binary());
cmd.arg("--version");
let output = match cmd.output() {
Ok(output) => output,
Err(_) => return false,
};
let version = match String::from_utf8(output.stdout) {
Ok(version) => version,
Err(_) => return false,
};
version.starts_with("cargo 1") &&!version.contains("nightly")
}
fn cargo_binary() -> OsString {
env::var_os("CARGO").unwrap_or_else(|| "cargo".to_owned().into())
}
fn cargo_expand() -> Result<i32> {
let Opts::Expand(args) = Opts::from_args();
let config = config::deserialize();
if args.themes {
for theme in PrettyPrinter::default()
.build()
.unwrap()
.get_themes()
.keys()
{
let _ = writeln!(io::stdout(), "{}", theme);
}
return Ok(0);
}
let rustfmt;
match (&args.item, args.ugly) {
(Some(item), true) => {
let _ = writeln!(
io::stderr(),
"ERROR: cannot expand single item ({}) in ugly mode.",
item,
);
return Ok(1);
}
(Some(item), false) => {
rustfmt = which_rustfmt();
if rustfmt.is_none() {
let _ = writeln!(
io::stderr(),
"ERROR: cannot expand single item ({}) without rustfmt.",
item,
);
let _ = writeln!(
io::stderr(),
"Install rustfmt by running `rustup component add rustfmt --toolchain nightly`.",
);
return Ok(1);
}
}
(None, true) => rustfmt = None,
(None, false) => rustfmt = which_rustfmt(),
}
let mut builder = tempfile::Builder::new();
builder.prefix("cargo-expand");
let outdir = builder.tempdir().expect("failed to create tmp file");
let outfile_path = outdir.path().join("expanded");
// Run cargo
let mut cmd = Command::new(cargo_binary());
apply_args(&mut cmd, &args, &outfile_path);
let code = filter_err(&mut cmd, ignore_cargo_err)?;
if !outfile_path.exists() {
return Ok(1);
}
let mut content = fs::read_to_string(&outfile_path)?;
if content.is_empty() {
let _ = writeln!(io::stderr(), "ERROR: rustc produced no expanded output",);
return Ok(if code == 0 { 1 } else { code });
}
// Run rustfmt
if let Some(rustfmt) = rustfmt {
// Work around rustfmt not being able to parse paths containing $crate.
// This placeholder should be the same width as $crate to preserve
// alignments.
const DOLLAR_CRATE_PLACEHOLDER: &str = "Ξcrate";
content = content.replace("$crate", DOLLAR_CRATE_PLACEHOLDER);
// Discard comments, which are misplaced by the compiler
if let Ok(mut syntax_tree) = syn::parse_file(&content) {
edit::remove_macro_rules(&mut syntax_tree);
if let Some(filter) = args.item {
syntax_tree.shebang = None;
syntax_tree.attrs.clear();
syntax_tree.items = filter.apply_to(&syntax_tree);
if syntax_tree.items.is_empty() {
let _ = writeln!(io::stderr(), "WARNING: no such item: {}", filter);
return Ok(1);
}
}
content = quote!(#syntax_tree).to_string();
}
fs::write(&outfile_path, content)?;
fmt::write_rustfmt_config(&outdir)?;
// Ignore any errors.
let _status = Command::new(rustfmt)
.arg("--edition=2018")
.arg(&outfile_path)
.stderr(Stdio::null())
.status();
content = fs::read_to_string(&outfile_path)?;
content = content.replace(DOLLAR_CRATE_PLACEHOLDER, "$crate");
}
// Run pretty printer
let theme = args.theme.or(config.theme);
let none_theme = theme.as_ref().map(String::as_str) == Some("none");
let do_color = match args.color {
Some(Always) => true,
Some(Never) => false,
None | Some(Auto) => !none_theme && atty::is(Stdout),
};
let _ = writeln!(io::stderr());
if do_color {
if content.ends_with('\n') {
// Pretty printer seems to print an extra trailing newline.
content.truncate(content.len() - 1);
}
let mut builder = PrettyPrinter::default();
builder.header(false);
builder.grid(false);
builder.line_numbers(false);
builder.language("rust");
builder.paging_mode(PagingMode::Never);
if let Some(theme) = theme {
builder.theme(theme);
}
let printer = builder.build().unwrap();
// Ignore any errors.
let _ = printer.string(content);
} else {
let _ = write!(io::stdout(), "{}", content);
}
Ok(0)
}
fn which_rustfmt() -> Option<PathBuf> { |
// Based on https://github.com/rsolomo/cargo-check
fn apply_args(cmd: &mut Command, args: &Args, outfile: &Path) {
let mut line = Line::new("cargo");
line.arg("rustc");
if args.tests && args.test.is_none() {
line.arg("--profile=test");
} else {
line.arg("--profile=check");
}
if let Some(features) = &args.features {
line.arg("--features");
line.arg(features);
}
if args.all_features {
line.arg("--all-features");
}
if args.no_default_features {
line.arg("--no-default-features");
}
if args.lib {
line.arg("--lib");
}
if let Some(bin) = &args.bin {
line.arg("--bin");
line.arg(bin);
}
if let Some(example) = &args.example {
line.arg("--example");
line.arg(example);
}
if let Some(test) = &args.test {
line.arg("--test");
line.arg(test);
}
if let Some(bench) = &args.bench {
line.arg("--bench");
line.arg(bench);
}
if let Some(target) = &args.target {
line.arg("--target");
line.arg(target);
}
if let Some(target_dir) = &args.target_dir {
line.arg("--target-dir");
line.arg(target_dir);
}
if let Some(manifest_path) = &args.manifest_path {
line.arg("--manifest-path");
line.arg(manifest_path);
}
if let Some(package) = &args.package {
line.arg("--package");
line.arg(package);
}
if let Some(jobs) = args.jobs {
line.arg("--jobs");
line.arg(jobs.to_string());
}
if args.verbose {
line.arg("--verbose");
}
line.arg("--color");
if let Some(color) = &args.color {
line.arg(color.to_string());
} else {
line.arg(if atty::is(Stderr) { "always" } else { "never" });
}
if args.frozen {
line.arg("--frozen");
}
if args.locked {
line.arg("--locked");
}
for unstable_flag in &args.unstable_flags {
line.arg("-Z");
line.arg(unstable_flag);
}
line.arg("--");
line.arg("-o");
line.arg(outfile);
line.arg("-Zunstable-options");
line.arg("--pretty=expanded");
if args.verbose {
let mut display = line.clone();
display.insert(0, "+nightly");
print_command(display, args);
}
cmd.args(line);
}
fn print_command(line: Line, args: &Args) {
let color_choice = match args.color {
Some(Coloring::Auto) | None => ColorChoice::Auto,
Some(Coloring::Always) => ColorChoice::Always,
Some(Coloring::Never) => ColorChoice::Never,
};
let mut stream = StandardStream::stderr(color_choice);
let _ = stream.set_color(ColorSpec::new().set_bold(true).set_fg(Some(Green)));
let _ = write!(stream, "{:>12}", "Running");
let _ = stream.reset();
let _ = writeln!(stream, " `{}`", line);
}
fn filter_err(cmd: &mut Command, ignore: fn(&str) -> bool) -> io::Result<i32> {
let mut child = cmd.stderr(Stdio::piped()).spawn()?;
let mut stderr = io::BufReader::new(child.stderr.take().unwrap());
let mut line = String::new();
while let Ok(n) = stderr.read_line(&mut line) {
if n == 0 {
break;
}
if !ignore(&line) {
let _ = write!(io::stderr(), "{}", line);
}
line.clear();
}
let code = child.wait()?.code().unwrap_or(1);
Ok(code)
}
fn ignore_cargo_err(line: &str) -> bool {
if line.trim().is_empty() {
return true;
}
let blacklist = [
"ignoring specified output filename because multiple outputs were \
requested",
"ignoring specified output filename for 'link' output because multiple \
outputs were requested",
"ignoring --out-dir flag due to -o flag",
"ignoring -C extra-filename flag due to -o flag",
"due to multiple output types requested, the explicitly specified \
output file name will be adapted for each output type",
];
for s in &blacklist {
if line.contains(s) {
return true;
}
}
false
}
|
match env::var_os("RUSTFMT") {
Some(which) => {
if which.is_empty() {
None
} else {
Some(PathBuf::from(which))
}
}
None => toolchain_find::find_installed_component("rustfmt"),
}
}
| identifier_body |
main.rs | mod cmd;
mod config;
mod edit;
mod error;
mod fmt;
mod opts;
use std::env;
use std::ffi::OsString;
use std::fs;
use std::io::{self, BufRead, Write};
use std::path::{Path, PathBuf};
use std::process::{self, Command, Stdio};
use atty::Stream::{Stderr, Stdout};
use prettyprint::{PagingMode, PrettyPrinter};
use quote::quote;
use structopt::StructOpt;
use termcolor::{Color::Green, ColorChoice, ColorSpec, StandardStream, WriteColor};
use crate::cmd::Line;
use crate::error::Result;
use crate::opts::Coloring::*;
use crate::opts::{Args, Coloring, Opts};
fn main() {
let result = cargo_expand_or_run_nightly();
process::exit(match result {
Ok(code) => code,
Err(err) => {
let _ = writeln!(io::stderr(), "{}", err);
1
}
});
}
fn | () -> Result<i32> {
const NO_RUN_NIGHTLY: &str = "CARGO_EXPAND_NO_RUN_NIGHTLY";
let maybe_nightly = !definitely_not_nightly();
if maybe_nightly || env::var_os(NO_RUN_NIGHTLY).is_some() {
return cargo_expand();
}
let mut nightly = Command::new("cargo");
nightly.arg("+nightly");
nightly.arg("expand");
let mut args = env::args_os().peekable();
args.next().unwrap(); // cargo
if args.peek().map_or(false, |arg| arg == "expand") {
args.next().unwrap(); // expand
}
nightly.args(args);
// Hopefully prevent infinite re-run loop.
nightly.env(NO_RUN_NIGHTLY, "");
let status = nightly.status()?;
Ok(match status.code() {
Some(code) => code,
None => {
if status.success() {
0
} else {
1
}
}
})
}
fn definitely_not_nightly() -> bool {
let mut cmd = Command::new(cargo_binary());
cmd.arg("--version");
let output = match cmd.output() {
Ok(output) => output,
Err(_) => return false,
};
let version = match String::from_utf8(output.stdout) {
Ok(version) => version,
Err(_) => return false,
};
version.starts_with("cargo 1") &&!version.contains("nightly")
}
fn cargo_binary() -> OsString {
env::var_os("CARGO").unwrap_or_else(|| "cargo".to_owned().into())
}
fn cargo_expand() -> Result<i32> {
let Opts::Expand(args) = Opts::from_args();
let config = config::deserialize();
if args.themes {
for theme in PrettyPrinter::default()
.build()
.unwrap()
.get_themes()
.keys()
{
let _ = writeln!(io::stdout(), "{}", theme);
}
return Ok(0);
}
let rustfmt;
match (&args.item, args.ugly) {
(Some(item), true) => {
let _ = writeln!(
io::stderr(),
"ERROR: cannot expand single item ({}) in ugly mode.",
item,
);
return Ok(1);
}
(Some(item), false) => {
rustfmt = which_rustfmt();
if rustfmt.is_none() {
let _ = writeln!(
io::stderr(),
"ERROR: cannot expand single item ({}) without rustfmt.",
item,
);
let _ = writeln!(
io::stderr(),
"Install rustfmt by running `rustup component add rustfmt --toolchain nightly`.",
);
return Ok(1);
}
}
(None, true) => rustfmt = None,
(None, false) => rustfmt = which_rustfmt(),
}
let mut builder = tempfile::Builder::new();
builder.prefix("cargo-expand");
let outdir = builder.tempdir().expect("failed to create tmp file");
let outfile_path = outdir.path().join("expanded");
// Run cargo
let mut cmd = Command::new(cargo_binary());
apply_args(&mut cmd, &args, &outfile_path);
let code = filter_err(&mut cmd, ignore_cargo_err)?;
if !outfile_path.exists() {
return Ok(1);
}
let mut content = fs::read_to_string(&outfile_path)?;
if content.is_empty() {
let _ = writeln!(io::stderr(), "ERROR: rustc produced no expanded output",);
return Ok(if code == 0 { 1 } else { code });
}
// Run rustfmt
if let Some(rustfmt) = rustfmt {
// Work around rustfmt not being able to parse paths containing $crate.
// This placeholder should be the same width as $crate to preserve
// alignments.
const DOLLAR_CRATE_PLACEHOLDER: &str = "Ξcrate";
content = content.replace("$crate", DOLLAR_CRATE_PLACEHOLDER);
// Discard comments, which are misplaced by the compiler
if let Ok(mut syntax_tree) = syn::parse_file(&content) {
edit::remove_macro_rules(&mut syntax_tree);
if let Some(filter) = args.item {
syntax_tree.shebang = None;
syntax_tree.attrs.clear();
syntax_tree.items = filter.apply_to(&syntax_tree);
if syntax_tree.items.is_empty() {
let _ = writeln!(io::stderr(), "WARNING: no such item: {}", filter);
return Ok(1);
}
}
content = quote!(#syntax_tree).to_string();
}
fs::write(&outfile_path, content)?;
fmt::write_rustfmt_config(&outdir)?;
// Ignore any errors.
let _status = Command::new(rustfmt)
.arg("--edition=2018")
.arg(&outfile_path)
.stderr(Stdio::null())
.status();
content = fs::read_to_string(&outfile_path)?;
content = content.replace(DOLLAR_CRATE_PLACEHOLDER, "$crate");
}
// Run pretty printer
let theme = args.theme.or(config.theme);
let none_theme = theme.as_ref().map(String::as_str) == Some("none");
let do_color = match args.color {
Some(Always) => true,
Some(Never) => false,
None | Some(Auto) => !none_theme && atty::is(Stdout),
};
let _ = writeln!(io::stderr());
if do_color {
if content.ends_with('\n') {
// Pretty printer seems to print an extra trailing newline.
content.truncate(content.len() - 1);
}
let mut builder = PrettyPrinter::default();
builder.header(false);
builder.grid(false);
builder.line_numbers(false);
builder.language("rust");
builder.paging_mode(PagingMode::Never);
if let Some(theme) = theme {
builder.theme(theme);
}
let printer = builder.build().unwrap();
// Ignore any errors.
let _ = printer.string(content);
} else {
let _ = write!(io::stdout(), "{}", content);
}
Ok(0)
}
fn which_rustfmt() -> Option<PathBuf> {
match env::var_os("RUSTFMT") {
Some(which) => {
if which.is_empty() {
None
} else {
Some(PathBuf::from(which))
}
}
None => toolchain_find::find_installed_component("rustfmt"),
}
}
// Based on https://github.com/rsolomo/cargo-check
fn apply_args(cmd: &mut Command, args: &Args, outfile: &Path) {
let mut line = Line::new("cargo");
line.arg("rustc");
if args.tests && args.test.is_none() {
line.arg("--profile=test");
} else {
line.arg("--profile=check");
}
if let Some(features) = &args.features {
line.arg("--features");
line.arg(features);
}
if args.all_features {
line.arg("--all-features");
}
if args.no_default_features {
line.arg("--no-default-features");
}
if args.lib {
line.arg("--lib");
}
if let Some(bin) = &args.bin {
line.arg("--bin");
line.arg(bin);
}
if let Some(example) = &args.example {
line.arg("--example");
line.arg(example);
}
if let Some(test) = &args.test {
line.arg("--test");
line.arg(test);
}
if let Some(bench) = &args.bench {
line.arg("--bench");
line.arg(bench);
}
if let Some(target) = &args.target {
line.arg("--target");
line.arg(target);
}
if let Some(target_dir) = &args.target_dir {
line.arg("--target-dir");
line.arg(target_dir);
}
if let Some(manifest_path) = &args.manifest_path {
line.arg("--manifest-path");
line.arg(manifest_path);
}
if let Some(package) = &args.package {
line.arg("--package");
line.arg(package);
}
if let Some(jobs) = args.jobs {
line.arg("--jobs");
line.arg(jobs.to_string());
}
if args.verbose {
line.arg("--verbose");
}
line.arg("--color");
if let Some(color) = &args.color {
line.arg(color.to_string());
} else {
line.arg(if atty::is(Stderr) { "always" } else { "never" });
}
if args.frozen {
line.arg("--frozen");
}
if args.locked {
line.arg("--locked");
}
for unstable_flag in &args.unstable_flags {
line.arg("-Z");
line.arg(unstable_flag);
}
line.arg("--");
line.arg("-o");
line.arg(outfile);
line.arg("-Zunstable-options");
line.arg("--pretty=expanded");
if args.verbose {
let mut display = line.clone();
display.insert(0, "+nightly");
print_command(display, args);
}
cmd.args(line);
}
fn print_command(line: Line, args: &Args) {
let color_choice = match args.color {
Some(Coloring::Auto) | None => ColorChoice::Auto,
Some(Coloring::Always) => ColorChoice::Always,
Some(Coloring::Never) => ColorChoice::Never,
};
let mut stream = StandardStream::stderr(color_choice);
let _ = stream.set_color(ColorSpec::new().set_bold(true).set_fg(Some(Green)));
let _ = write!(stream, "{:>12}", "Running");
let _ = stream.reset();
let _ = writeln!(stream, " `{}`", line);
}
fn filter_err(cmd: &mut Command, ignore: fn(&str) -> bool) -> io::Result<i32> {
let mut child = cmd.stderr(Stdio::piped()).spawn()?;
let mut stderr = io::BufReader::new(child.stderr.take().unwrap());
let mut line = String::new();
while let Ok(n) = stderr.read_line(&mut line) {
if n == 0 {
break;
}
if !ignore(&line) {
let _ = write!(io::stderr(), "{}", line);
}
line.clear();
}
let code = child.wait()?.code().unwrap_or(1);
Ok(code)
}
fn ignore_cargo_err(line: &str) -> bool {
if line.trim().is_empty() {
return true;
}
let blacklist = [
"ignoring specified output filename because multiple outputs were \
requested",
"ignoring specified output filename for 'link' output because multiple \
outputs were requested",
"ignoring --out-dir flag due to -o flag",
"ignoring -C extra-filename flag due to -o flag",
"due to multiple output types requested, the explicitly specified \
output file name will be adapted for each output type",
];
for s in &blacklist {
if line.contains(s) {
return true;
}
}
false
}
| cargo_expand_or_run_nightly | identifier_name |
main.rs | mod cmd;
mod config;
mod edit;
mod error;
mod fmt;
mod opts;
use std::env;
use std::ffi::OsString;
use std::fs;
use std::io::{self, BufRead, Write};
use std::path::{Path, PathBuf};
use std::process::{self, Command, Stdio};
use atty::Stream::{Stderr, Stdout};
use prettyprint::{PagingMode, PrettyPrinter};
use quote::quote;
use structopt::StructOpt;
use termcolor::{Color::Green, ColorChoice, ColorSpec, StandardStream, WriteColor};
use crate::cmd::Line;
use crate::error::Result;
use crate::opts::Coloring::*;
use crate::opts::{Args, Coloring, Opts};
fn main() {
let result = cargo_expand_or_run_nightly();
process::exit(match result {
Ok(code) => code,
Err(err) => {
let _ = writeln!(io::stderr(), "{}", err);
1
}
});
}
fn cargo_expand_or_run_nightly() -> Result<i32> {
const NO_RUN_NIGHTLY: &str = "CARGO_EXPAND_NO_RUN_NIGHTLY";
let maybe_nightly = !definitely_not_nightly();
if maybe_nightly || env::var_os(NO_RUN_NIGHTLY).is_some() {
return cargo_expand();
}
let mut nightly = Command::new("cargo");
nightly.arg("+nightly");
nightly.arg("expand");
let mut args = env::args_os().peekable();
args.next().unwrap(); // cargo
if args.peek().map_or(false, |arg| arg == "expand") {
args.next().unwrap(); // expand
}
nightly.args(args);
// Hopefully prevent infinite re-run loop.
nightly.env(NO_RUN_NIGHTLY, "");
let status = nightly.status()?;
Ok(match status.code() {
Some(code) => code,
None => {
if status.success() {
0
} else {
1 | })
}
fn definitely_not_nightly() -> bool {
let mut cmd = Command::new(cargo_binary());
cmd.arg("--version");
let output = match cmd.output() {
Ok(output) => output,
Err(_) => return false,
};
let version = match String::from_utf8(output.stdout) {
Ok(version) => version,
Err(_) => return false,
};
version.starts_with("cargo 1") &&!version.contains("nightly")
}
fn cargo_binary() -> OsString {
env::var_os("CARGO").unwrap_or_else(|| "cargo".to_owned().into())
}
fn cargo_expand() -> Result<i32> {
let Opts::Expand(args) = Opts::from_args();
let config = config::deserialize();
if args.themes {
for theme in PrettyPrinter::default()
.build()
.unwrap()
.get_themes()
.keys()
{
let _ = writeln!(io::stdout(), "{}", theme);
}
return Ok(0);
}
let rustfmt;
match (&args.item, args.ugly) {
(Some(item), true) => {
let _ = writeln!(
io::stderr(),
"ERROR: cannot expand single item ({}) in ugly mode.",
item,
);
return Ok(1);
}
(Some(item), false) => {
rustfmt = which_rustfmt();
if rustfmt.is_none() {
let _ = writeln!(
io::stderr(),
"ERROR: cannot expand single item ({}) without rustfmt.",
item,
);
let _ = writeln!(
io::stderr(),
"Install rustfmt by running `rustup component add rustfmt --toolchain nightly`.",
);
return Ok(1);
}
}
(None, true) => rustfmt = None,
(None, false) => rustfmt = which_rustfmt(),
}
let mut builder = tempfile::Builder::new();
builder.prefix("cargo-expand");
let outdir = builder.tempdir().expect("failed to create tmp file");
let outfile_path = outdir.path().join("expanded");
// Run cargo
let mut cmd = Command::new(cargo_binary());
apply_args(&mut cmd, &args, &outfile_path);
let code = filter_err(&mut cmd, ignore_cargo_err)?;
if !outfile_path.exists() {
return Ok(1);
}
let mut content = fs::read_to_string(&outfile_path)?;
if content.is_empty() {
let _ = writeln!(io::stderr(), "ERROR: rustc produced no expanded output",);
return Ok(if code == 0 { 1 } else { code });
}
// Run rustfmt
if let Some(rustfmt) = rustfmt {
// Work around rustfmt not being able to parse paths containing $crate.
// This placeholder should be the same width as $crate to preserve
// alignments.
const DOLLAR_CRATE_PLACEHOLDER: &str = "Ξcrate";
content = content.replace("$crate", DOLLAR_CRATE_PLACEHOLDER);
// Discard comments, which are misplaced by the compiler
if let Ok(mut syntax_tree) = syn::parse_file(&content) {
edit::remove_macro_rules(&mut syntax_tree);
if let Some(filter) = args.item {
syntax_tree.shebang = None;
syntax_tree.attrs.clear();
syntax_tree.items = filter.apply_to(&syntax_tree);
if syntax_tree.items.is_empty() {
let _ = writeln!(io::stderr(), "WARNING: no such item: {}", filter);
return Ok(1);
}
}
content = quote!(#syntax_tree).to_string();
}
fs::write(&outfile_path, content)?;
fmt::write_rustfmt_config(&outdir)?;
// Ignore any errors.
let _status = Command::new(rustfmt)
.arg("--edition=2018")
.arg(&outfile_path)
.stderr(Stdio::null())
.status();
content = fs::read_to_string(&outfile_path)?;
content = content.replace(DOLLAR_CRATE_PLACEHOLDER, "$crate");
}
// Run pretty printer
let theme = args.theme.or(config.theme);
let none_theme = theme.as_ref().map(String::as_str) == Some("none");
let do_color = match args.color {
Some(Always) => true,
Some(Never) => false,
None | Some(Auto) => !none_theme && atty::is(Stdout),
};
let _ = writeln!(io::stderr());
if do_color {
if content.ends_with('\n') {
// Pretty printer seems to print an extra trailing newline.
content.truncate(content.len() - 1);
}
let mut builder = PrettyPrinter::default();
builder.header(false);
builder.grid(false);
builder.line_numbers(false);
builder.language("rust");
builder.paging_mode(PagingMode::Never);
if let Some(theme) = theme {
builder.theme(theme);
}
let printer = builder.build().unwrap();
// Ignore any errors.
let _ = printer.string(content);
} else {
let _ = write!(io::stdout(), "{}", content);
}
Ok(0)
}
fn which_rustfmt() -> Option<PathBuf> {
match env::var_os("RUSTFMT") {
Some(which) => {
if which.is_empty() {
None
} else {
Some(PathBuf::from(which))
}
}
None => toolchain_find::find_installed_component("rustfmt"),
}
}
// Based on https://github.com/rsolomo/cargo-check
fn apply_args(cmd: &mut Command, args: &Args, outfile: &Path) {
let mut line = Line::new("cargo");
line.arg("rustc");
if args.tests && args.test.is_none() {
line.arg("--profile=test");
} else {
line.arg("--profile=check");
}
if let Some(features) = &args.features {
line.arg("--features");
line.arg(features);
}
if args.all_features {
line.arg("--all-features");
}
if args.no_default_features {
line.arg("--no-default-features");
}
if args.lib {
line.arg("--lib");
}
if let Some(bin) = &args.bin {
line.arg("--bin");
line.arg(bin);
}
if let Some(example) = &args.example {
line.arg("--example");
line.arg(example);
}
if let Some(test) = &args.test {
line.arg("--test");
line.arg(test);
}
if let Some(bench) = &args.bench {
line.arg("--bench");
line.arg(bench);
}
if let Some(target) = &args.target {
line.arg("--target");
line.arg(target);
}
if let Some(target_dir) = &args.target_dir {
line.arg("--target-dir");
line.arg(target_dir);
}
if let Some(manifest_path) = &args.manifest_path {
line.arg("--manifest-path");
line.arg(manifest_path);
}
if let Some(package) = &args.package {
line.arg("--package");
line.arg(package);
}
if let Some(jobs) = args.jobs {
line.arg("--jobs");
line.arg(jobs.to_string());
}
if args.verbose {
line.arg("--verbose");
}
line.arg("--color");
if let Some(color) = &args.color {
line.arg(color.to_string());
} else {
line.arg(if atty::is(Stderr) { "always" } else { "never" });
}
if args.frozen {
line.arg("--frozen");
}
if args.locked {
line.arg("--locked");
}
for unstable_flag in &args.unstable_flags {
line.arg("-Z");
line.arg(unstable_flag);
}
line.arg("--");
line.arg("-o");
line.arg(outfile);
line.arg("-Zunstable-options");
line.arg("--pretty=expanded");
if args.verbose {
let mut display = line.clone();
display.insert(0, "+nightly");
print_command(display, args);
}
cmd.args(line);
}
fn print_command(line: Line, args: &Args) {
let color_choice = match args.color {
Some(Coloring::Auto) | None => ColorChoice::Auto,
Some(Coloring::Always) => ColorChoice::Always,
Some(Coloring::Never) => ColorChoice::Never,
};
let mut stream = StandardStream::stderr(color_choice);
let _ = stream.set_color(ColorSpec::new().set_bold(true).set_fg(Some(Green)));
let _ = write!(stream, "{:>12}", "Running");
let _ = stream.reset();
let _ = writeln!(stream, " `{}`", line);
}
fn filter_err(cmd: &mut Command, ignore: fn(&str) -> bool) -> io::Result<i32> {
let mut child = cmd.stderr(Stdio::piped()).spawn()?;
let mut stderr = io::BufReader::new(child.stderr.take().unwrap());
let mut line = String::new();
while let Ok(n) = stderr.read_line(&mut line) {
if n == 0 {
break;
}
if !ignore(&line) {
let _ = write!(io::stderr(), "{}", line);
}
line.clear();
}
let code = child.wait()?.code().unwrap_or(1);
Ok(code)
}
fn ignore_cargo_err(line: &str) -> bool {
if line.trim().is_empty() {
return true;
}
let blacklist = [
"ignoring specified output filename because multiple outputs were \
requested",
"ignoring specified output filename for 'link' output because multiple \
outputs were requested",
"ignoring --out-dir flag due to -o flag",
"ignoring -C extra-filename flag due to -o flag",
"due to multiple output types requested, the explicitly specified \
output file name will be adapted for each output type",
];
for s in &blacklist {
if line.contains(s) {
return true;
}
}
false
} | }
} | random_line_split |
main.rs | mod cmd;
mod config;
mod edit;
mod error;
mod fmt;
mod opts;
use std::env;
use std::ffi::OsString;
use std::fs;
use std::io::{self, BufRead, Write};
use std::path::{Path, PathBuf};
use std::process::{self, Command, Stdio};
use atty::Stream::{Stderr, Stdout};
use prettyprint::{PagingMode, PrettyPrinter};
use quote::quote;
use structopt::StructOpt;
use termcolor::{Color::Green, ColorChoice, ColorSpec, StandardStream, WriteColor};
use crate::cmd::Line;
use crate::error::Result;
use crate::opts::Coloring::*;
use crate::opts::{Args, Coloring, Opts};
fn main() {
let result = cargo_expand_or_run_nightly();
process::exit(match result {
Ok(code) => code,
Err(err) => {
let _ = writeln!(io::stderr(), "{}", err);
1
}
});
}
fn cargo_expand_or_run_nightly() -> Result<i32> {
const NO_RUN_NIGHTLY: &str = "CARGO_EXPAND_NO_RUN_NIGHTLY";
let maybe_nightly = !definitely_not_nightly();
if maybe_nightly || env::var_os(NO_RUN_NIGHTLY).is_some() {
return cargo_expand();
}
let mut nightly = Command::new("cargo");
nightly.arg("+nightly");
nightly.arg("expand");
let mut args = env::args_os().peekable();
args.next().unwrap(); // cargo
if args.peek().map_or(false, |arg| arg == "expand") {
args.next().unwrap(); // expand
}
nightly.args(args);
// Hopefully prevent infinite re-run loop.
nightly.env(NO_RUN_NIGHTLY, "");
let status = nightly.status()?;
Ok(match status.code() {
Some(code) => code,
None => {
if status.success() {
0
} else {
1
}
}
})
}
fn definitely_not_nightly() -> bool {
let mut cmd = Command::new(cargo_binary());
cmd.arg("--version");
let output = match cmd.output() {
Ok(output) => output,
Err(_) => return false,
};
let version = match String::from_utf8(output.stdout) {
Ok(version) => version,
Err(_) => return false,
};
version.starts_with("cargo 1") &&!version.contains("nightly")
}
fn cargo_binary() -> OsString {
env::var_os("CARGO").unwrap_or_else(|| "cargo".to_owned().into())
}
fn cargo_expand() -> Result<i32> {
let Opts::Expand(args) = Opts::from_args();
let config = config::deserialize();
if args.themes {
for theme in PrettyPrinter::default()
.build()
.unwrap()
.get_themes()
.keys()
{
let _ = writeln!(io::stdout(), "{}", theme);
}
return Ok(0);
}
let rustfmt;
match (&args.item, args.ugly) {
(Some(item), true) => {
let _ = writeln!(
io::stderr(),
"ERROR: cannot expand single item ({}) in ugly mode.",
item,
);
return Ok(1);
}
(Some(item), false) => {
rustfmt = which_rustfmt();
if rustfmt.is_none() {
let _ = writeln!(
io::stderr(),
"ERROR: cannot expand single item ({}) without rustfmt.",
item,
);
let _ = writeln!(
io::stderr(),
"Install rustfmt by running `rustup component add rustfmt --toolchain nightly`.",
);
return Ok(1);
}
}
(None, true) => rustfmt = None,
(None, false) => rustfmt = which_rustfmt(),
}
let mut builder = tempfile::Builder::new();
builder.prefix("cargo-expand");
let outdir = builder.tempdir().expect("failed to create tmp file");
let outfile_path = outdir.path().join("expanded");
// Run cargo
let mut cmd = Command::new(cargo_binary());
apply_args(&mut cmd, &args, &outfile_path);
let code = filter_err(&mut cmd, ignore_cargo_err)?;
if !outfile_path.exists() {
return Ok(1);
}
let mut content = fs::read_to_string(&outfile_path)?;
if content.is_empty() {
let _ = writeln!(io::stderr(), "ERROR: rustc produced no expanded output",);
return Ok(if code == 0 { 1 } else { code });
}
// Run rustfmt
if let Some(rustfmt) = rustfmt {
// Work around rustfmt not being able to parse paths containing $crate.
// This placeholder should be the same width as $crate to preserve
// alignments.
const DOLLAR_CRATE_PLACEHOLDER: &str = "Ξcrate";
content = content.replace("$crate", DOLLAR_CRATE_PLACEHOLDER);
// Discard comments, which are misplaced by the compiler
if let Ok(mut syntax_tree) = syn::parse_file(&content) {
edit::remove_macro_rules(&mut syntax_tree);
if let Some(filter) = args.item {
syntax_tree.shebang = None;
syntax_tree.attrs.clear();
syntax_tree.items = filter.apply_to(&syntax_tree);
if syntax_tree.items.is_empty() {
let _ = writeln!(io::stderr(), "WARNING: no such item: {}", filter);
return Ok(1);
}
}
content = quote!(#syntax_tree).to_string();
}
fs::write(&outfile_path, content)?;
fmt::write_rustfmt_config(&outdir)?;
// Ignore any errors.
let _status = Command::new(rustfmt)
.arg("--edition=2018")
.arg(&outfile_path)
.stderr(Stdio::null())
.status();
content = fs::read_to_string(&outfile_path)?;
content = content.replace(DOLLAR_CRATE_PLACEHOLDER, "$crate");
}
// Run pretty printer
let theme = args.theme.or(config.theme);
let none_theme = theme.as_ref().map(String::as_str) == Some("none");
let do_color = match args.color {
Some(Always) => true,
Some(Never) => false,
None | Some(Auto) => !none_theme && atty::is(Stdout),
};
let _ = writeln!(io::stderr());
if do_color {
if content.ends_with('\n') {
// Pretty printer seems to print an extra trailing newline.
content.truncate(content.len() - 1);
}
let mut builder = PrettyPrinter::default();
builder.header(false);
builder.grid(false);
builder.line_numbers(false);
builder.language("rust");
builder.paging_mode(PagingMode::Never);
if let Some(theme) = theme {
builder.theme(theme);
}
let printer = builder.build().unwrap();
// Ignore any errors.
let _ = printer.string(content);
} else {
let _ = write!(io::stdout(), "{}", content);
}
Ok(0)
}
fn which_rustfmt() -> Option<PathBuf> {
match env::var_os("RUSTFMT") {
Some(which) => {
if which.is_empty() {
None
} else {
Some(PathBuf::from(which))
}
}
None => toolchain_find::find_installed_component("rustfmt"),
}
}
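// Usage note (sketch): the RUSTFMT environment variable takes precedence over
// the toolchain lookup, and an empty value skips formatting entirely, e.g.
//   RUSTFMT=/path/to/rustfmt cargo expand
//   RUSTFMT= cargo expand        # expand without running rustfmt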
// Based on https://github.com/rsolomo/cargo-check
fn apply_args(cmd: &mut Command, args: &Args, outfile: &Path) {
let mut line = Line::new("cargo");
line.arg("rustc");
if args.tests && args.test.is_none() {
line.arg("--profile=test");
} else {
line.arg("--profile=check");
}
if let Some(features) = &args.features {
line.arg("--features");
line.arg(features);
}
if args.all_features {
line.arg("--all-features");
}
if args.no_default_features {
line.arg("--no-default-features");
}
if args.lib {
line.arg("--lib");
}
if let Some(bin) = &args.bin {
line.arg("--bin");
line.arg(bin);
}
if let Some(example) = &args.example {
line.arg("--example");
line.arg(example);
}
if let Some(test) = &args.test {
line.arg("--test");
line.arg(test);
}
if let Some(bench) = &args.bench {
line.arg("--bench");
line.arg(bench);
}
if let Some(target) = &args.target {
line.arg("--target");
line.arg(target);
}
if let Some(target_dir) = &args.target_dir {
line.arg("--target-dir");
line.arg(target_dir);
}
if let Some(manifest_path) = &args.manifest_path {
line.arg("--manifest-path");
line.arg(manifest_path);
}
if let Some(package) = &args.package {
line.arg("--package");
line.arg(package);
}
if let Some(jobs) = args.jobs {
line.arg("--jobs");
line.arg(jobs.to_string());
}
if args.verbose {
line.arg("--verbose");
}
line.arg("--color");
if let Some(color) = &args.color {
line.arg(color.to_string());
} else { |
if args.frozen {
line.arg("--frozen");
}
if args.locked {
line.arg("--locked");
}
for unstable_flag in &args.unstable_flags {
line.arg("-Z");
line.arg(unstable_flag);
}
line.arg("--");
line.arg("-o");
line.arg(outfile);
line.arg("-Zunstable-options");
line.arg("--pretty=expanded");
if args.verbose {
let mut display = line.clone();
display.insert(0, "+nightly");
print_command(display, args);
}
cmd.args(line);
}
fn print_command(line: Line, args: &Args) {
let color_choice = match args.color {
Some(Coloring::Auto) | None => ColorChoice::Auto,
Some(Coloring::Always) => ColorChoice::Always,
Some(Coloring::Never) => ColorChoice::Never,
};
let mut stream = StandardStream::stderr(color_choice);
let _ = stream.set_color(ColorSpec::new().set_bold(true).set_fg(Some(Green)));
let _ = write!(stream, "{:>12}", "Running");
let _ = stream.reset();
let _ = writeln!(stream, " `{}`", line);
}
fn filter_err(cmd: &mut Command, ignore: fn(&str) -> bool) -> io::Result<i32> {
let mut child = cmd.stderr(Stdio::piped()).spawn()?;
let mut stderr = io::BufReader::new(child.stderr.take().unwrap());
let mut line = String::new();
while let Ok(n) = stderr.read_line(&mut line) {
if n == 0 {
break;
}
if !ignore(&line) {
let _ = write!(io::stderr(), "{}", line);
}
line.clear();
}
let code = child.wait()?.code().unwrap_or(1);
Ok(code)
}
fn ignore_cargo_err(line: &str) -> bool {
if line.trim().is_empty() {
return true;
}
let blacklist = [
"ignoring specified output filename because multiple outputs were \
requested",
"ignoring specified output filename for 'link' output because multiple \
outputs were requested",
"ignoring --out-dir flag due to -o flag",
"ignoring -C extra-filename flag due to -o flag",
"due to multiple output types requested, the explicitly specified \
output file name will be adapted for each output type",
];
for s in &blacklist {
if line.contains(s) {
return true;
}
}
false
}
|
line.arg(if atty::is(Stderr) { "always" } else { "never" });
}
| conditional_block |
miner.rs | use crate::network::message::{Message};
use crate::network::server::Handle as ServerHandle;
use std::sync::{Arc, Mutex};
use crate::crypto::hash::{H256, Hashable, H160};
use crate::blockchain::Blockchain;
use crate::block::{Block,Header,Content};
use crate::crypto::merkle::{MerkleTree};
use crate::transaction::{Transaction, Mempool, SignedTransaction, StateWitness};
use rand::{thread_rng, Rng};
use ring::{digest};
use log::{info,debug};
use crossbeam::channel::{unbounded, Receiver, Sender, TryRecvError};
use std::{time, fs};
use std::time::{SystemTime, UNIX_EPOCH};
use std::thread;
use ring::signature::Ed25519KeyPair;
enum ControlSignal {
Start(u64), // the number controls the lambda of interval between block generation
Exit,
}
enum OperatingState {
Paused,
Run(u64),
ShutDown,
}
pub struct Context {
/// Channel for receiving control signal
local_address: H160,
local_public_key: Vec<u8>,
mempool: Arc<Mutex<Mempool>>,
stateWitness: Arc<Mutex<StateWitness>>,
//stateSet: Arc<Mutex<StateSet>>,
blockchain: Arc<Mutex<Blockchain>>,
control_chan: Receiver<ControlSignal>,
operating_state: OperatingState,
server: ServerHandle,
ifArchival: bool,
}
#[derive(Clone)]
pub struct Handle {
/// Channel for sending signal to the miner thread
control_chan: Sender<ControlSignal>,
}
pub fn new(
server: &ServerHandle,
mempool: &Arc<Mutex<Mempool>>,
stateWitness: &Arc<Mutex<StateWitness>>,
//stateSet: &Arc<Mutex<StateSet>>,
blockchain: &Arc<Mutex<Blockchain>>,
local_public_key: &[u8],
local_address: &H160,
ifArchival: bool,
) -> (Context, Handle) {
let (signal_chan_sender, signal_chan_receiver) = unbounded();
let ctx = Context {
local_address: *local_address,
local_public_key: (*local_public_key).to_owned(),
mempool: Arc::clone(mempool),
stateWitness: Arc::clone(stateWitness),
//stateSet: Arc::clone(stateSet),
blockchain: Arc::clone(blockchain),
control_chan: signal_chan_receiver,
operating_state: OperatingState::Paused,
server: server.clone(),
ifArchival: ifArchival,
};
let handle = Handle {
control_chan: signal_chan_sender,
};
(ctx, handle)
}
impl Handle {
pub fn exit(&self) {
self.control_chan.send(ControlSignal::Exit).unwrap();
}
pub fn start(&self, lambda: u64) {
self.control_chan
.send(ControlSignal::Start(lambda))
.unwrap();
}
}
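// Typical wiring (sketch; the surrounding variable names are assumptions):
//   let (ctx, miner) = miner::new(&server, &mempool, &state_witness,
//                                 &blockchain, &public_key_bytes, &address, false);
//   ctx.start();          // spawn the "miner" thread, initially paused
//   miner.start(lambda);  // switch it into continuous mining
//   ...
//   miner.exit();         // ask the thread to shut down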
impl Context {
pub fn start(mut self) {
thread::Builder::new()
.name("miner".to_string())
.spawn(move || {
self.miner_loop();
})
.unwrap();
info!("Miner initialized into paused mode");
}
fn handle_control_signal(&mut self, signal: ControlSignal) {
match signal {
ControlSignal::Exit => {
info!("Miner shutting down");
self.operating_state = OperatingState::ShutDown;
}
ControlSignal::Start(i) => {
info!("Miner starting in continuous mode with lambda {}", i);
self.operating_state = OperatingState::Run(i);
}
}
}
fn miner_loop(&mut self) {
let mut miner_counter:i32 = 0;
//let mut readICO = false;
// main mining loop
loop {
// check and react to control signals
match self.operating_state {
OperatingState::Paused => {
let signal = self.control_chan.recv().unwrap();
self.handle_control_signal(signal);
continue;
}
OperatingState::ShutDown => {
return;
}
_ => match self.control_chan.try_recv() {
Ok(signal) => {
self.handle_control_signal(signal);
}
Err(TryRecvError::Empty) => {}
Err(TryRecvError::Disconnected) => panic!("Miner control channel detached"),
},
}
if let OperatingState::ShutDown = self.operating_state {
return;
}
//Read ICO & Update initial state
/*
if!readICO {
// Initialize State
//println!("local: {:?}", self.local_address);
let mut state = self.state.lock().unwrap();
println!("ICO: THE ICO IS WORKING ON PROCESSES: {:?}",self.local_address);
let data = fs::read("ICO.txt").expect("Unable to read file");
let data_len: usize = (data.len() / 20) as usize;
println!("data_length: {:?}", data.len());
for i in 0..data_len {
let mut start = i * 20;
let mut end = (i + 1) * 20;
let mut addr_u8: [u8; 20] = [0; 20];
addr_u8.clone_from_slice(&data[start..end]);
let mut address: H160 = <H160>::from(addr_u8);
//println!("all: {:?}", address);
state.Outputs.insert((<H256>::from(digest::digest(&digest::SHA256, &[0x00 as u8])), i as u32), (100.0 as f32, address));
}
readICO = true;
println!("LOCAL STATES: {:?}", state.Outputs);
println!("PROCESS {:?} CAN START TO MINE BLOCKS.",self.local_address);
std::mem::drop(state);
}
*/
// TODO: actual mining
if self.mempool.lock().unwrap().Transactions.keys().len() > 0 && !self.ifArchival {
//info!("MINER: STARTING...");
let nonce:u32 = thread_rng().gen();
let timestamp = SystemTime::now().duration_since(UNIX_EPOCH).expect("Time went backwards").as_millis();
// difficulty
let mut bytes32 = [255u8;32];
bytes32[0]=1;
bytes32[1]=1;
let difficulty : H256 = bytes32.into();
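// Rough estimate: with the two leading bytes forced to 0x01 the target is
// 0x0101ff…ff, so about 0x0102 / 0x10000 ≈ 1 in 254 candidate hashes pass the
// `newBlock.hash() <= difficulty` check below.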
// read transactions from mempool
let mut signedTransaction = Vec::<SignedTransaction>::new();
let block_size_limit = 5;
let mut tx_counter = 0;
//let mut state = self.state.lock().unwrap();
let mut stateWitness = self.stateWitness.lock().unwrap();
let mut mempool = self.mempool.lock().unwrap();
let mut key_iter= mempool.Transactions.keys();
for key in mempool.Transactions.keys(){
//println!("MINER: MEMPOOL KEYS:{:?}, INPUT: {:?}, OUTPUT: {:?}", key, mempool.Transactions.get(key).unwrap().transaction.Input, mempool.Transactions.get(key).unwrap().transaction.Output);
}
while tx_counter < block_size_limit {
match key_iter.next() {
Some(hash) => {
//println!("Miner: tx: {:?}",hash);
//println!("Miner: preTx: {:?}, PreIndex: {:?}",mempool.getPreTxHash(hash), mempool.getPreIndex(hash));
//double spent check and verify signature
if stateWitness.ifNotDoubleSpent(&mempool.Transactions.get(&hash).unwrap().transaction.Input, &self.blockchain.lock().unwrap().tip.0) // NEW TODO: change it to state witness
&& mempool.Transactions.get(hash).unwrap().verifySignedTransaction() {
//info!("Miner: Adding to block HERE");
signedTransaction.push(mempool.Transactions.get(hash).unwrap().clone());
tx_counter = tx_counter + 1;
}
}
None => {
break;
}
}
}
std::mem::drop(mempool);
std::mem::drop(stateWitness);
if !signedTransaction.is_empty() {
//info!("MINER: ADDING...");
//info!("MINER: MERKLETREE CHECKING...");
let mut MerkleTree = MerkleTree::new(&signedTransaction);
//info!("MINER: MERKLETREE CHECKED");
let newContent = Content{
content: signedTransaction,
};
let newHeader = Header{
parent: self.blockchain.lock().unwrap().tip(),
nonce: nonce,
difficulty: difficulty,
timestamp: timestamp,
merkleRoot: MerkleTree.root(),
};
let newBlock = Block{
Header: newHeader,
Content: newContent,
};
//println!("1: {:?}", newBlock.hash() );
//println!("2: {:?}", difficulty );
//info!("MINER: BLOCK CREATED");
if newBlock.hash() <= difficulty {
let mut contents = newBlock.Content.content.clone();
//let mut state = self.state.lock().unwrap();
let mut stateWitness = self.stateWitness.lock().unwrap();
let mut mempool = self.mempool.lock().unwrap();
//let mut stateSet = self.stateSet.lock().unwrap();
let mut check = true;
for content in contents.iter(){
if stateWitness.ifNotDoubleSpent(&content.transaction.Input, &self.blockchain.lock().unwrap().tip.0)
&& content.verifySignedTransaction() {//state.ifNotDoubleSpent(content)
check = check && true;
}
else{
check = check && false;
break;
}
}
std::mem::drop(stateWitness);
std::mem::drop(mempool);
if check {
let mut blockchain = self.blockchain.lock().unwrap();
let tip_hash = blockchain.insert(&newBlock);
//info!("MINER: NEW BLOCK ADDED");
miner_counter += 1;
println!("MINER: CURRENT MINER COUNT: {:?}", miner_counter);
println!("MINER: CURRENT BLOCKCHAIN HEIGHT: {:?}", blockchain.tip.1);
//let mut state = self.state.lock().unwrap();
//let mut stateWitness = self.stateWitness.lock().unwrap();
let mut mempool = self.mempool.lock().unwrap();
mempool.updateMempool(&contents);
/*for key in state.Outputs.keys() {
println!("MINER: RECP: {:?}, VALUE {:?}", state.Outputs.get(key).unwrap().1, state.Outputs.get(key).unwrap().0);
}*/
self.server.broadcast(Message::NewBlockHashes(blockchain.all_blocks_in_longest_chain()));
//info!("MINER: BLOCK MESSAGES SENT");
std::mem::drop(blockchain);
std::mem::drop(mempool);
}
}
}
}
if let OperatingState::Run(i) = self.operating_state {
if i != 0 {
let interval = time::Duration::from_micros(i as u64); | thread::sleep(interval);
}
}
} | thread::sleep(interval);
}
}
let interval = time::Duration::from_micros(1000 as u64); | random_line_split |
miner.rs | use crate::network::message::{Message};
use crate::network::server::Handle as ServerHandle;
use std::sync::{Arc, Mutex};
use crate::crypto::hash::{H256, Hashable, H160};
use crate::blockchain::Blockchain;
use crate::block::{Block,Header,Content};
use crate::crypto::merkle::{MerkleTree};
use crate::transaction::{Transaction, Mempool, SignedTransaction, StateWitness};
use rand::{thread_rng, Rng};
use ring::{digest};
use log::{info,debug};
use crossbeam::channel::{unbounded, Receiver, Sender, TryRecvError};
use std::{time, fs};
use std::time::{SystemTime, UNIX_EPOCH};
use std::thread;
use ring::signature::Ed25519KeyPair;
enum ControlSignal {
Start(u64), // the number controls the lambda of interval between block generation
Exit,
}
enum OperatingState {
Paused,
Run(u64),
ShutDown,
}
pub struct Context {
/// Channel for receiving control signal
local_address: H160,
local_public_key: Vec<u8>,
mempool: Arc<Mutex<Mempool>>,
stateWitness: Arc<Mutex<StateWitness>>,
//stateSet: Arc<Mutex<StateSet>>,
blockchain: Arc<Mutex<Blockchain>>,
control_chan: Receiver<ControlSignal>,
operating_state: OperatingState,
server: ServerHandle,
ifArchival: bool,
}
#[derive(Clone)]
pub struct Handle {
/// Channel for sending signal to the miner thread
control_chan: Sender<ControlSignal>,
}
pub fn new(
server: &ServerHandle,
mempool: &Arc<Mutex<Mempool>>,
stateWitness: &Arc<Mutex<StateWitness>>,
//stateSet: &Arc<Mutex<StateSet>>,
blockchain: &Arc<Mutex<Blockchain>>,
local_public_key: &[u8],
local_address: &H160,
ifArchival: bool,
) -> (Context, Handle) {
let (signal_chan_sender, signal_chan_receiver) = unbounded();
let ctx = Context {
local_address: *local_address,
local_public_key: (*local_public_key).to_owned(),
mempool: Arc::clone(mempool),
stateWitness: Arc::clone(stateWitness),
//stateSet: Arc::clone(stateSet),
blockchain: Arc::clone(blockchain),
control_chan: signal_chan_receiver,
operating_state: OperatingState::Paused,
server: server.clone(),
ifArchival: ifArchival,
};
let handle = Handle {
control_chan: signal_chan_sender,
};
(ctx, handle)
}
impl Handle {
pub fn | (&self) {
self.control_chan.send(ControlSignal::Exit).unwrap();
}
pub fn start(&self, lambda: u64) {
self.control_chan
.send(ControlSignal::Start(lambda))
.unwrap();
}
}
impl Context {
pub fn start(mut self) {
thread::Builder::new()
.name("miner".to_string())
.spawn(move || {
self.miner_loop();
})
.unwrap();
info!("Miner initialized into paused mode");
}
fn handle_control_signal(&mut self, signal: ControlSignal) {
match signal {
ControlSignal::Exit => {
info!("Miner shutting down");
self.operating_state = OperatingState::ShutDown;
}
ControlSignal::Start(i) => {
info!("Miner starting in continuous mode with lambda {}", i);
self.operating_state = OperatingState::Run(i);
}
}
}
fn miner_loop(&mut self) {
let mut miner_counter:i32 = 0;
//let mut readICO = false;
// main mining loop
loop {
// check and react to control signals
match self.operating_state {
OperatingState::Paused => {
let signal = self.control_chan.recv().unwrap();
self.handle_control_signal(signal);
continue;
}
OperatingState::ShutDown => {
return;
}
_ => match self.control_chan.try_recv() {
Ok(signal) => {
self.handle_control_signal(signal);
}
Err(TryRecvError::Empty) => {}
Err(TryRecvError::Disconnected) => panic!("Miner control channel detached"),
},
}
if let OperatingState::ShutDown = self.operating_state {
return;
}
//Read ICO & Update initial state
/*
if!readICO {
// Initialize State
//println!("local: {:?}", self.local_address);
let mut state = self.state.lock().unwrap();
println!("ICO: THE ICO IS WORKING ON PROCESSES: {:?}",self.local_address);
let data = fs::read("ICO.txt").expect("Unable to read file");
let data_len: usize = (data.len() / 20) as usize;
println!("data_length: {:?}", data.len());
for i in 0..data_len {
let mut start = i * 20;
let mut end = (i + 1) * 20;
let mut addr_u8: [u8; 20] = [0; 20];
addr_u8.clone_from_slice(&data[start..end]);
let mut address: H160 = <H160>::from(addr_u8);
//println!("all: {:?}", address);
state.Outputs.insert((<H256>::from(digest::digest(&digest::SHA256, &[0x00 as u8])), i as u32), (100.0 as f32, address));
}
readICO = true;
println!("LOCAL STATES: {:?}", state.Outputs);
println!("PROCESS {:?} CAN START TO MINE BLOCKS.",self.local_address);
std::mem::drop(state);
}
*/
// TODO: actual mining
if self.mempool.lock().unwrap().Transactions.keys().len() > 0 && !self.ifArchival {
//info!("MINER: STARTING...");
let nonce:u32 = thread_rng().gen();
let timestamp = SystemTime::now().duration_since(UNIX_EPOCH).expect("Time went backwards").as_millis();
// difficulty
let mut bytes32 = [255u8;32];
bytes32[0]=1;
bytes32[1]=1;
let difficulty : H256 = bytes32.into();
// read transactions from mempool
let mut signedTransaction = Vec::<SignedTransaction>::new();
let block_size_limit = 5;
let mut tx_counter = 0;
//let mut state = self.state.lock().unwrap();
let mut stateWitness = self.stateWitness.lock().unwrap();
let mut mempool = self.mempool.lock().unwrap();
let mut key_iter= mempool.Transactions.keys();
for key in mempool.Transactions.keys(){
//println!("MINER: MEMPOOL KEYS:{:?}, INPUT: {:?}, OUTPUT: {:?}", key, mempool.Transactions.get(key).unwrap().transaction.Input, mempool.Transactions.get(key).unwrap().transaction.Output);
}
while tx_counter < block_size_limit {
match key_iter.next() {
Some(hash) => {
//println!("Miner: tx: {:?}",hash);
//println!("Miner: preTx: {:?}, PreIndex: {:?}",mempool.getPreTxHash(hash), mempool.getPreIndex(hash));
//double spent check and verify signature
if stateWitness.ifNotDoubleSpent(&mempool.Transactions.get(&hash).unwrap().transaction.Input, &self.blockchain.lock().unwrap().tip.0) // NEW TODO: change it to state witness
&& mempool.Transactions.get(hash).unwrap().verifySignedTransaction() {
//info!("Miner: Adding to block HERE");
signedTransaction.push(mempool.Transactions.get(hash).unwrap().clone());
tx_counter = tx_counter + 1;
}
}
None => {
break;
}
}
}
std::mem::drop(mempool);
std::mem::drop(stateWitness);
if !signedTransaction.is_empty() {
//info!("MINER: ADDING...");
//info!("MINER: MERKLETREE CHECKING...");
let mut MerkleTree = MerkleTree::new(&signedTransaction);
//info!("MINER: MERKLETREE CHECKED");
let newContent = Content{
content: signedTransaction,
};
let newHeader = Header{
parent: self.blockchain.lock().unwrap().tip(),
nonce: nonce,
difficulty: difficulty,
timestamp: timestamp,
merkleRoot: MerkleTree.root(),
};
let newBlock = Block{
Header: newHeader,
Content: newContent,
};
//println!("1: {:?}", newBlock.hash() );
//println!("2: {:?}", difficulty );
//info!("MINER: BLOCK CREATED");
if newBlock.hash() <= difficulty {
let mut contents = newBlock.Content.content.clone();
//let mut state = self.state.lock().unwrap();
let mut stateWitness = self.stateWitness.lock().unwrap();
let mut mempool = self.mempool.lock().unwrap();
//let mut stateSet = self.stateSet.lock().unwrap();
let mut check = true;
for content in contents.iter(){
if stateWitness.ifNotDoubleSpent(&content.transaction.Input, &self.blockchain.lock().unwrap().tip.0)
&& content.verifySignedTransaction() {//state.ifNotDoubleSpent(content)
check = check && true;
}
else{
check = check && false;
break;
}
}
std::mem::drop(stateWitness);
std::mem::drop(mempool);
if check {
let mut blockchain = self.blockchain.lock().unwrap();
let tip_hash = blockchain.insert(&newBlock);
//info!("MINER: NEW BLOCK ADDED");
miner_counter += 1;
println!("MINER: CURRENT MINER COUNT: {:?}", miner_counter);
println!("MINER: CURRENT BLOCKCHAIN HEIGHT: {:?}", blockchain.tip.1);
//let mut state = self.state.lock().unwrap();
//let mut stateWitness = self.stateWitness.lock().unwrap();
let mut mempool = self.mempool.lock().unwrap();
mempool.updateMempool(&contents);
/*for key in state.Outputs.keys() {
println!("MINER: RECP: {:?}, VALUE {:?}", state.Outputs.get(key).unwrap().1, state.Outputs.get(key).unwrap().0);
}*/
self.server.broadcast(Message::NewBlockHashes(blockchain.all_blocks_in_longest_chain()));
//info!("MINER: BLOCK MESSAGES SENT");
std::mem::drop(blockchain);
std::mem::drop(mempool);
}
}
}
}
if let OperatingState::Run(i) = self.operating_state {
if i != 0 {
let interval = time::Duration::from_micros(i as u64);
thread::sleep(interval);
}
}
let interval = time::Duration::from_micros(1000 as u64);
thread::sleep(interval);
}
}
}
| exit | identifier_name |
miner.rs | use crate::network::message::{Message};
use crate::network::server::Handle as ServerHandle;
use std::sync::{Arc, Mutex};
use crate::crypto::hash::{H256, Hashable, H160};
use crate::blockchain::Blockchain;
use crate::block::{Block,Header,Content};
use crate::crypto::merkle::{MerkleTree};
use crate::transaction::{Transaction, Mempool, SignedTransaction, StateWitness};
use rand::{thread_rng, Rng};
use ring::{digest};
use log::{info,debug};
use crossbeam::channel::{unbounded, Receiver, Sender, TryRecvError};
use std::{time, fs};
use std::time::{SystemTime, UNIX_EPOCH};
use std::thread;
use ring::signature::Ed25519KeyPair;
enum ControlSignal {
Start(u64), // the number controls the lambda of interval between block generation
Exit,
}
enum OperatingState {
Paused,
Run(u64),
ShutDown,
}
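// Miner lifecycle: the thread starts in `Paused` and blocks on the control channel;
// `ControlSignal::Start(lambda)` moves it to `Run(lambda)`, where `lambda` is an extra
// per-iteration sleep in microseconds, and `ControlSignal::Exit` moves it to `ShutDown`,
// which terminates `miner_loop`.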
pub struct Context {
/// Channel for receiving control signal
local_address: H160,
local_public_key: Vec<u8>,
mempool: Arc<Mutex<Mempool>>,
stateWitness: Arc<Mutex<StateWitness>>,
//stateSet: Arc<Mutex<StateSet>>,
blockchain: Arc<Mutex<Blockchain>>,
control_chan: Receiver<ControlSignal>,
operating_state: OperatingState,
server: ServerHandle,
ifArchival: bool,
}
#[derive(Clone)]
pub struct Handle {
/// Channel for sending signal to the miner thread
control_chan: Sender<ControlSignal>,
}
pub fn new(
server: &ServerHandle,
mempool: &Arc<Mutex<Mempool>>,
stateWitness: &Arc<Mutex<StateWitness>>,
//stateSet: &Arc<Mutex<StateSet>>,
blockchain: &Arc<Mutex<Blockchain>>,
local_public_key: &[u8],
local_address: &H160,
ifArchival: bool,
) -> (Context, Handle) {
let (signal_chan_sender, signal_chan_receiver) = unbounded();
let ctx = Context {
local_address: *local_address,
local_public_key: (*local_public_key).to_owned(),
mempool: Arc::clone(mempool),
stateWitness: Arc::clone(stateWitness),
//stateSet: Arc::clone(stateSet),
blockchain: Arc::clone(blockchain),
control_chan: signal_chan_receiver,
operating_state: OperatingState::Paused,
server: server.clone(),
ifArchival: ifArchival,
};
let handle = Handle {
control_chan: signal_chan_sender,
};
(ctx, handle)
}
impl Handle {
pub fn exit(&self) {
self.control_chan.send(ControlSignal::Exit).unwrap();
}
pub fn start(&self, lambda: u64) {
self.control_chan
.send(ControlSignal::Start(lambda))
.unwrap();
}
}
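// Hypothetical usage sketch (not part of the original file): how a caller might wire up
// and drive the miner. The surrounding handles (`server`, `mempool`, ...) are assumed to
// already exist in the caller's context; the 100µs lambda is an arbitrary example value.
#[allow(dead_code)]
fn example_start_miner(
    server: &ServerHandle,
    mempool: &Arc<Mutex<Mempool>>,
    state_witness: &Arc<Mutex<StateWitness>>,
    blockchain: &Arc<Mutex<Blockchain>>,
    public_key: &[u8],
    address: &H160,
) {
    let (ctx, handle) = new(server, mempool, state_witness, blockchain, public_key, address, false);
    ctx.start();        // spawns the "miner" thread in paused mode
    handle.start(100);  // switch to continuous mining with lambda = 100 microseconds
    // ... later, on shutdown:
    handle.exit();
}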
impl Context {
pub fn start(mut self) {
thread::Builder::new()
.name("miner".to_string())
.spawn(move || {
self.miner_loop();
})
.unwrap();
info!("Miner initialized into paused mode");
}
fn handle_control_signal(&mut self, signal: ControlSignal) {
match signal {
ControlSignal::Exit => {
info!("Miner shutting down");
self.operating_state = OperatingState::ShutDown;
}
ControlSignal::Start(i) => {
info!("Miner starting in continuous mode with lambda {}", i);
self.operating_state = OperatingState::Run(i);
}
}
}
fn miner_loop(&mut self) {
let mut miner_counter:i32 = 0;
//let mut readICO = false;
// main mining loop
loop {
// check and react to control signals
match self.operating_state {
OperatingState::Paused => {
let signal = self.control_chan.recv().unwrap();
self.handle_control_signal(signal);
continue;
}
OperatingState::ShutDown => {
return;
}
_ => match self.control_chan.try_recv() {
Ok(signal) => {
self.handle_control_signal(signal);
}
Err(TryRecvError::Empty) => {}
Err(TryRecvError::Disconnected) => panic!("Miner control channel detached"),
},
}
if let OperatingState::ShutDown = self.operating_state {
return;
}
//Read ICO & Update initial state
/*
if!readICO {
// Initialize State
//println!("local: {:?}", self.local_address);
let mut state = self.state.lock().unwrap();
println!("ICO: THE ICO IS WORKING ON PROCESSES: {:?}",self.local_address);
let data = fs::read("ICO.txt").expect("Unable to read file");
let data_len: usize = (data.len() / 20) as usize;
println!("data_length: {:?}", data.len());
for i in 0..data_len {
let mut start = i * 20;
let mut end = (i + 1) * 20;
let mut addr_u8: [u8; 20] = [0; 20];
addr_u8.clone_from_slice(&data[start..end]);
let mut address: H160 = <H160>::from(addr_u8);
//println!("all: {:?}", address);
state.Outputs.insert((<H256>::from(digest::digest(&digest::SHA256, &[0x00 as u8])), i as u32), (100.0 as f32, address));
}
readICO = true;
println!("LOCAL STATES: {:?}", state.Outputs);
println!("PROCESS {:?} CAN START TO MINE BLOCKS.",self.local_address);
std::mem::drop(state);
}
*/
// TODO: actual mining
if self.mempool.lock().unwrap().Transactions.keys().len() > 0 && !self.ifArchival {
//info!("MINER: STARTING...");
let nonce:u32 = thread_rng().gen();
let timestamp = SystemTime::now().duration_since(UNIX_EPOCH).expect("Time went backwards").as_millis();
// difficulty
let mut bytes32 = [255u8;32];
bytes32[0]=1;
bytes32[1]=1;
let difficulty : H256 = bytes32.into();
// read transactions from mempool
let mut signedTransaction = Vec::<SignedTransaction>::new();
let block_size_limit = 5;
let mut tx_counter = 0;
//let mut state = self.state.lock().unwrap();
let mut stateWitness = self.stateWitness.lock().unwrap();
let mut mempool = self.mempool.lock().unwrap();
let mut key_iter= mempool.Transactions.keys();
for key in mempool.Transactions.keys(){
//println!("MINER: MEMPOOL KEYS:{:?}, INPUT: {:?}, OUTPUT: {:?}", key, mempool.Transactions.get(key).unwrap().transaction.Input, mempool.Transactions.get(key).unwrap().transaction.Output);
}
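// Pull up to `block_size_limit` transactions out of the mempool, keeping only those that
// are not double spends with respect to the state witness at the current tip and whose
// signatures verify; anything else is skipped for this block.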
while tx_counter < block_size_limit {
match key_iter.next() {
Some(hash) => {
//println!("Miner: tx: {:?}",hash);
//println!("Miner: preTx: {:?}, PreIndex: {:?}",mempool.getPreTxHash(hash), mempool.getPreIndex(hash));
//double spent check and verify signature
if stateWitness.ifNotDoubleSpent(&mempool.Transactions.get(hash).unwrap().transaction.Input, &self.blockchain.lock().unwrap().tip.0) // NEW TODO: change it to state witness
&& mempool.Transactions.get(hash).unwrap().verifySignedTransaction() {
//info!("Miner: Adding to block HERE");
signedTransaction.push(mempool.Transactions.get(hash).unwrap().clone());
tx_counter = tx_counter + 1;
}
}
None => {
break;
}
}
}
std::mem::drop(mempool);
std::mem::drop(stateWitness);
if !signedTransaction.is_empty() {
//info!("MINER: ADDING...");
//info!("MINER: MERKLETREE CHECKING...");
let mut MerkleTree = MerkleTree::new(&signedTransaction);
//info!("MINER: MERKLETREE CHECKED");
let newContent = Content{
content: signedTransaction,
};
let newHeader = Header{
parent: self.blockchain.lock().unwrap().tip(),
nonce: nonce,
difficulty: difficulty,
timestamp: timestamp,
merkleRoot: MerkleTree.root(),
};
let newBlock = Block{
Header: newHeader,
Content: newContent,
};
//println!("1: {:?}", newBlock.hash() );
//println!("2: {:?}", difficulty );
//info!("MINER: BLOCK CREATED");
if newBlock.hash() <= difficulty | if check {
let mut blockchain = self.blockchain.lock().unwrap();
let tip_hash = blockchain.insert(&newBlock);
//info!("MINER: NEW BLOCK ADDED");
miner_counter += 1;
println!("MINER: CURRENT MINER COUNT: {:?}", miner_counter);
println!("MINER: CURRENT BLOCKCHAIN HEIGHT: {:?}", blockchain.tip.1);
//let mut state = self.state.lock().unwrap();
//let mut stateWitness = self.stateWitness.lock().unwrap();
let mut mempool = self.mempool.lock().unwrap();
mempool.updateMempool(&contents);
/*for key in state.Outputs.keys() {
println!("MINER: RECP: {:?}, VALUE {:?}", state.Outputs.get(key).unwrap().1, state.Outputs.get(key).unwrap().0);
}*/
self.server.broadcast(Message::NewBlockHashes(blockchain.all_blocks_in_longest_chain()));
//info!("MINER: BLOCK MESSAGES SENT");
std::mem::drop(blockchain);
std::mem::drop(mempool);
}
}
}
}
if let OperatingState::Run(i) = self.operating_state {
if i != 0 {
let interval = time::Duration::from_micros(i as u64);
thread::sleep(interval);
}
}
let interval = time::Duration::from_micros(1000 as u64);
thread::sleep(interval);
}
}
}
| {
let mut contents = newBlock.Content.content.clone();
//let mut state = self.state.lock().unwrap();
let mut stateWitness = self.stateWitness.lock().unwrap();
let mut mempool = self.mempool.lock().unwrap();
//let mut stateSet = self.stateSet.lock().unwrap();
let mut check = true;
for content in contents.iter(){
if stateWitness.ifNotDoubleSpent(&content.transaction.Input,&self.blockchain.lock().unwrap().tip.0 )
&& content.verifySignedTransaction() {//state.ifNotDoubleSpent(content)
check = check && true;
}
else{
check = check && false;
break;
}
}
std::mem::drop(stateWitness);
std::mem::drop(mempool); | conditional_block |
graphics.rs | use crate::mthelper::SharedRef;
use bedrock as br;
use br::{
CommandBuffer, CommandPool, Device, Instance, InstanceChild, PhysicalDevice, Queue,
SubmissionBatch,
};
use log::{debug, info, warn};
use std::ops::Deref;
pub type InstanceObject = SharedRef<br::InstanceObject>;
pub type DeviceObject = SharedRef<br::DeviceObject<InstanceObject>>;
/// Queue object with family index
pub struct QueueSet<Device: br::Device> {
pub(crate) q: parking_lot::Mutex<br::QueueObject<Device>>,
pub(crate) family: u32,
}
mod command_bundle;
pub use self::command_bundle::*;
#[cfg(feature = "mt")]
mod async_fence_driver;
#[cfg(feature = "mt")]
pub use self::async_fence_driver::*;
#[derive(Debug)]
pub enum GraphicsInitializationError {
LayerEnumerationFailed(br::VkResultBox),
VulkanError(br::VkResultBox),
NoPhysicalDevices,
NoSuitableGraphicsQueue,
}
impl From<br::VkResultBox> for GraphicsInitializationError {
fn from(value: br::VkResultBox) -> Self {
Self::VulkanError(value)
}
}
impl std::fmt::Display for GraphicsInitializationError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::LayerEnumerationFailed(r) => write!(f, "vk layer enumeration failed: {r}"),
Self::VulkanError(r) => std::fmt::Display::fmt(r, f),
Self::NoPhysicalDevices => write!(f, "no physical devices available on this machine"),
Self::NoSuitableGraphicsQueue => {
write!(f, "no suitable graphics queue found on device")
}
}
}
}
impl std::error::Error for GraphicsInitializationError {}
/// Graphics manager
pub struct Graphics {
pub(crate) adapter: br::PhysicalDeviceObject<InstanceObject>,
pub(crate) device: DeviceObject,
pub(crate) graphics_queue: QueueSet<DeviceObject>,
cp_onetime_submit: br::CommandPoolObject<DeviceObject>,
pub memory_type_manager: MemoryTypeManager,
#[cfg(feature = "mt")]
fence_reactor: FenceReactorThread<DeviceObject>,
#[cfg(feature = "debug")]
_debug_instance: br::DebugUtilsMessengerObject<InstanceObject>,
}
impl Graphics {
pub(crate) fn new(
app_name: &str,
app_version: (u32, u32, u32),
instance_extensions: Vec<&str>,
device_extensions: Vec<&str>,
features: br::vk::VkPhysicalDeviceFeatures,
) -> Result<Self, GraphicsInitializationError> {
info!("Supported Layers: ");
let mut validation_layer_available = false;
#[cfg(debug_assertions)]
for l in br::enumerate_layer_properties()
.map_err(GraphicsInitializationError::LayerEnumerationFailed)?
{
let name_str = l
.layerName
.as_cstr()
.expect("Failed to decode")
.to_str()
.expect("invalid sequence in layer name");
info!(
"* {name_str} :: {}/{}",
l.specVersion, l.implementationVersion
);
if name_str == "VK_LAYER_KHRONOS_validation" {
validation_layer_available = true;
}
}
let mut ib =
br::InstanceBuilder::new(app_name, app_version, "Interlude2:Peridot", (0, 1, 0));
ib.add_extensions(instance_extensions);
#[cfg(debug_assertions)]
ib.add_extension("VK_EXT_debug_report");
if validation_layer_available {
ib.add_layer("VK_LAYER_KHRONOS_validation");
} else {
| #[cfg(feature = "debug")]
{
ib.add_extension("VK_EXT_debug_utils");
debug!("Debug reporting activated");
}
let instance = SharedRef::new(ib.create()?);
#[cfg(feature = "debug")]
let _debug_instance = br::DebugUtilsMessengerCreateInfo::new(crate::debug::debug_utils_out)
.filter_severity(br::DebugUtilsMessageSeverityFlags::ERROR.and_warning())
.create(instance.clone())?;
let adapter = instance
.iter_physical_devices()?
.next()
.ok_or(GraphicsInitializationError::NoPhysicalDevices)?;
let memory_type_manager = MemoryTypeManager::new(&adapter);
MemoryTypeManager::diagnose_heaps(&adapter);
memory_type_manager.diagnose_types();
let gqf_index = adapter
.queue_family_properties()
.find_matching_index(br::QueueFlags::GRAPHICS)
.ok_or(GraphicsInitializationError::NoSuitableGraphicsQueue)?;
let qci = br::DeviceQueueCreateInfo(gqf_index, vec![0.0]);
let device = {
let mut db = br::DeviceBuilder::new(&adapter);
db.add_extensions(device_extensions).add_queue(qci);
if validation_layer_available {
db.add_layer("VK_LAYER_KHRONOS_validation");
}
*db.mod_features() = features;
SharedRef::new(db.create()?.clone_parent())
};
Ok(Self {
cp_onetime_submit: device.clone().new_command_pool(gqf_index, true, false)?,
graphics_queue: QueueSet {
q: parking_lot::Mutex::new(device.clone().queue(gqf_index, 0)),
family: gqf_index,
},
adapter: adapter.clone_parent(),
device,
memory_type_manager,
#[cfg(feature = "mt")]
fence_reactor: FenceReactorThread::new(),
#[cfg(feature = "debug")]
_debug_instance,
})
}
/// Submits any commands as transient commands.
pub fn submit_commands(
&mut self,
generator: impl FnOnce(
br::CmdRecord<br::CommandBufferObject<DeviceObject>>,
) -> br::CmdRecord<br::CommandBufferObject<DeviceObject>>,
) -> br::Result<()> {
let mut cb = LocalCommandBundle(
self.cp_onetime_submit.alloc(1, true)?,
&mut self.cp_onetime_submit,
);
generator(unsafe { cb[0].begin_once()? }).end()?;
self.graphics_queue.q.get_mut().submit(
&[br::EmptySubmissionBatch.with_command_buffers(&cb[..])],
None::<&mut br::FenceObject<DeviceObject>>,
)?;
self.graphics_queue.q.get_mut().wait()
}
pub fn submit_buffered_commands(
&mut self,
batches: &[impl br::SubmissionBatch],
fence: &mut (impl br::Fence + br::VkHandleMut),
) -> br::Result<()> {
self.graphics_queue.q.get_mut().submit(batches, Some(fence))
}
pub fn submit_buffered_commands_raw(
&mut self,
batches: &[br::vk::VkSubmitInfo],
fence: &mut (impl br::Fence + br::VkHandleMut),
) -> br::Result<()> {
self.graphics_queue
.q
.get_mut()
.submit_raw(batches, Some(fence))
}
/// Submits any commands as transient commands.
/// ## Note
/// Unlike other futures, commands are submitted **immediately**(even if not awaiting the returned future).
#[cfg(feature = "mt")]
pub fn submit_commands_async<'s>(
&'s self,
generator: impl FnOnce(
br::CmdRecord<br::CommandBufferObject<DeviceObject>>,
) -> br::CmdRecord<br::CommandBufferObject<DeviceObject>>,
) -> br::Result<impl std::future::Future<Output = br::Result<()>> + 's> {
let mut fence = std::sync::Arc::new(self.device.clone().new_fence(false)?);
let mut pool = self.device.clone().new_command_pool(
self.graphics_queue_family_index(),
true,
false,
)?;
let mut cb = CommandBundle(pool.alloc(1, true)?, pool);
generator(unsafe { cb[0].begin_once()? }).end()?;
self.graphics_queue.q.lock().submit(
&[br::EmptySubmissionBatch.with_command_buffers(&cb[..])],
Some(unsafe { std::sync::Arc::get_mut(&mut fence).unwrap_unchecked() }),
)?;
Ok(async move {
self.await_fence(fence).await?;
// keep alive command buffers while execution
drop(cb);
Ok(())
})
}
/// Awaits fence on background thread
#[cfg(feature = "mt")]
pub const fn await_fence<'s>(
&'s self,
fence: std::sync::Arc<
impl br::Fence<ConcreteDevice = DeviceObject> + Send + Sync + 'static,
>,
) -> impl std::future::Future<Output = br::Result<()>> + 's {
FenceWaitFuture {
reactor: &self.fence_reactor,
object: fence,
registered: false,
}
}
pub fn instance(&self) -> &InstanceObject {
self.device.instance()
}
pub const fn adapter(&self) -> &br::PhysicalDeviceObject<InstanceObject> {
&self.adapter
}
pub const fn device(&self) -> &DeviceObject {
&self.device
}
pub const fn graphics_queue_family_index(&self) -> u32 {
self.graphics_queue.family
}
}
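// Hypothetical usage sketch (added for illustration, not from the original source):
// flushing an empty transient command buffer through `submit_commands`. A real caller
// would record copy/blit/barrier commands inside the closure before returning it.
#[allow(dead_code)]
fn example_flush_transient_commands(g: &mut Graphics) -> br::Result<()> {
    // The closure receives the recording command buffer and must hand it back;
    // returning it unchanged submits an empty one-shot batch and waits on the graphics queue.
    g.submit_commands(|rec| rec)
}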
impl Deref for Graphics {
type Target = DeviceObject;
fn deref(&self) -> &DeviceObject {
&self.device
}
}
#[derive(Clone)]
pub struct MemoryType(u32, br::vk::VkMemoryType);
impl MemoryType {
pub const fn index(&self) -> u32 {
self.0
}
pub const fn corresponding_mask(&self) -> u32 {
0x01 << self.0
}
pub const fn has_covered_by_mask(&self, mask: u32) -> bool {
(mask & self.corresponding_mask()) != 0
}
pub const fn has_property_flags(&self, other: br::MemoryPropertyFlags) -> bool {
(self.1.propertyFlags & other.bits()) != 0
}
pub const fn is_device_local(&self) -> bool {
self.has_property_flags(br::MemoryPropertyFlags::DEVICE_LOCAL)
}
pub const fn visible_from_host(&self) -> bool {
self.has_property_flags(br::MemoryPropertyFlags::HOST_VISIBLE)
}
pub const fn is_host_coherent(&self) -> bool {
self.has_property_flags(br::MemoryPropertyFlags::HOST_COHERENT)
}
pub const fn is_host_cached(&self) -> bool {
self.has_property_flags(br::MemoryPropertyFlags::HOST_CACHED)
}
}
impl std::fmt::Debug for MemoryType {
fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
let mut flags = Vec::with_capacity(6);
if self.is_device_local() {
flags.push("DEVICE LOCAL");
}
if self.visible_from_host() {
flags.push("HOST VISIBLE");
}
if self.is_host_cached() {
flags.push("CACHED");
}
if self.is_host_coherent() {
flags.push("COHERENT");
}
if (self.1.propertyFlags & br::vk::VK_MEMORY_PROPERTY_PROTECTED_BIT) != 0 {
flags.push("PROTECTED");
}
if self.has_property_flags(br::MemoryPropertyFlags::LAZILY_ALLOCATED) {
flags.push("LAZILY ALLOCATED");
}
write!(
fmt,
"{}: [{}] in heap #{}",
self.index(),
flags.join("/"),
self.1.heapIndex
)
}
}
pub struct MemoryTypeManager {
device_memory_types: Vec<MemoryType>,
host_memory_types: Vec<MemoryType>,
}
impl MemoryTypeManager {
fn new(pd: &impl br::PhysicalDevice) -> Self {
let mem = pd.memory_properties();
let (mut device_memory_types, mut host_memory_types) = (Vec::new(), Vec::new());
for mt in mem
.types()
.enumerate()
.map(|(n, mt)| MemoryType(n as _, mt.clone()))
{
if mt.is_device_local() {
device_memory_types.push(mt.clone());
}
if mt.visible_from_host() {
host_memory_types.push(mt.clone());
}
}
Self {
device_memory_types,
host_memory_types,
}
}
pub fn exact_host_visible_index(
&self,
mask: u32,
required: br::MemoryPropertyFlags,
) -> Option<&MemoryType> {
self.host_memory_types
.iter()
.find(|mt| mt.has_covered_by_mask(mask) && mt.has_property_flags(required))
}
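/// Like `exact_host_visible_index`, but falls back to any host-visible type covered
/// by `mask` when no type also satisfies the preferred property flags.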
pub fn host_visible_index(
&self,
mask: u32,
preference: br::MemoryPropertyFlags,
) -> Option<&MemoryType> {
self.exact_host_visible_index(mask, preference).or_else(|| {
self.host_memory_types
.iter()
.find(|mt| mt.has_covered_by_mask(mask))
})
}
pub fn device_local_index(&self, mask: u32) -> Option<&MemoryType> {
self.device_memory_types
.iter()
.find(|mt| mt.has_covered_by_mask(mask))
}
fn diagnose_heaps(p: &impl br::PhysicalDevice) {
info!("Memory Heaps: ");
for (n, h) in p.memory_properties().heaps().enumerate() {
let (mut nb, mut unit) = (h.size as f32, "bytes");
if nb >= 10000.0 {
nb /= 1024.0;
unit = "KB";
}
if nb >= 10000.0 {
nb /= 1024.0;
unit = "MB";
}
if nb >= 10000.0 {
nb /= 1024.0;
unit = "GB";
}
let is_device_local = (h.flags & br::vk::VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0;
info!(
" #{n}: {nb} {unit} {}",
if is_device_local {
"[DEVICE_LOCAL]"
} else {
""
}
);
}
}
fn diagnose_types(&self) {
info!("Device Memory Types: ");
for mt in &self.device_memory_types {
info!(" {:?}", mt);
}
info!("Host Visible Memory Types: ");
for mt in &self.host_memory_types {
info!(" {:?}", mt);
}
}
} | warn!("Validation Layer is not found!");
}
| random_line_split |
graphics.rs | use crate::mthelper::SharedRef;
use bedrock as br;
use br::{
CommandBuffer, CommandPool, Device, Instance, InstanceChild, PhysicalDevice, Queue,
SubmissionBatch,
};
use log::{debug, info, warn};
use std::ops::Deref;
pub type InstanceObject = SharedRef<br::InstanceObject>;
pub type DeviceObject = SharedRef<br::DeviceObject<InstanceObject>>;
/// Queue object with family index
pub struct QueueSet<Device: br::Device> {
pub(crate) q: parking_lot::Mutex<br::QueueObject<Device>>,
pub(crate) family: u32,
}
mod command_bundle;
pub use self::command_bundle::*;
#[cfg(feature = "mt")]
mod async_fence_driver;
#[cfg(feature = "mt")]
pub use self::async_fence_driver::*;
#[derive(Debug)]
pub enum GraphicsInitializationError {
LayerEnumerationFailed(br::VkResultBox),
VulkanError(br::VkResultBox),
NoPhysicalDevices,
NoSuitableGraphicsQueue,
}
impl From<br::VkResultBox> for GraphicsInitializationError {
fn from(value: br::VkResultBox) -> Self {
Self::VulkanError(value)
}
}
impl std::fmt::Display for GraphicsInitializationError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::LayerEnumerationFailed(r) => write!(f, "vk layer enumeration failed: {r}"),
Self::VulkanError(r) => std::fmt::Display::fmt(r, f),
Self::NoPhysicalDevices => write!(f, "no physical devices available on this machine"),
Self::NoSuitableGraphicsQueue => {
write!(f, "no suitable graphics queue found on device")
}
}
}
}
impl std::error::Error for GraphicsInitializationError {}
/// Graphics manager
pub struct Graphics {
pub(crate) adapter: br::PhysicalDeviceObject<InstanceObject>,
pub(crate) device: DeviceObject,
pub(crate) graphics_queue: QueueSet<DeviceObject>,
cp_onetime_submit: br::CommandPoolObject<DeviceObject>,
pub memory_type_manager: MemoryTypeManager,
#[cfg(feature = "mt")]
fence_reactor: FenceReactorThread<DeviceObject>,
#[cfg(feature = "debug")]
_debug_instance: br::DebugUtilsMessengerObject<InstanceObject>,
}
impl Graphics {
pub(crate) fn new(
app_name: &str,
app_version: (u32, u32, u32),
instance_extensions: Vec<&str>,
device_extensions: Vec<&str>,
features: br::vk::VkPhysicalDeviceFeatures,
) -> Result<Self, GraphicsInitializationError> {
info!("Supported Layers: ");
let mut validation_layer_available = false;
#[cfg(debug_assertions)]
for l in br::enumerate_layer_properties()
.map_err(GraphicsInitializationError::LayerEnumerationFailed)?
{
let name_str = l
.layerName
.as_cstr()
.expect("Failed to decode")
.to_str()
.expect("invalid sequence in layer name");
info!(
"* {name_str} :: {}/{}",
l.specVersion, l.implementationVersion
);
if name_str == "VK_LAYER_KHRONOS_validation" {
validation_layer_available = true;
}
}
let mut ib =
br::InstanceBuilder::new(app_name, app_version, "Interlude2:Peridot", (0, 1, 0));
ib.add_extensions(instance_extensions);
#[cfg(debug_assertions)]
ib.add_extension("VK_EXT_debug_report");
if validation_layer_available {
ib.add_layer("VK_LAYER_KHRONOS_validation");
} else {
warn!("Validation Layer is not found!");
}
#[cfg(feature = "debug")]
{
ib.add_extension("VK_EXT_debug_utils");
debug!("Debug reporting activated");
}
let instance = SharedRef::new(ib.create()?);
#[cfg(feature = "debug")]
let _debug_instance = br::DebugUtilsMessengerCreateInfo::new(crate::debug::debug_utils_out)
.filter_severity(br::DebugUtilsMessageSeverityFlags::ERROR.and_warning())
.create(instance.clone())?;
let adapter = instance
.iter_physical_devices()?
.next()
.ok_or(GraphicsInitializationError::NoPhysicalDevices)?;
let memory_type_manager = MemoryTypeManager::new(&adapter);
MemoryTypeManager::diagnose_heaps(&adapter);
memory_type_manager.diagnose_types();
let gqf_index = adapter
.queue_family_properties()
.find_matching_index(br::QueueFlags::GRAPHICS)
.ok_or(GraphicsInitializationError::NoSuitableGraphicsQueue)?;
let qci = br::DeviceQueueCreateInfo(gqf_index, vec![0.0]);
let device = {
let mut db = br::DeviceBuilder::new(&adapter);
db.add_extensions(device_extensions).add_queue(qci);
if validation_layer_available {
db.add_layer("VK_LAYER_KHRONOS_validation");
}
*db.mod_features() = features;
SharedRef::new(db.create()?.clone_parent())
};
Ok(Self {
cp_onetime_submit: device.clone().new_command_pool(gqf_index, true, false)?,
graphics_queue: QueueSet {
q: parking_lot::Mutex::new(device.clone().queue(gqf_index, 0)),
family: gqf_index,
},
adapter: adapter.clone_parent(),
device,
memory_type_manager,
#[cfg(feature = "mt")]
fence_reactor: FenceReactorThread::new(),
#[cfg(feature = "debug")]
_debug_instance,
})
}
/// Submits any commands as transient commands.
pub fn submit_commands(
&mut self,
generator: impl FnOnce(
br::CmdRecord<br::CommandBufferObject<DeviceObject>>,
) -> br::CmdRecord<br::CommandBufferObject<DeviceObject>>,
) -> br::Result<()> {
let mut cb = LocalCommandBundle(
self.cp_onetime_submit.alloc(1, true)?,
&mut self.cp_onetime_submit,
);
generator(unsafe { cb[0].begin_once()? }).end()?;
self.graphics_queue.q.get_mut().submit(
&[br::EmptySubmissionBatch.with_command_buffers(&cb[..])],
None::<&mut br::FenceObject<DeviceObject>>,
)?;
self.graphics_queue.q.get_mut().wait()
}
pub fn submit_buffered_commands(
&mut self,
batches: &[impl br::SubmissionBatch],
fence: &mut (impl br::Fence + br::VkHandleMut),
) -> br::Result<()> {
self.graphics_queue.q.get_mut().submit(batches, Some(fence))
}
pub fn | (
&mut self,
batches: &[br::vk::VkSubmitInfo],
fence: &mut (impl br::Fence + br::VkHandleMut),
) -> br::Result<()> {
self.graphics_queue
.q
.get_mut()
.submit_raw(batches, Some(fence))
}
/// Submits any commands as transient commands.
/// ## Note
/// Unlike other futures, commands are submitted **immediately**(even if not awaiting the returned future).
#[cfg(feature = "mt")]
pub fn submit_commands_async<'s>(
&'s self,
generator: impl FnOnce(
br::CmdRecord<br::CommandBufferObject<DeviceObject>>,
) -> br::CmdRecord<br::CommandBufferObject<DeviceObject>>,
) -> br::Result<impl std::future::Future<Output = br::Result<()>> + 's> {
let mut fence = std::sync::Arc::new(self.device.clone().new_fence(false)?);
let mut pool = self.device.clone().new_command_pool(
self.graphics_queue_family_index(),
true,
false,
)?;
let mut cb = CommandBundle(pool.alloc(1, true)?, pool);
generator(unsafe { cb[0].begin_once()? }).end()?;
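// `fence` was created a few lines above and has no other owners yet, so taking a unique
// reference through `Arc::get_mut(...).unwrap_unchecked()` below cannot fail; the future
// returned afterwards keeps `cb` and `fence` alive until the submission completes.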
self.graphics_queue.q.lock().submit(
&[br::EmptySubmissionBatch.with_command_buffers(&cb[..])],
Some(unsafe { std::sync::Arc::get_mut(&mut fence).unwrap_unchecked() }),
)?;
Ok(async move {
self.await_fence(fence).await?;
// keep alive command buffers while execution
drop(cb);
Ok(())
})
}
/// Awaits fence on background thread
#[cfg(feature = "mt")]
pub const fn await_fence<'s>(
&'s self,
fence: std::sync::Arc<
impl br::Fence<ConcreteDevice = DeviceObject> + Send + Sync + 'static,
>,
) -> impl std::future::Future<Output = br::Result<()>> + 's {
FenceWaitFuture {
reactor: &self.fence_reactor,
object: fence,
registered: false,
}
}
pub fn instance(&self) -> &InstanceObject {
self.device.instance()
}
pub const fn adapter(&self) -> &br::PhysicalDeviceObject<InstanceObject> {
&self.adapter
}
pub const fn device(&self) -> &DeviceObject {
&self.device
}
pub const fn graphics_queue_family_index(&self) -> u32 {
self.graphics_queue.family
}
}
impl Deref for Graphics {
type Target = DeviceObject;
fn deref(&self) -> &DeviceObject {
&self.device
}
}
#[derive(Clone)]
pub struct MemoryType(u32, br::vk::VkMemoryType);
impl MemoryType {
pub const fn index(&self) -> u32 {
self.0
}
pub const fn corresponding_mask(&self) -> u32 {
0x01 << self.0
}
pub const fn has_covered_by_mask(&self, mask: u32) -> bool {
(mask & self.corresponding_mask()) != 0
}
pub const fn has_property_flags(&self, other: br::MemoryPropertyFlags) -> bool {
(self.1.propertyFlags & other.bits()) != 0
}
pub const fn is_device_local(&self) -> bool {
self.has_property_flags(br::MemoryPropertyFlags::DEVICE_LOCAL)
}
pub const fn visible_from_host(&self) -> bool {
self.has_property_flags(br::MemoryPropertyFlags::HOST_VISIBLE)
}
pub const fn is_host_coherent(&self) -> bool {
self.has_property_flags(br::MemoryPropertyFlags::HOST_COHERENT)
}
pub const fn is_host_cached(&self) -> bool {
self.has_property_flags(br::MemoryPropertyFlags::HOST_CACHED)
}
}
impl std::fmt::Debug for MemoryType {
fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
let mut flags = Vec::with_capacity(6);
if self.is_device_local() {
flags.push("DEVICE LOCAL");
}
if self.visible_from_host() {
flags.push("HOST VISIBLE");
}
if self.is_host_cached() {
flags.push("CACHED");
}
if self.is_host_coherent() {
flags.push("COHERENT");
}
if (self.1.propertyFlags & br::vk::VK_MEMORY_PROPERTY_PROTECTED_BIT) != 0 {
flags.push("PROTECTED");
}
if self.has_property_flags(br::MemoryPropertyFlags::LAZILY_ALLOCATED) {
flags.push("LAZILY ALLOCATED");
}
write!(
fmt,
"{}: [{}] in heap #{}",
self.index(),
flags.join("/"),
self.1.heapIndex
)
}
}
pub struct MemoryTypeManager {
device_memory_types: Vec<MemoryType>,
host_memory_types: Vec<MemoryType>,
}
impl MemoryTypeManager {
fn new(pd: &impl br::PhysicalDevice) -> Self {
let mem = pd.memory_properties();
let (mut device_memory_types, mut host_memory_types) = (Vec::new(), Vec::new());
for mt in mem
.types()
.enumerate()
.map(|(n, mt)| MemoryType(n as _, mt.clone()))
{
if mt.is_device_local() {
device_memory_types.push(mt.clone());
}
if mt.visible_from_host() {
host_memory_types.push(mt.clone());
}
}
Self {
device_memory_types,
host_memory_types,
}
}
pub fn exact_host_visible_index(
&self,
mask: u32,
required: br::MemoryPropertyFlags,
) -> Option<&MemoryType> {
self.host_memory_types
.iter()
.find(|mt| mt.has_covered_by_mask(mask) && mt.has_property_flags(required))
}
pub fn host_visible_index(
&self,
mask: u32,
preference: br::MemoryPropertyFlags,
) -> Option<&MemoryType> {
self.exact_host_visible_index(mask, preference).or_else(|| {
self.host_memory_types
.iter()
.find(|mt| mt.has_covered_by_mask(mask))
})
}
pub fn device_local_index(&self, mask: u32) -> Option<&MemoryType> {
self.device_memory_types
.iter()
.find(|mt| mt.has_covered_by_mask(mask))
}
fn diagnose_heaps(p: &impl br::PhysicalDevice) {
info!("Memory Heaps: ");
for (n, h) in p.memory_properties().heaps().enumerate() {
let (mut nb, mut unit) = (h.size as f32, "bytes");
if nb >= 10000.0 {
nb /= 1024.0;
unit = "KB";
}
if nb >= 10000.0 {
nb /= 1024.0;
unit = "MB";
}
if nb >= 10000.0 {
nb /= 1024.0;
unit = "GB";
}
let is_device_local = (h.flags & br::vk::VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0;
info!(
" #{n}: {nb} {unit} {}",
if is_device_local {
"[DEVICE_LOCAL]"
} else {
""
}
);
}
}
fn diagnose_types(&self) {
info!("Device Memory Types: ");
for mt in &self.device_memory_types {
info!(" {:?}", mt);
}
info!("Host Visible Memory Types: ");
for mt in &self.host_memory_types {
info!(" {:?}", mt);
}
}
}
| submit_buffered_commands_raw | identifier_name |
graphics.rs | use crate::mthelper::SharedRef;
use bedrock as br;
use br::{
CommandBuffer, CommandPool, Device, Instance, InstanceChild, PhysicalDevice, Queue,
SubmissionBatch,
};
use log::{debug, info, warn};
use std::ops::Deref;
pub type InstanceObject = SharedRef<br::InstanceObject>;
pub type DeviceObject = SharedRef<br::DeviceObject<InstanceObject>>;
/// Queue object with family index
pub struct QueueSet<Device: br::Device> {
pub(crate) q: parking_lot::Mutex<br::QueueObject<Device>>,
pub(crate) family: u32,
}
mod command_bundle;
pub use self::command_bundle::*;
#[cfg(feature = "mt")]
mod async_fence_driver;
#[cfg(feature = "mt")]
pub use self::async_fence_driver::*;
#[derive(Debug)]
pub enum GraphicsInitializationError {
LayerEnumerationFailed(br::VkResultBox),
VulkanError(br::VkResultBox),
NoPhysicalDevices,
NoSuitableGraphicsQueue,
}
impl From<br::VkResultBox> for GraphicsInitializationError {
fn from(value: br::VkResultBox) -> Self {
Self::VulkanError(value)
}
}
impl std::fmt::Display for GraphicsInitializationError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::LayerEnumerationFailed(r) => write!(f, "vk layer enumeration failed: {r}"),
Self::VulkanError(r) => std::fmt::Display::fmt(r, f),
Self::NoPhysicalDevices => write!(f, "no physical devices available on this machine"),
Self::NoSuitableGraphicsQueue => {
write!(f, "no suitable graphics queue found on device")
}
}
}
}
impl std::error::Error for GraphicsInitializationError {}
/// Graphics manager
pub struct Graphics {
pub(crate) adapter: br::PhysicalDeviceObject<InstanceObject>,
pub(crate) device: DeviceObject,
pub(crate) graphics_queue: QueueSet<DeviceObject>,
cp_onetime_submit: br::CommandPoolObject<DeviceObject>,
pub memory_type_manager: MemoryTypeManager,
#[cfg(feature = "mt")]
fence_reactor: FenceReactorThread<DeviceObject>,
#[cfg(feature = "debug")]
_debug_instance: br::DebugUtilsMessengerObject<InstanceObject>,
}
impl Graphics {
pub(crate) fn new(
app_name: &str,
app_version: (u32, u32, u32),
instance_extensions: Vec<&str>,
device_extensions: Vec<&str>,
features: br::vk::VkPhysicalDeviceFeatures,
) -> Result<Self, GraphicsInitializationError> {
info!("Supported Layers: ");
let mut validation_layer_available = false;
#[cfg(debug_assertions)]
for l in br::enumerate_layer_properties()
.map_err(GraphicsInitializationError::LayerEnumerationFailed)?
{
let name_str = l
.layerName
.as_cstr()
.expect("Failed to decode")
.to_str()
.expect("invalid sequence in layer name");
info!(
"* {name_str} :: {}/{}",
l.specVersion, l.implementationVersion
);
if name_str == "VK_LAYER_KHRONOS_validation" {
validation_layer_available = true;
}
}
let mut ib =
br::InstanceBuilder::new(app_name, app_version, "Interlude2:Peridot", (0, 1, 0));
ib.add_extensions(instance_extensions);
#[cfg(debug_assertions)]
ib.add_extension("VK_EXT_debug_report");
if validation_layer_available {
ib.add_layer("VK_LAYER_KHRONOS_validation");
} else {
warn!("Validation Layer is not found!");
}
#[cfg(feature = "debug")]
{
ib.add_extension("VK_EXT_debug_utils");
debug!("Debug reporting activated");
}
let instance = SharedRef::new(ib.create()?);
#[cfg(feature = "debug")]
let _debug_instance = br::DebugUtilsMessengerCreateInfo::new(crate::debug::debug_utils_out)
.filter_severity(br::DebugUtilsMessageSeverityFlags::ERROR.and_warning())
.create(instance.clone())?;
let adapter = instance
.iter_physical_devices()?
.next()
.ok_or(GraphicsInitializationError::NoPhysicalDevices)?;
let memory_type_manager = MemoryTypeManager::new(&adapter);
MemoryTypeManager::diagnose_heaps(&adapter);
memory_type_manager.diagnose_types();
let gqf_index = adapter
.queue_family_properties()
.find_matching_index(br::QueueFlags::GRAPHICS)
.ok_or(GraphicsInitializationError::NoSuitableGraphicsQueue)?;
let qci = br::DeviceQueueCreateInfo(gqf_index, vec![0.0]);
let device = {
let mut db = br::DeviceBuilder::new(&adapter);
db.add_extensions(device_extensions).add_queue(qci);
if validation_layer_available {
db.add_layer("VK_LAYER_KHRONOS_validation");
}
*db.mod_features() = features;
SharedRef::new(db.create()?.clone_parent())
};
Ok(Self {
cp_onetime_submit: device.clone().new_command_pool(gqf_index, true, false)?,
graphics_queue: QueueSet {
q: parking_lot::Mutex::new(device.clone().queue(gqf_index, 0)),
family: gqf_index,
},
adapter: adapter.clone_parent(),
device,
memory_type_manager,
#[cfg(feature = "mt")]
fence_reactor: FenceReactorThread::new(),
#[cfg(feature = "debug")]
_debug_instance,
})
}
/// Submits any commands as transient commands.
pub fn submit_commands(
&mut self,
generator: impl FnOnce(
br::CmdRecord<br::CommandBufferObject<DeviceObject>>,
) -> br::CmdRecord<br::CommandBufferObject<DeviceObject>>,
) -> br::Result<()> {
let mut cb = LocalCommandBundle(
self.cp_onetime_submit.alloc(1, true)?,
&mut self.cp_onetime_submit,
);
generator(unsafe { cb[0].begin_once()? }).end()?;
self.graphics_queue.q.get_mut().submit(
&[br::EmptySubmissionBatch.with_command_buffers(&cb[..])],
None::<&mut br::FenceObject<DeviceObject>>,
)?;
self.graphics_queue.q.get_mut().wait()
}
pub fn submit_buffered_commands(
&mut self,
batches: &[impl br::SubmissionBatch],
fence: &mut (impl br::Fence + br::VkHandleMut),
) -> br::Result<()> {
self.graphics_queue.q.get_mut().submit(batches, Some(fence))
}
pub fn submit_buffered_commands_raw(
&mut self,
batches: &[br::vk::VkSubmitInfo],
fence: &mut (impl br::Fence + br::VkHandleMut),
) -> br::Result<()> {
self.graphics_queue
.q
.get_mut()
.submit_raw(batches, Some(fence))
}
/// Submits any commands as transient commands.
/// ## Note
/// Unlike other futures, commands are submitted **immediately**(even if not awaiting the returned future).
#[cfg(feature = "mt")]
pub fn submit_commands_async<'s>(
&'s self,
generator: impl FnOnce(
br::CmdRecord<br::CommandBufferObject<DeviceObject>>,
) -> br::CmdRecord<br::CommandBufferObject<DeviceObject>>,
) -> br::Result<impl std::future::Future<Output = br::Result<()>> + 's> {
let mut fence = std::sync::Arc::new(self.device.clone().new_fence(false)?);
let mut pool = self.device.clone().new_command_pool(
self.graphics_queue_family_index(),
true,
false,
)?;
let mut cb = CommandBundle(pool.alloc(1, true)?, pool);
generator(unsafe { cb[0].begin_once()? }).end()?;
self.graphics_queue.q.lock().submit(
&[br::EmptySubmissionBatch.with_command_buffers(&cb[..])],
Some(unsafe { std::sync::Arc::get_mut(&mut fence).unwrap_unchecked() }),
)?;
Ok(async move {
self.await_fence(fence).await?;
// keep alive command buffers while execution
drop(cb);
Ok(())
})
}
/// Awaits fence on background thread
#[cfg(feature = "mt")]
pub const fn await_fence<'s>(
&'s self,
fence: std::sync::Arc<
impl br::Fence<ConcreteDevice = DeviceObject> + Send + Sync + 'static,
>,
) -> impl std::future::Future<Output = br::Result<()>> + 's {
FenceWaitFuture {
reactor: &self.fence_reactor,
object: fence,
registered: false,
}
}
pub fn instance(&self) -> &InstanceObject {
self.device.instance()
}
pub const fn adapter(&self) -> &br::PhysicalDeviceObject<InstanceObject> {
&self.adapter
}
pub const fn device(&self) -> &DeviceObject {
&self.device
}
pub const fn graphics_queue_family_index(&self) -> u32 {
self.graphics_queue.family
}
}
impl Deref for Graphics {
type Target = DeviceObject;
fn deref(&self) -> &DeviceObject {
&self.device
}
}
#[derive(Clone)]
pub struct MemoryType(u32, br::vk::VkMemoryType);
impl MemoryType {
pub const fn index(&self) -> u32 {
self.0
}
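/// Single-bit mask for this memory type's index, in the same layout as
/// `VkMemoryRequirements::memoryTypeBits`, so it can be tested directly against the
/// `mask` values reported for buffers and images.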
pub const fn corresponding_mask(&self) -> u32 {
0x01 << self.0
}
pub const fn has_covered_by_mask(&self, mask: u32) -> bool {
(mask & self.corresponding_mask()) != 0
}
pub const fn has_property_flags(&self, other: br::MemoryPropertyFlags) -> bool {
(self.1.propertyFlags & other.bits()) != 0
}
pub const fn is_device_local(&self) -> bool |
pub const fn visible_from_host(&self) -> bool {
self.has_property_flags(br::MemoryPropertyFlags::HOST_VISIBLE)
}
pub const fn is_host_coherent(&self) -> bool {
self.has_property_flags(br::MemoryPropertyFlags::HOST_COHERENT)
}
pub const fn is_host_cached(&self) -> bool {
self.has_property_flags(br::MemoryPropertyFlags::HOST_CACHED)
}
}
impl std::fmt::Debug for MemoryType {
fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
let mut flags = Vec::with_capacity(6);
if self.is_device_local() {
flags.push("DEVICE LOCAL");
}
if self.visible_from_host() {
flags.push("HOST VISIBLE");
}
if self.is_host_cached() {
flags.push("CACHED");
}
if self.is_host_coherent() {
flags.push("COHERENT");
}
if (self.1.propertyFlags & br::vk::VK_MEMORY_PROPERTY_PROTECTED_BIT) != 0 {
flags.push("PROTECTED");
}
if self.has_property_flags(br::MemoryPropertyFlags::LAZILY_ALLOCATED) {
flags.push("LAZILY ALLOCATED");
}
write!(
fmt,
"{}: [{}] in heap #{}",
self.index(),
flags.join("/"),
self.1.heapIndex
)
}
}
pub struct MemoryTypeManager {
device_memory_types: Vec<MemoryType>,
host_memory_types: Vec<MemoryType>,
}
impl MemoryTypeManager {
fn new(pd: &impl br::PhysicalDevice) -> Self {
let mem = pd.memory_properties();
let (mut device_memory_types, mut host_memory_types) = (Vec::new(), Vec::new());
for mt in mem
.types()
.enumerate()
.map(|(n, mt)| MemoryType(n as _, mt.clone()))
{
if mt.is_device_local() {
device_memory_types.push(mt.clone());
}
if mt.visible_from_host() {
host_memory_types.push(mt.clone());
}
}
Self {
device_memory_types,
host_memory_types,
}
}
pub fn exact_host_visible_index(
&self,
mask: u32,
required: br::MemoryPropertyFlags,
) -> Option<&MemoryType> {
self.host_memory_types
.iter()
.find(|mt| mt.has_covered_by_mask(mask) && mt.has_property_flags(required))
}
pub fn host_visible_index(
&self,
mask: u32,
preference: br::MemoryPropertyFlags,
) -> Option<&MemoryType> {
self.exact_host_visible_index(mask, preference).or_else(|| {
self.host_memory_types
.iter()
.find(|mt| mt.has_covered_by_mask(mask))
})
}
pub fn device_local_index(&self, mask: u32) -> Option<&MemoryType> {
self.device_memory_types
.iter()
.find(|mt| mt.has_covered_by_mask(mask))
}
fn diagnose_heaps(p: &impl br::PhysicalDevice) {
info!("Memory Heaps: ");
for (n, h) in p.memory_properties().heaps().enumerate() {
let (mut nb, mut unit) = (h.size as f32, "bytes");
if nb >= 10000.0 {
nb /= 1024.0;
unit = "KB";
}
if nb >= 10000.0 {
nb /= 1024.0;
unit = "MB";
}
if nb >= 10000.0 {
nb /= 1024.0;
unit = "GB";
}
let is_device_local = (h.flags & br::vk::VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0;
info!(
" #{n}: {nb} {unit} {}",
if is_device_local {
"[DEVICE_LOCAL]"
} else {
""
}
);
}
}
fn diagnose_types(&self) {
info!("Device Memory Types: ");
for mt in &self.device_memory_types {
info!(" {:?}", mt);
}
info!("Host Visible Memory Types: ");
for mt in &self.host_memory_types {
info!(" {:?}", mt);
}
}
}
| {
self.has_property_flags(br::MemoryPropertyFlags::DEVICE_LOCAL)
} | identifier_body |